text stringlengths 4 1.02M | meta dict |
|---|---|
import bandit
from bandit.core.test_properties import *
@checks('Call')
def request_with_no_cert_validation(context):
    """Flag ``requests`` HTTP calls made with ``verify=False``.

    Disabling certificate verification permits man-in-the-middle attacks
    (bandit check B501).

    Args:
        context: bandit call-node context wrapper for the AST ``Call``.

    Returns:
        bandit.Issue when the call disables verification, else ``None``.
    """
    # Every requests API verb accepts ``verify``, not only get/post, so
    # check them all.  Use exact membership rather than substring matching
    # (the original ``'get' in name`` also matched e.g. ``get_thing``,
    # producing false positives).
    http_verbs = ('get', 'options', 'head', 'post', 'put', 'patch', 'delete')
    if ('requests' in context.call_function_name_qual and
            context.call_function_name in http_verbs):
        if context.check_call_arg_value('verify', 'False'):
            return bandit.Issue(
                severity=bandit.HIGH,
                confidence=bandit.HIGH,
                text="Requests call with verify=False disabling SSL "
                     "certificate checks, security issue. %s" %
                     context.call_args_string
            )
| {
"content_hash": "fdcc24a73068911b42591a383a4acb97",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 69,
"avg_line_length": 31.666666666666668,
"alnum_prop": 0.5729323308270676,
"repo_name": "austin987/bandit",
"id": "d0273bdbd123caa2912d781bf926c8b8d9ffd142",
"size": "1297",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bandit/plugins/crypto_request_no_cert_validation.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "180988"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema for the demo app: Abode, Citizen, City, Hobby, World.

    The cross-model ForeignKeys (Citizen.abode inline; City.world,
    Abode.city and Abode.owner via trailing AddField operations) are split
    out so that every referenced model already exists when the FK column
    is created — do not reorder these operations.

    NOTE(review): the byte-string choices/defaults (``b'SH'``, ``b''``)
    look Python 2-era; confirm they render correctly on Python 3.
    """

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Abode',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('type', models.CharField(choices=[(b'SH', b'Small house'), (b'H', b'House'), (b'SB', b'Small building'), (b'B', b'Building')], max_length=2)),
            ],
        ),
        migrations.CreateModel(
            name='Citizen',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('first_name', models.CharField(max_length=20)),
                ('middle_name', models.CharField(blank=True, default=b'', max_length=20)),
                ('last_name', models.CharField(max_length=20)),
                ('birth', models.DateField()),
                ('gender', models.CharField(choices=[(b'M', b'Male'), (b'F', b'Female')], max_length=1)),
                ('abode', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='citizens', to='demo.Abode')),
            ],
        ),
        migrations.CreateModel(
            name='City',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=40)),
            ],
        ),
        migrations.CreateModel(
            name='Hobby',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=40)),
                ('citizens', models.ManyToManyField(to='demo.Citizen')),
            ],
        ),
        migrations.CreateModel(
            name='World',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=30, unique=True)),
                ('description', models.TextField(blank=True, default=b'')),
            ],
        ),
        # FKs added after all CreateModel operations so their targets exist.
        migrations.AddField(
            model_name='city',
            name='world',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='cities', to='demo.World'),
        ),
        migrations.AddField(
            model_name='abode',
            name='city',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='abodes', to='demo.City'),
        ),
        migrations.AddField(
            model_name='abode',
            name='owner',
            field=models.OneToOneField(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='owned_abode', to='demo.Citizen'),
        ),
    ]
| {
"content_hash": "82daaebfbb9471907b246008ae9686d2",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 159,
"avg_line_length": 41.861111111111114,
"alnum_prop": 0.5491041804910418,
"repo_name": "novafloss/populous",
"id": "d8933a5459dc668d3788d51d6142a1b2ec7fddd0",
"size": "3087",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "demo/demo/migrations/0001_initial.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "132109"
}
],
"symlink_target": ""
} |
"""
Created on Tue Aug 20 18:13:37 2019.
@author: tageldim
"""
import os
import sys
import pytest
from imageio import imread
from pandas import read_csv
from histomicstk.annotations_and_masks.annotation_and_mask_utils import \
delete_annotations_in_slide
from histomicstk.annotations_and_masks.masks_to_annotations_handler import (
get_annotation_documents_from_contours, get_contours_from_mask)
thisDir = os.path.dirname(os.path.realpath(__file__))
sys.path.insert(0, os.path.join(thisDir, '../../../tests'))
from tests.htk_test_utilities import getTestFilePath, girderClient # noqa
class TestMasksToAnnotations:
    """Test methods for getting ROI mask from annotations."""

    def _setup(self):
        """Load shared fixtures (ground-truth codes, expected contours,
        test mask).  Called explicitly by each test, not a pytest fixture.
        """
        # read GTCodes dataframe
        gtcodePath = getTestFilePath('sample_GTcodes.csv')
        self.GTCodes_df = read_csv(gtcodePath)
        self.GTCodes_df.index = self.GTCodes_df.loc[:, 'group']
        # read sample contours_df dataframe to test against
        contoursDfPath = getTestFilePath(os.path.join(
            'annotations_and_masks', 'sample_contours_df.tsv'))
        self.CONTOURS_DF = read_csv(contoursDfPath, sep='\t', index_col=0)
        # read mask
        self.X_OFFSET = 59206
        self.Y_OFFSET = 33505
        self.MASKNAME = (
            'TCGA-A2-A0YE-01Z-00-DX1.8A2E3094-5755-42BC-969D-7F0A2ECA0F39_'
            'left-%d_top-%d_mag-BASE.png' % (self.X_OFFSET, self.Y_OFFSET))
        MASKPATH = getTestFilePath(os.path.join(
            'annotations_and_masks', self.MASKNAME))
        self.MASK = imread(MASKPATH)

    def test_get_contours_from_mask(self):
        """Test get_contours_from_mask()."""
        self._setup()
        # get contours from mask
        # groups_to_get = [
        #     'mostly_tumor', 'mostly_stroma']
        groups_to_get = None
        contours_df = get_contours_from_mask(
            MASK=self.MASK, GTCodes_df=self.GTCodes_df,
            groups_to_get=groups_to_get,
            get_roi_contour=True, roi_group='roi',
            discard_nonenclosed_background=True,
            background_group='mostly_stroma',
            MIN_SIZE=30, MAX_SIZE=None, verbose=False,
            monitorPrefix=self.MASKNAME[:12] + ": getting contours")
        # make sure it is what we expect
        assert set(contours_df.columns) == set(self.CONTOURS_DF.columns)
        assert all(contours_df.iloc[:10, :] == self.CONTOURS_DF.iloc[:10, :])

    def test_get_contours_from_mask_with_groups(self):
        """Test get_contours_from_mask()."""
        self._setup()
        # get contours from mask, restricted to two annotation groups
        groups_to_get = [
            'mostly_tumor', 'mostly_stroma']
        contours_df = get_contours_from_mask(
            MASK=self.MASK, GTCodes_df=self.GTCodes_df,
            groups_to_get=groups_to_get,
            get_roi_contour=True, roi_group='roi',
            discard_nonenclosed_background=True,
            background_group='mostly_stroma',
            MIN_SIZE=30, MAX_SIZE=None, verbose=False,
            monitorPrefix=self.MASKNAME[:12] + ": getting contours")
        # make sure it is what we expect
        assert set(contours_df.columns) == set(self.CONTOURS_DF.columns)
        assert all(contours_df.iloc[:10, :] == self.CONTOURS_DF.iloc[:10, :])
        assert len(contours_df) == 26

    def test_get_contours_from_mask_with_zeroes(self):
        """Test get_contours_from_mask()."""
        self._setup()
        groups_to_get = None
        # Add a group whose GT code is 0 (background pixels) to make sure
        # zero-valued labels are handled.
        # NOTE(review): DataFrame.append was removed in pandas 2.0; this
        # requires pandas < 2 or a rewrite using pandas.concat.
        gtcodes = self.GTCodes_df.append({
            'group': 'zeroes',
            'overlay_order': 4,
            'GT_code': 0,
            'is_roi': 0,
            'is_background_class': 0,
            'color': 'rgb(0,128,0)',
            'comments': 'zeroes'}, ignore_index=True)
        gtcodes.index = gtcodes.loc[:, 'group']
        contours_df = get_contours_from_mask(
            MASK=self.MASK, GTCodes_df=gtcodes,
            groups_to_get=groups_to_get,
            get_roi_contour=True, roi_group='roi',
            discard_nonenclosed_background=True,
            background_group='mostly_stroma',
            MIN_SIZE=30, MAX_SIZE=None)
        # make sure it is what we expect
        assert set(contours_df.columns) == set(self.CONTOURS_DF.columns)
        assert all(contours_df.iloc[:10, :] == self.CONTOURS_DF.iloc[:10, :])
        assert len(contours_df) == 49

    @pytest.mark.usefixtures('girderClient')  # noqa
    def test_get_annotation_documents_from_contours(self, girderClient):  # noqa
        """Test get_contours_from_bin_mask()."""
        self._setup()
        # Needs a live girder server (provided by the girderClient fixture).
        sampleSlideItem = girderClient.resourceLookup(
            '/user/admin/Public/TCGA-A2-A0YE-01Z-00-DX1.8A2E3094-5755-42BC-969D-7F0A2ECA0F39.svs')  # noqa
        sampleSlideId = str(sampleSlideItem['_id'])
        # get list of annotation documents
        annprops = {
            'X_OFFSET': self.X_OFFSET,
            'Y_OFFSET': self.Y_OFFSET,
            'opacity': 0.2,
            'lineWidth': 4.0,
        }
        annotation_docs = get_annotation_documents_from_contours(
            self.CONTOURS_DF.copy(), separate_docs_by_group=True,
            annots_per_doc=10, docnamePrefix='test', annprops=annprops,
            verbose=False,
            monitorPrefix=self.MASKNAME[:12] + ": annotation docs")
        # make sure its what we expect
        assert len(annotation_docs) == 8
        assert {j['name'] for j in annotation_docs} == {
            'test_blood_vessel-0',
            'test_exclude-0',
            'test_mostly_lymphocytic_infiltrate-0',
            'test_mostly_stroma-0',
            'test_mostly_tumor-0',
            'test_mostly_tumor-1',
            'test_normal_acinus_or_duct-0',
            'test_roi-0'
        }
        # deleting existing annotations in target slide (if any)
        delete_annotations_in_slide(girderClient, sampleSlideId)
        # post annotations to slide -- make sure it posts without errors
        resp = girderClient.post(
            "/annotation?itemId=" + sampleSlideId,
            json=annotation_docs[0])
        assert 'annotation' in resp.keys()
| {
"content_hash": "8f00267ad951c8e55748a763cde1f879",
"timestamp": "",
"source": "github",
"line_count": 153,
"max_line_length": 106,
"avg_line_length": 39.68627450980392,
"alnum_prop": 0.6009552042160737,
"repo_name": "DigitalSlideArchive/HistomicsTK",
"id": "fd7fb9ded6fa8a9061f3e496e6ceaa983cde8da3",
"size": "6072",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "histomicstk/annotations_and_masks/tests/test_masks_to_annotations_handler.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CMake",
"bytes": "1669"
},
{
"name": "Cython",
"bytes": "19226"
},
{
"name": "Dockerfile",
"bytes": "3235"
},
{
"name": "Python",
"bytes": "772710"
},
{
"name": "Shell",
"bytes": "965"
}
],
"symlink_target": ""
} |
__author__ = "Mateusz Kobos mkobos@icm.edu.pl"
from vipe.oozie.reader.reader import read as oozie_read
from vipe.oozie.converter.converter import convert as oozie_convert
from vipe.oozie.converter.iis import IISPipelineConverter
from vipe.graphviz.converter import Converter as PipelineConverter
from vipe.graphviz.image_converter import ImageConverter
def convert_oozie_to_dot(xml_oozie_string, detail_level,
                         show_input_ports, show_output_ports,
                         vertical_orientation):
    """Render an XML Oozie workflow definition as a DOT-format graph.

    See the docstring of `convert_oozie_to_png` for a description of
    the parameters.

    Return:
        string: graph description in Graphviz DOT syntax
    """
    parsed_workflow = oozie_read(xml_oozie_string)
    pipeline_model = oozie_convert(parsed_workflow, IISPipelineConverter())
    renderer = PipelineConverter(
        detail_level, show_input_ports, show_output_ports,
        vertical_orientation)
    return renderer.run(pipeline_model)
def convert_oozie_to_png(xml_oozie_string, detail_level,
                         show_input_ports, show_output_ports,
                         vertical_orientation,
                         dot_program_path='/usr/bin/dot'):
    """Render an XML Oozie workflow definition as a PNG image.

    Args:
        xml_oozie_string (string): Oozie XML
        detail_level (DetailLevel): level of presentation details
        show_input_ports (bool):
        show_output_ports (bool):
        vertical_orientation (bool): True to draw the graph top-to-bottom,
            False to draw it left-to-right.
        dot_program_path (string): path to the 'dot' program

    Return:
        byte string: the rendered PNG data
    """
    # Build the DOT description first, then hand it to Graphviz.
    graph_description = convert_oozie_to_dot(
        xml_oozie_string, detail_level, show_input_ports,
        show_output_ports, vertical_orientation)
    renderer = ImageConverter(graph_description, dot_program_path)
    return renderer.to_image()
| {
"content_hash": "bc817c8c5d579afb9fc7c015c1bb82da",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 80,
"avg_line_length": 40.03703703703704,
"alnum_prop": 0.6304347826086957,
"repo_name": "openaire/vipe",
"id": "6b8549a5800a2ee1b521ddf7b3d3535a39265a73",
"size": "2752",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "vipe/oozie2png.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "3118"
},
{
"name": "Python",
"bytes": "133908"
},
{
"name": "Shell",
"bytes": "4350"
}
],
"symlink_target": ""
} |
from hk2.types import enum, flags, flags_seq
import unittest
#===========================================================
@enum
class DayOfWeek(object):
    """Week days as an @enum, numbered Monday=0 through Sunday=6."""
    Monday = 0
    Tuesday = 1
    Wednesday = 2
    Thursday = 3
    Friday = 4
    Saturday = 5
    Sunday = 6
#===========================================================
class TestEnum(unittest.TestCase):
    """Exercises the @enum decorator through the DayOfWeek enumeration."""

    def testBasicValues(self):
        # Members compare equal to their underlying integers.
        self.assertEqual(0, DayOfWeek.Monday)
        self.assertEqual(6, DayOfWeek.Sunday)

    def testToString(self):
        day = DayOfWeek.Thursday
        self.assertEqual('Thursday', DayOfWeek.toString(day))

    def testToStringDefault(self):
        # An unknown value falls back to the supplied default string.
        unknown = 100500
        self.assertEqual('none', DayOfWeek.toString(unknown, 'none'))

    def testParse(self):
        # Parsing is case-insensitive.
        parsed = DayOfWeek.parse('sunday')
        self.assertEqual(DayOfWeek.Sunday, parsed)

    def testParseDefault(self):
        # Unparseable names yield the supplied default.
        parsed = DayOfWeek.parse('funday', -1)
        self.assertEqual(-1, parsed)

    def testIterValues(self):
        all_values = DayOfWeek.values
        self.assertEqual(7, len(all_values))
        self.assertIn(DayOfWeek.Monday, all_values)
        self.assertIn(DayOfWeek.Sunday, all_values)
#===========================================================
@flags
class Caps(object):
    # Capability bit-flags; members combine with bitwise OR.
    Empty = 0x0
    # flags_seq(4) yields four consecutive single-bit values: Read == 0x1
    # and Delete == 0x8 per TestFlagsEnum.testFlagsBasic below (Write/Create
    # presumably 0x2/0x4 — confirm against hk2.types.flags_seq).
    Read, Write, Create, Delete = flags_seq(4)
    All = 0xff
#===========================================================
class TestFlagsEnum(unittest.TestCase):
    """Exercises the @flags decorator through the Caps bitmask."""

    def testFlagsBasic(self):
        # Members are single-bit values assigned in declaration order.
        self.assertEqual(0x1, Caps.Read)
        self.assertEqual(0x8, Caps.Delete)

    def testToStringSingle(self):
        rendered = Caps.toString(Caps.Write)
        self.assertEqual('Write', rendered)

    def testToStringDefault(self):
        # Unknown bit patterns fall back to the supplied default string.
        rendered = Caps.toString(100500, 'Invalid')
        self.assertEqual('Invalid', rendered)

    def testToStringMulti(self):
        # Combined flags render as names joined by ' | '; the named
        # aggregates Empty and All render under their own names.
        rendered = Caps.toString(Caps.Read | Caps.Write)
        self.assertEqual('Read | Write', rendered)
        rendered = Caps.toString(Caps.Empty)
        self.assertEqual('Empty', rendered)
        rendered = Caps.toString(Caps.All)
        self.assertEqual('All', rendered)

    def testParseSingle(self):
        parsed = Caps.parse('write')
        self.assertEqual(Caps.Write, parsed)

    def testParseDefault(self):
        parsed = Caps.parse('blah', -1)
        self.assertEqual(-1, parsed)

    def testParseMulti(self):
        # '|'-separated names parse case-insensitively, whitespace ignored.
        parsed = Caps.parse('write|read | delete')
        self.assertEqual(Caps.Read | Caps.Write | Caps.Delete, parsed)
| {
"content_hash": "d83c1ea55cda83e4bb73035d790ab5db",
"timestamp": "",
"source": "github",
"line_count": 88,
"max_line_length": 65,
"avg_line_length": 27.238636363636363,
"alnum_prop": 0.5561118064246975,
"repo_name": "mikhtonyuk/pyhk2",
"id": "cca090d49879773779d91e7f9d0f7ca63eec4662",
"size": "2397",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/types/enum_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "73033"
}
],
"symlink_target": ""
} |
import cherrypy
import datetime
import re
import six
from .. import base
from girder import events
from girder.constants import AccessType, SettingKey
def setUpModule():
    # Start the embedded Girder test server once for the whole module.
    base.startServer()
def tearDownModule():
    # Shut the test server down after every test in the module has run.
    base.stopServer()
class UserTestCase(base.TestCase):
def _verifyAuthCookie(self, resp):
self.assertTrue('girderToken' in resp.cookie)
self.cookieVal = resp.cookie['girderToken'].value
self.assertFalse(not self.cookieVal)
lifetime = int(self.model('setting').get(SettingKey.COOKIE_LIFETIME))
self.assertEqual(
resp.cookie['girderToken']['expires'],
lifetime * 3600 * 24)
def _verifyDeletedCookie(self, resp):
self.assertTrue('girderToken' in resp.cookie)
self.assertEqual(resp.cookie['girderToken'].value, '')
self.assertEqual(resp.cookie['girderToken']['expires'], 0)
def _verifyUserDocument(self, doc, admin=True):
self.assertHasKeys(
doc, ['_id', 'firstName', 'lastName', 'public', 'login', 'admin'])
if admin:
self.assertHasKeys(doc, ['email', 'size'])
else:
self.assertNotHasKeys(doc, ['access', 'email', 'size'])
self.assertNotHasKeys(doc, ['salt'])
    def testRegisterAndLoginBcrypt(self):
        """
        Test user registration and logging in with the bcrypt backend.

        Walks registration parameter validation, then the full basic-auth
        login matrix (missing/bad header, bad credentials, email vs login).
        """
        cherrypy.config['auth']['hash_alg'] = 'bcrypt'
        # Set this to minimum so test runs faster.
        cherrypy.config['auth']['bcrypt_rounds'] = 4
        params = {
            'email': 'bad_email',
            'login': 'illegal@login',
            'firstName': 'First',
            'lastName': 'Last',
            'password': 'bad'
        }
        # First test all of the required parameters.
        self.ensureRequiredParams(
            path='/user', method='POST', required=params.keys())
        # Now test parameter validation
        resp = self.request(path='/user', method='POST', params=params)
        self.assertValidationError(resp, 'password')
        self.assertEqual(cherrypy.config['users']['password_description'],
                         resp.json['message'])
        # A password containing ':' also exercises basic-auth parsing below.
        params['password'] = 'good:password'
        resp = self.request(path='/user', method='POST', params=params)
        self.assertValidationError(resp, 'login')
        # Make login something that violates the regex but doesn't contain @
        params['login'] = ' '
        resp = self.request(path='/user', method='POST', params=params)
        self.assertValidationError(resp, 'login')
        self.assertEqual(cherrypy.config['users']['login_description'],
                         resp.json['message'])
        params['login'] = 'goodlogin'
        resp = self.request(path='/user', method='POST', params=params)
        self.assertValidationError(resp, 'email')
        # Now successfully create the user
        params['email'] = 'good@email.com'
        resp = self.request(path='/user', method='POST', params=params)
        self.assertStatusOk(resp)
        self._verifyUserDocument(resp.json)
        user = self.model('user').load(resp.json['_id'], force=True)
        self.assertEqual(user['hashAlg'], 'bcrypt')
        # Try logging in without basic auth, should get 401
        resp = self.request(path='/user/authentication', method='GET')
        self.assertStatus(resp, 401)
        # Bad authentication header
        resp = self.request(
            path='/user/authentication', method='GET',
            additionalHeaders=[('Girder-Authorization', 'Basic Not-Valid-64')])
        self.assertStatus(resp, 401)
        self.assertEqual('Invalid HTTP Authorization header',
                         resp.json['message'])
        resp = self.request(
            path='/user/authentication', method='GET',
            additionalHeaders=[('Girder-Authorization', 'Basic NotValid')])
        self.assertStatus(resp, 401)
        self.assertEqual('Invalid HTTP Authorization header',
                         resp.json['message'])
        # Login with unregistered email
        resp = self.request(path='/user/authentication', method='GET',
                            basicAuth='incorrect@email.com:badpassword')
        self.assertStatus(resp, 403)
        self.assertEqual('Login failed.', resp.json['message'])
        # Correct email, but wrong password
        resp = self.request(path='/user/authentication', method='GET',
                            basicAuth='good@email.com:badpassword')
        self.assertStatus(resp, 403)
        self.assertEqual('Login failed.', resp.json['message'])
        # Login successfully with email
        resp = self.request(path='/user/authentication', method='GET',
                            basicAuth='good@email.com:good:password')
        self.assertStatusOk(resp)
        self.assertHasKeys(resp.json, ['authToken'])
        self.assertHasKeys(
            resp.json['authToken'], ['token', 'expires'])
        self._verifyAuthCookie(resp)
        # Invalid login
        resp = self.request(path='/user/authentication', method='GET',
                            basicAuth='badlogin:good:password')
        self.assertStatus(resp, 403)
        self.assertEqual('Login failed.', resp.json['message'])
        # Login successfully with fallback Authorization header
        resp = self.request(path='/user/authentication', method='GET',
                            basicAuth='goodlogin:good:password',
                            authHeader='Authorization')
        self.assertStatusOk(resp)
        # Login successfully with login
        resp = self.request(path='/user/authentication', method='GET',
                            basicAuth='goodlogin:good:password')
        self.assertStatusOk(resp)
        # Make sure we got a nice cookie
        self._verifyAuthCookie(resp)
        # Test user/me
        resp = self.request(path='/user/me', method='GET', user=user)
        self.assertStatusOk(resp)
        self.assertEqual(resp.json['login'], user['login'])
    def testRegisterAndLoginSha512(self):
        """Register, authenticate, and log out with the sha512 password
        storage backend, verifying the session token lifecycle."""
        cherrypy.config['auth']['hash_alg'] = 'sha512'
        params = {
            'email': 'good@email.com',
            'login': 'goodlogin',
            'firstName': 'First',
            'lastName': 'Last',
            'password': 'goodpassword'
        }
        # Register a user with sha512 storage backend
        resp = self.request(path='/user', method='POST', params=params)
        self.assertStatusOk(resp)
        self._verifyUserDocument(resp.json)
        user = self.model('user').load(resp.json['_id'], force=True)
        self.assertEqual(user['hashAlg'], 'sha512')
        # Login unsuccessfully
        resp = self.request(path='/user/authentication', method='GET',
                            basicAuth='goodlogin:badpassword')
        self.assertStatus(resp, 403)
        self.assertEqual('Login failed.', resp.json['message'])
        # Login successfully
        resp = self.request(path='/user/authentication', method='GET',
                            basicAuth='goodlogin:goodpassword')
        self.assertStatusOk(resp)
        self.assertEqual('Login succeeded.', resp.json['message'])
        self.assertEqual('good@email.com', resp.json['user']['email'])
        self._verifyUserDocument(resp.json['user'])
        # Make sure we got a nice cookie
        self._verifyAuthCookie(resp)
        # The cookie's token must belong to the logged-in user.
        token = self.model('token').load(
            self.cookieVal, objectId=False, force=True)
        self.assertEqual(str(token['userId']), resp.json['user']['_id'])
        # Hit the logout endpoint
        resp = self.request(path='/user/authentication', method='DELETE',
                            token=token['_id'])
        self._verifyDeletedCookie(resp)
        # Logging out must also destroy the server-side token.
        token = self.model('token').load(
            token['_id'], objectId=False, force=True)
        self.assertEqual(token, None)
    def testGetAndUpdateUser(self):
        """
        Tests for the GET and PUT user endpoints.
        """
        params = {
            'email': 'good@email.com',
            'login': 'goodlogin',
            'firstName': 'First',
            'lastName': 'Last',
            'password': 'goodpassword'
        }
        user = self.model('user').createUser(**params)
        params['email'] = 'notasgood@email.com'
        params['login'] = 'notasgoodlogin'
        nonAdminUser = self.model('user').createUser(**params)
        # Test that invalid objectID gives us a 400
        resp = self.request(path='/user/bad_id')
        self.assertStatus(resp, 400)
        self.assertEqual(resp.json['message'], 'Invalid ObjectId: bad_id')
        # Anonymous GET must return only the non-admin field set.
        resp = self.request(path='/user/{}'.format(user['_id']))
        self._verifyUserDocument(resp.json, admin=False)
        params = {
            'email': 'bad',
            'firstName': 'NewFirst ',
            'lastName': ' New Last ',
        }
        resp = self.request(path='/user/{}'.format(user['_id']), method='PUT',
                            user=user, params=params)
        self.assertStatus(resp, 400)
        self.assertEqual(resp.json['message'], 'Invalid email address.')
        params['email'] = 'valid@email.com '
        resp = self.request(path='/user/{}'.format(user['_id']), method='PUT',
                            user=user, params=params)
        self.assertStatusOk(resp)
        self._verifyUserDocument(resp.json)
        # Surrounding whitespace in the submitted fields must be stripped.
        self.assertEqual(resp.json['email'], 'valid@email.com')
        self.assertEqual(resp.json['firstName'], 'NewFirst')
        self.assertEqual(resp.json['lastName'], 'New Last')
        # test admin checkbox
        params = {
            'email': 'valid@email.com',
            'firstName': 'NewFirst ',
            'lastName': ' New Last ',
            'admin': 'true'
        }
        resp = self.request(path='/user/{}'.format(user['_id']), method='PUT',
                            user=user, params=params)
        self.assertStatusOk(resp)
        self._verifyUserDocument(resp.json)
        self.assertEqual(resp.json['admin'], True)
        # test admin flag as non-admin
        params['admin'] = 'true'
        resp = self.request(path='/user/{}'.format(nonAdminUser['_id']),
                            method='PUT', user=nonAdminUser, params=params)
        self.assertStatus(resp, 403)
    def testDeleteUser(self):
        """
        Test the behavior of deleting users.

        Deleting a user must cascade to their tokens, pending group
        invitations, access-control entries, and owned folders.
        """
        # Create a couple of users
        users = [self.model('user').createUser(
            'usr%s' % num, 'passwd', 'tst', 'usr', 'u%s@u.com' % num)
            for num in [0, 1]]
        # Create a folder and give both users some access on it
        folder = self.model('folder').createFolder(
            parent=users[0], name='x', parentType='user', public=False,
            creator=users[0])
        self.model('folder').setUserAccess(folder, users[0], AccessType.WRITE)
        self.model('folder').setUserAccess(folder, users[1], AccessType.READ)
        folder = self.model('folder').save(folder)
        self.assertEqual(len(folder['access']['users']), 2)
        # Create a token for user 1
        token = self.model('token').createToken(users[1])
        # Create a group, and have user 1 request to join it
        group = self.model('group').createGroup('test', users[0], public=True)
        resp = self.request(path='/group/%s/member' % group['_id'],
                            method='POST', user=users[1])
        self.assertStatusOk(resp)
        # Make sure non-admin users can't delete other users
        resp = self.request(path='/user/%s' % users[0]['_id'], method='DELETE',
                            user=users[1])
        self.assertStatus(resp, 403)
        # Delete user 1 as admin, should work
        resp = self.request(path='/user/%s' % users[1]['_id'], method='DELETE',
                            user=users[0])
        self.assertStatusOk(resp)
        self.assertEqual(
            resp.json['message'], 'Deleted user %s.' % users[1]['login'])
        # Reload everything to observe the cascade effects.
        users[1] = self.model('user').load(users[1]['_id'], force=True)
        folder = self.model('folder').load(folder['_id'], force=True)
        token = self.model('token').load(token['_id'], force=True,
                                         objectId=False)
        group = self.model('group').load(group['_id'], force=True)
        # Make sure user and token were deleted
        self.assertEqual(users[1], None)
        self.assertEqual(token, None)
        # Make sure pending invite to group was deleted
        self.assertEqual(
            len(list(self.model('group').getFullRequestList(group))), 0)
        # Make sure access control references for the user were deleted
        self.assertEqual(len(folder['access']['users']), 1)
        # Delete user 0
        resp = self.request(path='/user/%s' % users[0]['_id'], method='DELETE',
                            user=users[0])
        self.assertStatusOk(resp)
        # Make sure the user's folder was deleted
        folder = self.model('folder').load(folder['_id'], force=True)
        self.assertEqual(folder, None)
def testUserIndex(self):
"""
Test user list endpoint.
"""
# Create some users.
for x in ('c', 'a', 'b'):
self.model('user').createUser(
'usr%s' % x, 'passwd', 'tst', '%s_usr' % x, 'u%s@u.com' % x)
resp = self.request(path='/user', method='GET', params={
'limit': 2,
'offset': 1
})
self.assertStatusOk(resp)
# Make sure the limit, order, and offset are respected, and that our
# default sorting is by lastName.
self.assertEqual(len(resp.json), 2)
self.assertEqual(resp.json[0]['lastName'], 'b_usr')
self.assertEqual(resp.json[1]['lastName'], 'c_usr')
    def testPasswordChangeAndReset(self):
        """Cover the email-based password reset flow and the authenticated
        password-change endpoint."""
        user = self.model('user').createUser('user1', 'passwd', 'tst', 'usr',
                                             'user@user.com')
        # Reset password should require email param
        resp = self.request(path='/user/password', method='DELETE', params={})
        self.assertStatus(resp, 400)
        self.assertEqual(resp.json['message'], "Parameter 'email' is required.")
        # Reset email with an incorrect email
        resp = self.request(path='/user/password', method='DELETE', params={
            'email': 'bad_email@user.com'
        })
        self.assertStatus(resp, 400)
        self.assertEqual(resp.json['message'], "That email is not registered.")
        # Actually reset password
        self.assertTrue(base.mockSmtp.isMailQueueEmpty())
        resp = self.request(path='/user/password', method='DELETE', params={
            'email': 'user@user.com'
        })
        self.assertStatusOk(resp)
        self.assertEqual(resp.json['message'], "Sent password reset email.")
        # Old password should no longer work
        resp = self.request(path='/user/authentication', method='GET',
                            basicAuth='user@user.com:passwd')
        self.assertStatus(resp, 403)
        self.assertTrue(base.mockSmtp.waitForMail())
        msg = base.mockSmtp.getMail()
        # Pull out the auto-generated password from the email
        search = re.search('Your new password is: <b>(.*)</b>', msg)
        newPass = search.group(1)
        # Login with the new password
        resp = self.request(path='/user/authentication', method='GET',
                            basicAuth='user@user.com:' + newPass)
        self.assertStatusOk(resp)
        self.assertHasKeys(resp.json, ('authToken',))
        self.assertHasKeys(
            resp.json['authToken'], ('token', 'expires'))
        self._verifyAuthCookie(resp)
        # Now test changing passwords the normal way
        # Must be logged in
        resp = self.request(path='/user/password', method='PUT', params={
            'old': newPass,
            'new': 'something_else'
        })
        self.assertStatus(resp, 401)
        # Old password must be correct
        resp = self.request(path='/user/password', method='PUT', params={
            'old': 'passwd',
            'new': 'something_else'
        }, user=user)
        self.assertStatus(resp, 403)
        self.assertEqual(resp.json['message'], 'Old password is incorrect.')
        # New password must meet requirements
        resp = self.request(path='/user/password', method='PUT', params={
            'old': newPass,
            'new': 'x'
        }, user=user)
        self.assertStatus(resp, 400)
        # Change password successfully
        resp = self.request(path='/user/password', method='PUT', params={
            'old': newPass,
            'new': 'something_else'
        }, user=user)
        self.assertStatusOk(resp)
        # Make sure we can login with new password
        resp = self.request(path='/user/authentication', method='GET',
                            basicAuth='user@user.com:something_else')
        self.assertStatusOk(resp)
        self.assertHasKeys(resp.json, ('authToken',))
        self.assertHasKeys(
            resp.json['authToken'], ('token', 'expires'))
        self._verifyAuthCookie(resp)
    def testTemporaryPassword(self):
        """Cover the temporary-access (emailed token) login flow, including
        token validation, expiry, and forwarded-host email links."""
        self.model('user').createUser('user1', 'passwd', 'tst', 'usr',
                                      'user@user.com')
        # Temporary password should require email param
        resp = self.request(path='/user/password/temporary', method='PUT',
                            params={})
        self.assertStatus(resp, 400)
        self.assertEqual(resp.json['message'], "Parameter 'email' is required.")
        # Temporary password with an incorrect email
        resp = self.request(path='/user/password/temporary', method='PUT',
                            params={'email': 'bad_email@user.com'})
        self.assertStatus(resp, 400)
        self.assertEqual(resp.json['message'], "That email is not registered.")
        # Actually generate temporary access token
        self.assertTrue(base.mockSmtp.isMailQueueEmpty())
        resp = self.request(path='/user/password/temporary', method='PUT',
                            params={'email': 'user@user.com'})
        self.assertStatusOk(resp)
        self.assertEqual(resp.json['message'], "Sent temporary access email.")
        self.assertTrue(base.mockSmtp.waitForMail())
        msg = base.mockSmtp.getMail()
        # Pull out the auto-generated token from the email
        search = re.search('<a href="(.*)">', msg)
        link = search.group(1)
        # The link ends .../<userId>/token/<tokenId>; pick the pieces out.
        linkParts = link.split('/')
        userId = linkParts[-3]
        tokenId = linkParts[-1]
        # Checking if a token is a valid temporary token should fail if the
        # token is missing or doesn't match the user ID
        path = '/user/password/temporary/' + userId
        resp = self.request(path=path, method='GET', params={})
        self.assertStatus(resp, 400)
        self.assertEqual(resp.json['message'], "Parameter 'token' is required.")
        resp = self.request(path=path, method='GET',
                            params={'token': 'not valid'})
        self.assertStatus(resp, 403)
        resp = self.request(path=path, method='GET', params={'token': tokenId})
        self.assertStatusOk(resp)
        user = resp.json['user']
        # We should now be able to change the password
        resp = self.request(path='/user/password', method='PUT', params={
            'old': tokenId,
            'new': 'another_password'
        }, user=user)
        self.assertStatusOk(resp)
        # Artificially adjust the token to have expired.
        token = self.model('token').load(tokenId, force=True, objectId=False)
        token['expires'] = (datetime.datetime.utcnow() -
                            datetime.timedelta(days=1))
        self.model('token').save(token)
        resp = self.request(path=path, method='GET', params={'token': tokenId})
        self.assertStatus(resp, 403)
        # Generate an email with a forwarded header
        self.assertTrue(base.mockSmtp.isMailQueueEmpty())
        resp = self.request(
            path='/user/password/temporary', method='PUT',
            params={'email': 'user@user.com'},
            additionalHeaders=[('X-Forwarded-Host', 'anotherhost')])
        self.assertStatusOk(resp)
        self.assertEqual(resp.json['message'], "Sent temporary access email.")
        self.assertTrue(base.mockSmtp.waitForMail())
        msg = base.mockSmtp.getMail()
        # The emailed link must honor the forwarded host header.
        self.assertTrue('anotherhost' in msg)
    def testUserCreation(self):
        """Verify the closed-registration policy: only admins may create
        accounts, and admins may create other admins."""
        # The first created user is implicitly an admin.
        admin = self.model('user').createUser(
            'user1', 'passwd', 'tst', 'usr', 'user@user.com')
        self.assertTrue(admin['admin'])
        # Close registration
        self.model('setting').set(SettingKey.REGISTRATION_POLICY, 'closed')
        params = {
            'email': 'some.email@email.com',
            'login': 'otheruser',
            'firstName': 'First',
            'lastName': 'Last',
            'password': 'mypass'
        }
        # Make sure we get a 400 when trying to register
        resp = self.request(path='/user', method='POST', params=params)
        self.assertStatus(resp, 400)
        self.assertEqual(resp.json['message'],
                         'Registration on this instance is closed. Contact an '
                         'administrator to create an account for you.')
        # Admins should still be able to create users
        resp = self.request(path='/user', method='POST', params=params,
                            user=admin)
        self.assertStatusOk(resp)
        user = resp.json
        self.assertFalse(user['admin'])
        # Normal users should not be able to create new users
        resp = self.request(path='/user', method='POST', params=params,
                            user=user)
        self.assertStatus(resp, 400)
        self.assertEqual(resp.json['message'],
                         'Registration on this instance is closed. Contact an '
                         'administrator to create an account for you.')
        # Admins should be able to create other admin users
        params = {
            'email': 'other.email@email.com',
            'login': 'otheruser2',
            'firstName': 'First',
            'lastName': 'Last',
            'password': 'mypass',
            'admin': True
        }
        resp = self.request(path='/user', method='POST', params=params,
                            user=admin)
        self.assertStatusOk(resp)
        self.assertTrue(resp.json['admin'])
def testAdminFlag(self):
admin = self.model('user').createUser(
'user1', 'passwd', 'tst', 'usr', 'user@user.com')
self.assertTrue(admin['admin'])
params = {
'email': 'some.email@email.com',
'login': 'otheruser',
'firstName': 'First',
'lastName': 'Last',
'password': 'mypass',
'admin': True
}
# Setting admin param to True should have no effect for normal
# registration process
resp = self.request(path='/user', method='POST', params=params)
self.assertStatusOk(resp)
self.assertFalse(resp.json['admin'])
def testModelSaveHooks(self):
"""
This tests the general correctness of the model save hooks
"""
self.ctr = 0
def preSave(event):
if '_id' not in event.info:
self.ctr += 1
def postSave(event):
self.ctr += 2
events.bind('model.user.save', 'test', preSave)
user = self.model('user').createUser(
login='myuser', password='passwd', firstName='A', lastName='A',
email='email@email.com')
self.assertEqual(self.ctr, 1)
events.bind('model.user.save.after', 'test', postSave)
self.ctr = 0
user = self.model('user').save(user, triggerEvents=False)
self.assertEqual(self.ctr, 0)
self.model('user').save(user)
self.assertEqual(self.ctr, 2)
events.unbind('model.user.save', 'test')
events.unbind('model.user.save.after', 'test')
def testPrivateUser(self):
"""
Make sure private users behave correctly.
"""
# Create an admin user
self.model('user').createUser(
firstName='Admin', lastName='Admin', login='admin',
email='admin@admin.com', password='adminadmin')
# Register a private user (non-admin)
pvt = self.model('user').createUser(
firstName='Guy', lastName='Noir', login='guynoir',
email='guy.noir@email.com', password='guynoir', public=False)
self.assertEqual(pvt['public'], False)
folder = six.next(self.model('folder').childFolders(
parentType='user', parent=pvt))
# Private users should be able to upload files
resp = self.request(path='/item', method='POST', user=pvt, params={
'name': 'foo.txt',
'folderId': folder['_id']
})
self.assertStatusOk(resp)
itemId = resp.json['_id']
resp = self.request(
path='/file', method='POST', user=pvt, params={
'parentType': 'item',
'parentId': itemId,
'name': 'foo.txt',
'size': 5,
'mimeType': 'text/plain'
})
self.assertStatusOk(resp)
fields = [('offset', 0), ('uploadId', resp.json['_id'])]
files = [('chunk', 'foo.txt', 'hello')]
resp = self.multipartRequest(
path='/file/chunk', user=pvt, fields=fields, files=files)
self.assertStatusOk(resp)
self.assertEqual(resp.json['itemId'], itemId)
| {
"content_hash": "f4b79b7797bec06c34ddaa193ec37edd",
"timestamp": "",
"source": "github",
"line_count": 651,
"max_line_length": 80,
"avg_line_length": 39.50230414746544,
"alnum_prop": 0.5694898117903251,
"repo_name": "chrismattmann/girder",
"id": "e15b139777e51fc978fa8584ffc07f427f379847",
"size": "26510",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/cases/user_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CMake",
"bytes": "36635"
},
{
"name": "CSS",
"bytes": "156740"
},
{
"name": "HTML",
"bytes": "161646"
},
{
"name": "JavaScript",
"bytes": "1358011"
},
{
"name": "Mako",
"bytes": "1483"
},
{
"name": "Python",
"bytes": "1202964"
},
{
"name": "Ruby",
"bytes": "9923"
},
{
"name": "Shell",
"bytes": "3298"
}
],
"symlink_target": ""
} |
from django.apps import AppConfig
class AuthConfig(AppConfig):
    """Django app configuration for the daiquiri auth app."""

    name = 'daiquiri.auth'
    label = 'daiquiri_auth'
    verbose_name = 'User Profiles'

    def ready(self):
        # Import the signal handlers so they are registered at startup.
        from . import handlers  # noqa: F401
| {
"content_hash": "66aeffbd5cf9882fcac46cf7d687ecc7",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 34,
"avg_line_length": 20.8,
"alnum_prop": 0.6682692307692307,
"repo_name": "aipescience/django-daiquiri",
"id": "977e85920f0c238c1d187199c8478da4b847d890",
"size": "208",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "daiquiri/auth/apps.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "28598"
},
{
"name": "HTML",
"bytes": "236579"
},
{
"name": "JavaScript",
"bytes": "97087"
},
{
"name": "Python",
"bytes": "602159"
}
],
"symlink_target": ""
} |
"""
Pyinvoke tasks.py file for automating releases and admin stuff.
Author: Shyue Ping Ong
"""
import datetime
import glob
import json
import os
import re
import subprocess
import webbrowser
import requests
from invoke import task
from monty.os import cd
from pymatgen.core import __version__ as CURRENT_VER
@task
def make_doc(ctx):
    """
    Generate API documentation + run Sphinx.

    Extracts the most recent changelog entry into
    docs_rst/latest_changes.rst, regenerates the sphinx-apidoc stubs
    (stripping *.tests subpackages), builds the HTML docs and flattens the
    build output into docs/ for Github Pages.

    :param ctx: invoke context used to run shell commands.
    """
    with open("CHANGES.rst") as f:
        contents = f.read()
    # Releases in CHANGES.rst are separated by "---" underlines; toks[0]
    # ends with the latest version string, toks[1] holds its notes.
    toks = re.split(r"\-{3,}", contents)
    # Width of the underline must match the version string length.
    n = len(toks[0].split()[-1])
    changes = [toks[0]]
    # Drop the final line of the section (it is the next section's header).
    changes.append("\n" + "\n".join(toks[1].strip().split("\n")[0:-1]))
    changes = ("-" * n).join(changes)
    with open("docs_rst/latest_changes.rst", "w") as f:
        f.write(changes)
    with cd("docs_rst"):
        ctx.run("cp ../CHANGES.rst change_log.rst")
        ctx.run("rm pymatgen.*.rst", warn=True)
        ctx.run("sphinx-apidoc --implicit-namespaces --separate -d 7 -o . -f ../pymatgen")
        ctx.run("rm *.tests.*rst")
        # Post-process the generated rst: drop "Subpackages" listings and
        # any *.tests entries, merging subpackage lines back in whenever a
        # non-test pymatgen module line is seen.
        for f in glob.glob("*.rst"):
            if f.startswith("pymatgen") and f.endswith("rst"):
                newoutput = []
                suboutput = []
                subpackage = False
                with open(f, "r") as fid:
                    for line in fid:
                        clean = line.strip()
                        if clean == "Subpackages":
                            subpackage = True
                        if not subpackage and not clean.endswith("tests"):
                            newoutput.append(line)
                        else:
                            if not clean.endswith("tests"):
                                suboutput.append(line)
                            if clean.startswith("pymatgen") and not clean.endswith("tests"):
                                newoutput.extend(suboutput)
                                subpackage = False
                                suboutput = []
                with open(f, "w") as fid:
                    fid.write("".join(newoutput))
        ctx.run("make html")
        ctx.run("cp _static/* ../docs/html/_static", warn=True)

    with cd("docs"):
        # Flatten the html build into docs/ and remove sphinx intermediates.
        ctx.run("rm *.html", warn=True)
        ctx.run("cp -r html/* .", warn=True)
        ctx.run("rm -r html", warn=True)
        ctx.run("rm -r doctrees", warn=True)
        ctx.run("rm -r _sources", warn=True)
        ctx.run("rm -r _build", warn=True)

        # This makes sure pymatgen.org works to redirect to the Github page
        ctx.run('echo "pymatgen.org" > CNAME')
        # Avoid the use of jekyll so that _dir works as intended.
        ctx.run("touch .nojekyll")
@task
def make_dash(ctx):
    """
    Make customized doc version for Dash

    :param ctx: invoke context.
    """
    ctx.run("cp docs_rst/conf-docset.py docs_rst/conf.py")
    make_doc(ctx)
    ctx.run("rm docs/_static/pymatgen.docset.tgz", warn=True)
    ctx.run("doc2dash docs -n pymatgen -i docs/_images/pymatgen.png -u https://pymatgen.org/")
    # Inject the dashIndexFilePath key immediately after the opening <dict>
    # tag of the docset's Info.plist.
    plist_path = "pymatgen.docset/Contents/Info.plist"
    plist_lines = []
    with open(plist_path, "rt") as infile:
        for raw_line in infile:
            stripped = raw_line.strip()
            plist_lines.append(stripped)
            if stripped == "<dict>":
                plist_lines.append("<key>dashIndexFilePath</key>")
                plist_lines.append("<string>index.html</string>")
    with open(plist_path, "wt") as outfile:
        outfile.write("\n".join(plist_lines))
    ctx.run('tar --exclude=".DS_Store" -cvzf pymatgen.tgz pymatgen.docset')
    ctx.run("rm -r pymatgen.docset")
    # Restore the normal sphinx configuration.
    ctx.run("cp docs_rst/conf-normal.py docs_rst/conf.py")
@task
def contribute_dash(ctx, version):
    """
    Build the Dash docset and commit/push it to the Dash-User-Contributions
    checkout expected to live next to this repo.

    :param ctx: invoke context.
    :param version: version string to record in docset.json.
    """
    make_dash(ctx)
    ctx.run("cp pymatgen.tgz ../Dash-User-Contributions/docsets/pymatgen/pymatgen.tgz")
    with cd("../Dash-User-Contributions/docsets/pymatgen"):
        with open("docset.json", "rt") as f:
            docset_meta = json.load(f)
        docset_meta["version"] = version
        with open("docset.json", "wt") as f:
            json.dump(docset_meta, f, indent=4)
        ctx.run('git commit --no-verify -a -m "Update to v%s"' % version)
        ctx.run("git push")
    ctx.run("rm pymatgen.tgz")
@task
def submit_dash_pr(ctx, version):
    """
    Open a pull request against Dash-User-Contributions for the new docset.

    :param ctx: invoke context.
    :param version: version string used in the PR title/body.
    """
    with cd("../Dash-User-Contributions/docsets/pymatgen"):
        message = "Update pymatgen docset to v%s" % version
        payload = {
            "title": message,
            "body": message,
            "head": "Dash-User-Contributions:master",
            "base": "master",
        }
        response = requests.post(
            "https://api.github.com/repos/materialsvirtuallab/Dash-User-Contributions/pulls",
            data=json.dumps(payload),
        )
        print(response.text)
@task
def update_doc(ctx):
    """
    Update the web documentation.

    :param ctx: invoke context.
    """
    ctx.run("cp docs_rst/conf-normal.py docs_rst/conf.py")
    make_doc(ctx)
    # Commit and push the regenerated docs.
    for git_cmd in ("git add .", 'git commit -a -m "Update docs"', "git push"):
        ctx.run(git_cmd)
@task
def publish(ctx):
    """
    Upload release to Pypi using twine.

    :param ctx: invoke context.
    """
    # The initial cleanup may fail if dist/ is already empty — that is fine.
    steps = (
        ("rm dist/*.*", True),
        ("python setup.py sdist bdist_wheel", False),
        ("twine upload dist/*", False),
    )
    for cmd, ok_to_fail in steps:
        ctx.run(cmd, warn=ok_to_fail)
@task
def set_ver(ctx, version):
    """
    Stamp *version* into pymatgen/core/__init__.py and setup.py.

    :param ctx: invoke context (unused).
    :param version: version string to write.
    """

    def _sub_in_file(path, pattern, replacement):
        # Read-modify-write a file with a single regex substitution.
        with open(path, "rt") as f:
            text = f.read()
        with open(path, "wt") as f:
            f.write(re.sub(pattern, replacement, text))

    _sub_in_file("pymatgen/core/__init__.py", r"__version__ = .*\n",
                 '__version__ = "%s"\n' % version)
    _sub_in_file("setup.py", r"version=([^,]+),", 'version="%s",' % version)
@task
def release_github(ctx, version):
    """
    Release to Github using Github API.

    :param ctx: invoke context (unused).
    :param version: version string to tag the release with.
    """
    # The latest changelog entry sits between the first two "---" underlines
    # of CHANGES.rst; drop its trailing line (the next section's header).
    with open("CHANGES.rst") as f:
        changelog = f.read()
    sections = re.split(r"\-+", changelog)
    notes = sections[1].strip()
    notes = "\n".join(notes.split("\n")[:-1]).strip()
    payload = {
        "tag_name": "v" + version,
        "target_commitish": "master",
        "name": "v" + version,
        "body": notes,
        "draft": False,
        "prerelease": False,
    }
    # Requires a GITHUB_RELEASES_TOKEN environment variable.
    response = requests.post(
        "https://api.github.com/repos/materialsproject/pymatgen/releases",
        data=json.dumps(payload),
        headers={"Authorization": "token " + os.environ["GITHUB_RELEASES_TOKEN"]},
    )
    print(response.text)
@task
def post_discourse(ctx, version):
    """
    Post release announcement to http://discuss.matsci.org/c/pymatgen.

    :param ctx: invoke context (unused).
    :param version: version string announced in the post.
    """
    # Extract the latest changelog entry, dropping its trailing header line.
    with open("CHANGES.rst") as f:
        changelog = f.read()
    sections = re.split(r"\-+", changelog)
    notes = sections[1].strip()
    notes = "\n".join(notes.split("\n")[:-1]).strip()
    payload = {
        "topic_id": 36,
        "raw": "v" + version + "\n\n" + notes,
    }
    # Requires DISCOURSE_API_USERNAME / DISCOURSE_API_KEY in the environment.
    response = requests.post(
        "https://discuss.matsci.org/c/pymatgen/posts.json",
        data=payload,
        params={"api_username": os.environ["DISCOURSE_API_USERNAME"],
                "api_key": os.environ["DISCOURSE_API_KEY"]},
    )
    print(response.text)
@task
def update_changelog(ctx, version):
    """
    Create a preliminary change log using the git logs.

    :param ctx: invoke context.
    :param version: version string for the new changelog section header.
    """
    # One bullet per commit since the currently released version.
    raw_log = subprocess.check_output(
        ["git", "log", "--pretty=format:%s", "v%s..HEAD" % CURRENT_VER])
    bullets = ["* " + line
               for line in raw_log.decode("utf-8").strip().split("\n")]
    with open("CHANGES.rst") as f:
        contents = f.read()
    marker = "=========="
    parts = contents.split(marker)
    # New section header: "vX.Y.Z" underlined with dashes of matching width.
    header = "\n\nv%s\n" % version + "-" * (len(version) + 1) + "\n"
    parts.insert(-1, header + "\n".join(bullets))
    with open("CHANGES.rst", "w") as f:
        f.write(parts[0] + marker + "".join(parts[1:]))
    ctx.run("open CHANGES.rst")
@task
def release(ctx, version, nodoc=False):
    """
    Run full sequence for releasing pymatgen.

    :param ctx: invoke context.
    :param version: version string to release.
    :param nodoc: Whether to skip doc generation.
    """
    ctx.run("rm -r dist build pymatgen.egg-info", warn=True)
    set_ver(ctx, version)
    if not nodoc:
        # Regenerate and publish the docs as part of the release.
        make_doc(ctx)
        for git_cmd in ("git add .", 'git commit -a -m "Update docs"',
                        "git push"):
            ctx.run(git_cmd)
    release_github(ctx, version)
@task
def open_doc(ctx):
    """
    Open local documentation in web browser.

    :param ctx: invoke context (unused).
    """
    index_page = os.path.abspath("docs/_build/html/index.html")
    webbrowser.open("file://" + index_page)
@task
def lint(ctx):
    """Run every configured linter over the pymatgen package."""
    linters = ("pycodestyle", "mypy", "flake8", "pydocstyle")
    for linter in linters:
        ctx.run("%s pymatgen" % linter)
| {
"content_hash": "96e0cdf7e529f03f70bf303810478981",
"timestamp": "",
"source": "github",
"line_count": 302,
"max_line_length": 118,
"avg_line_length": 29.443708609271525,
"alnum_prop": 0.5467836257309941,
"repo_name": "richardtran415/pymatgen",
"id": "5d3257c7495bf0fed6bbef9a89b727bb23399714",
"size": "8892",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tasks.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "5100"
},
{
"name": "CSS",
"bytes": "7550"
},
{
"name": "Common Lisp",
"bytes": "3029065"
},
{
"name": "HTML",
"bytes": "827"
},
{
"name": "Makefile",
"bytes": "5573"
},
{
"name": "Perl",
"bytes": "229104"
},
{
"name": "Propeller Spin",
"bytes": "4026362"
},
{
"name": "Python",
"bytes": "6783497"
},
{
"name": "Roff",
"bytes": "1135003"
}
],
"symlink_target": ""
} |
import string
from django.contrib import messages
from django.shortcuts import render
from django.views import generic
from . import models
from target.target import makeTarget
from target.utils import saveTargetImage
from django import forms
from django.http import HttpResponseRedirect, Http404
from django.urls import reverse, reverse_lazy
from django.contrib.auth.mixins import PermissionRequiredMixin
class TargetList(generic.ListView):
    """Paginated list of target puzzles."""
    model = models.Target
    paginate_by = 10

    def get_queryset(self):
        # Editors (change_target permission) may also browse unpublished
        # puzzles; everyone else only sees published ones.
        if not self.request.user.has_perm("target.change_target"):
            return models.Target.objects.published()
        return models.Target.objects.all()
class TargetDetail(generic.DetailView):
    """Detail page for a single target puzzle."""
    model = models.Target

    def get_object(self):
        target = super().get_object()
        # Unpublished puzzles are only visible to editors.
        visible = (target.is_published() or
                   self.request.user.has_perm("target.change_target"))
        if not visible:
            raise Http404
        return target

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        number = self.object.number
        if number > 0:
            # Neighbouring published puzzles for prev/next navigation.
            published = models.Target.objects.published()
            context['previous_target'] = \
                published.filter(number__lt=number).first()
            context['next_target'] = \
                published.filter(number__gt=number).last()
        return context
def is_valid_word(word, letters):
    """Return True if *word* can be assembled from *letters*.

    Each letter may be used at most as many times as it occurs in
    *letters*.

    :param word: candidate word.
    :param letters: the available letters.
    :return: bool.
    """
    # Checking each distinct character once instead of every position avoids
    # redundant O(len(word)) scans for repeated letters.
    return all(word.count(c) <= letters.count(c) for c in set(word))


def validate_form(words, letters):
    """Validate a submitted target puzzle.

    Rules enforced, in order:

    * *letters* must be exactly nine lowercase ascii letters;
    * every word must contain the bullseye letter (``letters[0]``);
    * every word may only use the available letters (with multiplicity);
    * exactly one nine-letter word must be present;
    * no word may be shorter than four letters.

    :param words: candidate words joined by "\\r\\n".
    :param letters: the puzzle letters; the first one is the bullseye.
    :return: on success ``{"status": True, "error": None, "field": None}``;
        on failure ``{"status": False, "msg": <reason>,
        "field": <offending form field>}``.
    """
    # Exactly 9 letters
    if len(letters) != 9:
        return {
            "status": False,
            "msg": "Must be exactly 9 letters",
            "field": "letters"
        }

    # Check for invalid letters (was a comprehension shadowing builtin ``l``)
    invalid = [c for c in letters if c not in string.ascii_lowercase]
    if invalid:
        return {
            "status": False,
            "msg": "Invalid letters: " + ','.join(invalid),
            "field": "letters"
        }

    word_list = words.split("\r\n")

    # Check all words contain bullseye
    missing_bullseye = [w for w in word_list if letters[0] not in w]
    if missing_bullseye:
        return {
            "status": False,
            "msg": "Words without bullseye: " + ','.join(missing_bullseye),
            "field": "words"
        }

    # Check all words only contain target letters
    wrong_letters = [w for w in word_list if not is_valid_word(w, letters)]
    if wrong_letters:
        return {
            "status": False,
            "msg": "Words with wrong letters: " + ','.join(wrong_letters),
            "field": "words"
        }

    # Check for one 9-letter word
    if len([w for w in word_list if len(w) == 9]) != 1:
        return {
            "status": False,
            "msg": "There must be exactly one 9-letter word",
            "field": "words"
        }

    # Check for short words
    too_short = [w for w in word_list if len(w) < 4]
    if too_short:
        return {
            "status": False,
            "msg": "Shorter than 4 letters: " + ','.join(too_short),
            "field": "words"
        }

    return {
        "status": True,
        "error": None,
        "field": None
    }
class TargetUpdate(PermissionRequiredMixin, generic.edit.UpdateView):
    """Edit view for a target puzzle (also the base class of TargetCreate)."""
    permission_required = 'target.change_target'
    model = models.Target
    fields = [ 'letters', 'words', 'published', 'publish_solution_after',
               'clue', 'tweet_text',
               'rules', ]

    def post(self, request, *args, **kwargs):
        # Route the cancel/delete buttons before normal form processing.
        if "cancel" in request.POST:
            return HttpResponseRedirect(reverse_lazy('target:list'))
        elif "delete" in request.POST:
            pk = self.get_object().pk
            return HttpResponseRedirect(reverse_lazy('target:delete', args=(pk,)))
        else:
            return super().post(request, *args, **kwargs)

    def form_valid(self, form):
        # Normalize the word list: dedupe and sort before validating.
        form.instance.words = '\r\n'.join(sorted(set(form.instance.words.
                                                     split("\r\n"))))
        validation = validate_form(form.instance.words,
                                   form.instance.letters)
        if validation["status"] == False:
            form.add_error(validation["field"], validation["msg"])
            return self.form_invalid(form)
        if self.request.method == 'POST':
            fv = super().form_valid(form)
            # Regenerate the puzzle image whenever the target is saved.
            saveTargetImage(list(self.object.letters.upper()), self.object.pk)
            messages.add_message(self.request, messages.INFO, "Target saved")
            if self.request.POST.get("save_continue"):
                return HttpResponseRedirect(reverse_lazy("target:update",
                                                         args=(self.object.pk,)))
            else:
                return fv
        return super().form_valid(form)

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        if self.object:
            try:
                context['nine_letter_word'] = [w for w in
                                               self.object.words.split("\r\n")
                                               if len(w) == 9][0]
            # Was a bare ``except:``; only the absence of a 9-letter word
            # (IndexError on the [0] subscript) should mean "not found".
            except IndexError:
                context['nine_letter_word'] = ""
                messages.add_message(self.request, messages.WARNING,
                                     "No nine letter found")
        return context
class TargetCreate(TargetUpdate):
    """Create view that pre-fills the form with a freshly generated puzzle."""
    permission_required = 'target.add_target'
    nine_letter_word = ""

    def get_initial(self):
        # Generate a puzzle, optionally seeded with user-supplied letters.
        if 'letters' in self.kwargs:
            puzzle = makeTarget(user_letters=self.kwargs['letters'])
        else:
            puzzle = makeTarget()
        self.nine_letter_word = puzzle['target']
        initial = {
            'letters': ''.join(puzzle['letters']),
            'bullseye': puzzle['letters'][0],
            'words': '\n'.join(puzzle['words']),
        }
        return initial

    def get_object(self, queryset=None):
        # No object exists yet during creation; UpdateView.get_object raises
        # AttributeError in that case, which we translate to None.
        try:
            return super().get_object(queryset)
        except AttributeError:
            return None

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        context['nine_letter_word'] = self.nine_letter_word
        return context
class TargetDelete(PermissionRequiredMixin, generic.edit.DeleteView):
    """Confirm-and-delete view for a target puzzle."""
    model = models.Target
    permission_required = 'target.delete_target'
    success_url = reverse_lazy('target:list')
| {
"content_hash": "7dbe7a7cc63c73b598ddfe97fb4e8373",
"timestamp": "",
"source": "github",
"line_count": 197,
"max_line_length": 83,
"avg_line_length": 32.60406091370558,
"alnum_prop": 0.5536353728787171,
"repo_name": "groundupnews/gu",
"id": "d41c886ad4ccf62c086fcaa613d954e342976ed9",
"size": "6423",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "target/views.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "222991"
},
{
"name": "HTML",
"bytes": "563742"
},
{
"name": "JavaScript",
"bytes": "790912"
},
{
"name": "PHP",
"bytes": "2275"
},
{
"name": "Python",
"bytes": "598998"
},
{
"name": "Roff",
"bytes": "888"
},
{
"name": "Shell",
"bytes": "803"
},
{
"name": "XSLT",
"bytes": "870"
}
],
"symlink_target": ""
} |
from django import forms
from OpenDataCatalog.suggestions.models import *
class SuggestionForm(forms.Form):
    """Single-field form for nominating a data set."""
    text = forms.CharField(widget=forms.Textarea(), max_length=255,
                           label="My Nomination")
| {
"content_hash": "15773775c204bec3f297fc4f4a3d7ea4",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 90,
"avg_line_length": 29.428571428571427,
"alnum_prop": 0.7621359223300971,
"repo_name": "azavea/Open-Data-Catalog",
"id": "5b0a7538f153758d4a1120c67b46d1d5b2d5113e",
"size": "206",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "OpenDataCatalog/suggestions/forms.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ASP",
"bytes": "45"
},
{
"name": "CSS",
"bytes": "14826"
},
{
"name": "JavaScript",
"bytes": "36551"
},
{
"name": "Python",
"bytes": "136469"
}
],
"symlink_target": ""
} |
"""
Utility functions for reading MITgcm mds files (.meta / .data)
"""
# python 3 compatiblity
from __future__ import print_function, division
import re
import os
import numpy as np
import warnings
from functools import reduce
from dask import delayed
import dask.array as dsa
from dask.base import tokenize
def parse_meta_file(fname):
    """Get the metadata as a dict out of the MITgcm mds .meta file.

    PARAMETERS
    ----------
    fname : str
        Path to the .meta file

    RETURNS
    -------
    flds : dict
        Metadata in dictionary form.
    """
    # NOTE: all regex patterns are now raw strings; the originals relied on
    # invalid escape sequences like '\w' and '\s' in plain strings, which
    # emit DeprecationWarnings on modern Python.
    flds = {}
    # basename is everything in the file name before the first dot
    basename = re.match(r'(^.+?)\..+', os.path.basename(fname)).groups()[0]
    flds['basename'] = basename
    with open(fname) as f:
        text = f.read()
    # entries are ';'-separated "key = [ value ]" (or "{ value }") items
    for item in re.split(';', text):
        # remove whitespace at beginning
        item = re.sub(r'^\s+', '', item)
        match = re.match(r'(\w+) = (\[|\{)(.*)(\]|\})', item, re.DOTALL)
        if match:
            key, _, value, _ = match.groups()
            # remove more whitespace
            value = re.sub(r'^\s+', '', value)
            value = re.sub(r'\s+$', '', value)
            flds[key] = value
    # now check the needed things are there
    needed_keys = ['dimList', 'nDims', 'nrecords', 'dataprec']
    for k in needed_keys:
        assert k in flds
    # transform datatypes
    flds['nDims'] = int(flds['nDims'])
    flds['nrecords'] = int(flds['nrecords'])
    # endianness is set by _read_mds
    flds['dataprec'] = np.dtype(re.sub("'", '', flds['dataprec']))
    # each dimList row is one "size, start, end" triple per dimension
    flds['dimList'] = [[int(h) for h in
                        re.split(',', g)] for g in
                       re.split(r',\n', flds['dimList'])]
    if 'fldList' in flds:
        # fldList entries are fixed-width quoted names, e.g. 'UVEL    '
        flds['fldList'] = [re.match(r"'*(\w+)", g).groups()[0] for g in
                           re.split(r"'\s+'", flds['fldList'])]
        assert flds['nrecords'] == len(flds['fldList'])
    return flds
def _get_useful_info_from_meta_file(metafile):
    """Distill a parsed .meta file down to what read_mds needs.

    Returns ``(nrecs, shape, name, dtype, fldlist)`` where *shape* is in C
    order (the .meta file stores Fortran order) with the record count
    prepended as the leading axis.
    """
    meta = parse_meta_file(metafile)
    # reverse order (numpy uses C order, mds is fortran)
    shape = [dims[0] for dims in meta['dimList']][::-1]
    assert len(shape) == meta['nDims']
    # prepend the number of records as an extra leading dimension
    nrecs = meta['nrecords']
    shape.insert(0, nrecs)
    dtype = meta['dataprec']
    if 'fldList' in meta:
        fldlist = meta['fldList']
        name = fldlist[0]
    else:
        fldlist = None
        name = meta['basename']
    return nrecs, shape, name, dtype, fldlist
def read_mds(fname, iternum=None, use_mmap=True, force_dict=True, endian='>',
             shape=None, dtype=None, dask_delayed=True, llc=False,
             llc_method="smallchunks"):
    """Read an MITgcm .meta / .data file pair

    PARAMETERS
    ----------
    fname : str
        The base name of the data file pair (without a .data or .meta suffix)
    iternum : int, optional
        The iteration number suffix
    use_mmap : bool, optional
        Whether to read the data using a numpy.memmap
    force_dict : bool, optional
        Whether to return a dictionary of ``{varname: data}`` pairs
    endian : {'>', '<', '|'}, optional
        Endianness of the data
    dtype : numpy.dtype, optional
        Data type of the data (will be inferred from the .meta file by default)
    shape : tuple, optional
        Shape of the data (will be inferred from the .meta file by default)
    dask_delayed : bool, optional
        Whether wrap the reading of the raw data in a ``dask.delayed`` object
    llc : bool, optional
        Whether the data is from an LLC geometry
    llc_method : {'smallchunks', 'bigchunks'}
        Which routine to use for reading raw LLC. "smallchunks" splits the file
        into a individual dask chunk of size (nx x nx) for each face of each
        level (i.e. the total number of chunks is 13 * nz). "bigchunks" loads
        the whole raw data file (either into memory or as a numpy.memmap),
        splits it into faces, and concatenates those faces together using
        ``dask.array.concatenate``. The different methods will have different
        memory and i/o performance depending on the details of the system
        configuration.

    RETURNS
    -------
    data : dict
        The keys correspond to the variable names of the different variables in
        the data file. The values are the data itself, either as an
        ``numpy.ndarray``, ``numpy.memmap``, or ``dask.array.Array`` depending
        on the options selected.
    """
    if iternum is None:
        istr = ''
    else:
        assert isinstance(iternum, int)
        istr = '.%010d' % iternum
    datafile = fname + istr + '.data'
    metafile = fname + istr + '.meta'

    # get metadata
    try:
        nrecs, shape, name, dtype, fldlist = _get_useful_info_from_meta_file(metafile)
        dtype = dtype.newbyteorder(endian)
    except IOError:
        # we can recover from not having a .meta file if dtype and shape have
        # been specified already
        if shape is None:
            raise IOError("Cannot find the shape associated to %s in the metadata." % fname)
        elif dtype is None:
            raise IOError("Cannot find the dtype associated to %s in the metadata, "
                          "please specify the default dtype to avoid this error." % fname)
        else:
            nrecs = 1
            shape = list(shape)
            shape.insert(0, nrecs)
            name = os.path.basename(fname)
            # single record: the per-field name list is never consulted below
            fldlist = None

    # TODO: refactor overall logic of the code below
    # this will exclude vertical profile files
    if llc and shape[-1] > 1:
        # remember that the first dim is nrec
        if len(shape) == 4:
            _, nz, ny, nx = shape
        else:
            _, ny, nx = shape
            nz = 1
        if llc_method == 'bigchunks' and (not use_mmap):
            # this would load a ton of data... need to delay it.
            # BUGFIX: the original passed ``memmap=memmap`` (an undefined
            # name, NameError at runtime) and called dsa.from_delayed without
            # its required shape/dtype arguments. read_3d_llc_data squeezes
            # the z axis when nz == 1, so the delayed shape mirrors that.
            delayed_shape = ((nrecs, nz, LLC_NUM_FACES, nx, nx) if nz > 1
                             else (nrecs, LLC_NUM_FACES, nx, nx))
            d = dsa.from_delayed(
                delayed(read_3d_llc_data)(datafile, nz, nx, dtype=dtype,
                                          memmap=use_mmap, nrecs=nrecs,
                                          method=llc_method),
                delayed_shape, dtype
            )
        else:
            # "smallchunks" builds its own lazy dask graph; memmapping the
            # whole file would defeat the purpose
            if llc_method == 'smallchunks':
                use_mmap = False
            d = read_3d_llc_data(datafile, nz, nx, dtype=dtype, memmap=use_mmap,
                                 nrecs=nrecs, method=llc_method)

    elif dask_delayed:
        d = dsa.from_delayed(
            delayed(read_raw_data)(datafile, dtype, shape, use_mmap=use_mmap),
            shape, dtype
        )
    else:
        d = read_raw_data(datafile, dtype, shape, use_mmap=use_mmap)

    if nrecs == 1:
        if force_dict:
            return {name: d[0]}
        else:
            return d[0]
    else:
        # multi-record file: key each record by its field name
        out = {}
        for n, name in enumerate(fldlist):
            out[name] = d[n]
        return out
def read_raw_data(datafile, dtype, shape, use_mmap=False):
    """Read a raw binary file and shape it.

    PARAMETERS
    ----------
    datafile : str
        Path to a .data file
    dtype : numpy.dtype
        Data type of the data
    shape : tuple
        Shape of the data
    use_mmap : bool, optional
        Whether to read the data using a numpy.memmap

    RETURNS
    -------
    data : numpy.ndarray
        The data (or a memmap to it)

    RAISES
    ------
    IOError
        If the on-disk size does not equal ``prod(shape) * dtype.itemsize``.
    """
    # sanity-check the file size before reading anything
    number_of_values = 1
    for dim in shape:
        number_of_values *= dim
    expected_number_of_bytes = number_of_values * dtype.itemsize
    actual_number_of_bytes = os.path.getsize(datafile)
    if expected_number_of_bytes != actual_number_of_bytes:
        raise IOError('File `%s` does not have the correct size '
                      '(expected %g, found %g)' %
                      (datafile,
                       expected_number_of_bytes,
                       actual_number_of_bytes))
    if use_mmap:
        data = np.memmap(datafile, dtype, 'r')
    else:
        data = np.fromfile(datafile, dtype)
    data.shape = shape
    return data
def parse_available_diagnostics(fname, layers=None):
    """Examine the available_diagnostics.log file and translate it into
    useful variable metadata.

    PARAMETERS
    ----------
    fname : str or buffer
        the path to the diagnostics file or a file buffer
    layers : dict (optional)
        dictionary mapping layers names to dimension sizes

    RETURNS
    -------
    all_diags : dict
        keyed by variable name; each value is
        ``dict(dims=<coordinate names>, attrs=<metadata attributes>)``
    """
    # default was a (harmless but unidiomatic) mutable ``{}``
    layers = layers if layers is not None else {}
    all_diags = {}
    diag_id_lookup = {}
    mate_lookup = {}

    # mapping between the available_diagnostics.log codes and the actual
    # coordinate names
    # http://mitgcm.org/public/r2_manual/latest/online_documents/node268.html
    xcoords = {'U': 'i_g', 'V': 'i', 'M': 'i', 'Z': 'i_g'}
    ycoords = {'U': 'j', 'V': 'j_g', 'M': 'j', 'Z': 'j_g'}
    rcoords = {'M': 'k', 'U': 'k_u', 'L': 'k_l'}

    # need to be able to accept string filename or buffer
    def process_buffer(f):
        for l in f:
            # will automatically skip first four header lines
            c = re.split(r'\|', l)
            if len(c) == 7 and c[0].strip() != 'Num':
                # parse the line to extract the relevant variables
                key = c[1].strip()
                diag_id = int(c[0].strip())
                diag_id_lookup[diag_id] = key
                levs = int(c[2].strip())
                mate = c[3].strip()
                if mate:
                    mate = int(mate)
                    mate_lookup[key] = mate
                code = c[4]
                units = c[5].strip()
                desc = c[6].strip()

                # decode what those variables mean
                hpoint = code[1]
                rpoint = code[8]
                xycoords = [ycoords[hpoint], xcoords[hpoint]]
                rlev = code[9]

                if rlev == '1' and levs == 1:
                    zcoord = []
                elif rlev == 'R':
                    zcoord = [rcoords[rpoint]]
                elif rlev == 'X' and layers:
                    layer_name = key.ljust(8)[-4:].strip()
                    n_layers = layers[layer_name]
                    if levs == n_layers:
                        suffix = 'bounds'
                    elif levs == (n_layers-1):
                        suffix = 'center'
                    elif levs == (n_layers-2):
                        suffix = 'interface'
                    else:
                        suffix = None
                        # BUGFIX: was '"... rlev = %g ..." % rlev' which
                        # raised TypeError (rlev is a str), and the two
                        # string pieces were concatenated without a space.
                        warnings.warn("Could not match rlev = %s to a layers "
                                      "coordinate" % rlev)
                    dimname = (('l' + layer_name[0] + '_' + suffix[0]) if suffix
                               else '_UNKNOWN_')
                    zcoord = [dimname]
                else:
                    warnings.warn("Not sure what to do with rlev = " + rlev)
                    zcoord = ['_UNKNOWN_']
                coords = zcoord + xycoords
                all_diags[key] = dict(dims=coords,
                                      # we need a standard name
                                      attrs={'standard_name': key,
                                             'long_name': desc,
                                             'units': units})
    try:
        with open(fname) as f:
            process_buffer(f)
    except TypeError:
        # fname was already an open buffer, not a path
        process_buffer(fname)

    # add mate information
    for key, mate_id in mate_lookup.items():
        all_diags[key]['attrs']['mate'] = diag_id_lookup[mate_id]
    return all_diags
# stuff related to llc mds file structure
# Number of square faces in the lat-lon-cap (LLC) decomposition.
LLC_NUM_FACES=13
# Start/stop face indices of the 5 facets, in units of faces, as they are
# stacked along the j dimension of the raw file (used by _read_2d_facet to
# compute byte offsets).
facet_strides = ((0,3), (3,6), (6,7), (7,10), (10,13))
# In-file array memory order of each facet: first three are C-ordered, the
# last two Fortran-ordered (used as the reshape order in _read_2d_facet).
facet_orders = ('C', 'C', 'C', 'F', 'F')
# Which facet each of the 13 faces belongs to.
face_facets = [0, 0, 0, 1, 1, 1, 2, 3, 3, 3, 4, 4, 4]
# Position of each face within its facet, in units of nx rows
# (multiplied by nx in _read_2d_face to form the row slice).
face_offsets = [0, 1, 2, 0, 1, 2, 0, 0, 1, 2, 0, 1, 2]
# Whether a face must be transposed after extraction — True exactly for the
# faces that live in the Fortran-ordered facets (see _read_2d_face).
transpose_face = [False, False, False, False, False, False, False,
                  True, True, True, True, True, True]
def _read_2d_facet(fname, nfacet, nlev, nx, dtype='>f8', memmap=True):
    """Read one 2D facet of one vertical level from a raw LLC file.

    Returns an array of shape ``(facet_ny, nx)`` where ``facet_ny`` depends
    on how many faces the facet spans, in the facet's native memory order.
    """
    # normalize whatever dtype specification was passed in
    dtype = np.dtype(dtype)
    itemsize = dtype.itemsize
    # byte offset of this facet within its level, plus the level offset
    start_face, stop_face = facet_strides[nfacet]
    facet_offset = start_face * nx * nx * itemsize
    level_offset = LLC_NUM_FACES * nx * nx * itemsize * nlev
    offset = facet_offset + level_offset
    # geometry of the facet: memory order, shape and element count
    facet_order = facet_orders[nfacet]
    facet_ny = (stop_face - start_face) * nx
    facet_shape = (facet_ny, nx)
    facet_nitems = facet_ny * nx
    with open(fname, 'rb') as f:
        if memmap:
            data = np.memmap(f, dtype=dtype, mode='r', offset=offset,
                             shape=facet_shape, order=facet_order)
        else:
            f.seek(offset)
            data = np.fromfile(f, dtype=dtype, count=facet_nitems)
            data = data.reshape(facet_shape, order=facet_order)
    return data
def _read_2d_face(fname, nface, nlev, nx, dtype='>f8', memmap=True):
    """Read a single (nx, nx) face of one vertical level from a raw LLC file.

    Extracts the face's row range from its parent facet and transposes it
    when the facet is Fortran-ordered.
    """
    nfacet = face_facets[nface]
    # row offset of this face within its facet, in units of array rows
    # (the original also computed an unused ``facet_offset`` local)
    row0 = nx * face_offsets[nface]
    data_facet = _read_2d_facet(fname, nfacet, nlev, nx,
                                dtype=dtype, memmap=memmap)
    data = data_facet[row0:row0 + nx]
    if transpose_face[nface]:
        data = data.T
    return data
# manually construct dask graph
def read_3d_llc_data(fname, nz, nx, dtype='>f8', memmap=True, nrecs=1,
                     method="smallchunks"):
    """Read a three-dimensional LLC file using a custom dask graph.

    PARAMETERS
    ----------
    fname : string
        Path to the file on disk
    nz : int
        Number of vertical levels
    nx : int
        Size of each face side dimension
    dtype : np.dtype, optional
        Datatype of the data
    memmap : bool, optional
        Whether to read the data using np.memmap. Forced to be ``False`` for
        ``method="smallchunks"``.
    nrecs : int, optional
        The number of records in a multi-record file
    method : {"smallchunks", "bigchunks"}, optional
        Which routine to use for reading raw LLC. "smallchunks" splits the file
        into a individual dask chunk of size (nx x nx) for each face of each
        level (i.e. the total number of chunks is 13 * nz). "bigchunks" loads
        the whole raw data file (either into memory or as a numpy.memmap),
        splits it into faces, and concatenates those faces together using
        ``dask.array.concatenate``. The different methods will have different
        memory and i/o performance depending on the details of the system
        configuration.

    RETURNS
    -------
    data : dask.array.Array
        The data, with shape (nrecs, nz, 13, nx, nx); the z axis is
        squeezed out when nz == 1.
    """
    dtype=np.dtype(dtype)
    if method=="smallchunks":
        def load_chunk(nface, nlev):
            # one task per (face, level) pair, expanded to a
            # (1, 1, 1, nx, nx) block so chunks tile the 5D array
            return _read_2d_face(fname, nface, nlev, nx,
                            dtype=dtype, memmap=memmap)[None, None, None]
        chunks = (1, 1, 1, nx, nx)
        shape = (nrecs, nz, LLC_NUM_FACES, nx, nx)
        name = 'llc-' + tokenize(fname)  # unique identifier
        # we hack the record number as extra vertical levels: record nrec,
        # level nlev lives at linear level index nlev + nz*nrec in the file
        dsk = {(name, nrec, nlev, nface, 0, 0): (load_chunk, nface,
                                                 nlev + nz*nrec)
               for nface in range(LLC_NUM_FACES)
               for nlev in range(nz)
               for nrec in range(nrecs)}
        data = dsa.Array(dsk, name, chunks, dtype=dtype, shape=shape)

    elif method=="bigchunks":
        # read the whole file (or memmap it), then split the stacked-j
        # layout into faces along axis 2
        shape = (nrecs, nz, LLC_NUM_FACES*nx, nx)
        # the dimension that needs to be reshaped
        jdim = 2
        data = read_raw_data(fname, dtype, shape, use_mmap=memmap)
        data= _reshape_llc_data(data, jdim)

    # automatically squeeze off z dimension; this matches mds file behavior
    if nz==1:
        data = data[:,0]
    return data
# a deprecated function that I can't bear to delete because it was painful to
# write
def _reshape_llc_data(data, jdim): # pragma: no cover
    """Fix the weird problem with llc data array order.

    Splits the raw LLC array (13 faces stacked along ``jdim``) into the 5
    facets of the lat-lon-cap layout, transposes the two rotated facets, and
    re-stacks everything as individual faces concatenated along ``jdim``
    (zero-copy, via dask).

    Parameters
    ----------
    data : array-like
        Raw data with all 13 LLC faces stacked along dimension ``jdim``.
    jdim : int
        Index of the dimension holding the stacked faces.

    Returns
    -------
    dask.array.Array
        The data with faces split and concatenated along ``jdim``.
    """
    # Can we do this without copying any data?
    # If not, we need to go upstream and implement this at the MDS level
    # Or can we fudge it with dask?
    # this is all very specific to the llc file output
    # would be nice to generalize more, but how?
    nside = data.shape[jdim] // LLC_NUM_FACES
    # how the LLC data is laid out along the j dimension:
    # facet boundaries in units of nside
    strides = ((0,3), (3,6), (6,7), (7,10), (10,13))
    # whether to reshape (transpose) each facet -- the last two facets are
    # stored rotated relative to the first three
    reshape = (False, False, False, True, True)
    # this will slice the data into 5 facets
    slices = [jdim * (slice(None),) + (slice(nside*st[0], nside*st[1]),)
              for st in strides]
    facet_arrays = [data[sl] for sl in slices]
    face_arrays = []
    for ar, rs, st in zip(facet_arrays, reshape, strides):
        nfaces_in_facet = st[1] - st[0]
        shape = list(ar.shape)
        if rs:
            # we assume the other horizontal dimension is immediately after jdim
            shape[jdim] = ar.shape[jdim+1]
            shape[jdim+1] = ar.shape[jdim]
        # insert a length-1 dimension along which to concatenate
        shape.insert(jdim, 1)
        # this modify the array shape in place, with no copies allowed
        # but it doesn't work with dask arrays
        # ar.shape = shape
        ar = ar.reshape(shape)
        # now ar is properly shaped, but we still need to slice it into faces
        # NOTE: rs is a bool, so jdim + 1 + rs is jdim+1 (unrotated facets)
        # or jdim+2 (rotated facets)
        face_slice_dim = jdim + 1 + rs
        for n in range(nfaces_in_facet):
            face_slice = (face_slice_dim * (slice(None),) +
                          (slice(nside*n, nside*(n+1)),))
            data_face = ar[face_slice]
            face_arrays.append(data_face)
    # We can't concatenate using numpy (hcat etc.) because it makes a copy,
    # presumably loading the memmaps into memory.
    # Using dask gets around this.
    # But what if we want different chunks, or already chunked the data
    # upstream? Doesn't seem like this is ideal
    # TODO: Refactor handling of dask arrays and chunking
    #return np.concatenate(face_arrays, axis=jdim)
    # the dask version doesn't work because of this:
    # https://github.com/dask/dask/issues/1645
    face_arrays_dask = [dsa.from_array(fa, chunks=fa.shape)
                        for fa in face_arrays]
    concat = dsa.concatenate(face_arrays_dask, axis=jdim)
    return concat
def _llc_face_shape(llc_id):
"""Given an integer identifier for the llc grid, return the face shape."""
# known valid LLC configurations
if llc_id in (90, 270, 1080, 2160, 4320):
return (llc_id, llc_id)
else:
raise ValueError("%g is not a valid llc identifier" % llc_id)
def _llc_data_shape(llc_id, nz=None):
    """Given an integer identifier for the llc grid, and possibly a number of
    vertical grid points, return the expected shape of the full data field.

    Parameters
    ----------
    llc_id : int
        LLC grid identifier (e.g. 90, 270, 4320).
    nz : int, optional
        Number of vertical levels; if given, it is prepended to the shape.

    Returns
    -------
    tuple of int
        ``(13, ny, nx)`` or, with ``nz``, ``(nz, 13, ny, nx)``.
    """
    # this is a constant for all LLC setups
    NUM_FACES = 13
    face_shape = _llc_face_shape(llc_id)
    # BUG FIX: the original bound the result to ``tile_shape`` but then
    # referenced the undefined name ``face_shape``, raising NameError.
    data_shape = (NUM_FACES,) + face_shape
    if nz is not None:
        data_shape = (nz,) + data_shape
    # should we accommodate multiple records?
    # no, not in this function
    return data_shape
| {
"content_hash": "162779fa97d819167d2c81ce31ff4d82",
"timestamp": "",
"source": "github",
"line_count": 529,
"max_line_length": 91,
"avg_line_length": 37.25897920604915,
"alnum_prop": 0.5636225266362253,
"repo_name": "sambarluc/xmitgcm",
"id": "dbabb2de3d48d8dfd2aa0d67ac103a6d89a9de69",
"size": "19710",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "xmitgcm/utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "129014"
},
{
"name": "Python",
"bytes": "138657"
}
],
"symlink_target": ""
} |
from microservice.tests.microservice_test_case import MicroserviceTestCase
from microservice.core import settings
from microservice.tests import microservices_for_testing
class TestZeroMode(MicroserviceTestCase):
    """Exercise microservice calls in ZERO deployment mode: everything runs
    as plain local function calls, so the transport mock must stay unused."""

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        settings.deployment_mode = settings.DeploymentMode.ZERO
        cls.args = (1, 2, 3)
        cls.kwargs = {'a': 'asdf', 'b': 123}

    def test_basic_request(self):
        response = microservices_for_testing.echo_as_dict(
            *self.args, **self.kwargs)
        expected = {'_args': self.args}
        expected.update(self.kwargs)
        self.assertEqual(response, expected)
        self.mocked_send_object_to_service.assert_not_called()

    def test_nested_request(self):
        response = microservices_for_testing.echo_as_dict2(
            *self.args, **self.kwargs)
        outer = {'_args': self.args}
        outer.update(self.kwargs)
        inner = {'_args': microservices_for_testing.echo_as_dict2_args}
        inner.update(microservices_for_testing.echo_as_dict2_kwargs)
        self.assertEqual(response, (outer, inner))
        self.mocked_send_object_to_service.assert_not_called()

    def test_exception_raised(self):
        self.assertRaises(RuntimeError,
                          microservices_for_testing.exception_raiser)
| {
"content_hash": "c0ada6e3fb8080604ca5865ecc716709",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 74,
"avg_line_length": 33.875,
"alnum_prop": 0.6369003690036901,
"repo_name": "MartinHowarth/microservice",
"id": "edd325cba1fd9d08556b3830c0a4c925a843e9f9",
"size": "1355",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "microservice/tests/test_zero_mode.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "105842"
},
{
"name": "Shell",
"bytes": "188"
}
],
"symlink_target": ""
} |
# Public API of this module. CONSISTENCY FIX: Dimension and Schedule are
# public child resources (exposed via Project.dimensions() and
# Experiment.schedules()) but were missing from __all__.
__all__ = ['Project', 'Experiment', 'Result', 'Variation', 'Goal', 'Audience',
           'Dimension', 'Schedule']
import json
import urllib
class ResourceGenerator(object):
    """Binds an API client to a resource class so callers can fetch,
    create and update instances without passing the client around.
    """

    def __init__(self, client=None, resource=None):
        """Store the client/resource pair; both are required."""
        if client is None:
            raise ValueError('Must specify client.')
        if resource is None:
            raise ValueError('Must specify resource.')
        self.client = client
        self.resource = resource

    def get(self, optimizely_ids=None):
        """Fetch resources.

        ``None`` (or falsy) -> list all; an int -> fetch that one instance;
        a list/tuple of ints -> fetch each one.
        """
        if not optimizely_ids:
            return self.resource.list(client=self.client)
        elif isinstance(optimizely_ids, int):
            instance = self.resource(self.client, optimizely_id=optimizely_ids)
            instance.refresh()
            return instance
        elif isinstance(optimizely_ids, (list, tuple)):
            # recurse so the single-id branch does the actual fetching
            return [self.get(optimizely_id) for optimizely_id in optimizely_ids]
        # BUG FIX: previously an unsupported argument type fell through and
        # silently returned None; fail loudly instead.
        raise TypeError('optimizely_ids must be None, an int, or a '
                        'list/tuple of ints, got %r' % type(optimizely_ids))

    def create(self, data):
        """Create a new instance of the bound resource from *data*."""
        return self.resource.create(data, self.client)

    def update(self, rid, data):
        """Update the resource with id *rid* using *data*."""
        return self.resource.update(rid, data, self.client)
class APIObject(object):
    """Common behaviour shared by every API resource class.

    Holds the client, knows its own URL segment, and can re-fetch its
    attributes from the API.
    """

    def __init__(self, client, optimizely_id=None):
        self.client = client
        # an explicit id means we can (and do) fetch the object immediately
        if optimizely_id:
            self.id = optimizely_id
            self.refresh()

    def refresh(self):
        """Re-fetch this object's attributes from the API."""
        if not hasattr(self, 'id'):
            raise AttributeError('%s object has no ID, so it cannot be refreshed' % self.class_name())
        fresh = self.client.request('get', [self.class_url(), self.id])
        self._refresh_from(fresh)

    def _refresh_from(self, params):
        # mirror every field returned by the API onto this instance
        for field, value in params.iteritems():
            setattr(self, field, value)

    @classmethod
    def class_name(cls):
        if cls == APIObject:
            raise NotImplementedError(
                'APIObject is an abstract class. You should perform '
                'actions on its subclasses (e.g. Project, Experiment)')
        return cls.__name__.lower()

    @classmethod
    def class_url(cls):
        # pluralized lowercase class name, e.g. Project -> "projects"
        return '%ss' % cls.class_name()

    def get_child_objects(self, resource):
        """Fetch all *resource* instances nested under this object."""
        children = []
        path = [self.class_url(), self.id, resource.class_url()]
        for payload in self.client.request('get', path):
            child = resource(self.client)
            child._refresh_from(payload)
            children.append(child)
        return children
class ListableObject(APIObject):
    """Mixin adding list(): fetch every instance of the resource."""

    @classmethod
    def list(cls, client):
        """Return all instances of this resource visible to *client*."""
        results = []
        for payload in client.request('get', [cls.class_url()]):
            obj = cls(client)
            obj._refresh_from(payload)
            results.append(obj)
        return results
class CreatableObject(APIObject):
    """Mixin adding create(): POST a new top-level resource."""

    @classmethod
    def create(cls, data, client):
        """Create a resource from *data* and return the populated instance."""
        response = client.request('post', [cls.class_url()],
                                  data=json.dumps(data),
                                  headers={'Content-Type': 'application/json'})
        instance = cls(client)
        instance._refresh_from(response)
        return instance
class CreatableChildObject(APIObject):
    """Mixin adding create() for resources nested under a parent resource.

    Subclasses set ``parent_resource``; the parent's id is read from
    ``data['<parent>_id']``.
    """

    parent_resource = None

    @classmethod
    def create(cls, data, client):
        """POST *data* under the parent resource and return the new instance."""
        parent = cls.parent_resource
        path = [parent.class_url(),
                data['%s_id' % parent.class_name()],
                cls.class_url()]
        response = client.request('post', path,
                                  data=json.dumps(data),
                                  headers={'Content-Type': 'application/json'})
        instance = cls(client)
        instance._refresh_from(response)
        return instance
class UpdatableObject(APIObject):
    """Mixin adding save()/update(); only ``editable_fields`` are sent."""

    editable_fields = []

    def save(self):
        """PUT this object's current state and refresh from the response."""
        self._refresh_from(self.update(self.id, self.__dict__, self.client).__dict__)

    @classmethod
    def update(cls, rid, data, client):
        """Update resource *rid* with the editable subset of *data*."""
        # strip everything the API does not allow us to modify
        updates = {f: v for f, v in data.iteritems() if f in cls.editable_fields}
        response = client.request('put', [cls.class_url(), rid],
                                  data=json.dumps(updates),
                                  headers={'Content-Type': 'application/json'})
        instance = cls(client)
        instance._refresh_from(response)
        return instance
class DeletableObject(APIObject):
    """Mixin adding delete(): remove the resource from the API."""
    def delete(self):
        # issues DELETE <class_url>/<id>; no local state is cleared
        self.client.request('delete', [self.class_url(), self.id])
class Project(ListableObject, CreatableObject, UpdatableObject):
    """A top-level project; parent of experiments, goals, audiences and
    dimensions (fetched via the accessor methods below)."""
    # fields the API allows updating on a project
    editable_fields = ['ip_filter',
                       'include_jquery',
                       'project_name',
                       'project_status']
    def experiments(self):
        # GET projects/<id>/experiments
        return self.get_child_objects(Experiment)
    def goals(self):
        # GET projects/<id>/goals
        return self.get_child_objects(Goal)
    def audiences(self):
        # GET projects/<id>/audiences
        return self.get_child_objects(Audience)
    def dimensions(self):
        # GET projects/<id>/dimensions
        return self.get_child_objects(Dimension)
class Experiment(CreatableChildObject, UpdatableObject, DeletableObject):
    """An experiment belonging to a Project; parent of results, variations
    and schedules. Goals are attached/detached via their experiment_ids."""

    parent_resource = Project
    # fields the API allows updating on an experiment
    editable_fields = ['audience_ids',
                       'activation_mode',
                       'description',
                       'edit_url',
                       'status',
                       'custom_css',
                       'custom_js',
                       'percentage_included',
                       'url_conditions']

    def results(self):
        # GET experiments/<id>/results
        return self.get_child_objects(Result)

    def variations(self):
        # GET experiments/<id>/variations
        return self.get_child_objects(Variation)

    def schedules(self):
        # GET experiments/<id>/schedules
        return self.get_child_objects(Schedule)

    def add_goal(self, gid):
        """Attach this experiment to goal *gid* and save the goal."""
        # Goal(client, gid) fetches the goal's current state in __init__
        goal = Goal(self.client, gid)
        experiment_ids = set(goal.experiment_ids)
        experiment_ids.add(self.id)
        goal.experiment_ids = list(experiment_ids)
        return goal.save()

    def remove_goal(self, gid):
        """Detach this experiment from goal *gid* and save the goal."""
        # FIX: the constructor already refreshes when given an id, so the
        # explicit goal.refresh() that used to follow was a redundant second
        # API round-trip (add_goal never had it).
        goal = Goal(self.client, gid)
        experiment_ids = set(goal.experiment_ids)
        experiment_ids.remove(self.id)
        goal.experiment_ids = list(experiment_ids)
        return goal.save()
class Result(APIObject):
    """Read-only experiment result; fetched via Experiment.results()."""
    pass
class Variation(CreatableChildObject, UpdatableObject, DeletableObject):
    """A variation nested under an Experiment."""
    parent_resource = Experiment
    # fields the API allows updating on a variation
    editable_fields = ['description',
                       'is_paused',
                       'js_component',
                       'weight']
class Goal(CreatableChildObject, UpdatableObject, DeletableObject):
    """A conversion goal nested under a Project; linked to experiments
    through its ``experiment_ids`` field (see Experiment.add_goal)."""
    parent_resource = Project
    # fields the API allows updating on a goal
    editable_fields = ['addable',
                       'experiment_ids',
                       'goal_type',
                       'selector',
                       'target_to_experiments',
                       'target_urls',
                       'target_url_match_types',
                       'title',
                       'urls',
                       'url_match_types']
class Audience(CreatableChildObject, UpdatableObject):
    """An audience nested under a Project (note: not deletable)."""
    parent_resource = Project
    # fields the API allows updating on an audience
    editable_fields = ['name',
                       'description',
                       'conditions',
                       'segmentation']
class Dimension(CreatableChildObject, UpdatableObject, DeletableObject):
    """A custom dimension nested under a Project."""
    parent_resource = Project
    # fields the API allows updating on a dimension
    editable_fields = ['name',
                       'client_api_name',
                       'description']
class Schedule(CreatableChildObject, UpdatableObject, DeletableObject):
    """A start/stop schedule nested under an Experiment."""
    parent_resource = Experiment
    # fields the API allows updating on a schedule
    editable_fields = ['start_time',
                       'stop_time']
| {
"content_hash": "2f918ce2354638574763ce2d16922286",
"timestamp": "",
"source": "github",
"line_count": 245,
"max_line_length": 104,
"avg_line_length": 30.081632653061224,
"alnum_prop": 0.5527815468113976,
"repo_name": "wlowry88/optimizely-client-python",
"id": "1b114f10d8e57e33272e4052f2ec294aeb14b2bf",
"size": "7370",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "optimizely/resource.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "12319"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import tensorflow as tf
# Board encoding: 8x8 grid with 12 channels -- presumably one binary plane
# per piece type and colour (6 x 2); confirm against the board encoder.
WIDTH = HEIGHT = 8
CHANNELS = 12
# Convolution hyper-parameters shared by both conv layers.
FILTERS = 10
KERNEL_SIZE = 3
STRIDES = [1, 1]
PADDING = 'SAME'
# Width of the fully-connected layer after flattening.
DENSE_HIDDEN_UNITS = 2048
KAPPA = 10.0 # Sigmoid sharpness in the ranking loss; emphasizes f(p) = -f(q)
class ChessConvolutionalNetwork(object):
    """Convolutional position evaluator built as a TF1 static graph.

    Four placeholder inputs (X, X_parent, X_observed, X_random) are scored by
    one weight-shared evaluation tower; training minimizes a three-term
    ranking loss over (parent, observed, random) position triples.
    """
    def __init__(self, learning_rate, adam_epsilon):
        """Build the full graph: inputs, shared towers, loss and train op."""
        with tf.variable_scope("input"):
            self.X = self._get_input('X')
            self.X_parent = self._get_input('X_parent')
            self.X_observed = self._get_input('X_observed')
            self.X_random = self._get_input('X_random')
        with tf.variable_scope("f_p"):
            # defaults to inference mode; set True only while training
            self.training = tf.placeholder_with_default(False, shape=(), name='training')
            self.f = self._get_evaluation_function(self.X)
            # reuse the same weights for the three auxiliary towers
            tf.get_variable_scope().reuse_variables()
            self.f_parent = self._get_evaluation_function(self.X_parent)
            self.f_observed = self._get_evaluation_function(self.X_observed)
            self.f_random = self._get_evaluation_function(self.X_random)
        with tf.name_scope('loss'):
            self.loss = self._get_loss()
        with tf.name_scope('train'):
            self.training_op = self._get_training_op(learning_rate, adam_epsilon)
    def train(self, session, X_parent, X_observed, X_random):
        """Run one optimization step on a batch of position triples."""
        session.run(self.training_op, feed_dict={
            self.X_parent: X_parent,
            self.X_observed: X_observed,
            self.X_random: X_random,
            self.training: True,
        })
    def compute_loss(self, session, X_parent, X_observed, X_random, detailed=False):
        """Return the scalar loss; with detailed=True, also its three terms."""
        if not detailed:
            return session.run(self.loss, feed_dict={
                self.X_parent: X_parent,
                self.X_observed: X_observed,
                self.X_random: X_random,
            })
        else:
            return session.run([self.loss_a, self.loss_b, self.loss_c, self.loss], feed_dict={
                self.X_parent: X_parent,
                self.X_observed: X_observed,
                self.X_random: X_random,
            })
    def evaluate(self, session, X):
        """Score a batch of positions with the main tower."""
        return session.run(self.f, feed_dict={
            self.X: X
        })
    def _get_input(self, name):
        # one placeholder per board batch: (batch, 8, 8, 12)
        return tf.placeholder(tf.float32, shape=(None, WIDTH, HEIGHT, CHANNELS), name=name)
    def _get_evaluation_function(self, X):
        """Shared tower: two conv layers -> dense -> dropout -> scalar score."""
        conv1 = self._get_convolutional_layer(X, 'conv1')
        conv2 = self._get_convolutional_layer(conv1, 'conv2')
        conv2_flat = self._reshape_conv_layer(conv2)
        dense = tf.layers.dense(conv2_flat, DENSE_HIDDEN_UNITS, activation=tf.nn.relu, name='dense')
        # dropout is active only when self.training is fed as True
        dense_dropout = tf.layers.dropout(dense, rate=0.5, training=self.training)
        output = tf.layers.dense(dense_dropout, 1, activation=None, name='output')
        return output
    def _get_convolutional_layer(self, input, name):
        # ReLU conv layer with module-level hyper-parameters
        return tf.layers.conv2d(
            input,
            filters=FILTERS,
            kernel_size=KERNEL_SIZE,
            strides=STRIDES,
            padding=PADDING,
            activation=tf.nn.relu,
            name=name,
        )
    def _reshape_conv_layer(self, conv):
        # flatten (batch, h, w, c) -> (batch, h*w*c) for the dense layer
        return tf.contrib.layers.flatten(conv)
    def _get_loss(self):
        """Three sigmoid ranking terms; also stored individually for
        compute_loss(detailed=True)."""
        x_observed_random = self.f_random - self.f_observed
        x_parent_observed = self.f_parent + self.f_observed
        # epsilon keeps log() finite when the sigmoid saturates at 0
        epsilon_log = 1e-3
        loss_a = -tf.log(epsilon_log + tf.sigmoid(KAPPA * x_observed_random))
        loss_b = -tf.log(epsilon_log + tf.sigmoid(KAPPA * x_parent_observed))
        loss_c = -tf.log(epsilon_log + tf.sigmoid(-KAPPA * x_parent_observed))
        self.loss_a = tf.reduce_mean(loss_a, name='loss_a')
        self.loss_b = tf.reduce_mean(loss_b, name='loss_b')
        self.loss_c = tf.reduce_mean(loss_c, name='loss_c')
        return tf.reduce_mean(loss_a + loss_b + loss_c, name='loss')
    def _get_training_op(self, learning_rate, epsilon):
        # Adam minimizing the combined ranking loss
        optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate, epsilon=epsilon)
        return optimizer.minimize(self.loss)
| {
"content_hash": "ffac532c95afa084022ca726845ab90e",
"timestamp": "",
"source": "github",
"line_count": 110,
"max_line_length": 100,
"avg_line_length": 36.92727272727273,
"alnum_prop": 0.5950270802560315,
"repo_name": "srom/chessbot",
"id": "38f0752cac2d33e6e720468fc750c40f016c3d8a",
"size": "4062",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "estimator/train/convolutional.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Go",
"bytes": "34151"
},
{
"name": "Jupyter Notebook",
"bytes": "7467"
},
{
"name": "Python",
"bytes": "32395"
},
{
"name": "Shell",
"bytes": "276"
}
],
"symlink_target": ""
} |
import numpy as np
import time
import tree_utils
from Bio import Seq, SeqRecord
from Bio.Align import MultipleSeqAlignment, AlignInfo
from Bio.Alphabet import generic_dna, generic_protein
from node_ranking import node_ranking
# Module-wide switch for progress/timing output printed to stdout.
verbose = True

# One-letter amino acid code -> (three-letter code, full name).
# Includes the ambiguity codes B (Asx) and Z (Glx); the keys also define
# the amino acid alphabet used for allele-frequency arrays.
aa_codes = {
    'A':('Ala', 'Alanine'),
    'R':('Arg', 'Arginine'),
    'N':('Asn', 'Asparagine'),
    'D':('Asp', 'Aspartic acid'),
    'C':('Cys', 'Cysteine'),
    'Q':('Gln', 'Glutamine'),
    'E':('Glu', 'Glutamic acid'),
    'G':('Gly', 'Glycine'),
    'H':('His', 'Histidine'),
    'I':('Ile', 'Isoleucine'),
    'L':('Leu', 'Leucine'),
    'K':('Lys', 'Lysine'),
    'M':('Met', 'Methionine'),
    'F':('Phe', 'Phenylalanine'),
    'P':('Pro', 'Proline'),
    'S':('Ser', 'Serine'),
    'T':('Thr', 'Threonine'),
    'W':('Trp', 'Tryptophan'),
    'Y':('Tyr', 'Tyrosine'),
    'V':('Val', 'Valine'),
    'B':('Asx', 'Aspartic acid or Asparagine'),
    'Z':('Glx', 'Glutamine or Glutamic acid')}
##############################################################################
## class holding alignment + outgroup + allele frequencies etx
##############################################################################
class alignment(object):
'''
class holding an aligment, an outgroup, allele frequencies and utility functions
'''
def __init__(self, aln, outgroup, cds=None, collapse = False, build_tree=True):
'''
parameters:
aln -- biopython alignment
outgroup -- outgroup sequence
annotation -- dictionary or panda DataFrame that holds spatio/temporal info
cds -- coding region
'''
self.aln = aln
self.outgroup = outgroup
self.collapse=collapse
self.alphabet = 'ACTG-'
self.aa_alphabet = ''.join(aa_codes.keys())
self.make_tree=build_tree
if cds is not None:
self.cds = cds
self.protein = True
else:
# specifies the beginning and end of the coding region, allows to add
# a padding of XX to the beginning of the seq, in case not the complete
# cds is present (i.e., to preserve aa numbering in a protein)
self.cds = {'begin':0, 'end':0, 'pad':0}
self.protein = False
self.process_alignment()
def process_alignment(self):
'''
calculate different properties of the alignment that are needed for later
distance calculations
'''
if verbose:
t1 = time.time()
print "processing alignment of",len(self.aln), "sequences"
self.summary_info = AlignInfo.SummaryInfo(self.aln)
self.consensus = self.summary_info.dumb_consensus()
if verbose:
print "calculating allele frequencies...",
self.calculate_allele_frequencies()
if self.protein:
self.translate_alignment()
if verbose:
print "done in ", np.round(time.time()-t1,2),'seconds'
t1= time.time()
print "calculating tree..."
if self.make_tree:
self.build_tree(collapse_nodes = self.collapse)
if verbose:
print "done in ", np.round(time.time()-t1,2),'seconds'
def calculate_allele_frequencies(self):
'''
calculates the allele frequencies of the stored alignment
'''
# allocate an array for the allele frequencies and cast the alignment to an array
self.allele_frequencies = np.zeros((len(self.alphabet),
self.aln.get_alignment_length()))
self.aln_array = np.array(self.aln)
# loop over all nucleotides, calculate the frequency in each column
for ni,nuc in enumerate(self.alphabet):
self.allele_frequencies[ni,:]=np.mean(self.aln_array==nuc, axis=0)
def calculate_aa_allele_frequencies(self):
'''
calculates the allele frequencies of the stored amino acid alignment
'''
if self.protein:
self.aa_allele_frequencies = np.zeros((len(self.aa_alphabet),
self.aa_aln.get_alignment_length()))
tmp_aln = np.array(self.aa_aln)
# loop over the entire amino acid alphabet, calculate the frequency in each column
for ai,aa in enumerate(self.aa_alphabet):
self.aa_allele_frequencies[ai,:]=np.mean(tmp_aln==aa, axis=0)
else:
print "Not a protein sequence"
def translate_alignment(self):
'''
translate the alignment and calculate amino acid consensus
'''
self.aa_aln = MultipleSeqAlignment([])
if self.cds['end']>=0:
last_base = self.cds['end']
else:
last_base = self.aln.get_alignment_length()+self.cds['end']+1
# translate, add cds['pad'] Xs at the beginning
# TODO: make translation gap-tolerant
for seq in self.aln:
try:
tmp_seq = 'X'*self.cds['pad'] + seq.seq[self.cds['begin']:last_base].translate()
except:
tmp_seq = Seq.Seq('X'*(self.cds['pad']+(self.cds['end']-self.cds['begin'])/3), generic_protein)
print self.cds
if self.cds['end']-self.cds['begin']==0:
tmp_seq=Seq.Seq('X', generic_protein)
self.aa_aln.append(SeqRecord.SeqRecord(seq = tmp_seq,
name=seq.name, id=seq.id))
# process amino acid alignment
self.aa_summary_info = AlignInfo.SummaryInfo(self.aa_aln)
self.aa_consensus = self.aa_summary_info.dumb_consensus()
self.calculate_aa_allele_frequencies()
def mean_distance_to_sequence(self, query):
'''
calculate the average hamming distance between the query sequence and
the stored alignment based on the allele frequencies
'''
distance = 0
# calculate the average distance at each alignment column via the allele frequencies
# for each nucleotide state. average over columns, sum over nucleotides
for ni, nuc in enumerate(self.alphabet):
distance += np.mean((np.array(query)==nuc)*(1-self.allele_frequencies[ni,:]))
return distance
def mean_distance_to_set(self, other_af):
'''
calculate the average hamming distance between the another alignment
and the stored alignment based on the allele frequencies
'''
# calculate the average distance at each alignment column via the allele frequencies
# for each nucleotide state. average over columns, sum over nucleotides
return np.mean(np.sum(self.allele_frequencies*(1.0-other_af), axis=0))
def aa_distance_to_sequence(self, query, positions = None):
'''
calculate the average hamming distance between the query sequence and
the stored alignment based on the allele frequencies
'''
if self.protein:
distance = 0
if positions is None: positions = np.arange(len(query))
relevant_positions = np.zeros(len(query))
relevant_positions[positions] = 1
# calculate the average distance at each alignment column via the allele frequencies
# for each amino acid. average over columns, sum over amino acids
for ai, aa in enumerate(self.aa_alphabet):
distance += np.mean((np.array(query)==aa)*(1-self.aa_allele_frequencies[ai,:])
*relevant_positions)
return distance
else:
print "Not a protein sequence"
return np.nan
def aa_distance_to_set(self, other, positions = None):
'''
calculate the average hamming distance between the another alignment
and the stored alignment based on the allele frequencies
'''
if self.protein:
if positions is None: positions = np.arange(self.aa_allele_frequencies.shape[1])
relevant_positions = np.zeros(self.aa_allele_frequencies.shape[1])
relevant_positions[positions]=1
# calculate the average distance at each alignment column via the allele frequencies
# for each amino acid. average over columns, sum over amino acids
return np.mean(np.sum(self.aa_allele_frequencies*(1.0-other.aa_allele_frequencies),
axis=0)*relevant_positions)
else:
print "Not a protein sequence"
return np.nan
def build_tree(self, collapse_nodes = False):
'''
given the alignment and the outgroup, infer a phylogenetic tree, root with outgroup
and infer ancestral states for each internal node
'''
if verbose:
print "calculating tree and infering ancestral sequences"
tmp_t = time.time()
self.T = tree_utils.calculate_tree(self.aln, self.outgroup, ancestral=True)
# put mutations on branches
if self.protein:
tree_utils.translate_sequences_on_tree(self.T, self.cds)
tree_utils.mutations_on_branches(self.T, aa=self.protein)
if verbose:
print "done in ", np.round(time.time()-tmp_t,2), "s"
#collapse branches with 0 length (that is identical sequences on both ends of the branch) if requested
if collapse_nodes:
tree_utils.collapse_zero_branches(self.T.root)
##############################################################################
## sub class of node_ranking that operates on an alignment rather than a tree
##############################################################################
class sequence_ranking(node_ranking):
    '''
    node_ranking specialized to sequence data: takes a processed alignment,
    derives the coalescent time scale from its diversity, and exposes the
    base class prediction/ranking machinery.
    '''
    def __init__(self, sequence_data, distance_scale = 1.0, *args, **kwargs):
        node_ranking.__init__(self, *args, **kwargs)
        self.distance_scale = distance_scale
        self.data = sequence_data
        # mean per-site heterozygosity (pi) of the alignment
        af = self.data.allele_frequencies
        mean_pi = np.sum(af * (1 - af), axis=0).mean()
        # distance_scale * D * T_2 = 1, T2 = pi/2
        self.time_scale = distance_scale * self.D * mean_pi * 0.5
        self.set_tree(tree=self.data.T, time_scale=self.time_scale)

    def predict(self):
        '''
        rank all nodes of the underlying tree.
        returns:
            the highest ranked external node (by the first ranking method)
        '''
        self.compute_rankings()
        return self.best_node(self.methods[0])
| {
"content_hash": "5f523e0d2d2060e1bc2797fef85952f6",
"timestamp": "",
"source": "github",
"line_count": 265,
"max_line_length": 111,
"avg_line_length": 40.50566037735849,
"alnum_prop": 0.5813303521520402,
"repo_name": "rneher/FitnessInference",
"id": "70bf00a6b66f1947eb716f5f3c192b91994f17ad",
"size": "11638",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "prediction_src/sequence_ranking.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "5909"
},
{
"name": "Python",
"bytes": "255715"
},
{
"name": "Shell",
"bytes": "34"
}
],
"symlink_target": ""
} |
from lucy.models import LucyObject
class Binary(LucyObject):
    """A set of binary packages produced by building a source Job."""

    _type = 'binaries'

    def __init__(self, job, arch, suite, binaries, builder, **kwargs):
        from lucy.models.job import Job
        job_record = Job.load(job)
        # binaries can only come from source-package jobs
        if job_record['package_type'] != 'source':
            raise ValueError("Package from Job isn't a source package")
        # default the source reference to the job's package
        kwargs.setdefault('source', job_record['package'])
        super(Binary, self).__init__(job=job_record['_id'],
                                     arch=arch,
                                     suite=suite,
                                     builder=builder,
                                     binaries=binaries,
                                     **kwargs)

    def get_source(self):
        """Load and return the Source this binary was built from."""
        from lucy.models.source import Source
        return Source.load(self['source'])

    def get_reports(self):
        """Yield every Report whose package field references this binary."""
        from lucy.models.report import Report
        query = {"package": self['_id']}
        for report in Report.query(query):
            yield report
| {
"content_hash": "13897d0c6e8ed5de4dd779fdd894677f",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 71,
"avg_line_length": 33.333333333333336,
"alnum_prop": 0.496,
"repo_name": "paultag/lucy",
"id": "fd545b71bbeb12e69fefb117ac32c3bd65a7c5af",
"size": "1000",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lucy/models/binary.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "2065"
},
{
"name": "Python",
"bytes": "44421"
},
{
"name": "Shell",
"bytes": "471"
}
],
"symlink_target": ""
} |
from crispy_forms.bootstrap import FormActions
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Submit, Layout, Fieldset
from django import forms
from campaign.models.user import Squadron
class SquadronForm(forms.ModelForm):
    """ModelForm for creating a Squadron, rendered via crispy-forms."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Configure a horizontal bootstrap layout for all fields.
        helper = FormHelper()
        helper.form_id = 'add_squadron'
        helper.form_class = 'form-horizontal'
        helper.label_class = 'col-lg-3'
        helper.field_class = 'col-lg-8'
        # Fieldset with the editable fields, followed by the submit button.
        helper.layout = Layout(
            Fieldset(
                'Create New Squadron',
                'name',
                'invite_only',
            ),
            FormActions(
                Submit('save', 'save')
            ),
        )
        self.helper = helper

    class Meta:
        model = Squadron
        fields = (
            'name',
            'invite_only',
        )
| {
"content_hash": "30bef140b257bc7a665f7a2bcbf4287a",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 60,
"avg_line_length": 27.975609756097562,
"alnum_prop": 0.5658238884045336,
"repo_name": "sheepeatingtaz/hotac-tracker",
"id": "87a045311e82243112f570223fabe75865aa0559",
"size": "1147",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "campaign/forms/squadron.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "7306"
},
{
"name": "HTML",
"bytes": "10675"
},
{
"name": "JavaScript",
"bytes": "5698"
},
{
"name": "Python",
"bytes": "36787"
}
],
"symlink_target": ""
} |
from pprint import pprint
from fo2.connections import db_cursor_so
from base.views import O2BaseGetPostView
import produto.forms as forms
import produto.queries as queries
class Custo(O2BaseGetPostView):
    """View that renders the detailed cost breakdown (BOM explosion) of an
    item, resolving defaults for color, size and structure alternative."""
    def __init__(self, *args, **kwargs):
        super(Custo, self).__init__(*args, **kwargs)
        self.Form_class = forms.CustoDetalhadoForm
        self.template_name = 'produto/custo.html'
        self.title_name = 'Custo de item'
        # query-string parameters accepted by the GET handler
        self.get_args = ['nivel', 'ref', 'tamanho', 'cor', 'alternativa']
    def mount_context(self):
        # Build the template context; on any validation failure, set
        # context['erro'] and return early.
        nivel = self.form.cleaned_data['nivel']
        ref = self.form.cleaned_data['ref']
        tamanho = self.form.cleaned_data['tamanho']
        cor = self.form.cleaned_data['cor']
        alternativa = self.form.cleaned_data['alternativa']
        self.context.update({
            'nivel': nivel,
            'ref': ref,
        })
        cursor = db_cursor_so(self.request)
        info = queries.nivel_ref_inform(cursor, nivel, ref)
        if len(info) == 0:
            self.context.update({'erro': 'Referência não encontrada'})
            return
        alternativas = queries.nivel_ref_estruturas(cursor, nivel, ref)
        if len(alternativas) == 0:
            self.context.update({'erro': 'Referência sem estrutura'})
            return
        alternativa0 = alternativas[0]
        # Resolve the color: '000000' in the first structure row appears to
        # mean "any color", so fall back to the product's first color.
        if cor == '':
            if alternativa0['COR'] == '000000':
                cores = queries.prod_cores(cursor, nivel, ref)
                cor = cores[0]['COR']
            else:
                cor = alternativa0['COR']
        else:
            cores = queries.prod_cores(cursor, nivel, ref)
            if cor not in [c['COR'] for c in cores]:
                self.context.update({
                    'erro': 'Cor não existe nessa referência'})
                return
        # Resolve the size analogously ('000' appears to mean "any size").
        if tamanho == '':
            if alternativa0['TAM'] == '000':
                tamanhos = queries.prod_tamanhos(cursor, nivel, ref)
                tam = tamanhos[0]['TAM']
            else:
                tam = alternativa0['TAM']
        else:
            tamanhos = queries.prod_tamanhos(cursor, nivel, ref)
            if tamanho in [t['TAM'] for t in tamanhos]:
                tam = tamanho
            else:
                self.context.update({
                    'erro': 'Tamanho não existe nessa referência'})
                return
        # Resolve the structure alternative; default to the first one.
        if alternativa is None:
            alt = alternativa0['ALTERNATIVA']
            alt_descr = alternativa0['DESCR']
        else:
            if alternativa in [a['ALTERNATIVA'] for a in alternativas]:
                alt = alternativa
                alt_descr = [
                    a['DESCR']
                    for a in alternativas
                    if a['ALTERNATIVA'] == alt][0]
            else:
                self.context.update({
                    'erro': 'Alternativa não existe nessa referência'})
                return
        data = queries.CustoItem(cursor, nivel, ref, tam, cor, alt).get_data()
        if not data:
            self.context.update({
                'erro': 'Item não encontrado'})
            return
        # First row is the item itself; render it bold.
        data[0]['|STYLE'] = 'font-weight: bold;'
        # First pass: find the deepest structure level and the maximum number
        # of decimal digits per numeric column (position of '.' from the end).
        max_estrut_nivel = 0
        max_digits_consumo = 0
        max_digits_preco = 0
        max_digits_custo = 0
        for row in data:
            max_estrut_nivel = max(max_estrut_nivel, row['ESTRUT_NIVEL'])
            num_digits_consumo = str(row['CONSUMO'])[::-1].find('.')
            max_digits_consumo = max(max_digits_consumo, num_digits_consumo)
            num_digits_preco = str(row['PRECO'])[::-1].find('.')
            max_digits_preco = max(max_digits_preco, num_digits_preco)
            num_digits_custo = str(row['CUSTO'])[::-1].find('.')
            max_digits_custo = max(max_digits_custo, num_digits_custo)
        # Second pass: per-row formatting metadata -- indent each row by its
        # structure level and right-pad numeric cells so decimals line up.
        ident = 1
        for row in data:
            row['CONSUMO|DECIMALS'] = max_digits_consumo
            row['PRECO|DECIMALS'] = max_digits_preco
            row['CUSTO|DECIMALS'] = max_digits_custo
            pad_left = row['ESTRUT_NIVEL'] * ident
            if row['ESTRUT_NIVEL'] != 0:
                row['|STYLE'] = f'padding-left: {pad_left}em;'
            pad_right = (max_estrut_nivel * ident) - pad_left
            for field in ['CONSUMO', 'PRECO', 'CUSTO']:
                row[f'{field}|STYLE'] = f'padding-right: {pad_right}em;'
        self.context.update({
            'cor': cor,
            'tam': tam,
            'alt': alt,
            'alt_descr': alt_descr,
            'headers': ['Estrutura',
                        'Sequência', 'Nível', 'Referência',
                        'Tamanho', 'Cor', 'Narrativa',
                        'Alternativa', 'Consumo', 'Preço', 'Custo'],
            'fields': ['ESTRUT_NIVEL',
                       'SEQ', 'NIVEL', 'REF',
                       'TAM', 'COR', 'DESCR',
                       'ALT', 'CONSUMO', 'PRECO', 'CUSTO'],
            'style': {9: 'text-align: right;',
                      10: 'text-align: right;',
                      11: 'text-align: right;'},
            'data': data,
        })
| {
"content_hash": "b7290f9d83eae6d601a3a2aa66c5f54c",
"timestamp": "",
"source": "github",
"line_count": 138,
"max_line_length": 78,
"avg_line_length": 37.072463768115945,
"alnum_prop": 0.5013682564503519,
"repo_name": "anselmobd/fo2",
"id": "6de8a0a6b7d4c987dae1a2867b179b70ad9cd527",
"size": "5130",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/produto/views/custo.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "160899"
},
{
"name": "HTML",
"bytes": "855985"
},
{
"name": "JavaScript",
"bytes": "203109"
},
{
"name": "PLSQL",
"bytes": "2762"
},
{
"name": "Python",
"bytes": "3228268"
},
{
"name": "Shell",
"bytes": "2161"
}
],
"symlink_target": ""
} |
from __future__ import annotations
import hashlib
import json
import os
from typing import Sequence
import pre_commit.constants as C
from pre_commit.hook import Hook
from pre_commit.languages import helpers
from pre_commit.prefix import Prefix
from pre_commit.util import CalledProcessError
from pre_commit.util import clean_path_on_failure
from pre_commit.util import cmd_output_b
# Directory name for the per-hook docker environment and the label stamped
# onto images built by pre-commit (presumably so its own images can be
# identified later -- confirm against the image cleanup code).
ENVIRONMENT_DIR = 'docker'
PRE_COMMIT_LABEL = 'PRE_COMMIT'

# Docker needs no language-version resolution or health checking; reuse the
# generic no-op helpers.
get_default_version = helpers.basic_get_default_version
health_check = helpers.basic_health_check
def _is_in_docker() -> bool:
try:
with open('/proc/1/cgroup', 'rb') as f:
return b'docker' in f.read()
except FileNotFoundError:
return False
def _get_container_id() -> str:
    """Extract this container's ID from /proc/1/cgroup.

    It's assumed that _is_in_docker() already verified /proc/1/cgroup is
    readable.  The cpuset cgroup controller has existed since cgroups were
    introduced, so its entry is a reliable place to read the ID from.
    """
    with open('/proc/1/cgroup', 'rb') as cgroup_file:
        for entry in cgroup_file:
            fields = entry.split(b':')
            if fields[1] == b'cpuset':
                # last path component of the cgroup path is the container ID
                return os.path.basename(fields[2]).strip().decode()
    raise RuntimeError('Failed to find the container ID in /proc/1/cgroup.')
def _get_docker_path(path: str) -> str:
    """Translate *path* to its host-side location when running inside Docker.

    Outside a container this is a no-op.  Inside one, the container's mounts
    are read from ``docker inspect`` and *path* is remapped from its
    in-container destination to the host source directory when possible.
    """
    if not _is_in_docker():
        return path

    container_id = _get_container_id()
    try:
        _, out, _ = cmd_output_b('docker', 'inspect', container_id)
    except CalledProcessError:
        # our own container is not visible from here (perhaps
        # docker-in-docker) -- leave the path untouched
        return path

    container, = json.loads(out)
    for mount in container['Mounts']:
        host_src = mount['Source']
        container_dest = mount['Destination']
        if os.path.commonpath((path, container_dest)) == container_dest:
            # path lives under this mount -- remap it onto the host source
            return path.replace(container_dest, host_src)

    # we're in Docker, but the path is not under any mount; nothing we can
    # do, so fall back to the original path
    return path
def md5(s: str) -> str:  # pragma: win32 no cover
    """Return the hex md5 digest of *s* (used only as a cache key, not for
    security)."""
    digest = hashlib.md5(s.encode())
    return digest.hexdigest()
def docker_tag(prefix: Prefix) -> str:  # pragma: win32 no cover
    """Return a deterministic image tag derived from the prefix directory
    name."""
    env_name = os.path.basename(prefix.prefix_dir)
    return f'pre-commit-{md5(env_name).lower()}'
def build_docker_image(
        prefix: Prefix,
        *,
        pull: bool,
) -> None:  # pragma: win32 no cover
    """Build and tag the hook's docker image from its repository checkout.

    :param pull: when True, pass ``--pull`` so the base image is refreshed.
    """
    args = [
        'docker', 'build',
        '--tag', docker_tag(prefix),
        '--label', PRE_COMMIT_LABEL,
    ]
    if pull:
        args.append('--pull')
    # The build context ('.') must be the final argument for old versions of
    # docker.  See #477.
    args.append('.')
    helpers.run_setup_cmd(prefix, tuple(args))
def install_environment(
        prefix: Prefix, version: str, additional_dependencies: Sequence[str],
) -> None:  # pragma: win32 no cover
    """Build the docker image for a hook repo and create its state directory.

    The docker language supports neither alternate versions nor additional
    dependencies, so both are asserted to be at their defaults.
    """
    helpers.assert_version_default('docker', version)
    helpers.assert_no_additional_deps('docker', additional_dependencies)

    env_dir = prefix.path(
        helpers.environment_dir(ENVIRONMENT_DIR, C.DEFAULT),
    )
    # Docker keeps no real on-disk environment, but pre-commit still tracks a
    # state directory and must clean it up if the build fails.
    with clean_path_on_failure(env_dir):
        build_docker_image(prefix, pull=True)
        os.mkdir(env_dir)
def get_docker_user() -> tuple[str, ...]:  # pragma: win32 no cover
    """Return ``-u uid:gid`` arguments for ``docker run``, or nothing on
    platforms without os.getuid/os.getgid (e.g. Windows)."""
    if hasattr(os, 'getuid') and hasattr(os, 'getgid'):
        return ('-u', f'{os.getuid()}:{os.getgid()}')
    return ()
def docker_cmd() -> tuple[str, ...]:  # pragma: win32 no cover
    """Return the common ``docker run`` argument prefix used for every hook
    invocation: mount the working tree at /src and run as the current user."""
    # https://docs.docker.com/engine/reference/commandline/run/#mount-volumes-from-container-volumes-from
    # The `Z` option tells Docker to label the content with a private
    # unshared label.  Only the current container can use a private volume.
    src_mount = f'{_get_docker_path(os.getcwd())}:/src:rw,Z'
    return (
        'docker', 'run',
        '--rm',
        *get_docker_user(),
        '-v', src_mount,
        '--workdir', '/src',
    )
def run_hook(
        hook: Hook,
        file_args: Sequence[str],
        color: bool,
) -> tuple[int, bytes]:  # pragma: win32 no cover
    """Execute *hook* inside its docker image over *file_args*."""
    # Rebuild the docker image first in case it has gone missing -- many
    # people run automated cleanup of docker images.
    build_docker_image(hook.prefix, pull=False)

    entry, *hook_args = hook.cmd
    run_args = (
        *docker_cmd(),
        '--entrypoint', entry,
        docker_tag(hook.prefix),
        *hook_args,
    )
    return helpers.run_xargs(hook, run_args, file_args, color=color)
| {
"content_hash": "3f31436bd3e9d67df3dafa8ea07178a2",
"timestamp": "",
"source": "github",
"line_count": 142,
"max_line_length": 109,
"avg_line_length": 32.20422535211268,
"alnum_prop": 0.6374371309862235,
"repo_name": "pre-commit/pre-commit",
"id": "eea9f7682917928b78dd669d172f0bd8841c807a",
"size": "4573",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "pre_commit/languages/docker.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C#",
"bytes": "753"
},
{
"name": "Dart",
"bytes": "142"
},
{
"name": "Dockerfile",
"bytes": "508"
},
{
"name": "Go",
"bytes": "240"
},
{
"name": "JavaScript",
"bytes": "128"
},
{
"name": "Lua",
"bytes": "513"
},
{
"name": "Perl",
"bytes": "532"
},
{
"name": "PowerShell",
"bytes": "744"
},
{
"name": "Python",
"bytes": "511310"
},
{
"name": "R",
"bytes": "24268"
},
{
"name": "Ruby",
"bytes": "829"
},
{
"name": "Rust",
"bytes": "56"
},
{
"name": "Shell",
"bytes": "3952"
},
{
"name": "Swift",
"bytes": "181"
}
],
"symlink_target": ""
} |
"""
Copyright [2009-2017] EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import re
from caching.base import CachingManager, CachingMixin
from django.db import models
from rest_framework.renderers import JSONRenderer
from .accession import Accession
from .ensembl_assembly import EnsemblAssembly
class RawSqlQueryset(models.QuerySet):
    """
    We override the default queryset to annotate each queryset object
    with database-specific fields, obtained via raw SQL queries, when
    queryset is actually evaluated (queryset is evaluated when its
    _fetch_all() method is called). So, we override that method to
    add some extra fields, obtained by raw SQL queries.
    """
    def _get_taxid(self):
        """
        This is a dirty-dirty hack that checks, if taxid filter is applied
        to this queryset, and if it is, extracts taxid from django internals,
        otherwise, returns None.
        Used to provide taxid to raw SQL queries, issued by _fetch_all().
        Dirty implementation details:
        * self.query is a python object, used to actually construct raw SQL.
        * self.query.where is a WhereNode, extending django.utils.Node,
        it's a tree node. queryset.filter() expressions are stored as tree
        nodes on WhereNode objects.
        * self.query.where.children stores children of current node.
        * self.query.where.children[0].lhs is a lookup - Col object - where
        Col.target knows what field to lookup.
        * self.query.where.children[0].rhs contains lookup value.
        """
        # local import: lookup classes are an internal detail only needed here
        from django.db.models.lookups import Exact
        taxid = None
        for child in self.query.where.children:
            if (
                isinstance(child, Exact)
                and str(child.lhs.target) == "portal.Xref.taxid"
            ):
                taxid = child.rhs
        return taxid
    def _fetch_all(self):
        """
        This method performs the actual database lookup, when queryset is evaluated.
        We extend it to fetch database-specific data with raw SQL queries.
        """
        super(RawSqlQueryset, self)._fetch_all()
        # check this flag to avoid infinite recursion loop with _fetch_all() called by get_mirbase_mature_products()
        if not hasattr(self, "fetch_all_already_called"):
            # set this flag to avoid infinite recursion loop
            self.fetch_all_already_called = True
            # add database-specific fields only if this queryset contains model objects
            # (this is not the case for values() or values_list() methods)
            if len(self) and type(self[0]) == Xref:
                taxid = self._get_taxid()
                # Raw SQL queries to fetch database-specific data with self-joins, impossible in Django ORM
                mirbase_mature_products = self.get_mirbase_mature_products(taxid)
                mirbase_precursors = self.get_mirbase_precursor(taxid)
                refseq_mirna_mature_products = self.get_refseq_mirna_mature_products(
                    taxid
                )
                refseq_mirna_precursors = self.get_refseq_mirna_precursor(taxid)
                refseq_splice_variants = self.get_refseq_splice_variants(taxid)
                ensembl_splice_variants = self.get_ensembl_splice_variants(taxid)
                tmrna_mates = self.get_tmrna_mate(taxid)
                # "annotate" xrefs queryset with additional attributes, retrieved by raw SQL queries
                for xref in self:
                    if xref.id in mirbase_mature_products:
                        xref.mirbase_mature_products = [
                            mature_product.upi.upi
                            for mature_product in mirbase_mature_products[xref.id]
                        ]
                    if xref.id in mirbase_precursors:
                        xref.mirbase_precursor = mirbase_precursors[xref.id][
                            0
                        ].upi.upi  # note, there's just 1 precursor
                    if xref.id in refseq_mirna_mature_products:
                        xref.refseq_mirna_mature_products = [
                            mature_product.upi.upi
                            for mature_product in refseq_mirna_mature_products[xref.id]
                        ]
                    if xref.id in refseq_mirna_precursors:
                        xref.refseq_mirna_precursor = refseq_mirna_precursors[xref.id][
                            0
                        ].upi.upi  # note, there's just 1 precursor
                    if xref.id in refseq_splice_variants:
                        xref.refseq_splice_variants = [
                            splice_variant.upi.upi
                            for splice_variant in refseq_splice_variants[xref.id]
                        ]
                    if xref.id in ensembl_splice_variants:
                        xref.ensembl_splice_variants = [
                            splice_variant.upi.upi
                            for splice_variant in ensembl_splice_variants[xref.id]
                        ]
                    if xref.id in tmrna_mates:
                        xref.tmrna_mates = [
                            tmrna_mate.upi.upi for tmrna_mate in tmrna_mates[xref.id]
                        ]
    def _xrefs_raw_queryset_to_dict(self, raw_queryset):
        """
        Aggregate a raw queryset into a single dict, grouping items by their
        annotated ``xid`` attribute (the id of the xref they belong to).
        :param raw_queryset: iterable of Xref objects, returned by Xref.objects.raw()
        :return: dict { Xref.pk: [Xref, ...] (items of the raw queryset) }
        """
        output_dict = {}
        for xref in raw_queryset:
            if xref.xid not in output_dict:
                output_dict[xref.xid] = [xref]
            else:
                output_dict[xref.xid].append(xref)
        return output_dict
    def get_mirbase_mature_products(self, taxid=None):
        """
        Map miRBase precursor xref id -> list of mature-product xrefs that
        share the same MI* external_id.  Returns { Xref.pk: [Xref, ...] }.
        """
        # NOTE(review): QuerySet instances do not normally have an
        # `accession` attribute, so this guard looks like it is never taken
        # -- confirm before removing.
        if hasattr(self, "accession"):
            if self.accession.database != "mirbase".upper():
                return None
        taxid_filter = "AND xref.taxid = %s" % taxid if taxid else ""
        # _fetch_all() has already been called by now
        pks = ",".join(
            ["'%s'" % xref.pk for xref in self]
        )  # e.g. "'250381225', '250381243', '295244525'"
        queryset = """
            SELECT xref.*, rnc_accessions.external_id
            FROM xref, rnc_accessions
            WHERE xref.ac = rnc_accessions.accession
            AND xref.id IN ({pks})
            AND rnc_accessions.database = 'MIRBASE'
            AND rnc_accessions.feature_name = 'precursor_RNA'
            {taxid_filter}
        """.format(
            pks=pks, taxid_filter=taxid_filter
        )
        annotated_queryset = """
            SELECT xref.*, x.id as xid
            FROM xref
            JOIN rnc_accessions
            ON xref.ac = rnc_accessions.accession
            JOIN (
                {queryset}
            ) x
            ON rnc_accessions.external_id = x.external_id
            WHERE rnc_accessions.database = 'MIRBASE'
            AND rnc_accessions.feature_name = 'ncRNA'
            {taxid_filter}
        """.format(
            queryset=queryset, taxid_filter=taxid_filter
        )
        raw_queryset = Xref.objects.raw(annotated_queryset)
        return self._xrefs_raw_queryset_to_dict(raw_queryset)
    def get_mirbase_precursor(self, taxid=None):
        """
        Map miRBase mature-product xref id -> its precursor xref (via the
        shared MI* external_id).  Returns { Xref.pk: [Xref] }.
        """
        # NOTE(review): same likely-dead `accession` guard as in
        # get_mirbase_mature_products above -- confirm.
        if hasattr(self, "accession"):
            if self.accession.database != "mirbase".upper():
                return None
        taxid_filter = "AND xref.taxid = %s" % taxid if taxid else ""
        # _fetch_all() has already been called by now
        pks = ",".join(["'%s'" % xref.pk for xref in self])
        queryset = """
            SELECT xref.*, rnc_accessions.external_id
            FROM xref, rnc_accessions
            WHERE xref.ac = rnc_accessions.accession
            AND xref.id IN ({pks})
            AND rnc_accessions.database = 'MIRBASE'
            AND rnc_accessions.feature_name = 'ncRNA'
            {taxid_filter}
        """.format(
            pks=pks, taxid_filter=taxid_filter
        )
        annotated_queryset = """
            SELECT xref.*, x.id as xid
            FROM xref
            JOIN rnc_accessions
            ON xref.ac = rnc_accessions.accession
            JOIN (
                {queryset}
            ) x
            ON rnc_accessions.external_id = x.external_id
            WHERE rnc_accessions.database = 'MIRBASE'
            AND rnc_accessions.feature_name = 'precursor_RNA'
            {taxid_filter}
        """.format(
            queryset=queryset, taxid_filter=taxid_filter
        )
        raw_queryset = Xref.objects.raw(annotated_queryset)
        return self._xrefs_raw_queryset_to_dict(raw_queryset)
    def get_refseq_mirna_mature_products(self, taxid=None):
        """
        Map RefSeq precursor xref id -> mature-product xrefs sharing the same
        parent_ac.  Returns { Xref.pk: [Xref, ...] }.
        """
        taxid_filter = "AND xref.taxid = %s" % taxid if taxid else ""
        # _fetch_all() has already been called by now
        pks = ",".join(["'%s'" % xref.pk for xref in self])
        queryset = """
            SELECT xref.*, rnc_accessions.parent_ac
            FROM xref, rnc_accessions
            WHERE xref.ac = rnc_accessions.accession
            AND xref.id IN ({pks})
            AND rnc_accessions.database = 'REFSEQ'
            AND rnc_accessions.feature_name = 'precursor_RNA'
            {taxid_filter}
        """.format(
            pks=pks, taxid_filter=taxid_filter
        )
        annotated_queryset = """
            SELECT xref.*, x.id as xid
            FROM xref
            JOIN rnc_accessions
            ON xref.ac = rnc_accessions.accession
            JOIN (
                {queryset}
            ) x
            ON rnc_accessions.parent_ac = x.parent_ac
            WHERE rnc_accessions.database = 'REFSEQ'
            AND rnc_accessions.feature_name = 'ncRNA'
            {taxid_filter}
        """.format(
            queryset=queryset, taxid_filter=taxid_filter
        )
        raw_queryset = Xref.objects.raw(annotated_queryset)
        return self._xrefs_raw_queryset_to_dict(raw_queryset)
    def get_refseq_mirna_precursor(self, taxid=None):
        """
        Map RefSeq mature-product xref id -> its precursor xref (dbid 9,
        joined on parent_ac).  Returns { Xref.pk: [Xref] }.
        """
        taxid_filter = "AND xref.taxid = %s" % taxid if taxid else ""
        # _fetch_all() has already been called by now
        pks = ",".join(["'%s'" % xref.pk for xref in self])
        queryset = """
            SELECT xref.*, rnc_accessions.parent_ac
            FROM xref, rnc_accessions
            WHERE xref.ac = rnc_accessions.accession
            AND xref.id IN ({pks})
            AND xref.dbid = 9
            AND rnc_accessions.feature_name = 'ncRNA'
            {taxid_filter}
        """.format(
            pks=pks, taxid_filter=taxid_filter
        )
        annotated_queryset = """
            SELECT xref.*, x.id as xid
            FROM xref
            JOIN rnc_accessions
            ON xref.ac = rnc_accessions.accession
            JOIN (
                {queryset}
            ) x
            ON rnc_accessions.parent_ac = x.parent_ac
            WHERE xref.dbid = 9
            AND rnc_accessions.feature_name = 'precursor_RNA'
            {taxid_filter}
        """.format(
            queryset=queryset, taxid_filter=taxid_filter
        )
        raw_queryset = Xref.objects.raw(annotated_queryset)
        return self._xrefs_raw_queryset_to_dict(raw_queryset)
    def get_refseq_splice_variants(self, taxid=None):
        """
        Map RefSeq xref id -> other active xrefs sharing its optional_id
        (splice variants).  miRNA mature products are excluded.
        """
        taxid_filter = "AND xref.taxid = %s" % taxid if taxid else ""
        # _fetch_all() has already been called by now
        pks = ",".join(["'%s'" % xref.pk for xref in self])
        queryset = """
            SELECT xref.*, rnc_accessions.ncrna_class, rnc_accessions.optional_id
            FROM xref, rnc_accessions
            WHERE xref.ac = rnc_accessions.accession
            AND xref.id IN ({pks})
            AND xref.dbid = 9
            AND rnc_accessions.optional_id != ''
            AND (rnc_accessions.ncrna_class != 'miRNA' OR rnc_accessions.feature_name = 'precursor_RNA')
            {taxid_filter}
        """.format(
            pks=pks, taxid_filter=taxid_filter
        )
        annotated_queryset = """
            SELECT xref.*, x.id as xid
            FROM xref
            JOIN rnc_accessions
            ON xref.ac = rnc_accessions.accession
            JOIN (
                {queryset}
            ) x
            ON rnc_accessions.optional_id = x.optional_id
            WHERE xref.dbid = 9
            AND xref.deleted = 'N'
            AND rnc_accessions.accession != x.ac
            AND (rnc_accessions.ncrna_class != 'miRNA' OR rnc_accessions.feature_name = 'precursor_RNA')
            {taxid_filter}
        """.format(
            queryset=queryset, taxid_filter=taxid_filter
        )
        raw_queryset = Xref.objects.raw(annotated_queryset)
        return self._xrefs_raw_queryset_to_dict(raw_queryset)
    def get_ensembl_splice_variants(self, taxid=None):
        """
        Map Ensembl (dbid 25) xref id -> other active xrefs sharing its
        optional_id (splice variants).
        """
        taxid_filter = "AND xref.taxid = %s" % taxid if taxid else ""
        # _fetch_all() has already been called by now
        pks = ",".join(["'%s'" % xref.pk for xref in self])
        queryset = """
            SELECT xref.*, rnc_accessions.optional_id
            FROM xref, rnc_accessions
            WHERE xref.ac = rnc_accessions.accession
            AND xref.dbid = 25
            AND rnc_accessions.optional_id != ''
            AND (rnc_accessions.ncrna_class != 'miRNA' OR rnc_accessions.feature_name = 'precursor_RNA')
            AND xref.id IN ({pks})
            {taxid_filter}
        """.format(
            pks=pks, taxid_filter=taxid_filter
        )
        annotated_queryset = """
            SELECT xref.*, x.id as xid
            FROM xref
            JOIN rnc_accessions
            ON xref.ac = rnc_accessions.accession
            JOIN (
                {queryset}
            ) x
            ON rnc_accessions.optional_id = x.optional_id
            WHERE xref.dbid = 25
            AND xref.deleted = 'N'
            AND rnc_accessions.accession != x.ac
            {taxid_filter}
        """.format(
            queryset=queryset, taxid_filter=taxid_filter
        )
        raw_queryset = Xref.objects.raw(annotated_queryset)
        return self._xrefs_raw_queryset_to_dict(raw_queryset)
    def get_tmrna_mate(self, taxid=None):
        """
        Map two-piece tmRNA xref id -> its mate xrefs (tmRNA Website entries
        joined via optional_id / parent_ac on composite accessions).
        """
        taxid_filter = "AND xref.taxid = %s" % taxid if taxid else ""
        # _fetch_all() has already been called by now
        pks = ",".join(["'%s'" % xref.pk for xref in self])
        queryset = """
            SELECT xref.*, rnc_accessions.optional_id, rnc_accessions.database
            FROM xref, rnc_accessions
            WHERE xref.ac = rnc_accessions.accession
            AND xref.id IN ({pks})
            AND rnc_accessions.database = 'TMRNA_WEB'
            AND rnc_accessions.optional_id IS NOT NULL
            {taxid_filter}
        """.format(
            pks=pks, taxid_filter=taxid_filter
        )
        annotated_queryset = """
            SELECT xref.*, x.id as xid
            FROM xref
            JOIN rnc_accessions
            ON xref.ac = rnc_accessions.accession
            JOIN (
                {queryset}
            ) x
            ON rnc_accessions.parent_ac = x.optional_id
            WHERE rnc_accessions.is_composite = 'Y'
            {taxid_filter}
        """.format(
            queryset=queryset, taxid_filter=taxid_filter
        )
        raw_queryset = Xref.objects.raw(annotated_queryset)
        return self._xrefs_raw_queryset_to_dict(raw_queryset)
class RawSqlXrefManager(models.Manager):
    """Manager that serves RawSqlQueryset, so evaluated querysets get the
    extra database-specific annotations from raw SQL."""
    use_for_related_fields = True
    def get_queryset(self):
        """Return a RawSqlQueryset bound to this manager's model and db."""
        return RawSqlQueryset(self.model, using=self._db)
class Xref(CachingMixin, models.Model):
    """
    Cross-reference between an RNA sequence (``upi``) and a member-database
    accession, scoped by taxid and by the releases in which it appeared.
    """
    id = models.AutoField(primary_key=True)
    db = models.ForeignKey(
        "Database", db_column="dbid", related_name="xrefs", on_delete=models.CASCADE
    )
    accession = models.ForeignKey(
        "Accession",
        db_column="ac",
        to_field="accession",
        related_name="xrefs",
        unique=True,
        on_delete=models.CASCADE,
    )
    created = models.ForeignKey(
        "Release",
        db_column="created",
        related_name="release_created",
        on_delete=models.CASCADE,
    )
    last = models.ForeignKey(
        "Release",
        db_column="last",
        related_name="last_release",
        on_delete=models.CASCADE,
    )
    upi = models.ForeignKey(
        "Rna",
        db_column="upi",
        to_field="upi",
        related_name="xrefs",
        on_delete=models.CASCADE,
    )
    version_i = models.IntegerField()
    # 'N' means active; see is_active()
    deleted = models.CharField(max_length=1)
    timestamp = models.DateTimeField()
    userstamp = models.CharField(max_length=100)
    version = models.IntegerField()
    taxid = models.IntegerField()
    # default manager adds database-specific raw-SQL annotations on evaluation
    objects = RawSqlXrefManager()
    default_objects = CachingManager()
    class Meta:
        db_table = "xref"
    def has_modified_nucleotides(self):
        """Determine whether an xref has modified nucleotides."""
        return self.modifications.count() > 0
    def get_distinct_modifications(self):
        """Get a list of distinct modified nucleotides described in this xref."""
        modifications = []
        seen = None
        # relies on the order_by: equal modification_ids arrive adjacent
        for modification in self.modifications.order_by("modification_id").all():
            if modification.modification_id == seen:
                continue
            else:
                modifications.append(modification)
                seen = modification.modification_id
        return modifications
    def get_modifications_as_json(self):
        """
        Get a JSON object listing all modified positions and the chemical
        components. This object is used for visualising modified nucleotides
        in the UI.
        """
        # local import, presumably to avoid a circular import -- verify
        import apiv1.serializers

        serializer = apiv1.serializers.ModificationSerializer(
            self.modifications.all(), many=True
        )
        return JSONRenderer().render(serializer.data)
    def is_active(self):
        """Convenience method for determining whether an xref is current or obsolete."""
        return self.deleted == "N"
    def is_rfam_seed(self):
        """Determine whether an xref is part of a manually curated RFAM seed alignment."""
        if self.accession.note:
            return (
                re.search("alignment\:seed", self.accession.note, re.IGNORECASE)
                is not None
            )
        else:
            return False
    def get_ncbi_gene_id(self):
        """GeneID links are stored in the optional_id field."""
        if self.accession.optional_id:
            match = re.search(
                "GeneID\:(\d+)", self.accession.optional_id, re.IGNORECASE
            )
            return match.group(1) if match else None
        else:
            return None
    def get_ndb_external_url(self):
        """
        For some entries NDB uses different ids than those assigned by the PDB.
        NDB ids are store in the db_xref column.
        This function returns an NDB url using NDB ids where possible
        with PDB ids used as a fallback.
        """
        if self.accession.database == "PDBE":
            ndb_url = "http://ndbserver.rutgers.edu/service/ndb/atlas/summary?searchTarget={structure_id}"
            if self.accession.db_xref:
                match = re.search("NDB\:(\w+)", self.accession.db_xref, re.IGNORECASE)
                if match:
                    structure_id = match.group(1)  # NDB id
                else:
                    structure_id = self.accession.external_id  # default to PDB id
                return ndb_url.format(structure_id=structure_id)
            else:
                return None
        else:
            return None
    def is_mirbase_mirna_precursor(self):
        """True if the accession is a miRBase precursor miRNA."""
        return (
            self.accession.feature_name == "precursor_RNA"
            and self.accession.database == "MIRBASE"
        )
    def get_mirbase_mature_products_if_any(self):
        """Return mature products for miRBase precursors, else an empty list."""
        return (
            self.get_mirbase_mature_products()
            if self.is_mirbase_mirna_precursor()
            else []
        )
    def get_mirbase_mature_products(self):
        """miRBase mature products and precursors share the same external MI* identifier."""
        mature_products = Xref.objects.filter(
            accession__external_id=self.accession.external_id,
            accession__feature_name="ncRNA",
        ).all()
        upis = []
        for mature_product in mature_products:
            upis.append(mature_product.upi)
        return upis
    def get_mirbase_precursor(self):
        """miRBase mature products and precursors share the same external MI* identifier."""
        if self.accession.database != "mirbase".upper():
            return None
        else:
            precursor = Xref.objects.filter(
                accession__external_id=self.accession.external_id,
                accession__feature_name="precursor_RNA",
            ).first()
            return precursor.upi.upi if precursor else None
    def is_refseq_mirna(self):
        """
        RefSeq miRNAs are stored in 3 xrefs:
        * precursor_RNA
        * 5-prime ncRNA
        * 3-prime ncRNA
        which share the same parent accession.
        """
        same_parent = Xref.objects.filter(
            accession__parent_ac=self.accession.parent_ac,
            accession__ncrna_class="miRNA",
            deleted=self.deleted,
        ).all()
        return len(same_parent) > 0
    def get_refseq_mirna_mature_products_if_any(self):
        """Return mature products for RefSeq miRNAs, else an empty list."""
        return self.get_refseq_mirna_mature_products() if self.is_refseq_mirna() else []
    def get_refseq_mirna_mature_products(self):
        """Given a precursor miRNA, retrieve its mature products."""
        mature_products = Xref.objects.filter(
            accession__parent_ac=self.accession.parent_ac,
            accession__feature_name="ncRNA",
        ).all()
        upis = []
        for mature_product in mature_products:
            upis.append(mature_product.upi)
        return upis
    def get_refseq_mirna_precursor(self):
        """Given a 5-prime or 3-prime mature product, retrieve its precursor miRNA."""
        if self.accession.feature_name != "precursor_RNA":
            rna = Xref.objects.filter(
                accession__parent_ac=self.accession.parent_ac,
                accession__feature_name="precursor_RNA",
            ).first()
            if rna:
                return rna.upi
        return None
    def get_refseq_splice_variants(self):
        """
        RefSeq splice variants are identified by the same GeneID.
        Example: URS000075D687.
        """
        splice_variants = []
        gene_id = self.get_ncbi_gene_id()
        if gene_id:
            xrefs = (
                Xref.objects.filter(
                    db__display_name="RefSeq",
                    deleted="N",
                    accession__ncrna_class=self.accession.ncrna_class,
                    accession__db_xref__iregex="GeneId:" + gene_id,
                )
                .exclude(accession=self.accession.accession)
                .all()
            )
            for splice_variant in xrefs:
                splice_variants.append(splice_variant.upi)
            splice_variants.sort(key=lambda x: x.length)
        return splice_variants
    def get_tmrna_mate_upi(self):
        """Get the mate of the 2-piece tmRNA"""
        # TODO: Currently this function is not used anywhere in the code.
        # TODO: Moreover, it doesn't work, because self.accession.optional_id
        # TODO: is always None for all the records from rmRNA Website.
        # NOTE(review): the `!=` check below looks inverted (it sets the flag
        # for NON-tmRNA entries and never returns early) -- see TODOs above.
        if self.db.display_name != "tmRNA Website":
            tmrna_mate_upi = False
        if not self.accession.optional_id:  # no mate info
            tmrna_mate_upi = False
        try:
            mate = Accession.objects.filter(
                parent_ac=self.accession.optional_id, is_composite="Y"
            ).get()
        except Accession.DoesNotExist:
            return False
        tmrna_mate_upi = mate.xrefs.get().upi.upi
        return tmrna_mate_upi
    def get_tmrna_type(self):
        """
        Possible tmRNA types:
        * acceptor (tRNA-like domain)
        * coding (mRNA-like domain),
        * precursor (contains the acceptor and coding sequences and other intervening sequences)
        """
        tmrna_type = 0
        # NOTE(review): the value assigned in this first branch is always
        # overwritten by the if/else below; `!=` was likely meant to be `==`
        # with an early return -- confirm against the callers.
        if self.db.display_name != "tmRNA Website":
            tmrna_type = 0  # not tmRNA
        if not self.accession.optional_id:
            tmrna_type = 1  # one-piece or precursor
        else:
            tmrna_type = 2  # two-piece tmRNA
        return tmrna_type
    def get_gencode_transcript_id(self):
        """
        GENCODE entries have their corresponding Ensembl transcript ids stored
        in Accession.accession. Example:
        {"transcript_id": ["GENCODE:ENSMUST00000160979.8"]}
        """
        if self.db.display_name == "Ensembl/GENCODE":
            if self.accession.accession.startswith("GENCODE:"):
                return self.accession.accession.split(":")[1]
            elif self.accession.accession.startswith("ENSMUST"):
                return self.accession.accession
            else:
                return None
        else:
            return None
    def get_gencode_ensembl_url(self):
        """Get Ensembl URL for GENCODE transcripts."""
        ensembl_transcript_id = self.get_gencode_transcript_id()
        if ensembl_transcript_id:
            url = (
                "http://ensembl.org/{species}/Transcript/Summary?db=core;t={id}".format(
                    id=ensembl_transcript_id,
                    species=self.accession.species.replace(" ", "_"),
                )
            )
            return url
        else:
            return None
    def get_ensembl_division(self):
        """Get Ensembl or Ensembl Genomes division for the cross-reference."""
        try:
            assembly = EnsemblAssembly.objects.get(taxid=self.taxid)
            return {"name": assembly.division, "url": "http://" + assembly.subdomain}
        except EnsemblAssembly.DoesNotExist:
            return None
    def get_ucsc_db_id(self):
        """Get UCSC id for the genome assembly. http://genome.ucsc.edu/FAQ/FAQreleases.html"""
        try:
            genome = EnsemblAssembly.objects.get(taxid=self.taxid)
            return genome.assembly_ucsc
        except EnsemblAssembly.DoesNotExist:
            return None
| {
"content_hash": "78135234bb7d1df55c2fae89daa9068a",
"timestamp": "",
"source": "github",
"line_count": 731,
"max_line_length": 116,
"avg_line_length": 37.16005471956225,
"alnum_prop": 0.5653070240023561,
"repo_name": "RNAcentral/rnacentral-webcode",
"id": "b5e2163a099317c3c2f77c8ac70c681b4bc978b7",
"size": "27164",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rnacentral/portal/models/xref.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "59656"
},
{
"name": "Dockerfile",
"bytes": "2817"
},
{
"name": "HTML",
"bytes": "312509"
},
{
"name": "JavaScript",
"bytes": "285993"
},
{
"name": "Python",
"bytes": "562943"
},
{
"name": "Shell",
"bytes": "14414"
}
],
"symlink_target": ""
} |
import random
from slackbot.bot import listen_to
from plugins.parameters import EMOJI_NAME, ATSUMORI_PROBABILITY, NORMAL_PROBABILITY
@listen_to('.*')
def listen_func(message):
    """With probability ATSUMORI_PROBABILITY, react to any message with the
    configured emoji and send one of two follow-up messages (chosen with
    probability NORMAL_PROBABILITY)."""
    if random.random() >= ATSUMORI_PROBABILITY:
        return
    message.react(EMOJI_NAME)
    if random.random() < NORMAL_PROBABILITY:
        message.send('失礼しました。熱盛と出てしまいました。')
    else:
        message.send('あっ…熱盛が出てしまい…ました失礼しました。')
| {
"content_hash": "10063f9a9e97c582d24120d615f0a116",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 83,
"avg_line_length": 30.142857142857142,
"alnum_prop": 0.6824644549763034,
"repo_name": "yamasakih/atsumori-slack-bot",
"id": "8a194612f6745012c2d7c4954adb480beedde255",
"size": "529",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "plugins/listen.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1097"
}
],
"symlink_target": ""
} |
"""
A Hyper-V Nova Compute driver.
"""
import platform
from nova.virt import driver
from oslo_log import log as logging
from oslo_utils import excutils
from hyperv.i18n import _
from hyperv.nova import eventhandler
from hyperv.nova import hostops
from hyperv.nova import livemigrationops
from hyperv.nova import migrationops
from hyperv.nova import rdpconsoleops
from hyperv.nova import serialconsoleops
from hyperv.nova import snapshotops
from hyperv.nova import vmops
from hyperv.nova import volumeops
# Module-level logger named after this module.
LOG = logging.getLogger(__name__)
class HyperVDriver(driver.ComputeDriver):
    """Nova compute driver for Hyper-V; delegates all work to ops helpers."""
    capabilities = {
        "has_imagecache": False,
        "supports_recreate": False,
        "supports_migrate_to_same_host": True
    }
    def __init__(self, virtapi):
        """Instantiate the operations helpers this driver delegates to."""
        super(HyperVDriver, self).__init__(virtapi)

        self._hostops = hostops.HostOps()
        self._volumeops = volumeops.VolumeOps()
        self._vmops = vmops.VMOps()
        self._snapshotops = snapshotops.SnapshotOps()
        self._livemigrationops = livemigrationops.LiveMigrationOps()
        self._migrationops = migrationops.MigrationOps()
        self._rdpconsoleops = rdpconsoleops.RDPConsoleOps()
        self._serialconsoleops = serialconsoleops.SerialConsoleOps()
    def init_host(self, host):
        """Start serial console handlers and the instance event listener."""
        self._serialconsoleops.start_console_handlers()
        event_handler = eventhandler.InstanceEventHandler(
            state_change_callback=self.emit_event)
        event_handler.start_listener()
    def list_instance_uuids(self):
        """Delegate listing of instance UUIDs to VMOps."""
        return self._vmops.list_instance_uuids()
    def list_instances(self):
        """Delegate listing of instances to VMOps."""
        return self._vmops.list_instances()
    def spawn(self, context, instance, image_meta, injected_files,
              admin_password, network_info=None, block_device_info=None):
        """Delegate instance creation to VMOps."""
        self._vmops.spawn(context, instance, image_meta, injected_files,
                          admin_password, network_info, block_device_info)
    def reboot(self, context, instance, network_info, reboot_type,
               block_device_info=None, bad_volumes_callback=None):
        """Delegate reboot to VMOps (block_device_info and callback unused)."""
        self._vmops.reboot(instance, network_info, reboot_type)
    def destroy(self, context, instance, network_info, block_device_info=None,
                destroy_disks=True, migrate_data=None):
        """Delegate instance teardown to VMOps (migrate_data unused)."""
        self._vmops.destroy(instance, network_info, block_device_info,
                            destroy_disks)
    def cleanup(self, context, instance, network_info, block_device_info=None,
                destroy_disks=True, migrate_data=None, destroy_vifs=True):
        """Cleanup after instance being destroyed by Hypervisor."""
        # intentionally a no-op: destroy() already removes everything
        pass
    def get_info(self, instance):
        """Delegate retrieval of instance state/info to VMOps."""
        return self._vmops.get_info(instance)
    def attach_volume(self, context, connection_info, instance, mountpoint,
                      disk_bus=None, device_type=None, encryption=None):
        """Delegate volume attach to VolumeOps (mountpoint etc. unused)."""
        return self._volumeops.attach_volume(connection_info,
                                             instance.name)
    def detach_volume(self, connection_info, instance, mountpoint,
                      encryption=None):
        """Delegate volume detach to VolumeOps (mountpoint unused)."""
        return self._volumeops.detach_volume(connection_info,
                                             instance.name)
    def get_volume_connector(self, instance):
        """Delegate connector info retrieval to VolumeOps."""
        return self._volumeops.get_volume_connector(instance)
    def get_available_resource(self, nodename):
        """Delegate host resource reporting to HostOps (nodename unused)."""
        return self._hostops.get_available_resource()
    def get_available_nodes(self, refresh=False):
        """Return this host's node name as the only available node."""
        return [platform.node()]
    def host_power_action(self, action):
        """Delegate host power actions to HostOps."""
        return self._hostops.host_power_action(action)
    def snapshot(self, context, instance, image_id, update_task_state):
        """Delegate instance snapshotting to SnapshotOps."""
        self._snapshotops.snapshot(context, instance, image_id,
                                   update_task_state)
    def pause(self, instance):
        """Delegate pause to VMOps."""
        self._vmops.pause(instance)
    def unpause(self, instance):
        """Delegate unpause to VMOps."""
        self._vmops.unpause(instance)
    def suspend(self, context, instance):
        """Delegate suspend to VMOps (context unused)."""
        self._vmops.suspend(instance)
    def resume(self, context, instance, network_info, block_device_info=None):
        """Delegate resume to VMOps (context/network/block info unused)."""
        self._vmops.resume(instance)
    def power_off(self, instance, timeout=0, retry_interval=0):
        """Delegate power off (with shutdown timeout/retry) to VMOps."""
        self._vmops.power_off(instance, timeout, retry_interval)
    def power_on(self, context, instance, network_info,
                 block_device_info=None):
        """Delegate power on to VMOps."""
        self._vmops.power_on(instance, block_device_info, network_info)
    def resume_state_on_host_boot(self, context, instance, network_info,
                                  block_device_info=None):
        """Resume guest state when a host is booted."""
        self._vmops.resume_state_on_host_boot(context, instance, network_info,
                                              block_device_info)
    def live_migration(self, context, instance, dest, post_method,
                       recover_method, block_migration=False,
                       migrate_data=None):
        """Delegate live migration to LiveMigrationOps."""
        self._livemigrationops.live_migration(context, instance, dest,
                                              post_method, recover_method,
                                              block_migration, migrate_data)
    def rollback_live_migration_at_destination(self, context, instance,
                                               network_info,
                                               block_device_info,
                                               destroy_disks=True,
                                               migrate_data=None):
        """Roll back a failed live migration by destroying the local copy."""
        self.destroy(context, instance, network_info, block_device_info)
    def pre_live_migration(self, context, instance, block_device_info,
                           network_info, disk_info, migrate_data=None):
        """Delegate pre-migration setup to LiveMigrationOps."""
        self._livemigrationops.pre_live_migration(context, instance,
                                                  block_device_info,
                                                  network_info)
    def post_live_migration(self, context, instance, block_device_info,
                            migrate_data=None):
        """Delegate source-side post-migration cleanup to LiveMigrationOps."""
        self._livemigrationops.post_live_migration(context, instance,
                                                   block_device_info)
    def post_live_migration_at_destination(self, context, instance,
                                           network_info,
                                           block_migration=False,
                                           block_device_info=None):
        """Delegate destination-side post-migration work to LiveMigrationOps."""
        self._livemigrationops.post_live_migration_at_destination(
            context,
            instance,
            network_info,
            block_migration)
    def check_can_live_migrate_destination(self, context, instance,
                                           src_compute_info, dst_compute_info,
                                           block_migration=False,
                                           disk_over_commit=False):
        """Delegate destination-side migration feasibility check."""
        return self._livemigrationops.check_can_live_migrate_destination(
            context, instance, src_compute_info, dst_compute_info,
            block_migration, disk_over_commit)
    def check_can_live_migrate_destination_cleanup(self, context,
                                                   dest_check_data):
        """Delegate cleanup of destination-side check data."""
        self._livemigrationops.check_can_live_migrate_destination_cleanup(
            context, dest_check_data)
    def check_can_live_migrate_source(self, context, instance,
                                      dest_check_data, block_device_info=None):
        """Check on the source whether a live migration is possible.

        ``block_device_info`` is accepted but not forwarded.
        """
        return self._livemigrationops.check_can_live_migrate_source(
            context, instance, dest_check_data)
    def get_instance_disk_info(self, instance, block_device_info=None):
        """No-op: this driver does not report disk info (returns None)."""
        pass
    def plug_vifs(self, instance, network_info):
        """Plug VIFs into networks.

        Always raises NotImplementedError: not supported by this driver.
        """
        msg = _("VIF plugging is not supported by the Hyper-V driver.")
        raise NotImplementedError(msg)
    def unplug_vifs(self, instance, network_info):
        """Unplug VIFs from networks.

        Always raises NotImplementedError: not supported by this driver.
        """
        msg = _("VIF unplugging is not supported by the Hyper-V driver.")
        raise NotImplementedError(msg)
    def ensure_filtering_rules_for_instance(self, instance, network_info):
        # No filtering is applied by this driver; only log the call.
        LOG.debug("ensure_filtering_rules_for_instance called",
                  instance=instance)
    def unfilter_instance(self, instance, network_info):
        # No-op apart from debug logging (no filtering to undo).
        LOG.debug("unfilter_instance called", instance=instance)
    def migrate_disk_and_power_off(self, context, instance, dest,
                                   flavor, network_info,
                                   block_device_info=None,
                                   timeout=0, retry_interval=0):
        """Power off the instance and migrate its disk to ``dest``.

        Delegates to the migration operations layer.
        """
        return self._migrationops.migrate_disk_and_power_off(context,
                                                             instance, dest,
                                                             flavor,
                                                             network_info,
                                                             block_device_info,
                                                             timeout,
                                                             retry_interval)
    def confirm_migration(self, migration, instance, network_info):
        """Confirm a completed migration (delegates to the migration ops)."""
        self._migrationops.confirm_migration(migration, instance, network_info)
    def finish_revert_migration(self, context, instance, network_info,
                                block_device_info=None, power_on=True):
        """Finish reverting a migration (delegates to the migration ops)."""
        self._migrationops.finish_revert_migration(context, instance,
                                                   network_info,
                                                   block_device_info, power_on)
    def finish_migration(self, context, migration, instance, disk_info,
                         network_info, image_meta, resize_instance,
                         block_device_info=None, power_on=True):
        """Complete a migration (delegates to the migration ops layer)."""
        self._migrationops.finish_migration(context, migration, instance,
                                            disk_info, network_info,
                                            image_meta, resize_instance,
                                            block_device_info, power_on)
    def get_host_ip_addr(self):
        """Return the IP address of this host (from the host ops layer)."""
        return self._hostops.get_host_ip_addr()
    def get_host_uptime(self):
        """Return this host's uptime (from the host ops layer)."""
        return self._hostops.get_host_uptime()
    def get_rdp_console(self, context, instance):
        """Return the RDP console for ``instance`` (``context`` unused)."""
        return self._rdpconsoleops.get_rdp_console(instance)
    def get_serial_console(self, context, instance):
        """Return the serial console for ``instance`` (``context`` unused)."""
        return self._serialconsoleops.get_serial_console(instance.name)
    def get_console_output(self, context, instance):
        """Return console log output for ``instance`` (``context`` unused)."""
        return self._serialconsoleops.get_console_output(instance.name)
    def rescue(self, context, instance, network_info, image_meta,
               rescue_password):
        """Boot the instance into a rescue environment.

        On failure the instance is unrescued before the original exception
        is re-raised.
        """
        try:
            self._vmops.rescue_instance(context, instance, network_info,
                                        image_meta, rescue_password)
        except Exception:
            with excutils.save_and_reraise_exception():
                # Roll back to the normal (non-rescue) state on failure.
                self._vmops.unrescue_instance(instance)
    def unrescue(self, instance, network_info):
        """Take the instance out of the rescue environment."""
        self._vmops.unrescue_instance(instance)
    def attach_interface(self, instance, image_meta, vif):
        """Attach a network interface; ``image_meta`` is not forwarded."""
        return self._vmops.attach_interface(instance, vif)
    def detach_interface(self, instance, vif):
        """Detach a network interface (delegates to the vmops layer)."""
        return self._vmops.detach_interface(instance, vif)
| {
"content_hash": "592485a1ee5d0df7f4eba831089dbe17",
"timestamp": "",
"source": "github",
"line_count": 262,
"max_line_length": 79,
"avg_line_length": 43.4236641221374,
"alnum_prop": 0.5773050892150831,
"repo_name": "adelina-t/compute-hyperv",
"id": "8bb5aec4c77a2d5933d151b8eb9c5c01b82980a8",
"size": "12033",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hyperv/nova/driver.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "685121"
}
],
"symlink_target": ""
} |
try:
from cms.plugins.text.models import AbstractText
except ImportError:
from djangocms_text_ckeditor.models import AbstractText
from django.db.models import CharField
from django.utils.translation import ugettext_lazy as _
class Footnote(AbstractText):
    """CMS text-plugin model: footnote body (inherited from AbstractText)
    plus an optional manual symbol."""
    # When set, this symbol replaces the automatically assigned number.
    symbol = CharField(_('symbol'), max_length=10, blank=True,
                       help_text=_('Overrides the automatic numbering.'))
    class Meta:
        verbose_name = _('Footnote')
        verbose_name_plural = _('Footnotes')
        db_table = 'cmsplugin_footnote'
| {
"content_hash": "dbbcdc05e7cfdccb66234f3a79816273",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 73,
"avg_line_length": 33.8125,
"alnum_prop": 0.6876155268022182,
"repo_name": "BertrandBordage/cmsplugin-footnote",
"id": "9e3b5dc7a04a2c1c39314342037f2f89fba06b29",
"size": "558",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cmsplugin_footnote/models.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "796"
},
{
"name": "Python",
"bytes": "9727"
}
],
"symlink_target": ""
} |
from django.test import TestCase
from .models import Client, Contact
class ClientTestCase(TestCase):
    """Exercises creation and string conversion of Client objects."""

    def setUp(self):
        Client.objects.create(name='Test Starbucks', address='Felipe Ángeles 225')
        Client.objects.create(name='Test OXXO', address='Reforma 190')

    def test_client_names(self):
        """The objects are created and their __str__ func works"""
        for expected_name in ('Test Starbucks', 'Test OXXO'):
            fetched = Client.objects.get(name=expected_name)
            self.assertEqual(str(fetched), expected_name)
class ContactTestCase(ClientTestCase):
    """Exercises creation, string conversion and client linkage of Contact."""

    def setUp(self):
        starbucks = Client.objects.create(
            name='Test Starbucks', address='Felipe Ángeles 225')
        Client.objects.create(name='Test OXXO', address='Reforma 190')
        Contact.objects.create(
            name='Fernando',
            last_name='Lobato Meeser',
            charge='Project Owner',
            landline='4424674323',
            mobile_phone_1='2341631',
            email='lobato.meeser.fernando@hotmail.com',
            alternate_email='ferlobo93@hotmail.com',
            client=starbucks)

    def test_contact_name(self):
        """The contact is persisted and its __str__ func works."""
        contact = Contact.objects.get(name='Fernando')
        self.assertEqual(str(contact), 'Fernando Lobato Meeser')

    def test_relation_contact(self):
        """The contact object correctly references its client."""
        contact = Contact.objects.get(name='Fernando')
        self.assertEqual(contact.client,
                         Client.objects.get(name='Test Starbucks'))
| {
"content_hash": "3e8c9f234cc266f79ba0f5708f280e9a",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 94,
"avg_line_length": 40.20454545454545,
"alnum_prop": 0.6529112492933861,
"repo_name": "fernandolobato/balarco",
"id": "c482b62acf6a8d200f996bb19349c403bc62087b",
"size": "1771",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "clients/tests_models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "48187"
},
{
"name": "HTML",
"bytes": "2319"
},
{
"name": "JavaScript",
"bytes": "102982"
},
{
"name": "Python",
"bytes": "168101"
}
],
"symlink_target": ""
} |
import argparse
import sys
import warnings
import numpy as np
import skimage.io
import skimage.color
import skimage.util
from sklearn.decomposition import PCA, NMF, FastICA, FactorAnalysis
# Maps a conversion name (exposed as the CLI ``conv_type`` choice) to a
# callable taking an image array (H x W x C; the script feeds 3 channels)
# and returning the converted image.  Three groups of entries:
#   * skimage.color "<a>2<b>" colour-space conversions,
#   * stain (de)convolution via combine_stains/separate_stains matrices,
#   * per-pixel decompositions (PCA/NMF/ICA/FA) reduced to 3 components.
convOptions = {
    # --- colour-space conversions -------------------------------------
    'hed2rgb' : lambda img_raw: skimage.color.hed2rgb(img_raw),
    'hsv2rgb' : lambda img_raw: skimage.color.hsv2rgb(img_raw),
    'lab2lch' : lambda img_raw: skimage.color.lab2lch(img_raw),
    'lab2rgb' : lambda img_raw: skimage.color.lab2rgb(img_raw),
    'lab2xyz' : lambda img_raw: skimage.color.lab2xyz(img_raw),
    'lch2lab' : lambda img_raw: skimage.color.lch2lab(img_raw),
    'luv2rgb' : lambda img_raw: skimage.color.luv2rgb(img_raw),
    'luv2xyz' : lambda img_raw: skimage.color.luv2xyz(img_raw),
    'rgb2hed' : lambda img_raw: skimage.color.rgb2hed(img_raw),
    'rgb2hsv' : lambda img_raw: skimage.color.rgb2hsv(img_raw),
    'rgb2lab' : lambda img_raw: skimage.color.rgb2lab(img_raw),
    'rgb2luv' : lambda img_raw: skimage.color.rgb2luv(img_raw),
    'rgb2rgbcie' : lambda img_raw: skimage.color.rgb2rgbcie(img_raw),
    'rgb2xyz' : lambda img_raw: skimage.color.rgb2xyz(img_raw),
    #'rgb2ycbcr' : lambda img_raw: skimage.color.rgb2ycbcr(img_raw),
    #'rgb2yiq' : lambda img_raw: skimage.color.rgb2yiq(img_raw),
    #'rgb2ypbpr' : lambda img_raw: skimage.color.rgb2ypbpr(img_raw),
    #'rgb2yuv' : lambda img_raw: skimage.color.rgb2yuv(img_raw),
    #'rgba2rgb' : lambda img_raw: skimage.color.rgba2rgb(img_raw),
    'rgbcie2rgb' : lambda img_raw: skimage.color.rgbcie2rgb(img_raw),
    'xyz2lab' : lambda img_raw: skimage.color.xyz2lab(img_raw),
    'xyz2luv' : lambda img_raw: skimage.color.xyz2luv(img_raw),
    'xyz2rgb' : lambda img_raw: skimage.color.xyz2rgb(img_raw),
    #'ycbcr2rgb' : lambda img_raw: skimage.color.ycbcr2rgb(img_raw),
    #'yiq2rgb' : lambda img_raw: skimage.color.yiq2rgb(img_raw),
    #'ypbpr2rgb' : lambda img_raw: skimage.color.ypbpr2rgb(img_raw),
    #'yuv2rgb' : lambda img_raw: skimage.color.yuv2rgb(img_raw),
    # --- stain combination (stain space -> RGB) -----------------------
    'rgb_from_hed' : lambda img_raw: skimage.color.combine_stains(img_raw, skimage.color.rgb_from_hed),
    'rgb_from_hdx' : lambda img_raw: skimage.color.combine_stains(img_raw, skimage.color.rgb_from_hdx),
    'rgb_from_fgx' : lambda img_raw: skimage.color.combine_stains(img_raw, skimage.color.rgb_from_fgx),
    'rgb_from_bex' : lambda img_raw: skimage.color.combine_stains(img_raw, skimage.color.rgb_from_bex),
    'rgb_from_rbd' : lambda img_raw: skimage.color.combine_stains(img_raw, skimage.color.rgb_from_rbd),
    'rgb_from_gdx' : lambda img_raw: skimage.color.combine_stains(img_raw, skimage.color.rgb_from_gdx),
    'rgb_from_hax' : lambda img_raw: skimage.color.combine_stains(img_raw, skimage.color.rgb_from_hax),
    'rgb_from_bro' : lambda img_raw: skimage.color.combine_stains(img_raw, skimage.color.rgb_from_bro),
    'rgb_from_bpx' : lambda img_raw: skimage.color.combine_stains(img_raw, skimage.color.rgb_from_bpx),
    'rgb_from_ahx' : lambda img_raw: skimage.color.combine_stains(img_raw, skimage.color.rgb_from_ahx),
    'rgb_from_hpx' : lambda img_raw: skimage.color.combine_stains(img_raw, skimage.color.rgb_from_hpx),
    # --- stain separation (RGB -> stain space) ------------------------
    'hed_from_rgb' : lambda img_raw: skimage.color.separate_stains(img_raw, skimage.color.hed_from_rgb),
    'hdx_from_rgb' : lambda img_raw: skimage.color.separate_stains(img_raw, skimage.color.hdx_from_rgb),
    'fgx_from_rgb' : lambda img_raw: skimage.color.separate_stains(img_raw, skimage.color.fgx_from_rgb),
    'bex_from_rgb' : lambda img_raw: skimage.color.separate_stains(img_raw, skimage.color.bex_from_rgb),
    'rbd_from_rgb' : lambda img_raw: skimage.color.separate_stains(img_raw, skimage.color.rbd_from_rgb),
    'gdx_from_rgb' : lambda img_raw: skimage.color.separate_stains(img_raw, skimage.color.gdx_from_rgb),
    'hax_from_rgb' : lambda img_raw: skimage.color.separate_stains(img_raw, skimage.color.hax_from_rgb),
    'bro_from_rgb' : lambda img_raw: skimage.color.separate_stains(img_raw, skimage.color.bro_from_rgb),
    'bpx_from_rgb' : lambda img_raw: skimage.color.separate_stains(img_raw, skimage.color.bpx_from_rgb),
    'ahx_from_rgb' : lambda img_raw: skimage.color.separate_stains(img_raw, skimage.color.ahx_from_rgb),
    'hpx_from_rgb' : lambda img_raw: skimage.color.separate_stains(img_raw, skimage.color.hpx_from_rgb),
    # --- matrix decompositions: flatten pixels, fit, reshape back -----
    'pca' : lambda img_raw: np.reshape(PCA(n_components=3).fit_transform(np.reshape(img_raw, [-1, img_raw.shape[2]])),
                                       [img_raw.shape[0],img_raw.shape[1],-1]),
    'nmf' : lambda img_raw: np.reshape(NMF(n_components=3, init='nndsvda').fit_transform(np.reshape(img_raw, [-1, img_raw.shape[2]])),
                                       [img_raw.shape[0],img_raw.shape[1],-1]),
    'ica' : lambda img_raw: np.reshape(FastICA(n_components=3).fit_transform(np.reshape(img_raw, [-1, img_raw.shape[2]])),
                                       [img_raw.shape[0],img_raw.shape[1],-1]),
    'fa' : lambda img_raw: np.reshape(FactorAnalysis(n_components=3).fit_transform(np.reshape(img_raw, [-1, img_raw.shape[2]])),
                                      [img_raw.shape[0],img_raw.shape[1],-1])
}
# --- command-line entry point -----------------------------------------
parser = argparse.ArgumentParser()
parser.add_argument('input_file', type=argparse.FileType('r'), default=sys.stdin, help='input file')
# Fix: the former default of sys.stdin for an *output* file was wrong.
# (Positionals without nargs are required, so the default is never used.)
parser.add_argument('out_file', type=argparse.FileType('w'), default=sys.stdout, help='out file (TIFF)')
parser.add_argument('conv_type', choices=convOptions.keys(), help='conversion type')
args = parser.parse_args()

# Read the image, dropping any alpha channel, and run the conversion.
img_in = skimage.io.imread(args.input_file.name)[:,:,0:3]
res = convOptions[args.conv_type](img_in)

# Clamp to [-1, 1] so img_as_uint accepts the data (some conversions and
# decompositions produce values outside that range).
res = np.clip(res, -1, 1)

with warnings.catch_warnings():
    warnings.simplefilter("ignore")
    res = skimage.util.img_as_uint(res)  # Attention: precision loss
skimage.io.imsave(args.out_file.name, res, plugin='tifffile')
| {
"content_hash": "f2b5ef29f4cfe9b6a5f9477ec1d2e51b",
"timestamp": "",
"source": "github",
"line_count": 87,
"max_line_length": 142,
"avg_line_length": 70.58620689655173,
"alnum_prop": 0.6487542745481192,
"repo_name": "BMCV/galaxy-image-analysis",
"id": "b5141cb40f2ce3028d9b4dab1448b88f6ae72216",
"size": "6141",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tools/color-deconvolution/color_deconvolution.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "554"
},
{
"name": "Mako",
"bytes": "2018"
},
{
"name": "Python",
"bytes": "102811"
}
],
"symlink_target": ""
} |
"""Views for Timeline.
"""
__authors__ = [
'"Sverre Rabbelier" <sverre@rabbelier.nl>',
'"Lennard de Rijk" <ljvderijk@gmail.com>',
'"Pawel Solyga" <pawel.solyga@gmail.com>',
]
from django import forms
from soc.logic import dicts
from soc.logic.models import program as program_logic
from soc.views.helper import access
from soc.views.helper import responses
from soc.views.models import base
import soc.logic.models.timeline
class View(base.View):
  """View methods for the Timeline model.
  """

  def __init__(self, params=None):
    """Defines the fields and methods required for the base View class
    to provide the user with list, public, create, edit and delete views.

    Params:
      params: a dict with params for this View
    """

    rights = access.Checker(params)
    rights['edit'] = [('checkCanEditTimeline', [program_logic.logic])]

    # All Timeline-specific configuration, merged over the caller's params.
    new_params = {
        'rights': rights,
        'logic': soc.logic.models.timeline.logic,
        'edit_template': 'soc/timeline/edit.html',
        'name': "Timeline",
        'django_patterns_defaults': [
            (r'^%(url_name)s/(?P<access_type>edit)/%(key_fields)s$',
             '%(module_package)s.%(module_name)s.edit',
             "Edit %(name_short)s")],
        'create_extra_dynaproperties': {
            'link_id': forms.CharField(widget=forms.HiddenInput)
            },
        'edit_dynaproperties': [],
        }

    params = dicts.merge(params, new_params)
    super(View, self).__init__(params=params)

  def _editPost(self, request, entity, fields):
    """See base.View._editPost().
    """
    # a timeline can only be edited, so set the scope path using entity
    fields['scope_path'] = entity.scope_path
# Module-level View instance used when wiring up the URL patterns.
view = View()
# Legacy entry points: both redirect old-style requests.
edit = responses.redirectLegacyRequest
public = responses.redirectLegacyRequest
| {
"content_hash": "1d888a2b0be7ca67df9eea4be46654f7",
"timestamp": "",
"source": "github",
"line_count": 70,
"max_line_length": 73,
"avg_line_length": 26.82857142857143,
"alnum_prop": 0.6602768903088392,
"repo_name": "SRabbelier/Melange",
"id": "18b1b2c996ca87bfaaa50f02f433e7d2bfa98ab5",
"size": "2488",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "app/soc/views/models/timeline.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "400472"
},
{
"name": "C++",
"bytes": "20"
},
{
"name": "Java",
"bytes": "1496"
},
{
"name": "JavaScript",
"bytes": "1623582"
},
{
"name": "PHP",
"bytes": "1032"
},
{
"name": "Perl",
"bytes": "177565"
},
{
"name": "Python",
"bytes": "15317793"
},
{
"name": "Ruby",
"bytes": "59"
},
{
"name": "Shell",
"bytes": "15303"
}
],
"symlink_target": ""
} |
# Package metadata constants for candemaker.
__title__ = 'candemaker'
__description__ = 'Tools for creating CANDE input files (.cid).'
__url__ = 'https://github.com/Ricyteach/candemaker'
__version__ = '0.0.1'
__author__ = 'Rick Teachey'
__author_email__ = 'rickteachey@cbceng.com'
__license__ = 'BSD'
__copyright__ = 'Copyright 2017 Rick Teachey'
__keywords__ = 'cid msh CANDE fea' | {
"content_hash": "f32d80f52fa37838fdb490cafed4fc5f",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 64,
"avg_line_length": 37.333333333333336,
"alnum_prop": 0.6517857142857143,
"repo_name": "Ricyteach/candemaker",
"id": "cc76941778c7afc80e7e2f4d521403ad3c2c60f7",
"size": "336",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/candemaker/__version__.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Batchfile",
"bytes": "21"
},
{
"name": "Python",
"bytes": "142106"
}
],
"symlink_target": ""
} |
"""
Sponge Knowledge Base
Action metadata Record type
"""
def createBookType(name):
    """Build a record type describing a book: nullable id, author, title."""
    return RecordType(name, [
        IntegerType("id").withNullable().withLabel("Identifier"),
        StringType("author").withLabel("Author"),
        StringType("title").withLabel("Title")
    ])
# Mutable module-level "database" holding the single book record.
BOOK = {"id":1, "author":"James Joyce", "title":"Ulysses"}
class RecordAsResultAction(Action):
    """Action returning the book record for a given id, or None if no match."""
    def onConfigure(self):
        self.withArg(IntegerType("bookId")).withResult(createBookType("book").withNullable())
    def onCall(self, bookId):
        global BOOK
        return BOOK if bookId == BOOK["id"] else None
class RecordAsArgAction(Action):
    """Action taking a book record and storing it in the module-level BOOK."""
    def onConfigure(self):
        self.withArg(createBookType("book")).withNoResult()
    def onCall(self, book):
        global BOOK
        # The id stays fixed at 1; only author and title come from the caller.
        BOOK = {"id":1, "author":book["author"], "title":book["title"]}
| {
"content_hash": "1dd2692bafead202e8b578b865cb0a6f",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 93,
"avg_line_length": 32.464285714285715,
"alnum_prop": 0.6072607260726073,
"repo_name": "softelnet/sponge",
"id": "c7353d3222d70c87a71f6b54ac09d2b7be9e1f0f",
"size": "909",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sponge-integration-tests/examples/core/actions_metadata_types_record.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "482"
},
{
"name": "Dockerfile",
"bytes": "2389"
},
{
"name": "Groovy",
"bytes": "70914"
},
{
"name": "HTML",
"bytes": "6759"
},
{
"name": "Java",
"bytes": "3300560"
},
{
"name": "JavaScript",
"bytes": "70716"
},
{
"name": "Kotlin",
"bytes": "113542"
},
{
"name": "Mustache",
"bytes": "38"
},
{
"name": "Python",
"bytes": "426240"
},
{
"name": "Ruby",
"bytes": "65491"
},
{
"name": "SCSS",
"bytes": "6217"
},
{
"name": "Shell",
"bytes": "1388"
}
],
"symlink_target": ""
} |
import re
import npc
from npc.character.tags import TagContainer
from npc.character import Werewolf
from mako.template import Template
from markdown import Markdown
def template_output(character, header_level=3):
    """Render *character* through the internal werewolf HTML template.

    The character's tags are passed to the template along with a markdown
    converter for formatting tag text.
    """
    template_path = str(npc.settings.InternalSettings().get('listing.templates.html.character.werewolf'))
    character_template = Template(filename=template_path)
    md_converter = Markdown(extensions=['markdown.extensions.smarty'])
    _clean_conv = md_converter.reset
    # reset() returns the converter, so every render starts from clean state.
    return character_template.render(tags=character.tags, header_level=header_level, mdconv=_clean_conv().convert)
def test_inserts_hashes_for_header_level():
    """The requested header level controls the <hN> tag that is emitted."""
    rendered = template_output(Werewolf(), 3)
    assert re.match(r'^<h3>.*</h3>', rendered) is not None
class TestName:
    def test_uses_first_name_for_header(self):
        """The first name tag becomes the header text."""
        subject = Werewolf()
        subject.tags('name').append('Joe Smith')
        assert '<h3>Joe Smith</h3>' in template_output(subject)

    def test_adds_aka_for_remaining_names(self):
        """Names after the first are shown in an AKA line."""
        subject = Werewolf()
        subject.tags('name').extend(['Joe Smith', 'Mr. Smith', 'The Man'])
        assert '<div><em>AKA Mr. Smith, The Man</em></div>' in template_output(subject)
class TestDead:
    def test_inserts_deceased_note_if_dead(self):
        """A bare dead tag adds a Deceased marker to the header."""
        subject = Werewolf()
        subject.tags('name').append('Joe Smith')
        subject.tags('dead').touch()
        assert '<h3>Joe Smith (Deceased)</h3>' in template_output(subject)

    def test_no_dead_section_without_dead_notes(self):
        """Without dead notes there is no Dead section at all."""
        subject = Werewolf()
        subject.tags('name').append('Joe Smith')
        assert '<em>Dead:</em>' not in template_output(subject)

    def test_has_dead_section_with_dead_notes(self):
        """Dead notes are rendered in a Dead section."""
        subject = Werewolf()
        subject.tags('name').append('Joe Smith')
        subject.tags('dead').append('fell hard')
        assert '<em>Dead:</em> fell hard' in template_output(subject)
def test_titles_on_own_line():
    """All titles are joined onto a single line of their own."""
    subject = Werewolf()
    subject.tags('title').extend(['title 1', 'title 2'])
    rendered = template_output(subject)
    assert re.search(r'^<div>title 1, title 2</div>$', rendered, re.MULTILINE) is not None
def test_types_separated_with_slash():
    """Multiple type tags are joined with a slash."""
    subject = Werewolf()
    subject.tags('type').extend(['human', 'changeling'])
    assert 'human/changeling' in template_output(subject)
def test_locations_appended_to_types():
    """Foreign and location values follow the types, joined with 'and'."""
    subject = Werewolf()
    subject.tags('type').extend(['human', 'changeling'])
    subject.tags('foreign').append('florida')
    subject.tags('location').append('orlando')
    assert 'human/changeling in florida and orlando' in template_output(subject)
def test_foreign_note_if_foreign():
    """A bare foreign tag appends a '(foreign)' note after the types."""
    subject = Werewolf()
    subject.tags('type').extend(['human', 'changeling'])
    subject.tags('foreign').touch()
    assert 'human/changeling (foreign)' in template_output(subject)
def test_wanderer_note_if_wanderer():
    """A wanderer tag appends a 'Wanderer' note after the foreign note."""
    subject = Werewolf()
    subject.tags('type').extend(['human', 'changeling'])
    subject.tags('foreign').touch()
    subject.tags('wanderer').touch()
    assert 'human/changeling (foreign), Wanderer' in template_output(subject)
class TestPack:
    def test_first_pack_on_own_line(self):
        """The first pack is rendered on its own line."""
        subject = Werewolf()
        subject.tags('pack').append('grinners')
        assert '<br />grinners Pack' in template_output(subject)

    def test_pack_rank_included(self):
        """Pack rank appears in parentheses after the pack name."""
        subject = Werewolf()
        subject.tags('pack').append('grinners')
        subject.tags('pack').subtag('grinners').append('scout')
        assert 'grinners Pack (scout)' in template_output(subject)
class TestTribe:
    def test_first_tribe_appears_with_pack(self):
        """The first tribe is listed on the same line as the pack."""
        subject = Werewolf()
        subject.tags('pack').append('Grinners')
        subject.tags('tribe').append('Blood Talons')
        assert 'Grinners Pack, Blood Talons Tribe' in template_output(subject)

    def test_missing_tribe_yields_ghost_wolf(self):
        """With no tribe tag the character is labelled a Ghost Wolf."""
        subject = Werewolf()
        subject.tags('pack').append('Grinners')
        assert 'Grinners Pack, Ghost Wolf' in template_output(subject)
def test_auspices_joined_with_slash():
    """Multiple auspice tags are joined with a slash."""
    subject = Werewolf()
    subject.tags('auspice').extend(['Rahu', 'Cahalith'])
    assert 'Rahu/Cahalith' in template_output(subject)
def test_lodge_in_own_section():
    """Lodge and its subtag are rendered in their own div."""
    subject = Werewolf()
    subject.tags('lodge').append('Sky Splitters')
    subject.tags('lodge').subtag('Sky Splitters').append('Dancer')
    assert '<div>Sky Splitters (Dancer)</div>' in template_output(subject)
def test_all_groups_in_own_section():
    """Every group and its ranks appear together in one div."""
    subject = Werewolf()
    subject.tags('type').append('human')
    subject.tags('group').append('student council')
    subject.tags('group').subtag('student council').append('president')
    subject.tags('group').subtag('student council').append('member')
    subject.tags('group').append('volleyball')
    subject.tags('group').subtag('volleyball').append('star')
    subject.tags('group').append('chess club')
    subject.tags('group').subtag('chess club').append('newbie')
    rendered = template_output(subject)
    assert '<div>student council (president, member), volleyball (star), chess club (newbie)</div>' in rendered
class TestAppearance:
    def test_has_section_if_filled(self):
        """An appearance tag produces an Appearance paragraph."""
        subject = Werewolf()
        subject.tags('appearance').append('grungy')
        rendered = template_output(subject)
        assert re.search(r'^<p><em>Appearance:</em> grungy</p>$', rendered, re.MULTILINE) is not None

    def test_no_section_if_not_filled(self):
        """Without an appearance tag the section is omitted entirely."""
        subject = Werewolf()
        assert '<em>Appearance:</em>' not in template_output(subject)
class TestDescription:
    def test_has_section_if_filled(self):
        """A description tag produces a Notes paragraph."""
        subject = Werewolf()
        subject.tags('description').append('some guy')
        rendered = template_output(subject)
        assert re.search(r'^<p><em>Notes:</em> some guy</p>$', rendered, re.MULTILINE) is not None

    def test_no_section_if_not_filled(self):
        """Without a description tag the section is omitted entirely."""
        subject = Werewolf()
        assert '<em>Notes:</em>' not in template_output(subject)
def test_full_sheet_formatting():
    """End-to-end render of a fully populated werewolf character sheet."""
    char = Werewolf()
    char.tags('name').extend(['Bob Herbson', 'Bobbie'])
    char.tags('dead').append('Perished in a teleporter accident.')
    char.tags('title').append('The Werewolf Guinea Pig')
    char.tags('pack').append("Mama's Boys")
    char.tags('tribe').append('Glass Walkers')
    char.tags('auspice').append('Irraka')
    char.tags('lodge').append('Oedipus Lets')
    char.tags('location').append('Moontown')
    char.tags('wanderer').touch()
    char.tags('group').append('Testers')
    char.tags('group').subtag('Testers').append('Chief Marshall')
    char.tags('group').append('Croquet Team')
    char.tags('group').subtag('Croquet Team').append('Water Boy')
    char.tags.add_group('motley', 'Moon Morons')
    char.tags('motley').subtag('Moon Morons').append('Fixer')
    char.tags('appearance').append('Red shirt and a goofy grin.')
    char.tags('description').append('Outgoing fella with a shady hobby and no fear of death.')
    output = template_output(char)
    print(output) # Always print the real output for when things go wrong
    # The expected markup is compared byte-for-byte, including line breaks.
    expected = """\
<h3>Bob Herbson (Deceased)</h3>
<div><em>AKA Bobbie</em></div>
<div>The Werewolf Guinea Pig</div>
<div>werewolf in Moontown, Wanderer
<br />Mama's Boys Pack, Glass Walkers Tribe</div>
<div>Irraka</div>
<div>Oedipus Lets</div>
<div>Testers (Chief Marshall), Croquet Team (Water Boy)</div>
<p><em>Appearance:</em> Red shirt and a goofy grin.</p>
<p><em>Notes:</em> Outgoing fella with a shady hobby and no fear of death.</p>
<p><em>Dead:</em> Perished in a teleporter accident.</p>
"""
    assert output == expected
| {
"content_hash": "812205c6a3e76cf721a4fbcb07ab246f",
"timestamp": "",
"source": "github",
"line_count": 202,
"max_line_length": 114,
"avg_line_length": 38.584158415841586,
"alnum_prop": 0.6540928919681807,
"repo_name": "aurule/npc",
"id": "20fee8b1bc235a54472d6f20dd9fb046e744d23d",
"size": "7794",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "tests/commands/listing/templates/test_werewolf_html.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "1376"
},
{
"name": "Mako",
"bytes": "15932"
},
{
"name": "Python",
"bytes": "455208"
},
{
"name": "Shell",
"bytes": "1783"
}
],
"symlink_target": ""
} |
import json
import os
import sys
import unittest
from api_data_source import (_JSCModel,
_FormatValue,
_GetEventByNameFromEvents)
from branch_utility import ChannelInfo
from extensions_paths import CHROME_EXTENSIONS
from fake_host_file_system_provider import FakeHostFileSystemProvider
from features_bundle import FeaturesBundle
from file_system import FileNotFoundError
from future import Future
from object_store_creator import ObjectStoreCreator
from reference_resolver import ReferenceResolver
from server_instance import ServerInstance
from test_data.canned_data import (CANNED_API_FILE_SYSTEM_DATA, CANNED_BRANCHES)
from test_data.api_data_source.canned_trunk_fs import CANNED_TRUNK_FS_DATA
from test_file_system import TestFileSystem
from test_util import Server2Path
from third_party.json_schema_compiler.memoize import memoize
def _MakeLink(href, text):
return '<a href="%s">%s</a>' % (href, text)
def _GetType(dict_, name):
for type_ in dict_['types']:
if type_['name'] == name:
return type_
class _FakeAvailabilityFinder(object):
  """Stub availability finder: every API is reported stable at 396/5."""
  def GetApiAvailability(self, version):
    return ChannelInfo('stable', '396', 5)
class _FakeSamplesDataSource(object):
  """Stub samples data source that always yields an empty dict."""
  def Create(self, request):
    return {}
# Sad irony :(
class _FakeAPIDataSource(object):
  """Stub API data source backed by a plain dict of canned JSON data."""
  def __init__(self, json_data):
    self._json = json_data
  def Create(self, *args, **kwargs):
    # Factory interface: this fake acts as its own created instance.
    return self
  def get(self, key, disable_refs=False):
    # Mirrors the real data source's contract of raising FileNotFoundError
    # for unknown keys.
    if key not in self._json:
      raise FileNotFoundError(key)
    return self._json[key]
class _FakeAPIModels(object):
  """Stub API models container that only reports a fixed set of names."""
  def __init__(self, names):
    self._names = names
  def GetNames(self):
    return self._names
class _FakeTemplateCache(object):
  """Stub template cache: 'renders' any template as 'handlebar <path>'."""
  def GetFromFile(self, key):
    return Future(value='handlebar %s' % key)
class APIDataSourceTest(unittest.TestCase):
def setUp(self):
self._base_path = Server2Path('test_data', 'test_json')
server_instance = ServerInstance.ForTest(
TestFileSystem(CANNED_TRUNK_FS_DATA, relative_to=CHROME_EXTENSIONS))
file_system = server_instance.host_file_system_provider.GetTrunk()
self._json_cache = server_instance.compiled_fs_factory.ForJson(file_system)
self._features_bundle = FeaturesBundle(file_system,
server_instance.compiled_fs_factory,
server_instance.object_store_creator)
self._api_models = server_instance.api_models
# Used for testGetApiAvailability() so that valid-ish data is processed.
server_instance = ServerInstance.ForTest(
file_system_provider=FakeHostFileSystemProvider(
CANNED_API_FILE_SYSTEM_DATA))
self._avail_api_models = server_instance.api_models
self._avail_json_cache = server_instance.compiled_fs_factory.ForJson(
server_instance.host_file_system_provider.GetTrunk())
self._avail_finder = server_instance.availability_finder
def _ReadLocalFile(self, filename):
with open(os.path.join(self._base_path, filename), 'r') as f:
return f.read()
def _CreateRefResolver(self, filename):
test_data = self._LoadJSON(filename)
return ReferenceResolver.Factory(_FakeAPIDataSource(test_data),
_FakeAPIModels(test_data),
ObjectStoreCreator.ForTest()).Create()
def _LoadJSON(self, filename):
return json.loads(self._ReadLocalFile(filename))
def testCreateId(self):
dict_ = _JSCModel('tester',
self._api_models,
self._CreateRefResolver('test_file_data_source.json'),
False,
_FakeAvailabilityFinder(),
self._json_cache,
_FakeTemplateCache(),
self._features_bundle,
None).ToDict()
self.assertEquals('type-TypeA', dict_['types'][0]['id'])
self.assertEquals('property-TypeA-b',
dict_['types'][0]['properties'][0]['id'])
self.assertEquals('method-get', dict_['functions'][0]['id'])
self.assertEquals('event-EventA', dict_['events'][0]['id'])
# TODO(kalman): re-enable this when we have a rebase option.
def DISABLED_testToDict(self):
expected_json = self._LoadJSON('expected_tester.json')
dict_ = _JSCModel('tester',
self._api_models,
False,
self._CreateRefResolver('test_file_data_source.json'),
_FakeAvailabilityFinder(),
self._json_cache,
_FakeTemplateCache(),
self._features_bundle,
None).ToDict()
self.assertEquals(expected_json, dict_)
def testFormatValue(self):
self.assertEquals('1,234,567', _FormatValue(1234567))
self.assertEquals('67', _FormatValue(67))
self.assertEquals('234,567', _FormatValue(234567))
def testFormatDescription(self):
dict_ = _JSCModel('ref_test',
self._api_models,
self._CreateRefResolver('ref_test_data_source.json'),
False,
_FakeAvailabilityFinder(),
self._json_cache,
_FakeTemplateCache(),
self._features_bundle,
None).ToDict()
self.assertEquals(_MakeLink('ref_test.html#type-type2', 'type2'),
_GetType(dict_, 'type1')['description'])
self.assertEquals(
'A %s, or %s' % (_MakeLink('ref_test.html#type-type3', 'type3'),
_MakeLink('ref_test.html#type-type2', 'type2')),
_GetType(dict_, 'type2')['description'])
self.assertEquals(
'%s != %s' % (_MakeLink('other.html#type-type2', 'other.type2'),
_MakeLink('ref_test.html#type-type2', 'type2')),
_GetType(dict_, 'type3')['description'])
def testGetApiAvailability(self):
api_availabilities = {
'bluetooth': ChannelInfo('dev', CANNED_BRANCHES[28], 28),
'contextMenus': ChannelInfo('trunk', CANNED_BRANCHES['trunk'], 'trunk'),
'jsonStableAPI': ChannelInfo('stable', CANNED_BRANCHES[20], 20),
'idle': ChannelInfo('stable', CANNED_BRANCHES[5], 5),
'input.ime': ChannelInfo('stable', CANNED_BRANCHES[18], 18),
'tabs': ChannelInfo('stable', CANNED_BRANCHES[18], 18)
}
for api_name, availability in api_availabilities.iteritems():
model = _JSCModel(api_name,
self._avail_api_models,
None,
True,
self._avail_finder,
self._avail_json_cache,
_FakeTemplateCache(),
self._features_bundle,
None)
self.assertEquals(availability, model._GetApiAvailability())
def testGetIntroList(self):
model = _JSCModel('tester',
self._api_models,
self._CreateRefResolver('test_file_data_source.json'),
False,
_FakeAvailabilityFinder(),
self._json_cache,
_FakeTemplateCache(),
self._features_bundle,
None)
expected_list = [
{ 'title': 'Description',
'content': [
{ 'text': 'a test api' }
]
},
{ 'title': 'Availability',
'content': [
{ 'partial': 'handlebar chrome/common/extensions/docs/' +
'templates/private/intro_tables/stable_message.html',
'version': 5
}
]
},
{ 'title': 'Permissions',
'content': [
{ 'class': 'override',
'text': '"tester"'
},
{ 'text': 'is an API for testing things.' }
]
},
{ 'title': 'Manifest',
'content': [
{ 'class': 'code',
'text': '"tester": {...}'
}
]
},
{ 'title': 'Learn More',
'content': [
{ 'link': 'https://tester.test.com/welcome.html',
'text': 'Welcome!'
}
]
}
]
self.assertEquals(model._GetIntroTableList(), expected_list)
  def testGetEventByNameFromEvents(self):
    """Checks _GetEventByNameFromEvents validation and lookup behavior.

    The helper asserts on malformed input (missing 'types', missing the
    'Event' type, duplicate function names) and otherwise returns a
    name -> function mapping.
    """
    events = {}
    # Missing 'types' completely.
    self.assertRaises(AssertionError, _GetEventByNameFromEvents, events)
    events['types'] = []
    # No type 'Event' defined.
    self.assertRaises(AssertionError, _GetEventByNameFromEvents, events)
    events['types'].append({ 'name': 'Event',
                             'functions': []})
    add_rules = { "name": "addRules" }
    events['types'][0]['functions'].append(add_rules)
    self.assertEqual(add_rules,
                     _GetEventByNameFromEvents(events)['addRules'])
    events['types'][0]['functions'].append(add_rules)
    # Duplicates are an error.
    self.assertRaises(AssertionError, _GetEventByNameFromEvents, events)
  def _FakeLoadAddRulesSchema(self):
    """Stand-in schema loader: reads the canned addRules event definition."""
    events = self._LoadJSON('add_rules_def_test.json')
    return _GetEventByNameFromEvents(events)
  def testAddRules(self):
    """Checks that the declarative-event schema is spliced into ToDict().

    Uses _FakeLoadAddRulesSchema so the model resolves addRules/addListener
    from the canned definition instead of the real events schema.
    """
    dict_ = _JSCModel('add_rules_tester',
                      self._api_models,
                      self._CreateRefResolver('test_file_data_source.json'),
                      False,
                      _FakeAvailabilityFinder(),
                      self._json_cache,
                      _FakeTemplateCache(),
                      self._features_bundle,
                      self._FakeLoadAddRulesSchema).ToDict()
    # Check that the first event has the addRulesFunction defined.
    self.assertEquals('add_rules_tester', dict_['name'])
    self.assertEquals('rules', dict_['events'][0]['name'])
    self.assertEquals('notable_name_to_check_for',
                      dict_['events'][0]['byName']['addRules'][
                          'parameters'][0]['name'])
    # Check that the second event has addListener defined.
    # NOTE(review): the comment above says "second event", but the final
    # assertion below inspects dict_['events'][0] (the first event) —
    # confirm whether it should read dict_['events'][1].
    self.assertEquals('noRules', dict_['events'][1]['name'])
    self.assertEquals('add_rules_tester', dict_['name'])
    self.assertEquals('noRules', dict_['events'][1]['name'])
    self.assertEquals('callback',
                      dict_['events'][0]['byName']['addListener'][
                          'parameters'][0]['name'])
# Allow running this test module directly.
if __name__ == '__main__':
  unittest.main()
| {
"content_hash": "bc07a70e0701bb86436864b504dee1b1",
"timestamp": "",
"source": "github",
"line_count": 291,
"max_line_length": 80,
"avg_line_length": 35.94845360824742,
"alnum_prop": 0.5758531689131058,
"repo_name": "patrickm/chromium.src",
"id": "e842576f633e2a6bdc6f37f6529fc99318ade7a0",
"size": "10650",
"binary": false,
"copies": "1",
"ref": "refs/heads/nw",
"path": "chrome/common/extensions/docs/server2/api_data_source_test.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "ASP",
"bytes": "853"
},
{
"name": "AppleScript",
"bytes": "6973"
},
{
"name": "Arduino",
"bytes": "464"
},
{
"name": "Assembly",
"bytes": "52960"
},
{
"name": "Awk",
"bytes": "8660"
},
{
"name": "C",
"bytes": "40737238"
},
{
"name": "C#",
"bytes": "1132"
},
{
"name": "C++",
"bytes": "207930633"
},
{
"name": "CSS",
"bytes": "939170"
},
{
"name": "Java",
"bytes": "5844934"
},
{
"name": "JavaScript",
"bytes": "17837835"
},
{
"name": "Mercury",
"bytes": "10533"
},
{
"name": "Objective-C",
"bytes": "886228"
},
{
"name": "Objective-C++",
"bytes": "6667789"
},
{
"name": "PHP",
"bytes": "97817"
},
{
"name": "Perl",
"bytes": "672770"
},
{
"name": "Python",
"bytes": "10857933"
},
{
"name": "Rebol",
"bytes": "262"
},
{
"name": "Shell",
"bytes": "1326032"
},
{
"name": "Tcl",
"bytes": "277091"
},
{
"name": "XSLT",
"bytes": "13493"
},
{
"name": "nesC",
"bytes": "15206"
}
],
"symlink_target": ""
} |
'''
Simple pull of account info
'''
import requests
import datetime
import pickle
import json
import time
import sys
# Toodledo v3 REST endpoints; the OAuth access token is appended directly
# to each URL when a request is made.
account_url = 'https://api.toodledo.com/3/account/get.php?access_token='
tasks_get_url = 'https://api.toodledo.com/3/tasks/get.php?access_token='
'''
Fields you can use to filter when you get tasks:
https://api.toodledo.com/3/tasks/index.php under "Task Datatypes"
'''
def load_token(token):
    '''Load a pickled OAuth token from the file at path *token*.

    token -- path to a pickle file containing the token dict
    Returns the unpickled object (a dict with an 'access_token' key,
    as used by the callers in this module).
    '''
    # Use a context manager so the file handle is closed; the original
    # passed an anonymous open() to pickle.load and leaked the handle.
    with open(token, 'rb') as fh:
        return pickle.load(fh)
def sync(token):
    '''Fetch Toodledo account info using the pickled token file *token*.

    token -- path to the pickled auth token file
    Returns the raw response body text (a JSON string).
    '''
    token = load_token(token)
    get_account = requests.get('{}{}'.format(account_url, token['access_token']))
    #cur_task = int(get_account.text['lastedit_task'])
    return get_account.text
def query_tasks(token, days, completion_state='1', fields='tag,context,goal'):
    '''Query Toodledo for tasks changed within the last *days* days.

    token -- path to the pickled auth token file
    days -- how many days back to set the "after" cutoff
    completion_state -- '1' completed, '0' incomplete, '-1' both
    fields -- comma-separated extra task fields to request

    Side effect: caches the raw response text in tasks_queried.pkl.
    Returns the raw response body text (a JSON string).
    '''
    token = load_token(token)
    # Get Tasks from Monday (ie 4 days ago since we cron for friday)
    start_date = datetime.date.today() - datetime.timedelta(days=days)
    # Make it Epoch Time
    start_date = int(time.mktime(start_date.timetuple()))
    start_date = str(start_date)
    # Get ALL tasks from start_date'
    # Comp codes -- 1 == completed, 0 == incomplete, -1 == both
    get_tasks = requests.get('{}{}&after={}&comp={}&fields={}'.format(tasks_get_url, token['access_token'], start_date, completion_state, fields))
    pickle.dump(get_tasks.text, open('tasks_queried.pkl', 'wb'))
    return get_tasks.text
def parse_to_json(response):
    '''Load a cached (pickled) JSON string from file *response* and decode it.

    response -- path to a pickle file written by query_tasks() or
                get_defined_list_ids()
    Returns the decoded JSON value (typically a list of task dicts).
    '''
    # Close the file handle explicitly; the original leaked it by passing
    # an anonymous open() to pickle.load.
    with open(response, 'rb') as fh:
        raw = pickle.load(fh)
    return json.loads(raw)
def arrange_date(epoch_time):
    '''Render an epoch timestamp as e.g. "Thursday, Jan 01, 1970" (UTC).'''
    return time.strftime('%A, %b %d, %Y', time.gmtime(epoch_time))
def display_tasks(task_dump, context_pickle, days=4):
    '''Print a human-readable report of the cached task query.

    task_dump -- path to the pickled JSON produced by query_tasks()
    context_pickle -- path to the pickled contexts list from
                      get_defined_list_ids()
    days -- how far back the report header claims to cover

    The first element of the decoded dump carries query metadata (its
    'total' count); tasks with completed == 0 are incomplete (see the
    comp-code comment in query_tasks). Tasks in the 'Standing Meeting'
    context are deliberately skipped.
    '''
    task_dump = parse_to_json(task_dump)
    contexts = make_context_hash(context_pickle)
    start_date = datetime.date.today() - datetime.timedelta(days=days)
    start_date = datetime.date.strftime(start_date, '%A, %b %d, %Y')
    end_date = datetime.date.today()
    end_date = datetime.date.strftime(end_date, '%A, %b %d, %Y')
    print 'Tasks Created between {} and {}.'.format(start_date, end_date)
    print 'Total Tasks: ', task_dump[0]['total']
    for i in range(len(task_dump)):
        #print task_dump[i]
        # print contexts
        if 'completed' in task_dump[i]:
            if task_dump[i]['completed'] == 0:
                print 'Incomplete Task: {}'.format(task_dump[i]['title'])
            elif contexts[task_dump[i]['context']] != 'Standing Meeting':
                comp_date = arrange_date(task_dump[i]['completed'])
                print 'Completed Task : {}, Completed {}'.format(task_dump[i]['title'], comp_date)
            else:
                pass
#test = display_tasks('tasks_queried.pkl', 4)
def format_task(task):
    '''
    Take a dictionary formatted task (as produced by the task query) and
    print it out as a human readable "Completed Task" line. The task must
    carry 'completed' (epoch time) and 'title' keys.
    '''
    comp_date = arrange_date(task['completed'])
    print 'Completed Task : {}, Completed {}'.format(task['title'], comp_date)
def get_completed_tasks():
    '''Return raw JSON text for tasks completed in the last 4 days.'''
    query = query_tasks('auth_token.pkl', 4, '1')
    return query
def get_incomplete_tasks():
    '''Return raw JSON text for incomplete tasks from the last 4 days.'''
    query = query_tasks('auth_token.pkl', 4, '0')
    return query
def get_all_tasks():
    '''Return raw JSON text for all tasks (complete and incomplete).'''
    query = query_tasks('auth_token.pkl', 4, '-1')
    return query
def get_defined_list_ids(token, defined_list):
    '''Fetch a user-defined list ('goals' or 'contexts') from Toodledo.

    token -- path to the pickled auth token file
    defined_list -- which list endpoint to hit; exits with status 2 for
                    anything other than 'goals' or 'contexts'

    Side effect: caches the raw response text in <defined_list>_queried.pkl.
    Returns the raw response body text (a JSON string).
    '''
    valid_lists = ['goals', 'contexts']
    if defined_list.lower() not in valid_lists:
        print 'Not a valid user defined list, exiting...'
        sys.exit(2)
    token = load_token(token)
    query = requests.get('http://api.toodledo.com/3/{}/get.php?access_token={}'.format(defined_list, token['access_token']))
    pickle.dump(query.text, open('{}_queried.pkl'.format(defined_list), 'wb'))
    return query.text
def make_context_hash(defined_list_pickle):
    '''Build an {id: name} mapping from a cached user-defined-list response.

    defined_list_pickle -- path to a pickle file containing the raw JSON
    text written by get_defined_list_ids().

    Returns a dict mapping each entry's 'id' to its 'name'.
    '''
    # Close the file handle explicitly (the original leaked it), and build
    # the mapping with a dict comprehension instead of indexing by
    # range(len(...)).
    with open(defined_list_pickle, 'rb') as fh:
        contexts = json.loads(pickle.load(fh))
    return {entry['id']: entry['name'] for entry in contexts}
#tasks = get_completed_tasks()
#print tasks
# When run directly, report on the most recently cached task query.
if __name__ == '__main__':
    tdump = display_tasks('tasks_queried.pkl', 4)
| {
"content_hash": "6ca577f50f901418aa18f5f6303e260d",
"timestamp": "",
"source": "github",
"line_count": 118,
"max_line_length": 143,
"avg_line_length": 32.567796610169495,
"alnum_prop": 0.6885245901639344,
"repo_name": "gadeleon/toodledo_cli_client",
"id": "c923c871088626efd096d2ec8fe327555a5ee4ad",
"size": "3843",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "toodle_sync.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "23045"
}
],
"symlink_target": ""
} |
from IPython.display import Image
import sys
# ## Weighting functions in the $CO_2$ 15 $\mu m$ absorption band
# Below is a plot of radiance (or intensity) (left axis) and brightness temperature (right axis) vs. wavenumber near the main $CO_2$ absorption band. Wavenumber is defined as $1/\lambda$; the center of the absorption band is at $\lambda = 15\ \mu m$ which is a wavenumber of 1/0.0015 = 666 $cm^{-1}$. The VTPR (vertical temperature profiling radiometer) has six channels: Channel 1 is at the center of the band -- it has the lowest transmissivity and is measuring photons coming from 45 km at the top of the stratosphere (See [Stull Chapter 1, Figure 1.10](https://www.eoas.ubc.ca/books/Practical_Meteorology/prmet101/Ch01-Atmos-v101.pdf)). As the channel number increases from 2-6 the transmissivity also increases, and the photons originate from increasing lower levels of the atmosphere with increasing kinetic temperatures. Note that the different heights for the peaks in each of the weighting functions.
# In[10]:
# Notebook cell output: display the Wallace & Hobbs figure 4.33 image.
Image('figures/wallace4_33.png',width=500)
# ### Assignment 9
#
# This notebook uses the five standard atmospheres from [hydrostatic.ipynb](http://clouds.eos.ubc.ca/~phil/courses/atsc301/html/hydrostatic.html) to show how to use Stull eq. 8.4 to calculate radiance at the top of the atmosphere for wavelengths similar to the 6 sounder channels shown in the above figure. I define a new function **find_tau** to calculate the optical thickness for a $CO_2$-like absorbing gas, and get the weighting functions at 7 wavelengths from the transmission. I then ask you to plot the brightness temperatures at each wavelength for each of the five standard atmospheres.
# In[11]:
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.ticker as ticks
from a301utils.a301_readfile import download
import h5py
#
# from the hydrostatic noteboo
#
from a301lib.thermo import calcDensHeight
# In[12]:
# Download the five standard soundings and load each HDF5 dataset into a
# pandas DataFrame keyed by sounding name.
filename='std_soundings.h5'
download(filename)
# In[13]:
from pandas import DataFrame
with h5py.File(filename) as infile:
    sound_dict={}
    print('soundings: ',list(infile.keys()))
    #
    # names are separated by commas, so split them up
    # and strip leading blanks
    #
    column_names=infile.attrs['variable_names'].split(',')
    column_names = [item.strip() for item in column_names]
    column_units = infile.attrs['units'].split(',')
    column_units = [item.strip() for item in column_units]
    for name in infile.keys():
        data = infile[name][...]
        sound_dict[name]=DataFrame(data,columns=column_names)
# ### mass absorption coefficient for fake $CO_2$
#
# To keep things simple I'm going to make up a set of 7 absorption coefficients that will give weighting functions that look something like the VPTR.
#
# We have been working with the volume absorption coefficient:
#
# $$\beta_\lambda = n b\ (m^{-1})$$ where $n$ is the absorber number concentration in $\#\,m^{-3}$ and $b$ is the absorption cross section of a molecule in $m^2$. For absorbing gasses like $CO_2$ that obey the ideal gas law $n$ depends inversely on temperature -- which changes rapidly with height.
#
# A better route is to use the **mass absorption coefficient** $k_\lambda$:
#
# $$k_\lambda = \frac{n b}{\rho_{air}}$$
#
# units: $m^2/kg$. With this definition the optical depth is:
#
# $$\tau_\lambda = \int_0^z \rho_{air} k_\lambda dz$$
#
# Why is this an improvement? We now have an absorption coefficient that can is roughly constant with height and can be taken out of the integral.
#
# In[14]:
# Dry-air gas constant, J/(kg K).
Rd = 287.
def find_tau(r_gas,k_lambda,df):
    """
    Given a data frame df with a standard sounding, return the optical
    depth assuming an absorbing gas with a constant mixing ratio r_gas
    and mass absorption coefficient k_lambda.

    Parameters
    ----------
    r_gas: float
       absorber mixing ratio, kg/kg
    k_lambda: float
       mass absorption coefficient, m^2/kg
    df: dataframe
       sounding with height levels as rows, columns 'temp': temperature in K
       'press': pressure in Pa, 'z': height in m

    Returns
    -------
    tau: vector (float)
       optical depth measured from the surface at each height in df
    """
    #
    # density scale height
    #
    Hdens = calcDensHeight(df)
    #
    # surface density
    # NOTE(review): rho0 here is the ideal-gas density at *every* level
    # (a Series), which is then multiplied by the exponential decay below.
    # If the intent is an analytic profile rho0*exp(-z/H), rho0 should be
    # the surface value only -- confirm against the course notes.
    #
    rho0 = df['press']/(Rd*df['temp'])
    rho = rho0*np.exp(-df['z']/Hdens)
    height = df['z'].values
    tau=np.empty_like(rho)
    #
    # start from the surface, accumulating d(tau) = r_gas*rho*k_lambda*dz
    # layer by layer (discrete form of Stull eq. 8.4's optical depth).
    #
    tau[0]=0
    num_levels=len(rho)
    num_layers=num_levels-1
    for index in range(num_layers):
        delta_z=height[index+1] - height[index]
        delta_tau=r_gas*rho[index]*k_lambda*delta_z
        tau[index+1]=tau[index] + delta_tau
    return tau
# In[15]:
# Plot vertical optical depth for the tropical sounding (truncated at 20 km)
# against both height and pressure.
get_ipython().magic('matplotlib inline')
r_gas=0.01 #kg/kg
k_lambda=0.01 #m^2/kg
df=sound_dict['tropics']
top = 20.e3
df = df.loc[df['z']<top]
height = df['z']
press = df['press']
tau=find_tau(r_gas,k_lambda,df)
fig1,axis1=plt.subplots(1,1)
axis1.plot(tau,height*1.e-3)
axis1.set_title('vertical optical depth vs. height')
axis1.set_ylabel('height (km)')
axis1.set_xlabel('optical depth (no units)')
fig2,axis2=plt.subplots(1,1)
axis2.plot(tau,press*1.e-3)
# pressure decreases with height, so flip the axis to put the surface at
# the bottom of the plot
axis2.invert_yaxis()
axis2.set_title('vertical optical depth vs. pressure')
axis2.set_ylabel('pressure (kPa)')
axis2.set_xlabel('optical depth (no units)')
# In[16]:
# For 7 wavelengths across the CO2 15 micron band, plot optical depth,
# transmittance, and the weighting function (d transmittance / d layer)
# versus height for the tropical sounding.
r_gas=0.01 #kg/kg
#
# assign the 7 k_lambdas to 7 CO2 absorption band wavelengths
# (see Wallace and Hobbs figure 4.33)
#
wavenums=np.linspace(666,766,7) #wavenumbers in cm^{-1}
wavelengths=1/wavenums*1.e-2 #wavelength in m
wavelengths_um = wavelengths*1.e6 # in microns
print('channel wavelengths (microns) ',wavelengths_um) #microns
df=sound_dict['tropics']
top = 20.e3 #stop at 20 km
df = df.loc[df['z']< top]
height = df['z'].values
# NOTE(review): this first mid_height uses a minus sign (half the layer
# thickness); it is dead code -- it is recomputed below with a plus sign
# as the true layer midpoint before being used.
mid_height = (height[1:] - height[:-1])/2.
#
# here are the mass absorption coefficients for each of the 7 wavelengths
# in m^2/kg
#
k_lambda_list=np.array([ 0.175 , 0.15 , 0.125 , 0.1 , 0.075, 0.05 , 0.025])
legend_string=["{:5.3f}".format(item) for item in k_lambda_list]
#
# make a list of tuples of k_lambda and its label
# using zip
#
k_vals=zip(k_lambda_list,legend_string)
#
# find the height at mid-layer
#
mid_height=(height[1:] + height[:-1])/2.
fig1,axis_list=plt.subplots(2,2,figsize=(14,14))
#
# turn axis_list from 2 x 2 to 4 x 1
#
axis_list=np.array(axis_list).ravel()
#
# throw away the 4th plot using delaxes -- only plotting 3 graphs
#
fig1.delaxes(axis_list[3])
axis_list=axis_list[:3]
heightkm=height*1.e-3
mid_heightkm=mid_height*1.e-3
for k_lambda,k_label in k_vals:
    tau=find_tau(r_gas,k_lambda,df)
    tau_tot=tau[-1]
    axis_list[0].plot(tau,heightkm,label=k_label)
    # transmittance from each level to the top of the sounding
    trans=np.exp(-(tau_tot - tau))
    axis_list[1].plot(trans,heightkm,label=k_label)
    # weighting function: change in transmittance across each layer
    del_trans=np.diff(trans)
    axis_list[2].plot(del_trans,mid_heightkm,label=k_label)
titles = ['optical depth for 7 values of $k_\lambda$',
          'transmittance for 7 values of $k_\lambda$',
          'weighting function for 7 values of $k_\lambda$']
xlabels = ['optical depth',
           'transmittance',
           'weighting function']
for axis,title,xlabel in zip(axis_list,titles,xlabels):
    axis.set(title=title,xlabel=xlabel)
[axis.set_ylabel('height (km)') for axis in axis_list]
_=[axis.legend(loc='best') for axis in axis_list]
fig1.savefig('trans_plots.png')
# In[17]:
# Compute top-of-atmosphere brightness temperature for each sounding and
# each of the 7 wavelengths: surface emission attenuated by the total
# optical depth plus the weighted atmospheric emission from each layer,
# inverted back to temperature with planckInvert.
from collections import defaultdict
from a301lib.radiation import Blambda,planckInvert
Tbright=defaultdict(list)
for the_sound,the_df in sound_dict.items():
    for the_wave_m,the_k in zip(wavelengths,k_lambda_list):
        tau = find_tau(r_gas,the_k,the_df)
        tau_tot=tau[-1]
        temps=the_df['temp'].values
        # surface contribution: Planck radiance at the surface temperature
        # attenuated through the whole column
        Bsfc=Blambda(the_wave_m,temps[0])
        sfc_contrib = Bsfc*np.exp(-tau_tot)
        # atmospheric contribution: layer Planck radiance weighted by the
        # change in transmittance across each layer
        mid_temps=(temps[1:] + temps[:-1])/2.
        Batm=Blambda(the_wave_m,mid_temps)
        trans = np.exp(-(tau_tot - tau))
        weights = np.diff(trans)
        atm_contrib = np.sum(Batm*weights)
        Lsat = sfc_contrib + atm_contrib
        the_temp = planckInvert(the_wave_m,Lsat)
        Tbright[the_sound].append(the_temp)
    Tbright[the_sound]=np.array(Tbright[the_sound])
fig, ax = plt.subplots(1,1)
the_wave_um = wavelengths*1.e6
for the_sound,the_temps in Tbright.items():
    ax.plot(the_wave_um,the_temps,label=the_sound)
ax.legend(loc='best')
ax.set(ylabel="Tbright (K)",xlabel='wavelength (microns)')
print(the_wave_um)
# In[ ]:
| {
"content_hash": "b84c76b3120ce33d9d789b95639994b4",
"timestamp": "",
"source": "github",
"line_count": 248,
"max_line_length": 914,
"avg_line_length": 34.318548387096776,
"alnum_prop": 0.6832334625778405,
"repo_name": "a301-teaching/a301_code",
"id": "30de6af0786a1330cedc66cd9cf159d820f502bd",
"size": "8564",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "notebooks/python/rad_calc_solution.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "69"
},
{
"name": "Jupyter Notebook",
"bytes": "54523739"
},
{
"name": "Python",
"bytes": "467164"
},
{
"name": "Shell",
"bytes": "314"
}
],
"symlink_target": ""
} |
# Packaging metadata for the nlcd helper library.
# NOTE(review): distutils is deprecated (removed in Python 3.12); consider
# migrating to setuptools.
from distutils.core import setup
setup(name='nlcd',
      packages=['nlcd'],
      version='0.2',
      description='This library provides some helpers related to the National Land Cover Database (NLCD). The NLCD is created by the Multi-Resolution Land Characteristics Consortium (MRLC) and is available at: https://www.mrlc.gov/',
      author='Zac Miller',
      author_email='zac@informatical.info',
      url='https://github.com/jzmiller1/nlcd',
      download_url='https://github.com/jzmiller1/nlcd/archive/0.2.tar.gz',
      keywords=['NLCD', 'MRLC', 'Remote Sensing', 'Landcover', 'Land Cover'],
      classifiers=[],
      )
| {
"content_hash": "04ae6d6395cf964041b6edd95c32c3d0",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 234,
"avg_line_length": 48.84615384615385,
"alnum_prop": 0.6755905511811023,
"repo_name": "jzmiller1/nlcd",
"id": "4a4b7fb6c2c25d40555b47728da0cfd005106311",
"size": "635",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "2665"
}
],
"symlink_target": ""
} |
'''Jamenson Compiler Front End
'''
from __future__ import absolute_import
from __future__ import with_statement
from hlab.pathutils import FilePath
from optparse import OptionParser
from .util import err, msg, vmsg, vemit, set_verbosity, get_prog_name, evaling_context
from ..compiler.util import timing
from ..compiler.block import BlockCompiler
from ..runtime.require import state as require_state
from ..runtime import state as runtime_state
from ..runtime.symbol import get_package, use_package
from ..runtime.compiled import write_code
def main():
    """Entry point: parse options into the module-global config and compile
    each target file in turn."""
    global config
    config = configure()
    if not config.nouser:
        #populate user package through side effects
        from ..core import user
    for target in config.targets:
        compile_target(target)
class vtiming(timing):
    """A timing context that announces its start and elapsed time through
    vemit at verbosity level *v*."""
    def __init__(self, v, name):
        super(vtiming, self).__init__(name)
        self.v = v
        # announce the timed section up front; elapsed time follows in emit()
        vemit(self.v, '%s: %s: ' % (get_prog_name(), self.name,))
    def emit(self):
        vemit(self.v, self.strtime + '\n')
def compile_target(target):
    """Compile a single .jms target to its .jmc sibling.

    Relies on the module-global config (set by main) for the initial
    package; '-' is treated as "no file" for the evaluation context.
    """
    dest = get_dest_path(target)
    with evaling_context(FilePath(target) if target != '-' else None, config.package):
        compiler = FrontEndCompiler.create(filename=target.abspath())
        vmsg(0, 'compiling %s', target)
        with timing() as time:
            code = compiler.construct_code()
        vmsg(1, 'elapsed time %s', time.strtime)
        write_code(code, dest)
def get_dest_path(target):
    """Return the compiled-output path next to *target*.

    A trailing '.jms' extension is replaced by '.jmc'; any other name
    simply gets '.jmc' appended.
    """
    stem = target.basename()
    if stem.endswith('.jms'):
        stem = stem[:-len('.jms')]
    return target.sibling(stem + '.jmc')
def configure():
    """Parse command line options and compile targets.

    Returns the optparse options object, augmented with a .targets list of
    existing FilePath targets; non-existent paths are skipped with a
    message. Also sets the global verbosity level as a side effect.
    """
    # The original constructed a throwaway OptionParser() and immediately
    # rebound the name; the dead construction is removed here.
    parser = OptionParser(usage='%prog [OPTIONS] [TARGETS]',
                          epilog=__doc__,
                          add_help_option=False)
    parser.add_option('-?','--help',
                      action='help',
                      help='show this help message and exit')
    parser.add_option('-v','--verbose',
                      dest='verbosity',
                      default=0,
                      action='count',
                      help='increment extent of messages during compiling')
    parser.add_option('-p','--package',
                      dest='package',
                      default=None,
                      action='store',
                      metavar='PACKAGE',
                      help='specify initial package for compiling')
    parser.add_option('--nouser',
                      dest='nouser',
                      default=False,
                      action='store_true',
                      help="don't populate user package")
    options,args = parser.parse_args()
    # keep only targets that actually exist on disk
    targets = []
    for arg in args:
        target = FilePath(arg)
        if not target.exists():
            msg('skipping non-existent file %s', target)
            continue
        targets.append(target)
    if not targets:
        msg('no targets specified')
    options.targets = targets
    set_verbosity(options.verbosity)
    return options
class FrontEndCompiler(BlockCompiler):
    """BlockCompiler subclass that reports progress and timing of each
    compilation phase through vmsg/vtiming."""
    def read_all_top_level_forms(self):
        # time the whole form-loading phase at verbosity 1
        vmsg(1, 'loading forms')
        with timing() as time:
            op = super(FrontEndCompiler, self).read_all_top_level_forms()
        vmsg(1, 'loaded forms in %s', time.strtime)
        return op
    def read_and_translate_form(self):
        # time each individual form at verbosity 2, labelled file.lineno
        self.stream.strip_whitespace()
        with vtiming(2, 'load %s.%d' % (self.stream.filename.basename(), self.stream.lineno)):
            return super(FrontEndCompiler, self).read_and_translate_form()
    def block_transform(self, top_expr):
        with vtiming(1, 'transforming top level expressions'):
            return super(FrontEndCompiler, self).block_transform(top_expr)
    def compile_top_expr(self, top_expr):
        with vtiming(1, 'generating code'):
            return super(FrontEndCompiler, self).compile_top_expr(top_expr)
| {
"content_hash": "386520a415c0f51c7e557696e2950a23",
"timestamp": "",
"source": "github",
"line_count": 115,
"max_line_length": 94,
"avg_line_length": 34.10434782608696,
"alnum_prop": 0.5915349311575727,
"repo_name": "matthagy/Jamenson",
"id": "e1ee1204f8bf6b80a71430b8fb5cb1f6474037b2",
"size": "3922",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "jamenson/bin/Attic/jmc.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "444789"
}
],
"symlink_target": ""
} |
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.8.2
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
# Auto-generated swagger model for the Kubernetes v1 ContainerPort object;
# regenerate rather than hand-edit.
class V1ContainerPort(object):
    """
    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """
    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    swagger_types = {
        'container_port': 'int',
        'host_ip': 'str',
        'host_port': 'int',
        'name': 'str',
        'protocol': 'str'
    }
    # maps python attribute names to the JSON keys used on the wire
    attribute_map = {
        'container_port': 'containerPort',
        'host_ip': 'hostIP',
        'host_port': 'hostPort',
        'name': 'name',
        'protocol': 'protocol'
    }
    def __init__(self, container_port=None, host_ip=None, host_port=None, name=None, protocol=None):
        """
        V1ContainerPort - a model defined in Swagger
        """
        self._container_port = None
        self._host_ip = None
        self._host_port = None
        self._name = None
        self._protocol = None
        self.discriminator = None
        # container_port is required: its setter rejects None; the others
        # are optional and only assigned when provided
        self.container_port = container_port
        if host_ip is not None:
          self.host_ip = host_ip
        if host_port is not None:
          self.host_port = host_port
        if name is not None:
          self.name = name
        if protocol is not None:
          self.protocol = protocol
    @property
    def container_port(self):
        """
        Gets the container_port of this V1ContainerPort.
        Number of port to expose on the pod's IP address. This must be a valid port number, 0 < x < 65536.
        :return: The container_port of this V1ContainerPort.
        :rtype: int
        """
        return self._container_port
    @container_port.setter
    def container_port(self, container_port):
        """
        Sets the container_port of this V1ContainerPort.
        Number of port to expose on the pod's IP address. This must be a valid port number, 0 < x < 65536.
        :param container_port: The container_port of this V1ContainerPort.
        :type: int
        """
        if container_port is None:
            raise ValueError("Invalid value for `container_port`, must not be `None`")
        self._container_port = container_port
    @property
    def host_ip(self):
        """
        Gets the host_ip of this V1ContainerPort.
        What host IP to bind the external port to.
        :return: The host_ip of this V1ContainerPort.
        :rtype: str
        """
        return self._host_ip
    @host_ip.setter
    def host_ip(self, host_ip):
        """
        Sets the host_ip of this V1ContainerPort.
        What host IP to bind the external port to.
        :param host_ip: The host_ip of this V1ContainerPort.
        :type: str
        """
        self._host_ip = host_ip
    @property
    def host_port(self):
        """
        Gets the host_port of this V1ContainerPort.
        Number of port to expose on the host. If specified, this must be a valid port number, 0 < x < 65536. If HostNetwork is specified, this must match ContainerPort. Most containers do not need this.
        :return: The host_port of this V1ContainerPort.
        :rtype: int
        """
        return self._host_port
    @host_port.setter
    def host_port(self, host_port):
        """
        Sets the host_port of this V1ContainerPort.
        Number of port to expose on the host. If specified, this must be a valid port number, 0 < x < 65536. If HostNetwork is specified, this must match ContainerPort. Most containers do not need this.
        :param host_port: The host_port of this V1ContainerPort.
        :type: int
        """
        self._host_port = host_port
    @property
    def name(self):
        """
        Gets the name of this V1ContainerPort.
        If specified, this must be an IANA_SVC_NAME and unique within the pod. Each named port in a pod must have a unique name. Name for the port that can be referred to by services.
        :return: The name of this V1ContainerPort.
        :rtype: str
        """
        return self._name
    @name.setter
    def name(self, name):
        """
        Sets the name of this V1ContainerPort.
        If specified, this must be an IANA_SVC_NAME and unique within the pod. Each named port in a pod must have a unique name. Name for the port that can be referred to by services.
        :param name: The name of this V1ContainerPort.
        :type: str
        """
        self._name = name
    @property
    def protocol(self):
        """
        Gets the protocol of this V1ContainerPort.
        Protocol for port. Must be UDP or TCP. Defaults to \"TCP\".
        :return: The protocol of this V1ContainerPort.
        :rtype: str
        """
        return self._protocol
    @protocol.setter
    def protocol(self, protocol):
        """
        Sets the protocol of this V1ContainerPort.
        Protocol for port. Must be UDP or TCP. Defaults to \"TCP\".
        :param protocol: The protocol of this V1ContainerPort.
        :type: str
        """
        self._protocol = protocol
    def to_dict(self):
        """
        Returns the model properties as a dict
        """
        result = {}
        # recursively serialize nested models, lists and dicts of models
        for attr, _ in iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        return result
    def to_str(self):
        """
        Returns the string representation of the model
        """
        return pformat(self.to_dict())
    def __repr__(self):
        """
        For `print` and `pprint`
        """
        return self.to_str()
    def __eq__(self, other):
        """
        Returns true if both objects are equal
        """
        if not isinstance(other, V1ContainerPort):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """
        Returns true if both objects are not equal
        """
        return not self == other
| {
"content_hash": "9f3e89554406cb3becc8f9ce029bbddd",
"timestamp": "",
"source": "github",
"line_count": 237,
"max_line_length": 202,
"avg_line_length": 29.261603375527425,
"alnum_prop": 0.5674116798846431,
"repo_name": "mbohlool/client-python",
"id": "7f13e602d59322c507a71ad2a35c66750c1b37c7",
"size": "6952",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "kubernetes/client/models/v1_container_port.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "8417639"
},
{
"name": "Shell",
"bytes": "16830"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import models, migrations
import django.utils.timezone
import taggit.managers
import django_extensions.db.fields
import django_extensions.db.fields.json
# Auto-generated initial migration for the cyfullcontact app: creates the
# ContactInfo table plus its related per-contact detail tables (chats,
# demographics, organizations, photos, scores, social profiles, topics,
# websites).
class Migration(migrations.Migration):
    dependencies = [
        ('taggit', '0001_initial'),
    ]
    operations = [
        migrations.CreateModel(
            name='Chat',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('handle', models.CharField(max_length=70, null=True, blank=True)),
                ('client', models.CharField(max_length=70, null=True, blank=True)),
            ],
        ),
        migrations.CreateModel(
            name='ContactInfo',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('created', django_extensions.db.fields.CreationDateTimeField(default=django.utils.timezone.now, verbose_name='created', editable=False, blank=True)),
                ('modified', django_extensions.db.fields.ModificationDateTimeField(default=django.utils.timezone.now, verbose_name='modified', editable=False, blank=True)),
                ('lookup_email', models.EmailField(unique=True, max_length=254)),
                ('request_id', models.CharField(max_length=255, null=True, blank=True)),
                ('likelihood', models.FloatField(null=True, blank=True)),
                ('family_name', models.CharField(max_length=35, null=True, blank=True)),
                ('given_name', models.CharField(max_length=35, null=True, blank=True)),
                ('full_name', models.CharField(max_length=70, null=True, blank=True)),
                ('raw_json', django_extensions.db.fields.json.JSONField()),
                ('tags', taggit.managers.TaggableManager(to='taggit.Tag', through='taggit.TaggedItem', blank=True, help_text='A comma-separated list of tags.', verbose_name='Tags')),
            ],
            options={
                'verbose_name_plural': 'Contact information',
            },
        ),
        migrations.CreateModel(
            name='Demographic',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('location_general', models.CharField(max_length=255, null=True, blank=True)),
                ('normalized_location', models.CharField(max_length=255, null=True, blank=True)),
                ('deduced_location', models.CharField(max_length=255, null=True, blank=True)),
                ('city', models.CharField(max_length=70, null=True, blank=True)),
                ('city_deduced', models.NullBooleanField()),
                ('state_name', models.CharField(max_length=255, null=True, blank=True)),
                ('state_code', models.CharField(max_length=255, null=True, blank=True)),
                ('state_deduced', models.NullBooleanField()),
                ('location_likelihood', models.FloatField(null=True, blank=True)),
                ('age', models.CharField(max_length=255, null=True, blank=True)),
                ('gender', models.CharField(max_length=255, null=True, blank=True)),
                ('age_range', models.CharField(max_length=255, null=True, blank=True)),
                ('contact', models.ForeignKey(related_name='demographics', to='cyfullcontact.ContactInfo')),
            ],
        ),
        migrations.CreateModel(
            name='Organization',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('title', models.CharField(max_length=255, null=True, blank=True)),
                ('name', models.CharField(max_length=255, null=True, blank=True)),
                ('start_date', models.DateField(null=True, blank=True)),
                ('end_date', models.DateField(null=True, blank=True)),
                ('is_primary', models.NullBooleanField()),
                ('is_current', models.NullBooleanField()),
                ('contact', models.ForeignKey(related_name='organizations', to='cyfullcontact.ContactInfo')),
            ],
        ),
        migrations.CreateModel(
            name='Photo',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('type_id', models.CharField(max_length=255, null=True, blank=True)),
                ('type_name', models.CharField(max_length=255, null=True, blank=True)),
                ('url', models.URLField(null=True, blank=True)),
                ('is_primary', models.NullBooleanField()),
                ('contact', models.ForeignKey(related_name='photos', to='cyfullcontact.ContactInfo')),
            ],
        ),
        migrations.CreateModel(
            name='Score',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('value', models.CharField(max_length=255, null=True, blank=True)),
                ('type', models.CharField(max_length=255, null=True, blank=True)),
                ('provider', models.CharField(max_length=255, null=True, blank=True)),
                ('contact', models.ForeignKey(related_name='scores', to='cyfullcontact.ContactInfo')),
            ],
        ),
        migrations.CreateModel(
            name='SocialProfile',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('type_id', models.CharField(max_length=255, null=True, blank=True)),
                ('type_name', models.CharField(max_length=255, null=True, blank=True)),
                ('profile_id', models.CharField(max_length=255, null=True, blank=True)),
                ('username', models.CharField(max_length=255, null=True, blank=True)),
                ('url', models.URLField(null=True, blank=True)),
                ('bio', models.TextField(null=True, blank=True)),
                ('rss', models.CharField(max_length=255, null=True, blank=True)),
                ('following', models.IntegerField(null=True, blank=True)),
                ('followers', models.IntegerField(null=True, blank=True)),
                ('contact', models.ForeignKey(related_name='social_profiles', to='cyfullcontact.ContactInfo')),
            ],
        ),
        migrations.CreateModel(
            name='Topic',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('value', models.CharField(max_length=255, null=True, blank=True)),
                ('provider', models.CharField(max_length=255, null=True, blank=True)),
                ('contact', models.ForeignKey(related_name='topics', to='cyfullcontact.ContactInfo')),
            ],
        ),
        migrations.CreateModel(
            name='Website',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('url', models.URLField()),
                ('contact', models.ForeignKey(related_name='websites', to='cyfullcontact.ContactInfo')),
            ],
        ),
        migrations.AddField(
            model_name='chat',
            name='contact',
            field=models.ForeignKey(related_name='chats', to='cyfullcontact.ContactInfo'),
        ),
    ]
| {
"content_hash": "895a4b96b0ff6859c8bca072903c5076",
"timestamp": "",
"source": "github",
"line_count": 135,
"max_line_length": 182,
"avg_line_length": 56.11851851851852,
"alnum_prop": 0.5789334741288279,
"repo_name": "shawnhermans/cyborgcrm",
"id": "16d2746de3a91218b1dfd51bfa91147e8cf3735e",
"size": "7600",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cyidentity/cyfullcontact/migrations/0001_initial.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "26682"
},
{
"name": "DIGITAL Command Language",
"bytes": "667"
},
{
"name": "HTML",
"bytes": "405415"
},
{
"name": "JavaScript",
"bytes": "735"
},
{
"name": "Python",
"bytes": "100893"
},
{
"name": "Shell",
"bytes": "725"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Add API-level CRUD permissions to the ``member`` model's Meta options."""
    dependencies = [
        ('members', '0013_merge_20170811_1500'),
    ]
    operations = [
        migrations.AlterModelOptions(
            name='member',
            # NOTE(review): 'api_read_member' and 'api_read_full_member' carry the
            # same human-readable description -- presumably intentional, but verify.
            options={'permissions': (('api_read_member', 'Can view members through the API'), ('api_read_full_member', 'Can view members through the API'), ('api_add_member', 'Can add members through the API'), ('api_change_member', 'Can change members through the API'), ('api_delete_member', 'Can delete members through the API')), 'verbose_name': 'member', 'verbose_name_plural': 'members'},
        ),
    ]
| {
"content_hash": "7801dc6b1912ddeed07f3d750cd5a270",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 394,
"avg_line_length": 40.529411764705884,
"alnum_prop": 0.6502177068214804,
"repo_name": "onepercentclub/bluebottle",
"id": "319a60e98faead39ec658108adce0b928750b6c5",
"size": "762",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bluebottle/members/migrations/0014_auto_20170816_1614.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "41694"
},
{
"name": "HTML",
"bytes": "246695"
},
{
"name": "Handlebars",
"bytes": "63"
},
{
"name": "JavaScript",
"bytes": "139123"
},
{
"name": "PHP",
"bytes": "35"
},
{
"name": "PLpgSQL",
"bytes": "1369882"
},
{
"name": "PostScript",
"bytes": "2927"
},
{
"name": "Python",
"bytes": "4983116"
},
{
"name": "Rich Text Format",
"bytes": "39109"
},
{
"name": "SCSS",
"bytes": "99555"
},
{
"name": "Shell",
"bytes": "3068"
},
{
"name": "Smarty",
"bytes": "3814"
}
],
"symlink_target": ""
} |
import unittest
import numpy as np
import flavio
import math
# Time-unit constants: one second and one picosecond, presumably expressed in
# flavio's natural units (GeV^-1, since 1 s ~= 1/hbar ~= 1.519e24 GeV^-1) --
# TODO confirm against flavio's units conventions.
s = 1.519267515435317e+24
ps = 1e-12*s
class TestIntegrate(unittest.TestCase):
    def test_nintegrate(self):
        """Check both numeric integrators on sin(x) over [0, 2]."""
        # The exact value of the integral is 1 - cos(2) == 2*sin(1)**2.
        lower, upper = 0, 2
        exact = 2 * math.sin(1)**2
        tolerance = 0.01 * exact
        result = flavio.math.integrate.nintegrate(math.sin, lower, upper)
        self.assertAlmostEqual(result, exact, delta=tolerance)
        fast_result = flavio.math.integrate.nintegrate_fast(math.sin, lower, upper)
        self.assertAlmostEqual(fast_result, exact, delta=tolerance)
| {
"content_hash": "f2e05fe552d195947ddeac0edd4f3f14",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 112,
"avg_line_length": 29.352941176470587,
"alnum_prop": 0.6793587174348698,
"repo_name": "flav-io/flavio",
"id": "7d44e335e7873cdc8248702dee61dc9cedda19ee",
"size": "499",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "flavio/math/test_integrate.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "54358"
},
{
"name": "Python",
"bytes": "1094859"
},
{
"name": "Shell",
"bytes": "230"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: influxdb_user
short_description: Manage InfluxDB users
description:
- Manage InfluxDB users
version_added: 2.5
author: "Vitaliy Zhhuta (@zhhuta)"
requirements:
- "python >= 2.6"
- "influxdb >= 0.9"
options:
user_name:
description:
- Name of the user.
required: True
user_password:
description:
- Password to be set for the user.
required: false
admin:
description:
- Whether the user should be in the admin role or not.
- Since version 2.8, the role will also be updated.
default: no
type: bool
state:
description:
- State of the user.
choices: [ present, absent ]
default: present
grants:
description:
- Privileges to grant to this user. Takes a list of dicts containing the
"database" and "privilege" keys.
- If this argument is not provided, the current grants will be left alone.
If an empty list is provided, all grants for the user will be removed.
version_added: 2.8
extends_documentation_fragment: influxdb
'''
EXAMPLES = '''
- name: Create a user on localhost using default login credentials
influxdb_user:
user_name: john
user_password: s3cr3t
- name: Create a user on localhost using custom login credentials
influxdb_user:
user_name: john
user_password: s3cr3t
login_username: "{{ influxdb_username }}"
login_password: "{{ influxdb_password }}"
- name: Create an admin user on a remote host using custom login credentials
influxdb_user:
user_name: john
user_password: s3cr3t
admin: yes
hostname: "{{ influxdb_hostname }}"
login_username: "{{ influxdb_username }}"
login_password: "{{ influxdb_password }}"
- name: Create a user on localhost with privileges
influxdb_user:
user_name: john
user_password: s3cr3t
login_username: "{{ influxdb_username }}"
login_password: "{{ influxdb_password }}"
grants:
- database: 'collectd'
privilege: 'WRITE'
- database: 'graphite'
privilege: 'READ'
- name: Destroy a user using custom login credentials
influxdb_user:
user_name: john
login_username: "{{ influxdb_username }}"
login_password: "{{ influxdb_password }}"
state: absent
'''
RETURN = '''
#only defaults
'''
import ansible.module_utils.urls
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
import ansible.module_utils.influxdb as influx
def find_user(module, client, user_name):
    """Look up *user_name* among the users known to the InfluxDB server.

    Returns the matching user dict, or None when no such user exists.
    Connection or client errors abort the module via ``fail_json``.
    """
    match = None
    try:
        users = client.get_list_users()
        match = next((entry for entry in users if entry['user'] == user_name), None)
    except (ansible.module_utils.urls.ConnectionError, influx.exceptions.InfluxDBClientError) as exc:
        module.fail_json(msg=to_native(exc))
    return match
def check_user_password(module, client, user_name, user_password):
    """Return True if *user_name*/*user_password* authenticate successfully.

    Probes the server by switching the client to the candidate credentials
    and issuing a request.  A 401 client error means the password is wrong
    (returns False); connection errors abort the module.  Any other client
    error falls through and the function still returns True.
    """
    try:
        client.switch_user(user_name, user_password)
        # Force an authenticated round-trip to the server.
        client.get_list_users()
    except influx.exceptions.InfluxDBClientError as e:
        if e.code == 401:
            return False
    except ansible.module_utils.urls.ConnectionError as e:
        module.fail_json(msg=to_native(e))
    finally:
        # restore previous user
        client.switch_user(module.params['username'], module.params['password'])
    return True
def set_user_password(module, client, user_name, user_password):
    """Update an existing InfluxDB user's password (no-op in check mode)."""
    if module.check_mode:
        return
    try:
        client.set_user_password(user_name, user_password)
    except ansible.module_utils.urls.ConnectionError as exc:
        module.fail_json(msg=to_native(exc))
def create_user(module, client, user_name, user_password, admin):
    """Create a new InfluxDB user, optionally as admin (no-op in check mode)."""
    if module.check_mode:
        return
    try:
        client.create_user(user_name, user_password, admin)
    except ansible.module_utils.urls.ConnectionError as exc:
        module.fail_json(msg=to_native(exc))
def drop_user(module, client, user_name):
    """Delete an InfluxDB user, then exit the module reporting a change.

    In check mode the deletion itself is skipped, but the module still
    exits with changed=True, mirroring what a real run would report.
    """
    if not module.check_mode:
        try:
            client.drop_user(user_name)
        except influx.exceptions.InfluxDBClientError as exc:
            module.fail_json(msg=exc.content)
    # exit_json() terminates module execution with the final result.
    module.exit_json(changed=True)
def set_user_grants(module, client, user_name, grants):
    """Reconcile the user's database privileges with the desired *grants*.

    *grants* is a list of dicts with "database" and "privilege" keys.
    Privileges the server reports but that are not desired are revoked;
    desired privileges the server lacks are granted.  Returns True if
    anything changed (or would change, in check mode).
    """
    changed = False
    try:
        current_grants = client.get_list_privileges(user_name)
        # Normalize the server's wording to match the module's input format.
        # The original code mutated current_grants while enumerating it
        # (del inside the loop), which skipped the element following every
        # removed 'NO PRIVILEGES' entry; build a new list instead.
        parsed_grants = []
        for entry in current_grants:
            if entry['privilege'] == 'NO PRIVILEGES':
                # Placeholder rows carry no actual grant; drop them.
                continue
            if entry['privilege'] == 'ALL PRIVILEGES':
                entry['privilege'] = 'ALL'
            parsed_grants.append(entry)
        # Revoke anything the server has that is not in the desired set.
        for current_grant in parsed_grants:
            if current_grant not in grants:
                if not module.check_mode:
                    client.revoke_privilege(current_grant['privilege'],
                                            current_grant['database'],
                                            user_name)
                changed = True
        # Grant anything desired that the server does not have yet.
        for grant in grants:
            if grant not in parsed_grants:
                if not module.check_mode:
                    client.grant_privilege(grant['privilege'],
                                           grant['database'],
                                           user_name)
                changed = True
    except influx.exceptions.InfluxDBClientError as e:
        module.fail_json(msg=e.content)
    return changed
def main():
    """Module entry point: ensure the requested InfluxDB user state."""
    argument_spec = influx.InfluxDb.influxdb_argument_spec()
    argument_spec.update(
        state=dict(default='present', type='str', choices=['present', 'absent']),
        user_name=dict(required=True, type='str'),
        user_password=dict(required=False, type='str', no_log=True),
        admin=dict(default='False', type='bool'),
        grants=dict(type='list')
    )
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True
    )
    state = module.params['state']
    user_name = module.params['user_name']
    user_password = module.params['user_password']
    admin = module.params['admin']
    grants = module.params['grants']
    influxdb = influx.InfluxDb(module)
    client = influxdb.connect_to_influxdb()
    user = find_user(module, client, user_name)
    changed = False
    if state == 'present':
        if user:
            # Existing user: update the password only when one was supplied
            # and it does not already authenticate.
            if not check_user_password(module, client, user_name, user_password) and user_password is not None:
                set_user_password(module, client, user_name, user_password)
                changed = True
            # Reconcile the admin role with the requested setting.
            # NOTE(review): unlike the other helpers, this grant/revoke is
            # not guarded by module.check_mode -- verify this is intended.
            try:
                if admin and not user['admin']:
                    client.grant_admin_privileges(user_name)
                    changed = True
                elif not admin and user['admin']:
                    client.revoke_admin_privileges(user_name)
                    changed = True
            except influx.exceptions.InfluxDBClientError as e:
                module.fail_json(msg=to_native(e))
        else:
            # New user: the client API requires a password, so default to ''.
            user_password = user_password or ''
            create_user(module, client, user_name, user_password, admin)
            changed = True
        if grants is not None:
            # An explicit (possibly empty) grants list is reconciled;
            # omitting the option leaves existing grants untouched.
            if set_user_grants(module, client, user_name, grants):
                changed = True
        module.exit_json(changed=changed)
    if state == 'absent':
        if user:
            # drop_user() exits the module itself with changed=True.
            drop_user(module, client, user_name)
        else:
            module.exit_json(changed=False)
if __name__ == '__main__':
    main()
| {
"content_hash": "4cfb2d1295b1b0dd7b5b031d2c881388",
"timestamp": "",
"source": "github",
"line_count": 254,
"max_line_length": 111,
"avg_line_length": 31.19685039370079,
"alnum_prop": 0.6091620393740536,
"repo_name": "thaim/ansible",
"id": "ed0728cbfa0cca7edc10a28b76f3c5aa5240ae1e",
"size": "8172",
"binary": false,
"copies": "6",
"ref": "refs/heads/fix-broken-link",
"path": "lib/ansible/modules/database/influxdb/influxdb_user.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7"
},
{
"name": "Shell",
"bytes": "246"
}
],
"symlink_target": ""
} |
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """Create the accounts app's initial tables (South auto-generated).

    Adds the PasswordReset, Account (extending auth.User via a one-to-one
    user_ptr) and EmailConfirmation models.
    """
    def forwards(self, orm):
        # Adding model 'PasswordReset'
        db.create_table('accounts_passwordreset', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('account', self.gf('django.db.models.fields.related.ForeignKey')(related_name='password_resets', to=orm['accounts.Account'])),
            ('key', self.gf('django.db.models.fields.CharField')(max_length=40)),
            ('last_sent', self.gf('django.db.models.fields.DateTimeField')(null=True)),
        ))
        db.send_create_signal('accounts', ['PasswordReset'])
        # Adding model 'Account'
        db.create_table('accounts_account', (
            ('user_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['auth.User'], unique=True, primary_key=True)),
            ('sex', self.gf('django.db.models.fields.CharField')(max_length=1)),
            ('birthday', self.gf('django.db.models.fields.DateField')()),
        ))
        db.send_create_signal('accounts', ['Account'])
        # Adding model 'EmailConfirmation'
        db.create_table('accounts_emailconfirmation', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('account', self.gf('django.db.models.fields.related.ForeignKey')(related_name='email_confirmations', to=orm['accounts.Account'])),
            ('email', self.gf('django.db.models.fields.EmailField')(max_length=75)),
            ('key', self.gf('django.db.models.fields.CharField')(max_length=40)),
            ('last_sent', self.gf('django.db.models.fields.DateTimeField')(null=True)),
        ))
        db.send_create_signal('accounts', ['EmailConfirmation'])
    def backwards(self, orm):
        # Deleting model 'PasswordReset'
        db.delete_table('accounts_passwordreset')
        # Deleting model 'Account'
        db.delete_table('accounts_account')
        # Deleting model 'EmailConfirmation'
        db.delete_table('accounts_emailconfirmation')
    # Frozen ORM snapshot used by South at migration time.
    models = {
        'accounts.account': {
            'Meta': {'object_name': 'Account', '_ormbases': ['auth.User']},
            'birthday': ('django.db.models.fields.DateField', [], {}),
            'sex': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
            'user_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True', 'primary_key': 'True'})
        },
        'accounts.emailconfirmation': {
            'Meta': {'object_name': 'EmailConfirmation'},
            'account': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'email_confirmations'", 'to': "orm['accounts.Account']"}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'key': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
            'last_sent': ('django.db.models.fields.DateTimeField', [], {'null': 'True'})
        },
        'accounts.passwordreset': {
            'Meta': {'object_name': 'PasswordReset'},
            'account': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'password_resets'", 'to': "orm['accounts.Account']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'key': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
            'last_sent': ('django.db.models.fields.DateTimeField', [], {'null': 'True'})
        },
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        }
    }
    complete_apps = ['accounts']
| {
"content_hash": "68610273daea1473ec11481ba0226ce4",
"timestamp": "",
"source": "github",
"line_count": 110,
"max_line_length": 182,
"avg_line_length": 62.018181818181816,
"alnum_prop": 0.5696276751685723,
"repo_name": "softak/webfaction_demo",
"id": "c59837656e70bb0d95eb72abf7174b7bb84e1372",
"size": "6840",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "apps/accounts/migrations/0003_auto__add_passwordreset__add_account__add_emailconfirmation.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CoffeeScript",
"bytes": "33283"
},
{
"name": "JavaScript",
"bytes": "984889"
},
{
"name": "Python",
"bytes": "8055804"
},
{
"name": "Shell",
"bytes": "3065"
}
],
"symlink_target": ""
} |
from collections import namedtuple, OrderedDict
from itertools import islice
from allele import Allele
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import matplotlib
import os, argparse, gzip, csv, pprint
""" Command line parsers """
parser = argparse.ArgumentParser()
parser.add_argument("files", nargs='+', help="tab-separated output of IgDiscover")
parser.add_argument("-o", "--out", help="file to output")
args = parser.parse_args()
""" Constants: update indices as necessary """
DELIM = "\t"
CIND = 0
VIND = 1
DIND = 2
JIND = 3
VERR = 20
CDR3 = 28 # This col gives nucleotide seq, use 29 for AA-seq
PLOTDIR = "geneplots"
def check_ncol(files):
    """Verify that all input files agree on the number of columns.

    Reads the first row of each gzipped, tab-separated file and aborts
    (SystemExit) as soon as a file's column count differs from the
    previous file's.
    """
    print("Checking %d files for consistency in number of columns..." % len(files))
    ncols = []
    for infile in files:
        with gzip.open(infile, 'rt') as f:
            reader = csv.reader(f, delimiter=DELIM)
            row = next(reader)
            ncols.append(len(row))
        if len(ncols) > 1 and ncols[-1] != ncols[-2]:
            # Bug fix: the original called sys.exit(...) but this module never
            # imports sys, so the mismatch branch raised NameError instead of
            # exiting.  SystemExit is the builtin exception sys.exit raises.
            raise SystemExit("Number of columns don't match!: " + str(ncols))
    print("Consistent number of columns found {0}".format(ncols))
def col_headers(infile):
    """Print the indexed column headers of a gzipped, tab-separated file."""
    with gzip.open(infile, 'rt') as handle:
        header_row = next(csv.reader(handle, delimiter=DELIM))
    print("Column headers for the first file:")
    for index, header in enumerate(header_row):
        print("[{0}]: {1}".format(index, header))
def getAllele(row, d):
    """ Checks if the given row contains an allele that exists already.
    If so the info for that allele is updated,
    otherwise a new object is created.

    *row* is one parsed IgDiscover output line; *d* maps gene names to
    lists of Allele objects and is updated in place.
    """
    # Gene name is the part of the V call before the '*' allele suffix.
    gene = row[VIND].split('*')[0]
    a = Allele(row[VIND])
    # NOTE(review): when the allele is new (either branch below), addRow()
    # is never called for this first row -- presumably Allele.__init__
    # records what is needed; verify against the Allele class.
    if gene not in d:
        d[gene] = [a]
    else:
        alleles = d[gene]
        if a in alleles:
            for allele in alleles:
                if allele == a:
                    # float(row[VERR]) == 0 -- presumably flags an
                    # error-free V assignment; confirm column semantics.
                    allele.addRow(row[CIND], row[DIND], row[JIND], row[CDR3], float(row[VERR]) == 0)
                    break
        else:
            d[gene].append(a)
def plotgene(genotypes, gene):
    """Plot per-donor allele usage of *gene* as paired stacked bars.

    For every donor carrying *gene*, one bar stacks the per-allele
    sequence counts and a second bar the per-allele unique-CDR3 counts,
    both normalized to fractions.  Returns matplotlib.pyplot with the
    figure drawn so the caller can save and close it.
    """
    data = {}
    data2= {}
    for donor, gt in genotypes.items():
        if gene not in gt:
            continue
        alleles = genotypes[donor][gene]
        datarow = {}
        datarow2= {}
        for a in alleles:
            # Sequence count and number of distinct CDR3s per allele.
            datarow[a.shortname()] = a.nseqs
            datarow2[a.shortname()]= len(a.cdr3s)
        data[donor] = datarow
        data2[donor]= datarow2
    #donors = ["Donor"+str(i) for i in range(1,len(data)+1)]
    df = pd.DataFrame.from_dict(data, orient="index")
    df2= pd.DataFrame.from_dict(data2,orient="index")
    #df.index = donors
    #df2.index= donors
    matplotlib.style.use("seaborn-colorblind")
    fig = plt.figure() # Create matplotlib figure
    ax = fig.add_subplot(111) # Create matplotlib axes
    ax2 = ax.twinx() # Create another axes that shares the same x-axis as ax.
    wid = 0.25
    # Normalize each donor's row to fractions so every stacked bar sums to 1.
    df_scaled = df.div(df.sum(axis=1), axis=0)
    df2_scaled = df2.div(df2.sum(axis=1), axis=0)
    # position=1 / position=0 offsets the two bar series side by side.
    df2_scaled.plot.bar(stacked=True, ax=ax2, width=wid, position=1,legend=False)
    df_scaled.plot.bar(stacked=True, ax=ax, width=wid, position=0, legend=False)
    fig.autofmt_xdate()
    #ax.set_ylabel("relative sequence count")
    ax.legend(loc='center left', bbox_to_anchor=(1.05, 0.5))
    #ax2.set_ylabel("relative unique CDR3 count")
    #ax.set_ylim(top = 1.5)
    #ax2.set_ylim(top= 1.5)
    plt.title(gene + " expression \n sequence counts (left) and unique CDR3s (right)")
    return plt
def main():
    """Parse the IgDiscover files given on the command line and write
    one expression plot per gene into PLOTDIR."""
    check_ncol(args.files)
    col_headers(args.files[0])
    # genotypes: donor label -> {gene name -> [Allele, ...]}
    genotypes = {}
    for i in range(0,len(args.files)):
        f = args.files[i]
        print("Processing file: {0}".format(f))
        acc = {}
        # NOTE(review): files without a .gz suffix are silently skipped,
        # leaving that donor with an empty genotype -- confirm intended.
        if f.endswith(".gz"):
            reader = csv.reader(gzip.open(f, 'rt'), delimiter=DELIM)
            header = next(reader)
            for row in reader:
                getAllele(row, acc)
        genotypes["Donor"+str(i+1)] = acc
    #pprint.pprint(genotypes[0])
    if not os.path.exists(PLOTDIR):
        os.mkdir(PLOTDIR)
    # Plot each gene exactly once, pooling data from all donors.
    genes = set()
    for donor,gt in genotypes.items():
        for g in gt.keys():
            if g not in genes:
                figure = plotgene(genotypes, g)
                plotfile = os.path.join(PLOTDIR, g+".pdf")
                figure.savefig(plotfile, bbox_inches="tight")
                plt.close()
                genes.add(g)
| {
"content_hash": "4e8d6f91e865c7fe31848a37f6e16a23",
"timestamp": "",
"source": "github",
"line_count": 149,
"max_line_length": 101,
"avg_line_length": 30.355704697986578,
"alnum_prop": 0.5827990271943401,
"repo_name": "ukirik/giggle",
"id": "7e8d7cc243073d27b5556f334e817e8cc312b94c",
"size": "4523",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "plotgene.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "36337"
}
],
"symlink_target": ""
} |
import asyncio
import contextlib
import datetime
import os # noqa
import pathlib
import pickle
import re
import warnings
from collections import defaultdict
from http.cookies import BaseCookie, Morsel, SimpleCookie
from typing import ( # noqa
DefaultDict,
Dict,
Iterable,
Iterator,
List,
Mapping,
Optional,
Set,
Tuple,
Union,
cast,
)
from yarl import URL
from .abc import AbstractCookieJar, ClearCookiePredicate
from .helpers import is_ip_address, next_whole_second
from .typedefs import LooseCookies, PathLike, StrOrURL
__all__ = ("CookieJar", "DummyCookieJar")
CookieItem = Union[str, "Morsel[str]"]
class CookieJar(AbstractCookieJar):
    """Implements cookie storage adhering to RFC 6265."""
    # Tokenizer for cookie date strings per the RFC 6265 date grammar:
    # skip delimiters, capture runs of non-delimiter characters.
    DATE_TOKENS_RE = re.compile(
        r"[\x09\x20-\x2F\x3B-\x40\x5B-\x60\x7B-\x7E]*"
        r"(?P<token>[\x00-\x08\x0A-\x1F\d:a-zA-Z\x7F-\xFF]+)"
    )
    DATE_HMS_TIME_RE = re.compile(r"(\d{1,2}):(\d{1,2}):(\d{1,2})")
    DATE_DAY_OF_MONTH_RE = re.compile(r"(\d{1,2})")
    DATE_MONTH_RE = re.compile(
        "(jan)|(feb)|(mar)|(apr)|(may)|(jun)|(jul)|" "(aug)|(sep)|(oct)|(nov)|(dec)",
        re.I,
    )
    DATE_YEAR_RE = re.compile(r"(\d{2,4})")
    MAX_TIME = datetime.datetime.max.replace(tzinfo=datetime.timezone.utc)
    # Largest timestamp representable on 32-bit platforms (see #4515).
    MAX_32BIT_TIME = datetime.datetime.utcfromtimestamp(2 ** 31 - 1)
    def __init__(
        self,
        *,
        unsafe: bool = False,
        quote_cookie: bool = True,
        treat_as_secure_origin: Union[StrOrURL, List[StrOrURL], None] = None
    ) -> None:
        self._loop = asyncio.get_running_loop()
        # Cookies are stored per-domain as SimpleCookie instances.
        self._cookies = defaultdict(
            SimpleCookie
        )  # type: DefaultDict[str, SimpleCookie[str]]
        self._host_only_cookies = set()  # type: Set[Tuple[str, str]]
        self._unsafe = unsafe
        self._quote_cookie = quote_cookie
        # Normalize treat_as_secure_origin to a list of URL origins.
        if treat_as_secure_origin is None:
            treat_as_secure_origin = []
        elif isinstance(treat_as_secure_origin, URL):
            treat_as_secure_origin = [treat_as_secure_origin.origin()]
        elif isinstance(treat_as_secure_origin, str):
            treat_as_secure_origin = [URL(treat_as_secure_origin).origin()]
        else:
            treat_as_secure_origin = [
                URL(url).origin() if isinstance(url, str) else url.origin()
                for url in treat_as_secure_origin
            ]
        self._treat_as_secure_origin = treat_as_secure_origin
        self._next_expiration = next_whole_second()
        self._expirations = {}  # type: Dict[Tuple[str, str], datetime.datetime]
        # #4515: datetime.max may not be representable on 32-bit platforms
        self._max_time = self.MAX_TIME
        try:
            self._max_time.timestamp()
        except OverflowError:
            self._max_time = self.MAX_32BIT_TIME
    def save(self, file_path: PathLike) -> None:
        """Pickle the jar's cookies to *file_path*."""
        file_path = pathlib.Path(file_path)
        with file_path.open(mode="wb") as f:
            pickle.dump(self._cookies, f, pickle.HIGHEST_PROTOCOL)
    def load(self, file_path: PathLike) -> None:
        """Replace the jar's cookies with those pickled at *file_path*."""
        file_path = pathlib.Path(file_path)
        with file_path.open(mode="rb") as f:
            self._cookies = pickle.load(f)
    def clear(self, predicate: Optional[ClearCookiePredicate] = None) -> None:
        """Drop expired cookies plus any for which *predicate* is true.

        With no predicate, everything is cleared at once.
        """
        if predicate is None:
            self._next_expiration = next_whole_second()
            self._cookies.clear()
            self._host_only_cookies.clear()
            self._expirations.clear()
            return
        to_del = []
        now = datetime.datetime.now(datetime.timezone.utc)
        for domain, cookie in self._cookies.items():
            for name, morsel in cookie.items():
                key = (domain, name)
                if (
                    key in self._expirations and self._expirations[key] <= now
                ) or predicate(morsel):
                    to_del.append(key)
        for domain, name in to_del:
            key = (domain, name)
            self._host_only_cookies.discard(key)
            if key in self._expirations:
                del self._expirations[(domain, name)]
            self._cookies[domain].pop(name, None)
        # Recompute the next wake-up time from the remaining expirations,
        # rounded up to a whole second.
        next_expiration = min(self._expirations.values(), default=self._max_time)
        try:
            self._next_expiration = next_expiration.replace(
                microsecond=0
            ) + datetime.timedelta(seconds=1)
        except OverflowError:
            self._next_expiration = self._max_time
    def clear_domain(self, domain: str) -> None:
        """Remove every cookie whose domain matches *domain*."""
        self.clear(lambda x: self._is_domain_match(domain, x["domain"]))
    def __iter__(self) -> "Iterator[Morsel[str]]":
        # Expire first so stale morsels are never yielded.
        self._do_expiration()
        for val in self._cookies.values():
            yield from val.values()
    def __len__(self) -> int:
        return sum(1 for i in self)
    def _do_expiration(self) -> None:
        # clear() with an always-false predicate only drops expired entries.
        self.clear(lambda x: False)
    def _expire_cookie(self, when: datetime.datetime, domain: str, name: str) -> None:
        """Record that cookie (domain, name) expires at *when*."""
        self._next_expiration = min(self._next_expiration, when)
        self._expirations[(domain, name)] = when
    def update_cookies(self, cookies: LooseCookies, response_url: URL = URL()) -> None:
        """Update cookies."""
        hostname = response_url.raw_host
        if not self._unsafe and is_ip_address(hostname):
            # Don't accept cookies from IPs
            return
        if isinstance(cookies, Mapping):
            cookies = cookies.items()
        for name, cookie in cookies:
            if not isinstance(cookie, Morsel):
                # Wrap bare string values in a Morsel via SimpleCookie.
                tmp = SimpleCookie()  # type: SimpleCookie[str]
                tmp[name] = cookie  # type: ignore[assignment]
                cookie = tmp[name]
            domain = cookie["domain"]
            # ignore domains with trailing dots
            if domain.endswith("."):
                domain = ""
                del cookie["domain"]
            if not domain and hostname is not None:
                # Set the cookie's domain to the response hostname
                # and set its host-only-flag
                self._host_only_cookies.add((hostname, name))
                domain = cookie["domain"] = hostname
            if domain.startswith("."):
                # Remove leading dot
                domain = domain[1:]
                cookie["domain"] = domain
            if hostname and not self._is_domain_match(domain, hostname):
                # Setting cookies for different domains is not allowed
                continue
            path = cookie["path"]
            if not path or not path.startswith("/"):
                # Set the cookie's path to the response path
                path = response_url.path
                if not path.startswith("/"):
                    path = "/"
                else:
                    # Cut everything from the last slash to the end
                    path = "/" + path[1 : path.rfind("/")]
                cookie["path"] = path
            max_age = cookie["max-age"]
            if max_age:
                # Max-Age takes precedence over Expires (RFC 6265 4.1.2.2).
                try:
                    delta_seconds = int(max_age)
                    try:
                        max_age_expiration = datetime.datetime.now(
                            datetime.timezone.utc
                        ) + datetime.timedelta(seconds=delta_seconds)
                    except OverflowError:
                        max_age_expiration = self._max_time
                    self._expire_cookie(max_age_expiration, domain, name)
                except ValueError:
                    cookie["max-age"] = ""
            else:
                expires = cookie["expires"]
                if expires:
                    expire_time = self._parse_date(expires)
                    if expire_time:
                        self._expire_cookie(expire_time, domain, name)
                    else:
                        cookie["expires"] = ""
            self._cookies[domain][name] = cookie
        self._do_expiration()
    def filter_cookies(
        self, request_url: URL = URL()
    ) -> Union["BaseCookie[str]", "SimpleCookie[str]"]:
        """Returns this jar's cookies filtered by their attributes."""
        self._do_expiration()
        if not isinstance(request_url, URL):
            warnings.warn(
                "The method accepts yarl.URL instances only, got {}".format(
                    type(request_url)
                ),
                DeprecationWarning,
            )
            request_url = URL(request_url)
        filtered: Union["SimpleCookie[str]", "BaseCookie[str]"] = (
            SimpleCookie() if self._quote_cookie else BaseCookie()
        )
        hostname = request_url.raw_host or ""
        request_origin = URL()
        with contextlib.suppress(ValueError):
            request_origin = request_url.origin()
        # Secure cookies are only sent over https/wss or configured
        # treat_as_secure_origin origins.
        is_not_secure = (
            request_url.scheme not in ("https", "wss")
            and request_origin not in self._treat_as_secure_origin
        )
        for cookie in self:
            name = cookie.key
            domain = cookie["domain"]
            # Send shared cookies
            if not domain:
                filtered[name] = cookie.value
                continue
            if not self._unsafe and is_ip_address(hostname):
                continue
            if (domain, name) in self._host_only_cookies:
                if domain != hostname:
                    continue
            elif not self._is_domain_match(domain, hostname):
                continue
            if not self._is_path_match(request_url.path, cookie["path"]):
                continue
            if is_not_secure and cookie["secure"]:
                continue
            # It's critical we use the Morsel so the coded_value
            # (based on cookie version) is preserved
            mrsl_val = cast("Morsel[str]", cookie.get(cookie.key, Morsel()))
            mrsl_val.set(cookie.key, cookie.value, cookie.coded_value)
            filtered[name] = mrsl_val
        return filtered
    @staticmethod
    def _is_domain_match(domain: str, hostname: str) -> bool:
        """Implements domain matching adhering to RFC 6265."""
        if hostname == domain:
            return True
        if not hostname.endswith(domain):
            return False
        non_matching = hostname[: -len(domain)]
        if not non_matching.endswith("."):
            return False
        # An IP address never domain-matches a suffix.
        return not is_ip_address(hostname)
    @staticmethod
    def _is_path_match(req_path: str, cookie_path: str) -> bool:
        """Implements path matching adhering to RFC 6265."""
        if not req_path.startswith("/"):
            req_path = "/"
        if req_path == cookie_path:
            return True
        if not req_path.startswith(cookie_path):
            return False
        if cookie_path.endswith("/"):
            return True
        non_matching = req_path[len(cookie_path) :]
        # The cookie path must match at a "/" boundary.
        return non_matching.startswith("/")
    @classmethod
    def _parse_date(cls, date_str: str) -> Optional[datetime.datetime]:
        """Implements date string parsing adhering to RFC 6265."""
        if not date_str:
            return None
        found_time = False
        found_day = False
        found_month = False
        found_year = False
        hour = minute = second = 0
        day = 0
        month = 0
        year = 0
        # Scan tokens left to right; the first token that matches each
        # component (time, day, month, year) wins.
        for token_match in cls.DATE_TOKENS_RE.finditer(date_str):
            token = token_match.group("token")
            if not found_time:
                time_match = cls.DATE_HMS_TIME_RE.match(token)
                if time_match:
                    found_time = True
                    hour, minute, second = (int(s) for s in time_match.groups())
                    continue
            if not found_day:
                day_match = cls.DATE_DAY_OF_MONTH_RE.match(token)
                if day_match:
                    found_day = True
                    day = int(day_match.group())
                    continue
            if not found_month:
                month_match = cls.DATE_MONTH_RE.match(token)
                if month_match:
                    found_month = True
                    assert month_match.lastindex is not None
                    # The index of the alternation group that matched
                    # is the month number (jan == 1).
                    month = month_match.lastindex
                    continue
            if not found_year:
                year_match = cls.DATE_YEAR_RE.match(token)
                if year_match:
                    found_year = True
                    year = int(year_match.group())
        # Two-digit years are interpreted per RFC 6265: 70-99 -> 19xx,
        # 00-69 -> 20xx.
        if 70 <= year <= 99:
            year += 1900
        elif 0 <= year <= 69:
            year += 2000
        if False in (found_day, found_month, found_year, found_time):
            return None
        if not 1 <= day <= 31:
            return None
        if year < 1601 or hour > 23 or minute > 59 or second > 59:
            return None
        return datetime.datetime(
            year, month, day, hour, minute, second, tzinfo=datetime.timezone.utc
        )
class DummyCookieJar(AbstractCookieJar):
    """A no-op cookie jar.

    Useful with ClientSession when cookies must never be stored or sent:
    mutating calls are ignored and every lookup comes back empty.
    """
    def __iter__(self) -> "Iterator[Morsel[str]]":
        # Empty generator: the jar never holds anything.
        yield from ()
    def __len__(self) -> int:
        return 0
    def clear(self, predicate: Optional[ClearCookiePredicate] = None) -> None:
        pass
    def clear_domain(self, domain: str) -> None:
        pass
    def update_cookies(self, cookies: LooseCookies, response_url: URL = URL()) -> None:
        pass
    def filter_cookies(self, request_url: URL) -> "BaseCookie[str]":
        return SimpleCookie()
| {
"content_hash": "82da8c95a90babdf7bb9e42215159d17",
"timestamp": "",
"source": "github",
"line_count": 417,
"max_line_length": 87,
"avg_line_length": 32.798561151079134,
"alnum_prop": 0.5396651312422315,
"repo_name": "KeepSafe/aiohttp",
"id": "f519fff8dd7cd76d5f4800d9b5dafd428cf19133",
"size": "13677",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "aiohttp/cookiejar.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "838"
},
{
"name": "CSS",
"bytes": "112"
},
{
"name": "HTML",
"bytes": "4890"
},
{
"name": "Makefile",
"bytes": "3179"
},
{
"name": "PLpgSQL",
"bytes": "765"
},
{
"name": "Python",
"bytes": "1236385"
},
{
"name": "Shell",
"bytes": "2309"
}
],
"symlink_target": ""
} |
"""
MoinMoin - OpenID preferences
@copyright: 2007 MoinMoin:JohannesBerg
@license: GNU GPL, see COPYING for details.
"""
from MoinMoin import wikiutil, user
from MoinMoin.widget import html
from MoinMoin.userprefs import UserPrefBase
import base64
class Settings(UserPrefBase):
    def __init__(self, request):
        """ Initialize OpenID settings form. """
        UserPrefBase.__init__(self, request)
        self.request = request
        self._ = request.getText
        self.cfg = request.cfg
        # Local alias so the gettext call below follows the usual pattern.
        _ = self._
        self.title = _("OpenID server")
def allowed(self):
if not self.request.cfg.openid_server_enabled:
return False
groups = self.request.groups
openid_group_name = self.request.cfg.openid_server_restricted_users_group
if openid_group_name and self.request.user.name not in groups.get(openid_group_name, []):
return False
return True
def _handle_remove(self):
_ = self.request.getText
if not hasattr(self.request.user, 'openid_trusted_roots'):
return
roots = self.request.user.openid_trusted_roots[:]
for root in self.request.user.openid_trusted_roots:
name = "rm-%s" % root
if name in self.request.form:
roots.remove(root)
self.request.user.openid_trusted_roots = roots
self.request.user.save()
return 'info', _("The selected websites have been removed.")
def handle_form(self):
_ = self._
form = self.request.form
if form.has_key('cancel'):
return
if self.request.method != 'POST':
return
if form.has_key('remove'):
return self._handle_remove()
def _make_form(self):
action = "%s%s" % (self.request.script_root, self.request.path)
_form = html.FORM(action=action)
_form.append(html.INPUT(type="hidden", name="action", value="userprefs"))
_form.append(html.INPUT(type="hidden", name="handler", value="oidserv"))
return _form
def _make_row(self, label, cell, **kw):
""" Create a row in the form table.
"""
self._table.append(html.TR().extend([
html.TD(**kw).extend([html.B().append(label), ' ']),
html.TD().extend(cell),
]))
def _trust_root_list(self):
_ = self.request.getText
form = self._make_form()
for root in self.request.user.openid_trusted_roots:
display = base64.decodestring(root)
name = 'rm-%s' % root
form.append(html.INPUT(type="checkbox", name=name, id=name))
form.append(html.LABEL(for_=name).append(html.Text(display)))
form.append(html.BR())
self._make_row(_("Trusted websites"), [form], valign='top')
label = _("Remove selected")
form.append(html.BR())
form.append(html.INPUT(type="submit", name="remove", value=label))
def create_form(self):
""" Create the complete HTML form code. """
_ = self._
ret = html.P()
# Use the user interface language and direction
lang_attr = self.request.theme.ui_lang_attr()
ret.append(html.Raw('<div %s>' % lang_attr))
self._table = html.TABLE(border="0")
ret.append(self._table)
ret.append(html.Raw("</div>"))
request = self.request
if hasattr(request.user, 'openid_trusted_roots') and request.user.openid_trusted_roots:
self._trust_root_list()
form = self._make_form()
label = _("Cancel")
form.append(html.INPUT(type="submit", name='cancel', value=label))
self._make_row('', [form])
return unicode(ret)
| {
"content_hash": "f0a8014f55bcfea95529ab49c8bc0686",
"timestamp": "",
"source": "github",
"line_count": 112,
"max_line_length": 97,
"avg_line_length": 33.473214285714285,
"alnum_prop": 0.5822886102960789,
"repo_name": "RealTimeWeb/wikisite",
"id": "4d7489c25395ac1d428884482040ea8c784e69a3",
"size": "3778",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "MoinMoin/userprefs/oidserv.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ASP",
"bytes": "49395"
},
{
"name": "CSS",
"bytes": "204104"
},
{
"name": "ColdFusion",
"bytes": "142312"
},
{
"name": "Java",
"bytes": "491798"
},
{
"name": "JavaScript",
"bytes": "2107106"
},
{
"name": "Lasso",
"bytes": "23464"
},
{
"name": "Makefile",
"bytes": "4950"
},
{
"name": "PHP",
"bytes": "144585"
},
{
"name": "Perl",
"bytes": "44627"
},
{
"name": "Python",
"bytes": "7647140"
},
{
"name": "Shell",
"bytes": "335"
}
],
"symlink_target": ""
} |
import django.contrib.postgres.indexes
import django.contrib.postgres.search
from django.db import migrations
from django.contrib.postgres.search import SearchVector
def update_full_name_dni_host_name(apps, schema_editor):
    """Data migration: backfill the combined search vector for existing rows."""
    # Use the historical model so this migration stays valid as the
    # Visitor model evolves.
    model = apps.get_model('visitors', 'Visitor')
    model.objects.update(
        full_name_dni_host_name=SearchVector('full_name', 'id_number', 'host_name')
    )
class Migration(migrations.Migration):
    """Add, backfill, and GIN-index a combined full-text search column."""
    dependencies = [
        ('visitors', '0008_auto_20180729_2217'),
    ]
    operations = [
        # 1) Add the nullable SearchVectorField column.
        migrations.AddField(
            model_name='visitor',
            name='full_name_dni_host_name',
            field=django.contrib.postgres.search.SearchVectorField(null=True),
        ),
        # 2) Populate it for rows that already exist (no-op on reverse).
        migrations.RunPython(
            update_full_name_dni_host_name, reverse_code=migrations.RunPython.noop
        ),
        # 3) Index it with GIN for fast full-text lookups.
        migrations.AddIndex(
            model_name='visitor',
            index=django.contrib.postgres.indexes.GinIndex(
                fields=['full_name_dni_host_name'], name='full_name_dni_host_name_idx'
            ),
        ),
    ]
| {
"content_hash": "6ab4fc3af17a53aaa6ca422d8531ac93",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 86,
"avg_line_length": 31.2,
"alnum_prop": 0.6355311355311355,
"repo_name": "aniversarioperu/django-manolo",
"id": "96e606a1dbd0696b673e9d77591766926981f726",
"size": "1141",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "visitors/migrations/0009_auto_20200927_1118.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ApacheConf",
"bytes": "901"
},
{
"name": "CSS",
"bytes": "842"
},
{
"name": "HTML",
"bytes": "35219"
},
{
"name": "JavaScript",
"bytes": "1767"
},
{
"name": "Makefile",
"bytes": "2626"
},
{
"name": "Python",
"bytes": "48190"
}
],
"symlink_target": ""
} |
import os
import os.path
import pickle
import vincent
# Data files live in ../data relative to this script.
DATA_DIR = os.path.abspath(
    os.path.join(os.path.dirname(__file__), "..", "data")
)
DATA_FILE = os.path.join(DATA_DIR, "python-versions.pkl")
# Job name is the script's basename with the "json-" prefix (5 chars) removed.
JOB_NAME = os.path.splitext(os.path.basename(__file__))[0][5:]
JSON_FILE = os.path.join(DATA_DIR, "{}.json".format(JOB_NAME))
# Load the pickled DataFrame of download counts per Python version.
with open(DATA_FILE, "rb") as fp:
    df = pickle.load(fp)
# Aggregate to weekly totals.
# NOTE(review): resample(..., how=...) was removed in pandas 0.25+;
# this assumes an old pandas — confirm before upgrading.
df = df.resample("W", how="sum")
# Total Percentages of Python Versions
graph = vincent.StackedArea(df / df.sum(axis=1))
graph.legend(title="")
graph.axes["y"].format = "%"
graph.scales["y"].domain_max = 1.0
graph.to_json(JSON_FILE)
| {
"content_hash": "ca9947faa8835f27aae2c056d9718a44",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 62,
"avg_line_length": 20.966666666666665,
"alnum_prop": 0.6629570747217806,
"repo_name": "dstufft/pypi-stats",
"id": "6388a2ca86ce831b0e13e78851fa9d45ab278563",
"size": "1191",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bin/json-stacked-py-pct.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "11650"
}
],
"symlink_target": ""
} |
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from collections import namedtuple
import webcolors
# Lightweight record describing one GDAL driver/format and its capability
# flags (presumably filled in from GDAL's driver metadata — confirm at call site).
GdalFormat = namedtuple(typename='GdalFormat',
                        field_names=['name', 'attributes', 'description',
                                     'can_read', 'can_write', 'can_update',
                                     'has_virtual_io'])
def enum(**enums):
    """Build an immutable enum-like object from keyword arguments.

    Each keyword becomes a read-only attribute on the returned value,
    e.g. ``colors = enum(RED=1, BLUE=2); colors.RED == 1``.
    """
    cls = namedtuple('enum', list(enums))
    return cls(**enums)
# Plain tuple backing type for rgba: red, green, blue, alpha channels.
_rgba = namedtuple('_rgba', 'r g b a')
class rgba(_rgba):
    """An RGBA color whose alpha channel defaults to fully opaque."""

    def __new__(cls, r, g, b, a=255):
        # Alpha defaults to 255 (opaque) when the caller omits it.
        return _rgba.__new__(cls, r, g, b, a)

    @classmethod
    def webcolor(cls, color):
        """Returns an RGBA color from its HTML/CSS representation."""
        if color.startswith('#'):
            rgb = webcolors.hex_to_rgb(color)
        else:
            rgb = webcolors.name_to_rgb(color)
        return cls(*rgb)
# Axis-aligned bounding box defined by its lower-left and upper-right corners.
_Extents = namedtuple('Extents', ['lower_left', 'upper_right'])
class Extents(_Extents):
    """A rectangle spanning ``lower_left`` to ``upper_right`` (XY-like corners)."""

    def __contains__(self, other):
        """Point-in-box test: lower edge inclusive, upper edge exclusive."""
        if isinstance(other, type(self)):
            # TODO: Support testing against own type
            raise NotImplementedError()
        if isinstance(other, (tuple, list, XY)):
            if not (self.lower_left.x <= other[0] < self.upper_right.x):
                return False
            return self.lower_left.y <= other[1] < self.upper_right.y
        raise TypeError("Can't handle {0!r}".format(other))

    def almost_equal(self, other, places=None, delta=None):
        """True when both corners compare almost-equal to *other*'s corners."""
        if not self.lower_left.almost_equal(other.lower_left,
                                            places=places, delta=delta):
            return False
        return self.upper_right.almost_equal(other.upper_right,
                                             places=places, delta=delta)

    @property
    def dimensions(self):
        """Width/height vector: ``upper_right - lower_left``."""
        return self.upper_right - self.lower_left


# Plain tuple backing type for XY.
_XY = namedtuple('XY', ['x', 'y'])
class XY(_XY):
    """A 2-D point/vector supporting arithmetic and fuzzy comparison."""

    def __add__(self, other):
        cls = type(self)
        return cls(x=self.x + other.x, y=self.y + other.y)

    def __sub__(self, other):
        cls = type(self)
        return cls(x=self.x - other.x, y=self.y - other.y)

    def __mul__(self, other):
        # Scalar multiplication.
        cls = type(self)
        return cls(x=self.x * other, y=self.y * other)

    def __truediv__(self, other):
        # Scalar (true) division.
        cls = type(self)
        return cls(x=self.x / other, y=self.y / other)

    def floor(self):
        # Truncates toward zero via int(), matching the historical behavior
        # (not math.floor for negatives).
        return type(self)(int(self.x), int(self.y))

    def almost_equal(self, other, places=None, delta=None):
        """Fuzzy equality against any 2-item sequence.

        Pass either *delta* (absolute tolerance) or *places* (decimal
        places, default 7); supplying both raises TypeError.
        """
        if self.x == other[0] and self.y == other[1]:
            return True  # exact match needs no tolerance check
        if delta is not None and places is not None:
            raise TypeError("specify delta or places not both")
        if delta is not None:
            return (abs(self.x - other[0]) <= delta and
                    abs(self.y - other[1]) <= delta)
        if places is None:
            places = 7
        if round(abs(other[0] - self.x), places) != 0:
            return False
        return round(abs(other[1] - self.y), places) == 0
| {
"content_hash": "d3edee7d74f1ca98667d295d0e0a4a21",
"timestamp": "",
"source": "github",
"line_count": 100,
"max_line_length": 76,
"avg_line_length": 32.06,
"alnum_prop": 0.5274485339987524,
"repo_name": "ecometrica/gdal2mbtiles",
"id": "c03d94754523d8c7f0659076f7cb465a6014aa6f",
"size": "3993",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gdal2mbtiles/gd_types.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "317105"
}
],
"symlink_target": ""
} |
"""
Slactorbot - A Python Slack Bot with hot patch!
"""
import os
import re
from setuptools import find_packages, setup
def fread(fname):
    """Return the contents of *fname*, resolved relative to this file.

    Fix: the original leaked the file handle (open(...).read() with no
    close); a with-statement closes it deterministically.
    """
    path = os.path.join(os.path.dirname(__file__), fname)
    with open(path) as fp:
        return fp.read()


def get_version():
    """Extract ``__version__`` from slactorbot/_version.py.

    Returns:
        The version string, e.g. "1.2.3".

    Raises:
        RuntimeError: if no ``__version__ = "..."`` assignment is found.
    """
    VERSIONFILE = "slactorbot/_version.py"
    verstrline = fread(VERSIONFILE).strip()
    # Match: __version__ = "x.y.z" (single or double quotes), at line start.
    vsre = r"^__version__ = ['\"]([^'\"]*)['\"]"
    mo = re.search(vsre, verstrline, re.M)
    if not mo:
        raise RuntimeError("Unable to find version string in %s." %
                           (VERSIONFILE, ))
    return mo.group(1)
# Runtime dependencies installed alongside the package.
dependencies = ['thespian', 'slackclient', 'pyyaml', 'requests']
setup(
    name='slactorbot',
    # Version is read from slactorbot/_version.py so it lives in one place.
    version=get_version(),
    url='https://github.com/dataloop/slactorbot',
    download_url="https://github.com/dataloop/slactorbot/tarball/v" + get_version(),
    license="MIT",
    author='Steven Acreman',
    author_email='steven.acreman@dataloop.io',
    description='A Python Slack Bot with hot patch!',
    keywords="slack bot",
    packages=find_packages(exclude=['tests']),
    # Ship package data but never the local config file.
    exclude_package_data={'': ['config.yaml']},
    include_package_data=True,
    zip_safe=False,
    platforms='any',
    install_requires=dependencies,
    entry_points={
        'console_scripts': [
            "slactorbot = slactorbot.bot:start",
        ],
    },
    classifiers=[
        "Intended Audience :: Developers",
        "Intended Audience :: System Administrators"
    ])
| {
"content_hash": "81ad14b98459a3c10ec375f599e10419",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 84,
"avg_line_length": 28.192307692307693,
"alnum_prop": 0.6186903137789904,
"repo_name": "dataloop/slactorbot",
"id": "3e43b0046d95a69aaf3e5d71b6b05845e2af5135",
"size": "1466",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "9281"
}
],
"symlink_target": ""
} |
from django.db import models
from django.conf import settings
class Profile(models.Model):
    """A user's profile: contact details, address, and repository link."""

    def __str__(self):
        """Return the user's full name, falling back to the username."""
        fn = self.user.get_full_name().strip() or self.user.get_username()
        return "{}".format(fn)

    user = models.OneToOneField(
        settings.AUTH_USER_MODEL,
        # on_delete is required from Django 2.0; CASCADE matches the pre-2.0
        # implicit default and the FKs elsewhere in this file.
        on_delete=models.CASCADE,
        # NOTE(review): reverse accessor becomes user.user — 'profile' would
        # read better, but renaming would break existing callers.
        related_name='user'
    )
    street_addr = models.CharField('street address', max_length=128)
    unit = models.CharField('unit', max_length=8)
    city = models.CharField('city', max_length=64)
    state = models.CharField('state', max_length=4)
    post_code = models.CharField('postal code', max_length=5)
    mobile = models.CharField('mobile', max_length=10)
    email = models.CharField('email', max_length=64)
    repo = models.CharField('repo', max_length=64)
class Job(models.Model):
    """A single employment entry attached to a Profile."""
    def __str__(self):
        """Return the job title as the display string."""
        return str(self.title)
    profile = models.ForeignKey(
        Profile,
        on_delete=models.CASCADE,
        related_name='job',
    )
    # Position details; date_start/date_end bound the employment period.
    title = models.CharField('title', max_length=64)
    company = models.CharField('company', max_length=64)
    location = models.CharField('location', max_length=64)
    date_start = models.DateField('start date')
    date_end = models.DateField('end date')
    short_desc = models.CharField('short description', max_length=128)
    description = models.TextField('description')
class Edu(models.Model):
    """A single education entry attached to a Profile."""
    def __str__(self):
        """Return the school name as the display string."""
        return str(self.school)
    profile = models.ForeignKey(
        Profile,
        on_delete=models.CASCADE,
        related_name='school',
    )
    # Education details; date_start/date_end bound the study period.
    school = models.CharField('school', max_length=64)
    degree = models.CharField('degree', max_length=32)
    field_of_study = models.CharField('field of study', max_length=64)
    date_start = models.DateField('start date')
    date_end = models.DateField('end date')
    short_desc = models.CharField('short description', max_length=128)
    description = models.TextField('description')
class Skills(models.Model):
    """A skill entry: type, name, description, and a strength rating."""

    def __str__(self):
        """Return the skill name as the display string."""
        # Bug fix: the original returned str(self.name), but this model has
        # no 'name' field, so __str__ always raised AttributeError.
        return str(self.skill_name)

    skill_type = models.CharField('skill type', max_length=16)
    skill_name = models.CharField('skill name', max_length=16)
    skill_description = models.TextField('skill description')
    skill_strength = models.IntegerField('skill_strength')
| {
"content_hash": "7d5c085b9a1af1987f50e3fa43ce1228",
"timestamp": "",
"source": "github",
"line_count": 101,
"max_line_length": 74,
"avg_line_length": 29.336633663366335,
"alnum_prop": 0.6382045224434695,
"repo_name": "ilikesounds/jt_portfolio",
"id": "4d8724092aae216c1b56a680103773d86e56d225",
"size": "2963",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "aboutme/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "31615"
},
{
"name": "JavaScript",
"bytes": "329108"
},
{
"name": "Python",
"bytes": "21799"
}
],
"symlink_target": ""
} |
class TypeRegistry:
    """Maps model classes to their record types.

    Iterating yields ``(model_cls, record_type)`` pairs. ``registry`` adds
    a mapping (the method name is historical — it behaves like "register").
    """

    def __init__(self):
        # model class -> record type
        self.data = {}

    def registry(self, model_cls, record_type):
        """Associate *record_type* with *model_cls*."""
        self.data[model_cls] = record_type

    def __iter__(self):
        return iter(self.data.items())

    def __getitem__(self, item):
        return self.data[item]

    def get_type(self, obj):
        """Look up the record type registered for an instance's class."""
        return self.data[obj.__class__]

    def __contains__(self, item):
        return item in self.data


# Module-level shared registry instance.
record_type_registry = TypeRegistry()
| {
"content_hash": "5f2ae23be9620c393da7f65eef85f53a",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 47,
"avg_line_length": 22.428571428571427,
"alnum_prop": 0.5796178343949044,
"repo_name": "watchdogpolska/feder",
"id": "60b00fd101d1d4b825849037f91280a27c76083b",
"size": "471",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "feder/records/registry.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "624"
},
{
"name": "HTML",
"bytes": "183421"
},
{
"name": "JavaScript",
"bytes": "6245"
},
{
"name": "Makefile",
"bytes": "2086"
},
{
"name": "Python",
"bytes": "574027"
},
{
"name": "SCSS",
"bytes": "40546"
},
{
"name": "Shell",
"bytes": "214"
}
],
"symlink_target": ""
} |
from collections import OrderedDict
import pytest
from crosspm.helpers.output import Output
from crosspm.helpers.package import Package
@pytest.fixture(scope="function")
def package():
    """A single resolved crosspm Package with a fake unpack location."""
    params = {'arch': 'x86', 'osname': 'win', 'package': 'package'}
    params_found = {'repo': 'lib-cpp-release', 'version': '1.2.3'}
    _package = Package('package', None, params, None, None, None, params_found, None, None)
    _package.unpacked_path = "/test/path"
    yield _package
@pytest.fixture(scope="function")
def package_root():
    """
    Create root with dependencies:
    root
    - package1
        - package11
        - package12
    - package2
    """
    params = {'arch': 'x86', 'osname': 'win', 'package': 'root'}
    _root = Package('root', None, params, None, None, None, None, None, None)
    params = {'arch': 'x86', 'osname': 'win', 'package': 'package1'}
    _package1 = Package('package1', None, params, None, None, None, None, None, None)
    params = {'arch': 'x86', 'osname': 'win', 'package': 'package11'}
    _package11 = Package('package11', None, params, None, None, None, None, None, None)
    params = {'arch': 'x86', 'osname': 'win', 'package': 'package12'}
    _package12 = Package('package12', None, params, None, None, None, None, None, None)
    params = {'arch': 'x86', 'osname': 'win', 'package': 'package2'}
    _package2 = Package('package2', None, params, None, None, None, None, None, None)
    # Wire up the tree; OrderedDict keeps a deterministic traversal order.
    _package1.packages = OrderedDict([('package11', _package11), ('package12', _package12)])
    _root.packages = OrderedDict([('package2', _package2), ('package1', _package1)])
    # Give every node a fake unpack path so path-dependent code can run.
    for _package in _root.all_packages:
        _package.unpacked_path = "/test/path/{}".format(_package.name)
    yield _root
@pytest.fixture
def output():
    """An Output helper keyed on the 'package' column, with no data/config."""
    _output = Output(data=None, name_column='package', config=None)
    yield _output
| {
"content_hash": "fdb483c27c549b827707684fb6f11b6f",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 92,
"avg_line_length": 35.0188679245283,
"alnum_prop": 0.6330818965517241,
"repo_name": "devopshq/crosspm",
"id": "78f1f511888652ce62594ee6228900c874cde094",
"size": "1856",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/conftest.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CMake",
"bytes": "8406"
},
{
"name": "Jinja",
"bytes": "669"
},
{
"name": "Python",
"bytes": "229070"
},
{
"name": "Shell",
"bytes": "8264"
}
],
"symlink_target": ""
} |
from data_models import government_models
from data_models.government_models import DonationRecipient
from utils import entity_resolver
# Shared resolver instance used by all the ad-hoc checks below.
resolver = entity_resolver.MasterEntitiesResolver()
# Alternative sample inputs; uncomment one to test it instead.
#test_entry = "Lord na Lester"
#test_entry = "The Rt Hon Vincent Cable MP"
#test_entry = "Michael Denzil Xavier Portillo"
test_entry = "The Rt Hon Charles Kennedy MP"
test_type = "MP"
test_recipient = None
def parse_entities(entry):
    """Return the entities the shared resolver finds in *entry*."""
    return resolver.get_entities(entry)
def parse_recipient(entry):
    """Print what each resolver strategy (MP, lord, party, entity) returns."""
    print "*trying mp search:", resolver.find_mp(entry)
    print "*trying lord search:", resolver.find_lord(entry)
    print "*trying party search:", resolver.find_party(entry)
    print "*trying entity search:", resolver.get_entities(entry)
def node_properties_test():
    """Exercise creating an MP node and layering donation-recipient details."""
    me = government_models.MemberOfParliament("Warren The Magnificent")
    details = {
        "first_name": "Warren",
        "last_name": "The Magnificent",
        "party": "All the time",
        "twfy_id": "666",
        "number_of_terms": "infinite",
        # TODO change mp["guardian_image"] to mp["image_url"]
        "image_url": "http://ahyeah.com"
    }
    me.create()
    me.set_mp_details(details)
    # Re-wrap the same name as a DonationRecipient and attach extra details.
    new_me = DonationRecipient("Warren The Magnificent")
    extra_details = {
        "extra_indigo": "flatbush zombie"
    }
    new_me.set_recipient_details(extra_details)
# Ad-hoc smoke test: print each resolver strategy's result for the sample entry.
parse_recipient(test_entry)
#print "found:", parse_entities(test_entry)
#node_properties_test()
| {
"content_hash": "e6784bd2bfcc39018bf5dce3b0dce0df",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 71,
"avg_line_length": 30.102040816326532,
"alnum_prop": 0.6827118644067797,
"repo_name": "spudmind/undertheinfluence",
"id": "b48dfd3388bb9afafb6695304525fa6dc4368034",
"size": "1475",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "entity_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2366948"
},
{
"name": "HTML",
"bytes": "153611"
},
{
"name": "JavaScript",
"bytes": "16718"
},
{
"name": "Python",
"bytes": "449237"
}
],
"symlink_target": ""
} |
import typing
def find_missing_services(
    ecs, cluster: str, services: typing.Set[str]
) -> typing.Set[str]:
    """Return the subset of *services* that does not exist in AWS.

    Uses `describe_services` (below) because we have a specific list of
    service names to check; the names AWS reports back are diffed against
    the names we asked about.
    """
    found = {svc["serviceName"] for svc in describe_services(ecs, cluster, services)}
    return set(services) - found


def describe_services(
    ecs, cluster: str, services: typing.Set[str]
) -> typing.List[typing.Dict[str, typing.Any]]:
    """Wrap `ECS.Client.describe_services`, lifting the 10-service limit.

    The AWS API accepts at most 10 services per call, so the names are
    submitted in chunks of 10 and the ``services`` payloads concatenated.
    """
    described: typing.List[typing.Dict[str, typing.Any]] = []
    names = list(services)
    for start in range(0, len(names), 10):
        batch = names[start : start + 10]
        response = ecs.describe_services(cluster=cluster, services=batch)
        described.extend(response["services"])
    return described
| {
"content_hash": "514ae33c03d75d4b1263dcf2e3935296",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 77,
"avg_line_length": 35,
"alnum_prop": 0.6739495798319328,
"repo_name": "LabD/ecs-deplojo",
"id": "136abe5e2d8e378abd95074c37462db0ec1cd429",
"size": "1190",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/ecs_deplojo/utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "460"
},
{
"name": "Python",
"bytes": "35879"
}
],
"symlink_target": ""
} |
"""
Test TorchRankerAgent.
"""
import os
import pytest
import unittest
import parlai.utils.testing as testing_utils
from parlai.tasks.integration_tests.agents import CandidateTeacher
class _AbstractTRATest(unittest.TestCase):
    """
    Shared TorchRankerAgent train/eval tests over every candidate source.

    Concrete subclasses pick the model via _get_args() and may adjust the
    expected accuracy via _get_threshold(); the abstract parent itself is
    skipped in setUpClass.
    """
    @classmethod
    def setUpClass(cls):
        # Only run in concrete subclasses.
        if cls is _AbstractTRATest:
            raise unittest.SkipTest('Skip abstract parent class')
        super(_AbstractTRATest, cls).setUpClass()
    def _get_args(self):
        # Add arguments for the Torch Ranker Agent to test
        # Override in child classes
        return dict(
            task='integration_tests:overfit',
            optimizer='adam',
            learningrate=1e-2,
            batchsize=4,
            validation_every_n_epochs=5,
            validation_patience=10,
            lr_scheduler='none',
            embedding_size=8,
            gradient_clip=0.5,
        )
    def _get_threshold(self):
        # Accuracy threshold
        return 0.8
    # test train inline cands
    @testing_utils.retry(ntries=3)
    def test_train_inline(self):
        """Training with per-example (inline) candidates reaches the threshold."""
        args = self._get_args()
        args['candidates'] = 'inline'
        args['eval_candidates'] = 'inline'
        valid, test = testing_utils.train_model(args)
        threshold = self._get_threshold()
        self.assertGreaterEqual(valid['hits@1'], threshold)
    # test train batch cands
    @testing_utils.retry(ntries=3)
    def test_train_batch(self):
        """Training with batch candidates reaches the threshold."""
        args = self._get_args()
        args['candidates'] = 'batch'
        args['eval_candidates'] = 'batch'
        valid, test = testing_utils.train_model(args)
        threshold = self._get_threshold()
        self.assertGreaterEqual(valid['hits@1'], threshold)
    # test train fixed
    @pytest.mark.nofbcode
    @testing_utils.retry(ntries=3)
    def test_train_fixed(self):
        """Training with a fixed candidate set reaches the threshold."""
        args = self._get_args()
        args['candidates'] = 'fixed'
        args['eval_candidates'] = 'fixed'
        args['encode_candidate_vecs'] = False
        valid, test = testing_utils.train_model(args)
        threshold = self._get_threshold()
        self.assertGreaterEqual(valid['hits@1'], threshold)
    # test train batch all cands
    @testing_utils.retry(ntries=3)
    def test_train_batch_all(self):
        """Training with batch-all-cands reaches the threshold."""
        args = self._get_args()
        args['candidates'] = 'batch-all-cands'
        args['eval_candidates'] = 'batch-all-cands'
        valid, test = testing_utils.train_model(args)
        threshold = self._get_threshold()
        self.assertGreaterEqual(valid['hits@1'], threshold)
    # test eval inline ecands
    @testing_utils.retry(ntries=3)
    def test_eval_inline(self):
        """Evaluation with inline candidates reaches the threshold."""
        args = self._get_args()
        args['eval_candidates'] = 'inline'
        valid, test = testing_utils.train_model(args)
        threshold = self._get_threshold()
        self.assertGreaterEqual(valid['hits@1'], threshold)
    # test eval batch ecands
    def test_eval_batch(self):
        """Evaluation with batch candidates just needs to run without error."""
        args = self._get_args()
        args['eval_candidates'] = 'batch'
        valid, test = testing_utils.train_model(args)
        # no threshold, the model won't generalize on :overfit
    # test eval vocab ecands
    @testing_utils.retry(ntries=3)
    def test_eval_vocab(self):
        """Evaluation against vocab candidates must score zero hits."""
        args = self._get_args()
        args['eval_candidates'] = 'vocab'
        args['encode_candidate_vecs'] = True
        valid, test = testing_utils.train_model(args)
        # accuracy should be zero, none of the vocab candidates should be the
        # correct label
        self.assertEqual(valid['hits@100'], 0)
class TestTransformerRanker(_AbstractTRATest):
    """TRA tests run against a small transformer/ranker model."""

    def _get_args(self):
        args = super()._get_args()
        args.update(
            model='transformer/ranker', n_layers=1, n_heads=4, ffn_size=32
        )
        return args
class TestMemNN(_AbstractTRATest):
    """TRA tests run against a memnn model (weaker, so a lower threshold)."""

    def _get_args(self):
        opts = super()._get_args()
        opts['model'] = 'memnn'
        return opts

    def _get_threshold(self):
        # memnn underperforms the transformer, so expect less accuracy.
        return 0.5
@pytest.mark.nofbcode
class TestPolyRanker(_AbstractTRATest):
    """TRA tests against a small polyencoder, plus a fixed-candidates edge case."""
    def _get_args(self):
        args = super()._get_args()
        new_args = dict(
            model='transformer/polyencoder', n_layers=1, n_heads=4, ffn_size=32
        )
        for k, v in new_args.items():
            args[k] = v
        return args
    def _get_threshold(self):
        # lower bar than the parent's 0.8 default
        return 0.6
    def test_eval_fixed_label_not_in_cands(self):
        """Eval with a fixed candidate file that omits the test labels.

        Without add_label_to_fixed_cands the evaluation must raise; with it,
        evaluation runs and reports hits@100.
        """
        # test where cands during eval do not contain test label
        args = self._get_args()
        args[
            'model'
        ] = 'parlai.agents.transformer.polyencoder:IRFriendlyPolyencoderAgent'
        args['eval_candidates'] = 'fixed'
        teacher = CandidateTeacher({'datatype': 'train'})
        # Two candidate files: one covering every split, one missing test.
        all_cands = teacher.train + teacher.val + teacher.test
        train_val_cands = teacher.train + teacher.val
        all_cands_str = '\n'.join([' '.join(x) for x in all_cands])
        train_val_cands_str = '\n'.join([' '.join(x) for x in train_val_cands])
        with testing_utils.tempdir() as tmpdir:
            tmp_cands_file = os.path.join(tmpdir, 'all_cands.text')
            with open(tmp_cands_file, 'w') as f:
                f.write(all_cands_str)
            tmp_train_val_cands_file = os.path.join(tmpdir, 'train_val_cands.text')
            with open(tmp_train_val_cands_file, 'w') as f:
                f.write(train_val_cands_str)
            args['fixed_candidates_path'] = tmp_cands_file
            args['encode_candidate_vecs'] = False  # don't encode before training
            args['ignore_bad_candidates'] = False
            args['model_file'] = os.path.join(tmpdir, 'model')
            args['dict_file'] = os.path.join(tmpdir, 'model.dict')
            args['num_epochs'] = 4
            args['add_label_to_fixed_cands'] = False
            # Train model where it has access to the candidate in labels
            valid, test = testing_utils.train_model(args)
            self.assertGreaterEqual(valid['hits@100'], 0.0)
            # Evaluate model where label is not in fixed candidates
            args['fixed_candidates_path'] = tmp_train_val_cands_file
            # need these args dropped, it was for train only
            del args['num_epochs']
            del args['validation_patience']
            del args['validation_every_n_epochs']
            # use validation set that doesn't overlap
            args['task'] = 'integration_tests'
            # Will fail without appropriate arg set
            with self.assertRaises(RuntimeError):
                testing_utils.eval_model(args, skip_valid=True)
            args['add_label_to_fixed_cands'] = True
            _, test = testing_utils.eval_model(args, skip_valid=True)
            self.assertGreaterEqual(test['hits@100'], 0.0)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| {
"content_hash": "c54cd4f4676a4ee5e815243d7f60e387",
"timestamp": "",
"source": "github",
"line_count": 205,
"max_line_length": 87,
"avg_line_length": 33.8390243902439,
"alnum_prop": 0.5975205420210465,
"repo_name": "facebookresearch/ParlAI",
"id": "b911713f2ab8400b7e3133f596ee3e54319eaec8",
"size": "7136",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tests/test_tra.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "2000"
},
{
"name": "CSS",
"bytes": "38474"
},
{
"name": "Cuda",
"bytes": "4118"
},
{
"name": "Dockerfile",
"bytes": "1218"
},
{
"name": "HTML",
"bytes": "645771"
},
{
"name": "JavaScript",
"bytes": "405110"
},
{
"name": "Makefile",
"bytes": "289"
},
{
"name": "Python",
"bytes": "6802410"
},
{
"name": "Shell",
"bytes": "26147"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, division, print_function
import sys
import argparse
import potsim
def main():
    """CLI entry point: parse arguments, run the POTS filter, write output."""
    arg_parser = argparse.ArgumentParser(description='Run an audio file through '
                                         'a simulated POTS line')
    arg_parser.add_argument('infile', type=str,
                            help='Input file. Type inferred from extension.')
    arg_parser.add_argument('-o', '--outtype', choices=['wav', 'txt'],
                            help='Override output type')
    arg_parser.add_argument('-r', '--seed', type=int,
                            help='Fixed random seed if provided; integer')
    arg_parser.add_argument('-s', '--snr', type=float, default=30,
                            help='Approximate signal to noise radio in dB; float. Default: 30')
    opts = arg_parser.parse_args()

    try:
        pots = potsim.POTSFilter(filename=opts.infile)
    except TypeError as err:
        # Constructor rejects unrecognised input; report and bail out.
        print(str(err), file=sys.stderr)
        sys.exit(1)

    pots.process(seed=opts.seed, snr=opts.snr)
    if opts.outtype:
        pots.dtype = opts.outtype
    with open(pots.suggested_name(), 'wb') as out:
        pots.dump(out)
# Run the CLI when executed directly; main()'s return becomes the exit status.
if __name__ == '__main__':
    sys.exit(main())
| {
"content_hash": "7018eca7731177c224abdb4c83775ddc",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 77,
"avg_line_length": 29.263157894736842,
"alnum_prop": 0.637589928057554,
"repo_name": "nicktimko/pots-sim",
"id": "45895216e3c4439545238edf5dee4905ec59ada8",
"size": "1134",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dopots.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "6840"
},
{
"name": "Python",
"bytes": "9806"
}
],
"symlink_target": ""
} |
# Demo script (Python 2): exception handlers match by class hierarchy.
class B:
    pass
class C(B):
    pass
class D(C):
    pass
# Handlers ordered most-derived first, so each class hits its own branch.
for c in [B, C, D]:
    try:
        raise c()
    except D:
        print "D"
    except C:
        print "C"
    except B:
        print "B"
# A base-class handler catches all derived exceptions too.
for c in [B, C, D]:
    try:
        raise c()
    except B as b:
        print "B ",str(b),str(B)
# every exception will be caught by B block.
for c in [B, C, D]:
    try:
        raise c()
    except D:
        print "D"
    except C:
        print "C"
# exception will be generated here because when you raise B() there is
# no except B line. You can't catch exceptions from base class using
# derived classes
| {
"content_hash": "00122d23bcecaf18802630db217f096e",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 70,
"avg_line_length": 17.771428571428572,
"alnum_prop": 0.5321543408360129,
"repo_name": "loafdog/loaf-src",
"id": "ef9c6d8cdfc486f467a855476126907fbf14a657",
"size": "641",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/py-scripts/except-class.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "16177"
},
{
"name": "Common Lisp",
"bytes": "50102"
},
{
"name": "Emacs Lisp",
"bytes": "13544"
},
{
"name": "Perl",
"bytes": "29815"
},
{
"name": "Perl 6",
"bytes": "1660"
},
{
"name": "Python",
"bytes": "81615"
},
{
"name": "Shell",
"bytes": "45771"
}
],
"symlink_target": ""
} |
"""
Author: Marco Dinacci <dev@dinointeractive.com>
License: BSD
World Editor
TODO
* to deal with entities I need an Entity Manager (see also multifiles).
It's ok to have multiple references
but there must be only a single place where to manage them.
* (re)code everything to use an EventManager (see taskMgr)
- change cell specific settings:
- model (material, lights, texture)
- color (optional)
- create multiple surfaces in ODE and bind them to cells ?
* implement scene save/load
- better ball physics (fix the fact that it never stops)
- better camera for the ball, must have constant X position and
constant Y distance
- new cell models to implement elevation
- curves :O
- fix the logger
* Configuration manager, all the parameters must be read from disk
- use egg-qtess to polygonize a NURBS surface
- I need a python shell inside the editor !
- use Panda3D multifiles to store entities !
- search useless imports and remove them
* implement messaging system
- optimize scene. Have four nodes: staticobjs, actors, sky, evrthng with alpha
"""
# useful for debugging
from mdlib.decorator import traceMethod, accepts, trace, dumpArgs
# load configuration
# TODO the ConfigurationManager should take care of this
from pandac.PandaModules import loadPrcFile, ConfigVariableString, ConfigVariableBool
loadPrcFile("../res/Config.prc")
loadPrcFile("../res/Editor.prc")
# panda 3d stuff
from direct.showbase.ShowBase import ShowBase
from direct.showbase.DirectObject import DirectObject
from direct.directtools.DirectGeometry import LineNodePath
from pandac.PandaModules import Point3, Vec4, Vec3, NodePath, Quat
from pandac.PandaModules import LightAttrib, AmbientLight, DirectionalLight
from pandac.PandaModules import EggData, Filename, BamFile
from pandac.PandaModules import WindowProperties
# collision to pick entities with mouse
from pandac.PandaModules import CollisionNode, CollisionHandlerQueue, \
CollisionTraverser, CollisionRay, GeomNode
# panda utilities and actors
from mdlib.panda import eventCallback, inputCallback, guiCallback, MouseWatcher
from mdlib.panda.core import *
from mdlib.panda.camera import *
from mdlib.panda.entity import *
from mdlib.panda.input import *
from mdlib.panda.data import *
from mdlib.panda.entity import *
from mdlib.panda.physics import POM
from mdlib.panda import event
# logging
from mdlib.log import ConsoleLogger, DEBUG,WARNING
logger = ConsoleLogger("editor", DEBUG)
# for debugging
import echo
# editor imports
from PyQt4.QtGui import QApplication
from gui.qt import EditorGUI
from gui import GUIPresenter
#echo.echo_class(EditorGUI)
import cPickle, time
from sys import exit
SCENE_FORMAT_VERSION = "0.1.1"
class EditorScene(AbstractScene):
    """Scene used by the editor views.

    Holds the entities being edited plus a default light rig; the active
    camera is attached by whichever view is currently in control (see the
    ``camera`` property).
    """

    def __init__(self):
        super(EditorScene, self).__init__()
        self._camera = None
        # create initial lights
        self._setupLights()

    def _setupLights(self):
        """Attach a default ambient + directional light rig to the scene root."""
        # Create some lights and add them to the scene. By setting the lights on
        # render they affect the entire scene.
        # Check out the lighting tutorial for more information on lights.
        lAttrib = LightAttrib.makeAllOff()
        ambientLight = AmbientLight( "ambientLight" )
        ambientLight.setColor( Vec4(.4, .4, .35, 1) )
        lAttrib = lAttrib.addLight( ambientLight )
        directionalLight = DirectionalLight( "directionalLight" )
        directionalLight.setDirection( Vec3( 0, 8, -2.5 ) )
        directionalLight.setColor( Vec4( 0.9, 0.8, 0.9, 1 ) )
        lAttrib = lAttrib.addLight( directionalLight )
        # NOTE(review): upcastToPandaNode() is legacy Panda3D API; newer
        # engine versions attach the light object directly -- confirm the
        # Panda3D version this project targets.
        self._rootNode.attachNewNode( directionalLight.upcastToPandaNode() )
        self._rootNode.attachNewNode( ambientLight.upcastToPandaNode() )
        self._rootNode.node().setAttrib( lAttrib )

    """
    def deleteEntityFromNodePath(self, nodePath):
        # FIXME must remove entity IF it is an entity (maybe just a tree)
        nodePath.hideBounds()
        nodePath.removeNode()
    """

    # Read/write access to the camera currently rendering this scene.
    camera = property(fget=lambda self: self._camera,
                      fset=lambda self,cam: setattr(self, '_camera', cam))
class EditorView(AbstractView):
    """Base class for all the editor views.

    A view couples the shared editor scene with a camera and an input
    scheme; concrete subclasses (roaming, editing, simulating) customise
    the camera and the key bindings.
    """

    # NOTE(review): class attribute, so the very same EditorScene instance is
    # shared by every EditorView subclass/instance -- looks intentional (all
    # views edit one world) but confirm.
    _scene = EditorScene()

    def __init__(self, inputMgr):
        super(EditorView, self).__init__(inputMgr)

    def enable(self):
        """Activate this view: camera, scene binding and input scheme."""
        # reenable camera controller
        self.camera.setActive(True)
        self.scene.camera = self.camera
        self._inputMgr.switchSchemeTo(self.INPUT_SCHEME)

    def disable(self):
        """Deactivate this view and fall back to the base input scheme."""
        # disable camera controller
        self.camera.setActive(False)
        self._inputMgr.switchSchemeTo(BASE_SCHEME)

    def readInput(self, task):
        """Per-frame task: poll the input manager."""
        self._inputMgr.update()
        return task.cont

    def update(self, task):
        """Per-frame task: advance camera and scene state."""
        # entity position is updated automatically by the physic manager by
        # setting parameters for position and rotation in params.
        # TODO
        # update GUI
        self.scene.camera.update()
        self.scene.update()
        return task.cont

    def render(self, task):
        """Per-frame task: render the scene."""
        self.scene.render()
        return task.cont

    def setSceneRootNode(self, node):
        """Reparent the scene root to *node* (usually the engine's render node)."""
        self.scene.setRootNodeParent(node)

    def addToScene(self, entity):
        """Add *entity* to the shared scene."""
        self._scene.addEntity(entity)

    def _registerToCommands(self):
        """Register the key bindings common to every view (base scheme)."""
        self._inputMgr.bindEvent("escape", event.REQUEST_SHUTDOWN,
                                 scheme="base")
        self._inputMgr.bindEvent("1", event.SWITCH_VIEW, ["roaming"],
                                 scheme="base")
        self._inputMgr.bindEvent("2", event.SWITCH_VIEW, ["editing"],
                                 scheme="base")
        self._inputMgr.bindEvent("3", event.SWITCH_VIEW, ["simulating"],
                                 scheme="base")
        self._inputMgr.bindEvent("4", event.SWITCH_VIEW, ["debugging"],
                                 scheme="base")
        self._inputMgr.bindCallback("0", self.scene.camera.lookAtOrigin)

    def _subscribeToEvents(self):
        pass

    # Read-only access to the shared scene.
    scene = property(fget = lambda self: self._scene, fset=None)
class RoamingView(EditorView):
    """Free-flight view: the camera can 'roam' anywhere inside the world."""

    INPUT_SCHEME = "roaming"

    def _setupCamera(self):
        # Build the free-roaming camera and share it with the scene.
        cam = RoamingCamera(self._inputMgr)
        cam.setPos(0, -40, 15)
        cam.lookAt(0, 0, 0)
        self.camera = cam
        self.scene.camera = cam

    def _registerToCommands(self):
        # Base bindings first, then activate this view's own input scheme.
        super(RoamingView, self)._registerToCommands()
        self._inputMgr.createSchemeAndSwitch(self.INPUT_SCHEME)

    def enable(self):
        # Roaming hides the mouse cursor before the common enabling steps.
        self.camera.showCursor(False)
        super(RoamingView, self).enable()
class EditingView(EditorView):
    """
    The editing view is the most sophisticated view.
    It transforms the editor in a world editor allowing to insert
    and to position objects.

    Messages sent here are received by the GUI

    Accepted inputs:
    - space -> add a new row
    - mouse1 press -> select a node
    """
    INPUT_SCHEME = "editing"

    def __init__(self, inputMgr):
        super(EditingView, self).__init__(inputMgr)
        self._setupCollisionDetection()
        # currently selected NodePath (not the entity itself), or None
        self._selectedObj = None

    def getSelectedEntity(self):
        """Return the entity of the selected node, or None when no selection."""
        if self._selectedObj is not None:
            entity = self.scene.getEntityByID(int(self._selectedObj.getNetTag("UID")))
            return entity

    def deleteFromScene(self, entity):
        """Remove *entity* from the scene."""
        self.scene.deleteEntity(entity)

    def deleteSelectedObject(self):
        """Delete the currently selected entity from the scene, if any."""
        if self._selectedObj is not None:
            logger.debug("Deleting selected entity: %s " % self._selectedObj)
            self.scene.deleteEntityByID(int(self._selectedObj.getNetTag("UID")))
            #self.scene.deleteEntityFromNodePath(self._selectedObj) <-- delete single mesh
            self._selectedObj = None
        else:
            logger.info("Nothing selected, can't delete")

    def enable(self):
        # editing needs the mouse cursor visible to pick objects
        self.camera.showCursor(True)
        super(EditingView, self).enable()

    def disable(self):
        super(EditingView, self).disable()

    @inputCallback
    def _onMousePress(self):
        """Pick the scene entity under the mouse cursor (if any) and
        broadcast a SELECT_ENTITY message for the GUI."""
        mousePos = base.mouseWatcherNode.getMouse()
        # shoot a picking ray from the camera through the mouse position
        self.pickerRay.setFromLens(self.scene.camera, mousePos.getX(),
                                   mousePos.getY())
        self.picker.traverse(self.scene.getRootNode())
        entries = self.pq.getNumEntries()
        logger.debug("Ray collided with %d entries" % entries)
        if entries > 0:
            if self._selectedObj is not None:
                self._selectedObj.hideBounds()
            # sort the hits so that entry 0 is the closest to the camera
            self.pq.sortEntries()
            for i in range(0, entries):
                pickedObject = self.pq.getEntry(i).getIntoNodePath()
                logger.debug("Picked object #%d = %s" % (i, pickedObject))
            # highlight the closest selected object
            pickedObject = self.pq.getEntry(0).getIntoNodePath()
            pickedObject.showTightBounds()
            # set it current and send a msg that a new entity has been selected
            #self._selectedObj = self.scene.getEntityByID(pickedObject.getNetTag("ID"))
            self._selectedObj = pickedObject
            # NOTE(review): unlike the other methods the tag is not cast to
            # int here -- confirm getEntityByID accepts a string UID too.
            entity = self.scene.getEntityByID(pickedObject.getNetTag("UID"))
            logger.debug("Set selected object to: %s" % entity)
            messenger.send(event.SELECT_ENTITY, [entity])
        else:
            logger.debug("No collisions at: %s" % mousePos)

    def _setupCollisionDetection(self):
        """Create the ray/traverser/queue used for mouse picking."""
        self.picker = CollisionTraverser()
        self.pq = CollisionHandlerQueue();
        self.pickerNode = CollisionNode("entityPickRay")
        self.pickerNP = camera.attachNewNode(self.pickerNode)
        self.pickerNode.setFromCollideMask(GeomNode.getDefaultCollideMask())
        self.pickerRay = CollisionRay()
        self.pickerNode.addSolid(self.pickerRay)
        self.picker.addCollider(self.pickerNP, self.pq)

    def _setupCamera(self):
        """Use a fixed camera looking at the origin."""
        self.camera = FixedCamera(self._inputMgr)
        self.camera.setPos(0,-40,15)
        self.camera.lookAt(0,0,0)
        self.scene.camera = self.camera

    def _registerToCommands(self):
        """Editing-specific bindings on top of the common ones."""
        super(EditingView, self)._registerToCommands()
        self._inputMgr.createSchemeAndSwitch(self.INPUT_SCHEME)
        self._inputMgr.bindEvent("space", event.NEW_ROW)
        self._inputMgr.bindCallback("mouse1", self._onMousePress)
class SimulatingView(EditorView):
    """
    Game-simulation view: drives the player ball with the keyboard.

    Accepted inputs:
    - i -> move the ball forward
    - j -> move the ball left
    - k -> move the ball back
    - l -> move the ball right
    """

    INPUT_SCHEME = "simulating"

    def __init__(self, inputMgr):
        super(SimulatingView, self).__init__(inputMgr)
        self._isPlayerSet = False

    def _setupCamera(self):
        # Camera that chases the ball.
        cam = TheBallCamera(self._inputMgr)
        cam.setPos(0, -40, 15)
        cam.lookAt(0, 0, 0)
        self.camera = cam
        self.scene.camera = cam

    def _registerToCommands(self):
        super(SimulatingView, self)._registerToCommands()
        self._inputMgr.createSchemeAndSwitch(self.INPUT_SCHEME)
        # Each key press binds a movement force, its release a zero force.
        for key, force in (("i", [0, 1.5, 0]),
                           ("j", [-2, 0, 0]),
                           ("k", [0, -0.5, 0]),
                           ("l", [2, 0, 0])):
            self._inputMgr.bindEvent(key, event.MOVE_PLAYER, force)
            self._inputMgr.bindEvent(key + "-up", event.MOVE_PLAYER, [0, 0, 0])

    def setPlayer(self, actorID):
        # Hook the chase camera onto the entity identified by actorID.
        entity = self.scene.getEntityByID(actorID)
        self.camera.setTarget(entity.render.nodepath)
        self._isPlayerSet = True
        logger.debug("Player set to: %s" % entity)

    def enable(self):
        # No cursor while simulating.
        self.camera.showCursor(False)
        super(SimulatingView, self).enable()
# TODO create actors and create geometry from here using the physic manager
class EditorLogic(AbstractLogic):
    """
    The editor allows to construct the games by managing 3D objects,
    it allows also to debug and test the game.

    TODO create a SceneDelegate object to deal with scene stuff
    """
    def __init__(self, view):
        """Set up the logic for *view* and populate the initial scene."""
        super(EditorLogic, self).__init__(view)
        # copied objects are stored here.
        self._copyMemory = []
        # FIXME hard-coded developer path; should come from configuration.
        self._sceneFile = '/home/mdinacci/Work/MD/rtw/editor/res/scenes/editor_start_1.rtw'
        #self.loadScene(self._sceneFile)
        self._createInitialScene()

    def getSavedScene(self):
        """Return the path of the last saved/loaded scene file ('' if none)."""
        return self._sceneFile

    def _createInitialScene(self):
        """Populate the scene with the environment and the player ball."""
        # create some background entities to populate a bit the space
        self.view.addToScene(GOM.createEntity(environment_params.copy()))
        #self._track = GOM.createEntity(track_params.copy())
        #self.view.addToScene(self._track)
        # create player
        self._player = GOM.createEntity(golfball.copy())
        self.view.addToScene(self._player)

    def _subscribeToEvents(self):
        self.listener = SafeDirectObject()
        self.listener.accept(event.NEW_ROW, self.addRow)
        self.listener.accept(event.MOVE_PLAYER, self._movePlayer)

    @eventCallback
    def _movePlayer(self, xForce, yForce, zForce):
        """Apply a force vector to the player entity (simulation mode)."""
        logger.info("Moving player with vector force: %d,%d,%d"
                    % (xForce, yForce, zForce))
        # FIXME refactor: ideally the scene would take the three force
        # components in a single call.
        for name, value in (("xForce", xForce),
                            ("yForce", yForce),
                            ("zForce", zForce)):
            self.view.scene.editEntity(self._player.UID,
                                       getPropertyPath(name), value)

    @eventCallback
    def addRow(self):
        """Append a new row of cells to the track."""
        # NOTE(review): self._track is only set by loadScene (or by the
        # commented-out code in _createInitialScene); calling this without a
        # loaded track raises AttributeError.
        for entity in self._track.createRow():
            self.view.addToScene(entity)

    @guiCallback
    def loadScene(self, sceneFile):
        """Load entities from *sceneFile* (pickle format) into the scene."""
        fh = open(sceneFile, "rb")
        try:
            # load function
            load = lambda: cPickle.load(fh)
            version = load()
            entitiesNum = load()
            entities = [self.view.addToScene(GOM.createEntity(load()))
                        for idx in range(0, entitiesNum)]
        finally:
            # BUGFIX: the file handle used to be leaked (never closed).
            fh.close()
        # set player and track
        self._player = self.view.scene.getEntityByName("Ball")
        self._track = self.view.scene.getEntityByName("Track")

    @guiCallback
    def hasSavedScene(self):
        """Return True when a scene file path is known."""
        return self._sceneFile != ''

    @guiCallback
    def saveScene(self, sceneFile):
        """Serialise the whole scene to *sceneFile* (pickle format)."""
        # TODO save to a multifile
        fh = open(sceneFile, "wb")
        try:
            # save function
            dump = lambda x: cPickle.dump(x, fh, -1)
            # get the serialised data from the scene
            entities = self.view.scene.serialise()
            # store version
            dump(SCENE_FORMAT_VERSION)
            # store the number of entities, useful when unpickling
            dump(len(entities))
            # save entities (plain loop; the old side-effect list
            # comprehension built a useless list)
            for entity in entities:
                dump(entity)
        finally:
            fh.close()
        logger.info("Scene file saved to %s" % sceneFile )
        self._sceneFile = sceneFile

    @guiCallback
    def addEntityFromFile(self, fileName):
        """Not implemented yet."""
        pass

    @guiCallback
    def deleteSelectedObject(self):
        """Delete the selected entity, if any, including its physics geometry."""
        entity = self.view.getSelectedEntity()
        # BUGFIX: guard against nothing being selected -- getSelectedEntity
        # returns None in that case and the old code raised AttributeError.
        if entity is None:
            logger.info("Nothing selected, can't delete")
            return
        if entity.has_key("physics") and entity.physics.has_key("geom"):
            POM.removeGeometryTo(entity)
        self.view.deleteFromScene(entity)

    @guiCallback
    def copySelectedObject(self):
        """Remember the currently selected entity for a later paste."""
        entity = self.view.getSelectedEntity()
        if entity is not None:
            self._copyMemory.append(entity)

    @guiCallback
    def editObject(self, eid, property, newValue):
        """Change *property* of the entity *eid* to *newValue*."""
        self.view.scene.editEntity(eid, property, newValue)

    @guiCallback
    def pasteSelectedObject(self):
        """Clone the last copied entity into the scene, slightly offset."""
        if len(self._copyMemory) > 0:
            params = self._copyMemory.pop().serialise()
            # slightly shifts the pasted object respect the original
            params.position.x += 2
            params.position.z += 2
            # I need to create a new ID for the pasted entity, I can't rely
            # on GOM because it will reuse the existing one, therefore creating
            # an entity with the same ID as the copied one.
            newUid = GOM.generateUID()
            params._uid = newUid
            self.view.addToScene(GOM.createEntity(params))

    def showPlayer(self):
        """Attach the camera to the player (simulating view only)."""
        logger.debug("Showing player")
        if hasattr(self._view, "setPlayer"):
            self._view.setPlayer(self._player.UID)

    def hidePlayer(self):
        """ Hide the ball as we need it only in simulating mode """
        logger.debug("Hiding player")
        self._view.scene.hideEntityByID(self._player.UID)

    def update(self, task):
        """Per-frame logic task: advance the physics simulation."""
        # TODO
        # update game state
        # run ai behavior
        # trigger new events
        # run physics simulation
        POM.update(self.view.scene)
        # update particle systems
        # moves animation forward for visible characters
        # update player's position and cameras
        return task.cont
class EditorApplication(AbstractApplication):
    """Top level application object: wires together QT, Panda3D, the views
    and the logic, and owns the main loop."""

    # accumulated frame delta-time (only used by the disabled fixed-step code)
    dta = 0

    def __init__(self, qtApp):
        super(EditorApplication, self).__init__()
        self._isRunning = True
        self._isPaused = False
        self._qtApp = qtApp

    def step(self):
        """Advance the Panda3D task manager once; used as the QT idle hook."""
        taskMgr.step()
        #self.dta += globalClock.getDt()
        #while self.dta > self.stepSize:
        #    self.dta -= self.stepSize
        #    taskMgr.step()
        #time.sleep(0.0001)

    def shutdown(self):
        """Request the application to stop running."""
        logger.info("Shutdown requested")
        self._isRunning = False

    def restore(self):
        """Resume after a shutdown request and re-enter the main loop."""
        self._isRunning = True
        self.dta = 0
        taskMgr.step()
        self.run()

    def run(self):
        """
        Main loop of the application

        First step, create the processes that will be constantly updated
        Second, run them.
        Third, destroy them

        Now the loop is handled by QT, so all the tasks are executed when the
        QT decides to execute the idle function I set up.
        """
        logger.debug("Starting application")
        # Create processes
        self._createProcesses()
        self._gui.show()
        self._qtApp.exec_()

    def _createProcesses(self):
        """Register the per-frame tasks with the Panda3D task manager."""
        # Start processes in the correct order
        # - logic update
        #   - physic update, logic takes care
        # - view update
        #   - input update view does it
        #   - scene update view does it
        #   - gui update view does it
        # - view render
        #   - scene render view does it
        #   - gui render view does it
        logger.debug("Creating processes")
        taskMgr.add(self._view.readInput, "read-input")
        taskMgr.add(self._logic.update, "logic-update")
        taskMgr.add(self._view.update, "view-update")
        taskMgr.add(self._view.render, "view-render")
        #taskMgr.add(self._mouseWatcher.update, "mw-update")

    def _shutDownProcesses(self):
        """Stop all tasks and tear down the Panda3D base."""
        taskMgr.stop()
        self.nbase.userExit()

    @eventCallback
    def _switchView(self, view):
        """Switch the active view; *view* is one of the self._views keys."""
        # idiomatic membership test (was: view in self._views.keys())
        if view in self._views:
            # don't switch to the same view
            if self._view != self._views[view]:
                logger.debug("Switching to %s view" % view)
                self._view.disable()
                self._view = self._views[view]
                self._logic.view = self._view
                self._view.enable()
                # BUGFIX: was `view is "simulating"` -- identity comparison
                # with a string literal only works by accident of CPython
                # string interning; equality is the correct test.
                if view == "simulating":
                    self._logic.showPlayer()
        else:
            logger.error("View %s doesn't exists" % view)

    def _subscribeToEvents(self):
        self.listener = SafeDirectObject()
        self.listener.accept(event.SWITCH_VIEW, self._switchView)
        self.listener.accept(event.REQUEST_SHUTDOWN, self.shutdown)

    def _createLogicAndView(self):
        """Create the Panda3D window, embed it into the QT GUI, then build
        the three views and the logic."""
        # TODO override ShowBase in order to use only what we really need
        self.nbase = ShowBase()
        self.nbase.windowType = "onscreen"
        #taskMgr.popupControls()

        self._mouseWatcher = MouseWatcher(self.nbase)

        self._guiPresenter = GUIPresenter()
        self._guiPresenter.setIdleCallback(self.step)

        self._gui = EditorGUI(self._guiPresenter)
        winHandle = self._gui.getHandle()

        # Parent the Panda3D window inside the QT widget.
        wp = WindowProperties().getDefault()
        wp.setOrigin(0,0)
        wp.setSize(self._gui.width(), self._gui.height())
        wp.setParentWindow(int(winHandle)) # must be an int or it won't work on windows

        self.nbase.openDefaultWindow(startDirect=False, props=wp)
        self._gui.setPandaWindow(self.nbase.win)

        inp = InputManager(self.nbase)
        self._views = {"editing": EditingView(inp),
                       "roaming": RoamingView(inp),
                       "simulating": SimulatingView(inp)}
        self._view = self._views["roaming"]
        self._view.enable()
        self._view.setSceneRootNode(self.nbase.render)

        self._logic = EditorLogic(self._view)

        # don't change the order
        #self._guiPresenter.setPandaController(self._views["editing"])
        self._guiPresenter.setPandaController(self._logic)
        self._guiPresenter.setView(self._gui)
        # FIXME
        self._guiPresenter.setModel(self._views["editing"].scene)

        # set a fixed frame rate
        from pandac.PandaModules import ClockObject
        FPS = 40
        globalClock = ClockObject.getGlobalClock()
        globalClock.setMode(ClockObject.MLimited)
        globalClock.setFrameRate(FPS)
if __name__ == "__main__":
    # Create the QT application first, then hand control to the editor loop.
    edApp = EditorApplication(QApplication(['']))
    edApp.run()
| {
"content_hash": "c8fd823cb4bfc54638cd37a462b7338a",
"timestamp": "",
"source": "github",
"line_count": 655,
"max_line_length": 91,
"avg_line_length": 34.149618320610685,
"alnum_prop": 0.6192328326180258,
"repo_name": "mdinacci/rtw",
"id": "28109bf120a3a5bcbce8bc8b8f1ae812c7925b5b",
"size": "22392",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "editor/src/main.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "577524"
},
{
"name": "Shell",
"bytes": "1031"
}
],
"symlink_target": ""
} |
"""
This is a random gaussian noise generator module type 2.
This module generates the signal with oversampling=1 and then oversamples the signal
to the wanted representation sampling frequency. |br|
The generator is able to generate N random signals with a given min and max
frequency components. |br|
*Examples*:
Please go to the *examples/signals* directory for examples on how to use
the generator. |br|
*Settings*:
Parameters of the generator described below.
Take a look on '__parametersDefine' function for more info on the
parameters.
Parameters of the generator are attributes of the class which must/can
be set before the generator run.
Required parameters:
- a. **tS** (*float*): time of a signals
- b. **fR** (*float*): signals' representation sampling frequency
Optional parameters:
- c. **fMin** (*float*): minimum frequency component in the signal
[default = not regulated]
- d. **fMax** (*float*): maximum frequency component in the signal
[default = not regulated]
- e. **iP** (*float*): signals' power [default = 1W]
- f. **nSigs** (*int*): the number of signals to be generated
[default = 1]
Parameters given below are optional filter parameters.
These parameters describe the filter which limits the signals' frequency components.
The filter is applied only if **fMin** or **fMax** is given by user.
- g. **strFilt** (*string*): filter type. The allowed values are:
'butter', 'cheby1', 'cheby2', 'ellip', 'bessel'.
[default = 'butter']
- h. **nFiltOrd** (*int*): the fitler order [default = 10]
- i. **iRp** (*float*): max ripple in the filter's pass band.
Applicable to Chebyshev and elliptic filt. only.
[default = 0.1]
- j. **iRs** (*float*): min attenuation in the filter's stopband
Applicable to Chebyshev and elliptic filt. only.
[default = 60]
- k. **bMute** (*int*): mute the console output from the generator
[default = 0]
*Output*:
Description of the generator output is below.
This is the list of attributes of the generator class which are available
after calling the 'run' method:
- a. **mSig** (*Numpy array 2D*): Matrix with output signals,
one signal p. row
- b. **nSmp** (*int*): The number of samples in the signals
- c. **vP** (*Numpy array 1D*): Vector with the power of signals
*Author*:
Jacek Pierzchlewski, Aalborg University, Denmark. <jap@es.aau.dk>
*Version*:
1.0 | 20-JAN-2016 : * Version 1.0 released. |br|
*License*:
BSD 2-Clause
"""
from __future__ import division
import numpy as np
import rxcs
class gaussNoise2(rxcs._RxCSobject):
    """Random gaussian noise generator (type 2).

    Generates the signal at oversampling = 1 and then oversamples it to the
    requested representation sampling frequency (see the module docstring).
    """

    def __init__(self, *args):
        rxcs._RxCSobject.__init__(self)          # Make it a RxCS object

        self.strRxCSgroup = 'Signal generator'   # Name of group of RxCS modules
        self.strModuleName = 'Random gaussian noise (type 2)'  # Module name

        self.__parametersDefine()                # Define the parameters

        # Import tools
        self.gaussNoise = rxcs.sig.gaussNoise()          # Import basic gaussian noise generator
        self.oversampler = rxcs.sig.oversampler()        # Import oversampler
        self.powerRegulator = rxcs.sig.powerRegulator()  # Power regulator

    def __parametersDefine(self):
        """
        Internal method which defines the parameters
        """
        # Representation sampling frequency
        self.paramAddMan('fR', 'Representation sampling frequency', unit='Hz')
        self.paramType('fR', (int, float))
        self.paramH('fR', 0)       # Rep. samp. freq. must be higher than zero
        self.paramL('fR', np.inf)  # ...and lower than infinity

        # Time of signal
        self.paramAddMan('tS', 'Signal time', unit='s')
        self.paramType('tS', (float, int))
        self.paramH('tS', 0)       # Time must be higher than zero
        self.paramL('tS', np.inf)  # ...and lower than infinity

        # Minimum frequency of the signal
        self.paramAddOpt('fMin', 'Minimum frequency component in the signal', unit='Hz')
        self.paramType('fMin', (float, int))
        self.paramHE('fMin', 0)       # must be >= 0...
        self.paramL('fMin', 'fMax')   # ...and strictly below fMax

        # Maximum frequency of the signal
        self.paramAddOpt('fMax', 'Maximum frequency component in the signal', unit='Hz')
        self.paramType('fMax', (float, int))
        self.paramH('fMax', 0)               # must be > 0...
        self.paramLE('fMax', 'fR', mul=0.5)  # ...and at most fR/2 (Nyquist)

        # Power of a signal
        self.paramAddOpt('iP', 'Signal power', unit='W', default=1)
        self.paramType('iP',(float, int))
        self.paramH('iP', 0)       # Power of the signal must be higher than zero
        self.paramL('iP', np.inf)  # ...and lower than infinity

        # The number of signals
        self.paramAddOpt('nSigs', 'The number of signals', unit='', default=1)
        self.paramType('nSigs',(int))
        self.paramH('nSigs', 0)       # The number of signals must be higher than zero
        self.paramL('nSigs', np.inf)  # ...and lower than infinity

        # --------------------------------------------------------------------
        # Filter parameters:

        # Filter type
        self.paramAddOpt('strFilt', 'Filter type', unit='', default='butter')
        self.paramType('strFilt',str)
        self.paramAllowed('strFilt',['butter', 'cheby1', 'cheby2', 'ellip', 'bessel'])

        # Filter order
        self.paramAddOpt('nFiltOrd', 'Filter order', unit='', default=10)
        self.paramType('nFiltOrd',int)
        self.paramHE('nFiltOrd',1)
        self.paramLE('nFiltOrd', 100)

        # Max ripple in the pass band
        self.paramAddOpt('iRp', 'Max ripple in the passband', unit='db', default=0.1, noprint=1)
        self.paramType('iRp',(float, int))
        self.paramH('iRp',0)

        # Min attenuation in the stopband
        self.paramAddOpt('iRs', 'Min attenuation in the stopband', unit='db', default=60, noprint=1)
        self.paramType('iRs',(float, int))
        self.paramH('iRs',0)

        # --------------------------------------------------------------------
        # Mute the output flag
        self.paramAddOpt('bMute', 'Mute the output', noprint=1, default=0)
        self.paramType('bMute', int)       # Must be of int type
        self.paramAllowed('bMute',[0, 1])  # It can be either 1 or 0

    def run(self):
        """
        Run method, which starts the generator
        """
        self.parametersCheck()   # Check if all the needed parameters are in place and are correct
        self.parametersPrint()   # Print the values of parameters

        self.engineStartsInfo()  # Info that the engine starts
        self.__engine()          # Run the engine
        self.engineStopsInfo()   # Info that the engine ends

        return self.__dict__     # Return dictionary with the parameters

    def __engine(self):
        """
        Engine of the function
        """
        # If the user did not give a maximum frequency, fall back to Nyquist
        if not self.wasParamGiven('fMax'):
            self.fMax = self.fR / 2

        # Generate the basic noise signal (at oversampling ratio 1)
        self.gaussNoise.fR = 2*self.fMax
        self.gaussNoise.tS = self.tS
        # NOTE(review): 'fMin' is optional with no declared default; confirm
        # the rxcs framework defines the attribute when the user omits it.
        self.gaussNoise.fMin = self.fMin
        self.gaussNoise.iP = 1
        self.gaussNoise.nSigs = self.nSigs
        self.gaussNoise.strFilt = self.strFilt
        self.gaussNoise.nFiltOrd = self.nFiltOrd
        self.gaussNoise.iRp = self.iRp
        self.gaussNoise.iRs = self.iRs
        self.gaussNoise.bMute = 1
        self.gaussNoise.run()

        # Oversample the generated signal up to the representation frequency
        self.oversampler.mSig = self.gaussNoise.mSig
        self.oversampler.iFLow = self.gaussNoise.fR
        self.oversampler.iFHigh = self.fR
        self.oversampler.bMute = 1
        self.oversampler.run()

        # Regulate the power of the signal and assign the signal with the regulated power
        # as the output signal
        self.powerRegulator.mSig = self.oversampler.mSigOversamp
        self.powerRegulator.iP = self.iP
        self.powerRegulator.bMute = 1
        self.powerRegulator.run()
        self.mSig = self.powerRegulator.mSigOut
        self.vP = self.powerRegulator.vP

        # Compute the number of samples in the output signal
        self.nSmp = int(np.round(self.fR * self.tS))
        return
| {
"content_hash": "5a6916f0f89395e4f2de1c94771996ab",
"timestamp": "",
"source": "github",
"line_count": 241,
"max_line_length": 105,
"avg_line_length": 36.531120331950206,
"alnum_prop": 0.578032712403453,
"repo_name": "JacekPierzchlewski/RxCS",
"id": "0414441bab0eae4fefb0727228c9b0245dfdde14",
"size": "8804",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rxcs/sig/gaussNoise2.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Makefile",
"bytes": "28058"
},
{
"name": "Python",
"bytes": "803032"
}
],
"symlink_target": ""
} |
import rospy
from threading import Timer
from flexbe_core.logger import Logger
class ProxyPublisher(object):
    """
    A proxy for publishing topics.
    """
    # shared registry of topic name -> rospy.Publisher (class-level on purpose)
    _topics = {}

    def __init__(self, topics=None, _latch=False, _queue_size=100):
        """
        Initializes the proxy with optionally a given set of topics.
        Automatically creates a publisher for sending status messages.

        @type topics: dictionary string - message class
        @param topics: A dictionary containing a collection of topic - message type pairs.

        @type _latch: bool
        @param _latch: Defines if messages on the given topics should be latched.

        @type _queue_size: int
        @param _queue_size: Defines the queue size of the new publishers.
        """
        # BUGFIX: the default used to be a mutable dict literal ({}); use
        # None as the sentinel instead.
        if topics is not None:
            for topic, msg_type in topics.items():
                self.createPublisher(topic, msg_type, _latch, _queue_size)

    def createPublisher(self, topic, msg_type, _latch=False, _queue_size=100):
        """
        Adds a new publisher to the proxy (no-op if it already exists).

        @type topic: string
        @param topic: The topic to publish on.

        @type msg_type: a message class
        @param msg_type: The type of messages of this topic.

        @type _latch: bool
        @param _latch: Defines if messages on the given topics should be latched.

        @type _queue_size: int
        @param _queue_size: Defines the queue size of the publisher.
        """
        if topic not in ProxyPublisher._topics:
            ProxyPublisher._topics[topic] = rospy.Publisher(topic, msg_type, latch=_latch, queue_size=_queue_size)

    def is_available(self, topic):
        """
        Checks if the publisher on the given topic is available.

        @type topic: string
        @param topic: The topic of interest.
        """
        return topic in ProxyPublisher._topics

    def publish(self, topic, msg):
        """
        Publishes a message on the specified topic; logs (but does not
        raise) when the topic is unknown or publishing fails.

        @type topic: string
        @param topic: The topic to publish on.

        @type msg: message class (defined when created publisher)
        @param msg: The message to publish.
        """
        if topic not in ProxyPublisher._topics:
            Logger.logwarn('ProxyPublisher: topic %s not yet registered!' % topic)
            return
        try:
            ProxyPublisher._topics[topic].publish(msg)
        except Exception as e:
            Logger.logwarn('Something went wrong when publishing to %s!\n%s' % (topic, str(e)))

    def wait_for_any(self, topic, timeout=5.0):
        """
        Blocks until there are any subscribers to the given topic, or the
        timeout expires. Returns True when a subscriber was found.

        @type topic: string
        @param topic: The topic to publish on.

        @type timeout: float
        @param timeout: How many seconds should be the maximum blocked time.
        """
        pub = ProxyPublisher._topics.get(topic)
        if pub is None:
            Logger.logerr("Publisher %s not yet registered, need to add it first!" % topic)
            return False
        # Print a "still waiting" warning if it takes longer than half a
        # second; remember whether it fired so we can report success later.
        warning_sent = []

        def _delayed_warning():
            self._print_wait_warning(topic)
            warning_sent.append(True)

        t = Timer(.5, _delayed_warning)
        t.start()
        available = self._wait_for_subscribers(pub, timeout)
        # BUGFIX: Timer.cancel() never raises when the timer has already
        # fired, so the old try/except never detected a sent warning and the
        # success message below was unreachable.
        t.cancel()
        if not available:
            Logger.logerr("Waiting for subscribers on %s timed out!" % topic)
            return False
        if warning_sent:
            Logger.loginfo("Finally found subscriber on %s..." % (topic))
        return True

    def _print_wait_warning(self, topic):
        """Log that we are still waiting for subscribers on *topic*."""
        Logger.logwarn("Waiting for subscribers on %s..." % (topic))

    def _wait_for_subscribers(self, pub, timeout=5.0):
        """Poll *pub* at 100 Hz until it has a connection or *timeout* passes."""
        starting_time = rospy.get_rostime()
        rate = rospy.Rate(100)
        while not rospy.is_shutdown():
            elapsed = rospy.get_rostime() - starting_time
            if elapsed.to_sec() >= timeout:
                return False
            if pub.get_num_connections() >= 1:
                return True
            rate.sleep()
        return False
| {
"content_hash": "90e66cd2fd01e5cd32d4e7fe6ed95f67",
"timestamp": "",
"source": "github",
"line_count": 121,
"max_line_length": 114,
"avg_line_length": 33.90909090909091,
"alnum_prop": 0.5956617109432123,
"repo_name": "team-vigir/flexbe_behavior_engine",
"id": "9503d795ed3752b0ccd2fbb08731f558541b3f64",
"size": "4125",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "flexbe_core/src/flexbe_core/proxy/proxy_publisher.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CMake",
"bytes": "8992"
},
{
"name": "Python",
"bytes": "204730"
},
{
"name": "Shell",
"bytes": "2896"
}
],
"symlink_target": ""
} |
from setuptools import setup, find_packages
# Package metadata hoisted into named constants so the setup() call stays short.
REQUIREMENTS = ['colorama', 'Fabric3', 'pyvmomi', 'pywinrm', 'six']
CLASSIFIERS = [
    'Development Status :: 5 - Production/Stable',
    'Intended Audience :: Developers',
    'Intended Audience :: System Administrators',
    'License :: OSI Approved :: MIT License',
    'Operating System :: OS Independent',
    'Programming Language :: Python',
    'Programming Language :: Python :: 2',
    'Programming Language :: Python :: 2.7',
    'Programming Language :: Python :: 3',
    'Programming Language :: Python :: 3.4',
    'Programming Language :: Python :: 3.5',
    'Programming Language :: Python :: 3.6',
    'Topic :: Software Development',
]

setup(
    name='vcdriver',
    version='4.2.0',
    description='A vcenter driver based on pyvmomi, fabric and pywinrm',
    url='https://github.com/Lantero/vcdriver',
    author='Carlos Ruiz Lantero',
    author_email='carlos.ruiz.lantero@gmail.com',
    license='MIT',
    install_requires=REQUIREMENTS,
    packages=find_packages(),
    classifiers=CLASSIFIERS,
)
| {
"content_hash": "deb79351847f62f1bcb2581783fd2319",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 74,
"avg_line_length": 36.93103448275862,
"alnum_prop": 0.61531279178338,
"repo_name": "Lantero/vcenter-driver",
"id": "5a009e1d0f18cc26579a7dfb5f26dbb196060e7a",
"size": "1071",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7071"
}
],
"symlink_target": ""
} |
import json
from flask import Blueprint
from flask import current_app
from flask import make_response
from flask import request
from flask import render_template
from flask import url_for
from discograph import exceptions
from discograph import helpers
# Blueprint collecting the UI routes; templates live next to this module.
blueprint = Blueprint('ui', __name__, template_folder='templates')

# Relationship roles applied when the request does not specify any.
default_roles = (
    'Alias',
    'Member Of',
    'Sublabel Of',
    'Released On',
)
@blueprint.route('/')
def route__index():
    """Render the landing page with an empty graph payload."""
    import discograph
    flask_app = current_app._get_current_object()
    roles, year = helpers.parse_request_args(request.args)
    if not roles:
        roles = default_roles
    page = render_template(
        'index.html',
        application_url=flask_app.config['APPLICATION_ROOT'],
        initial_json='var dgData = null;',
        is_a_return_visitor=request.cookies.get('is_a_return_visitor'),
        multiselect_mapping=discograph.CreditRole.get_multiselect_mapping(),
        og_title='Disco/graph: visualizing music as a social graph',
        og_url=url_for(request.endpoint, roles=roles),
        on_mobile=request.MOBILE,
        original_roles=roles,
        original_year=year,
        title='Disco/graph: Visualizing music as a social graph',
    )
    response = make_response(page)
    # Remember the visitor so the intro overlay is only shown once.
    response.set_cookie('is_a_return_visitor', 'true')
    return response
@blueprint.route('/<entity_type>/<int:entity_id>')
def route__entity_type__entity_id(entity_type, entity_id):
    """Render the page centered on the network of one artist or label."""
    import discograph
    app = current_app._get_current_object()
    parsed_args = helpers.parse_request_args(request.args)
    original_roles, original_year = parsed_args
    if not original_roles:
        original_roles = default_roles
    if entity_type not in ('artist', 'label'):
        raise exceptions.APIError(message='Bad Entity Type', status_code=404)
    on_mobile = request.MOBILE
    # fetch the (possibly cached) relationship network for the entity
    data = helpers.get_network(
        entity_id,
        entity_type,
        on_mobile=on_mobile,
        cache=True,
        roles=original_roles,
        )
    if data is None:
        raise exceptions.APIError(message='No Data', status_code=500)
    # embed the graph JSON directly into the rendered page
    initial_json = json.dumps(
        data,
        sort_keys=True,
        indent=4,
        separators=(',', ': '),
        )
    initial_json = 'var dgData = {};'.format(initial_json)
    entity_name = data['center']['name']
    is_a_return_visitor = request.cookies.get('is_a_return_visitor')
    key = '{}-{}'.format(entity_type, entity_id)
    #url = '/{}/{}'.format(entity_type, entity_id)
    url = url_for(
        request.endpoint,
        entity_type=entity_type,
        entity_id=entity_id,
        roles=original_roles,
        )
    title = 'Disco/graph: {}'.format(entity_name)
    multiselect_mapping = discograph.CreditRole.get_multiselect_mapping()
    rendered_template = render_template(
        'index.html',
        application_url=app.config['APPLICATION_ROOT'],
        initial_json=initial_json,
        is_a_return_visitor=is_a_return_visitor,
        key=key,
        multiselect_mapping=multiselect_mapping,
        og_title='Disco/graph: The "{}" network'.format(entity_name),
        og_url=url,
        on_mobile=on_mobile,
        original_roles=original_roles,
        original_year=original_year,
        title=title,
        )
    response = make_response(rendered_template)
    # Remember the visitor so the intro overlay is only shown once.
    response.set_cookie('is_a_return_visitor', 'true')
    return response
"content_hash": "d30b66f7a7eeef439dcd87fad4eabf47",
"timestamp": "",
"source": "github",
"line_count": 113,
"max_line_length": 77,
"avg_line_length": 32.64601769911504,
"alnum_prop": 0.6457034426673895,
"repo_name": "josiah-wolf-oberholtzer/discograph",
"id": "ac2a57286b4f5b8563bbb92c0f36dad3453fd357",
"size": "3715",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "discograph/ui.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "15028"
},
{
"name": "HTML",
"bytes": "21647"
},
{
"name": "JavaScript",
"bytes": "118075"
},
{
"name": "Makefile",
"bytes": "128"
},
{
"name": "Python",
"bytes": "322033"
}
],
"symlink_target": ""
} |
from . import methods
from .registry import write_elem, read_elem, get_spec, _REGISTRY
| {
"content_hash": "ccfb290023dfcf9999337d5edc8d6154",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 64,
"avg_line_length": 43.5,
"alnum_prop": 0.7701149425287356,
"repo_name": "theislab/anndata",
"id": "162ecd77552f7c32126ce5af02e170a82160f69b",
"size": "87",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "anndata/_io/specs/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "493297"
}
],
"symlink_target": ""
} |
from bs4 import element
import re
def calculate_density(text, max_line=80):
    r"""Return the word density of *text*.

    Words are matches of ``[^\W\d_]+`` (runs of alphabetic characters,
    Unicode-aware).  When *text* spans more than one *max_line*-character
    line, the density is words-per-line counted over the complete lines
    only; shorter texts simply return their raw word count as a float.
    """
    # Raw string: \W and \d are regex escapes, not string escapes; a plain
    # string literal raises SyntaxWarning on modern Python.
    regexp = r"[^\W\d_]+"
    total_len = len(text)
    # Floor division: identical on Python 2 and 3 (int(a / b) was only
    # correct under Python 2's integer division).
    full_lines = total_len // max_line
    if full_lines > 0:
        clipped = text[:max_line * full_lines]
        found = re.findall(regexp, clipped, re.UNICODE)
        return float(len(found)) / float(full_lines)
    found = re.findall(regexp, text, re.UNICODE)
    return float(len(found))
def max_density(tag):
    """Return the largest word density found anywhere under *tag*."""
    densities = [
        calculate_density(unicode(child))
        if isinstance(child, element.NavigableString)
        else max_density(child)
        for child in tag.contents
    ]
    return max(densities) if densities else 0.0
def min_density(tag):
    """Return the smallest non-zero word density under *tag* (0.0 if none)."""
    raw = [
        calculate_density(unicode(child))
        if isinstance(child, element.NavigableString)
        else min_density(child)
        for child in tag.contents
    ]
    nonzero = [density for density in raw if density != 0.0]
    return min(nonzero) if nonzero else 0.0
def word_densities(tag):
    """Collect every non-zero word density in the subtree rooted at *tag*."""
    collected = []
    for child in tag.contents:
        if isinstance(child, element.NavigableString):
            collected.append(calculate_density(unicode(child)))
        else:
            # Children already return filtered lists; just merge them.
            collected.extend(word_densities(child))
    return [density for density in collected if density != 0.0]
def average_density(tag):
    """Return the mean of all non-zero word densities under *tag* (0.0 if none)."""
    densities = word_densities(tag)
    if not densities:
        return 0.0
    return sum(densities) / float(len(densities))
"content_hash": "d1ad04c20855c3ce02c777db9c5223ec",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 60,
"avg_line_length": 25.384615384615383,
"alnum_prop": 0.5963636363636363,
"repo_name": "tgodzik/automatic-segmentation-tool",
"id": "ec6434c4be3c3b825d2badd9679401cfe9db30f1",
"size": "1650",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "segmentation/algorithms/densinometric.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "23179"
},
{
"name": "Shell",
"bytes": "49"
}
],
"symlink_target": ""
} |
# Rotate the camera feed by 180 degrees before any tracking filters run.
from org.myrobotlab.opencv import OpenCVFilterAffine
affine = OpenCVFilterAffine("affine")
affine.setAngle(180.0)
# Serial port of the InMoov controller; adjust per machine.
leftPort= "/dev/cu.wchusbserial1450"
i01 = Runtime.start("i01","InMoov")
# Eye tracking takes two extra numeric args (10, 12) -- presumably servo
# pins; confirm against the InMoov service API.
headTracking = i01.startHeadTracking(leftPort)
eyesTracking = i01.startEyesTracking(leftPort,10,12)
# Apply the rotation filter to both trackers, then enable face detection.
i01.headTracking.addPreFilter(affine)
i01.eyesTracking.addPreFilter(affine)
sleep(1)
i01.headTracking.faceDetect()
i01.eyesTracking.faceDetect()
| {
"content_hash": "7483f6dd35cc5d2b8fc465884640b725",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 52,
"avg_line_length": 31.071428571428573,
"alnum_prop": 0.8137931034482758,
"repo_name": "MyRobotLab/pyrobotlab",
"id": "f6679a9438ace769e12392db8c1124a0324e1f4e",
"size": "435",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "home/Alessandruino/PreFilterExample.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1827"
},
{
"name": "C",
"bytes": "126258"
},
{
"name": "C++",
"bytes": "373018"
},
{
"name": "Java",
"bytes": "156911"
},
{
"name": "Processing",
"bytes": "17022"
},
{
"name": "Python",
"bytes": "3309101"
},
{
"name": "Shell",
"bytes": "4635"
},
{
"name": "VBA",
"bytes": "11115"
}
],
"symlink_target": ""
} |
def find_pandigital_products():
    """Yield each 4-digit product p where multiplicand x multiplier = p
    uses the digits 1..9 exactly once across all three numbers.

    A pandigital identity needs 9 digits total, so with a 4-digit product
    the factors contribute 5 digits: either 1x4 or 2x3 digits.  The same
    product may be yielded more than once when it has several factorings;
    callers should deduplicate (see solve_p032).
    """
    pandigital = set(range(1, 10))
    for multiplicand in range(1, 100):
        # Largest multiplier keeping the product below 10**4.  Floor
        # division is required: on Python 3 "/" yields a float and
        # range() would raise TypeError.
        max_multiplier = 10**4 // multiplicand
        # 1-digit multiplicands need a 4-digit multiplier (1+4+4 = 9 digits),
        # 2-digit multiplicands need a 3-digit one (2+3+4 = 9 digits).
        min_multiplier = 1000 if multiplicand < 10 else 100
        for multiplier in range(min_multiplier, max_multiplier):
            product = multiplicand * multiplier
            numbers = (product, multiplicand, multiplier)
            all_digits = {int(digit) for number in numbers
                          for digit in str(number)}
            if all_digits == pandigital:
                yield product
def solve_p032():
    """Sum all distinct products found by find_pandigital_products()."""
    distinct_products = set(find_pandigital_products())
    return sum(distinct_products)
if __name__ == '__main__':
    # Print the answer when run as a script.
    print(solve_p032())
| {
"content_hash": "36f6354e6ca8c72ddf75c6c042454d43",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 79,
"avg_line_length": 36.80952380952381,
"alnum_prop": 0.6015523932729625,
"repo_name": "piohhmy/euler",
"id": "18e51a72a49cf62629c9c42b6d1989e8d4b9a2c0",
"size": "774",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "p032.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "55545"
}
],
"symlink_target": ""
} |
"""
Copyright (c) 2012 Giacomo Bagnoli <g.bagnoli@asidev.com>
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import collections
import logging
__all__ = ['setup_logging', 'MultiDict']
class _NullHandler(logging.Handler):
    """Do-nothing handler, a stand-in for logging.NullHandler (py >= 2.7).

    NOTE(review): setup_logging() registers this CLASS itself, not an
    instance; `handle` being a classmethod and `level` a class attribute
    make that possible, but `level = None` fails the
    `record.levelno >= handler.level` comparison on Python 3 -- confirm
    the intended Python support matrix.
    """
    level = None
    def emit(self, record):
        # Discard the record silently.
        pass
    @classmethod
    def handle(cls, record):
        # Classmethod so the bare class can act as a registered handler.
        pass
    def createLock(self):
        # No lock needed: nothing is ever emitted.
        return None
def setup_logging():
    """Attach a null handler to the 'varnish' logger.

    Logging configuration is a task for the application, not this library;
    a null handler merely silences the "no handlers could be found"
    warning.  Prefer the stdlib ``logging.NullHandler`` when present
    (Python >= 2.7), as this docstring always intended: the previous code
    unconditionally registered the bare ``_NullHandler`` class, whose
    ``level`` of ``None`` breaks ``record.levelno >= handler.level``
    inside ``Logger.callHandlers`` on Python 3.
    """
    logger = logging.getLogger('varnish')
    if hasattr(logging, 'NullHandler'):  # Python >= 2.7 / 3.1
        logger.addHandler(logging.NullHandler())
    else:
        # Legacy fallback: register the class itself -- _NullHandler is
        # written (classmethod handle, class-level level) to support this.
        logger.addHandler(_NullHandler)
try:  # Python >= 3.3 keeps the ABCs in collections.abc
    from collections.abc import MutableMapping as _MutableMapping
except ImportError:  # Python 2 fallback
    from collections import MutableMapping as _MutableMapping


class MultiDict(_MutableMapping):
    """
    This is a modified version of MultiDict shamelessly stolen from WebOb
    http://www.webob.org/
    An ordered dictionary that can have multiple values for each key.
    Adds the methods to the normal dictionary interface.
    (c) 2005 Ian Bicking and contributors; written for Paste
    (http://pythonpaste.org) Licensed under the MIT license:
    http://www.opensource.org/licenses/mit-license.php

    Entries are stored internally as an ordered list of (key, value) pairs.
    """
    def __init__(self, *args, **kw):
        """Build from at most one mapping/iterable plus keyword pairs."""
        if len(args) > 1:
            raise TypeError("MultiDict can only be called with one positional "
                            "argument")
        if args:
            if hasattr(args[0], 'iteritems'):
                items = list(args[0].iteritems())
            elif hasattr(args[0], 'items'):
                # list() also materializes Python 3 dict views, which are
                # not lists and would break append/extend calls below.
                items = list(args[0].items())
            else:
                items = list(args[0])
            self._items = items
        else:
            self._items = []
        if kw:
            self._items.extend(kw.items())

    def __getitem__(self, key):
        """
        Return a list of all values matching the key (may be an empty list)
        """
        result = []
        for k, v in self._items:
            if key == k:
                result.append(v)
        if not result:
            raise KeyError(key)
        return result

    def __setitem__(self, key, value):
        """
        Add the key and value, not overwriting any previous value.
        """
        self._items.append((key, value))

    def __delitem__(self, key):
        """Remove every entry stored under *key*; KeyError if none exist."""
        items = self._items
        found = False
        # Iterate backwards so deletions do not shift unvisited indices.
        for i in range(len(items) - 1, -1, -1):
            if items[i][0] == key:
                del items[i]
                found = True
        if not found:
            raise KeyError(key)

    def overwrite(self, key, value):
        """
        Set the value at key, discarding previous values set if any
        """
        try:
            del self[key]
        except KeyError:
            # No previous value to discard -- only this error is expected.
            pass
        self[key] = value

    def getone(self, key):
        """
        Get one value matching the key, raising a KeyError if multiple
        values were found.
        """
        v = self[key]
        if len(v) > 1:
            raise KeyError('Multiple values match %r: %r' % (key, v))
        return v[0]

    def dict_of_lists(self):
        """
        Returns a dictionary where each key is associated with a list of values
        """
        r = {}
        for key, val in self.items():
            r.setdefault(key, []).append(val)
        return r

    def __contains__(self, key):
        for k, v in self._items:
            if k == key:
                return True
        return False

    # Python 2 style alias for ``in``.
    has_key = __contains__

    def clear(self):
        """Remove every entry in place."""
        del self._items[:]

    def copy(self):
        """Return a shallow copy with the same ordered entries."""
        return self.__class__(self)

    def setdefault(self, key, default=None):
        """Return the first value for *key*, inserting *default* if absent."""
        for k, v in self._items:
            if key == k:
                return v
        self._items.append((key, default))
        return default

    def pop(self, key, *args):
        """Remove and return the first value for *key* (or a default)."""
        if len(args) > 1:
            raise TypeError("pop expected at most 2 arguments, got %s"
                            % repr(1 + len(args)))
        for i in range(len(self._items)):
            if self._items[i][0] == key:
                v = self._items[i][1]
                del self._items[i]
                return v
        if args:
            return args[0]
        else:
            raise KeyError(key)

    def popitem(self):
        """Remove and return the most recently added (key, value) pair."""
        return self._items.pop()

    def extend(self, other=None, **kwargs):
        """Append entries from *other* (mapping or pair iterable) and kwargs."""
        if other is None:
            pass
        elif hasattr(other, 'items'):
            self._items.extend(other.items())
        elif hasattr(other, 'keys'):
            for k in other.keys():
                self._items.append((k, other[k]))
        else:
            for k, v in other:
                self._items.append((k, v))
        if kwargs:
            self.update(kwargs)

    def __repr__(self):
        # Password-like values are masked before display.
        items = map('(%r, %r)'.__mod__, _hide_passwd(self.items()))
        return '%s([%s])' % (self.__class__.__name__, ', '.join(items))

    def __len__(self):
        return len(self._items)

    def iterkeys(self):
        for k, v in self._items:
            yield k

    def keys(self):
        return [k for k, v in self._items]

    __iter__ = iterkeys

    def iteritems(self):
        return iter(self._items)

    def items(self):
        return self._items[:]

    def itervalues(self):
        for k, v in self._items:
            yield v

    def values(self):
        return [v for k, v in self._items]

    def trim(self, size):
        """Drop oldest entries so at most *size* remain."""
        if len(self) <= size:
            return
        self._items = self._items[-size:]
def _hide_passwd(items):
for k, v in items:
try:
if ('password' in k
or 'passwd' in k
or 'pwd' in k
):
yield k, '******'
else:
yield k, v
except TypeError:
yield k, v
| {
"content_hash": "4fd78f59d6d66645e90e6101f702bf43",
"timestamp": "",
"source": "github",
"line_count": 250,
"max_line_length": 79,
"avg_line_length": 27.04,
"alnum_prop": 0.5497041420118344,
"repo_name": "gbagnoli/varnish-py",
"id": "84b6b62fbd4a091a94d16ad9caf90635069322c7",
"size": "6806",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "varnish/utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "85498"
}
],
"symlink_target": ""
} |
"""Code as found in notebooks/semantic_similarity.ipynb."""
# Computing basic semantic similarities between GO terms
# Adapted from book chapter written by _Alex Warwick Vesztrocy and Christophe Dessimoz_
# How to compute semantic similarity between GO terms.
# First we need to write a function that calculates the minimum number
# of branches connecting two GO terms.
import os
from goatools.base import get_godag
from goatools.associations import dnld_assc
from goatools.semantic import semantic_similarity
from goatools.semantic import TermCounts
from goatools.semantic import get_info_content
from goatools.semantic import resnik_sim
from goatools.semantic import lin_sim
def test_semantic_similarity():
    """Computing basic semantic similarities between GO terms."""
    # Load the GO DAG and the arabidopsis annotations to score against.
    go_dag = get_godag(os.path.join(os.getcwd(), "go-basic.obo"), loading_bar=None)
    assocs = dnld_assc(os.path.join(os.getcwd(), 'gene_association.tair'), go_dag)

    # Edge-based similarity between two BP terms:
    # GO:0048364 (root development, level-03 depth-04) vs
    # GO:0044707 (single-multicellular organism process, level-02 depth-02).
    term_one = 'GO:0048364'
    term_two = 'GO:0044707'
    similarity = semantic_similarity(term_one, term_two, go_dag)
    print('\nThe semantic similarity between terms {GO1} and {GO2} is {VAL}.'.format(
        GO1=term_one, GO2=term_two, VAL=similarity))
    print(go_dag[term_one])
    print(go_dag[term_two])

    # Information content needs term counts over the annotation corpus.
    termcounts = TermCounts(go_dag, assocs)
    term = "GO:0048364"
    info = get_info_content(term, termcounts)
    print('\nInformation content ({GO}) = {INFO}\n'.format(GO=term, INFO=info))

    # Resnik: information content of the most informative common ancestor,
    # i.e. the most specific shared parent term in the GO.
    score_resnik = resnik_sim(term_one, term_two, go_dag, termcounts)
    print('Resnik similarity score ({GO1}, {GO2}) = {VAL}'.format(
        GO1=term_one, GO2=term_two, VAL=score_resnik))

    # Lin similarity for the same pair of terms.
    score_lin = lin_sim(term_one, term_two, go_dag, termcounts)
    print('Lin similarity score ({GO1}, {GO2}) = {VAL}'.format(GO1=term_one, GO2=term_two, VAL=score_lin))
if __name__ == '__main__':
    # Allow running this test directly as a script.
    test_semantic_similarity()
| {
"content_hash": "541a37b4b1e13411ee4103237e9d0e0d",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 98,
"avg_line_length": 43.93650793650794,
"alnum_prop": 0.7117052023121387,
"repo_name": "lileiting/goatools",
"id": "87b3356764a18172f8b8d0e8b8048818ebf28c1a",
"size": "2768",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_semantic_similarity.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "224437"
},
{
"name": "Makefile",
"bytes": "14930"
},
{
"name": "Python",
"bytes": "77536843"
},
{
"name": "Shell",
"bytes": "1068"
}
],
"symlink_target": ""
} |
import argparse
import json
import os
import re
import urllib.request
# Maven coordinates of the artifact this 3pp package wraps.
_REPO_URL = 'https://repo.maven.apache.org/maven2'
_GROUP_NAME = 'org/jetbrains/kotlin'
_MODULE_NAME = 'kotlin-stdlib-jdk7'
_FILE_EXT = 'jar'
# Set to a version string to pin instead of tracking upstream's latest.
_OVERRIDE_LATEST = None
# Suffix appended to the upstream version (e.g. "1.8.0.cr1"); stripped
# again before building download URLs.
_PATCH_VERSION = 'cr1'
def do_latest():
    """Print the latest upstream version (plus the patch suffix) to stdout."""
    if _OVERRIDE_LATEST is not None:
        print(_OVERRIDE_LATEST + f'.{_PATCH_VERSION}')
        return
    maven_metadata_url = '{}/{}/{}/maven-metadata.xml'.format(
        _REPO_URL, _GROUP_NAME, _MODULE_NAME)
    # Context manager closes the HTTP response deterministically instead
    # of leaking the connection until garbage collection.
    with urllib.request.urlopen(maven_metadata_url) as response:
        metadata = response.read().decode('utf-8')
    # Do not parse xml with the python included parser since it is susceptible
    # to maliciously crafted xmls. Only use regular expression parsing to be
    # safe. RE should be enough to handle what we need to extract.
    match = re.search('<latest>([^<]+)</latest>', metadata)
    if match:
        latest = match.group(1)
    else:
        # if no latest info was found just hope the versions are sorted and the
        # last one is the latest (as is commonly the case).
        latest = re.findall('<version>([^<]+)</version>', metadata)[-1]
    print(latest + f'.{_PATCH_VERSION}')
def get_download_url(version):
    """Print a JSON partial manifest (url/name/ext) for *version* to stdout."""
    # Patch suffixes such as ".cr1" are local-only; strip them before
    # building the upstream URL.
    head, tail = version.rsplit('.', 1)
    if tail.startswith('cr'):
        version = head
    file_url = '{0}/{1}/{2}/{3}/{2}-{3}.{4}'.format(_REPO_URL, _GROUP_NAME,
                                                    _MODULE_NAME, version,
                                                    _FILE_EXT)
    file_name = file_url.rsplit('/', 1)[-1]
    manifest = {
        'url': [file_url],
        'name': [file_name],
        'ext': '.' + _FILE_EXT,
    }
    print(json.dumps(manifest))
def main():
    """Parse the 3pp subcommand and dispatch to the matching handler."""
    parser = argparse.ArgumentParser()
    subparsers = parser.add_subparsers()

    latest_cmd = subparsers.add_parser('latest')
    latest_cmd.set_defaults(func=lambda _opts: do_latest())

    get_url_cmd = subparsers.add_parser('get_url')
    get_url_cmd.set_defaults(
        func=lambda _opts: get_download_url(os.environ['_3PP_VERSION']))

    opts = parser.parse_args()
    opts.func(opts)
if __name__ == '__main__':
    # Entry point used by the 3pp recipe.
    main()
| {
"content_hash": "8ceee3425f4a5fc21f14ce8f50aabfd0",
"timestamp": "",
"source": "github",
"line_count": 70,
"max_line_length": 79,
"avg_line_length": 31.6,
"alnum_prop": 0.5949367088607594,
"repo_name": "chromium/chromium",
"id": "f3317e1e1dc2a0f9dafc2f16006cd50b44d14129",
"size": "2479",
"binary": false,
"copies": "6",
"ref": "refs/heads/main",
"path": "third_party/android_deps/libs/org_jetbrains_kotlin_kotlin_stdlib_jdk7/3pp/fetch.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
"""
Created on Tue Mar 1 20:19:04 2016
@author: nadiablago
"""
import os
import shutil
import subprocess
import numpy as np
# Locate the sextractor configuration shipped alongside the package and
# warn the user when it cannot be found.
abspath = os.path.dirname(__file__)
SEXCONFIG = os.path.abspath(os.path.join(abspath, '../../config/daofind.sex'))
if os.path.isfile(SEXCONFIG):
    print ('Found sextractor configuration file: %s'%SEXCONFIG)
else:
    print ('Please, put the sextractor configuration file daofind.sex in: %s'%(os.path.dirname(SEXCONFIG)))
    print ('Otherwise, redefine the environmental variable SEXCONFIG to point to the location where daofind.sex is located.')
def run_sex(flist, overwrite=False):
    """Run the external ``sex`` (sextractor) binary on each file in *flist*.

    Catalogues are written to a ``sextractor/`` subdirectory next to the
    inputs, one ``.sex`` file per ``.fits`` image.  Existing catalogues are
    reused unless *overwrite* is true.  Returns the list of catalogue paths.

    NOTE(review): this changes the process working directory via os.chdir
    and relies on "image.sex" being written to the cwd by sextractor --
    confirm the daofind.sex config names its output that way.
    """
    d = os.path.dirname(flist[0])
    if d == "":
        d = "."
    os.chdir(d)
    #Create the directory where the sextracted images are going to go.
    sexdir = os.path.join(d, "sextractor")
    if (not os.path.isdir(sexdir)):
        os.makedirs(sexdir)
    newlist = []
    for f in flist:
        # Catalogue path mirrors the image name with a .sex extension.
        newimage = os.path.join(sexdir, os.path.basename(f).replace(".fits", ".sex"))
        if (os.path.isfile(newimage) and not overwrite):
            newlist.append(newimage)
            print ("Sextracted image %s already exists."%newimage)
        else:
            try:
                out = os.path.abspath(f)
                cmd="sex -c %s %s"%(SEXCONFIG, out)
                subprocess.call(cmd, shell=True)
                # sextractor writes "image.sex" in the cwd; move it into place.
                shutil.move("image.sex", newimage)
                newlist.append(newimage)
            except IOError:
                print ("IOError detected reading file",f)
                pass
    return newlist
def analyse_image(sexfile, arcsecpix):
    """Analyse a sextractor catalogue and summarise the image quality.

    Parameters
    ----------
    sexfile : str
        Path to the sextractor catalogue (whitespace-separated columns).
    arcsecpix : float
        Pixel scale in arcsec/pixel; pass 1 to obtain the FWHM in pixels.

    Returns
    -------
    tuple
        (nsources, fwhm, ellipticity, background) where nsources counts the
        clean, round, well-sized detections, fwhm is the median FWHM in
        arcsec, ellipticity the median 1 - B/A of round sources, and
        background the median background of the brightest detections.
        (0, 0, 0, 0) is returned when no usable source survives the cuts.

    Catalogue columns (in order):
        1 X_IMAGE, 2 Y_IMAGE, 3 ALPHA_J2000, 4 DELTA_J2000, 5 MAG_BEST,
        6 MAGERR_BEST, 7 FWHM_WORLD, 8 FWHM_IMAGE, 9 ELLIPTICITY,
        10 BACKGROUND, 11 FLAGS, 12 A_IMAGE, 13 B_IMAGE, 14 THETA_IMAGE,
        15 PETRO_RADIUS
    """
    # np.float was removed in NumPy 1.24; the builtin float is equivalent.
    s = np.genfromtxt(sexfile, comments="#", dtype=[
        ("x", float), ("y", float), ("ra", float), ("dec", float),
        ("mag", float), ("magerr", float), ("fwhm_world", float),
        ("fwhm_image", float), ("ellipticity", float),
        ("background", float), ("flags", float), ("a_image", float),
        ("b_image", float), ("theta_image", float), ("petro_radius", float)])

    # A single detection yields a 0-d structured array; treat it (and an
    # empty/missing catalogue) as unusable.  len() would raise on a 0-d
    # array or on None, so report the size defensively.
    if s is None or s.ndim == 0 or len(s) == 0:
        nrows = 0 if s is None else int(s.size)
        print("Empty content of the file for file %s. The length of the file is %d" % (sexfile, nrows))
        return 0, 0, 0, 0

    # Keep only detections with clean extraction flags.
    s = s[s["flags"] == 0]
    nsources = len(s)
    if nsources == 0:
        return 0, 0, 0, 0
    # Keep roundish sources (ellipticity is 1 - B_IMAGE/A_IMAGE).
    s = s[s["ellipticity"] < 0.3]
    ellipticity = np.nanmedian(s["ellipticity"])
    # Keep FWHMs of at least 3 pixels and below 15 arcsec.
    s = s[(s["fwhm_image"] > 3) * (s["fwhm_image"] * arcsecpix < 15)]
    nsources = len(s)
    if nsources == 0:
        return 0, 0, 0, 0
    # Use only the brightest ~20% of survivors for the final medians.
    s = s[s["mag"] < np.percentile(s["mag"], 20)]
    fwhm = np.nanmedian(s["fwhm_image"] * arcsecpix)
    bkg = np.nanmedian(s["background"])
    return nsources, fwhm, ellipticity, bkg
def get_image_pars(image, arcsecpix):
    '''
    Run sextractor on *image* and return its quality statistics as
    (nsources, fwhm, ellipticity, background).
    '''
    catalogue = run_sex([image])[0]
    stats = analyse_image(catalogue, arcsecpix=arcsecpix)
    return stats[0], stats[1], stats[2], stats[3]
| {
"content_hash": "0c7e795f8b0b867021fb850bad321b65",
"timestamp": "",
"source": "github",
"line_count": 131,
"max_line_length": 155,
"avg_line_length": 36.091603053435115,
"alnum_prop": 0.5425126903553299,
"repo_name": "nblago/utils",
"id": "d89187fa8931064a13f73e5780d3529f5c749fac",
"size": "4752",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/photometry/sextractor.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "193117"
},
{
"name": "Shell",
"bytes": "400"
}
],
"symlink_target": ""
} |
"""Helper functions for XKNX."""
from __future__ import annotations
import asyncio
import ipaddress
import logging
import socket
from typing import cast
import ifaddr
from xknx.exceptions import XKNXException
from .const import DEFAULT_MCAST_GRP, DEFAULT_MCAST_PORT
logger = logging.getLogger("xknx.log")
async def get_default_local_ip(remote_ip: str = DEFAULT_MCAST_GRP) -> str | None:
    """Return the local ip used for communication with remote_ip.

    A UDP socket is "connected" purely so the OS selects the outgoing
    interface; getsockname() then reveals the chosen source address.
    Returns None when the route cannot be determined.
    """
    with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as sock:
        sock.setblocking(False)  # must be non-blocking for async
        loop = asyncio.get_running_loop()
        try:
            await loop.sock_connect(sock, (remote_ip, DEFAULT_MCAST_PORT))
            local_ip = sock.getsockname()[0]
            logger.debug("Using local ip: %s", local_ip)
            return cast(str, local_ip)
        except Exception:  # pylint: disable=broad-except
            # Deliberately broad: any failure only means auto-detection is
            # unavailable on this platform; callers handle the None result.
            logger.warning(
                "The system could not auto detect the source ip for %s on your operating system",
                remote_ip,
            )
            return None
def get_local_ips() -> list[ifaddr.IP]:
    """Return list of local IPv4 addresses."""
    addresses = []
    for adapter in ifaddr.get_adapters():
        for address in adapter.ips:
            if address.is_IPv4:
                addresses.append(address)
    return addresses
def get_local_interface_name(local_ip: str) -> str:
    """Return the name of the interface with the given ip."""
    for link in get_local_ips():
        if link.ip == local_ip:
            return link.nice_name
    return ""
def find_local_ip(gateway_ip: str) -> str | None:
    """Find local IP address on same subnet as gateway."""
    gateway_address = ipaddress.IPv4Address(gateway_ip)
    for candidate in get_local_ips():
        subnet = ipaddress.IPv4Network(
            (candidate.ip, candidate.network_prefix), strict=False
        )
        if gateway_address in subnet:
            logger.debug("Using interface: %s", candidate.nice_name)
            return cast(str, candidate.ip)
    logger.debug("No interface on same subnet as gateway found.")
    return None
def validate_ip(address: str, address_name: str = "IP address") -> None:
    """Validate that *address* parses as an IPv4 address.

    Returns None on success; raises XKNXException otherwise.
    """
    try:
        ipaddress.IPv4Address(address)
    except ipaddress.AddressValueError as err:
        message = f"{address_name} is not a valid IPv4 address."
        raise XKNXException(message) from err
| {
"content_hash": "ab634a214d01daf651c2fb667aa3bdc7",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 97,
"avg_line_length": 36.171875,
"alnum_prop": 0.6600431965442765,
"repo_name": "XKNX/xknx",
"id": "f242662bb06098d0c9bf5860bc38e9e73c07d0a7",
"size": "2315",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "xknx/io/util.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1545198"
},
{
"name": "Shell",
"bytes": "447"
}
],
"symlink_target": ""
} |
import re
import os
import sys
try:
    import IDF
except ImportError:
    # This test case is written with tiny-test-fw.  To run it outside
    # tiny-test-fw, set the TEST_FW_PATH environment variable and insert
    # it into sys.path before importing the framework module.
    test_fw_path = os.getenv("TEST_FW_PATH")
    if test_fw_path and test_fw_path not in sys.path:
        sys.path.insert(0, test_fw_path)
    import IDF
@IDF.idf_example_test(env_tag="Example_WIFI", ignore=True)
def test_examples_protocol_esp_http_client(env, extra_data):
    """
    steps: |
      1. join AP
      2. Send HTTP request to httpbin.org

    Verifies that each HTTP operation in the example logs a 200 status
    (expectations are consumed in the order the firmware prints them).
    """
    dut1 = env.get_dut("esp_http_client", "examples/protocols/esp_http_client")
    # check and log bin size
    binary_file = os.path.join(dut1.app.binary_path, "esp-http-client-example.bin")
    bin_size = os.path.getsize(binary_file)
    IDF.log_performance("esp_http_client_bin_size", "{}KB".format(bin_size // 1024))
    IDF.check_performance("esp_http_client_bin_size", bin_size // 1024)
    # start test
    dut1.start_app()
    dut1.expect("Connected to AP, begin http example", timeout=30)
    # One expectation per HTTP verb/auth/redirect scenario in the example.
    dut1.expect(re.compile(r"HTTP GET Status = 200, content_length = (\d)"))
    dut1.expect(re.compile(r"HTTP POST Status = 200, content_length = (\d)"))
    dut1.expect(re.compile(r"HTTP PUT Status = 200, content_length = (\d)"))
    dut1.expect(re.compile(r"HTTP PATCH Status = 200, content_length = (\d)"))
    dut1.expect(re.compile(r"HTTP DELETE Status = 200, content_length = (\d)"))
    dut1.expect(re.compile(r"HTTP HEAD Status = 200, content_length = (\d)"))
    dut1.expect(re.compile(r"HTTP Basic Auth Status = 200, content_length = (\d)"))
    dut1.expect(re.compile(r"HTTP Basic Auth redirect Status = 200, content_length = (\d)"))
    dut1.expect(re.compile(r"HTTP Digest Auth Status = 200, content_length = (\d)"))
    dut1.expect(re.compile(r"HTTP Relative path redirect Status = 200, content_length = (\d)"))
    dut1.expect(re.compile(r"HTTP Absolute path redirect Status = 200, content_length = (\d)"))
    dut1.expect(re.compile(r"HTTPS Status = 200, content_length = (\d)"))
    dut1.expect(re.compile(r"HTTP redirect to HTTPS Status = 200, content_length = (\d)"), timeout=10)
    # Chunked transfers report content_length = -1 by design.
    dut1.expect(re.compile(r"HTTP chunk encoding Status = 200, content_length = -1"))
    dut1.expect(re.compile(r"HTTP Stream reader Status = 200, content_length = (\d)"))
    dut1.expect("Finish http example")
if __name__ == '__main__':
    # NOTE(review): called without env/extra_data; presumably the
    # tiny-test-fw decorator supplies them -- confirm.
    test_examples_protocol_esp_http_client()
| {
"content_hash": "5348e8fa890e9650edfbbf901ddcc384",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 102,
"avg_line_length": 47.31481481481482,
"alnum_prop": 0.6688845401174168,
"repo_name": "krzychb/rtd-test-bed",
"id": "0d161da4c272c91d48aa2b80fb91e378738f3c83",
"size": "2555",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/protocols/esp_http_client/esp_http_client_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "248929"
},
{
"name": "Batchfile",
"bytes": "9428"
},
{
"name": "C",
"bytes": "42611901"
},
{
"name": "C++",
"bytes": "10437923"
},
{
"name": "CMake",
"bytes": "316611"
},
{
"name": "CSS",
"bytes": "1340"
},
{
"name": "Dockerfile",
"bytes": "4319"
},
{
"name": "GDB",
"bytes": "2764"
},
{
"name": "Go",
"bytes": "146670"
},
{
"name": "HCL",
"bytes": "468"
},
{
"name": "HTML",
"bytes": "115431"
},
{
"name": "Inno Setup",
"bytes": "14977"
},
{
"name": "Lex",
"bytes": "7273"
},
{
"name": "M4",
"bytes": "189150"
},
{
"name": "Makefile",
"bytes": "439631"
},
{
"name": "Objective-C",
"bytes": "133538"
},
{
"name": "PHP",
"bytes": "498"
},
{
"name": "Pawn",
"bytes": "151052"
},
{
"name": "Perl",
"bytes": "141532"
},
{
"name": "Python",
"bytes": "1868534"
},
{
"name": "Roff",
"bytes": "102712"
},
{
"name": "Ruby",
"bytes": "206821"
},
{
"name": "Shell",
"bytes": "625528"
},
{
"name": "Smarty",
"bytes": "5972"
},
{
"name": "Tcl",
"bytes": "110"
},
{
"name": "TeX",
"bytes": "1961"
},
{
"name": "Visual Basic",
"bytes": "294"
},
{
"name": "XSLT",
"bytes": "80335"
},
{
"name": "Yacc",
"bytes": "15875"
}
],
"symlink_target": ""
} |
from display.handlers.base import BaseHandler
class ExamplesProfileHandler(BaseHandler):
    """Handler that renders the examples profile page."""
    def get(self):
        """Handle GET by rendering 'examples/profile.html'."""
        # Page title passed through to the template.
        title = 'ExamplesProfileHandler'
self.render('examples/profile.html', title = title, **self.render_dict) | {
"content_hash": "ce227715e774677f1e6d002f9ef7398e",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 79,
"avg_line_length": 38.166666666666664,
"alnum_prop": 0.7336244541484717,
"repo_name": "owlsn/h_crawl",
"id": "b43a80c6381df0b8724debe783c3a4f93b7f398b",
"size": "229",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "display/display/handlers/examples/profile.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "111"
},
{
"name": "HTML",
"bytes": "275"
},
{
"name": "JavaScript",
"bytes": "3257"
},
{
"name": "Python",
"bytes": "6605"
}
],
"symlink_target": ""
} |
import os
import sys
import datetime
import pytest
from msl.equipment import Config, constants, Backend
from msl.equipment.record_types import RecordDict
ROOT_DIR = os.path.join(os.path.dirname(__file__), 'db_files')
def teardown_module():
    """Clean up environment variables once every test in this module ran."""
    from cleanup_os_environ import cleanup
    cleanup()
def test_database_io_errors():
    """Each broken configuration file must raise a descriptive error."""
    failure_cases = [
        # no <path></path> tag
        ('db_err0.xml', OSError, r'<path>'),
        # database file does not exist
        ('db_err1.xml', OSError, r'Cannot find the database'),
        # unsupported database file
        ('db_err2.xml', OSError, r'Unsupported equipment-registry database'),
        # more than 1 Sheet in the Excel database
        ('db_err3.xml', ValueError, r'You must specify the name of the sheet to read'),
        # the 'equipment' item in the xml file is not a valid Equipment Record
        ('db_err4.xml', AttributeError, r'attributes'),
        # the 'equipment' item in the xml file is not a unique Equipment Record
        ('db_err5.xml', AttributeError, r'unique'),
        # the 'equipment' item in the xml file has multiple aliases
        ('db_err6.xml', ValueError, r'aliases'),
        # invalid Sheet name in Excel database
        ('db_err7.xml', ValueError, r'There is no sheet'),
    ]
    for filename, exc_type, pattern in failure_cases:
        with pytest.raises(exc_type, match=pattern):
            Config(os.path.join(ROOT_DIR, filename)).database()
def test_database():
    """Load the example db.xml and verify record/connection regex filtering."""
    path = os.path.join(ROOT_DIR, 'db.xml')
    c = Config(path)
    dbase = c.database()
    assert path == dbase.path
    assert len(dbase.records()) == 7 + 18
    assert len(dbase.records(manufacturer='NOPE!')) == 0
    assert len(dbase.records(manufacturer='^Ag')) == 10  # all records from Agilent
    assert len(dbase.records(manufacturer='^Ag', connection=True)) == 3  # all records from Agilent with a ConnectionRecord
    assert len(dbase.records(manufacturer='Agilent', model='83640L')) == 1
    assert len(dbase.records(manufacturer=r'H.*P')) == 2  # all records from Hewlett Packard
    assert len(dbase.records(manufacturer=r'H.*P|^Ag')) == 12  # all records from Hewlett Packard or Agilent
    assert len(dbase.records(manufacturer='Bd6d850614')) == 1
    assert len(dbase.records(model='00000')) == 1
    # the `connection` keyword is interpreted as a truthy/falsy flag
    num_connections_expected = 4
    assert len(dbase.records(connection=True)) == num_connections_expected
    assert len(dbase.records(connection=False)) == len(dbase.records()) - num_connections_expected
    assert len(dbase.records(connection=1)) == num_connections_expected
    assert len(dbase.records(connection=0)) == len(dbase.records()) - num_connections_expected
    assert len(dbase.records(connection='anything that converts to a bool=True')) == num_connections_expected
    assert len(dbase.records(connection='')) == len(dbase.records()) - num_connections_expected
    dbase.records(flags=1)  # a flags argument name is ok even though it not an EquipmentRecord property name
    with pytest.raises(NameError):
        dbase.records(unknown_name=None)
    assert len(dbase.connections()) == 10
    assert len(dbase.connections(backend='MSL')) == 5
    assert len(dbase.connections(backend=constants.Backend.MSL)) == 5
    assert len(dbase.connections(backend='PYVISA')) == 0  # the regex search is case sensitive
    assert len(dbase.connections(backend='PyVISA')) == 5
    assert len(dbase.connections(backend=constants.Backend.PyVISA)) == 5
    assert len(dbase.connections(backend='PyVISA|MSL')) == 10
    assert len(dbase.connections(backend='XXXXX')) == 0
    assert len(dbase.connections(serial='A10008')) == 1
    assert len(dbase.connections(manufacturer='^Ag')) == 4  # all records from Agilent
    assert len(dbase.connections(model='DTMc300V_sub')) == 1
    assert len(dbase.connections(manufacturer='Agilent', serial='G00001')) == 1
    assert len(dbase.connections(manufacturer='Agilent|Fluke|Thorlabs')) == 6
    assert len(dbase.connections(interface='SERIAL')) == 2  # != 3 since "Coherent Scientific" uses PyVISA
    assert len(dbase.connections(interface=constants.MSLInterface.SDK)) == 2
    assert len(dbase.connections(interface='SERIAL|SDK')) == 4
    assert len(dbase.connections(interface=constants.MSLInterface.SERIAL)) == 2  # != 3 since "Coherent Scientific" uses PyVISA
    assert len(dbase.connections(interface='XXXXXX')) == 0
    dbase.connections(flags=1)  # a flags argument name is ok even though it not a ConnectionRecord property name
    with pytest.raises(NameError):
        dbase.connections(unknown_name=None)
    assert len(dbase.equipment) == 2
    assert '712ae' in dbase.equipment  # the model number is used as the key
    assert 'dvm' in dbase.equipment  # the alias is used as the key
def test_connection_properties():
    """Connection properties parsed from db.xml keep their declared types."""
    db = Config(os.path.join(ROOT_DIR, 'db.xml')).database()
    props = db.records(serial='37871232')[0].connection.properties

    assert props['a'] == 1
    assert props['b'] == 1.1
    # c-f must be genuine booleans, not just truthy/falsy values
    for key, expected in (('c', True), ('d', True), ('e', False), ('f', False)):
        value = props[key]
        assert isinstance(value, bool)
        assert value is expected
    assert props['g'] is None
    assert props['h'] == ''
    # *_termination keys are converted to termination-character constants
    assert props['i_termination'] == constants.LF
    assert props['j_termination'] == constants.CR
    assert props['k_termination'] == constants.CR + constants.LF
    assert props['l'] == 'some text'
    assert props['m'] == 'D:\\Data\\'
def test_encoding():
    """Databases saved with UTF-8, CP-1252 and Excel encodings all load,
    print and round-trip through to_dict()/to_xml() without raising."""
    IS_PYTHON2 = sys.version_info[0] == 2
    if IS_PYTHON2:
        reload(sys)  # required for the sys.setdefaultencoding() calls below
        print('')
    for cfg in ['utf8_txt.xml', 'cp1252_txt.xml', 'xlsx.xml']:
        db = Config(os.path.join(ROOT_DIR, 'db_encoding_' + cfg)).database()
        if IS_PYTHON2:
            # match the interpreter's default encoding to the file's encoding
            # so that print() of non-ASCII records does not raise
            if cfg.startswith('cp1252'):
                sys.setdefaultencoding('cp1252')  # a legacy encoding used by Microsoft Windows
            elif cfg.startswith('utf8'):
                sys.setdefaultencoding('utf-8')
        print(db.path)
        # test printing the database records
        for r in db.records():
            print(r)
            r.to_dict()
            r.to_xml()
        for r in db.connections():
            print(r)
            r.to_dict()
            r.to_xml()
        # spot-check two records containing non-ASCII characters
        assert db.records(manufacturer='Kepco')[0].manufacturer == u'Kepco and \u201cTMK\u201d shunt'
        assert db.records(model='MFF101/M')[0].description == u'Motorized Filter Flip Mount for \xd825mm Optics'
def test_database_user_defined():
    """User-defined fields load fully from a valid file; malformed entries
    in the "bad" file are silently dropped."""
    db = Config(os.path.join(ROOT_DIR, 'db_user_defined.xml')).database()
    for record in db.records():
        if record.team != 'Any':
            assert len(record.user_defined) == 0
        else:
            assert len(record.user_defined) == 2
            assert record.user_defined['nothing_relevant'] == 'XXXXXXXXXX'
            assert record.user_defined['policies'] == 'MSLE.X.YYY'

    # the "bad" file keeps only the well-formed user-defined entry
    db = Config(os.path.join(ROOT_DIR, 'db_user_defined_bad.xml')).database()
    for record in db.records():
        if record.team != 'Any':
            assert len(record.user_defined) == 0
        else:
            assert len(record.user_defined) == 1
            assert record.user_defined['policies'] == 'MSLE.X.YYY'
def test_json_and_xml_db():
    """The JSON and XML database formats must produce identical records,
    calibrations, maintenances, connections and equipment tags."""
    for filename in ['config_json.xml', 'config_xml.xml']:
        db = Config(os.path.join(ROOT_DIR, filename)).database()

        #
        # EquipmentRecords
        #
        assert len(db.records()) == 3
        assert len(db.records(is_operable=True)) == 2
        assert len(db.records(is_operable=False)) == 1
        assert len(db.records(category='Logger')) == 1

        # the reflectance standard: two calibration reports, one measurand each
        records = db.records(unique_key='AK1')
        assert len(records) == 1
        r = records[0]
        assert len(r.calibrations) == 2
        c0 = r.calibrations[0]
        assert c0.report_date == datetime.date(2012, 10, 20)
        assert c0.calibration_date == datetime.date(2012, 10, 20)
        assert c0.report_number == 'PTB 44183/12'
        assert c0.calibration_cycle == 5
        assert len(c0.measurands) == 1
        m = c0.measurands['spectral_radiance_d6']
        assert m.type == 'spectral_radiance_d6'
        assert m.unit == ''
        assert m.conditions.measured_area_diameter == 10
        assert m.conditions.measured_area_diameter_unit == 'mm'
        assert m.conditions.bandwidth_below_1100_nm == 3
        assert m.conditions.bandwidth_below_1100_nm_unit == 'nm'
        assert m.conditions.bandwidth_above_1100_nm == 6
        assert m.conditions.bandwidth_above_1100_nm_unit == 'nm'
        assert m.calibration.calibration_type == 'dependent_artefact_values'
        assert m.calibration.dependent_parameter == 'wavelength'
        assert m.calibration.dependent_unit == 'nm'
        assert m.calibration.dependent_minimum == 250
        assert m.calibration.dependent_maximum == 2450
        with pytest.raises(TypeError):  # cannot change value
            m.calibration.dependent_maximum = 1000
        assert m.calibration.artefact_values == ((250, 0.938), (260, 0.945), (270, 0.950), (2450, 0.934))
        assert m.calibration.expanded_uncertainty == ((0, 0.011), (0, 0.011), (0, 0.004), (0, 0.019))
        assert m.calibration.coverage_factor == 2
        assert m.calibration.level_of_confidence == 0.95
        assert m.calibration.correlation_matrix == ()
        c1 = r.calibrations[1]
        assert c1.report_date == datetime.date(2012, 10, 20)
        assert c1.calibration_date == datetime.date(2012, 10, 20)
        assert c1.report_number == 'PTB 44188/12'
        assert c1.calibration_cycle == 5
        assert len(c1.measurands) == 1
        m = c1.measurands['spectral_radiance_factor']
        assert m.type == 'spectral_radiance_factor'
        assert m.unit == ''
        assert m.conditions.measured_area_diameter == "ellipse, 10 and 10/cos(theta_d)"
        assert m.conditions.measured_area_diameter_unit == 'mm'
        assert m.conditions.bandwidth_below_900_nm == 3
        assert m.conditions.bandwidth_below_900_nm_unit == 'nm'
        assert m.conditions.bandwidth_above_900_nm == 6
        assert m.conditions.bandwidth_above_900_nm_unit == 'nm'
        assert m.conditions.divergence_of_incident_beam == 1.5
        assert m.conditions.divergence_of_incident_beam_unit == 'degrees'
        assert m.conditions.divergence_of_detection_beam == 0.32
        assert m.conditions.divergence_of_detection_beam_unit == 'degrees'
        assert m.calibration.calibration_type == 'dependent_artefact_values'
        # nested measurands support both attribute and item access
        assert m.calibration.dependent_measurands.wavelength.minimum == 350
        assert m.calibration.dependent_measurands.wavelength['maximum'] == 800
        assert m.calibration.dependent_measurands.wavelength.unit == 'nm'
        assert m.calibration.dependent_measurands['incident_angle']['minimum'] == 45
        assert m.calibration.dependent_measurands['incident_angle'].maximum == 45
        assert m.calibration.dependent_measurands.incident_angle.unit == 'degrees'
        assert m.calibration.dependent_measurands.detection_angle.minimum == -30
        assert m.calibration['dependent_measurands']['detection_angle'].maximum == 65
        assert m.calibration.dependent_measurands.detection_angle.unit == 'degrees'
        assert m.calibration.artefact_values == ((350, 45, -30, 1.039), (400, 45, -30, 1.048), (800, 45, 65, 0.909))
        assert m.calibration.expanded_uncertainty == ((0, 0, 0, 0.017), (0, 0, 0, 0.005), (0, 0, 0, 0.002), (0, 0, 0, 0.002))
        assert m.calibration.coverage_factor == 2
        assert m.calibration.level_of_confidence == 0.95
        assert m.calibration.correlation_matrix == ()
        assert r.category == 'reflectance standard'
        assert r.connection is None
        assert r.description == 'spectralon 99% reflectance standard'
        assert r.is_operable
        assert len(r.maintenances) == 0
        assert r.manufacturer == 'Labsphere'
        assert r.model == 'AS-01159-060, USRS-99-020, BT69E'
        assert r.serial == '0.99'
        assert r.team == 'Light'
        assert r.unique_key == 'AK1'
        assert isinstance(r.user_defined, RecordDict)
        assert len(r.user_defined) == 0

        # the OMEGA logger: two calibrations with temperature + humidity measurands
        records = db.records(manufacturer='OMEGA')
        assert len(records) == 1
        r = records[0]
        assert len(r.calibrations) == 2
        c0 = r.calibrations[0]
        assert c0.report_date == datetime.date(2018, 7, 21)
        assert c0.calibration_date == datetime.date(2018, 6, 8)
        assert c0.report_number == 'Humidity/2018/386'
        assert c0.calibration_cycle == 2
        assert len(c0.measurands) == 2
        t = c0.measurands['temperature']
        assert t.type == 'temperature'
        assert t.unit == 'C'
        assert t.conditions.lab_temperature == 21
        assert t.conditions.lab_temperature_uncertainty == 1
        assert t.conditions.lab_temperature_unit == 'C'
        assert t.calibration.minimum == 18
        assert t.calibration.maximum == 24
        assert t.calibration.correction_coefficients == (0.01,)
        assert t.calibration.expanded_uncertainty == 0.13
        assert t.calibration.coverage_factor == 2
        assert t.calibration.level_of_confidence == 0.95
        assert t.calibration.correlation_matrix == ()
        h = c0.measurands['humidity']
        assert h.type == 'humidity'
        assert h.unit == '%rh'
        assert h.conditions.lab_temperature == 21
        assert h.conditions.lab_temperature_uncertainty == 1
        assert h.conditions.lab_temperature_unit == 'C'
        assert h.calibration.minimum == 30
        assert h.calibration.maximum == 85
        assert h.calibration.correction_coefficients == (-9.5, 0.326, -0.00505, 0.0000321)
        assert h.calibration.expanded_uncertainty == 0.9
        assert h.calibration.coverage_factor == 2
        assert h.calibration.level_of_confidence == 0.95
        assert h.calibration.correlation_matrix == ()
        c1 = r.calibrations[1]
        assert c1.report_date == datetime.date(2016, 2, 22)
        assert c1.calibration_date == datetime.date(2016, 1, 20)
        assert c1.report_number == 'Humidity/2016/322'
        assert c1.calibration_cycle == 2
        assert len(c1.measurands) == 2
        t = c1.measurands['temperature']
        assert t.type == 'temperature'
        assert t.unit == 'C'
        assert t.conditions.lab_temperature == 21
        assert t.conditions.lab_temperature_uncertainty == 1
        assert t.conditions.lab_temperature_unit == 'C'
        assert t.calibration.minimum == 17
        assert t.calibration.maximum == 23
        assert t.calibration.correction_coefficients == (0.05,)
        assert t.calibration.expanded_uncertainty == 0.12
        assert t.calibration.coverage_factor == 2
        assert t.calibration.level_of_confidence == 0.95
        assert t.calibration.correlation_matrix == ()
        h = c1.measurands['humidity']
        assert h.type == 'humidity'
        assert h.unit == '%rh'
        assert h.conditions.lab_temperature == 21
        assert h.conditions.lab_temperature_uncertainty == 1
        assert h.conditions.lab_temperature_unit == 'C'
        assert h.calibration.minimum == 30
        assert h.calibration.maximum == 80
        assert h.calibration.correction_coefficients == (-3.44, 0.0487)
        assert h.calibration.expanded_uncertainty == 0.8
        assert h.calibration.coverage_factor == 2
        assert h.calibration.level_of_confidence == 0.95
        assert h.calibration.correlation_matrix == ()
        assert r.category == "Logger"
        assert r.description == "Temperature, relative humidity and dew point reader"
        assert r.is_operable
        assert len(r.maintenances) == 2
        m0 = r.maintenances[0]
        assert m0.date == datetime.date(2019, 3, 24)
        assert m0.comment == 'Nothing changed'
        m1 = r.maintenances[1]
        assert m1.date == datetime.date(2018, 1, 17)
        assert m1.comment == 'ABCDEF ghijkl MNOP qrstuvwxyz'
        assert r.manufacturer == "OMEGA"
        assert r.model == "iTHX-W3-5"
        assert r.serial == "4070777"
        assert r.team == 'Light'
        assert r.unique_key == "137154e9-da33-46c9-b85b-3a1a351969d6"
        assert len(r.user_defined) == 1
        assert r.user_defined['my_custom_key'] == "whatever I want"

        # a minimal, not-operable record
        r = db.records(manufacturer='foo')[0]
        assert r.manufacturer == 'foo'
        assert r.model == 'bar'
        assert r.maintenances == tuple()
        assert not r.is_operable
        assert r.serial == 'xyz'
        assert r.calibrations == tuple()
        assert r.team == 'Light'
        assert len(r.user_defined) == 0

        #
        # ConnectionRecords
        #
        assert len(db.connections()) == 3
        assert len(db.connections(backend=Backend.MSL)) == 2
        assert len(db.connections(backend=Backend.PyVISA)) == 1
        assert len(db.connections(manufacturer='foo')) == 1
        c = db.connections(manufacturer='foo')[0]
        assert c is db.equipment['The A Team'].connection
        assert c.manufacturer == 'foo'
        assert c.model == 'bar'
        assert c.address == 'COM7'
        assert c.backend == Backend.MSL
        assert c.serial == 'xyz'
        assert len(c.properties) == 5
        assert c.properties['timeout'] == 5
        assert c.properties['baud_rate'] == 38400
        assert c.properties['termination'] == b'\r\n'
        assert c.properties['parity'] == constants.Parity.ODD
        assert c.properties['data_bits'] == constants.DataBits.SEVEN
        c = db.connections(manufacturer='Company B')[0]
        assert c.manufacturer == 'Company B'
        assert c.model == 'DEF'
        assert c.address == 'TCP::169.254.146.227::9221'
        assert c.backend == Backend.MSL
        assert c.serial == '123456'
        assert isinstance(c.properties, dict)
        assert len(c.properties) == 0
        c = db.connections(manufacturer='Company C')[0]
        assert c.manufacturer == 'Company C'
        assert c.model == 'GHI'
        assert c.address == 'GPIB::22'
        assert c.backend == Backend.PyVISA
        assert c.serial == 'aabbcc'
        assert len(c.properties) == 1
        assert c.properties['termination'] == b'\r'

        #
        # Equipment tag
        #
        assert db.connections(manufacturer='foo')[0] is db.equipment['The A Team'].connection
| {
"content_hash": "c3f8f9d7547a8d8277454e814be303ae",
"timestamp": "",
"source": "github",
"line_count": 418,
"max_line_length": 127,
"avg_line_length": 44.995215311004785,
"alnum_prop": 0.6379200340280732,
"repo_name": "MSLNZ/msl-equipment",
"id": "d43133f0891b5df8bc254ff25e22f5ab42c59eb8",
"size": "18808",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tests/test_database.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "2490464"
}
],
"symlink_target": ""
} |
import json
import random
import datetime
from flask.ext.admin.contrib.mongoengine import ModelView
from flask.ext.admin.contrib.fileadmin import FileAdmin as _FileAdmin
from flask.ext.admin.babel import gettext, ngettext
from flask.ext.admin import AdminIndexView
from flask.ext.admin import BaseView as AdminBaseView
from flask.ext.admin.actions import action
from flask.ext.admin import helpers as h
from flask.ext.security import current_user
from flask.ext.security.utils import url_for_security
from flask import redirect, flash, url_for, Response, current_app
from flask.ext.htmlbuilder import html
from quokka.modules.accounts.models import User
from quokka.core.templates import render_template
from quokka.core.widgets import PrepopulatedText
from quokka.core.admin.fields import ContentImageField
from quokka.utils.upload import dated_path, lazy_media_path
from quokka.utils import lazy_str_setting, is_accessible
from .fields import ThumbField
from .utils import _, _l, _n
class ThemeMixin(object):
    """Renders admin templates through quokka's theme-aware ``render_template``."""

    def render(self, template, **kwargs):
        """Render *template* with the admin context plus the configured theme."""
        context = {
            'admin_view': self,  # expose the view itself to the template
            'admin_base_template': self.admin.base_template,
            # i18n support works even if flask-babel is not installed/enabled
            '_gettext': gettext,
            '_ngettext': ngettext,
            'h': h,
        }
        kwargs.update(context)
        # extra arguments registered on the view take precedence
        kwargs.update(self._template_args)
        theme = current_app.config.get('ADMIN_THEME', None)
        return render_template(template, theme=theme, **kwargs)
class Roled(object):
    """Restricts admin views based on flask-security roles."""

    def is_accessible(self):
        """The view is accessible when the current user holds an accepted role."""
        return is_accessible(
            roles_accepted=getattr(self, 'roles_accepted', None),
            user=current_user
        )

    def _handle_view(self, name, *args, **kwargs):
        # Anonymous users go to the login page; authenticated users lacking
        # the required role get the "denied" page.
        if not current_user.is_authenticated():
            return redirect(url_for_security('login', next="/admin"))
        if not self.is_accessible():
            return self.render("admin/denied.html")
def format_datetime(self, request, obj, fieldname, *args, **kwargs):
    """List-column formatter: render a datetime field in a fixed-width <div>."""
    value = getattr(obj, fieldname)
    formatted = value.strftime(self.get_datetime_format())
    return html.div(style='min-width:130px;')(formatted)
def view_on_site(self, request, obj, fieldname, *args, **kwargs):
    """List-column formatter: external link to the object's public page."""
    endpoint = kwargs.pop('endpoint', 'detail')
    icon = html.i(class_="icon icon-eye-open", style="margin-right: 5px;")()
    link = html.a(href=obj.get_absolute_url(endpoint), target='_blank')
    return link(icon, _l('View on site'))
def format_ul(self, request, obj, fieldname, *args, **kwargs):
    """List-column formatter: render an iterable field as a <ul>.

    Per-field options come from ``column_formatters_args['ul'][fieldname]``:
    ``style`` for the <ul> element and ``placeholder``, a format string that
    receives each element as the ``item`` keyword.
    """
    field = getattr(obj, fieldname)
    column_formatters_args = getattr(self, 'column_formatters_args', {})
    _args = column_formatters_args.get('ul', {}).get(fieldname, {})
    ul = html.ul(style=_args.get('style', "min-width:200px;max-width:300px;"))
    # BUG FIX: the default placeholder was u"{i}", which raises KeyError in
    # placeholder.format(item=item); it must reference the `item` keyword.
    placeholder = _args.get('placeholder', u"{item}")
    lis = [html.li(placeholder.format(item=item)) for item in field]
    return ul(*lis)
def format_status(self, request, obj, fieldname, *args, **kwargs):
    """List-column formatter: render a status value as a Bootstrap label."""
    status = getattr(obj, fieldname)
    options = getattr(self, 'column_formatters_args', {}).get('status', {}).get(fieldname, {})
    # per-status CSS label suffix, e.g. {'published': 'success'}
    labels = options.get('labels', {})
    css_class = "label label-{0}".format(labels.get(status, 'default'))
    span = html.span(class_=css_class, style=options.get('style', 'min-height:18px;'))
    return span(status)
def get_url(self, request, obj, fieldname, *args, **kwargs):
    """List-column formatter: render the field's value as a link.

    Options in ``column_formatters_args['get_url'][fieldname]``: ``attribute``
    names a related object to link to (defaults to *obj* itself) and
    ``method`` is the URL-producing method on that target.
    """
    options = getattr(self, 'column_formatters_args', {}).get('get_url', {}).get(fieldname, {})
    attribute = options.get('attribute', None)
    method = options.get('method', 'get_absolute_url')
    text = getattr(obj, fieldname, '')
    target = getattr(obj, attribute, None) if attribute else obj
    # fall back to '#' when the target cannot produce a URL
    url = getattr(target, method, lambda: '#')()
    return html.a(href=url)(text if text not in [None, 'None'] else '')
class FileAdmin(ThemeMixin, Roled, _FileAdmin):
    """Role-aware, themed file-manager view."""

    def __init__(self, *args, **kwargs):
        # Pull quokka-specific options out of kwargs before delegating to
        # flask-admin's FileAdmin.
        self.roles_accepted = kwargs.pop('roles_accepted', [])
        self.editable_extensions = kwargs.pop('editable_extensions', ())
        super(FileAdmin, self).__init__(*args, **kwargs)
class ModelAdmin(ThemeMixin, Roled, ModelView):
    """Base MongoEngine admin view with quokka theming, role checks and
    common list actions (publish toggle, clone, JSON/CSV export).
    """

    form_subdocuments = {}
    datetime_format = "%Y-%m-%d %H:%M"

    # Reusable column formatters; subclasses reference these in
    # ``column_formatters``.
    formatters = {
        'datetime': format_datetime,
        'view_on_site': view_on_site,
        'ul': format_ul,
        'status': format_status,
        'get_url': get_url
    }
    column_formatters_args = {}

    def get_datetime_format(self):
        """Datetime display format, overridable via the DATETIME_FORMAT setting."""
        return current_app.config.get('DATETIME_FORMAT', self.datetime_format)

    def get_instance(self, i):
        """Return the model instance with id *i*, or flash an error and
        return None when it does not exist."""
        try:
            return self.model.objects.get(id=i)
        except self.model.DoesNotExist:
            flash(_("Item not found %(i)s", i=i), "error")

    @action(
        'toggle_publish',
        _l('Publish/Unpublish'),
        _l('Publish/Unpublish?')
    )
    def action_toggle_publish(self, ids):
        """Invert the ``published`` flag on every selected item."""
        for i in ids:
            instance = self.get_instance(i)
            # BUG FIX: get_instance returns None for a missing id (it has
            # already flashed an error); skip instead of raising AttributeError.
            if instance is None:
                continue
            instance.published = not instance.published
            instance.save()
        count = len(ids)
        flash(_n('Item successfully published/Unpublished.',
                 '%(count)s items were successfully published/Unpublished.',
                 count,
                 count=count))

    @action(
        'clone_item',
        _l('Create a copy'),
        _l('Are you sure you want a copy?')
    )
    def action_clone_item(self, ids):
        """Duplicate one selected item as an unpublished draft and redirect
        to its edit page."""
        if len(ids) > 1:
            flash(
                _("You can select only one item for this action"),
                'error'
            )
            return

        instance = self.get_instance(ids[0])
        # BUG FIX: guard against a missing id (error already flashed).
        if instance is None:
            return
        new = instance.from_json(instance.to_json())
        new.id = None
        new.published = False
        new.last_updated_by = User.objects.get(id=current_user.id)
        new.updated_at = datetime.datetime.now()
        # randomise the slug so the clone does not clash with the original
        new.slug = "{0}-{1}".format(new.slug, random.getrandbits(32))
        new.save()
        return redirect(url_for('.edit_view', id=new.id))

    @action('export_to_json', _l('Export as json'))
    def export_to_json(self, ids):
        """Stream the selected items as a JSON attachment."""
        qs = self.model.objects(id__in=ids)
        return Response(
            qs.to_json(),
            mimetype="text/json",
            headers={
                "Content-Disposition":
                    "attachment;filename=%s.json" % self.model.__name__.lower()
            }
        )

    @action('export_to_csv', _l('Export as csv'))
    def export_to_csv(self, ids):
        """Stream the selected items as a CSV attachment.

        The header row is taken from the keys of the first item.
        """
        qs = json.loads(self.model.objects(id__in=ids).to_json())

        def generate():
            # BUG FIX: an empty selection used to raise IndexError on qs[0];
            # emit an empty file instead.
            if not qs:
                return
            yield ','.join(list(qs[0].keys())) + '\n'
            for item in qs:
                yield ','.join([str(i) for i in list(item.values())]) + '\n'

        return Response(
            generate(),
            mimetype="text/csv",
            headers={
                "Content-Disposition":
                    "attachment;filename=%s.csv" % self.model.__name__.lower()
            }
        )
# Admin dashboard view: role-based access control + themed rendering.
class BaseIndexView(Roled, ThemeMixin, AdminIndexView):
    pass
# Generic admin view base: role-based access control + themed rendering.
class BaseView(Roled, ThemeMixin, AdminBaseView):
    pass
class BaseContentAdmin(ModelAdmin):
    """Shared admin configuration for content models (posts, pages, ...).

    Mostly declarative flask-admin options; see
    github.com/mrjoes/flask-admin/blob/master/flask_admin/model/base.py
    or the Flask-Admin documentation for the meaning of each attribute.
    """
    roles_accepted = ('admin', 'editor', 'author')
    can_create = True
    can_edit = True
    can_delete = True
    # list_template = 'admin/model/list.html'
    # edit_template = 'admin/custom/edit.html'
    # create_template = 'admin/custom/create.html'
    # columns shown in the list view
    column_list = ('title', 'slug', 'channel', 'published', 'created_at',
                   'available_at', 'view_on_site')
    column_formatters = {
        'view_on_site': ModelAdmin.formatters.get('view_on_site'),
        'created_at': ModelAdmin.formatters.get('datetime'),
        'available_at': ModelAdmin.formatters.get('datetime')
    }
    # column_type_formatters = {}
    # column_labels = {}
    # column_descriptions = {}
    # column_sortable_list = [] / ('name', ('user', 'user.username'))
    # column_default_sort = 'pk'
    # column_choices = {'column': ('value', 'display')}
    # column_display_pk = True
    column_filters = ['published', 'title', 'summary',
                      'created_at', 'available_at']
    column_searchable_list = ('title', 'summary')
    # fields shown in the create/edit form, in this order
    form_columns = ['title', 'slug', 'channel', 'related_channels', 'summary',
                    'published', 'add_image', 'contents',
                    'show_on_channel', 'available_at', 'available_until',
                    'tags', 'values', 'template_type', 'license', 'authors']
    # form_excluded_columns = []
    # form = None
    # form_overrides = None
    form_widget_args = {
        'summary': {
            'style': 'width: 400px; height: 100px;'
        },
        'title': {'style': 'width: 400px'},
        'slug': {'style': 'width: 400px'},
    }
    form_args = {
        # 'body': {'widget': TextEditor()},
        # slug is auto-filled from the title as the user types
        'slug': {'widget': PrepopulatedText(master='title')}
    }
    # embedded `contents` documents get an ajax content picker and a
    # thumbnail preview field
    form_subdocuments = {
        'contents': {
            'form_subdocuments': {
                None: {
                    'form_columns': ('content', 'caption', 'purpose',
                                     'order', 'thumb'),
                    'form_ajax_refs': {
                        'content': {
                            'fields': ['title', 'long_slug', 'summary']
                        }
                    },
                    'form_extra_fields': {
                        'thumb': ThumbField('thumb', endpoint="media")
                    }
                }
            }
        },
    }
    # form_extra_fields = {}
    # extra upload field: stores a new image in the media folder and
    # generates a thumbnail
    form_extra_fields = {
        'add_image': ContentImageField(
            'Add new image',
            base_path=lazy_media_path(),
            thumbnail_size=lazy_str_setting('MEDIA_IMAGE_THUMB_SIZE',
                                            default=(100, 100, True)),
            endpoint="media",
            namegen=dated_path,
            permission=0o777,
            allowed_extensions="MEDIA_IMAGE_ALLOWED_EXTENSIONS",
        )
    }
    # action_disallowed_list
    # page_size = 20
    # form_ajax_refs = {
    #     'main_image': {"fields": ('title',)}
    # }

    def after_model_change(self, form, model, is_created):
        # Attach the uploaded image (if any) to the saved model.
        if not hasattr(form, 'add_image'):
            return
        form.add_image.save_contents(model)
| {
"content_hash": "0bd6a5f1d0763e8ca7a0f29bc23570e3",
"timestamp": "",
"source": "github",
"line_count": 321,
"max_line_length": 79,
"avg_line_length": 33.202492211838006,
"alnum_prop": 0.5808782135485082,
"repo_name": "felipevolpone/quokka",
"id": "6738496ce4ba493cc18f67c1314200398bdd06d3",
"size": "10676",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "quokka/core/admin/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "104"
},
{
"name": "CSS",
"bytes": "68245"
},
{
"name": "HTML",
"bytes": "169100"
},
{
"name": "JavaScript",
"bytes": "546416"
},
{
"name": "Makefile",
"bytes": "466"
},
{
"name": "Python",
"bytes": "178190"
},
{
"name": "Shell",
"bytes": "9253"
}
],
"symlink_target": ""
} |
class Node(object):
    """A node in a variance-contribution tree.

    Each node records its parent link, an absolute variance ``contribution``
    and a percentage ``perct`` of the total variance, plus a list of
    children populated via :meth:`addChild`.
    """

    def __init__(self, parent, contribution, perct):
        self._children = []
        self._parent = parent
        self._contribution = contribution
        self._perct = perct

    @property
    def children(self):
        """Child nodes (append via addChild)."""
        return self._children

    @property
    def parent(self):
        return self._parent

    @parent.setter
    def parent(self, parent):
        self._parent = parent

    @property
    def contribution(self):
        return self._contribution

    @contribution.setter
    def contribution(self, contribution):
        self._contribution = contribution

    @property
    def perct(self):
        return self._perct

    @perct.setter
    def perct(self, perct):
        self._perct = perct

    @property
    def depth(self):
        """Number of edges between this node and the root (root depth is 0)."""
        depth = 0
        node = self._parent
        # idiom fix: identity comparison with None, not `!= None`
        while node is not None:
            depth += 1
            node = node.parent
        return depth

    def addChild(self, child):
        """Append *child* to this node's children (does not set child.parent)."""
        self._children.append(child)
class VarNode(Node):
    """Tree node holding the variance contribution of a single function."""

    def __init__(self, function, parent, contribution, perct):
        super(VarNode, self).__init__(parent, contribution, perct)
        self._func = function

    def _get_func(self):
        return self._func

    def _set_func(self, func):
        self._func = func

    # Expose the wrapped function through the same `func` property
    # interface that the decorator form provides.
    func = property(_get_func, _set_func)
class CovNode(Node):
    """Tree node holding the covariance contribution of a pair of functions."""

    def __init__(self, func1, func2, parent, contribution, perct):
        super(CovNode, self).__init__(parent, contribution, perct)
        self._func1 = func1
        self._func2 = func2

    @property
    def func(self):
        """Comma-joined pair name, e.g. ``"f,g"``."""
        return self._func1 + ',' + self._func2

    @func.setter
    def func(self, value):
        # BUG FIX: a property setter receives exactly one value, so the
        # original signature (self, func1, func2) made every assignment raise
        # TypeError.  Accept a (func1, func2) pair instead.
        self._func1, self._func2 = value
class Tree(object):
    """Variance-contribution tree rooted at a single function."""

    def __init__(self, func):
        # the root represents 100% of the variance with no parent
        self._root = VarNode(func, None, 0, 100)

    @property
    def root(self):
        return self._root

    @root.setter
    def root(self, root):
        self._root = root

    def getLeaves(self):
        """Breadth-first collection of leaf nodes contributing more than 5%."""
        leaves = []
        pending = [self._root]
        while pending:
            node = pending.pop(0)
            if node.children:
                pending.extend(node.children)
            elif node.perct > 5:
                leaves.append(node)
        return leaves

    def selectFactors(self, k):
        """Return up to *k* qualifying leaves, highest percentage first."""
        ranked = sorted(self.getLeaves(), key=lambda leaf: leaf.perct, reverse=True)
        # slicing handles k > len(ranked) gracefully
        return ranked[:k]
| {
"content_hash": "3bd62d52f2d15be0d8de12e5755af6d8",
"timestamp": "",
"source": "github",
"line_count": 99,
"max_line_length": 66,
"avg_line_length": 25.272727272727273,
"alnum_prop": 0.5667466027178257,
"repo_name": "mozafari/vprofiler",
"id": "36962c330776ffbe603b5c1ef07c2d70309d814e",
"size": "2502",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/FactorSelector/VarTree.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "12119"
},
{
"name": "C++",
"bytes": "215676"
},
{
"name": "Makefile",
"bytes": "7101"
},
{
"name": "Python",
"bytes": "47531"
},
{
"name": "Shell",
"bytes": "1050"
}
],
"symlink_target": ""
} |
from twisted.internet import reactor, defer, task
from twisted.internet.protocol import Factory
from twisted.protocols.basic import LineReceiver
# System Imports
from collections import namedtuple
# Package Imports
from ..machine import Machine, Component, Property, Stream, ui
from ..util import now
from ..protocol import basic, gsioc
from gilson_components import layout
#__all__ = ["UVVis151"]
class Error (Exception):
    """Base class for exceptions raised by this module."""
    pass
class GSIOC (Machine):
    """Machine wrapper for a GSIOC (Gilson Serial I/O Channel) bus connection.

    Other machines obtain per-device slave connections via :meth:`gsioc`.
    """

    protocolFactory = Factory.forProtocol(gsioc.Receiver)
    name = "GSIOC Connection"

    def setup (self):
        # Fire the connect-wait Deferred half a second after the link is ready.
        def connected (result):
            reactor.callLater(0.5, self._connect_wait.callback, True)
            return result

        # Put in a delay to allow the GSIOC to initialise
        self._connect_wait = defer.Deferred()
        self.ready.addCallback(connected)

    def gsioc (self, id):
        # Returns a Deferred that fires with a slave connection for unit *id*
        # once the bus has finished initialising.
        d = defer.Deferred()

        def send_slave (result):
            d.callback(self.protocol.slave(id))

        # Wait until GSIOC has connected
        self._connect_wait.addCallback(send_slave)

        return d
def _iter_ci_FIFO (s):
for i in xrange(0, len(s), 7):
yield s[i:i + 6]
def _set_output (machine, i):
i = str(i)
def set_output (value):
if value == "open":
machine.protocol.buffered_command("D" + i)
else:
machine.protocol.buffered_command("C" + i)
return set_output
class ControlModule506C (Machine):
protocolFactory = Factory.forProtocol(gsioc.Receiver)
name = "Gilson 506C Control Module"
A = 1
B = 2
C = 4
D = 8
input_map = {
"@": 0, "A": A, "B": B, "C": A | B, "D": C,
"E": A | C, "F": B | C, "G": A | B | C, "H": D,
"I": A | D, "J": B | D, "K": A | B | D, "L": C | D,
"M": A | C | D, "N": B | C | D, "O": A | B | C | D
}
analogue_sample_frequency = 0.1
analogue_sample_interval = 0.5
contact_input_sample_interval = 0.5
contact_output_sample_interval = 0.5
def setup (self, **kwargs):
self.analogue1 = gsioc.FIFOStream(channel = 0, title = "Analogue Input A", type = float, unit = "mV", factor = 0.01)
self.analogue2 = gsioc.FIFOStream(channel = 1, title = "Analogue Input B", type = float, unit = "mV", factor = 0.01)
self.analogue3 = gsioc.FIFOStream(channel = 2, title = "Analogue Input C", type = float, unit = "mV", factor = 0.01)
self.analogue4 = gsioc.FIFOStream(channel = 3, title = "Analogue Input D", type = float, unit = "mV", factor = 0.01)
self.input1 = Property(title = "Contact Input A", type = str)
self.input2 = Property(title = "Contact Input B", type = str)
self.input3 = Property(title = "Contact Input C", type = str)
self.input4 = Property(title = "Contact Input D", type = str)
self.output1 = Property(title = "Contact Output A", type = str, options = ("open", "closed"), setter = _set_output(self, 1))
self.output2 = Property(title = "Contact Output B", type = str, options = ("open", "closed"), setter = _set_output(self, 2))
self.output3 = Property(title = "Contact Output C", type = str, options = ("open", "closed"), setter = _set_output(self, 3))
self.output4 = Property(title = "Contact Output D", type = str, options = ("open", "closed"), setter = _set_output(self, 4))
self.output5 = Property(title = "Contact Output E", type = str, options = ("open", "closed"), setter = _set_output(self, 5))
self.output6 = Property(title = "Contact Output F", type = str, options = ("open", "closed"), setter = _set_output(self, 6))
self.ui = ui(
traces = [{
"title": "Analogue Inputs",
"unit": self.analogue1.unit,
"traces": [self.analogue1, self.analogue2, self.analogue3, self.analogue4],
"colours": ["#FF1300", "#FFB100", "#1435AD", "#00C322"]
}],
properties = [
self.input1,
self.input2,
self.input3,
self.input4,
self.output1,
self.output2,
self.output3,
self.output4,
self.output5,
self.output6
]
)
def start (self):
# Reset Analogue Input FIFO buffers
self.analogue1.reset(self.protocol, self.analogue_sample_frequency)
self.analogue2.reset(self.protocol, self.analogue_sample_frequency)
self.analogue3.reset(self.protocol, self.analogue_sample_frequency)
self.analogue4.reset(self.protocol, self.analogue_sample_frequency)
def monitorAnalogueInputs ():
self.analogue1.update(self.protocol)
self.analogue2.update(self.protocol)
self.analogue3.update(self.protocol)
self.analogue4.update(self.protocol)
self._tick(monitorAnalogueInputs, self.analogue_sample_interval)
# Reset Contact Event FIFO
def resetContactInputs ():
def interpret (result):
if len(result) != 4:
return
self.input1._push("closed" if result[0] == "C" else "open")
self.input2._push("closed" if result[1] == "C" else "open")
self.input3._push("closed" if result[2] == "C" else "open")
self.input4._push("closed" if result[3] == "C" else "open")
self._last_contact_update = now()
self.protocol.buffered_command("9")
self.protocol.immediate_command("*").addCallback(interpret)
def interpretContactInputs (result):
if result[0] == "|":
return # Buffer is empty
if len(result) % 7 > 0:
raise Exception("Malformed contact event FIFO: " + str(result))
for entry in _iter_ci_FIFO(result):
try:
state = self.input_map[result[0]]
time = self._last_contact_update + (int(result[1:6], 16) * 0.01)
except IndexError, KeyError:
raise Exception("Malformed contact event FIFO: " + str(result))
self.input1._push("closed" if state & self.A else "open", time)
self.input2._push("closed" if state & self.B else "open", time)
self.input3._push("closed" if state & self.C else "open", time)
self.input4._push("closed" if state & self.D else "open", time)
def interpretContactOutputs (result):
if len(result) != 6:
return
self.output1._push("closed" if result[0] == "C" else "open")
self.output2._push("closed" if result[1] == "C" else "open")
self.output3._push("closed" if result[2] == "C" else "open")
self.output4._push("closed" if result[3] == "C" else "open")
self.output5._push("closed" if result[4] == "C" else "open")
self.output6._push("closed" if result[5] == "C" else "open")
def monitorContactInputs ():
self.protocol.immediate_command("9").addCallback(interpretContactInputs)
def monitorContactOutputs ():
self.protocol.immediate_command("?").addCallback(interpretContactOutputs)
self._tick(resetContactInputs, 45 * 3600) # Event buffer runs out after ~46h
self._tick(monitorContactInputs, self.contact_input_sample_interval)
self._tick(monitorContactOutputs, self.contact_output_sample_interval)
    def stop (self):
        """Cancel all periodic monitoring tasks registered by start()."""
        self._stopTicks()
class SampleInjector233 (Machine):
    """Driver for a Gilson 233 sample injector (XYZ arm plus an
    injection valve and a switching valve).

    Target positions are resolved either from _default_locations or
    from named layouts registered via add_layout().
    """
    protocolFactory = Factory.forProtocol(basic.QueuedLineReceiver)
    name = "Gilson Sampling Injector"
    # NOTE(review): class-level mutable attributes -- _layouts and
    # _current_position are shared by all instances until an instance
    # method rebinds them; confirm single-instance use.
    _layouts = {}
    _current_position = (0, 0, 0)
    # Positions determined by manual calibration of our device.
    # Testing recommended in case of non-calibrated machine!
    _default_locations = {
        "zero": (0, 350, 0),
        "inject:1": (2460, 516, 515),
        "inject:2": (3866, 516, 515),
        "wash:a:deep": (140, 400, 750),
        "wash:a:shallow": (70, 400, 400),
        "wash:a:drain": (0, 400, 350)
    }
    def add_layout (self, name, layout):
        """Register a named plate layout exposing an xyz(pos) method."""
        self._layouts[name] = layout
    def remove_layout (self, name):
        """Forget the layout called `name` (no error if unknown)."""
        if name in self._layouts:
            del self._layouts[name]
    def clear_layouts (self):
        """Drop all registered layouts (rebinds an instance attribute)."""
        self._layouts = {}
    def setup (self):
        def set_position (location):
            # Resolve `location` into coordinates: either a key of
            # _default_locations, or "<layout>:<position>".
            if location in self._default_locations:
                x, y, z = self._default_locations[location]
            elif ":" in location:
                # NOTE(review): raises ValueError for locations with more
                # than one ":" that are not default locations.
                name, pos = location.split(":")
                if name not in self._layouts:
                    raise Exception ("Unknown layout: %s" % name)
                x, y, z = self._layouts[name].xyz(pos)
            else:
                raise Exception ("Invalid location: %s" % location)
            # Move to z_up
            self.protocol.buffered_command("z0")
            self.protocol.buffered_command("W")
            # Move to x,y
            self.protocol.buffered_command("x{:d}".format(x))
            self.protocol.buffered_command("y{:d}".format(y))
            self.protocol.buffered_command("W")
            # Move to z_down
            self.protocol.buffered_command("z{:d}".format(z))
            self.protocol.buffered_command("W")
            # Time for both Z movements
            z_time = (self._current_position[2] / 1250. + z / 900.)
            # Time for XY movement
            xy_time = max(
                abs(self._current_position[0] - x) / 2500.,
                abs(self._current_position[1] - y) / 2500.
            )
            # Start checking a bit before anticipated
            # completion time
            expected_time = max(0, z_time + xy_time - 0.5)
            self._current_position = (x, y, z)
            finished = defer.Deferred()
            def check_finished ():
                # Poll "S" status until the arm stops ("0") or errors.
                def cb (result):
                    if result[1] == "1":
                        finished.errback()
                    elif result[0] == "1":
                        reactor.callLater(0.1, check)
                    elif result[0] == "0":
                        self.position._push(location)
                        finished.callback("ok")
                def check ():
                    self.protocol.immediate_command("S").addCallback(cb)
                check()
            reactor.callLater(expected_time, check_finished)
            return finished
        def set_valve (valve):
            # Factory building a setter for one valve; the switching
            # valve's command carries a "/" suffix. (The inner function
            # deliberately shadows this name.)
            c = "I{:d}" + ("/" if valve == "switching" else "")
            def set_valve (pos):
                return self.protocol.buffered_command(c.format(1 if pos == "inject" else 0));
            return set_valve
        # setup variables
        self.position = Property(title = "Position", type = str, setter = set_position)
        self.injection = Property(title = "Injection Valve", type = str, options = ("load", "inject"), setter = set_valve("injection"))
        self.switching = Property(title = "Switching Valve", type = str, options = ("load", "inject"), setter = set_valve("switching"))
        #self.status = Property(title = "Status", type = str)
        self.ui = ui(
            properties = [
                self.position,
                self.injection,
                self.switching,
                #self.status
            ]
        )
    def start (self):
        def get_param (id):
            # Request parameter `id` ("P<id>") then read the reply ("P").
            def request (result):
                return self.protocol.immediate_command("P")
            return self.protocol.buffered_command("P" + str(id)).addCallback(request)
        def interpretState (result):
            if result[1] == "1":
                self.status._push("error")
            elif result[0] == "1":
                self.status._push("busy")
            elif result[0] == "0":
                self.status._push("idle")
        # Index into this tuple is the valve state digit from "P".
        valve_states = ("load", "inject", "running", "error", "missing")
        def interpretValveState (result):
            if result[0] == "0":
                pass
                # print "Injection Valve on Right"
            self.injection._push(valve_states[int(result[1])])
            self.switching._push(valve_states[int(result[2])])
        def monitor1 ():
            #self.protocol.immediate_command("S").addCallback(interpretState)
            self.protocol.immediate_command("P").addCallback(interpretValveState)
        self._tick(monitor1, 0.5)
    def stop (self):
        """Cancel the periodic valve-state poll."""
        self._stopTicks()
    def reset (self):
        """Return both valves to "load" and park the arm at "zero"."""
        return defer.gatherResults([
            self.injection.set("load"),
            self.switching.set("load"),
            self.position.set("zero")
        ])
class Pump305 (Machine):
    """Driver stub for a Gilson 305 HPLC pump (no functionality yet)."""
    protocolFactory = Factory.forProtocol(basic.QueuedLineReceiver)
    name = "Gilson 305 HPLC Pump"
    def setup (self):
        pass
    def start (self):
        pass
    def reset (self):
        # BUG FIX: twisted.internet.defer has no success() function --
        # calling it raised AttributeError. defer.succeed(None) matches
        # reset() in the other drivers in this module.
        return defer.succeed(None)
# Driver-specific error hierarchy (all derive from the package Error).
class InvalidPistonSize (Error):
    "The requested piston size is not in the configured list."
class InitializationFailed (Error):
    "The requested piston failed to initialise."
class InvalidTarget (Error):
    "The requested target volume is not supported."
class ValveMoveFailed (Error):
    "The requested valve movement failed."
# Per-size piston parameters: maximum flow rate and a sanitiser that
# clamps/rounds a requested flow rate into the valid range for the size.
_PistonSize = namedtuple('_PistonSize', ["flow_max", "flow_sanitise"])
class _syringe_piston (Component):
piston_ids = ("L", "R")
piston_sizes = {
None: _PistonSize( 0, lambda x: 0),
100: _PistonSize( 6, lambda x: round(max(0.001, min(x, 6)), 3)),
250: _PistonSize( 15, lambda x: round(max(0.001, min(x, 15)), 3)),
500: _PistonSize( 30, lambda x: round(max(0.001, min(x, 30)), 3)),
1000: _PistonSize( 60, lambda x: round(max(0.01, min(x, 60)), 2)),
5000: _PistonSize(120, lambda x: round(max(0.01, min(x, 120)), 2)),
10000: _PistonSize(240, lambda x: round(max(0.02, min(x, 240)), 2)),
25000: _PistonSize(240, lambda x: round(max(0.04, min(x, 240)), 2)),
39000: _PistonSize(39000, lambda x: int(max(1, min(x, 39000))))
}
status_text = {
"N": "ready",
"R": "running",
"O": "error",
"I": "uninitialized",
"M": "missing",
"H": "paused",
"W": "waiting"
}
def __init__ (self, machine, id, size):
if id not in (0, 1):
raise Error ("Piston id must be 0 or 1")
if size not in self.piston_sizes:
raise InvalidPistonSize(size)
self._i = id
self._id = self.piston_ids[id]
self._size = size
self._machine = machine
self._rate = self.piston_sizes[size].flow_max / 4.
self.title = self._id + " Piston"
self.status = Property(title = self._id + " Syringe Status", type = str)
self.target = Property(title = self._id + " Syringe Target Volume", type = float, unit = "uL", setter = self.set_target)
self.volume = Stream(title = self._id + " Syringe Volume", type = float, unit = "uL")
def set_target (self, target, timely_start = False):
"""
Move to a target volume by aspirating or dispensing
the appropriate volume.
@param target: The desired volume of aspirated liquid in uL.
@param timely_start: Synchronise with other syringe.
"""
if self._size is None:
raise Error ("Syringe " + self._id + " not installed")
finished = defer.Deferred()
current_target = self.target.value
target = min(max(target, 0), self._size)
movement = target - current_target
# For 100, 250 uL pistons, the pump expects the volume parameter
# as a 5-character float. For all others, as a 5-char integer.
if self._size in (100, 250):
command = "{:s}{:s}{:05.1f}"
else:
command = "{:s}{:s}{:05d}"
movement = int(movement)
# Send the command, e.g. "AL00100", followed by a go command, e.g. "BL"
self._machine.protocol.buffered_command(command.format(
"D" if movement < 0 else "A",
self._id,
abs(movement)
))
if timely_start:
self._machine.protocol.buffered_command("T{:s}".format(self._id))
self._machine.protocol.buffered_command("B{:s}".format(self._id))
self.target._push(target)
def check_finished (delay):
def cb (result):
status = result[6 * self._i]
if status == "N":
# Movement complete, now idle
monitor.stop()
finished.callback(None)
elif status == "R":
# Keep checking rapidly if it is still running
reactor.callLater(0.1, check)
elif status == "W" or status == "H":
# Less frequent checks if the syringe is waiting
reactor.callLater(delay, check)
else:
# Error returned
monitor.stop()
finished.errback(None)
def check ():
self._machine.protocol.immediate_command("M").addCallback(cb)
check()
def monitor_movement ():
def cb (result):
self.update(result[0 + 6 * self._i : 6 + 6 * self._i])
return self._machine.protocol.immediate_command("M").addCallback(cb)
expected_time = max(round((abs(movement) / 1000 / self._rate) * 60, 1) - 0.5, 0)
reactor.callLater(expected_time, check_finished, expected_time)
monitor = task.LoopingCall(monitor_movement)
monitor.start(1, now = True)
return finished
def set_rate (self, rate):
"""
Set the syringe piston flow rate.
@param rate: The desired flow rate in mL/min
"""
if self._size is None:
raise Error ("Syringe " + self._id + " not installed")
# Return a flow rate within the allowed bounds
rate = self.piston_sizes[self._size].flow_sanitise(rate)
self._rate = rate
# It seems that the specified flow rate can be only 5 characters long
if self._size is 39000:
rate = "{:05d}".format(rate)
else:
rate = "{:05.3f}".format(rate)[:5]
print "set rate: S" + self._id + rate
return self._machine.protocol.buffered_command(
"S" + self._id + rate
)
def aspirate (self, volume, timely_start = False):
"""
Aspirate a volume of solution.
@param volume: The volume to aspirate in uL.
@param timely_start: Synchronise with other syringe.
"""
return self.set_target(self.target.value + volume, timely_start)
def dispense (self, volume, timely_start = False):
"""
Dispense a volume of solution.
@param volume: The volume to dispense in uL.
@param timely_start: Synchronise with other syringe.
"""
return self.set_target(self.target.value - volume, timely_start)
def initialize (self):
"Initialise syringe."
# An error will be returned if the pump doesn't recognise the size
def cb (result):
if result[1] == "1":
raise InitializationFailed
else:
self.target._push(0)
return self._machine.protocol.buffered_command(
"O{:s}".format(self._id)
)
# TODO: monitor / update whilst initializing, return when done...
def initialisation_failed (failure):
failure.trap(InitializationFailed)
print "Syringe Initialisation failed. Trying again"
return task.deferLater(reactor, 1, self.initialize)
# Send commands to initialise the syringe
if self._size is not None:
self._machine.protocol.buffered_command(
"P{:s}{:05d}".format(self._id, self._size)
)
d = self._machine.protocol.immediate_command("S")
d.addCallback(cb)
d.addErrback(initialisation_failed)
return d
else:
return defer.succeed(None)
def update (self, status):
self.status._push(self.status_text[status[0]])
self.volume._push(float(status[1:]))
class SyringePump402 (Machine):
    """Driver for the Gilson 402 dual syringe pump.

    Combines two _syringe_piston components with the two
    reservoir/needle selection valves.
    """
    protocolFactory = Factory.forProtocol(basic.QueuedLineReceiver)
    name = "Gilson Piston Pump 402"
    initialise_on_start = True
    # Single-character valve status codes returned by the "V" command.
    valve_positions = {
        "N": "needle",
        "R": "reservoir",
        "X": "moving",
        "O": "error",
        "M": "missing"
    }
    def setup (self, syringe_sizes):
        """
        @param syringe_sizes: (left, right) syringe sizes in uL; an
            entry may be None when that side is not installed (but not
            both).
        """
        if all(s is None for s in syringe_sizes):
            raise InvalidPistonSize(syringe_sizes)
        self.piston1 = _syringe_piston(self, 0, syringe_sizes[0])
        self.piston2 = _syringe_piston(self, 1, syringe_sizes[1])
        def _set_valve_position (id):
            # Build a setter closure for valve `id` (0 = left, 1 = right).
            command = ("VL", "VR")[id]
            def start_checking (result, position, finished):
                # Give the valve time to start moving before polling.
                return task.deferLater(
                    reactor, 0.5, check_finished,
                    position, finished
                )
            def check_finished (position, finished):
                def cb (result):
                    status = result[id]
                    if status == "N" or status == "R":
                        # Workaround...
                        # NOTE(review): `is` identity comparison with small
                        # ints relies on CPython's small-int cache; "=="
                        # would be the safe spelling.
                        if id is 0:
                            self.valve1._push(position)
                        elif id is 1:
                            self.valve2._push(position)
                        finished.callback(None)
                    elif status == "X": # Still running
                        reactor.callLater(0.1, check)
                    else: # Error condition
                        finished.errback(ValveMoveFailed())
                def check ():
                    self.protocol.immediate_command("V").addCallback(cb)
                check()
            def setter (position):
                finished = defer.Deferred()
                self.protocol.buffered_command(
                    command + ("R" if position == "reservoir" else "N")
                ).addCallback(
                    start_checking, position, finished
                ).addErrback(finished.errback)
                return finished
            return setter
        self.valve1 = Property(
            title = "L Valve Position", type = str,
            options = ("reservoir", "needle"),
            setter = _set_valve_position(0)
        )
        self.valve2 = Property(
            title = "R Valve Position", type = str,
            options = ("reservoir", "needle"),
            setter = _set_valve_position(1)
        )
        self.ui = ui(
            properties = [
                self.piston1.status,
                self.piston1.volume,
                self.valve1,
                self.piston2.status,
                self.piston2.volume,
                self.valve2
            ]
        )
    def start (self):
        """Initialise the pistons (if configured) and read initial state."""
        if self.initialise_on_start:
            self.piston1.initialize()
            self.piston2.initialize()
        def interpret_status (result):
            # "M" returns 6 characters per piston: status char + volume.
            self.piston1.update(result[0:6])
            self.piston2.update(result[6:12])
        def interpret_valves (result):
            self.valve1._push(self.valve_positions[result[0]])
            self.valve2._push(self.valve_positions[result[1]])
        self.protocol.immediate_command("M").addCallback(interpret_status)
        self.protocol.immediate_command("V").addCallback(interpret_valves)
    def stop (self):
        pass
    def reset (self):
        return defer.succeed(None)
    def pause (self):
        """Halt both pistons ("HB")."""
        return self.protocol.buffered_command("HB")
    def resume (self):
        """Resume both pistons ("BB")."""
        return self.protocol.buffered_command("BB")
def _set_lamp (machine):
def set_lamp (power):
return machine.protocol.buffered_command("L%d" % (1 if power == "on" else 0));
return set_lamp
def _set_wavelength (machine):
def set_wavelength (wavelength):
return machine.protocol.buffered_command("P0=%s" % wavelength);
return set_wavelength
def _set_sensitivity (machine, i):
def set_sensitivity (AU):
return machine.protocol.buffered_command("P%s=%s" % (i, AU));
return set_sensitivity
class UVVis151 (Machine):
    """Driver for the Gilson 151 UV/VIS spectrometer.

    Streams detector readings (two sensitivity channels) and
    transmittance from the instrument's FIFO buffers.
    """
    protocolFactory = Factory.forProtocol(basic.QueuedLineReceiver)
    name = "Gilson 151 UV/VIS Spectrometer"
    analogue_sample_frequency = 0.1
    analogue_sample_interval = 0.5
    default_wavelength = 254
    def setup (self):
        # setup variables
        self.power = Property(title = "Lamp Power", type = str, options = ("on", "off"), setter = _set_lamp(self))
        self.wavelength = Property(title = "Wavelength", type = int, min = 170, max = 700, unit = "nm", setter = _set_wavelength(self))
        self.sensitivity1 = Property(title = "Sensitivity 1", type = float, min = 0.001, max = 2., unit = "AUFS", setter = _set_sensitivity(self, 1))
        self.sensitivity2 = Property(title = "Sensitivity 2", type = float, min = 0.001, max = 2., unit = "AUFS", setter = _set_sensitivity(self, 2))
        self.detection1 = gsioc.FIFOStream(channel = 0, title = "Detection at Sensitivity 1", type = float)
        self.detection2 = gsioc.FIFOStream(channel = 1, title = "Detection at Sensitivity 2", type = float)
        self.transmittance = gsioc.FIFOStream(channel = 2, title = "Transmittance", type = float, unit = "%", factor = 0.1)
        self.ui = ui(
            traces = [{
                "title": "Detection",
                "unit": self.detection1.unit,
                "traces": [self.detection1, self.detection2],
                "colours": ["#000", "#07F"]
            }, {
                "title": "Transmittance",
                "unit": self.transmittance.unit,
                "traces": [self.transmittance],
                "colours": ["#0c4"]
            }],
        )
    def start (self):
        def get_param (id):
            # Request parameter `id` ("P<id>") then read the reply ("P").
            def request (result):
                return self.protocol.immediate_command("P")
            return self.protocol.buffered_command("P" + str(id)).addCallback(request)
        def interpretLampStatus (result):
            if len(result) == 9:
                self.power._push("on")
            else:
                self.power._push("off")
        def interpretWavelength (result):
            # BUG FIX: replies look like "00=<nm>". The old code compared
            # the single character result[0:1] against the 3-character
            # prefix "00=" (never true, so the wavelength was never
            # recorded) and would then have parsed from index 2, which
            # still includes the "=".
            if result[0:3] == "00=":
                self.wavelength._push(int(result[3:]))
        def monitorStatus ():
            pass
            # i = monitors.__iter__()
            #self.protocol.immediate_command("L").addCallback(interpretLampStatus)
            #get_param(0).addCallback(interpretWavelength)
        def monitorData ():
            self.detection1.update(self.protocol)
            self.detection2.update(self.protocol)
            self.transmittance.update(self.protocol)
        def reset ():
            self.detection1.reset(self.protocol, self.analogue_sample_frequency)
            self.detection2.reset(self.protocol, self.analogue_sample_frequency)
            self.transmittance.reset(self.protocol, self.analogue_sample_frequency)
        # Reset the buffers every minute.
        self._tick(reset, 60)
        self._tick(monitorData, self.analogue_sample_interval)
        # Temp: Get wavelength at startup
        get_param(0).addCallback(interpretWavelength)
    def stop (self):
        """Cancel the periodic FIFO polls."""
        self._stopTicks()
    def reset (self):
        """Restore the default wavelength."""
        return defer.gatherResults([
            self.wavelength.set(self.default_wavelength)
        ])
    def zero (self):
        """Auto-zero the detector."""
        return self.protocol.buffered_command("Z")
| {
"content_hash": "bf2c4589b2ad0be7ba3da62a94a199c7",
"timestamp": "",
"source": "github",
"line_count": 811,
"max_line_length": 143,
"avg_line_length": 28.76572133168927,
"alnum_prop": 0.6587509108834498,
"repo_name": "rasata/octopus",
"id": "56dd3504cdeb0f3fea586a7c1dfc9f8e358c957e",
"size": "23348",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "octopus/manufacturer/gilson.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1586"
},
{
"name": "HTML",
"bytes": "20245"
},
{
"name": "JavaScript",
"bytes": "108649"
},
{
"name": "Python",
"bytes": "242378"
}
],
"symlink_target": ""
} |
import os
from django.conf import settings
from django.contrib.staticfiles.finders import BaseFinder
from django.core.files.storage import FileSystemStorage
# Static-file prefix -> configured absolute path for each customisable
# logo asset. Entries whose setting is empty are ignored below.
logo_paths = {
    'header_logo': settings.HEADER_LOGO,
    'favicon': settings.FAVICON,
}

# Lookup table used by LogoFinder.find(): "<prefix>/<basename>" -> path.
logofinder_results = {
    os.path.join(prefix, os.path.basename(path)): path
    for prefix, path in logo_paths.items()
    if path
}

# File names expected inside a generated favicon package directory.
favicon_package_files = {
    'android-chrome-192x192.png',
    'android-chrome-512x512.png',
    'apple-touch-icon.png',
    'browserconfig.xml',
    'mstile-150x150.png',
    'mstile-310x310.png',
    'safari-pinned-tab.svg',
    'site.webmanifest',
}

# Expose the favicon package files under the "favicon_package" prefix.
# NOTE(review): the loop variable `file` shadows a Python 2 builtin.
if settings.FAVICON_PACKAGE and os.path.isdir(settings.FAVICON_PACKAGE):
    logofinder_results.update({
        os.path.join('favicon_package', file): os.path.join(settings.FAVICON_PACKAGE, file)
        for file in favicon_package_files
    })
class LogoFinder(BaseFinder):
    """Static files finder exposing the configured logo/favicon assets."""

    def find(self, path, all=False):
        """Return the filesystem path(s) matching *path*, or [] if none."""
        match = logofinder_results.get(path)
        if not match:
            return []
        return [match] if all else match

    def list(self, ignore_patterns):
        """Yield (filename, storage) pairs for every configured asset."""
        entries = []
        for prefix, source in logo_paths.items():
            if not source:
                continue
            directory, filename = os.path.split(source)
            storage = FileSystemStorage(location=directory)
            storage.prefix = prefix
            entries.append((filename, storage))
        package_dir = settings.FAVICON_PACKAGE
        if package_dir and os.path.isdir(package_dir):
            storage = FileSystemStorage(location=package_dir)
            storage.prefix = 'favicon_package'
            entries.extend((name, storage) for name in favicon_package_files)
        return entries
| {
"content_hash": "c0f9daa7ddb5fdaf93b9d24e7a074353",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 91,
"avg_line_length": 30.517241379310345,
"alnum_prop": 0.6468926553672316,
"repo_name": "c3nav/c3nav",
"id": "d14b368ae20492898f3cfbc679fd739be9f5c952",
"size": "1770",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/c3nav/site/finders.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "986"
},
{
"name": "HTML",
"bytes": "89944"
},
{
"name": "JavaScript",
"bytes": "179692"
},
{
"name": "Python",
"bytes": "1061013"
},
{
"name": "SCSS",
"bytes": "41200"
},
{
"name": "Sass",
"bytes": "11121"
},
{
"name": "Shell",
"bytes": "90"
}
],
"symlink_target": ""
} |
import json
from flask_login import AnonymousUserMixin
import slot.db_fieldbook as db
from . import models
def convert_user_dict_to_user_instance(user_dict):
    """Build a models.User from a dict carrying 'username' and 'password'."""
    username = user_dict['username']
    password = user_dict['password']
    return models.User(username, password)
def return_user_instance_or_anonymous(thing):
    """Coerce *thing* (models.User, dict, or JSON string) to a User.

    Returns an AnonymousUserMixin when *thing* cannot be converted.

    Fixes over the previous version:
    - ``type(thing) == type(json())`` called the json *module*, which
      always raised TypeError; JSON payloads are now detected as strings.
    - ``type(thing) == type(models.User())`` instantiated a User just to
      compare types; isinstance() is used throughout.
    - ``user_instance`` could be referenced while unbound when no branch
      matched; it now defaults to None.
    """
    user_instance = None
    if isinstance(thing, models.User):
        user_instance = thing
    elif isinstance(thing, dict):
        user_instance = convert_user_dict_to_user_instance(thing)
    elif isinstance(thing, str):
        # Assume a JSON-encoded user dict.
        user_instance = convert_user_dict_to_user_instance(json.loads(thing))
    if isinstance(user_instance, models.User):
        return user_instance
    return AnonymousUserMixin()
def return_user_if_valid_credentials(username, password):
    """Return the matching models.User when credentials are valid, else None."""
    # Try and retrieve the user from the database using the username -
    # if successful it means the user exists
    user = db.get_user(username)
    if not user:
        print('Invalid credentials')
        return None
    # NOTE(security): passwords are compared (and apparently stored) in
    # plain text; they should be hashed, e.g. with werkzeug.security.
    if password != user['password']:
        # BUG FIX: a wrong password previously fell through with no
        # message at all; report it like an unknown username.
        print('Invalid credentials')
        return None
    print('Valid credentials')
    user_instance = return_user_instance_or_anonymous(user)
    if isinstance(user_instance, models.User):
        return user_instance
    return None
| {
"content_hash": "a4d766a3509e35418ff7d875c4ccb183",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 109,
"avg_line_length": 31.523809523809526,
"alnum_prop": 0.6608761329305136,
"repo_name": "nhshd-slot/SLOT",
"id": "a094a62c2d8c5a43899ab3f1eb335601b7c318e4",
"size": "1324",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "slot/users/controller.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "84"
},
{
"name": "HTML",
"bytes": "10079"
},
{
"name": "JavaScript",
"bytes": "484"
},
{
"name": "Python",
"bytes": "30140"
}
],
"symlink_target": ""
} |
"""
Script to run the experiments described in:
Julian Hough and David Schlangen.
Joint, Incremental Disfluency Detection and
Utterance Segmentation from Speech.
EACL 2017.
"""
import sys
import subprocess
import os
import urllib
import zipfile
import tarfile
THIS_DIR = os.path.dirname(os.path.realpath(__file__))
sys.path.append(THIS_DIR + "/../../")
from deep_disfluency.tagger.deep_tagger import DeepDisfluencyTagger
# The data must been downloaded
# and put in place according to the top-level README
# each of the parts of the below can be turned off
# though they must be run in order so the latter stages work
# Pipeline stage switches. The stages must be run in order at least
# once; completed stages can then be switched off on later runs.
download_raw_data = False
create_disf_corpus = False
extract_features = False
train_models = False
test_models = True
asr = False  # extract and test on ASR results too
partial = True  # whether to include partial words or not
# Files listing which Switchboard dialogues belong to each division.
range_dir = THIS_DIR + \
    '/../data/disfluency_detection/swda_divisions_disfluency_detection'
file_divisions_transcripts = [
    ('train', range_dir + '/swbd_disf_train_1_ranges.text'),
    # range_dir + '/swbd_disf_train_audio_ranges.text',
    ('heldout', range_dir + '/swbd_disf_heldout_ranges.text'),
    ('test', range_dir + '/swbd_disf_test_ranges.text'),
]
# Raw-data download locations and the directories they are unpacked to.
SWBD_TIMINGS_URL = 'http://www.isip.piconepress.com/' + \
    'projects/switchboard/releases/ptree_word_alignments.tar.gz'
SWDA_CORPUS_URL = 'https://github.com/julianhough/' + \
    'swda/blob/master/swda.zip?raw=true'
SWBD_TIMINGS_DIR = THIS_DIR + '/../data/raw_data/' + \
    SWBD_TIMINGS_URL.split('/')[-1].replace(".tar.gz", "")
SWDA_CORPUS_DIR = THIS_DIR + '/../data/raw_data/' + \
    SWDA_CORPUS_URL.split('/')[-1].replace(".zip", "")
# the experiments in the EACL paper
# 33 RNN simple tags, disf + utt joint
# 34 RNN complex tags, disf + utt joint
# 35 LSTM simple tags, disf + utt joint
# 36 LSTM complex tags, disf + utt joint
# 37 LSTM simple tags, disf only
# 38 LSTM simple tags, utt only
# 39 LSTM complex tags, disf only
experiments = [33, 34, 35, 36, 37, 38]
# experiments = [35]  # short version for testing
# 1. Download the SWDA and word timings
# Each archive is only fetched and unpacked when it is not already
# present on disk. (Python 2: urllib.urlretrieve, print statements.)
if download_raw_data:
    name = THIS_DIR + '/../data/raw_data/swda.zip'
    if not os.path.isfile(name):
        print 'downloading', name
        urllib.urlretrieve(SWDA_CORPUS_URL, name)
        zipf = zipfile.ZipFile(name)
        zipf.extractall(path=SWDA_CORPUS_DIR)
        zipf.close()
        print 'extracted at', SWDA_CORPUS_DIR
    name = THIS_DIR + '/../data/raw_data/' + SWBD_TIMINGS_URL.split('/')[-1]
    if not os.path.isfile(name):
        print 'downloading', name
        urllib.urlretrieve(SWBD_TIMINGS_URL, name)
        tar = tarfile.open(name)
        tar.extractall(path=SWBD_TIMINGS_DIR)
        tar.close()
        print 'extracted at', SWBD_TIMINGS_DIR
# 2. Create the base disfluency tagged corpora in a standard format
"""
for all divisions call the corpus creator
parse c line parameters
Optional arguments:
-i string, path of source data (in swda style)
-t string, target path of folder for the preprocessed data
-f string, path of file with the division of files to be turned into
a corpus
-a string, path to disfluency annotations
-lm string, Location of where to write a clean language\
model files out of this corpus
-pos boolean, Whether to write a word2pos mapping folder
in the sister directory to the corpusLocation, else assume it is there
-p boolean, whether to include partial words or not
-d boolean, include dialogue act tags in the info
"""
# Run the corpus creator once per division as a subprocess; the command
# line flags are documented in the string literal above this block.
if create_disf_corpus:
    print 'Creating corpus...'
    write_pos_map = True
    for div, divfile in file_divisions_transcripts:
        # NOTE(review): '-f' is given the division name `div`, while the
        # ranges file path `divfile` is unpacked but never used --
        # verify against disfluency_corpus_creator.py's expectations.
        c = [sys.executable, THIS_DIR +
             '/../corpus/disfluency_corpus_creator.py',
             '-i', SWDA_CORPUS_DIR + '/swda',
             '-t', THIS_DIR + '/../data/disfluency_detection/switchboard',
             '-f', div,
             '-a', THIS_DIR +
             '/../data/disfluency_detection/swda_disfluency_annotations',
             # '-lm', 'data/lm_corpora',
             '-d'
             ]
        if partial:
            c.append('-p')
        if write_pos_map:
            c.append('-pos')
            write_pos_map = False  # just call it once
        subprocess.call(c)
    print 'Finished creating corpus.'
# 3. Run the preprocessing and extraction of features for all files
"""
note to get the audio feature extraction to work you need to have
optional arguments are:
i string, path of source disfluency corpus
t string, target path of folder feature matrices in this folder
(rather than use text files)
f string, path of file with the division of files to be turned into
a corpus of vectors
p boolean, whether to include partial words or not
a string, path to word alignment folder
tag string, path of folder with tag representations
new_tag bool, whether to write new tag representations or use old ones
pos path, path to POS tagger if using one, if None use gold
train_pos bool, whether to train pos tagger or not and put it in pos
u bool, include utterance segmentation tags, derivable from utts
d bool, include dialogue act tags
l bool, include laughter tags on words- either speech laugh on word or
bout
joint bool, include big joint tag set as well as the individual ones
lm string, Location of where to write a clean language\
model files out of this corpus
xlm boolean, Whether to use a cross language model\
training to be used for getting lm features on the same data.
asr boolean, whether to produce ASR results for creation of the
data or not
credentials string, username:password for IBM ASR
audio string, path to open smile for audio features, if None
no audio extraction.
"""
# Run the feature extractor once per division as a subprocess; the
# command line flags are documented in the string literal above.
if extract_features:
    print 'Extracting features...'
    tags_created = False
    tagger_trained = False
    for div, divfile in file_divisions_transcripts:
        c = [sys.executable, 'feature_extraction/extract_features.py',
             '-i', 'data/disfluency_detection/switchboard',
             '-t', 'data/disfluency_detection/switchboard/' +
             'feature_matrices',
             '-f', div,
             '-p',
             '-a', SWBD_TIMINGS_DIR + '/data/alignments',
             '-tag', 'data/tag_representations',
             '-u',
             '-d',
             '-l',
             '-joint',
             # '-lm', "data/lm_corpora"
             ]
        if 'train' in div and '-lm' in c:
            c.append('-xlm')
        # Only write new tag representations on the first division.
        if not tags_created:
            c.append('-new_tag')
            tags_created = True
        if asr and 'ASR' in div:
            c.extend(['-pos', 'data/crfpostagger'])
            # NOTE(review): tagger_trained is never set to True, so
            # '-train_pos' would be passed on every ASR division.
            if not tagger_trained:
                c.append('-train_pos')
            # NOTE(security): hard-coded IBM ASR credentials committed to
            # the repository -- these should be revoked and read from the
            # environment or a config file instead.
            credentials = \
                '1841487c-30f4-4450-90bd-38d1271df295:EcqA8yIP7HBZ'
            c.extend(['-asr', '-credentials', credentials])
        subprocess.call(c)
    print 'Finished extracting features.'
# 4. Train the model on the transcripts (and audio data if available)
# NB each of these experiments can take up to 24 hours
# Experiment number -> best (selected) training epoch for that system.
systems_best_epoch = {}
if train_models:
    feature_matrices_filepath = THIS_DIR + '/../data/disfluency_detection/' + \
        'feature_matrices/train'
    validation_filepath = THIS_DIR + '/../data/disfluency_detection/' + \
        'feature_matrices/heldout'
    # train until convergence
    # on the settings according to the numbered experiments in
    # experiments/config.csv file
    for exp in experiments:
        disf = DeepDisfluencyTagger(
            config_file=THIS_DIR + "/experiment_configs.csv",
            config_number=exp
        )
        exp_str = '%03d' % exp
        # train_net returns the epoch with the best validation score.
        e = disf.train_net(
            train_dialogues_filepath=feature_matrices_filepath,
            validation_dialogues_filepath=validation_filepath,
            model_dir=THIS_DIR + '/' + exp_str,
            tag_accuracy_file_path=THIS_DIR +
            '/results/tag_accuracies/{}.text'.format(exp_str))
        systems_best_epoch[exp] = e
else:
    # 33 RNN simple tags, disf + utt joint
    # 34 RNN complex tags, disf + utt joint
    # 35 LSTM simple tags, disf + utt joint
    # 36 LSTM complex tags, disf + utt joint
    # 37 LSTM simple tags, disf only
    # 38 LSTM simple tags, utt only
    # 39 LSTM complex tags, disf only
    # Take our word for it that the saved models are the best ones:
    # NOTE(review): experiment 39 is listed above but has no saved epoch
    # here (it is also absent from `experiments`).
    systems_best_epoch[33] = 45  # RNN
    systems_best_epoch[34] = 37  # RNN (complex tags)
    systems_best_epoch[35] = 6  # LSTM
    systems_best_epoch[36] = 15  # LSTM (complex tags)
    systems_best_epoch[37] = 6  # LSTM (disf only)
    systems_best_epoch[38] = 8  # LSTM (utt only)
# 5. Test the models on the test transcripts according to the best epochs
# from training.
# The output from the models is made in the folders
# For now all use timing data
# Evaluate every selected model on heldout and test, with and without
# word timing information, writing incremental outputs next to the
# saved epoch directory.
if test_models:
    print "testing models..."
    for exp, best_epoch in sorted(systems_best_epoch.items(),
                                  key=lambda x: x[0]):
        for timing_bool in [
            False,
            True
        ]:  # test with and without timing info
            # Disfluency-only systems (37, 39) have no timing variant.
            if exp in [37, 39] and timing_bool:
                print "skipping timing condition for disfluency-only tagger"
                continue
            exp_str = '%03d' % exp
            # load the model
            disf = DeepDisfluencyTagger(
                config_file=THIS_DIR + '/experiment_configs.csv',
                config_number=exp,
                saved_model_dir=THIS_DIR +
                '/{0}/epoch_{1}'.format(exp_str, best_epoch),
                use_timing_data=timing_bool
            )
            # simulating (or using real) ASR results
            # for now just saving these in the same folder as the best epoch
            # also outputs the speed
            timing_string = '_timings' if timing_bool else ''
            partial_string = '_partial' if partial else ''
            for div in [
                'heldout',
                'test'
            ]:
                disf.incremental_output_from_file(
                    THIS_DIR +
                    '/../data/disfluency_detection/switchboard/' +
                    'swbd_disf_{0}{1}_data_timings.csv'.format(
                        div, partial_string),
                    target_file_path=THIS_DIR + '/{0}/epoch_{1}/'.format(
                        exp_str, best_epoch) +
                    'swbd_disf_{0}{1}{2}_data_output_increco.text'
                    .format(div, partial_string, timing_string)
                )
# 6. To get the numbers run the notebook:
# experiments/analysis/EACL_2017/EACL_2017.ipynb
# The results should be consistent with that in the EACL 2017 paper.
| {
"content_hash": "2b732cb4ead85163ba2b074c290217b9",
"timestamp": "",
"source": "github",
"line_count": 274,
"max_line_length": 79,
"avg_line_length": 39.46715328467153,
"alnum_prop": 0.6191973367856483,
"repo_name": "dsg-bielefeld/deep_disfluency",
"id": "c0e176ea49be0a5093881f934d997108d50f62db",
"size": "10814",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "deep_disfluency/experiments/EACL_2017.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "722308"
},
{
"name": "Python",
"bytes": "1120517"
}
],
"symlink_target": ""
} |
"""modoboa-stats tests."""
import datetime
import os
import shutil
import tempfile
from django.core.management import call_command
from django.urls import reverse
from django.test import override_settings
from modoboa.admin import factories as admin_factories
from modoboa.core import models as core_models
from modoboa.lib.tests import ModoTestCase
class RunCommandsMixin(object):
    """Mixin to run management commands against a temporary RRD dir."""

    def setUp(self):
        super(RunCommandsMixin, self).setUp()
        self.workdir = tempfile.mkdtemp()
        self.set_global_parameter("rrd_rootdir", self.workdir)

    def tearDown(self):
        shutil.rmtree(self.workdir)

    def run_logparser(self):
        """Render the mail.log template for today and parse it."""
        template_path = os.path.join(os.path.dirname(__file__), "mail.log")
        with open(template_path) as stream:
            template = stream.read()
        content = template % {
            "day": datetime.date.today().strftime("%b %d")}
        log_path = os.path.join(self.workdir, "mail.log")
        with open(log_path, "w") as stream:
            stream.write(content)
        self.set_global_parameter("logfile", log_path)
        call_command("logparser")

    def run_update_statistics(self, rebuild=False):
        """Run update_statistics command."""
        extra_args = ["--rebuild"] if rebuild else []
        call_command("update_statistics", *extra_args)
@override_settings(RRDTOOL_TEST_MODE=True)
class ViewsTestCase(RunCommandsMixin, ModoTestCase):
    """Views test cases: graph listing/rendering and domain list."""
    @classmethod
    def setUpTestData(cls): # noqa
        super(ViewsTestCase, cls).setUpTestData()
        # Create the standard fixtures (test.com / test2.com and accounts).
        admin_factories.populate_database()
        # Domain administrator used to exercise permission checks.
        cls.da = core_models.User.objects.get(username="admin@test.com")
    def tearDown(self):
        super(ViewsTestCase, self).tearDown()
        # Make sure a test enabling greylisting doesn't leak into the next one.
        self.set_global_parameter("greylist", False)
    def test_index(self):
        """Test index view."""
        url = reverse("modoboa_stats:fullindex")
        response = self.client.get(url)
        # The superadmin sees both graph sets.
        self.assertContains(response, 'id="graphs_accountgraphicset"')
        self.assertContains(response, 'id="graphs_mailtraffic"')
        # A domain admin still sees the mail traffic set.
        self.client.force_login(self.da)
        response = self.client.get(url)
        self.assertContains(response, 'id="graphs_mailtraffic"')
    def test_graphs(self):
        """Test graphs views."""
        self.run_logparser()
        url = reverse("modoboa_stats:graph_list")
        # A graph set parameter is mandatory.
        self.ajax_get(url, status=404)
        response = self.ajax_get("{}?gset=mailtraffic".format(url))
        self.assertIn("averagetraffic", response["graphs"])
        # Every named period must be accepted and echoed back.
        for period in ["week", "month", "year"]:
            response = self.ajax_get(
                "{}?gset=mailtraffic&period={}".format(url, period))
            self.assertIn("averagetraffic", response["graphs"])
            self.assertEqual(response["period_name"], period)
        # custom period
        today = datetime.date.today()
        start = "{} 11:00:00".format(today)
        end = "{} 11:40:00".format(today)
        response = self.ajax_get(
            "{}?gset=mailtraffic&period=custom&start={}&end={}".format(
                url, start, end)
        )
        self.assertIn("averagetraffic", response["graphs"])
        # unknown domain
        response = self.ajax_get(
            "{}?gset=mailtraffic&searchquery=unknown.com".format(url),
            status=400)
        # check with greylist enabled
        self.set_global_parameter("greylist", True)
        response = self.ajax_get("{}?gset=mailtraffic".format(url))
        self.assertIn("averagetraffic", response["graphs"])
    def test_account_created_graph(self):
        """Check data."""
        self.run_update_statistics(rebuild=True)
        url = reverse("modoboa_stats:graph_list")
        response = self.ajax_get("{}?gset=accountgraphicset".format(url))
        data = (
            response["graphs"]["accountcreationgraphic"]["curves"][0]["data"])
        # presumably populate_database() creates 5 accounts, hence the last
        # datapoint -- verify against the factory if this ever changes.
        self.assertEqual(data[-1]["y"], 5.0)
    def test_graphs_as_domainadmin(self):
        """Test graph views as domain admin."""
        self.run_logparser()
        self.client.force_login(self.da)
        url = "{}?gset=mailtraffic".format(reverse("modoboa_stats:graph_list"))
        response = self.ajax_get(url)
        self.assertIn("averagetraffic", response["graphs"])
        # Allowed: admin@test.com administers test.com.
        response = self.ajax_get("{}&searchquery=test.com".format(url))
        self.assertIn("averagetraffic", response["graphs"])
        # Forbidden: test2.com is not one of their domains.
        response = self.ajax_get(
            "{}&searchquery=test2.com".format(url), status=403)
    def test_get_domain_list(self):
        """Test get_domain_list view."""
        url = reverse("modoboa_stats:domain_list")
        response = self.ajax_get(url)
        self.assertIn("test.com", response)
        self.assertIn("test2.com", response)
@override_settings(RRDTOOL_TEST_MODE=True)
class ManagementCommandsTestCase(RunCommandsMixin, ModoTestCase):
    """Management command test cases."""
    @classmethod
    def setUpTestData(cls): # noqa
        super(ManagementCommandsTestCase, cls).setUpTestData()
        # Create the standard fixtures (test.com domain, accounts...).
        admin_factories.populate_database()
    def test_logparser(self):
        """Test logparser command: one RRD file per domain plus a global one."""
        self.run_logparser()
        for d in ["global", "test.com"]:
            path = os.path.join(self.workdir, "{}.rrd".format(d))
            self.assertTrue(os.path.exists(path))
    def test_logparser_with_greylist(self):
        """Test logparser when greylist activated."""
        self.set_global_parameter("greylist", True)
        self.run_logparser()
        for d in ["global", "test.com"]:
            path = os.path.join(self.workdir, "{}.rrd".format(d))
            self.assertTrue(os.path.exists(path))
    def test_update_statistics(self):
        """Test update_statistics command."""
        self.run_update_statistics()
        path = os.path.join(self.workdir, "new_accounts.rrd")
        self.assertTrue(os.path.exists(path))
        # Rebuild mode must also (re)create the file.
        self.run_update_statistics(rebuild=True)
        self.assertTrue(os.path.exists(path))
| {
"content_hash": "8ec4741adc496078e0725250d6eef790",
"timestamp": "",
"source": "github",
"line_count": 168,
"max_line_length": 79,
"avg_line_length": 35.904761904761905,
"alnum_prop": 0.6205238726790451,
"repo_name": "modoboa/modoboa-stats",
"id": "1db987de2eaa8ad9af4e3fb886808113126c6da2",
"size": "6032",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "modoboa_stats/tests/test_views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "929"
},
{
"name": "HTML",
"bytes": "3198"
},
{
"name": "JavaScript",
"bytes": "19847"
},
{
"name": "Python",
"bytes": "57356"
}
],
"symlink_target": ""
} |
"""
Copyright (C) 2012 Legoktm
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
IN THE SOFTWARE.
"""
#
# See https://en.wikipedia.org/w/index.php?title=Wikipedia:Bot_requests&oldid=504648019#Baronetcy_articles
#
# Will mass move all non-redirect articles to their lower-case variants
import os
import pywikibot
import robot
def log(old_title, new_title):
    """Append a wiki-formatted move record to the local log file.

    Each entry has the form ``*[[:old]] --> [[:new]]`` so the log can be
    pasted straight onto a wiki page.
    """
    LOGFILE = 'movepages.log'
    msg = '*[[:%s]] --> [[:%s]]\n' % (old_title, new_title)
    # Open in append mode: this creates the file when missing and avoids
    # re-reading the whole log on every call (the original read/rewrite
    # cycle cost O(log size) per entry and leaked file handles on error).
    with open(LOGFILE, 'a') as f:
        f.write(msg)
class RMBot(robot.Robot):
    """Bot that mass-moves pages in [[Category:Baronetcies]] from
    'Baronets' to lower-case 'baronets' titles, per the requested move.
    """

    def __init__(self):
        robot.Robot.__init__(self, task=16)
        # The two %s placeholders are filled with (old_title, new_title).
        self.reason = 'BOT: Moving %s to %s per [[Talk:Abdy_Baronets#Requested_move|RM]]'

    def run(self):
        """Iterate over every page in Category:Baronetcies and process it."""
        cat = pywikibot.Category(pywikibot.Page(self.site, 'Category:Baronetcies'))
        gen = pywikibot.pagegenerators.CategorizedPageGenerator(cat)
        for page in gen:
            self.do_page(page)

    def do_page(self, page):
        """Move one page to its lower-cased 'baronets' title.

        Redirects and pages whose title lacks 'Baronets' are skipped.
        """
        old_title = page.title()
        if page.isRedirectPage():
            self.output('Skipping %s, it\'s a redirect' % page.title())
            return
        if 'Baronets' not in old_title:
            self.output('Skipping %s, doesnt contain \'Baronets\' in it.' % page.title())
            return
        new_title = old_title.replace('Baronets', 'baronets')
        if old_title == new_title:
            # NOTE(review): this logs but still falls through to the move
            # below (matching original behaviour) -- confirm whether an
            # early `return` was intended here.
            self.output('New title is same as old title? logging.')
            log(old_title, new_title)
        edit_summary = self.reason % (old_title, new_title)
        self.output('Moving: %s --> %s' % (old_title, new_title))
        try:
            if not self.isEnabled():
                self.output('Disabled, quitting.')
                self.quit()
            page.move(new_title, reason=edit_summary, movetalkpage=True)
        # Fix: `except E as e` works on Python 2.6+ AND Python 3, unlike the
        # old `except E, e` comma form, which is a syntax error on Python 3.
        except pywikibot.exceptions.Error as e:
            self.output(e)
            log(old_title, new_title)
            return
if __name__ == "__main__":
    # Script entry point: build the bot and process the whole category.
    RMBot().run()
"content_hash": "cacfc38c830b9da84ec0c2d205a3b2e6",
"timestamp": "",
"source": "github",
"line_count": 85,
"max_line_length": 106,
"avg_line_length": 36.38823529411765,
"alnum_prop": 0.646944713870029,
"repo_name": "legoktm/pywikipedia-scripts",
"id": "b45aae02881b9f3d5f4374a78b8cd0f5257c34fa",
"size": "3117",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "baronetcies.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "290594"
}
],
"symlink_target": ""
} |
from tools import cli, service_instance
# Build the CLI: required VM UUID plus optional snapshot name/description.
parser = cli.Parser()
parser.add_required_arguments(cli.Argument.UUID)
parser.add_custom_argument('--instance', required=False, action='store_true',
                           help="Flag to indicate the UUID is an instance UUID")
parser.add_custom_argument('--description', required=False, help="Description for the snapshot")
parser.add_custom_argument('--name', required=True, help="Name for the Snapshot")
args = parser.get_args()
si = service_instance.connect(args)
# Whether to look the VM up by instance UUID instead of BIOS UUID.
instance_search = False
if not si:
    raise SystemExit("Unable to connect to host with supplied info.")
if args.instance:
    instance_search = True
vm = si.content.searchIndex.FindByUuid(None, args.uuid, True, instance_search)
if vm is None:
    raise SystemExit("Unable to locate VirtualMachine.")
desc = None
if args.description:
    desc = args.description
# Kick off the snapshot task: memory state included, no file-system quiescing.
task = vm.CreateSnapshot_Task(name=args.name,
                              description=desc,
                              memory=True,
                              quiesce=False)
# NOTE(review): the task handle is never awaited -- "Completed" is printed as
# soon as the task is *created*; confirm whether WaitForTask is wanted here.
print("Snapshot Completed.")
del vm
# Re-fetch the VM so its `snapshot` property reflects the new snapshot.
vm = si.content.searchIndex.FindByUuid(None, args.uuid, True, instance_search)
snap_info = vm.snapshot
tree = snap_info.rootSnapshotList
# Walk the first-child chain, printing each snapshot's name and description.
while tree[0].childSnapshotList is not None:
    print("Snap: {0} => {1}".format(tree[0].name, tree[0].description))
    if len(tree[0].childSnapshotList) < 1:
        break
    tree = tree[0].childSnapshotList
| {
"content_hash": "e1b4139fab548e21a5e6056986d86db3",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 96,
"avg_line_length": 36.1,
"alnum_prop": 0.6821329639889196,
"repo_name": "vmware/pyvmomi-community-samples",
"id": "e74b97b64b3591e108a89f636988d0e78b2e3dff",
"size": "2065",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "samples/create_snapshot.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1631"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations
def create_site(apps, schema_editor):
    """Seed the default Site row used by the schools explorer."""
    site_model = apps.get_model('sites', 'Site')
    site_model.objects.create(
        id=1,
        domain='schools.texastribune.org',
        name='Texas Public Schools Explorer',
    )
class Migration(migrations.Migration):
    # Must run after the built-in sites app has created its tables.
    dependencies = [
        ('sites', '0001_initial'),
    ]
    operations = [
        # Forward-only data migration; no reverse function is provided.
        migrations.RunPython(create_site)
    ]
| {
"content_hash": "6bb1ebb304a18f1723578ea57673206c",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 45,
"avg_line_length": 19.91304347826087,
"alnum_prop": 0.6353711790393013,
"repo_name": "texastribune/scuole",
"id": "9362f2b4865d9aeab928d0f18c66b1fd83bc78b7",
"size": "528",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scuole/core/sites_migrations/0002_auto_20151207_1859.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "1596"
},
{
"name": "HTML",
"bytes": "122611"
},
{
"name": "JavaScript",
"bytes": "44740"
},
{
"name": "Jupyter Notebook",
"bytes": "18454"
},
{
"name": "Makefile",
"bytes": "7139"
},
{
"name": "Python",
"bytes": "611799"
},
{
"name": "SCSS",
"bytes": "32865"
},
{
"name": "Shell",
"bytes": "700"
}
],
"symlink_target": ""
} |
import logging
import os
import subprocess
from common import SDK_ROOT
from common import GetHostArchFromPlatform
from common import GetHostToolPathFromPlatform
def BuildIdsPaths(package_paths):
  """Generates build ids paths for symbolizer processes.

  For each package path, returns the path of the sibling ids.txt file
  located in the same directory."""
  ids_paths = []
  for package_path in package_paths:
    package_dir = os.path.dirname(package_path)
    ids_paths.append(os.path.join(package_dir, 'ids.txt'))
  return ids_paths
def RunSymbolizer(input_fd, output_fd, ids_txt_paths):
  """Starts a symbolizer process.

  input_fd: Input file to be symbolized.
  output_fd: Output file for symbolizer stdout and stderr.
  ids_txt_paths: Path to the ids.txt files which map build IDs to
                 unstripped binaries on the filesystem.
  Returns a Popen object for the started process."""
  symbolizer_cmd = [
      GetHostToolPathFromPlatform('symbolizer'),
      '--omit-module-lines',
      '--build-id-dir', os.path.join(SDK_ROOT, '.build-id'),
  ]
  # Each ids.txt file is passed through its own --ids-txt flag.
  for ids_txt in ids_txt_paths:
    symbolizer_cmd += ['--ids-txt', ids_txt]
  logging.debug('Running "%s".' % ' '.join(symbolizer_cmd))
  return subprocess.Popen(symbolizer_cmd,
                          stdin=input_fd,
                          stdout=output_fd,
                          stderr=subprocess.STDOUT,
                          close_fds=True)
| {
"content_hash": "e64bed688ae0dfd01c6856d2b869abb2",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 65,
"avg_line_length": 31.48780487804878,
"alnum_prop": 0.6615027110766848,
"repo_name": "scheib/chromium",
"id": "67f19667adac2c571bd04b0995b3f8b284970545",
"size": "1454",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "build/fuchsia/symbolizer.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import models, migrations
import django_markdown.models
class Migration(migrations.Migration):
    # Follows the migration that added AlleleColor.description.
    dependencies = [
        ('disease', '0011_allelecolor_description'),
    ]
    operations = [
        # Article content (title/header/footer markdown) attached to a
        # SNP marker; reachable from the marker via `snp_article`.
        migrations.CreateModel(
            name='SNPMarkerArticle',
            fields=[
                ('id', models.AutoField(serialize=False, auto_created=True, verbose_name='ID', primary_key=True)),
                ('title', models.CharField(help_text='Title for SNM description page', max_length=128)),
                ('header', django_markdown.models.MarkdownField(help_text='Introductory text appearing in header section of SNP description')),
                ('footer', django_markdown.models.MarkdownField(help_text='Place for footer, bibliography etc.')),
                ('snp_marker', models.ForeignKey(to='disease.SNPMarker', related_name='snp_article')),
            ],
        ),
    ]
| {
"content_hash": "ec7768e3414c3690ddbf8a26371964b0",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 143,
"avg_line_length": 39.916666666666664,
"alnum_prop": 0.6346555323590815,
"repo_name": "jiivan/genoomy",
"id": "4db9d68c03b58d50f10831962badcc2405791ca2",
"size": "982",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev_deploy",
"path": "genoome/disease/migrations/0012_snpmarkerarticle.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "51082"
},
{
"name": "HTML",
"bytes": "47982"
},
{
"name": "JavaScript",
"bytes": "31061"
},
{
"name": "Python",
"bytes": "138292"
},
{
"name": "Shell",
"bytes": "5962"
}
],
"symlink_target": ""
} |
from typing import List
from pylint.exceptions import InvalidMessageError, UnknownMessageError
class MessageIdStore:
    """The MessageIdStore store MessageId and make sure that there is a 1-1 relation between msgid and symbol."""

    def __init__(self) -> None:
        # Bidirectional msgid <-> symbol maps, kept mutually consistent by
        # the add_* methods below.
        self.__msgid_to_symbol = {}
        self.__symbol_to_msgid = {}
        # Legacy msgid -> list of current msgids that replaced it.
        self.__old_names = {}

    def __len__(self) -> int:
        return len(self.__msgid_to_symbol)

    def __repr__(self) -> str:
        result = "MessageIdStore: [\n"
        for msgid, symbol in self.__msgid_to_symbol.items():
            result += f" - {msgid} ({symbol})\n"
        result += "]"
        return result

    def get_symbol(self, msgid: str) -> str:
        """Return the symbol registered for ``msgid``.

        :raises UnknownMessageError: if the msgid is not stored."""
        try:
            return self.__msgid_to_symbol[msgid]
        except KeyError as e:
            msg = f"'{msgid}' is not stored in the message store."
            raise UnknownMessageError(msg) from e

    def get_msgid(self, symbol: str) -> str:
        """Return the msgid registered for ``symbol``.

        :raises UnknownMessageError: if the symbol is not stored."""
        try:
            return self.__symbol_to_msgid[symbol]
        except KeyError as e:
            msg = f"'{symbol}' is not stored in the message store."
            raise UnknownMessageError(msg) from e

    def register_message_definition(self, message_definition) -> None:
        """Register a message definition and all of its legacy names,
        validating each (msgid, symbol) pair for duplicates first."""
        self.check_msgid_and_symbol(message_definition.msgid, message_definition.symbol)
        self.add_msgid_and_symbol(message_definition.msgid, message_definition.symbol)
        for old_msgid, old_symbol in message_definition.old_names:
            self.check_msgid_and_symbol(old_msgid, old_symbol)
            self.add_legacy_msgid_and_symbol(
                old_msgid, old_symbol, message_definition.msgid
            )

    def add_msgid_and_symbol(self, msgid: str, symbol: str) -> None:
        """Add valid message id.
        There is a little duplication with add_legacy_msgid_and_symbol to avoid a function call,
        this is called a lot at initialization."""
        self.__msgid_to_symbol[msgid] = symbol
        self.__symbol_to_msgid[symbol] = msgid

    def add_legacy_msgid_and_symbol(self, msgid: str, symbol: str, new_msgid: str):
        """Add valid legacy message id.
        There is a little duplication with add_msgid_and_symbol to avoid a function call,
        this is called a lot at initialization."""
        self.__msgid_to_symbol[msgid] = symbol
        self.__symbol_to_msgid[symbol] = msgid
        # Record which current msgid supersedes this legacy one; a legacy
        # msgid may map to several new msgids.
        existing_old_names = self.__old_names.get(msgid, [])
        existing_old_names.append(new_msgid)
        self.__old_names[msgid] = existing_old_names

    def check_msgid_and_symbol(self, msgid: str, symbol: str) -> None:
        """Validate that (msgid, symbol) does not conflict with an
        existing entry.

        :raises InvalidMessageError: on a duplicate msgid or symbol."""
        existing_msgid = self.__symbol_to_msgid.get(symbol)
        existing_symbol = self.__msgid_to_symbol.get(msgid)
        if existing_symbol is None and existing_msgid is None:
            # Both are free: the pair can be registered.
            return
        if existing_msgid is not None:
            if existing_msgid != msgid:
                self._raise_duplicate_msgid(symbol, msgid, existing_msgid)
        if existing_symbol != symbol:
            self._raise_duplicate_symbol(msgid, symbol, existing_symbol)

    @staticmethod
    def _raise_duplicate_symbol(msgid, symbol, other_symbol):
        """Raise an error when a symbol is duplicated.
        :param str msgid: The msgid corresponding to the symbols
        :param str symbol: Offending symbol
        :param str other_symbol: Other offending symbol
        :raises InvalidMessageError:"""
        symbols = [symbol, other_symbol]
        symbols.sort()
        error_message = f"Message id '{msgid}' cannot have both "
        error_message += f"'{symbols[0]}' and '{symbols[1]}' as symbolic name."
        raise InvalidMessageError(error_message)

    @staticmethod
    def _raise_duplicate_msgid(symbol, msgid, other_msgid):
        """Raise an error when a msgid is duplicated.
        :param str symbol: The symbol corresponding to the msgids
        :param str msgid: Offending msgid
        :param str other_msgid: Other offending msgid
        :raises InvalidMessageError:"""
        msgids = [msgid, other_msgid]
        msgids.sort()
        error_message = (
            f"Message symbol '{symbol}' cannot be used for "
            f"'{msgids[0]}' and '{msgids[1]}' at the same time."
            f" If you're creating an 'old_names' use 'old-{symbol}' as the old symbol."
        )
        raise InvalidMessageError(error_message)

    def get_active_msgids(self, msgid_or_symbol: str) -> List[str]:
        """Return msgids but the input can be a symbol."""
        # Only msgid can have a digit as second letter
        is_msgid = msgid_or_symbol[1:].isdigit()
        if is_msgid:
            msgid = msgid_or_symbol.upper()
            symbol = self.__msgid_to_symbol.get(msgid)
        else:
            msgid = self.__symbol_to_msgid.get(msgid_or_symbol)
            symbol = msgid_or_symbol
        if not msgid or not symbol:
            error_msg = f"No such message id or symbol '{msgid_or_symbol}'."
            raise UnknownMessageError(error_msg)
        # A legacy msgid resolves to its replacement(s); a current msgid
        # resolves to itself.
        return self.__old_names.get(msgid, [msgid])
| {
"content_hash": "b28131d194bf9835a8848c4ae7905dc1",
"timestamp": "",
"source": "github",
"line_count": 122,
"max_line_length": 113,
"avg_line_length": 41.278688524590166,
"alnum_prop": 0.6185464654487689,
"repo_name": "ruchee/vimrc",
"id": "9f4d4fd2a30c1d256b2cf39b827340d8d9411864",
"size": "5185",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "vimfiles/bundle/vim-python/submodules/pylint/pylint/message/message_id_store.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "22028"
},
{
"name": "Blade",
"bytes": "3314"
},
{
"name": "C#",
"bytes": "1734"
},
{
"name": "CSS",
"bytes": "31547"
},
{
"name": "Clojure",
"bytes": "47036"
},
{
"name": "CoffeeScript",
"bytes": "9274"
},
{
"name": "Common Lisp",
"bytes": "54314"
},
{
"name": "D",
"bytes": "11562"
},
{
"name": "Dockerfile",
"bytes": "7620"
},
{
"name": "Elixir",
"bytes": "41696"
},
{
"name": "Emacs Lisp",
"bytes": "10489"
},
{
"name": "Erlang",
"bytes": "137788"
},
{
"name": "F#",
"bytes": "2230"
},
{
"name": "Go",
"bytes": "54655"
},
{
"name": "HTML",
"bytes": "178954"
},
{
"name": "Haml",
"bytes": "39"
},
{
"name": "Haskell",
"bytes": "2031"
},
{
"name": "JavaScript",
"bytes": "9086"
},
{
"name": "Julia",
"bytes": "9540"
},
{
"name": "Kotlin",
"bytes": "8669"
},
{
"name": "Less",
"bytes": "327"
},
{
"name": "Makefile",
"bytes": "87500"
},
{
"name": "Mustache",
"bytes": "3375"
},
{
"name": "Nix",
"bytes": "1860"
},
{
"name": "PHP",
"bytes": "9238"
},
{
"name": "PLpgSQL",
"bytes": "33747"
},
{
"name": "Perl",
"bytes": "84200"
},
{
"name": "PostScript",
"bytes": "3891"
},
{
"name": "Python",
"bytes": "7366233"
},
{
"name": "Racket",
"bytes": "1150"
},
{
"name": "Raku",
"bytes": "21146"
},
{
"name": "Ruby",
"bytes": "133344"
},
{
"name": "SCSS",
"bytes": "327"
},
{
"name": "Sass",
"bytes": "308"
},
{
"name": "Scala",
"bytes": "13125"
},
{
"name": "Shell",
"bytes": "52916"
},
{
"name": "Smarty",
"bytes": "300"
},
{
"name": "Swift",
"bytes": "11436"
},
{
"name": "TypeScript",
"bytes": "4663"
},
{
"name": "Vim Script",
"bytes": "10545492"
},
{
"name": "Vim Snippet",
"bytes": "559139"
}
],
"symlink_target": ""
} |
from django.conf import settings
from syncrae.events.event import Event
from webdnd.player.models.roll import roll_text
import logging
# NOTE(review): rebinding the name `logging` to the root logger shadows the
# module; subsequent code can only use logger methods (info/error/...), not
# module-level functions such as logging.basicConfig.
logging = logging.getLogger('')
class Terminal(object):
    """Parses and dispatches terminal commands coming from a client.

    Command lines are validated against the ``COMMANDS`` table and routed
    to the matching ``term_<handler>`` method; all feedback goes back to
    the client as syncrae ``Event`` messages.
    """

    def __init__(self, web):
        # Transport whose write_message() reaches the client.
        self.web = web

    def __call__(self, data):
        """Handle one raw command payload (``{'cmd': '<line>'}``)."""
        # Robustness fix: default to '' so a payload without 'cmd' (or with
        # a None value) no longer crashes on .split().
        full_cmd = data.get('cmd') or ''
        cmd = full_cmd.split()
        if len(cmd) == 0:
            return
        elif cmd[0] not in Terminal.COMMANDS:
            Event('/terminal/result', {
                'level': 'error',
                'text': 'Invalid Command: `%s`' % full_cmd,
            }).write_message(self)
            return
        cmd, args = cmd[0], ' '.join([] if len(cmd) <= 1 else cmd[1:])
        # Return the command to the client to state that it was received
        Event('/terminal/result', {
            'cmd': True,
            'level': 'cmd',
            'text': full_cmd,
        }).write_message(self)
        # only log accepted commands
        logging.info('New Command - %s' % full_cmd)
        handler = 'term_' + Terminal.COMMANDS[cmd]['handler']
        if hasattr(self, handler):
            getattr(self, handler)(args)
            return
        else:
            # Bug fix: the original interpolated the undefined attribute
            # `self.__full` here, which raised AttributeError instead of
            # logging the problem.
            logging.error('Invalid Handler for cmd: < %s > - %s:%s' % (full_cmd, cmd, handler))
            return

    def write_message(self, json):
        """Forward a serialized event to the underlying transport."""
        self.web.write_message(json)

    def start(self):
        pass

    def terminal_write(self, text, level='info', err=False):
        """Send one line of terminal output to the client."""
        Event('/terminal/result', {
            'level': level,
            'text': text or ' ',  # empty text would collapse the line
        }, err=err).write_message(self)

    def terminal_err(self, err, level=None):
        """Send an error event; `level` optionally overrides the styling."""
        data = {}
        if level is not None:
            data['level'] = level
        Event('/terminal/error', data, err=err).write_message(self)

    ##############################################
    # Actual commands
    ##############################################

    # Maps a command word to the suffix of its term_* handler method.
    COMMANDS = {
        'colors': {
            'handler': 'color_test',
        },
        'echo': {
            'handler': 'echo',
        },
        'error': {
            'handler': 'error',
        },
        'roll': {
            'handler': 'roll',
        }
    }

    def term_color_test(self, cmd):
        """`colors`: print one sample line per output level."""
        self.terminal_write('Color Test:')
        levels = ['cmd', 'normal', 'info', 'warn', 'error', 'critical', 'muted']
        for level in levels:
            self.terminal_write(level=level, text=" >> %s" % level)

    def term_echo(self, cmd):
        """`echo <text>`: repeat the arguments back to the user."""
        self.terminal_write(cmd, level='normal')

    def term_error(self, cmd):
        """`error <code>`: trigger a known error code for testing."""
        if len(cmd) > 1 and cmd in settings.SYNCRAE_ERR_CODES:
            self.terminal_err(level='error', err=cmd)
        else:
            self.terminal_write('Invalid err code.', level='error')

    def term_roll(self, cmd):
        """`roll <spec>`: roll dice and report the result."""
        self.terminal_write(roll_text(cmd))
| {
"content_hash": "0bd00e2a1545d41e85aa0049b7d7c968",
"timestamp": "",
"source": "github",
"line_count": 103,
"max_line_length": 103,
"avg_line_length": 27.864077669902912,
"alnum_prop": 0.5017421602787456,
"repo_name": "Saevon/syncrae",
"id": "510b088d1a050ab636a3fca6dfa156138f0c8a64",
"size": "2870",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "terminal.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "5578"
},
{
"name": "Python",
"bytes": "22727"
},
{
"name": "Shell",
"bytes": "62"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from indico.modules.designer.controllers import (RHAddCategoryTemplate, RHAddEventTemplate, RHCloneCategoryTemplate,
RHCloneEventTemplate, RHDeleteDesignerTemplate,
RHDownloadTemplateImage, RHEditDesignerTemplate, RHGetTemplateData,
RHListBacksideTemplates, RHListCategoryTemplates, RHListEventTemplates,
RHToggleTemplateDefaultOnCategory, RHUploadBackgroundImage)
from indico.util.caching import memoize
from indico.web.flask.util import make_view_func
from indico.web.flask.wrappers import IndicoBlueprint
_bp = IndicoBlueprint('designer', __name__, template_folder='templates', virtual_template_folder='designer')
@memoize
def _dispatch(event_rh, category_rh):
    """Build a view that routes to the event or category RH depending on
    the ``object_type`` URL default (anything non-category is an event)."""
    categ_view = make_view_func(category_rh)
    event_view = make_view_func(event_rh)

    def view_func(**kwargs):
        if kwargs['object_type'] == 'category':
            return categ_view(**kwargs)
        return event_view(**kwargs)

    return view_func
# Category-only route: toggle whether a template is the category default.
_bp.add_url_rule('/category/<int:category_id>/manage/designer/<int:template_id>/toggle-default',
                 'toggle_category_default', RHToggleTemplateDefaultOnCategory, methods=('POST',))
# Register the same set of designer routes twice -- once under /event/... and
# once under /category/... -- passing `object_type` as a URL default so the
# dispatching views know which request handler to use.
for object_type in ('event', 'category'):
    if object_type == 'category':
        prefix = '/category/<int:category_id>'
    else:
        prefix = '/event/<int:confId>'
    prefix += '/manage/designer'
    _bp.add_url_rule(prefix + '/', 'template_list', _dispatch(RHListEventTemplates, RHListCategoryTemplates),
                     defaults={'object_type': object_type})
    _bp.add_url_rule(prefix + '/<int:template_id>/backsides', 'backside_template_list', RHListBacksideTemplates,
                     defaults={'object_type': object_type})
    _bp.add_url_rule(prefix + '/add', 'add_template', _dispatch(RHAddEventTemplate, RHAddCategoryTemplate),
                     defaults={'object_type': object_type}, methods=('GET', 'POST'))
    # Same URL, different endpoint/method: GET/POST edits, DELETE removes.
    _bp.add_url_rule(prefix + '/<int:template_id>/', 'edit_template', RHEditDesignerTemplate,
                     defaults={'object_type': object_type}, methods=('GET', 'POST'))
    _bp.add_url_rule(prefix + '/<int:template_id>/', 'delete_template', RHDeleteDesignerTemplate,
                     defaults={'object_type': object_type}, methods=('DELETE',))
    _bp.add_url_rule(prefix + '/<int:template_id>/clone', 'clone_template',
                     _dispatch(RHCloneEventTemplate, RHCloneCategoryTemplate),
                     defaults={'object_type': object_type}, methods=('POST',))
    _bp.add_url_rule(prefix + '/<int:template_id>/data', 'get_template_data',
                     RHGetTemplateData, defaults={'object_type': object_type})
    _bp.add_url_rule(prefix + '/<int:template_id>/images/<int:image_id>/<filename>', 'download_image',
                     RHDownloadTemplateImage, defaults={'object_type': object_type})
    _bp.add_url_rule(prefix + '/<int:template_id>/images', 'upload_image',
                     RHUploadBackgroundImage, defaults={'object_type': object_type}, methods=('POST',))
| {
"content_hash": "943f8eea83fb5c19f0305be44e379b4c",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 120,
"avg_line_length": 58.2,
"alnum_prop": 0.6354264292408622,
"repo_name": "mic4ael/indico",
"id": "e49a83d648257838f07411437f80362d7586a10d",
"size": "3415",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "indico/modules/designer/blueprint.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "553825"
},
{
"name": "HTML",
"bytes": "1375160"
},
{
"name": "JavaScript",
"bytes": "1852830"
},
{
"name": "Mako",
"bytes": "1340"
},
{
"name": "Python",
"bytes": "4612709"
},
{
"name": "Shell",
"bytes": "2665"
},
{
"name": "TeX",
"bytes": "23292"
},
{
"name": "XSLT",
"bytes": "1504"
}
],
"symlink_target": ""
} |
import tkinter as tk
from tkinter import Button
import time
import numpy as np
from PIL import ImageTk, Image
PhotoImage = ImageTk.PhotoImage
UNIT = 100 # 픽셀 수
HEIGHT = 5 # 그리드월드 세로
WIDTH = 5 # 그리드월드 가로
TRANSITION_PROB = 1
POSSIBLE_ACTIONS = [0, 1, 2, 3] # 좌, 우, 상, 하
ACTIONS = [(-1, 0), (1, 0), (0, -1), (0, 1)] # 좌표로 나타낸 행동
REWARDS = []
class GraphicDisplay(tk.Tk):
def __init__(self, agent):
super(GraphicDisplay, self).__init__()
self.title('Policy Iteration')
self.geometry('{0}x{1}'.format(HEIGHT * UNIT, HEIGHT * UNIT + 50))
self.texts = []
self.arrows = []
self.env = Env()
self.agent = agent
self.evaluation_count = 0
self.improvement_count = 0
self.is_moving = 0
(self.up, self.down, self.left, self.right), self.shapes = self.load_images()
self.canvas = self._build_canvas()
self.text_reward(2, 2, "R : 1.0")
self.text_reward(1, 2, "R : -1.0")
self.text_reward(2, 1, "R : -1.0")
def _build_canvas(self):
canvas = tk.Canvas(self, bg='white',
height=HEIGHT * UNIT,
width=WIDTH * UNIT)
# 버튼 초기화
iteration_button = Button(self, text="Evaluate",
command=self.evaluate_policy)
iteration_button.configure(width=10, activebackground="#33B5E5")
canvas.create_window(WIDTH * UNIT * 0.13, HEIGHT * UNIT + 10,
window=iteration_button)
policy_button = Button(self, text="Improve",
command=self.improve_policy)
policy_button.configure(width=10, activebackground="#33B5E5")
canvas.create_window(WIDTH * UNIT * 0.37, HEIGHT * UNIT + 10,
window=policy_button)
policy_button = Button(self, text="move", command=self.move_by_policy)
policy_button.configure(width=10, activebackground="#33B5E5")
canvas.create_window(WIDTH * UNIT * 0.62, HEIGHT * UNIT + 10,
window=policy_button)
policy_button = Button(self, text="reset", command=self.reset)
policy_button.configure(width=10, activebackground="#33B5E5")
canvas.create_window(WIDTH * UNIT * 0.87, HEIGHT * UNIT + 10,
window=policy_button)
# 그리드 생성
for col in range(0, WIDTH * UNIT, UNIT): # 0~400 by 80
x0, y0, x1, y1 = col, 0, col, HEIGHT * UNIT
canvas.create_line(x0, y0, x1, y1)
for row in range(0, HEIGHT * UNIT, UNIT): # 0~400 by 80
x0, y0, x1, y1 = 0, row, HEIGHT * UNIT, row
canvas.create_line(x0, y0, x1, y1)
# 캔버스에 이미지 추가
self.rectangle = canvas.create_image(50, 50, image=self.shapes[0])
canvas.create_image(250, 150, image=self.shapes[1])
canvas.create_image(150, 250, image=self.shapes[1])
canvas.create_image(250, 250, image=self.shapes[2])
canvas.pack()
return canvas
def load_images(self):
up = PhotoImage(Image.open("../img/up.png").resize((13, 13)))
right = PhotoImage(Image.open("../img/right.png").resize((13, 13)))
left = PhotoImage(Image.open("../img/left.png").resize((13, 13)))
down = PhotoImage(Image.open("../img/down.png").resize((13, 13)))
rectangle = PhotoImage(Image.open("../img/rectangle.png").resize((65, 65)))
triangle = PhotoImage(Image.open("../img/triangle.png").resize((65, 65)))
circle = PhotoImage(Image.open("../img/circle.png").resize((65, 65)))
return (up, down, left, right), (rectangle, triangle, circle)
def reset(self):
if self.is_moving == 0:
self.evaluation_count = 0
self.improvement_count = 0
for i in self.texts:
self.canvas.delete(i)
for i in self.arrows:
self.canvas.delete(i)
self.agent.value_table = [[0.0] * WIDTH for _ in range(HEIGHT)]
self.agent.policy_table = ([[[0.25, 0.25, 0.25, 0.25]] * WIDTH
for _ in range(HEIGHT)])
self.agent.policy_table[2][2] = []
x, y = self.canvas.coords(self.rectangle)
self.canvas.move(self.rectangle, UNIT / 2 - x, UNIT / 2 - y)
def text_value(self, row, col, contents, font='Helvetica', size=10,
style='normal', anchor="nw"):
origin_x, origin_y = 85, 70
x, y = origin_y + (UNIT * col), origin_x + (UNIT * row)
font = (font, str(size), style)
text = self.canvas.create_text(x, y, fill="black", text=contents,
font=font, anchor=anchor)
return self.texts.append(text)
def text_reward(self, row, col, contents, font='Helvetica', size=10,
style='normal', anchor="nw"):
origin_x, origin_y = 5, 5
x, y = origin_y + (UNIT * col), origin_x + (UNIT * row)
font = (font, str(size), style)
text = self.canvas.create_text(x, y, fill="black", text=contents,
font=font, anchor=anchor)
return self.texts.append(text)
def rectangle_move(self, action):
base_action = np.array([0, 0])
location = self.find_rectangle()
self.render()
if action == 0 and location[0] > 0: # 상
base_action[1] -= UNIT
elif action == 1 and location[0] < HEIGHT - 1: # 하
base_action[1] += UNIT
elif action == 2 and location[1] > 0: # 좌
base_action[0] -= UNIT
elif action == 3 and location[1] < WIDTH - 1: # 우
base_action[0] += UNIT
# move agent
self.canvas.move(self.rectangle, base_action[0], base_action[1])
def find_rectangle(self):
temp = self.canvas.coords(self.rectangle)
x = (temp[0] / 100) - 0.5
y = (temp[1] / 100) - 0.5
return int(y), int(x)
def move_by_policy(self):
if self.improvement_count != 0 and self.is_moving != 1:
self.is_moving = 1
x, y = self.canvas.coords(self.rectangle)
self.canvas.move(self.rectangle, UNIT / 2 - x, UNIT / 2 - y)
x, y = self.find_rectangle()
while len(self.agent.policy_table[x][y]) != 0:
self.after(100,
self.rectangle_move(self.agent.get_action([x, y])))
x, y = self.find_rectangle()
self.is_moving = 0
def draw_one_arrow(self, col, row, policy):
    """Draw an arrow in cell (col, row) for each action with positive
    probability; the goal cell (2, 2) gets no arrows.

    ``policy`` is the 4-element action-probability list [up, down, left,
    right]. Canvas item ids are collected in ``self.arrows``.
    """
    if col == 2 and row == 2:
        return  # goal cell: nothing to draw
    # (policy index, arrow image, x offset, y offset) per direction
    placements = (
        (0, self.up, 50, 10),     # up
        (1, self.down, 50, 90),   # down
        (2, self.left, 10, 50),   # left
        (3, self.right, 90, 50),  # right
    )
    for index, image, dx, dy in placements:
        if policy[index] > 0:
            arrow_x = dx + (UNIT * row)
            arrow_y = dy + (UNIT * col)
            self.arrows.append(
                self.canvas.create_image(arrow_x, arrow_y, image=image))
def draw_from_policy(self, policy_table):
    """Redraw the policy arrows for every cell of the grid."""
    for row in range(HEIGHT):
        for col in range(WIDTH):
            self.draw_one_arrow(row, col, policy_table[row][col])
def print_value_table(self, value_table):
    """Write the current state values into their grid cells.

    NOTE(review): the outer loop ranges over WIDTH and the inner over
    HEIGHT; correct here only because the grid is square -- confirm if
    the grid dimensions ever diverge.
    """
    for row in range(WIDTH):
        for col in range(HEIGHT):
            self.text_value(row, col, value_table[row][col])
def render(self):
"""Refresh the canvas, briefly pausing so movement is visible."""
time.sleep(0.1)
# keep the agent rectangle drawn above value labels and arrows
self.canvas.tag_raise(self.rectangle)
self.update()
def evaluate_policy(self):
    """Run one policy-evaluation sweep and redraw the value labels."""
    self.evaluation_count += 1
    # clear the previously drawn value labels
    for text_id in self.texts:
        self.canvas.delete(text_id)
    self.agent.policy_evaluation()
    self.print_value_table(self.agent.value_table)
def improve_policy(self):
    """Run one policy-improvement step and redraw the policy arrows."""
    self.improvement_count += 1
    # clear the previously drawn arrows
    for arrow_id in self.arrows:
        self.canvas.delete(arrow_id)
    self.agent.policy_improvement()
    self.draw_from_policy(self.agent.policy_table)
class Env:
    """Tabular model of the square grid world used by policy iteration.

    Exposes rewards, deterministic transitions and the full state list.
    Relies on the module-level constants TRANSITION_PROB, WIDTH, HEIGHT,
    POSSIBLE_ACTIONS and ACTIONS.
    """

    def __init__(self):
        self.transition_probability = TRANSITION_PROB
        self.width = WIDTH
        self.height = HEIGHT
        self.possible_actions = POSSIBLE_ACTIONS
        # reward grid, indexed [row][col]
        self.reward = [[0] * WIDTH for _ in range(HEIGHT)]
        self.reward[2][2] = 1    # circle at (2, 2): reward +1
        self.reward[1][2] = -1   # triangle at (1, 2): reward -1
        self.reward[2][1] = -1   # triangle at (2, 1): reward -1
        # every [x, y] coordinate pair of the grid
        self.all_state = [[x, y] for x in range(WIDTH) for y in range(HEIGHT)]

    def get_reward(self, state, action):
        """Reward received for taking ``action`` in ``state``."""
        next_state = self.state_after_action(state, action)
        return self.reward[next_state[0]][next_state[1]]

    def state_after_action(self, state, action_index):
        """Deterministic successor of ``state`` under the indexed action."""
        move = ACTIONS[action_index]
        return self.check_boundary([state[0] + move[0], state[1] + move[1]])

    @staticmethod
    def check_boundary(state):
        """Clamp a coordinate pair onto the grid (mutates and returns it).

        NOTE(review): axis 0 is clamped by WIDTH and axis 1 by HEIGHT;
        correct only while the grid stays square -- confirm if the sizes
        ever diverge.
        """
        state[0] = max(0, min(state[0], WIDTH - 1))
        state[1] = max(0, min(state[1], HEIGHT - 1))
        return state

    def get_transition_prob(self, state, action):
        """Transition probability (a single constant in this grid world)."""
        return self.transition_probability

    def get_all_states(self):
        """Return the list of all [x, y] states."""
        return self.all_state
| {
"content_hash": "4df6d281777d4d2cfb2c136ab26c0f65",
"timestamp": "",
"source": "github",
"line_count": 244,
"max_line_length": 85,
"avg_line_length": 40.77049180327869,
"alnum_prop": 0.5341777241656614,
"repo_name": "rlcode/reinforcement-learning-kr",
"id": "c6f7b6f49a3dd6e53137049187e86999e2878f70",
"size": "10110",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "1-grid-world/1-policy-iteration/environment.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "115864"
}
],
"symlink_target": ""
} |
from flask import jsonify, Blueprint

status = Blueprint('status', __name__)


@status.route('/_status')
def get_status():
    """Health-check endpoint: report that the service is up."""
    return jsonify(status="ok"), 200
| {
"content_hash": "4051b21a47421ac2cf4f827a9d538b88",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 38,
"avg_line_length": 16,
"alnum_prop": 0.6193181818181818,
"repo_name": "alphagov/notifications-delivery",
"id": "dd3b912bfea3733c144781ae33168c9d29a3af18",
"size": "176",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "notifications_delivery/app/status/rest.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "48058"
},
{
"name": "Shell",
"bytes": "3209"
}
],
"symlink_target": ""
} |
import uuid
import os
import argparse
import boto3
import botocore
boto3_session = boto3.Session(profile_name='serverless-meow')  # named AWS credentials profile
client = boto3_session.client('s3')  # S3 client shared by upload_photo/remove_photo
BUCKET = os.getenv('BUCKET_NAME')  # target bucket; None when BUCKET_NAME is unset -- TODO confirm callers set it
REGION = 'us-east-2'  # NOTE(review): defined but not referenced in this file
def upload_photo(photo_path):
    """Upload the photo at ``photo_path`` to the bucket unless already there.

    The object key is the file's base name; the object is made public-read.
    A 404 from head_object means the key is free and the upload proceeds;
    any other client error is re-raised.
    """
    key = os.path.basename(photo_path)
    try:
        client.head_object(Bucket=BUCKET, Key=key)
    except botocore.exceptions.ClientError as e:
        if e.response['Error']['Code'] != '404':
            raise
        client.upload_file(Filename=photo_path,
                           Bucket=BUCKET,
                           Key=key,
                           ExtraArgs={'ACL': 'public-read'})
    else:
        print('Kitteh photo {path} already exists, skipping'.format(path=key))
def remove_photo(photo_path):
    """Delete the S3 object whose key is ``photo_path``'s base name.

    A 404 from head_object means the object is absent and the call is
    skipped with a message; any other client error is re-raised.
    """
    key = os.path.basename(photo_path)
    try:
        client.head_object(Bucket=BUCKET, Key=key)
    except botocore.exceptions.ClientError as e:
        if e.response['Error']['Code'] != '404':
            raise
        print('Kitteh photo {path} does not exist, skipping'.format(path=key))
    else:
        client.delete_object(Bucket=BUCKET, Key=key)
def _process_tree(path, handler, progress_template, done_message):
    """Apply ``handler`` to ``path``, or to each file directly inside it.

    ``progress_template`` is printed (formatted with path/bucket) before each
    handled file, ``done_message`` once afterwards. Deduplicates the formerly
    copy-pasted add/remove branches; as a robustness improvement, an invalid
    path now prints a diagnostic on the remove path too (previously it was
    silently ignored there).
    """
    if os.path.isdir(path):
        for entry in os.listdir(path):
            file_path = os.path.join(path, entry)
            if os.path.isfile(file_path):
                print(progress_template.format(path=file_path, bucket=BUCKET))
                handler(file_path)
        print(done_message)
    elif os.path.isfile(path):
        print(progress_template.format(path=path, bucket=BUCKET))
        handler(path)
        print(done_message)
    else:
        print('Do not know what to do with this')


if __name__ == '__main__':
    parser = argparse.ArgumentParser(description="Manage your kitteh's photos")
    parser.add_argument('-a', '--add', dest='add_photo_path',
                        help='Path to kitteh photo that you want to upload to s3')
    parser.add_argument('-r', '--remove', dest='remove_photo_path',
                        help='Path to kitteh photo that you want to remove from s3')
    args = parser.parse_args()

    if args.add_photo_path:
        _process_tree(args.add_photo_path, upload_photo,
                      'Uploading kitteh photo {path} to s3 bucket {bucket}...',
                      'Upload complete')
    if args.remove_photo_path:
        _process_tree(args.remove_photo_path, remove_photo,
                      'Removing kitteh photo {path} from s3 bucket {bucket}...',
                      'Removal complete')
| {
"content_hash": "404f92594efa63fee6ec7c16d5aea93a",
"timestamp": "",
"source": "github",
"line_count": 73,
"max_line_length": 128,
"avg_line_length": 43.57534246575342,
"alnum_prop": 0.6045268783401446,
"repo_name": "ashimaathri/serverless-meow",
"id": "8bdf1905454123aefb56ff73b03e312b4401c673",
"size": "3205",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/manage_kitteh_photos.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "4052"
},
{
"name": "Python",
"bytes": "3205"
}
],
"symlink_target": ""
} |
import goPyServer as GP
# Python 2 example script (note the print statement below): exposes a
# single "ADD" method through the goPyServer JSON-RPC server.
# Step2: Create a sample "ADD" which returns the sum of 2 numbers passed in parameters
def ADD(arg1,arg2):
return arg1+arg2
# Step3: Initialise the object of goPyServer
obj = GP.pyServ(TCP_IP="127.0.0.1",TCP_PORT=9001,Listen=4,buff=1024)
# Step4: Register the ADD method to the RPC Server
obj.register_function(ADD)
# Step5: connect to the Server
obj.connect()
# Step6: Finally Run the JSON RPCServer (blocks serving requests)
print "Server Running on \"127.0.0.1:9001 ...\""
obj.RPCServer()
| {
"content_hash": "370946f60cf1f0941ea9852604b5c6cc",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 86,
"avg_line_length": 27.72222222222222,
"alnum_prop": 0.7334669338677354,
"repo_name": "chawlanikhil24/gopy",
"id": "87f5e6275ad69441930119eb6b1f6555533933e6",
"size": "532",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "example/pytutorial.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Go",
"bytes": "2723"
},
{
"name": "Python",
"bytes": "532"
}
],
"symlink_target": ""
} |
import _plotly_utils.basevalidators
class MarkerValidator(_plotly_utils.basevalidators.CompoundValidator):
    """Compound validator for the ``marker`` property of ``choropleth.selected``."""

    def __init__(
        self, plotly_name="marker", parent_name="choropleth.selected", **kwargs
    ):
        # Pull overridable pieces out of kwargs before delegating so that
        # callers may supply their own data-class name or docs.
        data_class_str = kwargs.pop("data_class_str", "Marker")
        data_docs = kwargs.pop(
            "data_docs",
            """
            opacity
                Sets the marker opacity of selected points.
""",
        )
        super(MarkerValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            data_class_str=data_class_str,
            data_docs=data_docs,
            **kwargs,
        )
| {
"content_hash": "e1dcc1ff39535b02628ac86d5bac2ec7",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 79,
"avg_line_length": 30.85,
"alnum_prop": 0.5429497568881686,
"repo_name": "plotly/plotly.py",
"id": "48a15eec4d84997f5482a6bb7952b3d0289dc2f3",
"size": "617",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/choropleth/selected/_marker.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "545"
},
{
"name": "JavaScript",
"bytes": "2074"
},
{
"name": "PostScript",
"bytes": "565328"
},
{
"name": "Python",
"bytes": "31506317"
},
{
"name": "TypeScript",
"bytes": "71337"
}
],
"symlink_target": ""
} |
"""The tests for the Scene component."""
import io
import unittest
from homeassistant.setup import setup_component
from homeassistant.components import light, scene
from homeassistant.util.yaml import loader as yaml_loader
from tests.common import get_test_home_assistant
from tests.components.light import common as common_light
from tests.components.scene import common
class TestScene(unittest.TestCase):
    """Test the scene component."""

    def setUp(self):  # pylint: disable=invalid-name
        """Create a test hass with two test lights, both switched off."""
        self.hass = get_test_home_assistant()
        test_light = getattr(self.hass.components, 'test.light')
        test_light.init()

        assert setup_component(self.hass, light.DOMAIN, {
            light.DOMAIN: {'platform': 'test'}
        })

        self.light_1, self.light_2 = test_light.DEVICES[0:2]
        common_light.turn_off(
            self.hass, [self.light_1.entity_id, self.light_2.entity_id])
        self.hass.block_till_done()
        assert not self.light_1.is_on
        assert not self.light_2.is_on

    def tearDown(self):  # pylint: disable=invalid-name
        """Stop everything that was started."""
        self.hass.stop()

    def test_config_yaml_alias_anchor(self):
        """Test the usage of YAML aliases and anchors.

        The following test scene configuration is equivalent to:

        scene:
          - name: test
            entities:
              light_1: &light_1_state
                state: 'on'
                brightness: 100
              light_2: *light_1_state

        When encountering a YAML alias/anchor, the PyYAML parser will use a
        reference to the original dictionary, instead of creating a copy, so
        care needs to be taken to not modify the original.
        """
        # one dict shared between both entities, mimicking a YAML anchor
        shared_state = {
            'state': 'on',
            'brightness': 100,
        }
        assert setup_component(self.hass, scene.DOMAIN, {
            'scene': [{
                'name': 'test',
                'entities': {
                    self.light_1.entity_id: shared_state,
                    self.light_2.entity_id: shared_state,
                }
            }]
        })

        common.activate(self.hass, 'scene.test')
        self.hass.block_till_done()

        assert self.light_1.is_on
        assert self.light_2.is_on
        assert self.light_1.last_call('turn_on')[1].get('brightness') == 100
        assert self.light_2.last_call('turn_on')[1].get('brightness') == 100

    def test_config_yaml_bool(self):
        """Test parsing of booleans in yaml config."""
        config = (
            'scene:\n'
            '  - name: test\n'
            '    entities:\n'
            '      {0}: on\n'
            '      {1}:\n'
            '        state: on\n'
            '        brightness: 100\n').format(
                self.light_1.entity_id, self.light_2.entity_id)

        with io.StringIO(config) as file:
            doc = yaml_loader.yaml.safe_load(file)

        assert setup_component(self.hass, scene.DOMAIN, doc)

        common.activate(self.hass, 'scene.test')
        self.hass.block_till_done()

        assert self.light_1.is_on
        assert self.light_2.is_on
        assert self.light_2.last_call('turn_on')[1].get('brightness') == 100

    def test_activate_scene(self):
        """Activating a scene turns both lights on with the configured state."""
        assert setup_component(self.hass, scene.DOMAIN, {
            'scene': [{
                'name': 'test',
                'entities': {
                    self.light_1.entity_id: 'on',
                    self.light_2.entity_id: {
                        'state': 'on',
                        'brightness': 100,
                    }
                }
            }]
        })

        common.activate(self.hass, 'scene.test')
        self.hass.block_till_done()

        assert self.light_1.is_on
        assert self.light_2.is_on
        assert self.light_2.last_call('turn_on')[1].get('brightness') == 100
| {
"content_hash": "5465771f831a54f98158625899877c3d",
"timestamp": "",
"source": "github",
"line_count": 123,
"max_line_length": 76,
"avg_line_length": 32.520325203252035,
"alnum_prop": 0.548,
"repo_name": "jabesq/home-assistant",
"id": "94746cce0f00dc7e05ba8922e9647738aa5fdcf5",
"size": "4000",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "tests/components/scene/test_init.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1175"
},
{
"name": "Dockerfile",
"bytes": "1829"
},
{
"name": "Python",
"bytes": "16238292"
},
{
"name": "Ruby",
"bytes": "745"
},
{
"name": "Shell",
"bytes": "17615"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
# Re-export the public camera classes of this sub-package.
from .orbit import OrbitCamera
__all__ = ['OrbitCamera']
"content_hash": "355b2a840fdab4850162ddf5d863bd8b",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 38,
"avg_line_length": 19.4,
"alnum_prop": 0.7216494845360825,
"repo_name": "colour-science/colour-analysis",
"id": "f7bc420dc4e3bbaab862529a9e85906a778386f7",
"size": "122",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "colour_analysis/cameras/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "161677"
}
],
"symlink_target": ""
} |
"""Dag sub-commands"""
import ast
import errno
import json
import logging
import signal
import subprocess
import sys
from typing import Optional
from graphviz.dot import Dot
from sqlalchemy.sql.functions import func
from airflow import settings
from airflow.api.client import get_current_api_client
from airflow.cli.simple_table import AirflowConsole
from airflow.configuration import conf
from airflow.exceptions import AirflowException, BackfillUnfinished
from airflow.executors.debug_executor import DebugExecutor
from airflow.jobs.base_job import BaseJob
from airflow.models import DagBag, DagModel, DagRun, TaskInstance
from airflow.models.dag import DAG
from airflow.models.serialized_dag import SerializedDagModel
from airflow.utils import cli as cli_utils
from airflow.utils.cli import (
get_dag,
get_dag_by_file_location,
process_subdir,
sigint_handler,
suppress_logs_and_warning,
)
from airflow.utils.dot_renderer import render_dag, render_dag_dependencies
from airflow.utils.session import create_session, provide_session
from airflow.utils.state import State
@cli_utils.action_cli
def dag_backfill(args, dag=None):
"""Creates backfill job or dry run for a DAG.

Runs the DAG's tasks over the [start_date, end_date] window, either as
a real backfill job or, with --dry-run, by dry-running each task once.
"""
logging.basicConfig(level=settings.LOGGING_LEVEL, format=settings.SIMPLE_LOG_FORMAT)
signal.signal(signal.SIGTERM, sigint_handler)
import warnings
# NOTE(review): this deprecation warning fires unconditionally, even
# when the user never passed the flag -- confirm intent.
warnings.warn(
'--ignore-first-depends-on-past is deprecated as the value is always set to True',
category=PendingDeprecationWarning,
)
# the flag is forced on regardless of what was passed
if args.ignore_first_depends_on_past is False:
args.ignore_first_depends_on_past = True
if not args.start_date and not args.end_date:
raise AirflowException("Provide a start_date and/or end_date")
dag = dag or get_dag(args.subdir, args.dag_id)
# If only one date is passed, using same as start and end
args.end_date = args.end_date or args.start_date
args.start_date = args.start_date or args.end_date
if args.task_regex:
# restrict the DAG to tasks matching the regex (plus upstream
# dependencies unless --ignore-dependencies was given)
dag = dag.partial_subset(
task_ids_or_regex=args.task_regex, include_upstream=not args.ignore_dependencies
)
if not dag.task_dict:
raise AirflowException(
f"There are no tasks that match '{args.task_regex}' regex. Nothing to run, exiting..."
)
run_conf = None
if args.conf:
run_conf = json.loads(args.conf)
if args.dry_run:
# dry run: instantiate task instances at start_date and dry-run each
print(f"Dry run of DAG {args.dag_id} on {args.start_date}")
dr = DagRun(dag.dag_id, execution_date=args.start_date)
for task in dag.tasks:
print(f"Task {task.task_id}")
ti = TaskInstance(task, run_id=None)
ti.dag_run = dr
ti.dry_run()
else:
if args.reset_dagruns:
# wipe existing runs in the window before backfilling
DAG.clear_dags(
[dag],
start_date=args.start_date,
end_date=args.end_date,
confirm_prompt=not args.yes,
include_subdags=True,
dag_run_state=State.NONE,
)
try:
dag.run(
start_date=args.start_date,
end_date=args.end_date,
mark_success=args.mark_success,
local=args.local,
donot_pickle=(args.donot_pickle or conf.getboolean('core', 'donot_pickle')),
ignore_first_depends_on_past=args.ignore_first_depends_on_past,
ignore_task_deps=args.ignore_dependencies,
pool=args.pool,
delay_on_limit_secs=args.delay_on_limit,
verbose=args.verbose,
conf=run_conf,
rerun_failed_tasks=args.rerun_failed_tasks,
run_backwards=args.run_backwards,
)
except ValueError as vr:
# invalid backfill parameters are reported and exit non-zero
print(str(vr))
sys.exit(1)
@cli_utils.action_cli
def dag_trigger(args):
    """Create a dag run for the specified dag via the configured API client.

    Raises AirflowException when the API call fails with an OSError.
    """
    api_client = get_current_api_client()
    try:
        result = api_client.trigger_dag(
            dag_id=args.dag_id,
            run_id=args.run_id,
            conf=args.conf,
            execution_date=args.exec_date,
        )
    except OSError as err:
        raise AirflowException(err)
    print(result)
@cli_utils.action_cli
def dag_delete(args):
    """Delete all DB records related to the specified dag.

    Asks for confirmation unless --yes was given; raises AirflowException
    when the API call fails with an OSError.
    """
    api_client = get_current_api_client()
    question = "This will drop all existing records related to the specified DAG. Proceed? (y/n)"
    # short-circuit: the prompt is only shown when --yes is absent
    confirmed = args.yes or input(question).upper() == "Y"
    if not confirmed:
        print("Cancelled")
        return
    try:
        print(api_client.delete_dag(dag_id=args.dag_id))
    except OSError as err:
        raise AirflowException(err)
@cli_utils.action_cli
def dag_pause(args):
    """Pause a DAG so the scheduler stops creating new runs for it."""
    set_is_paused(is_paused=True, args=args)
@cli_utils.action_cli
def dag_unpause(args):
    """Unpause a DAG so the scheduler resumes creating runs for it."""
    set_is_paused(is_paused=False, args=args)
def set_is_paused(is_paused, args):
    """Set the paused flag of the DAG identified by ``args.dag_id``.

    Raises SystemExit when the DAG is not present in the database.
    """
    dag_model = DagModel.get_dagmodel(args.dag_id)
    if not dag_model:
        raise SystemExit(f"DAG: {args.dag_id} does not exist in 'dag' table")
    dag_model.set_is_paused(is_paused=is_paused)
    print(f"Dag: {args.dag_id}, paused: {is_paused}")
def dag_dependencies_show(args):
    """Show DAG dependencies: print DOT source, save to file, or imgcat."""
    dot = render_dag_dependencies(SerializedDagModel.get_dag_dependencies())
    if args.save and args.imgcat:
        raise SystemExit(
            "Option --save and --imgcat are mutually exclusive. "
            "Please remove one option to execute the command.",
        )
    if args.save:
        _save_dot_to_file(dot, args.save)
    elif args.imgcat:
        _display_dot_via_imgcat(dot)
    else:
        print(dot.source)
def dag_show(args):
    """Show a DAG's graph: print DOT source, save to file, or imgcat."""
    dot = render_dag(get_dag(args.subdir, args.dag_id))
    if args.save and args.imgcat:
        raise SystemExit(
            "Option --save and --imgcat are mutually exclusive. "
            "Please remove one option to execute the command.",
        )
    if args.save:
        _save_dot_to_file(dot, args.save)
    elif args.imgcat:
        _display_dot_via_imgcat(dot)
    else:
        print(dot.source)
def _display_dot_via_imgcat(dot: Dot):
    """Render *dot* to PNG and pipe it through the ``imgcat`` terminal viewer.

    Raises SystemExit when the ``imgcat`` executable cannot be found; any
    other OSError is re-raised.
    """
    png_data = dot.pipe(format='png')
    try:
        with subprocess.Popen("imgcat", stdout=subprocess.PIPE, stdin=subprocess.PIPE) as proc:
            out, err = proc.communicate(png_data)
    except OSError as e:
        if e.errno != errno.ENOENT:
            raise
        raise SystemExit(
            "Failed to execute. Make sure the imgcat executables are on your systems 'PATH'"
        )
    if out:
        print(out.decode('utf-8'))
    if err:
        print(err.decode('utf-8'))
def _save_dot_to_file(dot: Dot, filename: str):
    """Render *dot* to *filename*; the file extension selects the format.

    ``dot.render`` writes ``<stem>.<ext>`` and, with ``cleanup=True``,
    removes the intermediate DOT source file.
    """
    filename_without_ext, _, ext = filename.rpartition('.')
    dot.render(filename=filename_without_ext, format=ext, cleanup=True)
    # Fix: the message previously read "File (unknown) saved" (an f-string
    # with no placeholder) and never named the file that was written.
    print(f"File {filename} saved")
@cli_utils.action_cli
def dag_state(args):
    """
    Returns the state (and conf if exists) of a DagRun at the command line.

    >>> airflow dags state tutorial 2015-01-01T00:00:00.000000
    running
    >>> airflow dags state a_dag_with_conf_passed 2015-01-01T00:00:00.000000
    failed, {"name": "bob", "age": "42"}
    """
    if args.subdir:
        dag = get_dag(args.subdir, args.dag_id)
    else:
        dag = get_dag_by_file_location(args.dag_id)
    runs = DagRun.find(dag.dag_id, execution_date=args.execution_date)
    state = runs[0].state if runs else None
    # append the run conf only when a state exists and conf is non-empty
    suffix = ', ' + json.dumps(runs[0].conf) if state and runs[0].conf else ''
    print(str(state) + suffix)
@cli_utils.action_cli
def dag_next_execution(args):
"""
Returns the next execution datetime of a DAG at the command line.

Prints None (with a warning on stderr) when no run exists yet or no
following schedule can be derived.

>>> airflow dags next-execution tutorial
2018-08-31 10:38:00
"""
dag = get_dag(args.subdir, args.dag_id)
if dag.get_is_paused():
print("[INFO] Please be reminded this DAG is PAUSED now.", file=sys.stderr)
with create_session() as session:
# subquery: latest execution_date recorded for this DAG
max_date_subq = (
session.query(func.max(DagRun.execution_date).label("max_date"))
.filter(DagRun.dag_id == dag.dag_id)
.subquery()
)
# the DagRun at that latest execution_date, if any
max_date_run: Optional[DagRun] = (
session.query(DagRun)
.filter(DagRun.dag_id == dag.dag_id, DagRun.execution_date == max_date_subq.c.max_date)
.one_or_none()
)
if max_date_run is None:
print("[WARN] Only applicable when there is execution record found for the DAG.", file=sys.stderr)
print(None)
return
# compute the schedule following the most recent run, ignoring date
# restrictions (restricted=False)
next_info = dag.next_dagrun_info(dag.get_run_data_interval(max_date_run), restricted=False)
if next_info is None:
print(
"[WARN] No following schedule can be found. "
"This DAG may have schedule interval '@once' or `None`.",
file=sys.stderr,
)
print(None)
return
print(next_info.logical_date.isoformat())
# walk the schedule forward for the remaining requested executions
for _ in range(1, args.num_executions):
next_info = dag.next_dagrun_info(next_info.data_interval, restricted=False)
print(next_info.logical_date.isoformat())
@cli_utils.action_cli
@suppress_logs_and_warning
def dag_list_dags(args):
    """Print the DAGs found under --subdir in the requested output format."""
    dagbag = DagBag(process_subdir(args.subdir))

    def _row(dag):
        # one table row per DAG
        return {
            "dag_id": dag.dag_id,
            "filepath": dag.filepath,
            "owner": dag.owner,
            "paused": dag.get_is_paused(),
        }

    AirflowConsole().print_as(
        data=sorted(dagbag.dags.values(), key=lambda d: d.dag_id),
        output=args.output,
        mapper=_row,
    )
@cli_utils.action_cli
@suppress_logs_and_warning
def dag_report(args):
    """Print per-file dagbag parsing statistics."""
    stats = DagBag(process_subdir(args.subdir)).dagbag_stats

    def _row(stat):
        # one table row per parsed DAG file
        return {
            "file": stat.file,
            "duration": stat.duration,
            "dag_num": stat.dag_num,
            "task_num": stat.task_num,
            "dags": sorted(ast.literal_eval(stat.dags)),
        }

    AirflowConsole().print_as(data=stats, output=args.output, mapper=_row)
@cli_utils.action_cli
@suppress_logs_and_warning
def dag_list_jobs(args, dag=None):
    """List the latest jobs, optionally filtered by DAG id and state."""
    filters = []
    if dag:
        args.dag_id = dag.dag_id
    if args.dag_id:
        # validate the DAG id before filtering on it
        if args.dag_id not in DagBag().dags:
            raise AirflowException(f"Dag id {args.dag_id} not found")
        filters.append(BaseJob.dag_id == args.dag_id)
    if args.state:
        filters.append(BaseJob.state == args.state)
    fields = ['dag_id', 'state', 'job_type', 'start_date', 'end_date']
    with create_session() as session:
        jobs = (
            session.query(BaseJob)
            .filter(*filters)
            .order_by(BaseJob.start_date.desc())
            .limit(args.limit)
            .all()
        )
        # stringify every field while the session is still open
        rows = [{field: str(getattr(job, field)) for field in fields} for job in jobs]
    AirflowConsole().print_as(data=rows, output=args.output)
@cli_utils.action_cli
@suppress_logs_and_warning
def dag_list_dag_runs(args, dag=None):
    """List dag runs for a given DAG, newest first."""
    if dag:
        args.dag_id = dag.dag_id

    dagbag = DagBag()
    if args.dag_id is not None and args.dag_id not in dagbag.dags:
        raise AirflowException(f"Dag id {args.dag_id} not found")

    dag_runs = DagRun.find(
        dag_id=args.dag_id,
        state=args.state.lower() if args.state else None,
        no_backfills=args.no_backfill,
        execution_start_date=args.start_date,
        execution_end_date=args.end_date,
    )
    dag_runs.sort(key=lambda run: run.execution_date, reverse=True)

    def _row(run):
        # one table row per dag run; empty strings for unset dates
        return {
            "dag_id": run.dag_id,
            "run_id": run.run_id,
            "state": run.state,
            "execution_date": run.execution_date.isoformat(),
            "start_date": run.start_date.isoformat() if run.start_date else '',
            "end_date": run.end_date.isoformat() if run.end_date else '',
        }

    AirflowConsole().print_as(data=dag_runs, output=args.output, mapper=_row)
@provide_session
@cli_utils.action_cli
def dag_test(args, session=None):
"""Execute one single DagRun for a given DAG and execution date, using the DebugExecutor."""
dag = get_dag(subdir=args.subdir, dag_id=args.dag_id)
# clear any prior state for this date so the run starts fresh
dag.clear(start_date=args.execution_date, end_date=args.execution_date, dag_run_state=State.NONE)
try:
dag.run(
executor=DebugExecutor(),
start_date=args.execution_date,
end_date=args.execution_date,
# Always run the DAG at least once even if no logical runs are
# available. This does not make a lot of sense, but Airflow has
# been doing this prior to 2.2 so we keep compatibility.
run_at_least_once=True,
)
except BackfillUnfinished as e:
# report unfinished tasks but still render the graph below
print(str(e))
show_dagrun = args.show_dagrun
imgcat = args.imgcat_dagrun
filename = args.save_dagrun
if show_dagrun or imgcat or filename:
# fetch this run's task instances to color the rendered graph
tis = (
session.query(TaskInstance)
.filter(
TaskInstance.dag_id == args.dag_id,
TaskInstance.execution_date == args.execution_date,
)
.all()
)
dot_graph = render_dag(dag, tis=tis)
print()
if filename:
_save_dot_to_file(dot_graph, filename)
if imgcat:
_display_dot_via_imgcat(dot_graph)
if show_dagrun:
print(dot_graph.source)
@provide_session
@cli_utils.action_cli
def dag_reserialize(args, session=None):
    """Drop all serialized DAGs and, unless --clear-only, re-serialize them."""
    # wipe the serialized_dag table first
    session.query(SerializedDagModel).delete(synchronize_session=False)
    if args.clear_only:
        return
    # re-parse every DAG file and write fresh serialized representations
    dag_bag = DagBag()
    dag_bag.collect_dags(only_if_updated=False, safe_mode=False)
    dag_bag.sync_to_db()
| {
"content_hash": "65dec8b248c71dc680e4311470d0479b",
"timestamp": "",
"source": "github",
"line_count": 457,
"max_line_length": 110,
"avg_line_length": 31.39387308533917,
"alnum_prop": 0.6094653934620479,
"repo_name": "mistercrunch/airflow",
"id": "2e537fb86ce6f7e7534b1a5ffdb2e5566c0b8b9f",
"size": "15133",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "airflow/cli/commands/dag_command.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "36341"
},
{
"name": "HTML",
"bytes": "99243"
},
{
"name": "JavaScript",
"bytes": "891460"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Python",
"bytes": "773270"
},
{
"name": "Shell",
"bytes": "5659"
}
],
"symlink_target": ""
} |
from enum import Enum
from simplads.simplad_monad.delta_type import DeltaType
from simplads.simplad_monad.namedtuples.bind_args import BindArgs
from simplads.simplad_monad.namedtuples.delta_overwrite import DeltaOverwrite
from simplads.simplad_monad.simplad_base_helper import SimpladBaseHelper
from .namedtuples.error_res import ErrorRes
import abc
# delta: [messages], {new_listeners}, return_messages
# annotation_wrapper: is_default, {listeners}
# bind (func)(i) return (annotation_wrapper, unbound), higher_deltas
# unit(i) returns annotation_wrapper, unbound
class ErrorSimplad(SimpladBaseHelper):
    """Simplad layer threading an error/finish channel through binds.

    Fix: removed leftover debug prints ('okay'/'now') that polluted stdout
    on every apply_delta call.
    """

    @staticmethod
    def initial_annotation(unbound):
        """Fresh values start with no error recorded."""
        return ErrorType.none

    @staticmethod
    def run(func, annotation, unbound, higher_deltas):
        """Apply ``func`` only while no error/finish has been recorded.

        Returns ``func``'s BindArgs on the no-error path, otherwise passes
        the value through untouched.
        """
        if annotation is ErrorType.none:
            return func(BindArgs(bound=unbound, deltas=higher_deltas))
        return BindArgs(bound=unbound, deltas=higher_deltas)

    @staticmethod
    def apply_delta(annotation, delta, unbound):
        """Fold a delta ``[ErrorType, ...]`` into the annotation.

        Returns (new_annotation, DeltaOverwrite): ``error`` wraps the value
        in an ErrorRes carrying delta[1] as the error text, ``finish`` wraps
        it in a successful ErrorRes, ``none`` leaves the value untouched.
        """
        if delta[0] is ErrorType.none:
            return delta[0], DeltaOverwrite()
        if delta[0] is ErrorType.error:
            return delta[0], DeltaOverwrite(
                overwrite=True,
                new_value=ErrorRes(has_error=True, error_text=delta[1], result=unbound))
        return delta[0], DeltaOverwrite(
            overwrite=True,
            new_value=ErrorRes(has_error=False, result=unbound))

    @staticmethod
    def merge_deltas(a, b):
        """An error in ``a`` dominates; otherwise keep ``b``."""
        if a is ErrorType.error:
            return a
        return b
class ErrorType(Enum):
    """States of the error channel carried by ErrorSimplad."""

    error = 1   # an error delta was raised
    none = 2    # no error; computation continues
    finish = 3  # computation finished without error
class ErrorDeltaMaker():
    """Factory helpers producing error-channel delta lists."""

    @staticmethod
    def error(i):
        """Delta raising an error with payload ``i`` as the error text."""
        return [ErrorType.error, i]

    @staticmethod
    def no_error():
        """Delta signalling normal continuation."""
        return [ErrorType.none]

    @staticmethod
    def finish():
        """Delta signalling successful completion."""
        return [ErrorType.finish]
| {
"content_hash": "9488c8eeeccf88f21f2545a0dc5965a9",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 88,
"avg_line_length": 31.109375,
"alnum_prop": 0.6750376695128076,
"repo_name": "Cogmob/simplads",
"id": "e9c943bf0598796ffc68b5b6da2a09a3132c5eaf",
"size": "1991",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "simplads/simplads/error_simplad.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "65399"
},
{
"name": "Shell",
"bytes": "238"
}
],
"symlink_target": ""
} |
from __future__ import print_function, division
from sympy import Symbol, sympify
from sympy.core.compatibility import is_sequence, range, string_types
from sympy.geometry.entity import GeometryEntity
from .plot_interval import PlotInterval
from .plot_object import PlotObject
from .util import parse_option_string
class PlotMode(PlotObject):
"""
Grandparent class for plotting
modes. Serves as interface for
registration, lookup, and init
of modes.
To create a new plot mode,
inherit from PlotModeBase
or one of its children, such
as PlotSurface or PlotCurve.
"""
## Class-level attributes
## used to register and lookup
## plot modes. See PlotModeBase
## for descriptions and usage.
i_vars, d_vars = '', ''
intervals = []
aliases = []
is_default = False
## Draw is the only method here which
## is meant to be overridden in child
## classes, and PlotModeBase provides
## a base implementation.
def draw(self):
    """Render this plot mode; concrete subclasses must override."""
    raise NotImplementedError()
## Everything else in this file has to
## do with registration and retrieval
## of plot modes. This is where I've
## hidden much of the ugliness of automatic
## plot mode divination...
## Plot mode registry data structures
_mode_alias_list = []
_mode_map = {
1: {1: {}, 2: {}},
2: {1: {}, 2: {}},
3: {1: {}, 2: {}},
} # [d][i][alias_str]: class
_mode_default_map = {
1: {},
2: {},
3: {},
} # [d][i]: class
_i_var_max, _d_var_max = 2, 3
def __new__(cls, *args, **kwargs):
"""
This is the function which interprets
arguments given to Plot.__init__ and
Plot.__setattr__. Returns an initialized
instance of the appropriate child class.
"""
newargs, newkwargs = PlotMode._extract_options(args, kwargs)
mode_arg = newkwargs.get('mode', '')
# Interpret the arguments
d_vars, intervals = PlotMode._interpret_args(newargs)
i_vars = PlotMode._find_i_vars(d_vars, intervals)
# i: independent-variable count, d: dependent-variable count
i, d = max([len(i_vars), len(intervals)]), len(d_vars)
# Find the appropriate mode
subcls = PlotMode._get_mode(mode_arg, i, d)
# Create the object without calling subcls.__init__
o = object.__new__(subcls)
# Do some setup for the mode instance
o.d_vars = d_vars
o._fill_i_vars(i_vars)
o._fill_intervals(intervals)
o.options = newkwargs
return o
@staticmethod
def _get_mode(mode_arg, i_var_count, d_var_count):
"""
Tries to return an appropriate mode class.
Intended to be called only by __new__.

mode_arg
Can be a string or a class. If it is a
PlotMode subclass, it is simply returned.
If it is a string, it can an alias for
a mode or an empty string. In the latter
case, we try to find a default mode for
the i_var_count and d_var_count.

i_var_count
The number of independent variables
needed to evaluate the d_vars.

d_var_count
The number of dependent variables;
usually the number of functions to
be evaluated in plotting.

For example, a Cartesian function y = f(x) has
one i_var (x) and one d_var (y). A parametric
form x,y,z = f(u,v), f(u,v), f(u,v) has two
two i_vars (u,v) and three d_vars (x,y,z).
"""
# if the mode_arg is simply a PlotMode class,
# check that the mode supports the numbers
# of independent and dependent vars, then
# return it
try:
m = None
if issubclass(mode_arg, PlotMode):
m = mode_arg
# issubclass raises TypeError when mode_arg is not a class at all
except TypeError:
pass
if m:
if not m._was_initialized:
raise ValueError(("To use unregistered plot mode %s "
"you must first call %s._init_mode().")
% (m.__name__, m.__name__))
# the mode must support exactly d_var_count dependent variables
if d_var_count != m.d_var_count:
raise ValueError(("%s can only plot functions "
"with %i dependent variables.")
% (m.__name__,
m.d_var_count))
# ... and at least i_var_count independent variables
if i_var_count > m.i_var_count:
raise ValueError(("%s cannot plot functions "
"with more than %i independent "
"variables.")
% (m.__name__,
m.i_var_count))
return m
# If it is a string, there are two possibilities.
if isinstance(mode_arg, string_types):
i, d = i_var_count, d_var_count
if i > PlotMode._i_var_max:
raise ValueError(var_count_error(True, True))
if d > PlotMode._d_var_max:
raise ValueError(var_count_error(False, True))
# If the string is '', try to find a suitable
# default mode
if not mode_arg:
return PlotMode._get_default_mode(i, d)
# Otherwise, interpret the string as a mode
# alias (e.g. 'cartesian', 'parametric', etc)
else:
return PlotMode._get_aliased_mode(mode_arg, i, d)
else:
raise ValueError("PlotMode argument must be "
"a class or a string")
@staticmethod
def _get_default_mode(i, d, i_vars=-1):
    """Return the default mode class for *d* dependent vars, starting
    the search at *i* independent vars and walking upward to the
    supported maximum. ``i_vars`` remembers the original request for
    the error message."""
    if i_vars == -1:
        i_vars = i
    level = i
    while True:
        try:
            return PlotMode._mode_default_map[d][level]
        except KeyError:
            # No default registered at this i var count; try the next
            # level up until the supported maximum is exhausted.
            if level < PlotMode._i_var_max:
                level += 1
            else:
                raise ValueError(("Couldn't find a default mode "
                                  "for %i independent and %i "
                                  "dependent variables.") % (i_vars, d))
@staticmethod
def _get_aliased_mode(alias, i, d, i_vars=-1):
    """Resolve *alias* (e.g. 'cartesian') to a mode class supporting
    *d* dependent vars, searching from *i* independent vars upward.
    ``i_vars`` remembers the original request for the error message."""
    if i_vars == -1:
        i_vars = i
    if alias not in PlotMode._mode_alias_list:
        raise ValueError(("Couldn't find a mode called"
                          " %s. Known modes: %s.")
                         % (alias, ", ".join(PlotMode._mode_alias_list)))
    level = i
    while True:
        try:
            # NOTE(review): an unpopulated (d, level) slot presumably
            # holds None, so subscripting raises TypeError — confirm
            # against where _mode_map is built.
            return PlotMode._mode_map[d][level][alias]
        except TypeError:
            # Keep looking at higher i var counts until we hit the max.
            if level < PlotMode._i_var_max:
                level += 1
            else:
                raise ValueError(("Couldn't find a %s mode "
                                  "for %i independent and %i "
                                  "dependent variables.")
                                 % (alias, i_vars, d))
@classmethod
def _register(cls):
    """
    Called once for each user-usable plot mode.
    For Cartesian2D, it is invoked after the
    class definition: Cartesian2D._register()
    """
    name = cls.__name__
    cls._init_mode()
    try:
        d, i = cls.d_var_count, cls.i_var_count
        for alias in cls.aliases:
            # Track every valid alias so _get_mode can quickly
            # reject unknown ones.
            if alias not in PlotMode._mode_alias_list:
                PlotMode._mode_alias_list.append(alias)
            # Register this class under the alias for its (d, i) slot.
            PlotMode._mode_map[d][i][alias] = cls
        # Record as the default mode for this (d, i) combination
        # when the subclass asked for it.
        if cls.is_default:
            PlotMode._mode_default_map[d][i] = cls
    except Exception as e:
        raise RuntimeError(("Failed to register "
                            "plot mode %s. Reason: %s")
                           % (name, (str(e))))
@classmethod
def _init_mode(cls):
    """
    Initializes the plot mode based on
    the 'mode-specific parameters' above.
    Only intended to be called by
    PlotMode._register(). To use a mode without
    registering it, you can directly call
    ModeSubclass._init_mode().
    """
    # Turn the i_var / d_var strings into lists of Symbols
    # (one Symbol per character of the string).
    cls.i_vars = [Symbol(ch) for ch in cls.i_vars]
    cls.d_vars = [Symbol(ch) for ch in cls.d_vars]
    # Cache the counts; they are consulted frequently.
    cls.i_var_count = len(cls.i_vars)
    cls.d_var_count = len(cls.d_vars)
    if cls.i_var_count > PlotMode._i_var_max:
        raise ValueError(var_count_error(True, False))
    if cls.d_var_count > PlotMode._d_var_max:
        raise ValueError(var_count_error(False, False))
    # The first alias (when one exists) names the mode in messages.
    cls.primary_alias = cls.aliases[0] if len(cls.aliases) > 0 else cls.__name__
    defaults = cls.intervals
    if len(defaults) != cls.i_var_count:
        raise ValueError("Plot mode must provide a "
                         "default interval for each i_var.")
    for k in range(cls.i_var_count):
        # Each default interval must be [min, max, steps]; no var yet,
        # but they must be in the same order as i_vars.
        if len(defaults[k]) != 3:
            raise ValueError("length should be equal to 3")
        # Build an incomplete interval; the var slot is filled in
        # later, when the mode is instantiated.
        defaults[k] = PlotInterval(None, *defaults[k])
    # Guard flag so unregistered modes cannot be used accidentally.
    cls._was_initialized = True
# Class-level default; flipped to True on each subclass by _init_mode(),
# and checked in _get_mode to reject unregistered modes.
_was_initialized = False
## Initializer Helper Methods
@staticmethod
def _find_i_vars(functions, intervals):
    """Collect the independent variables: first those bound in the
    given intervals (in order), then any remaining free symbols of
    the given functions (the d_vars)."""
    found = []
    # Honor the ordering of vars already bound in intervals.
    for interval in intervals:
        var = interval.v
        if var is None:
            continue
        if var in found:
            raise ValueError(("Multiple intervals given "
                              "for %s.") % (str(var)))
        found.append(var)
    # Pick up any remaining i_vars from the functions themselves.
    for func in functions:
        for sym in func.free_symbols:
            if sym not in found:
                found.append(sym)
    return found
def _fill_i_vars(self, i_vars):
    """Copy the default i_vars, then overwrite the leading entries
    with the variables actually supplied."""
    # Fresh Symbol copies of the defaults.
    self.i_vars = [Symbol(str(v)) for v in self.i_vars]
    # Replace from the front with the given i_vars.
    for idx, var in enumerate(i_vars):
        self.i_vars[idx] = var
def _fill_intervals(self, intervals):
    """Merge the given interval data into copies of the default
    intervals, then assign an unused i_var to any interval that is
    still unbound ("orphan")."""
    # Copy the defaults so they are not mutated.
    self.intervals = [PlotInterval(iv) for iv in self.intervals]
    used = []  # i_vars claimed so far
    # Fold supplied interval info into the copied defaults.
    for idx, given in enumerate(intervals):
        self.intervals[idx].fill_from(given)
        if self.intervals[idx].v is not None:
            used.append(self.intervals[idx].v)
    # Hand each orphan interval the next unused i_var.
    for interval in self.intervals:
        if interval.v is not None:
            continue
        unused = [v for v in self.i_vars if v not in used]
        if len(unused) == 0:
            raise ValueError("length should not be equal to 0")
        interval.v = unused[0]
        used.append(unused[0])
@staticmethod
def _interpret_args(args):
    """Split positional arguments into (functions, intervals).

    A leading GeometryEntity supplies both the coordinate functions
    and its own plotting interval; otherwise each argument is parsed
    as either a PlotInterval or a sympifiable function.
    """
    interval_wrong_order = "PlotInterval %s was given before any function(s)."
    interpret_error = "Could not interpret %s as a function or interval."
    functions, intervals = [], []
    if isinstance(args[0], GeometryEntity):
        # Geometric entities know their own coordinates and interval.
        for coords in list(args[0].arbitrary_point()):
            functions.append(coords)
        intervals.append(PlotInterval.try_parse(args[0].plot_interval()))
        return functions, intervals
    for arg in args:
        parsed = PlotInterval.try_parse(arg)
        if parsed is not None:
            # Intervals are only legal after at least one function.
            if len(functions) == 0:
                raise ValueError(interval_wrong_order % (str(parsed)))
            intervals.append(parsed)
            continue
        if is_sequence(arg, include=str):
            raise ValueError(interpret_error % (str(arg)))
        try:
            functions.append(sympify(arg))
        except TypeError:
            raise ValueError(interpret_error % str(arg))
    return functions, intervals
@staticmethod
def _extract_options(args, kwargs):
    """Separate option strings from positional args.

    String arguments are parsed as option strings and merged into the
    keyword dict; explicit kwargs win over parsed strings. Returns the
    remaining positional args and the combined keyword dict.
    """
    remaining, options = [], {}
    for arg in args:
        if isinstance(arg, string_types):
            options.update(parse_option_string(arg))
        else:
            remaining.append(arg)
    options.update(kwargs)
    return remaining, options
def var_count_error(is_independent, is_plotting):
    """
    Used to format an error message which differs
    slightly in 4 places.
    """
    verb = "Plotting" if is_plotting else "Registering plot modes"
    if is_independent:
        limit, kind = PlotMode._i_var_max, "independent"
    else:
        limit, kind = PlotMode._d_var_max, "dependent"
    return ("%s with more than %i %s variables "
            "is not supported.") % (verb, limit, kind)
| {
"content_hash": "17212f13f1da8168da378f85681ece4e",
"timestamp": "",
"source": "github",
"line_count": 401,
"max_line_length": 82,
"avg_line_length": 35.413965087281795,
"alnum_prop": 0.5199633828603619,
"repo_name": "kaushik94/sympy",
"id": "f902d8aeef934d13d1f0515fede3cc15622496be",
"size": "14201",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sympy/plotting/pygletplot/plot_mode.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "5094"
},
{
"name": "Python",
"bytes": "13553568"
},
{
"name": "Ruby",
"bytes": "304"
},
{
"name": "Scheme",
"bytes": "125"
},
{
"name": "Shell",
"bytes": "4008"
},
{
"name": "TeX",
"bytes": "32356"
},
{
"name": "XSLT",
"bytes": "366202"
}
],
"symlink_target": ""
} |
"""__init__.py
The resumepy package.
"""
from .exceptions import resumepyException # noqa
from .exceptions import CreateDirError # noqa
from .exceptions import CreateFileError # noqa
from .exceptions import DirError # noqa
from .exceptions import FileError # noqa
from .process import create_parser_letter
from .process import process_pdf_letter
from .process import create_parser_resume
from .process import process_html_resume
from .process import process_pdf_resume
from .process import process_text_resume
from .process import resumepy_path
from .utils import copy_example
# Public API of the resumepy package, re-exported from .process and .utils.
__all__ = ['create_parser_letter', 'process_pdf_letter',
           'create_parser_resume', 'process_html_resume',
           'process_pdf_resume', 'process_text_resume',
           'resumepy_path', 'copy_example']
| {
"content_hash": "e9fe39b791dbddb4413e1d4e8305e128",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 58,
"avg_line_length": 32.12,
"alnum_prop": 0.7397260273972602,
"repo_name": "cstrelioff/resumepy",
"id": "3d10d88effbd81485458aa28961e1812ef536092",
"size": "991",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "resumepy/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "6112"
},
{
"name": "Python",
"bytes": "19005"
},
{
"name": "TeX",
"bytes": "15880"
}
],
"symlink_target": ""
} |
r"""
From my blog post:
<http://tanghaibao.blogspot.com/2010/02/getting-phylogeny-from-list-of.html>
Example:
>>> mylist = [3702, 3649, 3694, 3880]
>>> t = TaxIDTree(mylist)
>>> print(t)
(((Carica_papaya,Arabidopsis_thaliana)Brassicales,(Medicago_truncatula,Populus_trichocarpa)fabids)rosids);
>>> t.print_tree()
<BLANKLINE>
/-Carica_papaya
/---|
| \-Arabidopsis_thaliana
---- /---|
| /-Medicago_truncatula
\---|
\-Populus_trichocarpa
"""
import sys
import time
import logging
from functools import lru_cache
from urllib.request import urlopen
from urllib.error import HTTPError, URLError
from ete3 import Tree
from ClientForm import ParseResponse
from BeautifulSoup import BeautifulSoup
from jcvi.apps.base import OptionParser, ActionDispatcher
URL = "http://itol.embl.de/other_trees.shtml"
class TaxIDTree(object):
    """Submit NCBI taxonomy IDs to the iTOL web form and keep the
    newick tree string it returns."""

    def __init__(self, list_of_taxids):
        # A single integer taxid gets the expanded tree (nameExp);
        # a list of taxids gets the default tree (nameCol).
        if isinstance(list_of_taxids, int):  # single taxon
            list_of_taxids = [list_of_taxids]
            form_element_id = "nameExp"
        else:
            form_element_id = "nameCol"
        # Newline-separated IDs, as the form expects.
        form_data = "\n".join(str(taxid) for taxid in list_of_taxids)
        # Retry the initial page fetch until it succeeds.
        while True:
            try:
                response = urlopen(URL)
                break
            except (URLError, HTTPError, RuntimeError) as e:
                logging.error(e)
                logging.debug("wait 5 seconds to reconnect...")
                time.sleep(5)
        # Fill in and submit the first form on the page.
        form = ParseResponse(response, backwards_compat=False)[0]
        form["ncbiIDs"] = form_data
        page = urlopen(form.click()).read()
        soup = BeautifulSoup(page)
        # Pull the newick string out of the matching textarea.
        self.newick = ""
        for element in soup("textarea"):
            if element["id"] == form_element_id:
                self.newick = str(element.contents[0])
        # Dump the page for debugging when no tree came back.
        if self.newick == "":
            print(soup)

    def __str__(self):
        return self.newick

    def print_tree(self):
        """Pretty-print the tree in ASCII via ete3."""
        print(Tree(self.newick, format=8))
def get_names(list_of_taxids):
    """Translate NCBI taxonomy IDs into organism names.

    >>> mylist = [3702, 3649, 3694, 3880]
    >>> get_names(mylist)
    ['Arabidopsis thaliana', 'Carica papaya', 'Populus trichocarpa', 'Medicago truncatula']
    """
    from jcvi.apps.fetch import batch_taxonomy

    ids = [str(taxid) for taxid in list_of_taxids]
    return list(batch_taxonomy(ids))
def get_taxids(list_of_names):
    """Translate organism names into NCBI taxonomy IDs.

    >>> mylist = ['Arabidopsis thaliana', 'Carica papaya']
    >>> get_taxids(mylist)
    [1, 2]
    """
    from jcvi.apps.fetch import batch_taxids

    return [int(taxid) for taxid in batch_taxids(list_of_names)]
def MRCA(list_of_taxids):
    """
    This gets the most recent common ancester (MRCA) for a list of taxids

    >>> mylist = [3702, 3649, 3694, 3880]
    >>> MRCA(mylist)
    'rosids'
    """
    phylo = Tree(str(TaxIDTree(list_of_taxids)), format=8)
    leaves = phylo.get_leaves()
    return phylo.get_common_ancestor(*leaves).name
@lru_cache(maxsize=None)
def isPlantOrigin(taxid):
    """
    Given a taxid, this gets the expanded tree which can then be checked to
    see if the organism is a plant or not

    >>> isPlantOrigin(29760)
    True
    """
    assert isinstance(taxid, int)

    lineage = TaxIDTree(taxid)
    try:
        return "Viridiplantae" in str(lineage)
    except AttributeError:
        raise ValueError("{0} is not a valid ID".format(taxid))
def main():
    """Dispatch the sub-command named on the command line."""
    actions = (
        ("newick", "query a list of IDs to newick"),
        ("test", "test taxonomy module"),
    )
    ActionDispatcher(actions).dispatch(globals())
def test(args):
    """Smoke-test isPlantOrigin(): a plant, an animal, cache reuse,
    and an invalid ID (requires network access)."""
    print("Testing isPlantOrigin():")
    # 3702 = Arabidopsis thaliana, 10090 = Mus musculus
    for taxid in (3702, 10090):
        print(taxid, isPlantOrigin(taxid))

    print("\nTest cache by 10K calls:")
    for _ in range(10000):
        isPlantOrigin(3702)
        isPlantOrigin(10090)
    print("done")

    print("\nTest invalid ID:")
    print(10099, isPlantOrigin(10099))  # Wrong ID
def newick(args):
    """
    %prog newick idslist
    Query a list of IDs to retrieve phylogeny.
    """
    p = OptionParser(newick.__doc__)
    opts, args = p.parse_args(args)

    if len(args) != 1:
        sys.exit(not p.print_help())

    (idsfile,) = args
    # Use a context manager so the ids file is closed deterministically
    # (previously the handle was left for the garbage collector).
    with open(idsfile) as fp:
        mylist = [x.strip() for x in fp if x.strip()]
    print(get_taxids(mylist))

    t = TaxIDTree(mylist)
    print(t)
if __name__ == "__main__":
main()
| {
"content_hash": "50161a7cec6a654830a83784bf18a7d2",
"timestamp": "",
"source": "github",
"line_count": 198,
"max_line_length": 106,
"avg_line_length": 23.434343434343436,
"alnum_prop": 0.5987068965517242,
"repo_name": "tanghaibao/jcvi",
"id": "50435dceb6140304dc1e9b73fc83e3801c9260d7",
"size": "4687",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "jcvi/utils/taxonomy.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Cython",
"bytes": "10467"
},
{
"name": "Dockerfile",
"bytes": "1150"
},
{
"name": "Makefile",
"bytes": "445"
},
{
"name": "Python",
"bytes": "2635155"
}
],
"symlink_target": ""
} |
import os
import logging
import shutil
# Configure root logging for this module at DEBUG verbosity.
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)
# Absolute directory containing this module (symlinks resolved).
base_dir = os.path.dirname((os.path.realpath(__file__)))
def get_user_desktop():
    """
    Locate the current user's desktop folder.

    :return: Path to user's desktop as a string
    """
    return os.path.expanduser(os.path.join("~", "Desktop"))
# Default output location: 'py_gallery_dist' on the user's desktop.
home = get_user_desktop()
default_dist_path = os.path.join(home, 'py_gallery_dist')
def make_dist_dir(path):
    """
    Create the distribution directory and its subdirectories.

    Uses os.makedirs so missing parent directories are created too
    (os.mkdir raised FileNotFoundError when the parent did not exist,
    contradicting this docstring's promise).

    :param path: Path to the distribution directory
    :return: None
    """
    if not os.path.exists(path):
        os.makedirs(path)
    else:
        # Deliberately non-fatal: an existing destination is reported,
        # not treated as an error.
        print("The specified destination already exists.")
def copy_resources_gui(temp_path, dest_path):
    """
    Copy the javascript directories and their contents to the dist directory.
    This function must be used for the GUI version.

    :param temp_path: Path to the distribution directory
    :param dest_path: Path to the destination directory
    :return: None
    """
    # The four asset bundles required by the GUI gallery.
    for resource in ("lightbox", "montage", "bootstrap-gallery", "js"):
        shutil.copytree("{0}/{1}".format(temp_path, resource),
                        "{0}/{1}".format(dest_path, resource))
| {
"content_hash": "a023fb5a4b87b76337fff2b90f2e96af",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 105,
"avg_line_length": 30.244897959183675,
"alnum_prop": 0.6558704453441295,
"repo_name": "btnpushnmunky/pygallerycreator",
"id": "ac51d7b80fa23e9ffb0da46c890c5c0d230acd5f",
"size": "1482",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pygallerycreator/copier.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "16897"
}
],
"symlink_target": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.