| code (string, 2–1.05M chars) | repo_name (string, 5–104 chars) | path (string, 4–251 chars) | language (1 class) | license (15 values) | size (int32, 2–1.05M) |
|---|---|---|---|---|---|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
class Migration(DataMigration):
def forwards(self, orm):
for user in orm['auth.user'].objects.all():
notification = orm.Notification()
notification.user = user
notification.save()
def backwards(self, orm):
orm['notification.notification'].objects.all().delete()
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'notification.notification': {
'Meta': {'object_name': 'Notification'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'notify_bug_comments': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'notify_invoices_to_send': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'notify_late_invoices': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True'})
}
}
complete_apps = ['notification']
| fgaudin/aemanager | notification/migrations/0002_populate_users.py | Python | agpl-3.0 | 4,216 |
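South is long retired; for readers mapping the row above onto current Django, the same population step can be written with `migrations.RunPython`. A minimal sketch, assuming the app labels used above and a hypothetical `0001_initial` dependency:

```python
from django.db import migrations


def forwards(apps, schema_editor):
    # Historical models, as the migration framework sees them.
    User = apps.get_model('auth', 'User')
    Notification = apps.get_model('notification', 'Notification')
    for user in User.objects.all():
        Notification.objects.create(user=user)


def backwards(apps, schema_editor):
    apps.get_model('notification', 'Notification').objects.all().delete()


class Migration(migrations.Migration):
    dependencies = [('notification', '0001_initial')]  # hypothetical
    operations = [migrations.RunPython(forwards, backwards)]
```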
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.db import migrations, models
import jsonfield.fields


class Migration(migrations.Migration):

    dependencies = [
        ('videos', '0007_auto_20151027_2338'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='video',
            name='account',
        ),
        migrations.RemoveField(
            model_name='video',
            name='events',
        ),
        migrations.AddField(
            model_name='video',
            name='extra',
            field=jsonfield.fields.JSONField(default={}),
        ),
    ]
| palfrey/kitling | frontend/videos/migrations/0008_auto_20151028_1154.py | Python | agpl-3.0 | 633 |
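One detail worth flagging in the `AddField` above: `default={}` is evaluated once at import time, so every row can end up sharing the same dict object. A minimal sketch of the usual fix, assuming `jsonfield` accepts callable defaults the way stock Django fields do:

```python
from django.db import migrations
import jsonfield.fields

# default=dict (a callable) gives each row its own fresh dict, instead of
# every row sharing the single {} created when the module is imported.
safer_add = migrations.AddField(
    model_name='video',
    name='extra',
    field=jsonfield.fields.JSONField(default=dict),
)
```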
import re

from django.conf import settings
from tincan import (
    Activity,
    ActivityDefinition,
    LanguageMap
)

from xapi.patterns.base import BasePattern
from xapi.patterns.eco_verbs import (
    LearnerCreatesWikiPageVerb,
    LearnerEditsWikiPageVerb
)


class BaseWikiRule(BasePattern):  # pylint: disable=abstract-method
    def convert(self, evt, course_id):
        title = None
        obj = None
        try:
            # We need to do this because we receive a string instead of a dictionary
            # event_data = json.loads(evt['event'])
            event_data = evt['event']
            title = event_data['POST'].get('title', None)
        except:  # pylint: disable=bare-except
            pass
        if title:
            title = title[0]  # from parameter values to a single value
            verb = self.get_verb()  # pylint: disable=no-member
            obj = Activity(
                id=self.fix_id(self.base_url, evt['context']['path']),
                definition=ActivityDefinition(
                    name=LanguageMap({'en-US': title}),
                    type="http://www.ecolearning.eu/expapi/activitytype/wiki"
                )
            )
        else:
            verb = None  # Skip pages that were never actually created
        return verb, obj


class CreateWikiRule(BaseWikiRule, LearnerCreatesWikiPageVerb):
    def match(self, evt, course_id):
        return re.match(
            '/courses/' + settings.COURSE_ID_PATTERN + '/wiki/_create/?',
            evt['event_type'])


class EditWikiRule(BaseWikiRule, LearnerEditsWikiPageVerb):
    def match(self, evt, course_id):
        return re.match(
            '/courses/' + settings.COURSE_ID_PATTERN + r'/wiki/\w+/_edit/?',
            evt['event_type'])
| marcore/pok-eco | xapi/patterns/manage_wiki.py | Python | agpl-3.0 | 1,746 |
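The `convert()` method above documents its input only implicitly. A hypothetical tracking event, shaped the way the code reads it (the page title arrives as a one-element list under `event['event']['POST']`, and the Activity id is derived from `event['context']['path']`):

```python
# All field values here are illustrative, not taken from a real event stream.
evt = {
    'event_type': '/courses/<course_id>/wiki/_create/',
    'context': {'path': '/courses/<course_id>/wiki/_create/'},
    'event': {'POST': {'title': ['My new page']}},
}
title = evt['event']['POST'].get('title', [None])[0]
print(title)  # -> My new page
```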
#!/usr/bin/python
import pytest
from AnnotatorCore import *
def test_getgenesfromfusion():
AB_EXAMPLE = ('A', 'B')
assert getgenesfromfusion('A-B') == AB_EXAMPLE
assert getgenesfromfusion('A-B ') == AB_EXAMPLE
assert getgenesfromfusion('a-b') == ('a', 'b')
assert getgenesfromfusion('A') == ('A', 'A')
assert getgenesfromfusion('A1-1B') == ('A1', '1B')
# Test fusion case insensitive
assert getgenesfromfusion('A-B fusion') == AB_EXAMPLE
assert getgenesfromfusion('A-B Fusion') == AB_EXAMPLE
# Test unnecessary characters will be trimmed off after fusion
assert getgenesfromfusion('A-B fusion archer') == AB_EXAMPLE
assert getgenesfromfusion('A-B fusion Archer') == AB_EXAMPLE
assert getgenesfromfusion('A-B fusion -Archer') == AB_EXAMPLE
assert getgenesfromfusion('A-B fusion -archer') == AB_EXAMPLE
assert getgenesfromfusion('A-B fusion - archer') == AB_EXAMPLE
assert getgenesfromfusion('A-B fusion - archer ') == AB_EXAMPLE
assert getgenesfromfusion('A-B fusion test') == AB_EXAMPLE
assert getgenesfromfusion('fusion A-B fusion') == AB_EXAMPLE
# Test intragenic
assert getgenesfromfusion('MLL2-intragenic') == ('MLL2', 'MLL2')
def test_conversion():
# Test conversion case for case insensitivity
assert conversion('tyr100') == 'Y100'
assert conversion('tYr100') == 'Y100'
assert conversion('Tyr100') == 'Y100'
assert conversion('tyR100') == 'Y100'
assert conversion('TyR100') == 'Y100'
assert conversion('TYR100') == 'Y100'
assert conversion('tYR100') == 'Y100'
assert conversion('sEr100') == 'S100'
# Test conversion only targets dict() keys
assert conversion('hot100') == 'hot100'
# Test conversion is not affected by empty string and whitespaces
assert conversion('') == ''
assert conversion(' sEr100') == ' S100'
# Test conversion when the string contains a three-letter code that is not supposed to be converted
assert conversion('Promoter') == 'Promoter'
def test_replace_all():
# Test replace_all for case insensitivity
assert replace_all('tyr') == 'Y'
assert replace_all('tYr') == 'Y'
assert replace_all('Tyr') == 'Y'
assert replace_all('tyR') == 'Y'
assert replace_all('TyR') == 'Y'
assert replace_all('TYR') == 'Y'
assert replace_all('tYR') == 'Y'
assert replace_all('sEr') == 'S'
# Test replace_all only targets the dict() keys
assert replace_all('bubblegum juice cup dairy hot pot Tyr melon') == 'bubblegum juice cup dairy hot pot Y melon'
assert replace_all('Ly Lys Pr Pro Gln Glad Ph PH Phe') == 'Ly K Pr P Q Glad Ph PH F'
assert replace_all(
'nOt can fat Tan Rat cat dog man Men FAn rot taR car fAr map TAP Zip poP') == 'nOt can fat Tan Rat cat dog man Men FAn rot taR car fAr map TAP Zip poP'
# Test replace_all is not affected by numbers
assert replace_all('Tyr600E Cys56734342342454562456') == 'Y600E C56734342342454562456'
assert replace_all(
'60 045 434 345 4 26 567 254 245 34 67567 8 56 8 364 56 6 345 7567 3455 6 8 99 89 7 3') == '60 045 434 345 4 26 567 254 245 34 67567 8 56 8 364 56 6 345 7567 3455 6 8 99 89 7 3'
# Test replace_all is not affected by empty string and whitespaces
assert replace_all('') == ''
assert replace_all(' ') == ' '
assert replace_all('Tyr Asn As n Ile Il e') == 'Y N As n I Il e'
def test_resolve_query_type():
assert resolve_query_type(None, [HGVSG_HEADER]) == QueryType.HGVSG
assert resolve_query_type(None, [HGVSP_HEADER]) == QueryType.HGVSP
assert resolve_query_type(None, [HGVSP_SHORT_HEADER]) == QueryType.HGVSP_SHORT
assert resolve_query_type(None, [HGVSG_HEADER, HGVSP_HEADER, HGVSP_SHORT_HEADER]) == QueryType.HGVSP_SHORT
assert resolve_query_type(None, [GC_CHROMOSOME_HEADER, GC_START_POSITION_HEADER, GC_END_POSITION_HEADER,
GC_REF_ALLELE_HEADER, GC_VAR_ALLELE_1_HEADER,
GC_VAR_ALLELE_2_HEADER]) == QueryType.GENOMIC_CHANGE
assert resolve_query_type(QueryType.HGVSG, [HGVSG_HEADER, HGVSP_HEADER, HGVSP_SHORT_HEADER]) == QueryType.HGVSG
# Test extreme cases
with pytest.raises(Exception):
assert resolve_query_type(None, [])
assert resolve_query_type(None, [ALTERATION_HEADER]) == QueryType.HGVSP_SHORT
# Raise exception when the file does not have asked header
with pytest.raises(Exception):
assert resolve_query_type(QueryType.HGVSG, [HGVSP_SHORT_HEADER])
with pytest.raises(Exception):
assert resolve_query_type(QueryType.GENOMIC_CHANGE, [GC_CHROMOSOME_HEADER, GC_START_POSITION_HEADER])
| oncokb/oncokb-annotator | test_AnnotatorCore.py | Python | agpl-3.0 | 4,654 |
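The `conversion` assertions above pin down a small contract: a three-letter residue code is rewritten to its one-letter code only when a digit follows, anywhere in the string, case-insensitively. A hypothetical re-implementation consistent with just the cases exercised above (the real AnnotatorCore logic may differ and its table is larger):

```python
import re

# Only the six codes exercised by the tests; the real table is larger.
AA3_TO_1 = {'TYR': 'Y', 'SER': 'S', 'LYS': 'K', 'PRO': 'P', 'GLN': 'Q', 'PHE': 'F'}


def conversion_sketch(s):
    # Rewrite a three-letter code to one letter only when a digit follows,
    # so 'Promoter' and 'hot100' pass through unchanged.
    def repl(m):
        return AA3_TO_1[m.group(1).upper()] + m.group(2)
    return re.sub(r'(?i)\b(%s)(\d)' % '|'.join(AA3_TO_1), repl, s)


assert conversion_sketch('tYr100') == 'Y100'
assert conversion_sketch(' sEr100') == ' S100'
assert conversion_sketch('Promoter') == 'Promoter'
```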
# ogf4py3
# Copyright (C) 2017 Oscar Triano @cat_dotoscat
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from math import fabs
class Body(object):
"""Basic component for physics.
You can enable or disable gravity for this :class:`Body` with the
attribute *gravity*.
"""
def __init__(self, gravity=False, max_falling_speed=0., max_ascending_speed=0.):
self.x = 0.0
self.y = 0.0
self.vel_x = 0.0
self.vel_y = 0.0
self.max_falling_speed = max_falling_speed
self.max_ascending_speed = max_ascending_speed
self.gravity = gravity
def update(self, dt, g_force=0.):
if self.gravity: self.vel_y += g_force*dt
if self.vel_y < 0. and fabs(self.vel_y) > self.max_falling_speed > 0.:
self.vel_y = -self.max_falling_speed
elif self.vel_y > self.max_ascending_speed > 0.:
self.vel_y = self.max_ascending_speed
self.x += self.vel_x*dt
self.y += self.vel_y*dt
def apply_force(self, dt, x=0., y=0.):
self.vel_x += x*dt
self.vel_y += y*dt
class Collision(object):
"""Rect collision.
Attributes:
x (Float):
y (Float):
type (Int): Type of collision.
collides_with (Int): Use this as flag of *type*
width (Float):
height (Float):
offset (Tuple(x, y)): offset relative to a reference point, e.g. a Body's position.
"""
@property
def right(self):
return self.x + self.width
@property
def top(self):
return self.y + self.height
def __init__(self, x=0., y=0., type_=0, collides_with=0, width=0., height=0., offset=(0, 0)):
self.x = x
self.y = y
self.width = width
self.height = height
self.type_ = type_
self.offset = offset
self.collides_with = collides_with
def update(self, x, y):
self.x = x + self.offset[0]
self.y = y + self.offset[1]
def intersects(self, b):
if b.y >= self.top: return False # top
if b.top <= self.y: return False # bottom
if b.right <= self.x: return False # left
if b.x >= self.right: return False # right
return True
def __contains__(self, pair):
return self.x <= pair[0] <= self.right and self.y <= pair[1] <= self.top
class Platform(Collision):
"""This collision component is specific for platform collisions.
Returns:
An instance of Platform.
Attributes:
platform (Entity or None): This is the last platform which this entity's component has touched.
touch_floor (Bool): Tells if *platform* is not None.
"""
FOOT = 1000
PLATFORM = 1001
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.platform = None
@property
def touch_floor(self):
return self.platform is not None
def reset(self):
self.platform = None
@staticmethod
def get_foot(*args, **kwargs):
collision = Platform(*args, **kwargs)
collision.type_ = Platform.FOOT
return collision
@staticmethod
def get_platform(*args, **kwargs):
collision = Platform(*args, **kwargs)
collision.type_ = Platform.PLATFORM
return collision
class Timer:
def __init__(self, time):
self.time = 0.
self.max_time = time
@property
def done(self):
return self.time >= self.max_time
| dotoscat/Polytank-ASIR | polytanks/ogf4py3/component.py | Python | agpl-3.0 | 4,129 |
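A short usage sketch for the `Body` component above; the 60 Hz step, `g_force` value, and speed cap are illustrative numbers, not taken from the library:

```python
# Integrate one Body under gravity for a second at 60 Hz, capping the fall
# speed at 50 units/s.
body = Body(gravity=True, max_falling_speed=50.)
dt = 1.0 / 60
for _ in range(60):
    body.update(dt, g_force=-9.8)
print(body.y, body.vel_y)  # y has dropped below 0; vel_y is about -9.8
```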
from cms.app_base import CMSApp
from cms.apphook_pool import apphook_pool
from django.utils.translation import ugettext_lazy as _


class ProductApphook(CMSApp):
    name = _("Product Apphook")
    urls = ["wlansi_store.urls"]


apphook_pool.register(ProductApphook)
| matevzmihalic/wlansi-store | wlansi_store/cms_app.py | Python | agpl-3.0 | 264 |
from decimal import Decimal
import ddt
from babel.numbers import format_currency
from django.conf import settings
from django.utils.translation import get_language, to_locale
from oscar.core.loading import get_model
from oscar.test.factories import * # pylint:disable=wildcard-import,unused-wildcard-import
from ecommerce.courses.tests.factories import CourseFactory
from ecommerce.extensions.catalogue.tests.mixins import CourseCatalogTestMixin
from ecommerce.extensions.offer.utils import _remove_exponent_and_trailing_zeros, format_benefit_value
from ecommerce.tests.testcases import TestCase
Benefit = get_model('offer', 'Benefit')
@ddt.ddt
class UtilTests(CourseCatalogTestMixin, TestCase):
def setUp(self):
super(UtilTests, self).setUp()
self.course = CourseFactory()
self.verified_seat = self.course.create_or_update_seat('verified', False, 100, self.partner)
self.stock_record = StockRecord.objects.filter(product=self.verified_seat).first()
self.seat_price = self.stock_record.price_excl_tax
self._range = RangeFactory(products=[self.verified_seat, ])
self.percentage_benefit = BenefitFactory(type=Benefit.PERCENTAGE, range=self._range, value=35.00)
self.value_benefit = BenefitFactory(type=Benefit.FIXED, range=self._range, value=self.seat_price - 10)
def test_format_benefit_value(self):
""" format_benefit_value(benefit) should format benefit value based on benefit type """
benefit_value = format_benefit_value(self.percentage_benefit)
self.assertEqual(benefit_value, '35%')
benefit_value = format_benefit_value(self.value_benefit)
expected_benefit = format_currency(
Decimal((self.seat_price - 10)), settings.OSCAR_DEFAULT_CURRENCY, format=u'#,##0.00',
locale=to_locale(get_language()))
self.assertEqual(benefit_value, '${expected_benefit}'.format(expected_benefit=expected_benefit))
@ddt.data(
('1.0', '1'),
('5000.0', '5000'),
('1.45000', '1.45'),
('5000.40000', '5000.4'),
)
@ddt.unpack
def test_remove_exponent_and_trailing_zeros(self, value, expected):
"""
_remove_exponent_and_trailing_zeros(decimal) should remove exponent and trailing zeros
from decimal number
"""
decimal = _remove_exponent_and_trailing_zeros(Decimal(value))
self.assertEqual(decimal, Decimal(expected))
| mferenca/HMS-ecommerce | ecommerce/extensions/offer/tests/test_utils.py | Python | agpl-3.0 | 2,446 |
import sys

import zmq
import tnetstring

command_uri = sys.argv[1]

sock = zmq.Context.instance().socket(zmq.REQ)
sock.connect(command_uri)

req = {'method': 'recover'}
sock.send(tnetstring.dumps(req))
resp = tnetstring.loads(sock.recv())
if not resp.get('success'):
    raise ValueError('request failed: %s' % resp)
| fanout/pushpin | tools/recover.py | Python | agpl-3.0 | 315 |
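For the curious, the wire format the script uses is easy to inspect; a small sketch of the serialization round trip it performs (the byte output shown is indicative and may vary by tnetstring version):

```python
import tnetstring

# A tnetstring is a length-prefixed serialization, so the request dict
# round-trips losslessly.
payload = tnetstring.dumps({'method': 'recover'})
print(payload)                    # length-prefixed bytes, e.g. 19:6:method,7:recover,}
print(tnetstring.loads(payload))  # {'method': 'recover'}
```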
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'MacadjanUserProfile'
db.create_table(u'macadjan_macadjanuserprofile', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('user', self.gf('django.db.models.fields.related.OneToOneField')(related_name='macadjan_profile', unique=True, to=orm['auth.User'])),
('map_source', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='user_profiles', null=True, on_delete=models.SET_NULL, to=orm['macadjan.MapSource'])),
))
db.send_create_signal(u'macadjan', ['MacadjanUserProfile'])
def backwards(self, orm):
# Deleting model 'MacadjanUserProfile'
db.delete_table(u'macadjan_macadjanuserprofile')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'macadjan.category': {
'Meta': {'ordering': "['name']", 'object_name': 'Category'},
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'marker_url': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '100', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100'})
},
u'macadjan.entitytag': {
'Meta': {'ordering': "['collection', 'name']", 'object_name': 'EntityTag'},
'collection': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'tags'", 'to': u"orm['macadjan.TagCollection']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100'})
},
u'macadjan.entitytype': {
'Meta': {'ordering': "['name']", 'object_name': 'EntityType'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'macadjan.macadjanuserprofile': {
'Meta': {'object_name': 'MacadjanUserProfile'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'map_source': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'user_profiles'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['macadjan.MapSource']"}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'macadjan_profile'", 'unique': 'True', 'to': u"orm['auth.User']"})
},
u'macadjan.mapsource': {
'Meta': {'ordering': "['name']", 'object_name': 'MapSource'},
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100', 'blank': 'True'}),
'web': ('django.db.models.fields.URLField', [], {'default': "''", 'max_length': '200', 'blank': 'True'})
},
u'macadjan.siteinfo': {
'Meta': {'ordering': "['website_name']", 'object_name': 'SiteInfo'},
'additional_info_field_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'additional_info_hints': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'address_1_field_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'address_2_field_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'alias_field_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'change_proposal_text': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'change_proposal_title': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'city_field_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'contact_person_field_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'contact_phone_1_field_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'contact_phone_2_field_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'country_field_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'creation_year_field_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'description_field_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'description_hints': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'email_2_field_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'email_field_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'entity_change_proposal_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'fax_field_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'finances_field_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'finances_hints': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'footer_line': ('django.db.models.fields.TextField', [], {}),
'goals_field_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'goals_hints': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'how_to_access_field_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'how_to_access_hints': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'latitude_field_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'legal_form_field_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'longitude_field_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'map_bounds_bottom': ('django.db.models.fields.FloatField', [], {'default': '-20037508.34'}),
'map_bounds_left': ('django.db.models.fields.FloatField', [], {'default': '-20037508.34'}),
'map_bounds_right': ('django.db.models.fields.FloatField', [], {'default': '20037508.34'}),
'map_bounds_top': ('django.db.models.fields.FloatField', [], {'default': '20037508.34'}),
'map_initial_lat': ('django.db.models.fields.FloatField', [], {}),
'map_initial_lon': ('django.db.models.fields.FloatField', [], {}),
'map_initial_zoom': ('django.db.models.fields.IntegerField', [], {}),
'map_max_resolution': ('django.db.models.fields.IntegerField', [], {'default': '156543'}),
'map_units': ('django.db.models.fields.CharField', [], {'default': "'meters'", 'max_length': '50'}),
'map_zoom_levels': ('django.db.models.fields.IntegerField', [], {'default': '18'}),
'needs_field_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'needs_hints': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'networks_member_field_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'networks_member_hints': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'networks_works_with_field_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'networks_works_with_hints': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'new_entity_proposal_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'new_entity_proposal_text': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'new_entity_proposal_title': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'offerings_field_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'offerings_hints': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'ongoing_projects_field_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'ongoing_projects_hints': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'proponent_comment_field_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'proponent_email_field_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'proposal_bottom_text': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'province_field_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'site': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'site_info'", 'unique': 'True', 'to': u"orm['sites.Site']"}),
'social_values_field_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'social_values_hints': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'subcategories_field_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'summary_field_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'web_2_field_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'web_field_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'website_description': ('django.db.models.fields.TextField', [], {}),
'website_name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'website_subtitle': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '100', 'blank': 'True'}),
'zipcode_field_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'zone_field_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
u'macadjan.subcategory': {
'Meta': {'ordering': "['category', 'name']", 'object_name': 'SubCategory'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'subcategories'", 'to': u"orm['macadjan.Category']"}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'marker_url': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '100', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100'})
},
u'macadjan.tagcollection': {
'Meta': {'ordering': "['name']", 'object_name': 'TagCollection'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100'})
},
u'sites.site': {
'Meta': {'ordering': "('domain',)", 'object_name': 'Site', 'db_table': "'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['macadjan']
| hirunatan/macadjan | macadjan/migrations/0005_auto__add_macadjanuserprofile.py | Python | agpl-3.0 | 15,963 |
# -*- coding: utf-8 -*-
"""Setup the SkyLines application"""

from faker import Faker

from skylines.model import User


def test_admin():
    u = User()
    u.first_name = u'Example'
    u.last_name = u'Manager'
    u.email_address = u'manager@somedomain.com'
    u.password = u.original_password = u'managepass'
    u.admin = True
    return u


def test_user():
    u1 = User()
    u1.first_name = u'Example'
    u1.last_name = u'User'
    u1.email_address = u'example@test.de'
    u1.password = u1.original_password = u'test'
    u1.tracking_key = 123456
    u1.tracking_delay = 2
    return u1


def test_users(n=50):
    fake = Faker(locale='de_DE')
    fake.seed(42)

    users = []
    for i in xrange(n):
        u = User()
        u.first_name = fake.first_name()
        u.last_name = fake.last_name()
        u.email_address = fake.email()
        u.password = u.original_password = fake.password()
        u.tracking_key = fake.random_number(digits=6)
        users.append(u)

    return users
| kerel-fs/skylines | tests/data/users.py | Python | agpl-3.0 | 1,007 |
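A usage sketch for the factories above; `db.session` is an assumption standing in for however SkyLines obtains its SQLAlchemy session, not something defined in this file:

```python
# Build the fixture objects and persist them in one go.
users = [test_admin(), test_user()] + test_users(n=10)
# db.session.add_all(users)   # hypothetical session wiring
# db.session.commit()
```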
# -*- coding: utf-8; -*-
#
# This file is part of Superdesk.
#
# Copyright 2013, 2014 Sourcefabric z.u. and contributors.
#
# For the full copyright and license information, please see the
# AUTHORS and LICENSE files distributed with this source code, or
# at https://www.sourcefabric.org/superdesk/license
import superdesk
import urllib3
import urllib
import xml.etree.ElementTree as etree
from superdesk.io.iptc import subject_codes
from datetime import datetime
from superdesk.metadata.item import ITEM_TYPE, CONTENT_TYPE, ITEM_STATE, CONTENT_STATE
from superdesk.utc import utc
from superdesk.io.commands.update_ingest import process_iptc_codes
from superdesk.etree import get_text_word_count
# The older content does not contain an anpa category, so we derive it from the
# publication name
pubnames = {
'International Sport': 'S',
'Racing': 'R',
'Parliamentary Press Releases': 'P',
'Features': 'C',
'Financial News': 'F',
'General': 'A',
'aap Features': 'C',
'aap International News': 'I',
'aap Australian Sport': 'S',
'Australian General News': 'A',
'Asia Pulse Full': 'I',
'AFR Summary': 'A',
'Australian Sport': 'T',
'PR Releases': 'J',
'Entertainment News': 'E',
'Special Events': 'Y',
'Asia Pulse': 'I',
'aap International Sport': 'S',
'Emergency Services': 'A',
'BRW Summary': 'A',
'FBM Summary': 'A',
'aap Australian General News': 'A',
'International News': 'I',
'aap Financial News': 'F',
'Asia Pulse Basic': 'I',
'Political News': 'P',
'Advisories': 'V'
}
class AppImportTextArchiveCommand(superdesk.Command):
option_list = (
superdesk.Option('--start', '-strt', dest='start_id', required=False),
superdesk.Option('--user', '-usr', dest='user', required=True),
superdesk.Option('--password', '-pwd', dest='password', required=True),
superdesk.Option('--url_root', '-url', dest='url', required=True),
superdesk.Option('--query', '-qry', dest='query', required=True),
superdesk.Option('--count', '-c', dest='limit', required=False)
)
def run(self, start_id, user, password, url, query, limit):
print('Starting text archive import at {}'.format(start_id))
self._user = user
self._password = password
self._id = int(start_id)
self._url_root = url
self._query = urllib.parse.quote(query)
if limit is not None:
self._limit = int(limit)
else:
self._limit = None
self._api_login()
x = self._get_bunch(self._id)
while x:
self._process_bunch(x)
x = self._get_bunch(self._id)
if self._limit is not None and self._limit <= 0:
break
print('finished text archive import')
def _api_login(self):
self._http = urllib3.PoolManager()
credentials = '?login[username]={}&login[password]={}'.format(self._user, self._password)
url = self._url_root + credentials
r = self._http.urlopen('GET', url, headers={'Content-Type': 'application/xml'})
self._headers = {'cookie': r.getheader('set-cookie')}
self._anpa_categories = superdesk.get_resource_service('vocabularies').find_one(req=None, _id='categories')
def _get_bunch(self, id):
url = self._url_root + \
'archives/txtarch?search_docs[struct_query]=(DCDATA_ID<{0})&search_docs[query]='.format(id)
url += self._query
url += '&search_docs[format]=full&search_docs[pagesize]=500&search_docs[page]=1'
url += '&search_docs[sortorder]=DCDATA_ID%20DESC'
print(url)
retries = 3
while retries > 0:
r = self._http.request('GET', url, headers=self._headers)
if r.status == 200:
e = etree.fromstring(r.data)
# print(str(r.data))
count = int(e.find('doc_count').text)
if count > 0:
print('count : {}'.format(count))
return e
else:
self._api_login()
retries -= 1
return None
def _get_head_value(self, doc, field):
el = doc.find('dcdossier/document/head/' + field)
if el is not None:
return el.text
return None
def _addkeywords(self, key, doc, item):
code = self._get_head_value(doc, key)
if code:
if 'keywords' not in item:
item['keywords'] = []
item['keywords'].append(code)
def _process_bunch(self, x):
# x.findall('dc_rest_docs/dc_rest_doc')[0].get('href')
for doc in x.findall('dc_rest_docs/dc_rest_doc'):
print(doc.get('href'))
id = doc.find('dcdossier').get('id')
if int(id) < self._id:
self._id = int(id)
item = {}
item['guid'] = doc.find('dcdossier').get('guid')
# if the item has been modified in the archive then it is due to a kill
# there is an argument that this item should not be imported at all
if doc.find('dcdossier').get('created') != doc.find('dcdossier').get('modified'):
item[ITEM_STATE] = CONTENT_STATE.KILLED
else:
item[ITEM_STATE] = CONTENT_STATE.PUBLISHED
value = datetime.strptime(self._get_head_value(doc, 'PublicationDate'), '%Y%m%d%H%M%S')
item['firstcreated'] = utc.normalize(value) if value.tzinfo else value
item['versioncreated'] = item['firstcreated']
item['unique_id'] = doc.find('dcdossier').get('unique')
item['ingest_id'] = id
item['source'] = self._get_head_value(doc, 'Agency')
self._addkeywords('AsiaPulseCodes', doc, item)
byline = self._get_head_value(doc, 'Byline')
if byline:
item['byline'] = byline
# item['service'] = self._get_head_value(doc,'Service')
category = self._get_head_value(doc, 'Category')
if not category:
publication_name = self._get_head_value(doc, 'PublicationName')
if publication_name in pubnames:
category = pubnames[publication_name]
if category:
anpacategory = {}
anpacategory['qcode'] = category
for anpa_category in self._anpa_categories['items']:
if anpacategory['qcode'].lower() == anpa_category['qcode'].lower():
anpacategory = {'qcode': anpacategory['qcode'], 'name': anpa_category['name']}
break
item['anpa_category'] = [anpacategory]
self._addkeywords('CompanyCodes', doc, item)
type = self._get_head_value(doc, 'Format')
if type == 'x':
item[ITEM_TYPE] = CONTENT_TYPE.TEXT
elif type == 't':
item[ITEM_TYPE] = CONTENT_TYPE.PREFORMATTED
else:
item[ITEM_TYPE] = CONTENT_TYPE.TEXT
item['keyword'] = self._get_head_value(doc, 'Keyword')
item['ingest_provider_sequence'] = self._get_head_value(doc, 'Sequence')
original_source = self._get_head_value(doc, 'Author')
if original_source:
item['original_source'] = original_source
item['headline'] = self._get_head_value(doc, 'Headline')
code = self._get_head_value(doc, 'SubjectRefNum')
if code and len(code) == 7:
code = '0' + code
if code and code in subject_codes:
item['subject'] = []
item['subject'].append({'qcode': code, 'name': subject_codes[code]})
try:
process_iptc_codes(item, None)
except:
pass
slug = self._get_head_value(doc, 'SLUG')
if slug:
item['slugline'] = slug
else:
item['slugline'] = self._get_head_value(doc, 'Keyword')
# self._addkeywords('Takekey', doc, item)
take_key = self._get_head_value(doc, 'Takekey')
if take_key:
item['anpa_take_key'] = take_key
self._addkeywords('Topic', doc, item)
self._addkeywords('Selectors', doc, item)
el = doc.find('dcdossier/document/body/BodyText')
if el is not None:
story = el.text
if item[ITEM_TYPE] == CONTENT_TYPE.TEXT:
story = story.replace('\n ', '<br><br>')
story = story.replace('\n', '<br>')
item['body_html'] = story
else:
item['body_html'] = story
try:
item['word_count'] = get_text_word_count(item['body_html'])
except:
pass
item['pubstatus'] = 'usable'
item['allow_post_publish_actions'] = False
res = superdesk.get_resource_service('published')
original = res.find_one(req=None, guid=item['guid'])
if not original:
res.post([item])
else:
res.patch(original['_id'], item)
if self._limit:
self._limit -= 1
# print(item)
superdesk.command('app:import_text_archive', AppImportTextArchiveCommand())
| amagdas/superdesk | server/apps/aap/import_text_archive/commands.py | Python | agpl-3.0 | 9,503 |
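An invocation sketch for the command above, using the flags declared in `option_list`; the host, query, credentials, and ids are all hypothetical:

```python
# Superdesk commands are run through manage.py:
#
#   python manage.py app:import_text_archive --user admin --password secret \
#       --url_root https://archive.example.com/ --query 'flood' \
#       --start 12345678 --count 500
```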
import argparse
import os.path
import config
import definitions
import sa_util
import csv_util
def import_module(args):
from dependencies import dependencies_manager
tables = []
for module in args.module:
if not args.updateonly:
definitions.get_importer(module)(verbose=args.verbose)
tables += definitions.get_tables(module)
deps = dependencies_manager.updates_for(tables, include=False)
if not args.noupdate:
sa_util.build_views_and_summaries(
items=deps,
verbose=args.verbose,
)
def build_views_summaries(args):
sa_util.build_views_and_summaries(
items=args.module,
all=args.all,
verbose=args.verbose,
force=args.force,
dependencies=not args.no_dependants,
)
def sql(args):
from definitions import get_definition
for item in args.items:
print('\n\nSQL for %s\n' % item)
info = get_definition(item)
sql = info.get('sql')
tables = info.get('tables')
ts = {}
for index, table in enumerate(tables):
ts['t%s' % (index + 1)] = '"%s"' % table
print(sql.format(**ts))
def deps(args):
from dependencies import dependencies_manager
for item in args.items:
print('Dependencies for %s' % item)
print(dependencies_manager.get_needed_updates(item))
def clear_views(args):
for view in sa_util.view_list():
sa_util.drop_table_or_view(view, force=True)
def recreate_views(args):
from dependencies import dependencies_manager
views = definitions.defined_views()
existing_views = sa_util.view_list()
updates = dependencies_manager.updates_for(views)
needed = []
for update in updates:
if update in views:
if update in existing_views:
continue
needed.append(update)
needed.reverse()
print(needed)
sa_util.build_views_and_summaries(
items=needed,
dependencies=False,
verbose=args.verbose,
)
sa_util.swap_tables(verbose=args.verbose)
def clean_db(args):
sa_util.clear_temp_objects(verbose=args.verbose)
tables = sorted(list(
set(sa_util.table_view_list())
- set(definitions.defined_tables())
- set(sa_util.dependent_objects())
))
print('Unknown tables')
for table in tables:
print('\t%s' % table)
for table in tables:
response = raw_input('Delete table `%s` [No/yes/quit]:' % table)
if response and response.upper()[0] == 'Y':
sa_util.drop_table_or_view(table, verbose=args.verbose)
if response and response.upper()[0] == 'Q':
return
def export_all(verbose=False):
if verbose:
print('Exporting all tables')
csv_util.dump_all(verbose=verbose)
def export_custom(verbose=False):
if verbose:
print('Exporting custom tables')
import custom_output
def db_functions(verbose=False):
if verbose:
print('Creating db functions')
import postgres_functions
def import_csv(args):
verbose = args.verbose
filename = args.filename
tablename = args.tablename
delimiter = args.delimiter
filename = os.path.join(config.DATA_PATH, 'import', filename)
if delimiter == '\\t':
delimiter = '\t'
if not tablename:
tablename = os.path.splitext(os.path.basename(filename))[0]
if verbose:
print('Importing %s' % args.filename)
csv_util.import_single(
filename,
tablename,
encoding=args.encoding,
delimiter=delimiter,
verbose=verbose
)
sa_util.swap_tables(verbose=verbose)
def webserver(args):
from munge.app import app
app.run(debug=True)
def main():
commands = [
'export_all',
'export_custom',
'web',
'clean_db',
'db_functions',
'clear_views',
'recreate_views',
]
parser = argparse.ArgumentParser(
description='Command line interface for munge'
)
parser.add_argument('-v', '--verbose', action='count', default=0)
subparsers = parser.add_subparsers(help='commands', dest='command')
for command in commands:
subparsers.add_parser(command)
import_csv_parser = subparsers.add_parser('import_csv')
import_csv_parser.add_argument("--encoding", default='utf-8')
import_csv_parser.add_argument("--delimiter", default=',')
import_csv_parser.add_argument('--tablename', default=None)
import_csv_parser.add_argument('filename')
swap_temp_parser = subparsers.add_parser('swap_temp')
swap_temp_parser.add_argument('-f', '--force', action="store_true")
module_commands = [
'import',
'summaries',
]
for command in module_commands:
module_parser = subparsers.add_parser(command)
module_parser.add_argument('-f', '--force', action="store_true")
module_parser.add_argument('-d', '--no-dependants', action="store_true")
module_parser.add_argument('-a', '--all', action="store_true")
module_parser.add_argument('-t', '--test', action="store_true")
module_parser.add_argument('-n', '--noupdate', action="store_true")
module_parser.add_argument('-u', '--updateonly', action="store_true")
module_parser.add_argument('-s', '--stage', default=0, type=int)
module_parser.add_argument('module', nargs='*')
dep_parser = subparsers.add_parser('deps')
dep_parser.add_argument('items', nargs='*')
dep_parser = subparsers.add_parser('sql')
dep_parser.add_argument('items', nargs='*')
args = parser.parse_args()
if args.command == 'deps':
deps(args)
if args.command == 'sql':
sql(args)
if args.command == 'export_all':
export_all(verbose=args.verbose)
elif args.command == 'import':
import_module(args)
sa_util.swap_tables(verbose=args.verbose)
elif args.command == 'swap_temp':
sa_util.swap_tables(verbose=args.verbose, force=args.force)
elif args.command == 'summaries':
build_views_summaries(args)
if not args.noupdate:
sa_util.swap_tables(verbose=args.verbose, force=args.force)
elif args.command == 'export_custom':
export_custom(verbose=args.verbose)
elif args.command == 'import_csv':
import_csv(args)
elif args.command == 'web':
webserver(args)
elif args.command == 'clean_db':
clean_db(args)
elif args.command == 'clear_views':
clear_views(args)
elif args.command == 'recreate_views':
recreate_views(args)
elif args.command == 'db_functions':
db_functions(verbose=args.verbose)
| tobes/munge | munge/cli.py | Python | agpl-3.0 | 6,778 |
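A usage sketch for the subparsers defined above; the module and file names are hypothetical, and `python -m munge.cli` assumes an entry point that calls `main()`, which this listing does not show:

```python
#   python -m munge.cli -v import some_module
#   python -m munge.cli import_csv --delimiter '\t' --tablename raw data.tsv
#   python -m munge.cli recreate_views
```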
from . import tnt_config
from . import delivery
from . import stock
from . import carrier_file
| factorlibre/carrier-delivery | delivery_carrier_tnt/model/__init__.py | Python | agpl-3.0 | 95 |
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
#
# Copyright (C) 2015 Didotech srl (<http://www.didotech.com>).
#
# All Rights Reserved
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
    "name": "Sale Order Analysis",
    "version": "3.1.1.2",
    "author": "Didotech SRL",
    "website": "http://www.didotech.com",
    "category": "Sales Management",
    "description": """
This module permits creating a simple analysis of a sale shop based on date, sales team and user.
""",
    "depends": [
        'sale_order_confirm',
    ],
    "data": [
        'sale/sale_shop_view.xml'
    ],
    "active": False,
    "installable": True,
}
| iw3hxn/LibrERP | sale_order_analysis/__openerp__.py | Python | agpl-3.0 | 1,472 |
# -*- coding: utf-8 -*-
###############################################################################
#
# Copyright (C) 2013-Today Carlos Eduardo Vercelino - CLVsol
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
from openerp import fields, models


class Event(models.Model):
    _inherit = 'myo.event'

    annotation_ids = fields.Many2many(
        'myo.annotation',
        'myo_event_annotation_rel',
        'event_id',
        'annotation_id',
        'Annotations'
    )


class Annotation(models.Model):
    _inherit = 'myo.annotation'

    event_ids = fields.Many2many(
        'myo.event',
        'myo_event_annotation_rel',
        'annotation_id',
        'event_id',
        'Events'
    )
| MostlyOpen/odoo_addons | myo_event/models/annotation.py | Python | agpl-3.0 | 1,400 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

import json
import unittest

import intelmq.lib.test as test
from intelmq.bots.parsers.dragonresearchgroup.parser_ssh import \
    DragonResearchGroupSSHParserBot


class TestDragonResearchGroupSSHParserBot(test.BotTestCase, unittest.TestCase):
    """
    A TestCase for DragonResearchGroupSSHParserBot.
    """

    @classmethod
    def set_bot(cls):
        cls.bot_reference = DragonResearchGroupSSHParserBot
        cls.default_input_message = json.dumps({'__type': 'Report'})


if __name__ == '__main__':
    unittest.main()
| sch3m4/intelmq | intelmq/tests/bots/parsers/dragonresearchgroup/test_parser_ssh.py | Python | agpl-3.0 | 598 |
from pyramid.security import ALL_PERMISSIONS, Allow, Authenticated

from .models import DBSession, Distro, Package, Upstream, User
from uptrack.schemas import DistroSchema, UpstreamSchema, UserSchema


resources = {}


class RootFactory(object):
    __name__ = 'RootFactory'
    __parent__ = None
    __acl__ = [(Allow, Authenticated, ALL_PERMISSIONS)]

    def __init__(self, request):
        pass

    def __getitem__(self, name):
        r = resources[name]()
        r.__parent__ = self
        r.__name__ = name
        return r


class BaseResource(object):
    __name__ = None
    __parent__ = None

    def __getitem__(self, id):
        o = DBSession.query(self.__model__).get(id)
        if o:
            o.__parent__ = self
            o.__name__ = id
            return o
        else:
            raise KeyError(id)


class DistroResource(BaseResource):
    __model__ = Distro
    __schema__ = DistroSchema


class PackageResource(BaseResource):
    __model__ = Package
    __schema__ = None


class UpstreamResource(BaseResource):
    __model__ = Upstream
    __schema__ = UpstreamSchema


class UserResource(BaseResource):
    __model__ = User
    __schema__ = UserSchema


def get_root(request):
    global resources
    resources.update({"distros": DistroResource,
                      "packages": PackageResource,
                      "upstreams": UpstreamResource,
                      "users": UserResource,
                      })
    return RootFactory(request)
| network-box/uptrack | uptrack/resources.py | Python | agpl-3.0 | 1,492 |
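A traversal sketch for the resource tree above: resolving a URL like `/distros/42` walks root, then container, then row, with the `KeyError` from `__getitem__` surfacing as a 404 in Pyramid. The id is hypothetical and a configured `DBSession` is assumed:

```python
root = get_root(None)
# Pyramid hands __getitem__ the raw URL segment, i.e. a string.
distro = root['distros']['42']  # -> DBSession.query(Distro).get('42')
```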
"""
End to end tests for Instructor Dashboard.
"""
from bok_choy.web_app_test import WebAppTest
from regression.pages.lms.course_page_lms import CourseHomePageExtended
from regression.pages.lms.dashboard_lms import DashboardPageExtended
from regression.pages.lms.instructor_dashboard import InstructorDashboardPageExtended
from regression.pages.lms.utils import get_course_key
from regression.tests.helpers.api_clients import LmsLoginApi
from regression.tests.helpers.utils import get_course_display_name, get_course_info
class AnalyticsTest(WebAppTest):
"""
Regression tests on Analytics on Instructor Dashboard
"""
def setUp(self):
super().setUp()
login_api = LmsLoginApi()
login_api.authenticate(self.browser)
course_info = get_course_info()
self.dashboard_page = DashboardPageExtended(self.browser)
self.instructor_dashboard = InstructorDashboardPageExtended(
self.browser,
get_course_key(course_info)
)
self.course_page = CourseHomePageExtended(
self.browser,
get_course_key(course_info)
)
self.dashboard_page.visit()
self.dashboard_page.select_course(get_course_display_name())
self.course_page.wait_for_page()
self.instructor_dashboard.visit()
| edx/edx-e2e-tests | regression/tests/lms/test_instructor_dashboard.py | Python | agpl-3.0 | 1,326 |
#!/usr/bin/env python3


def RungeKutta2aEDO(x0, t0, tf, h, dX):
    """Heun's method (explicit two-stage Runge-Kutta) for x' = dX(x, t)."""
    xold = x0
    told = t0
    ret = []
    while told <= tf:
        ret += [(told, xold)]
        k1 = dX(xold, told)
        k2 = dX(xold + h * k1, told + h)
        xold = xold + h / 2 * (k1 + k2)
        told = round(told + h, 3)
    return ret


if __name__ == "__main__":
    dX = lambda x, t: t + x
    print(RungeKutta2aEDO(0, 0, 1, 0.1, dX))
| paulocsanz/algebra-linear | scripts/runge_kutta_2a.py | Python | agpl-3.0 | 431 |
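A quick correctness check for the routine above: the demo ODE x' = t + x with x(0) = 0 has the closed-form solution x(t) = exp(t) - t - 1, so the endpoint value can be compared directly:

```python
from math import exp

# x(1) = e - 2 ~= 0.71828; Heun's method is second order, so with h = 0.1
# the endpoint should agree to within O(h^2).
dX = lambda x, t: t + x
t_last, x_last = RungeKutta2aEDO(0, 0, 1, 0.1, dX)[-1]
print(t_last, x_last, exp(t_last) - t_last - 1)
```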
"""HTTP end-points for the User API. """
import copy
from opaque_keys import InvalidKeyError
from django.conf import settings
from django.contrib.auth.models import User
from django.http import HttpResponse
from django.core.urlresolvers import reverse
from django.core.exceptions import ImproperlyConfigured, NON_FIELD_ERRORS, ValidationError
from django.utils.translation import ugettext as _
from django.utils.decorators import method_decorator
from django.views.decorators.csrf import ensure_csrf_cookie, csrf_protect, csrf_exempt
from opaque_keys.edx import locator
from rest_framework import authentication
from rest_framework import filters
from rest_framework import generics
from rest_framework import status
from rest_framework import viewsets
from rest_framework.views import APIView
from rest_framework.exceptions import ParseError
from django_countries import countries
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from openedx.core.lib.api.permissions import ApiKeyHeaderPermission
import third_party_auth
from django_comment_common.models import Role
from edxmako.shortcuts import marketing_link
from student.views import create_account_with_params
from student.cookies import set_logged_in_cookies
from openedx.core.lib.api.authentication import SessionAuthenticationAllowInactiveUser
from util.json_request import JsonResponse
from .preferences.api import update_email_opt_in
from .helpers import FormDescription, shim_student_view, require_post_params
from .models import UserPreference, UserProfile
from .accounts import (
NAME_MAX_LENGTH, EMAIL_MIN_LENGTH, EMAIL_MAX_LENGTH, PASSWORD_MIN_LENGTH, PASSWORD_MAX_LENGTH,
USERNAME_MIN_LENGTH, USERNAME_MAX_LENGTH
)
from .accounts.api import check_account_exists
from .serializers import UserSerializer, UserPreferenceSerializer
class LoginSessionView(APIView):
"""HTTP end-points for logging in users. """
# This end-point is available to anonymous users,
# so do not require authentication.
authentication_classes = []
@method_decorator(ensure_csrf_cookie)
def get(self, request):
"""Return a description of the login form.
This decouples clients from the API definition:
if the API decides to modify the form, clients won't need
to be updated.
See `user_api.helpers.FormDescription` for examples
of the JSON-encoded form description.
Returns:
HttpResponse
"""
form_desc = FormDescription("post", reverse("user_api_login_session"))
# Translators: This label appears above a field on the login form
# meant to hold the user's email address.
email_label = _(u"Email")
# Translators: This example email address is used as a placeholder in
# a field on the login form meant to hold the user's email address.
email_placeholder = _(u"username@domain.com")
# Translators: These instructions appear on the login form, immediately
# below a field meant to hold the user's email address.
email_instructions = _(
u"The email address you used to register with {platform_name}"
).format(platform_name=settings.PLATFORM_NAME)
form_desc.add_field(
"email",
field_type="email",
label=email_label,
placeholder=email_placeholder,
instructions=email_instructions,
restrictions={
"min_length": EMAIL_MIN_LENGTH,
"max_length": EMAIL_MAX_LENGTH,
}
)
# Translators: This label appears above a field on the login form
# meant to hold the user's password.
password_label = _(u"Password")
form_desc.add_field(
"password",
label=password_label,
field_type="password",
restrictions={
"min_length": PASSWORD_MIN_LENGTH,
"max_length": PASSWORD_MAX_LENGTH,
}
)
form_desc.add_field(
"remember",
field_type="checkbox",
label=_("Remember me"),
default=False,
required=False,
)
return HttpResponse(form_desc.to_json(), content_type="application/json")
@method_decorator(require_post_params(["email", "password"]))
@method_decorator(csrf_protect)
def post(self, request):
"""Log in a user.
You must send all required form fields with the request.
You can optionally send an `analytics` param with a JSON-encoded
object with additional info to include in the login analytics event.
Currently, the only supported field is "enroll_course_id" to indicate
that the user logged in while enrolling in a particular course.
Arguments:
request (HttpRequest)
Returns:
HttpResponse: 200 on success
HttpResponse: 400 if the request is not valid.
HttpResponse: 403 if authentication failed.
403 with content "third-party-auth" if the user
has successfully authenticated with a third party provider
but does not have a linked account.
HttpResponse: 302 if redirecting to another page.
Example Usage:
POST /user_api/v1/login_session
with POST params `email`, `password`, and `remember`.
200 OK
"""
# For the initial implementation, shim the existing login view
# from the student Django app.
from student.views import login_user
return shim_student_view(login_user, check_logged_in=True)(request)
class RegistrationView(APIView):
"""HTTP end-points for creating a new user. """
DEFAULT_FIELDS = ["email", "name", "username", "password"]
EXTRA_FIELDS = [
"city",
"country",
"gender",
"year_of_birth",
"level_of_education",
"mailing_address",
"goals",
"honor_code",
"terms_of_service",
]
# This end-point is available to anonymous users,
# so do not require authentication.
authentication_classes = []
def _is_field_visible(self, field_name):
"""Check whether a field is visible based on Django settings. """
return self._extra_fields_setting.get(field_name) in ["required", "optional"]
def _is_field_required(self, field_name):
"""Check whether a field is required based on Django settings. """
return self._extra_fields_setting.get(field_name) == "required"
def __init__(self, *args, **kwargs):
super(RegistrationView, self).__init__(*args, **kwargs)
# Backwards compatibility: Honor code is required by default, unless
# explicitly set to "optional" in Django settings.
self._extra_fields_setting = copy.deepcopy(settings.REGISTRATION_EXTRA_FIELDS)
self._extra_fields_setting["honor_code"] = self._extra_fields_setting.get("honor_code", "required")
# Check that the setting is configured correctly
for field_name in self.EXTRA_FIELDS:
if self._extra_fields_setting.get(field_name, "hidden") not in ["required", "optional", "hidden"]:
msg = u"Setting REGISTRATION_EXTRA_FIELDS values must be either required, optional, or hidden."
raise ImproperlyConfigured(msg)
# Map field names to the instance method used to add the field to the form
self.field_handlers = {}
for field_name in self.DEFAULT_FIELDS + self.EXTRA_FIELDS:
handler = getattr(self, "_add_{field_name}_field".format(field_name=field_name))
self.field_handlers[field_name] = handler
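        # For example, with this naming convention the "email" entry resolves to
        # self._add_email_field and "honor_code" resolves to
        # self._add_honor_code_field, both defined later in this class.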
@method_decorator(ensure_csrf_cookie)
def get(self, request):
"""Return a description of the registration form.
This decouples clients from the API definition:
if the API decides to modify the form, clients won't need
to be updated.
This is especially important for the registration form,
since different edx-platform installations might
collect different demographic information.
See `user_api.helpers.FormDescription` for examples
of the JSON-encoded form description.
Arguments:
request (HttpRequest)
Returns:
HttpResponse
"""
form_desc = FormDescription("post", reverse("user_api_registration"))
self._apply_third_party_auth_overrides(request, form_desc)
# Default fields are always required
for field_name in self.DEFAULT_FIELDS:
self.field_handlers[field_name](form_desc, required=True)
# Extra fields configured in Django settings
# may be required, optional, or hidden
for field_name in self.EXTRA_FIELDS:
if self._is_field_visible(field_name):
self.field_handlers[field_name](
form_desc,
required=self._is_field_required(field_name)
)
return HttpResponse(form_desc.to_json(), content_type="application/json")
@method_decorator(csrf_exempt)
def post(self, request):
"""Create the user's account.
You must send all required form fields with the request.
You can optionally send a "course_id" param to indicate in analytics
events that the user registered while enrolling in a particular course.
Arguments:
            request (HttpRequest)
Returns:
HttpResponse: 200 on success
HttpResponse: 400 if the request is not valid.
HttpResponse: 409 if an account with the given username or email
address already exists
"""
data = request.POST.copy()
email = data.get('email')
username = data.get('username')
# Handle duplicate email/username
conflicts = check_account_exists(email=email, username=username)
if conflicts:
conflict_messages = {
# Translators: This message is shown to users who attempt to create a new
# account using an email address associated with an existing account.
"email": _(
u"It looks like {email_address} belongs to an existing account. Try again with a different email address." # pylint: disable=line-too-long
).format(email_address=email),
# Translators: This message is shown to users who attempt to create a new
# account using a username associated with an existing account.
"username": _(
u"It looks like {username} belongs to an existing account. Try again with a different username."
).format(username=username),
}
errors = {
field: [{"user_message": conflict_messages[field]}]
for field in conflicts
}
return JsonResponse(errors, status=409)
# Backwards compatibility: the student view expects both
# terms of service and honor code values. Since we're combining
# these into a single checkbox, the only value we may get
# from the new view is "honor_code".
# Longer term, we will need to make this more flexible to support
# open source installations that may have separate checkboxes
# for TOS, privacy policy, etc.
if data.get("honor_code") and "terms_of_service" not in data:
data["terms_of_service"] = data["honor_code"]
try:
user = create_account_with_params(request, data)
except ValidationError as err:
# Should only get non-field errors from this function
assert NON_FIELD_ERRORS not in err.message_dict
# Only return first error for each field
errors = {
field: [{"user_message": error} for error in error_list]
for field, error_list in err.message_dict.items()
}
return JsonResponse(errors, status=400)
response = JsonResponse({"success": True})
set_logged_in_cookies(request, response, user)
return response
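    # A hedged sketch of the 409 payload built above for a duplicate email
    # (taken@example.com is a made-up address; the message text comes from
    # the translated strings in conflict_messages):
    #
    #   {
    #       "email": [
    #           {"user_message": "It looks like taken@example.com belongs to an existing account. Try again with a different email address."}
    #       ]
    #   }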
def _add_email_field(self, form_desc, required=True):
"""Add an email field to a form description.
Arguments:
form_desc: A form description
Keyword Arguments:
required (bool): Whether this field is required; defaults to True
"""
# Translators: This label appears above a field on the registration form
# meant to hold the user's email address.
email_label = _(u"Email")
# Translators: This example email address is used as a placeholder in
# a field on the registration form meant to hold the user's email address.
email_placeholder = _(u"username@domain.com")
form_desc.add_field(
"email",
field_type="email",
label=email_label,
placeholder=email_placeholder,
restrictions={
"min_length": EMAIL_MIN_LENGTH,
"max_length": EMAIL_MAX_LENGTH,
},
required=required
)
def _add_name_field(self, form_desc, required=True):
"""Add a name field to a form description.
Arguments:
form_desc: A form description
Keyword Arguments:
required (bool): Whether this field is required; defaults to True
"""
# Translators: This label appears above a field on the registration form
# meant to hold the user's full name.
name_label = _(u"Full name")
# Translators: This example name is used as a placeholder in
# a field on the registration form meant to hold the user's name.
name_placeholder = _(u"Jane Doe")
# Translators: These instructions appear on the registration form, immediately
# below a field meant to hold the user's full name.
name_instructions = _(u"Your legal name, used for any certificates you earn.")
form_desc.add_field(
"name",
label=name_label,
placeholder=name_placeholder,
instructions=name_instructions,
restrictions={
"max_length": NAME_MAX_LENGTH,
},
required=required
)
def _add_username_field(self, form_desc, required=True):
"""Add a username field to a form description.
Arguments:
form_desc: A form description
Keyword Arguments:
required (bool): Whether this field is required; defaults to True
"""
# Translators: This label appears above a field on the registration form
# meant to hold the user's public username.
username_label = _(u"Public username")
# Translators: These instructions appear on the registration form, immediately
# below a field meant to hold the user's public username.
username_instructions = _(
u"The name that will identify you in your courses - "
"{bold_start}(cannot be changed later){bold_end}"
).format(bold_start=u'<strong>', bold_end=u'</strong>')
# Translators: This example username is used as a placeholder in
# a field on the registration form meant to hold the user's username.
username_placeholder = _(u"JaneDoe")
form_desc.add_field(
"username",
label=username_label,
instructions=username_instructions,
placeholder=username_placeholder,
restrictions={
"min_length": USERNAME_MIN_LENGTH,
"max_length": USERNAME_MAX_LENGTH,
},
required=required
)
def _add_password_field(self, form_desc, required=True):
"""Add a password field to a form description.
Arguments:
form_desc: A form description
Keyword Arguments:
required (bool): Whether this field is required; defaults to True
"""
# Translators: This label appears above a field on the registration form
# meant to hold the user's password.
password_label = _(u"Password")
form_desc.add_field(
"password",
label=password_label,
field_type="password",
restrictions={
"min_length": PASSWORD_MIN_LENGTH,
"max_length": PASSWORD_MAX_LENGTH,
},
required=required
)
def _add_level_of_education_field(self, form_desc, required=True):
"""Add a level of education field to a form description.
Arguments:
form_desc: A form description
Keyword Arguments:
required (bool): Whether this field is required; defaults to True
"""
# Translators: This label appears above a dropdown menu on the registration
# form used to select the user's highest completed level of education.
education_level_label = _(u"Highest level of education completed")
# The labels are marked for translation in UserProfile model definition.
options = [(name, _(label)) for name, label in UserProfile.LEVEL_OF_EDUCATION_CHOICES] # pylint: disable=translation-of-non-string
form_desc.add_field(
"level_of_education",
label=education_level_label,
field_type="select",
options=options,
include_default_option=True,
required=required
)
def _add_gender_field(self, form_desc, required=True):
"""Add a gender field to a form description.
Arguments:
form_desc: A form description
Keyword Arguments:
required (bool): Whether this field is required; defaults to True
"""
# Translators: This label appears above a dropdown menu on the registration
# form used to select the user's gender.
gender_label = _(u"Gender")
# The labels are marked for translation in UserProfile model definition.
options = [(name, _(label)) for name, label in UserProfile.GENDER_CHOICES] # pylint: disable=translation-of-non-string
form_desc.add_field(
"gender",
label=gender_label,
field_type="select",
options=options,
include_default_option=True,
required=required
)
def _add_year_of_birth_field(self, form_desc, required=True):
"""Add a year of birth field to a form description.
Arguments:
form_desc: A form description
Keyword Arguments:
required (bool): Whether this field is required; defaults to True
"""
# Translators: This label appears above a dropdown menu on the registration
# form used to select the user's year of birth.
yob_label = _(u"Year of birth")
options = [(unicode(year), unicode(year)) for year in UserProfile.VALID_YEARS]
form_desc.add_field(
"year_of_birth",
label=yob_label,
field_type="select",
options=options,
include_default_option=True,
required=required
)
def _add_mailing_address_field(self, form_desc, required=True):
"""Add a mailing address field to a form description.
Arguments:
form_desc: A form description
Keyword Arguments:
required (bool): Whether this field is required; defaults to True
"""
# Translators: This label appears above a field on the registration form
# meant to hold the user's mailing address.
mailing_address_label = _(u"Mailing address")
form_desc.add_field(
"mailing_address",
label=mailing_address_label,
field_type="textarea",
required=required
)
def _add_goals_field(self, form_desc, required=True):
"""Add a goals field to a form description.
Arguments:
form_desc: A form description
Keyword Arguments:
required (bool): Whether this field is required; defaults to True
"""
# Translators: This phrase appears above a field on the registration form
# meant to hold the user's reasons for registering with edX.
goals_label = _(
u"Tell us why you're interested in {platform_name}"
).format(platform_name=settings.PLATFORM_NAME)
form_desc.add_field(
"goals",
label=goals_label,
field_type="textarea",
required=required
)
def _add_city_field(self, form_desc, required=True):
"""Add a city field to a form description.
Arguments:
form_desc: A form description
Keyword Arguments:
required (bool): Whether this field is required; defaults to True
"""
# Translators: This label appears above a field on the registration form
# which allows the user to input the city in which they live.
city_label = _(u"City")
form_desc.add_field(
"city",
label=city_label,
required=required
)
def _add_country_field(self, form_desc, required=True):
"""Add a country field to a form description.
Arguments:
form_desc: A form description
Keyword Arguments:
required (bool): Whether this field is required; defaults to True
"""
# Translators: This label appears above a dropdown menu on the registration
# form used to select the country in which the user lives.
country_label = _(u"Country")
error_msg = _(u"Please select your Country.")
form_desc.add_field(
"country",
label=country_label,
field_type="select",
options=list(countries),
include_default_option=True,
required=required,
error_messages={
"required": error_msg
}
)
def _add_honor_code_field(self, form_desc, required=True):
"""Add an honor code field to a form description.
Arguments:
form_desc: A form description
Keyword Arguments:
required (bool): Whether this field is required; defaults to True
"""
# Separate terms of service and honor code checkboxes
if self._is_field_visible("terms_of_service"):
terms_text = _(u"Honor Code")
# Combine terms of service and honor code checkboxes
else:
# Translators: This is a legal document users must agree to
# in order to register a new account.
terms_text = _(u"Terms of Service and Honor Code")
terms_link = u"<a href=\"{url}\">{terms_text}</a>".format(
url=marketing_link("HONOR"),
terms_text=terms_text
)
# Translators: "Terms of Service" is a legal document users must agree to
# in order to register a new account.
label = _(
u"I agree to the {platform_name} {terms_of_service}."
).format(
platform_name=settings.PLATFORM_NAME,
terms_of_service=terms_link
)
# Translators: "Terms of Service" is a legal document users must agree to
# in order to register a new account.
error_msg = _(
u"You must agree to the {platform_name} {terms_of_service}."
).format(
platform_name=settings.PLATFORM_NAME,
terms_of_service=terms_link
)
form_desc.add_field(
"honor_code",
label=label,
field_type="checkbox",
default=False,
required=required,
error_messages={
"required": error_msg
}
)
def _add_terms_of_service_field(self, form_desc, required=True):
"""Add a terms of service field to a form description.
Arguments:
form_desc: A form description
Keyword Arguments:
required (bool): Whether this field is required; defaults to True
"""
# Translators: This is a legal document users must agree to
# in order to register a new account.
terms_text = _(u"Terms of Service")
terms_link = u"<a href=\"{url}\">{terms_text}</a>".format(
url=marketing_link("TOS"),
terms_text=terms_text
)
# Translators: "Terms of service" is a legal document users must agree to
# in order to register a new account.
label = _(
u"I agree to the {platform_name} {terms_of_service}."
).format(
platform_name=settings.PLATFORM_NAME,
terms_of_service=terms_link
)
# Translators: "Terms of service" is a legal document users must agree to
# in order to register a new account.
error_msg = _(
u"You must agree to the {platform_name} {terms_of_service}."
).format(
platform_name=settings.PLATFORM_NAME,
terms_of_service=terms_link
)
form_desc.add_field(
"terms_of_service",
label=label,
field_type="checkbox",
default=False,
required=required,
error_messages={
"required": error_msg
}
)
def _apply_third_party_auth_overrides(self, request, form_desc):
"""Modify the registration form if the user has authenticated with a third-party provider.
If a user has successfully authenticated with a third-party provider,
        but does not yet have an account with edX, we want to fill in
the registration form with any info that we get from the
provider.
This will also hide the password field, since we assign users a default
(random) password on the assumption that they will be using
third-party auth to log in.
Arguments:
request (HttpRequest): The request for the registration form, used
to determine if the user has successfully authenticated
with a third-party provider.
form_desc (FormDescription): The registration form description
"""
if third_party_auth.is_enabled():
running_pipeline = third_party_auth.pipeline.get(request)
if running_pipeline:
current_provider = third_party_auth.provider.Registry.get_from_pipeline(running_pipeline)
if current_provider:
# Override username / email / full name
field_overrides = current_provider.get_register_form_data(
running_pipeline.get('kwargs')
)
for field_name in self.DEFAULT_FIELDS:
if field_name in field_overrides:
form_desc.override_field_properties(
field_name, default=field_overrides[field_name]
)
# Hide the password field
form_desc.override_field_properties(
"password",
default="",
field_type="hidden",
required=False,
label="",
instructions="",
restrictions={}
)
class PasswordResetView(APIView):
"""HTTP end-point for GETting a description of the password reset form. """
# This end-point is available to anonymous users,
# so do not require authentication.
authentication_classes = []
@method_decorator(ensure_csrf_cookie)
def get(self, request):
"""Return a description of the password reset form.
This decouples clients from the API definition:
if the API decides to modify the form, clients won't need
to be updated.
See `user_api.helpers.FormDescription` for examples
of the JSON-encoded form description.
Returns:
HttpResponse
"""
form_desc = FormDescription("post", reverse("password_change_request"))
# Translators: This label appears above a field on the password reset
# form meant to hold the user's email address.
email_label = _(u"Email")
# Translators: This example email address is used as a placeholder in
# a field on the password reset form meant to hold the user's email address.
email_placeholder = _(u"username@domain.com")
# Translators: These instructions appear on the password reset form,
# immediately below a field meant to hold the user's email address.
email_instructions = _(
u"The email address you used to register with {platform_name}"
).format(platform_name=settings.PLATFORM_NAME)
form_desc.add_field(
"email",
field_type="email",
label=email_label,
placeholder=email_placeholder,
instructions=email_instructions,
restrictions={
"min_length": EMAIL_MIN_LENGTH,
"max_length": EMAIL_MAX_LENGTH,
}
)
return HttpResponse(form_desc.to_json(), content_type="application/json")
class UserViewSet(viewsets.ReadOnlyModelViewSet):
"""
DRF class for interacting with the User ORM object
"""
authentication_classes = (authentication.SessionAuthentication,)
permission_classes = (ApiKeyHeaderPermission,)
queryset = User.objects.all().prefetch_related("preferences")
serializer_class = UserSerializer
paginate_by = 10
paginate_by_param = "page_size"
class ForumRoleUsersListView(generics.ListAPIView):
"""
Forum roles are represented by a list of user dicts
"""
authentication_classes = (authentication.SessionAuthentication,)
permission_classes = (ApiKeyHeaderPermission,)
serializer_class = UserSerializer
paginate_by = 10
paginate_by_param = "page_size"
def get_queryset(self):
"""
Return a list of users with the specified role/course pair
"""
name = self.kwargs['name']
course_id_string = self.request.query_params.get('course_id')
if not course_id_string:
raise ParseError('course_id must be specified')
course_id = SlashSeparatedCourseKey.from_deprecated_string(course_id_string)
role = Role.objects.get_or_create(course_id=course_id, name=name)[0]
users = role.users.all()
return users
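# Illustrative request for the view above (the URL path and course id are
# hypothetical; only the course_id query parameter is actually required):
#
#   GET /forum_role_users/Moderator/?course_id=org/course/run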
class UserPreferenceViewSet(viewsets.ReadOnlyModelViewSet):
"""
DRF class for interacting with the UserPreference ORM
"""
authentication_classes = (authentication.SessionAuthentication,)
permission_classes = (ApiKeyHeaderPermission,)
queryset = UserPreference.objects.all()
filter_backends = (filters.DjangoFilterBackend,)
filter_fields = ("key", "user")
serializer_class = UserPreferenceSerializer
paginate_by = 10
paginate_by_param = "page_size"
class PreferenceUsersListView(generics.ListAPIView):
"""
DRF class for listing a user's preferences
"""
authentication_classes = (authentication.SessionAuthentication,)
permission_classes = (ApiKeyHeaderPermission,)
serializer_class = UserSerializer
paginate_by = 10
paginate_by_param = "page_size"
def get_queryset(self):
return User.objects.filter(preferences__key=self.kwargs["pref_key"]).prefetch_related("preferences")
class UpdateEmailOptInPreference(APIView):
"""View for updating the email opt in preference. """
authentication_classes = (SessionAuthenticationAllowInactiveUser,)
@method_decorator(require_post_params(["course_id", "email_opt_in"]))
@method_decorator(ensure_csrf_cookie)
def post(self, request):
""" Post function for updating the email opt in preference.
Allows the modification or creation of the email opt in preference at an
organizational level.
Args:
request (Request): The request should contain the following POST parameters:
* course_id: The slash separated course ID. Used to determine the organization
for this preference setting.
* email_opt_in: "True" or "False" to determine if the user is opting in for emails from
this organization. If the string does not match "True" (case insensitive) it will
assume False.
"""
course_id = request.data['course_id']
try:
org = locator.CourseLocator.from_string(course_id).org
except InvalidKeyError:
return HttpResponse(
status=400,
content="No course '{course_id}' found".format(course_id=course_id),
content_type="text/plain"
)
# Only check for true. All other values are False.
email_opt_in = request.data['email_opt_in'].lower() == 'true'
update_email_opt_in(request.user, org, email_opt_in)
return HttpResponse(status=status.HTTP_200_OK)
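# Illustrative request for the view above (the URL path is hypothetical; the
# required POST parameters match the decorator on post()):
#
#   POST /user_api/v1/preferences/email_opt_in
#   course_id=org/course/run&email_opt_in=True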
|
jbzdak/edx-platform
|
openedx/core/djangoapps/user_api/views.py
|
Python
|
agpl-3.0
| 33,442
|
from django.db import models
from django.contrib.contenttypes.models import ContentType
from django.contrib.contenttypes import generic
from sorl.thumbnail import ImageField
class Image(models.Model):
# link to other objects using the ContentType system
content_type = models.ForeignKey(ContentType)
object_id = models.PositiveIntegerField()
content_object = generic.GenericForeignKey('content_type', 'object_id')
    # store the actual image
    image = ImageField(upload_to="images")
    # where the image came from
    source = models.CharField(max_length=400)
    # whether this is the primary image for the related object
    is_primary = models.BooleanField(default=False)
def save(self, *args, **kwargs):
"""
Only one image should be marked as is_primary for an object.
"""
# other images for this object
        siblings = Image.objects.filter(
            content_type=self.content_type,
            object_id=self.object_id,
        )
        # if this is the first image for the object, make it the primary one
        if not siblings.exists():
            self.is_primary = True
        super(Image, self).save(*args, **kwargs)
        # if this image is primary, demote any other primary siblings
        if self.is_primary:
            primary_siblings = siblings.filter(is_primary=True).exclude(id=self.id)
            for sibling in primary_siblings:
                sibling.is_primary = False
                sibling.save()
class HasImageMixin(object):
def primary_image(self):
primary = self.images.filter(is_primary=True)
if primary.count():
return primary[0].image
return None
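# A minimal usage sketch (Article is a hypothetical model with a generic
# relation named `images` pointing at Image): the first image saved for an
# object is promoted to primary, and saving a later image with
# is_primary=True demotes the previous primary.
#
#   article = Article.objects.get(pk=1)
#   first = Image(content_object=article, image="images/a.jpg", source="upload")
#   first.save()   # becomes primary automatically
#   second = Image(content_object=article, image="images/b.jpg",
#                  source="upload", is_primary=True)
#   second.save()  # demotes `first` to is_primary=False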
|
Hutspace/odekro
|
mzalendo/images/models.py
|
Python
|
agpl-3.0
| 1,690
|
# Generated by Django 1.11.11 on 2018-07-27 18:44
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('course_metadata', '0090_degree_curriculum_reset'),
]
operations = [
migrations.AddField(
model_name='degree',
name='campus_image_desktop',
field=models.ImageField(blank=True, help_text='Provide a campus image to display on desktop displays', null=True, upload_to='media/degree_marketing/campus_images/'),
),
migrations.AddField(
model_name='degree',
name='campus_image_mobile',
field=models.ImageField(blank=True, help_text='Provide a campus image to display on mobile displays', null=True, upload_to='media/degree_marketing/campus_images/'),
),
migrations.AddField(
model_name='degree',
name='campus_image_tablet',
field=models.ImageField(blank=True, help_text='Provide a campus image to display on tablet displays', null=True, upload_to='media/degree_marketing/campus_images/'),
),
migrations.AddField(
model_name='degree',
name='overall_ranking',
field=models.CharField(blank=True, help_text='Overall program ranking (e.g. "#1 in the U.S.")', max_length=255),
),
migrations.AlterModelOptions(
name='degree',
options={'verbose_name_plural': 'Degrees'},
),
]
|
edx/course-discovery
|
course_discovery/apps/course_metadata/migrations/0091_auto_20180727_1844.py
|
Python
|
agpl-3.0
| 1,487
|
# -*- coding: utf-8 -*-
from ..utils.model_form import ModelForm
from ..models import security
from wtforms import HiddenField, SubmitField
from wtforms.fields import FormField
from wtforms_alchemy import ModelFormField
from wtforms.widgets import ListWidget
from wtforms.ext.sqlalchemy.fields import QuerySelectField
from flask_wtf.file import FileField, FileAllowed
from ..extensions import images
class UserFormRaw(ModelForm):
class Meta:
model = security.User
exclude = ['password', 'email', 'active', 'apikey', 'confirmed_at']
class UserForm(UserFormRaw):
logo = FileField('image', validators=[
FileAllowed(images, 'Images only!')
])
submit = SubmitField(u'Modifier')
|
odtvince/APITaxi
|
APITaxi/forms/user.py
|
Python
|
agpl-3.0
| 716
|
import os
from django.core.management.base import BaseCommand
from xbrowse_server.base.models import Project
class Command(BaseCommand):
def add_arguments(self, parser):
parser.add_argument('args', nargs='*')
def handle(self, *args, **options):
project_id = args[0]
coverage_dir = args[1]
project = Project.objects.get(project_id=project_id)
files = os.listdir(coverage_dir)
full_path_dir = os.path.abspath(coverage_dir)
for individual in project.individual_set.all():
indiv_id = individual.indiv_id
full_path = None
if '%s.callable.bed.gz' % indiv_id in files:
full_path = '%s/%s.callable.bed.gz' % (full_path_dir, indiv_id)
elif '%s.bam.bed.gz' % indiv_id in files:
full_path = '%s/%s.bam.bed.gz' % (full_path_dir, indiv_id)
elif '%s.bed.gz' % indiv_id in files:
full_path = '%s/%s.bed.gz' % (full_path_dir, indiv_id)
if full_path:
individual.coverage_file = full_path
individual.save()
|
macarthur-lab/xbrowse
|
xbrowse_server/base/management/commands/add_coverage_to_project.py
|
Python
|
agpl-3.0
| 1,115
|
# -*- coding: utf-8 -*-
# Copyright 2015 Eficent - Jordi Ballester Alomar
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl.html).
from odoo import api, fields, models
class AnalyticAccountOpen(models.TransientModel):
_name = 'analytic.account.open'
_description = 'Open single analytic account'
analytic_account_id = fields.Many2one(
'account.analytic.account',
'Analytic Account',
required=True
)
include_child = fields.Boolean(
'Include child accounts',
default=True
)
@api.model
def _get_child_analytic_accounts(self, curr_id):
result = {}
result[curr_id] = True
# Now add the children
self.env.cr.execute('''
WITH RECURSIVE children AS (
SELECT parent_id, id
FROM account_analytic_account
WHERE parent_id = %s
UNION ALL
SELECT a.parent_id, a.id
FROM account_analytic_account a
JOIN children b ON(a.parent_id = b.id)
)
SELECT * FROM children order by parent_id
''', (curr_id,))
res = self.env.cr.fetchall()
for x, y in res:
result[y] = True
return result
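    # Illustrative only: for a hypothetical hierarchy 10 -> 11 -> 12 (parent to
    # child), _get_child_analytic_accounts(10) returns the ids of the whole
    # subtree as dict keys, e.g. {10: True, 11: True, 12: True}.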
@api.multi
def analytic_account_open_window(self):
self.ensure_one()
act_window_id = self.env.ref(
'analytic.action_account_analytic_account_form')
result = act_window_id.read()[0]
acc_id = self.analytic_account_id.id
acc_ids = []
if self.include_child:
acc_ids = self._get_child_analytic_accounts(acc_id)
else:
acc_ids.append(acc_id)
result['domain'] = "[('id','in', ["+','.join(map(str, acc_ids))+"])]"
return result
|
sysadminmatmoz/pmis
|
analytic_account_open/wizards/analytic_account_open.py
|
Python
|
agpl-3.0
| 1,741
|
#!/usr/bin/env python
# -*- coding: utf8 -*-
# *****************************************************************
# ** PTS -- Python Toolkit for working with SKIRT **
# ** © Astronomical Observatory, Ghent University **
# *****************************************************************
# Import the relevant PTS classes and modules
from pts.core.basics.configuration import ConfigurationDefinition
# -----------------------------------------------------------------
# Create the configuration definition
definition = ConfigurationDefinition()
definition.add_optional("input_path", "directory_path", "path to the input directory")
definition.add_optional("output_path", "directory_path", "path to the output directory")
definition.add_flag("track_record", "track record")
definition.add_flag("plot_track_record_if_exception", "plot track record if exception", True)
definition.add_flag("find_apertures", "find apertures", True)
definition.add_optional("principal_region", "file_path", "path to a region file with a contour for the principal galaxy")
definition.add_flag("remove_apertures", "remove apertures")
definition.add_section("aperture_removal", "aperture removal")
definition.sections["aperture_removal"].add_optional("expansion_factor", "real", 1.0)
definition.add_section("fetching", "fetching")
#definition.sections["fetching"].add_flag("use_catalog_file", "use catalog file")
#definition.sections["fetching"].add_optional("catalog_path", "file_path", "catalog path")
definition.add_section("detection", "detection")
definition.sections["detection"].add_flag("use_d25", "use D25")
definition.sections["detection"].add_optional("d25_expansion_factor", "real", "D25 expansion factor", 1.2)
definition.sections["detection"].add_optional("initial_radius", "real", 20.0)
definition.sections["detection"].add_optional("detection_method", "string", "detection method", "segmentation")
definition.sections["detection"].add_flag("allow_overlap", "Do not normally allow overlap between the center segment and the background mask of the source")
definition.sections["detection"].add_flag("expand", "expand", True)
definition.sections["detection"].add_flag("always_subtract_background", "always subtract background", False)
definition.sections["detection"].add_optional("background_outer_factor", "real", "background outer factor", 1.3)
definition.sections["detection"].add_optional("background_est_method", "string", "polynomial")
definition.sections["detection"].add_flag("sigma_clip_background", "sigma clip background", True)
definition.sections["detection"].add_optional("min_pixels", "integer", "Minimum connected pixels", 5)
definition.sections["detection"].add_section("kernel", "kernel")
definition.sections["detection"].sections["kernel"].add_optional("fwhm", "real", "FWHM", 10.0)
definition.sections["detection"].sections["kernel"].add_optional("cutoff_level", "real", "cutoff_level", 4.0)
definition.sections["detection"].add_optional("sigma_level", "real", "threshold sigmas", 2.0)
definition.sections["detection"].add_optional("expansion_factor", "real", "expansion factor", 1.5)
definition.sections["detection"].add_optional("max_expansion_level", "integer", "maximum expansion level", 4)
definition.sections["detection"].add_section("debug", "debug")
definition.sections["detection"].sections["debug"].add_flag("no_segment_before", "no segment before")
definition.sections["detection"].sections["debug"].add_flag("no_segment_after", "no segment after")
definition.sections["detection"].sections["debug"].add_flag("no_segment", "no segment")
definition.sections["detection"].sections["debug"].add_flag("expand", "expand")
definition.sections["detection"].sections["debug"].add_flag("success", "success")
definition.sections["detection"].sections["debug"].add_flag("dilated", "dilated")
definition.sections["detection"].sections["debug"].add_flag("user_expansion", "user expansion")
definition.sections["detection"].sections["debug"].add_flag("overlap_before", "overlap before")
definition.sections["detection"].sections["debug"].add_flag("overlap_after", "overlap after")
definition.sections["detection"].add_flag("dilate", "dilate", True)
definition.sections["detection"].add_optional("dilation_factor", "real", "dilation factor", 1.3)
definition.sections["detection"].add_optional("iterations", "integer", "iterations", 5)
definition.sections["detection"].add_optional("connectivity", "integer", "connectivity", 2)
definition.sections["detection"].add_flag("user_expansion", "user expansion")
definition.sections["detection"].add_optional("user_expansion_factor", "real", "user expansion factor")
definition.sections["detection"].add_flag("remove_appendages", "remove appendages from overlapping mask")
definition.add_section("region", "region")
definition.sections["region"].add_optional("default_radius", "real", "default radius", 20.0)
definition.add_section("apertures", "apertures")
definition.sections["apertures"].add_optional("sigma_level", "real", "approximate isophotal extent", 4.0)
definition.sections["apertures"].add_optional("max_offset", "real", "maximal offset between the aperture center and galaxy position (in number of pixels) (None=no limit)")
# Flags
definition.add_flag("weak", "weak search", False)
# -----------------------------------------------------------------
|
SKIRT/PTS
|
magic/config/find_extended.py
|
Python
|
agpl-3.0
| 5,377
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import api, fields, models, _
from odoo.exceptions import UserError
class AccountInvoiceSend(models.TransientModel):
_name = 'account.invoice.send'
_inherit = 'account.invoice.send'
_description = 'Account Invoice Send'
partner_id = fields.Many2one('res.partner', compute='_get_partner', string='Partner')
    snailmail_is_letter = fields.Boolean('Send by Post', help='Allows sending the document by Snailmail (conventional postal delivery service)', default=lambda self: self.env.company.invoice_is_snailmail)
snailmail_cost = fields.Float(string='Stamp(s)', compute='_compute_snailmail_cost', readonly=True)
invalid_addresses = fields.Integer('Invalid Addresses Count', compute='_compute_invalid_addresses')
invalid_invoice_ids = fields.Many2many('account.move', string='Invalid Addresses', compute='_compute_invalid_addresses')
@api.depends('invoice_ids')
def _compute_invalid_addresses(self):
for wizard in self:
invalid_invoices = wizard.invoice_ids.filtered(lambda i: not self.env['snailmail.letter']._is_valid_address(i.partner_id))
wizard.invalid_invoice_ids = invalid_invoices
wizard.invalid_addresses = len(invalid_invoices)
@api.depends('invoice_ids')
def _get_partner(self):
self.partner_id = self.env['res.partner']
for wizard in self:
if wizard.invoice_ids and len(wizard.invoice_ids) == 1:
wizard.partner_id = wizard.invoice_ids.partner_id.id
@api.depends('snailmail_is_letter')
def _compute_snailmail_cost(self):
for wizard in self:
wizard.snailmail_cost = len(wizard.invoice_ids.ids)
def snailmail_print_action(self):
self.ensure_one()
letters = self.env['snailmail.letter']
for invoice in self.invoice_ids:
letter = self.env['snailmail.letter'].create({
'partner_id': invoice.partner_id.id,
'model': 'account.move',
'res_id': invoice.id,
'user_id': self.env.user.id,
'company_id': invoice.company_id.id,
'report_template': self.env.ref('account.account_invoices').id
})
letters |= letter
self.invoice_ids.filtered(lambda inv: not inv.is_move_sent).write({'is_move_sent': True})
if len(self.invoice_ids) == 1:
letters._snailmail_print()
else:
letters._snailmail_print(immediate=False)
def send_and_print_action(self):
if self.snailmail_is_letter:
if self.env['snailmail.confirm.invoice'].show_warning():
wizard = self.env['snailmail.confirm.invoice'].create({'model_name': _('Invoice'), 'invoice_send_id': self.id})
return wizard.action_open()
self._print_action()
return self.send_and_print()
def _print_action(self):
if not self.snailmail_is_letter:
return
if self.invalid_addresses and self.composition_mode == "mass_mail":
self.notify_invalid_addresses()
self.snailmail_print_action()
def send_and_print(self):
res = super(AccountInvoiceSend, self).send_and_print_action()
return res
def notify_invalid_addresses(self):
self.ensure_one()
self.env['bus.bus'].sendone(
(self._cr.dbname, 'res.partner', self.env.user.partner_id.id),
{'type': 'snailmail_invalid_address', 'title': _("Invalid Addresses"),
'message': _("%s of the selected invoice(s) had an invalid address and were not sent", self.invalid_addresses)}
)
def invalid_addresses_action(self):
return {
'name': _('Invalid Addresses'),
'type': 'ir.actions.act_window',
'view_mode': 'kanban,tree,form',
'res_model': 'account.move',
'domain': [('id', 'in', self.mapped('invalid_invoice_ids').ids)],
}
|
ddico/odoo
|
addons/snailmail_account/wizard/account_invoice_send.py
|
Python
|
agpl-3.0
| 4,064
|
# Copyright (c) 2016 by Ecreall under licence AGPL terms
# available on http://www.gnu.org/licenses/agpl.html
# licence: AGPL
# author: Amen Souissi
import colander
import deform
import hashlib
from functools import reduce
from zope.interface import implementer
from substanced.schema import NameSchemaNode
from substanced.content import content
from substanced.util import get_oid
from dace.descriptors import (
CompositeUniqueProperty,
SharedUniqueProperty,
SharedMultipleProperty)
from dace.util import getSite
from pontus.core import VisualisableElementSchema
from pontus.widget import (
FileWidget
)
from pontus.file import ObjectData
from pontus.form import FileUploadTempStore
from lac import _
from lac.core import (
VisualisableElement,
SearchableEntity,
SearchableEntitySchema,
DuplicableEntity,
ParticipativeEntity)
from lac.content.interface import IArtistInformationSheet
from lac.file import Image
from lac.views.widget import RichTextWidget
from lac.utilities.duplicates_utility import (
find_duplicates_artist)
@colander.deferred
def picture_widget(node, kw):
context = node.bindings['context']
request = node.bindings['request']
tmpstore = FileUploadTempStore(request)
source = None
root = getSite()
if context is not root:
if context.picture:
source = context.picture
return FileWidget(
tmpstore=tmpstore,
source=source,
file_type=['image']
)
def context_is_a_artist(context, request):
return request.registry.content.istype(context, 'artist')
class ArtistInformationSheetSchema(VisualisableElementSchema, SearchableEntitySchema):
"""Schema for artist"""
name = NameSchemaNode(
editing=context_is_a_artist,
)
id = colander.SchemaNode(
colander.String(),
widget=deform.widget.HiddenWidget(),
title=_('Id'),
missing=""
)
title = colander.SchemaNode(
colander.String(),
widget=deform.widget.HiddenWidget(),
title=_('Title')
)
description = colander.SchemaNode(
colander.String(),
widget=deform.widget.TextAreaWidget(rows=4, cols=60),
title=_('Description'),
missing=""
)
biography = colander.SchemaNode(
colander.String(),
widget=RichTextWidget(),
title=_("Biography"),
missing=""
)
picture = colander.SchemaNode(
ObjectData(Image),
widget=picture_widget,
title=_('Picture'),
required=False,
missing=None,
)
is_director = colander.SchemaNode(
colander.Boolean(),
widget=deform.widget.CheckboxWidget(),
label=_('Is a director'),
title='',
default=False,
missing=False
)
origin_oid = colander.SchemaNode(
colander.Int(),
widget=deform.widget.HiddenWidget(),
title=_('OID'),
missing=0
)
def get_artist_data(artists, artist_schema):
result = []
for artist in artists:
artist_data = artist.get_data(artist_schema)
if artist_data['picture']:
picture = artist_data['picture']
artist_data['picture'] = picture.get_data(None)
result.append(artist_data)
return result
@content(
'artist',
icon='glyphicon glyphicon-align-left',
)
@implementer(IArtistInformationSheet)
class ArtistInformationSheet(VisualisableElement, DuplicableEntity,
ParticipativeEntity, SearchableEntity):
"""Artist information sheet class"""
type_title = _('Artist information sheet')
icon = 'glyphicon glyphicon-star'
templates = {'default': 'lac:views/templates/artist_result.pt',
'bloc': 'lac:views/templates/artist_result.pt',
'diff': 'lac:views/templates/diff_artist_template.pt',
'duplicates': 'lac:views/templates/artist_duplicates.pt'}
picture = CompositeUniqueProperty('picture')
author = SharedUniqueProperty('author', 'contents')
creations = SharedMultipleProperty('creations', 'artists')
productions = SharedMultipleProperty('productions', 'artists')
def __init__(self, **kwargs):
super(ArtistInformationSheet, self).__init__(**kwargs)
self.hash_picture = None
self.hash_artist = None
self.hash_picture_fp()
self.hash_artist_data()
@property
def id(self):
return self.get_id()
def hash_picture_fp(self):
if self.picture:
m = hashlib.md5()
picture_r = self.picture.fp.readall()
self.picture.fp.seek(0)
m.update(picture_r)
self.hash_picture = m.digest()
else:
self.hash_picture = None
@property
def related_contents(self):
result = list(self.creations)
result.extend(list(self.productions))
return result
@property
def improved_artist(self):
original = getattr(self, 'original', None)
return original if original is not self else None
def get_id(self):
return str(get_oid(self, 0))
def replace_by(self, source):
if self is not source:
creations = source.creations
productions = source.productions
connections_to = source.connections_to
for creation in self.creations:
if creation not in creations:
source.addtoproperty('creations', creation)
creation.reindex()
self.setproperty('creations', [])
for production in self.productions:
if production not in productions:
source.addtoproperty('productions', production)
production.reindex()
self.setproperty('productions', [])
for connection in self.connections_to:
if connection not in connections_to:
source.addtoproperty('connections_to', connection)
self.setproperty('connections_to', [])
for branch in self.branches:
source.addtoproperty('branches', branch)
original = self.original
if original and original is not source:
source.setproperty('original', original)
self.setproperty('original', None)
source.add_contributors(self.contributors)
self.setproperty('branches', [])
return True
return False
def reject(self):
original = self.original
if original:
self.replace_by(original)
def hash_artist_data(self):
result = self.title
result += getattr(self, 'description', '')
result += getattr(self, 'biography', '')
result += str(getattr(self, 'is_director', False))
result += str(self.hash_picture)
result = result.replace(' ', '').strip()
m = hashlib.md5()
m.update(result.encode())
self.hash_artist = m.digest()
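    # Illustrative only: two ArtistInformationSheet objects whose title,
    # description, biography, is_director flag and picture bytes all match
    # produce the same hash_artist digest, which is what eq() below compares.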
    def eq(self, other):
        hash_artist = getattr(self, 'hash_artist', None)
        other_hash_artist = getattr(other, 'hash_artist', None)
        return hash_artist == other_hash_artist
def get_more_contents_criteria(self):
"return specific query, filter values"
artists = reduce(lambda result, x: result + getattr(x, 'artists', []),
filter(lambda x: 'published' in x.state, self.creations), [])
artists = filter(lambda x: 'published' in x.state, artists)
return None, {'objects': set(artists)}
def get_duplicates(self, states=('published', )):
return find_duplicates_artist(self, states)
|
ecreall/lagendacommun
|
lac/content/artist.py
|
Python
|
agpl-3.0
| 7,788
|
"""Add commentset fields.
Revision ID: a23e88f06478
Revises: 284c10efdbce
Create Date: 2021-03-22 02:54:30.416806
"""
from alembic import op
from sqlalchemy.sql import column, table
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'a23e88f06478'
down_revision = '284c10efdbce'
branch_labels = None
depends_on = None
commentset = table(
'commentset',
column('id', sa.Integer()),
column('last_comment_at', sa.TIMESTAMP(timezone=True)),
)
comment = table(
'comment',
column('id', sa.Integer()),
column('created_at', sa.TIMESTAMP(timezone=True)),
column('commentset_id', sa.Integer()),
)
def upgrade():
op.add_column(
'commentset',
sa.Column('last_comment_at', sa.TIMESTAMP(timezone=True), nullable=True),
)
op.add_column(
'commentset_membership',
sa.Column(
'is_muted',
sa.Boolean(),
nullable=False,
server_default=sa.sql.expression.false(),
),
)
op.alter_column('commentset_membership', 'is_muted', server_default=None)
op.execute(
commentset.update().values(
last_comment_at=sa.select([sa.func.max(comment.c.created_at)]).where(
comment.c.commentset_id == commentset.c.id
)
)
)
def downgrade():
op.drop_column('commentset_membership', 'is_muted')
op.drop_column('commentset', 'last_comment_at')
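# Typical Alembic CLI usage for this revision (run wherever alembic.ini is
# configured for this project):
#
#   alembic upgrade a23e88f06478
#   alembic downgrade 284c10efdbce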
|
hasgeek/funnel
|
migrations/versions/a23e88f06478_add_commentset_fields.py
|
Python
|
agpl-3.0
| 1,439
|
"""
Tests for Calendar Sync views.
"""
import ddt
from django.test import TestCase
from django.urls import reverse
from openedx.features.calendar_sync.api import SUBSCRIBE, UNSUBSCRIBE
from xmodule.modulestore.tests.django_utils import SharedModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory
TEST_PASSWORD = 'test'
@ddt.ddt
class TestCalendarSyncView(SharedModuleStoreTestCase, TestCase):
"""Tests for the calendar sync view."""
@classmethod
def setUpClass(cls):
""" Set up any course data """
super(TestCalendarSyncView, cls).setUpClass()
cls.course = CourseFactory.create()
def setUp(self):
super(TestCalendarSyncView, self).setUp() # lint-amnesty, pylint: disable=super-with-arguments
self.user = self.create_user_for_course(self.course)
self.client.login(username=self.user.username, password=TEST_PASSWORD)
self.calendar_sync_url = reverse('openedx.calendar_sync', args=[self.course.id])
@ddt.data(
# Redirect on successful subscribe
[{'tool_data': "{{'toggle_data': '{}'}}".format(SUBSCRIBE)}, 302, ''],
# Redirect on successful unsubscribe
[{'tool_data': "{{'toggle_data': '{}'}}".format(UNSUBSCRIBE)}, 302, ''],
# 422 on unknown toggle_data
[{'tool_data': "{{'toggle_data': '{}'}}".format('gibberish')}, 422,
'Toggle data was not provided or had unknown value.'],
# 422 on no toggle_data
[{'tool_data': "{{'random_data': '{}'}}".format('gibberish')}, 422,
'Toggle data was not provided or had unknown value.'],
# 422 on no tool_data
[{'nonsense': "{{'random_data': '{}'}}".format('gibberish')}, 422, 'Tool data was not provided.'],
)
@ddt.unpack
def test_course_dates_fragment(self, data, expected_status_code, contained_text):
response = self.client.post(self.calendar_sync_url, data)
assert response.status_code == expected_status_code
assert contained_text in str(response.content)
|
stvstnfrd/edx-platform
|
openedx/features/calendar_sync/tests/test_views.py
|
Python
|
agpl-3.0
| 2,051
|
"""
Test the about xblock
"""
from django.test.utils import override_settings
from django.core.urlresolvers import reverse
from .helpers import LoginEnrollmentTestCase
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from courseware.tests.modulestore_config import TEST_DATA_MIXED_MODULESTORE
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory
@override_settings(MODULESTORE=TEST_DATA_MIXED_MODULESTORE)
class AboutTestCase(LoginEnrollmentTestCase, ModuleStoreTestCase):
    def setUp(self):
        super(AboutTestCase, self).setUp()
self.course = CourseFactory.create()
self.about = ItemFactory.create(
category="about", parent_location=self.course.location,
data="OOGIE BLOOGIE", display_name="overview"
)
def test_logged_in(self):
self.setup_user()
url = reverse('about_course', args=[self.course.id])
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
self.assertIn("OOGIE BLOOGIE", resp.content)
def test_anonymous_user(self):
url = reverse('about_course', args=[self.course.id])
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
self.assertIn("OOGIE BLOOGIE", resp.content)
|
pelikanchik/edx-platform
|
lms/djangoapps/courseware/tests/test_about.py
|
Python
|
agpl-3.0
| 1,252
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (c) 2016 Didotech srl (http://www.didotech.com)
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
##############################################################################
from team_system_template import cash_book, account_template, tax_template
from team_system_template import deadline_book, industrial_accounting_template, industrial_accounting
tax_data = tax_template.format(**{
    'taxable': 240000000,  # Taxable amount (6 decimal places?)
    'vat_code': 22,  # VAT rate or exemption code
    'agro_vat_code': 0,  # Agricultural compensation VAT rate
    'vat11_code': 0,
    'vat_total': 52800}
) * 8
account_data = account_template.format(**{
    'account_proceeds': 5810502,
    'total_proceeds': 240000000  # Taxable amount (6 decimal places?)
}) * 8
cash_book_values = {
    'company_id': 1,
    'version': 3,
    'type': 0,
    'partner_id': 34,
    'name': 'Cliente prova con nome estremamente lungo'[:32],
    'address': 'via Tre Porcellini'[:30],
    'zip': 35020,
    'city': 'Padova',
    'province': 'PD'[:2],
    'fiscalcode': 'RSSMRA85T10A562S',
    # Kept as a string: a leading-zero numeric literal is octal in Python 2,
    # which would silently corrupt the value.
    'vat_number': '01032450072',
    'individual': True and 'S' or 'N',  # 134
    'space': 0,  # Position of the space between surname and first name
    # Foreign data:
    'country': 0,  # Foreign country of residence code. Where does the code come from???
    'vat_ext': '',  # Only 12 characters??? It was supposed to be 14... E.g. (Croatia): HR12345678901, Sweden: SE999999999901
    'fiscalcode_ext': '',
    # Birth data; if these fields are empty they are taken from the fiscal code.
    'sex': 'M',  # M/F 173
    'birthday': '01012001',  # ddmmyyyy (string: a leading-zero literal is octal in Python 2)
    'city_of_birth': 'Palermo',  # KGB?
    'province_of_birth': 'PA',
    'phone_prefix': '091',
    'phone': '1234567',
    'fax_prefix': '0921',
    'fax': '7890123',
    # Suppliers only 246 -
    'account_code': 9999999,  # Usual cost account code
    'payment_conditions_code': 4444,  # Payment terms code
    'abi': 3002,
    'cab': 3280,
    'partner_interm': 2,  # Customer/supplier intermediate code 267
    # Invoice data 268
    'causal': 1,  # Transaction reason code
    # Sales invoice=001
    # Credit note = 002
    # Purchase invoice=011
    # Receipt=020
    # Miscellaneous journal entries=027
    # (A "multi" reason code linked to a VAT reason code can also be given, e.g. 101 linked to 1)
    # Travel agency sale=reason code linked to 1 or 20 with the travel agency field = S
    # Travel agency purchase=reason code linked to 11 with the travel agency field = S
    'causal_description': 'FATT. VENDITA',
    'causal_ext': 'Causale aggiuntiva',
    'causal_ext_1': 'Causale aggiuntiva 1',
    'causal_ext_2': 'Causale aggiuntiva 2',
    'registration_date': 0,  # 0 means same as the document date
    'document_date': '01012016',  # ddmmyyyy (string: a leading-zero literal is octal in Python 2)
    'document_number': 345,  # Supplier document number including the sectional
    'document_number_no_sectional': 34,  # Document number (doc number without the sectional)
    'vat_sectional': 22,
    'account_extract': 1501,  # Statement of account: entry number (doc number + sectional, joined:
    # e.g. 1501 for invoice number 15 of sectional 1)
    'account_extract_year': 2016,  # Statement of account: entry year (invoice issue year, YYYY format)
    'ae_currency': 0,  # Statement of account in currency: foreign currency code
    'ae_exchange_rate': 1000000,  # 13(7+6 dec)
    'ae_date': 23012016,
    'ae_total_currency': 240000,  # 16(13+3dec)
    'ae_total_currency_vat': 52800,  # 16(13+3dec)
    'plafond_month': '012016',  # MMYYYY, PLAFOND reference and deferred invoices (string: leading zero would be octal)
    # VAT data
    'tax_data': tax_data,
    # Invoice total
    'invoice_total': 240000000,  # Taxable amount (6 decimal places?)
    # Revenue/cost accounts
    'account_data': account_data,
    # Data for any invoice payment or miscellaneous entries
    # Publishing VAT
    'vat_collectability': 0,  # 0=Immediate 1=Deferred 2=Deferred per DL 185/08
    # 3=Immediate for credit/debit notes 4=Split payment
    # R=Prepayment C=Accrual
    # N=Does not update the statement of account
    'val_0': 0,
    'empty': ''
}
deadline_book_values = {
    'company_id': 1,
    'version': 3,
    'type': 1,
    # INTRASTAT data
    'val_0': 0,
    'empty': '',
    # Portfolio data
    'payment_condition': 0,  # ??? Payment terms code
    'abi': 0,  # ???
    'cab': 0,  # ???
    'agency_description': '',  # Agency description
    'total_number_of_payments': 0,  # ??? Total number of installments
    'invoice_total': 0,  # ??? Document total (invoice total)
    # Bill details
    'payment_count': 0,  # ??? Installment number
    'payment_deadline': 0,  # ??? Due date
    'document_type': 0,  # Bill type
    # 1=Draft
    # 2=Bank receipt (RiBa)
    # 3=Direct remittance
    # 4=Assignments
    # 5=Descriptive only
    # 6=Cash on delivery
    'payment_total': 0,  # ??? Bill amount
    'payment_total_currency': 0,  # Portfolio in foreign currency: bill amount in currency
    'total_stamps': 0,  # Stamp duty amount
    'payment_stamp_currency': 0,  # Portfolio in foreign currency: stamp duty amount in currency
    'payment_state': '',  # ??? Bill state 0=Open 1=Closed 2=Unpaid 3=Custom
    'payment_subtype': '',  # Direct remittance subtype
    'agent_code': 0,  # Agent code
    'paused_payment': '',  # Suspended bill
    'cig': '',
    'cup': '',
    # INTRASTAT GOODS movements, additional data...
}
def get_accounting_data():
    empty_accounting = {
        'val_0': 0,
        'empty': '',
        'causal': 0,  # ??? Industrial accounting reason code
        # Sales invoice = 001
        # Purchase invoice = 002
        'account': 0,  # ??? Industrial accounting account
        # 1 = systems
        # 2 = rentals
        # 3 = home automation
        'account_proceeds': 0,  # ??? Expense/revenue item (same as the general-ledger revenue accounts but with an extra 0)
        # 58100501
        # 58100502
        # 58100503
        'sign': '',  # ??? Sign (D=debit or A=credit)
        'total_ammount': 0,  # Transaction amount or total cost ('ammount' matches the template key)
    }
    accounting_data = ''
    for _ in range(20):
        accounting_data += industrial_accounting_template.format(**empty_accounting)
    return accounting_data
industrial_accounting_values = {
    'company_id': 1,
    'version': 3,
    'type': 2,
    'val_0': 0,
    # 'empty': '',
    # INDUSTRIAL ACCOUNTING 8
    'accounting_data': get_accounting_data()
}
}
if __name__ == '__main__':
record_type = 0
if record_type == 0:
record = cash_book.format(**cash_book_values)
elif record_type == 1:
record = deadline_book.format(**deadline_book_values)
elif record_type == 2:
record = industrial_accounting.format(**industrial_accounting_values)
print record
# for s in record:
# print 'X:', s
print len(record)
|
iw3hxn/LibrERP
|
export_teamsystem/test_data.py
|
Python
|
agpl-3.0
| 8,152
|
# -*- coding: utf-8 -*-
# (c) 2015 ACSONE SA/NV, Dhinesh D
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
{
'name': "Inactive Sessions Timeout",
'summary': """
This module disable all inactive sessions since a given delay""",
'author': "ACSONE SA/NV, "
"Dhinesh D, "
"Jesse Morgan, "
"LasLabs, "
"Odoo Community Association (OCA)",
'maintainer': 'Odoo Community Association (OCA)',
'website': "http://acsone.eu",
'category': 'Tools',
'version': '10.0.1.0.2',
'license': 'AGPL-3',
'data': [
'data/ir_config_parameter_data.xml'
],
'installable': True,
}
|
ovnicraft/server-tools
|
auth_session_timeout/__manifest__.py
|
Python
|
agpl-3.0
| 691
|
from elasticsearch import helpers
from c2corg_api.scripts.migration.batch import Batch
from elasticsearch.helpers import BulkIndexError
import logging
log = logging.getLogger(__name__)
class ElasticBatch(Batch):
"""A batch implementation to do bulk inserts for ElasticSearch.
Example usage:
batch = ElasticBatch(client, 1000)
with batch:
...
batch.add({
'_op_type': 'index',
'_index': index_name,
'_type': SearchDocument._doc_type.name,
'_id': document_id,
'title': 'Abc'
})
"""
def __init__(self, client, batch_size):
super(ElasticBatch, self).__init__(client, batch_size)
self.client = client
self.actions = []
def add(self, action):
self.actions.append(action)
self.flush_or_not()
def should_flush(self):
return len(self.actions) > self.batch_size
def flush(self):
if self.actions:
try:
helpers.bulk(self.client, self.actions)
except BulkIndexError:
# when trying to delete a document that does not exist, an
# error is raised, and other documents are not inserted
log.warning(
'error sending bulk update to ElasticSearch',
exc_info=True)
self.actions = []
|
c2corg/v6_api
|
c2corg_api/scripts/es/es_batch.py
|
Python
|
agpl-3.0
| 1,426
|
from rest_framework.permissions import BasePermission, SAFE_METHODS
class IsOwnerOrReadOnly(BasePermission):
"""
Check if request is safe or authenticated user is owner.
"""
def has_object_permission(self, request, view, obj):
return request.method in SAFE_METHODS or view.get_stream().owner == request.user
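# A minimal usage sketch (StreamDetailView and its get_stream() helper are
# assumptions about the surrounding app): the permission is attached via
# permission_classes, and the view must expose the owning stream through
# get_stream(), which has_object_permission() calls above.
#
#   from rest_framework.generics import RetrieveUpdateDestroyAPIView
#
#   class StreamDetailView(RetrieveUpdateDestroyAPIView):
#       permission_classes = (IsOwnerOrReadOnly,)
#
#       def get_stream(self):
#           return self.get_object()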
|
ballotify/django-backend
|
ballotify/apps/api_v1/streams/permissions.py
|
Python
|
agpl-3.0
| 335
|
################################################################################
#
# Copyright 2015-2020 Félix Brezo and Yaiza Rubio
#
# This program is part of OSRFramework. You can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
################################################################################
__author__ = "Felix Brezo, Yaiza Rubio <contacto@i3visio.com>"
__version__ = "2.0"
from osrframework.utils.platforms import Platform
class Forocoches(Platform):
"""A <Platform> object for Forocoches"""
def __init__(self):
self.platformName = "Forocoches"
self.tags = ["opinions", "activism"]
# Add the URL for enumeration below
#self.urlEnumeration = "http://www.forocoches.com/foro/member.php?u=" + "<HERE_GOES_THE_USER_ID>"
########################
# Defining valid modes #
########################
self.isValidMode = {}
self.isValidMode["phonefy"] = False
self.isValidMode["usufy"] = True
self.isValidMode["searchfy"] = False
######################################
# Search URL for the different modes #
######################################
# Strings with the URL for each and every mode
self.url = {}
#self.url["phonefy"] = "http://anyurl.com//phone/" + "<phonefy>"
self.url["usufy"] = "http://www.forocoches.com/foro/member.php?username=" + "<usufy>"
#self.url["searchfy"] = "http://anyurl.com/search/" + "<searchfy>"
######################################
# Whether the user needs credentials #
######################################
self.needsCredentials = {}
#self.needsCredentials["phonefy"] = False
self.needsCredentials["usufy"] = False
#self.needsCredentials["searchfy"] = False
#################
# Valid queries #
#################
        # Regular expressions that a query must match to be valid in each mode
self.validQuery = {}
# The regular expression '.+' will match any query.
#self.validQuery["phonefy"] = ".*"
self.validQuery["usufy"] = ".+"
#self.validQuery["searchfy"] = ".*"
###################
# Not_found clues #
###################
        # Strings whose presence in the response implies the queried user does not exist
self.notFoundText = {}
#self.notFoundText["phonefy"] = []
self.notFoundText["usufy"] = ["main error message"]
#self.notFoundText["searchfy"] = []
#########################
# Fields to be searched #
#########################
self.fieldsRegExp = {}
# Definition of regular expressions to be searched in phonefy mode
#self.fieldsRegExp["phonefy"] = {}
# Example of fields:
#self.fieldsRegExp["phonefy"]["i3visio.location"] = ""
# Definition of regular expressions to be searched in usufy mode
self.fieldsRegExp["usufy"] = {}
# Example of fields:
#self.fieldsRegExp["usufy"]["i3visio.location"] = ""
# Definition of regular expressions to be searched in searchfy mode
#self.fieldsRegExp["searchfy"] = {}
# Example of fields:
#self.fieldsRegExp["searchfy"]["i3visio.location"] = ""
################
# Fields found #
################
        # This attribute will be populated when the program runs.
self.foundFields = {}
|
i3visio/osrframework
|
osrframework/wrappers/pending/cloudflare/forocoches.py
|
Python
|
agpl-3.0
| 4,071
|
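A small sketch of how the attributes above are typically consumed: the '<usufy>' placeholder in the lookup URL is replaced by the nickname being checked. The substitution below is an illustration; the framework's real code path lives in the Platform base class, which is not shown here.
platform = Forocoches()
nick = 'exampleuser'  # hypothetical nickname
if platform.isValidMode['usufy']:
    url = platform.url['usufy'].replace('<usufy>', nick)
    print(url)  # http://www.forocoches.com/foro/member.php?username=exampleuser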
from datetime import date
from django.core.validators import URLValidator
from django.utils.timezone import now
from survey.tests.models import BaseModelTest
class TestSurvey(BaseModelTest):
def test_unicode(self):
"""Unicode generation."""
self.assertIsNotNone(str(self.survey))
def test_questions(self):
"""Recovering a list of questions from a survey."""
questions = self.survey.questions.all()
self.assertEqual(len(questions), len(self.data))
def test_absolute_url(self):
"""Absoulte url is not None and do not raise error."""
self.assertIsNotNone(self.survey.get_absolute_url())
def test_latest_answer(self):
"""the lastest answer date is returned."""
self.assertIsInstance(self.survey.latest_answer_date(), date)
def test_publish_date(self):
"""the pblish date must be None or datetime date instance."""
self.assertIsInstance(self.survey.publish_date, date)
def test_expiration_date(self):
"""expirationdate must be datetime date instance or None"""
self.assertIsInstance(self.survey.expire_date, date)
def test_expiration_date_is_in_future(self):
"""by default the expiration should be a week in the future"""
self.assertGreater(self.survey.expire_date, now())
def test_redirect_url(self):
self.assertIsNone(URLValidator()(self.survey.redirect_url))
|
Pierre-Sassoulas/django-survey
|
survey/tests/models/test_survey.py
|
Python
|
agpl-3.0
| 1,428
|
from pathlib import Path
import pytest
from loguru import logger
from libretime_shared.logging import (
DEBUG,
INFO,
create_task_logger,
level_from_name,
setup_logger,
)
@pytest.mark.parametrize(
"name,level_name,level_no",
[
("error", "error", 40),
("warning", "warning", 30),
("info", "info", 20),
("debug", "debug", 10),
("trace", "trace", 5),
],
)
def test_level_from_name(name, level_name, level_no):
level = level_from_name(name)
assert level.name == level_name
assert level.no == level_no
def test_level_from_name_invalid():
with pytest.raises(ValueError):
level_from_name("invalid")
def test_setup_logger(tmp_path: Path):
log_filepath = tmp_path / "test.log"
extra_log_filepath = tmp_path / "extra.log"
setup_logger(INFO, log_filepath)
extra_logger = create_task_logger(DEBUG, extra_log_filepath, True)
logger.info("test info")
extra_logger.info("extra info")
logger.debug("test debug")
extra_logger.complete()
logger.complete()
assert len(log_filepath.read_text(encoding="utf-8").splitlines()) == 1
assert len(extra_log_filepath.read_text(encoding="utf-8").splitlines()) == 1
|
LibreTime/libretime
|
shared/tests/logging_test.py
|
Python
|
agpl-3.0
| 1,238
|
# Main network and testnet3 definitions
params = {
'bitcoin_main': {
'pubkey_address': 50,
'script_address': 9,
'genesis_hash': '00000c7c73d8ce604178dae13f0fc6ec0be3275614366d44b1b4b5c6e238c60c'
},
'bitcoin_test': {
'pubkey_address': 88,
'script_address': 188,
'genesis_hash': '000003ae7f631de18a457fa4fa078e6fa8aff38e258458f8189810de5d62cede'
}
}
|
mazaclub/tate-server
|
src/networks.py
|
Python
|
agpl-3.0
| 413
|
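For context, the 'pubkey_address' value is the version byte prepended to a 20-byte HASH160 before Base58Check encoding yields an address string. The helper below is a generic, self-contained sketch of that encoding (Python 3), not part of the server code.
import hashlib
_B58 = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'
def base58check(version, payload):
    # Version byte + payload, followed by a 4-byte double-SHA256 checksum.
    data = bytes([version]) + payload
    check = hashlib.sha256(hashlib.sha256(data).digest()).digest()[:4]
    num = int.from_bytes(data + check, 'big')
    out = ''
    while num:
        num, rem = divmod(num, 58)
        out = _B58[rem] + out
    # Each leading zero byte is encoded as a literal '1'.
    pad = len(data + check) - len((data + check).lstrip(b'\x00'))
    return '1' * pad + out
addr = base58check(params['bitcoin_main']['pubkey_address'], b'\x00' * 20)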
# © 2008-2020 Dorin Hongu <dhongu(@)gmail(.)com>
# See README.rst file on addons root folder for license details
from odoo import fields, models
class IntrastatTransaction(models.Model):
_name = "l10n_ro_intrastat.transaction"
_description = "Intrastat Transaction"
_rec_name = "description"
code = fields.Char("Code", required=True, readonly=True)
parent_id = fields.Many2one("l10n_ro_intrastat.transaction", "Parent Code", readonly=True)
description = fields.Text("Description", readonly=True)
_sql_constraints = [
("l10n_ro_intrastat_trcodeunique", "UNIQUE (code)", "Code must be unique."),
]
class IntrastatTransportMode(models.Model):
_name = "l10n_ro_intrastat.transport_mode"
_description = "Intrastat Transport Mode"
code = fields.Char("Code", required=True, readonly=True)
name = fields.Char("Description", readonly=True)
_sql_constraints = [
("l10n_ro_intrastat_trmodecodeunique", "UNIQUE (code)", "Code must be unique."),
]
|
dhongu/l10n-romania
|
l10n_ro_intrastat/models/l10n_ro_intrastat.py
|
Python
|
agpl-3.0
| 1,017
|
"""Post admins
Revision ID: 449914911f93
Revises: 2420dd9c9949
Create Date: 2013-12-03 23:03:02.404457
"""
# revision identifiers, used by Alembic.
revision = '449914911f93'
down_revision = '2420dd9c9949'
from alembic import op
import sqlalchemy as sa
def upgrade():
op.create_table(
'jobpost_admin',
sa.Column('created_at', sa.DateTime(), nullable=False),
sa.Column('updated_at', sa.DateTime(), nullable=False),
sa.Column('user_id', sa.Integer(), nullable=False),
sa.Column('jobpost_id', sa.Integer(), nullable=False),
sa.ForeignKeyConstraint(['jobpost_id'], ['jobpost.id']),
sa.ForeignKeyConstraint(['user_id'], ['user.id']),
sa.PrimaryKeyConstraint('user_id', 'jobpost_id'),
)
def downgrade():
op.drop_table('jobpost_admin')
|
hasgeek/hasjob
|
migrations/versions/449914911f93_post_admins.py
|
Python
|
agpl-3.0
| 812
|
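The migration above creates a plain association table with a composite primary key. A hypothetical ORM-side declaration matching it might look like the sketch below (modern SQLAlchemy; the model classes and their columns beyond the migration are assumptions, not hasjob's actual models).
import sqlalchemy as sa
from sqlalchemy.orm import declarative_base, relationship
Base = declarative_base()
class User(Base):
    __tablename__ = 'user'
    id = sa.Column(sa.Integer, primary_key=True)
class JobPost(Base):
    __tablename__ = 'jobpost'
    id = sa.Column(sa.Integer, primary_key=True)
    # jobpost_admin doubles as the secondary table of a many-to-many.
    admins = relationship('User', secondary='jobpost_admin')
class JobPostAdmin(Base):
    __tablename__ = 'jobpost_admin'
    created_at = sa.Column(sa.DateTime, nullable=False)
    updated_at = sa.Column(sa.DateTime, nullable=False)
    user_id = sa.Column(sa.Integer, sa.ForeignKey('user.id'), primary_key=True)
    jobpost_id = sa.Column(sa.Integer, sa.ForeignKey('jobpost.id'), primary_key=True)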
"""
Tests of CourseKeys and CourseLocators
"""
import ddt
from bson.objectid import ObjectId
from opaque_keys import InvalidKeyError
from opaque_keys.edx.keys import CourseKey
from opaque_keys.edx.locator import BlockUsageLocator, CourseLocator
from opaque_keys.edx.tests import LocatorBaseTest, TestDeprecated
@ddt.ddt
class TestCourseKeys(LocatorBaseTest, TestDeprecated):
"""
Tests of :class:`.CourseKey` and :class:`.CourseLocator`
"""
@ddt.data(
"foo/bar/baz",
)
def test_deprecated_roundtrip(self, course_id):
self.assertEquals(
course_id,
unicode(CourseKey.from_string(course_id))
)
@ddt.data(
"foo!/bar/baz",
)
def test_invalid_chars_in_ssck_string(self, course_id):
with self.assertRaises(InvalidKeyError):
CourseKey.from_string(course_id)
@ddt.data(
"org/course/run/foo",
"org/course",
"org+course+run+foo",
"org+course",
)
def test_invalid_format_location(self, course_id):
with self.assertRaises(InvalidKeyError):
CourseLocator.from_string(course_id)
def test_make_usage_key(self):
depr_course = CourseKey.from_string('org/course/run')
self.assertEquals(
unicode(BlockUsageLocator(depr_course, 'category', 'name', deprecated=True)),
unicode(depr_course.make_usage_key('category', 'name'))
)
course = CourseKey.from_string('course-v1:org+course+run')
self.assertEquals(
unicode(BlockUsageLocator(course, 'block_type', 'block_id')),
unicode(course.make_usage_key('block_type', 'block_id'))
)
def test_convert_deprecation(self):
depr_course = CourseKey.from_string('org/course/run')
course = CourseKey.from_string('course-v1:org+course+run')
self.assertEquals(unicode(depr_course.replace(deprecated=False)), unicode(course))
self.assertEquals(unicode(course.replace(deprecated=True)), unicode(depr_course))
def test_course_constructor_underspecified(self):
with self.assertRaises(InvalidKeyError):
CourseLocator()
with self.assertRaises(InvalidKeyError):
CourseLocator(branch='published')
def test_course_constructor_bad_version_guid(self):
with self.assertRaises(ValueError):
CourseLocator(version_guid="012345")
with self.assertRaises(InvalidKeyError):
CourseLocator(version_guid=None)
def test_course_constructor_version_guid(self):
# pylint: disable=no-member,protected-access
# generate a random location
test_id_1 = ObjectId()
test_id_1_loc = str(test_id_1)
testobj_1 = CourseLocator(version_guid=test_id_1)
self.check_course_locn_fields(testobj_1, version_guid=test_id_1)
self.assertEqual(str(testobj_1.version_guid), test_id_1_loc)
testobj_1_string = u'@'.join((testobj_1.VERSION_PREFIX, test_id_1_loc))
self.assertEqual(testobj_1._to_string(), testobj_1_string)
self.assertEqual(str(testobj_1), u'course-v1:' + testobj_1_string)
self.assertEqual(testobj_1.html_id(), u'course-v1:' + testobj_1_string)
self.assertEqual(testobj_1.version, test_id_1)
# Test using a given string
test_id_2_loc = '519665f6223ebd6980884f2b'
test_id_2 = ObjectId(test_id_2_loc)
testobj_2 = CourseLocator(version_guid=test_id_2)
self.check_course_locn_fields(testobj_2, version_guid=test_id_2)
self.assertEqual(str(testobj_2.version_guid), test_id_2_loc)
testobj_2_string = u'@'.join((testobj_2.VERSION_PREFIX, test_id_2_loc))
self.assertEqual(testobj_2._to_string(), testobj_2_string)
self.assertEqual(str(testobj_2), u'course-v1:' + testobj_2_string)
self.assertEqual(testobj_2.html_id(), u'course-v1:' + testobj_2_string)
self.assertEqual(testobj_2.version, test_id_2)
@ddt.data(
' mit.eecs',
'mit.eecs ',
CourseLocator.VERSION_PREFIX + '@mit.eecs',
BlockUsageLocator.BLOCK_PREFIX + '@black+mit.eecs',
'mit.ee cs',
'mit.ee,cs',
'mit.ee+cs',
'mit.ee&cs',
'mit.ee()cs',
CourseLocator.BRANCH_PREFIX + '@this',
'mit.eecs+' + CourseLocator.BRANCH_PREFIX,
'mit.eecs+' + CourseLocator.BRANCH_PREFIX + '@this+' + CourseLocator.BRANCH_PREFIX + '@that',
'mit.eecs+' + CourseLocator.BRANCH_PREFIX + '@this+' + CourseLocator.BRANCH_PREFIX,
'mit.eecs+' + CourseLocator.BRANCH_PREFIX + '@this ',
'mit.eecs+' + CourseLocator.BRANCH_PREFIX + '@th%is ',
u'\ufffd',
)
def test_course_constructor_bad_package_id(self, bad_id):
"""
Test all sorts of badly-formed package_ids (and urls with those package_ids)
"""
with self.assertRaises(InvalidKeyError):
CourseLocator(org=bad_id, course='test', run='2014_T2')
with self.assertRaises(InvalidKeyError):
CourseLocator(org='test', course=bad_id, run='2014_T2')
with self.assertRaises(InvalidKeyError):
CourseLocator(org='test', course='test', run=bad_id)
with self.assertRaises(InvalidKeyError):
CourseKey.from_string(u'course-v1:test+{}+2014_T2'.format(bad_id))
@ddt.data(
'course-v1:',
'course-v1:/mit.eecs',
'http:mit.eecs',
'course-v1:mit+course+run{}@branch'.format(CourseLocator.BRANCH_PREFIX),
'course-v1:mit+course+run+',
)
def test_course_constructor_bad_url(self, bad_url):
with self.assertRaises(InvalidKeyError):
CourseKey.from_string(bad_url)
def test_course_constructor_url(self):
# Test parsing a url when it starts with a version ID and there is also a block ID.
# This hits the parsers parse_guid method.
test_id_loc = '519665f6223ebd6980884f2b'
testobj = CourseKey.from_string("course-v1:{}@{}+{}@hw3".format(
CourseLocator.VERSION_PREFIX, test_id_loc, CourseLocator.BLOCK_PREFIX
))
self.check_course_locn_fields(
testobj,
version_guid=ObjectId(test_id_loc)
)
def test_course_constructor_url_package_id_and_version_guid(self):
test_id_loc = '519665f6223ebd6980884f2b'
testobj = CourseKey.from_string(
'course-v1:mit.eecs+honors.6002x+2014_T2+{}@{}'.format(CourseLocator.VERSION_PREFIX, test_id_loc)
)
self.check_course_locn_fields(
testobj,
org='mit.eecs',
course='honors.6002x',
run='2014_T2',
version_guid=ObjectId(test_id_loc)
)
def test_course_constructor_url_package_id_branch_and_version_guid(self):
test_id_loc = '519665f6223ebd6980884f2b'
org = 'mit.eecs'
course = '~6002x'
run = '2014_T2'
testobj = CourseKey.from_string('course-v1:{}+{}+{}+{}@draft-1+{}@{}'.format(
org, course, run, CourseLocator.BRANCH_PREFIX, CourseLocator.VERSION_PREFIX, test_id_loc
))
self.check_course_locn_fields(
testobj,
org=org,
course=course,
run=run,
branch='draft-1',
version_guid=ObjectId(test_id_loc)
)
def test_course_constructor_package_id_no_branch(self):
org = 'mit.eecs'
course = '6002x'
run = '2014_T2'
testurn = '{}+{}+{}'.format(org, course, run)
testobj = CourseLocator(org=org, course=course, run=run)
self.check_course_locn_fields(testobj, org=org, course=course, run=run)
# Allow access to _to_string
# pylint: disable=protected-access
self.assertEqual(testobj._to_string(), testurn)
def test_course_constructor_package_id_separate_branch(self):
org = 'mit.eecs'
course = '6002x'
run = '2014_T2'
test_branch = 'published'
expected_urn = '{}+{}+{}+{}@{}'.format(org, course, run, CourseLocator.BRANCH_PREFIX, test_branch)
testobj = CourseLocator(org=org, course=course, run=run, branch=test_branch)
self.check_course_locn_fields(
testobj,
org=org,
course=course,
run=run,
branch=test_branch,
)
# pylint: disable=no-member,protected-access
self.assertEqual(testobj.branch, test_branch)
self.assertEqual(testobj._to_string(), expected_urn)
def test_course_constructor_deprecated_offering(self):
org = 'mit.eecs'
course = '6002x'
run = '2014_T2'
offering = '{}/{}'.format(course, run)
test_branch = 'published'
with self.assertDeprecationWarning(count=2):
testobj = CourseLocator(org=org, offering=offering, branch=test_branch)
with self.assertRaises(InvalidKeyError):
CourseLocator(org=org, offering='', branch=test_branch)
with self.assertRaises(InvalidKeyError):
CourseLocator(org=org, offering=course, branch=test_branch)
self.check_course_locn_fields(
testobj,
org=org,
course=course,
run=run,
branch=test_branch,
)
@ddt.data(
"i4x://org/course/category/name",
"i4x://org/course/category/name@revision"
)
def test_make_usage_key_from_deprecated_string_roundtrip(self, url):
course_key = CourseLocator('org', 'course', 'run')
with self.assertDeprecationWarning(count=2):
self.assertEquals(
url,
course_key.make_usage_key_from_deprecated_string(url).to_deprecated_string()
)
def test_empty_run(self):
with self.assertRaises(InvalidKeyError):
CourseLocator('org', 'course', '')
self.assertEquals(
'org/course/',
unicode(CourseLocator('org', 'course', '', deprecated=True))
)
|
devs1991/test_edx_docmode
|
venv/lib/python2.7/site-packages/opaque_keys/edx/tests/test_course_locators.py
|
Python
|
agpl-3.0
| 10,010
|
##############################################################################
#
# Copyright (C) 2015 Compassion CH (http://www.compassion.ch)
# Releasing children from poverty in Jesus' name
# @author: Emanuel Cino <ecino@compassion.ch>
#
# The licence is in the file __manifest__.py
#
##############################################################################
import logging
from datetime import datetime
from babel.dates import format_date
from odoo import api, models, fields, _
from odoo.exceptions import UserError
logger = logging.getLogger(__name__)
COMPASSION_QRR = "CH2430808007681434347"
class ContractGroup(models.Model):
_inherit = ["recurring.contract.group", "translatable.model"]
_name = "recurring.contract.group"
@api.multi
def get_months(self, months, sponsorships):
"""
Given the list of months to print,
        returns the list of months grouped by the payment frequency
        of the contract group, keeping only unpaid months.
:param months: list of dates (date, datetime or string)
:param sponsorships: recordset of included sponsorships
:return: list of dates grouped in string format
"""
self.ensure_one()
freq = self.advance_billing_months
payment_mode = self.with_context(lang="en_US").payment_mode_id
# Take first open invoice or next_invoice_date
        open_invoice = min(
            (i for i in sponsorships.mapped("first_open_invoice") if i),
            default=False,
        )
if open_invoice:
first_invoice_date = open_invoice.replace(day=1)
else:
raise UserError(_("No open invoice found !"))
for i, month in enumerate(months):
if isinstance(month, str):
months[i] = fields.Date.from_string(month)
if isinstance(month, datetime):
months[i] = month.date()
# check if first invoice is after last month
if first_invoice_date > months[-1]:
raise UserError(_(f"First invoice is after Date Stop"))
# Only keep unpaid months
valid_months = [
fields.Date.to_string(month) for month in months
if month >= first_invoice_date
]
if "Permanent" in payment_mode.name:
return valid_months[:1]
if freq == 1:
return valid_months
else:
# Group months
result = list()
count = 1
month_start = ""
for month in valid_months:
if not month_start:
month_start = month
if count < freq:
count += 1
else:
result.append(month_start + " - " + month)
month_start = ""
count = 1
if not result:
result.append(month_start + " - " + month)
return result
@api.multi
def get_communication(self, start, stop, sponsorships):
"""
Get the communication to print on the payment slip for sponsorship
:param start: the month start for which we print the payment slip (string)
:param stop: the month stop for which we print the payment slip (string)
:param sponsorships: recordset of sponsorships for which to print the
payment slips
:return: string of the communication
"""
self.ensure_one()
payment_mode = self.with_context(lang="en_US").payment_mode_id
amount = self.get_amount(start, stop, sponsorships)
valid = sponsorships
number_sponsorship = len(sponsorships)
date_start = fields.Date.to_date(start)
date_stop = fields.Date.to_date(stop)
vals = {
"amount": f"CHF {amount:.0f}",
"subject": _("for") + " ",
"date": "",
}
locale = self.partner_id.lang
context = {"lang": locale}
if start and stop:
start_date = format_date(date_start, format="MMMM yyyy", locale=locale)
stop_date = format_date(date_stop, format="MMMM yyyy", locale=locale)
if start == stop:
vals["date"] = start_date
else:
vals["date"] = f"{start_date} - {stop_date}"
if "Permanent" in payment_mode.name:
vals["payment_type"] = _("ISR for standing order")
vals["date"] = ""
else:
vals["payment_type"] = (
_("ISR") + " " + self.contract_ids[0].with_context(
context).group_freq
)
if number_sponsorship > 1:
vals["subject"] += str(number_sponsorship) + " " + _("sponsorships")
elif number_sponsorship and valid.child_id:
vals["subject"] = valid.child_id.preferred_name + " ({})".format(
valid.child_id.local_id
)
elif number_sponsorship and not valid.child_id and valid.display_name:
product_name = self.env["product.product"].search(
[("id", "in", valid.mapped("contract_line_ids.product_id").ids)]
)
vals["subject"] = ", ".join(product_name.mapped("thanks_name"))
return (
f"{vals['payment_type']} {vals['amount']}"
f"<br/>{vals['subject']}<br/>{vals['date']}"
)
@api.model
def get_company_qrr_account(self):
""" Utility to find the bvr account of the company. """
return self.env["res.partner.bank"].search([
('acc_number', '=', COMPASSION_QRR)])
def get_amount(self, start, stop, sponsorships):
self.ensure_one()
amount = sum(sponsorships.mapped("total_amount"))
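        # Note: this month arithmetic assumes start and stop fall in the
        # same calendar year.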
months = int(stop.split("-")[1]) - int(start.split("-")[1]) + 1
payment_mode = self.with_context(lang="en_US").payment_mode_id
if "Permanent" in payment_mode.name:
months = self.advance_billing_months
return amount * months
|
eicher31/compassion-switzerland
|
report_compassion/models/contract_group.py
|
Python
|
agpl-3.0
| 5,996
|
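A standalone trace of the grouping loop in get_months() above, with freq = 3 and four unpaid months. It deliberately preserves a quirk of the original: a trailing partial group is appended only when no full group was formed, so the lone April entry is dropped here.
months = ['2020-01-01', '2020-02-01', '2020-03-01', '2020-04-01']
freq = 3
result, count, month_start = [], 1, ''
for month in months:
    if not month_start:
        month_start = month
    if count < freq:
        count += 1
    else:
        result.append(month_start + ' - ' + month)
        month_start = ''
        count = 1
if not result:
    result.append(month_start + ' - ' + month)
print(result)  # ['2020-01-01 - 2020-03-01']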
from functools import wraps
from django.shortcuts import get_object_or_404, redirect
from django.http import Http404, HttpResponseForbidden, HttpResponseBadRequest
from autodidact.models import *
def needs_course(view):
@wraps(view)
def wrapper(request, course_slug, *args, **kwargs):
if isinstance(course_slug, Course):
course = course_slug
elif request.user.is_staff:
course = get_object_or_404(Course, slug=course_slug)
else:
course = get_object_or_404(Course, slug=course_slug, active=True)
return view(request, course, *args, **kwargs)
return wrapper
def needs_session(view):
@wraps(view)
def wrapper(request, course, session_nr, *args, **kwargs):
if not isinstance(course, Course):
raise TypeError('Course object required')
if isinstance(session_nr, Session):
session = session_nr
else:
session_nr = int(session_nr)
session = course.sessions.filter(number=session_nr).first()
if session is None:
raise Http404()
if not session.active and not request.user.is_staff:
raise Http404()
return view(request, course, session, *args, **kwargs)
return wrapper
def needs_assignment(view):
@wraps(view)
def wrapper(request, course, session, assignment_nr, *args, **kwargs):
if not isinstance(course, Course):
raise TypeError('Course object required')
if not isinstance(session, Session):
raise TypeError('Session object required')
if isinstance(assignment_nr, Assignment):
assignment = assignment_nr
else:
assignment_nr = int(assignment_nr)
assignment = session.assignments.filter(number=assignment_nr).first()
if assignment is None:
raise Http404()
if not assignment.active and not request.user.is_staff:
raise Http404()
if assignment.locked and not request.user.is_staff:
if not request.user.attends.all() & session.classes.all():
return HttpResponseForbidden('Permission Denied')
return view(request, course, session, assignment, *args, **kwargs)
return wrapper
def needs_step(view):
@wraps(view)
def wrapper(request, course, session, assignment, *args, **kwargs):
if not isinstance(course, Course):
raise TypeError('Course object required')
if not isinstance(session, Session):
raise TypeError('Session object required')
if not isinstance(assignment, Assignment):
raise TypeError('Assignment object required')
try:
step = assignment.steps.filter(number=request.GET.get('step')).first()
if step is None:
# Not sure if this is the right place, but let's
# ensure that an assignment has at least one step
if not assignment.steps.exists():
Step(assignment=assignment).save()
return redirect(assignment.steps.first())
except ValueError:
return HttpResponseBadRequest('Invalid step number')
step.fullscreen = 'fullscreen' in request.GET
step.completedstep = request.user.completed.filter(step=step).first()
step.given_values = step.completedstep.answer.split('\x1e') if step.completedstep else []
step.right_values = [a.value for a in step.right_answers.all()]
step.wrong_values = [a.value for a in step.wrong_answers.all()]
step.graded = bool(step.right_values) and step.answer_required
step.multiple_choice = bool(step.wrong_values)
step.multiple_answers = step.multiple_choice and len(step.right_values) > 1
step.please_try_again = False
return view(request, course, session, assignment, step, *args, **kwargs)
return wrapper
|
JaapJoris/autodidact
|
autodidact/views/decorators.py
|
Python
|
agpl-3.0
| 3,963
|
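A hypothetical view showing how these decorators are meant to stack: each layer resolves one URL component into a model instance before handing off to the next. The view name and response body are assumptions, not autodidact's actual views.
from django.http import HttpResponse
from autodidact.views.decorators import (
    needs_course, needs_session, needs_assignment, needs_step)
@needs_course
@needs_session
@needs_assignment
@needs_step
def show_step(request, course, session, assignment, step):
    # Every component is now resolved, activity-checked, and
    # permission-checked; `step` also carries the annotations set above.
    return HttpResponse(str(step))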
# -*- coding: utf-8 -*-
# ============================================================================ #
# SMB2_Header.py
#
# Copyright:
# Copyright (C) 2016 by Christopher R. Hertel
#
# $Id: SMB2_Header.py; 2019-06-18 17:56:20 -0500; crh$
#
# ---------------------------------------------------------------------------- #
#
# Description:
# Carnaval Toolkit: SMB2+ message header parsing and composition.
#
# ---------------------------------------------------------------------------- #
#
# License:
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 3.0 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this library. If not, see <http://www.gnu.org/licenses/>.
#
# See Also:
# The 0.README file included with the distribution.
#
# ---------------------------------------------------------------------------- #
# This code was developed in participation with the
# Protocol Freedom Information Foundation.
# <www.protocolfreedom.org>
# ---------------------------------------------------------------------------- #
#
# Notes:
#
# - This module provides the basic tools used to compose and decompose
# SMB2/3 message headers. This module can be used by both client and
# server implementations.
#
# - The specific dialects considered by this module are:
# Common Name | Official Name | Dialect ID
# ============|===============|===========
# SMB2.0 | SMB 2.0.2 | 0x0202
# SMB2.1 | SMB 2.1 | 0x0210
# SMB3.0 | SMB 3.0 | 0x0300
# SMB3.02 | SMB 3.0.2 | 0x0302
# SMB3.11 | SMB 3.1.1 | 0x0311
#
# Others can be added as they are conjured up from the underworld.
#
# - The Python <int> type is "at least" 32 bits, but it's signed, so to
# be safe we use the <long> type to handle ULONG field values. That
# ensures that unsigned 32-bit field values are handled correctly.
# The <long> type can be used to store UINT32 and UINT64 values, as
# well as shorter integer types.
# See: https://docs.python.org/2/library/stdtypes.html#typesnumeric
#
# - This project, overall, is designed to protect against sending invalid
# field values. It also, to some extent, protects against invalid values
# in received messages. However, to make it easy to do protocol testing,
# these protections can be easily bypassed.
#
# References:
#
# [MS-SMB2] Microsoft Corporation, "Server Message Block (SMB)
# Protocol Versions 2 and 3",
# http://msdn.microsoft.com/en-us/library/cc246482.aspx
#
# ToDo:
# - Add more unit tests.
# - Add support for "related commands" (NextCommand chaining).
# - Add support for transform headers (\xfdSMB).
# - Extend the context information to include more connection-related
# data, including GUID, flags, etc.
# - Check the assert() calls in setters when decomposing a message header.
# We want consistent error handling, and asserts() can be compiled out.
# - Allow (and keep) invalid values where docs say "must ignore".
#
# FIX:
# - Use exceptions from SMB_Core.
#
# Moose:
#
# \_\_ _/_/
# \__/
# (oo)
# (..)
# --
#
# ============================================================================ #
#
"""Carnaval Toolkit: SMB2+ message header packing and parsing.
Common classes, functions, etc., for packing and unpacking SMB2+ Headers.
This module deals with structures common to both the client and server.
CONSTANTS:
Protocol constants:
SMB2_MSG_PROTOCOL : \\xFESMB; SMB2 message prefix (protocol ID).
4 bytes.
SMB2_HDR_SIZE : The fixed length of an SMB2+ message header
(64 bytes).
Supported SMB2+ dialect revision codes:
SMB2_DIALECT_202 : SMB 2.0.2 dialect revision (Vista, W2K8 Server)
SMB2_DIALECT_210 : SMB 2.1 dialect revision (Win7, W2K8r2 Server)
SMB2_DIALECT_300 : SMB 3.0 dialect revision (Win8, W2K12 Server)
SMB2_DIALECT_302 : SMB 3.0.2 dialect revision (Win8.1, W2K12r2 Server)
SMB2_DIALECT_311 : SMB 3.1.1 dialect revision (Win10, 2016 Server)
SMB2_DIALECT_LIST : A list of all supported dialects, ordered from
lowest to highest.
SMB2_DIALECT_MIN : The lowest supported dialect.
SMB2_DIALECT_MAX : The highest supported dialect.
SMB2+ command codes:
SMB2_COM_NEGOTIATE : Dialect and feature support negotiation.
SMB2_COM_SESSION_SETUP : Authentication and session establishment.
SMB2_COM_LOGOFF : Close a session; log out.
SMB2_COM_TREE_CONNECT : Connect to a remote share; mount.
SMB2_COM_TREE_DISCONNECT : Disconnect a connected share; umount.
SMB2_COM_CREATE : Create/open a filesystem object (file).
SMB2_COM_CLOSE : Close a previously opened handle.
SMB2_COM_FLUSH : Push data to disk (or thereabouts).
SMB2_COM_READ : Get some data.
SMB2_COM_WRITE : Put some data.
SMB2_COM_LOCK : Byte-range locks.
SMB2_COM_IOCTL : Do fiddly stuff.
SMB2_COM_CANCEL : Don't do whatever you're waiting to do.
SMB2_COM_ECHO : Ping!
SMB2_COM_QUERY_DIRECTORY : Find things in the Object Store.
SMB2_COM_CHANGE_NOTIFY : Let me know if something happens.
SMB2_COM_QUERY_INFO : Get some metadata.
SMB2_COM_SET_INFO : Put some metadata.
SMB2_COM_OPLOCK_BREAK : Server->client lease/oplock break.
SMB2+ header flags:
SMB2_FLAGS_SERVER_TO_REDIR : Response
SMB2_FLAGS_ASYNC_COMMAND : Async
SMB2_FLAGS_RELATED_OPERATIONS : Chained command
SMB2_FLAGS_SIGNED : Signed packet
SMB2_FLAGS_DFS_OPERATIONS : Distributed File System
SMB2_FLAGS_REPLAY_OPERATION : SMB3 Replay
SMB2_FLAGS_MASK : Flags Bitmask
"""
# Imports -------------------------------------------------------------------- #
#
import struct # Binary data handling.
from SMB_Status import * # Windows NT Status Codes.
from common.HexDump import hexstr # Convert binary data to readable output.
from common.HexDump import hexstrchop # Ditto, but with linewrap.
from common.HexDump import hexdump # Formatted hex dump à la hexdump(1).
# Constants ------------------------------------------------------------------ #
#
# Protocol constants
SMB2_MSG_PROTOCOL = '\xFESMB' # Standard SMB2 message prefix (protocol ID).
SMB2_HDR_SIZE = 64 # Fixed SMB2+ header size.
# Known SMB2+ dialect revision codes.
# An unknown or undefined dialect is indicated using <None>.
SMB2_DIALECT_202 = 0x0202 # SMB 2.0.2 dialect revision (Vista/W2K8 Server)
SMB2_DIALECT_210 = 0x0210 # SMB 2.1 dialect revision (Win7/W2K8r2 Server)
SMB2_DIALECT_300 = 0x0300 # SMB 3.0 dialect revision (Win8/W2K12 Server)
SMB2_DIALECT_302 = 0x0302 # SMB 3.0.2 dialect revision (Win8.1/W2K12r2 Server)
SMB2_DIALECT_311 = 0x0311 # SMB 3.1.1 dialect revision (Win10/W2K16 Server)
# List of supported dialects, in order from oldest to newest.
SMB2_DIALECT_LIST = [ SMB2_DIALECT_202,
SMB2_DIALECT_210,
SMB2_DIALECT_300,
SMB2_DIALECT_302,
SMB2_DIALECT_311 ]
SMB2_DIALECT_MIN = SMB2_DIALECT_LIST[0] # Oldest supported revision.
SMB2_DIALECT_MAX = SMB2_DIALECT_LIST[-1] # Newest supported revision.
# SMB2/3 command codes (there are, currently, 19 SMB2+ command codes).
SMB2_COM_NEGOTIATE = 0x0000 # 0
SMB2_COM_SESSION_SETUP = 0x0001 # 1
SMB2_COM_LOGOFF = 0x0002 # 2
SMB2_COM_TREE_CONNECT = 0x0003 # 3
SMB2_COM_TREE_DISCONNECT = 0x0004 # 4
SMB2_COM_CREATE = 0x0005 # 5
SMB2_COM_CLOSE = 0x0006 # 6
SMB2_COM_FLUSH = 0x0007 # 7
SMB2_COM_READ = 0x0008 # 8
SMB2_COM_WRITE = 0x0009 # 9
SMB2_COM_LOCK = 0x000A # 10
SMB2_COM_IOCTL = 0x000B # 11
SMB2_COM_CANCEL = 0x000C # 12
SMB2_COM_ECHO = 0x000D # 13
SMB2_COM_QUERY_DIRECTORY = 0x000E # 14
SMB2_COM_CHANGE_NOTIFY = 0x000F # 15
SMB2_COM_QUERY_INFO = 0x0010 # 16
SMB2_COM_SET_INFO = 0x0011 # 17
SMB2_COM_OPLOCK_BREAK = 0x0012 # 18
# SMB2/3 header flags
SMB2_FLAGS_SERVER_TO_REDIR = 0x00000001 # Response
SMB2_FLAGS_ASYNC_COMMAND = 0x00000002 # Async
SMB2_FLAGS_RELATED_OPERATIONS = 0x00000004 # ANDX
SMB2_FLAGS_SIGNED = 0x00000008 # Signed packet
SMB2_FLAGS_DFS_OPERATIONS = 0x10000000 # Distributed File System (DFS)
SMB2_FLAGS_REPLAY_OPERATION = 0x20000000 # SMB3 Replay
SMB2_FLAGS_PRIORITY_MASK = 0x00000070 # SMB311 priority bits
SMB2_FLAGS_MASK = 0x3000007F # Bitmask
# Max Size values
_UCHAR_MAX = 0xFF # Bitmask for Unsigned 8-bit (UCHAR) values.
_USHORT_MAX = 0xFFFF # Bitmask for Unsigned 16-bit (USHORT) values.
_ULONG_MAX = 0xFFFFFFFF # Bitmask for Unsigned 32-bit (ULONG) values.
_UINT64_MAX = (2**64) - 1 # Bitmask for Unsigned 64-bit (UINT64) values.
# Classes -------------------------------------------------------------------- #
#
class _SMB2_Header( object ):
# SMB2/SMB3 Message Header; [MS-SMB; 2.2.1].
#
# This class is used to format both Sync and Async SMB2 headers.
#
# Reminder: SMB2 and SMB3 are names for different sets of dialects of the
# same protocol; SMB3.0 was originally SMB2.2. Can you say
# "Marketing Upgrade"?
#
# Class values:
  # Values instantiated once for the class (so that all instances can use them).
#
# These represent the four possible header formats defined for the
# supported SMB2 dialects. It's basically a 2x2 matrix.
#
# _format_SMB2_StatAsync - Async header, with <status> and <asyncId>.
# _format_SMB2_StatTreeId - Sync header, with <status> and <treeId>.
# _format_SMB2_cSeqAsync - Async header, with <channelSeq> and <asyncId>.
# _format_SMB2_cSeqTreeId - Sync header, with <channelSeq> and <treeId>.
#
# In general, Async headers are sent in server responses that are used to
# tell the client to wait for a pending operation to complete. That is,
# they are "hang on a bit" messages, telling the client not to time out.
#
# A client uses an async header when it is sending a CANCEL request for
# a command for which the server has already sent an Async response.
# That is:
# Command --> (sync)
# <-- Hang on a bit (async)
# Nevermind --> (async)
# <-- Command canceled (sync)
# The middle two are sent using Async headers.
#
# These two additional patterns are used for decoding header variants.
# _format_2H - Two unsigned 16-bit integers.
# _format_Q - One unsigned 64-bit integer.
#
# [MS-SMB2; 2.2.1] also mystically says that the Async header "MAY be used
# for any request", but doesn't explain when or why a client would do such
# a confusing thing.
#
# _cmd_LookupDict - A dictionary that maps command codes to strings.
# This is used for composing error messages, and when
# providing a header dump.
#
_format_SMB2_StatAsync = struct.Struct( '<4s H H L H H L L Q Q Q 16s' )
_format_SMB2_StatTreeId = struct.Struct( '<4s H H L H H L L Q L L Q 16s' )
_format_SMB2_cSeqAsync = struct.Struct( '<4s H H H H H H L L Q Q Q 16s' )
_format_SMB2_cSeqTreeId = struct.Struct( '<4s H H H H H H L L Q L L Q 16s' )
_format_2H = struct.Struct( "<H H" )
_format_Q = struct.Struct( "<Q" )
_cmd_LookupDict = \
{
SMB2_COM_NEGOTIATE : "NEGOTIATE",
SMB2_COM_SESSION_SETUP : "SESSION_SETUP",
SMB2_COM_LOGOFF : "LOGOFF",
SMB2_COM_TREE_CONNECT : "TREE_CONNECT",
SMB2_COM_TREE_DISCONNECT: "TREE_DISCONNECT",
SMB2_COM_CREATE : "CREATE",
SMB2_COM_CLOSE : "CLOSE",
SMB2_COM_FLUSH : "FLUSH",
SMB2_COM_READ : "READ",
SMB2_COM_WRITE : "WRITE",
SMB2_COM_LOCK : "LOCK",
SMB2_COM_IOCTL : "IOCTL",
SMB2_COM_CANCEL : "CANCEL",
SMB2_COM_ECHO : "ECHO",
SMB2_COM_QUERY_DIRECTORY: "QUERY_DIRECTORY",
SMB2_COM_CHANGE_NOTIFY : "CHANGE_NOTIFY",
SMB2_COM_QUERY_INFO : "QUERY_INFO",
SMB2_COM_SET_INFO : "SET_INFO",
SMB2_COM_OPLOCK_BREAK : "OPLOCK_BREAK"
}
# _SMB2_Header class methods:
#
@classmethod
def parseMsg( cls, msgBlob=None, dialect=SMB2_DIALECT_MIN ):
"""Decompose wire data and return an _SMB2_Header object.
Input:
cls - This class.
msgBlob - An array of at least 64 bytes, representing an SMB2+
message in wire format.
dialect - The minimum dialect under which to parse the header.
Output:
An <_SMB2_Header> object.
Errors:
AssertionError - Thrown if:
+ The length of <msgBlob> is less than the
minimum of 64 bytes.
+ The command code parsed from the message is
not a valid command code.
+ The given dialect is not known.
ValueError - Thrown if the packet cannot possibly contain a
valid SMB2+ message header. This exception is
raised if either the ProtocolId field doesn't
contain the correct string, or if the
StructureSize value is incorrect.
Notes:
- This function does not parse SMB3 Transform Headers. An SMB3
Transform header will be rejected with a ValueError.
- Beyond the basics of verifying that ProtocolId and StructureSize
are correct, this function does _no_ validation of the input.
"""
# Fundamental sanity check.
assert( SMB2_HDR_SIZE <= len( msgBlob ) ), "Incomplete message header."
# Parse it. Use the simple sync response format.
tup = cls._format_SMB2_StatTreeId.unpack( msgBlob[:SMB2_HDR_SIZE] )
# Look for trouble.
if( SMB2_MSG_PROTOCOL != tup[0] ):
raise ValueError( "Malformed SMB2 ProtocolId: [%s]." % repr( tup[0] ) )
elif( SMB2_HDR_SIZE != tup[1] ):
s = "The SMB2 Header StructureSize must be 64, not %d." % tup[1]
raise ValueError( s )
# Create and populate a header record instance.
hdr = cls( tup[4], dialect )
hdr._creditCharge = tup[2]
# 3: Status/ChannelSeq/Reserved1; see below
hdr.command = tup[4]
hdr._creditReqResp = tup[5]
hdr._flags = tup[6]
hdr._nextCommand = tup[7]
hdr._messageId = tup[8]
# 9, 10: Reserved2/TreeId/AsyncId; see below
hdr._sessionId = tup[11]
hdr._signature = tup[12]
# Handle the overloaded fields.
if( hdr.flagReply or (dialect < SMB2_DIALECT_300) ):
hdr._status = tup[3]
else:
hdr._channelSeq, hdr._reserved1 = cls._format_2H.unpack( msgBlob[8:12] )
if( hdr.flagAsync ):
      hdr._asyncId = cls._format_Q.unpack( msgBlob[32:40] )[0]
else:
hdr._reserved2 = tup[9]
hdr._treeId = tup[10]
# All done.
return( hdr )
@classmethod
  def commandName( cls, CmdId=0xFF ):
"""Given an SMB2 command code, return the name of the command.
Input:
CmdId - An SMB2/3 command code.
Output: A string.
If <CmdId> is a known SMB2/3 command code, the string
will be the command name. Otherwise, the empty string
is returned.
"""
    if( CmdId in cls._cmd_LookupDict ):
      return( cls._cmd_LookupDict[CmdId] )
return( '' )
def __init__( self, command=None, dialect=SMB2_DIALECT_MIN ):
# Create an SMB2 message header object.
#
# Input:
# command - The command code; one of the SMB2_COM_* values.
# dialect - The dialect version under which this header is being
# created. This is contextual information; in future
# revisions we may need to expand the context data to
# include things like negotiated flag settings, etc.
# Errors:
# AssertionError - Thrown if the given command code is not a
# known code, or if the given dialect is not
# in the list of supported dialects.
# [ TypeError, - Either of these may be thrown if an input value
# ValueError ] cannot be converted into the expected type.
#
# Notes:
# Several SMB2 Header fields are overloaded. For example, the
# <Status> field is a four byte field at offset 8.
# * In the 2.0 and 2.1 dialects, this field MUST be zero in
# Request messages.
    #   * In the 3.x dialects, in a request message only, the same
# bytes are used for a 2-byte <ChannelSequence> field,
# followed by a 2-byte Reserved-must-be-zero field.
# * In SMB2/3 Response messages, the field is always the 4-byte
# <Status> field.
#
# Similarly, in an Async header the 8 bytes at offset 32 are used
# for the <AsyncId>. In a Sync header, the first four bytes are
# Reserved-must-be-zero, and the next four bytes are the TreeID.
#
self._protocolId = SMB2_MSG_PROTOCOL # 4 bytes
self._headerSize = SMB2_HDR_SIZE # 2 bytes
self._creditCharge = 0 # 2 bytes
self._status = 0 # 4 bytes -- <status> --
self._channelSeq = 0 # 2 bytes \ Same bytes
self._reserved1 = 0 # 2 bytes / as <status>
self.command = command # 2 bytes
self._creditReqResp = 0 # 2 bytes
self._flags = 0 # 4 bytes
self._nextCommand = 0 # 4 bytes
self._messageId = 0 # 8 bytes
self._reserved2 = 0 # 4 bytes \ Same bytes
self._treeId = 0 # 4 bytes / as <asyncId>
self._asyncId = 0 # 8 bytes -- <asyncId> --
self._sessionId = 0 # 8 bytes
self._signature = (16 * '\0') # 16 bytes
# 64 bytes total.
# Context information:
#
    assert( dialect in SMB2_DIALECT_LIST ), "Unknown Dialect: 0x%04X" % dialect
self._dialect = int( dialect )
@property
def creditCharge( self ):
"""Get/set the SMB2_Header.CreditCharge field value (USHORT).
Errors:
AssertionError - Thrown if the assigned value (after conversion
to an <int>) is either negative or greater
than 0xFFFF.
- Thrown if the assigned value is non-zero and
the current dialect is SMBv2.0.2.
[ TypeError, - Either of these may be thrown if the assigned
ValueError ] value cannot be converted into an <int>.
Notes:
It is out of character to throw an exception based on the given
dialect level. This layer does minimal enforcement of
per-dialect syntax rules, generally allowing the caller to make
their own mess. You can, of course, still bypass the assertion
by setting <instance>._creditCharge directly.
"""
return( self._creditCharge )
@creditCharge.setter
def creditCharge( self, cc ):
cc = int( cc )
assert( 0 <= cc <= _USHORT_MAX ), "Assigned value (%d) out of range." % cc
assert( (cc == 0) or (self._dialect > SMB2_DIALECT_202) ), \
"Reserved; Value must be zero in SMBv2.0.2."
self._creditCharge = cc
@property
def status( self ):
"""Get/set the SMB2_Header.status field (ULONG).
Errors:
AssertionError - Thrown if the assigned value (after conversion
to a <long>) is either negative or greater
than 0xFFFFFFFF.
[ TypeError, - Either of these may be thrown if the assigned
ValueError ] value cannot be converted into a <long>.
Notes:
This field should only be set in response messages, and should
be considered "reserved; must be zero" in all requests.
      Starting with SMBv3.0.0, this field is superseded in request
messages by the 16-bit ChannelSequence field (plus an additional
16-bit Reserved field).
It is probably easiest to think of it this way:
- There is no <Status> field in request messages; it only exists
in response messages.
- If the dialect is less than 0x0300, then there is a 32-bit
"Reserved Must Be Zero" field where the <Status> field might
otherwise exist.
- If the dialect is 0x0300 or greater, then there is a 16-bit
<ChannelSequence> field followed by a 16-bit "Reserved Must Be
Zero" field where the <Status> might otherwise exist.
"""
return( self._status )
@status.setter
def status( self, st ):
st = 0L if( not st ) else long( st )
assert( 0 <= st <= _ULONG_MAX ), \
"Assigned value (0x%08X) out of range." % st
self._status = st
@property
def channelSeq( self ):
"""Get/set the Channel Sequence value (USHORT).
AssertionError - Thrown if the assigned value (after conversion
to an <int>) is either negative or greater
than 0xFFFF.
[ TypeError, - Either of these may be thrown if the assigned
ValueError ] value cannot be converted into an <int>.
Notes:
The ChannelSequence value is only recognized in request messages,
and only if the dialect is 0x0300 or greater. That is, this
      field does not exist in SMB2.x, only in SMB3.x. In all
      responses, and in dialects prior to 0x0300, the bytes of this
field are always seen as part of the Status field.
"""
return( self._channelSeq )
@channelSeq.setter
def channelSeq( self, cs ):
cs = int( cs )
assert( 0 <= cs <= _USHORT_MAX ), "Assigned value (%d) out of range." % cs
self._channelSeq = cs
@property
def command( self ):
"""Get/set the SMB2_Header.Command (UCHAR).
Errors: [ AssertionError, TypeError, ValueError ]
Thrown if the assigned value cannot be converted into a valid
SMB2 command code.
"""
return( self._command )
@command.setter
def command( self, cmd ):
cmd = int( cmd )
assert( 0 <= cmd <= 0x12 ), "Unknown command code: 0x%04X." % cmd
self._command = cmd
@property
def creditReqResp( self ):
"""Get/set the Credit Request / Credit Response value (USHORT).
Errors:
AssertionError - Thrown if the assigned value (after conversion
to an <int>) is either negative or greater
than 0xFFFF.
[ TypeError, - Either of these may be thrown if the assigned
ValueError ] value cannot be converted into an <int>.
ToDo: Document how and when this is used; references.
The credit management subsystem needs study.
"""
return( self._creditReqResp )
@creditReqResp.setter
def creditReqResp( self, crr ):
crr = int( crr )
assert( 0 <= crr <= _USHORT_MAX ), \
"Assigned value (%d) out of range." % crr
self._creditReqResp = crr
@property
def flags( self ):
"""Get/set the Flags field (ULONG).
Errors:
AssertionError - Thrown if the assigned value (after conversion
to a <long>) has bits that are set which do not
represent a known SMB2+ flag.
[ TypeError, - Either of these may be thrown if the assigned
ValueError ] value cannot be converted into a <long>.
"""
return( self._flags )
@flags.setter
def flags( self, flags ):
flgs = long( flags )
assert( flgs == (flgs & SMB2_FLAGS_MASK) ), "Unrecognized flag bit(s)."
self._flags = flgs
# Note: See below for per-flag get/set properties.
@property
def nextCommand( self ):
"""Get/set the Next Command offset value (ULONG).
Errors:
AssertionError - Thrown if the assigned value (after conversion
to a <long>) is either negative, or greater
than (2^32)-1.
[ TypeError, - Either of these may be thrown if the assigned
ValueError ] value cannot be converted into a <long>.
"""
return( self._nextCommand )
@nextCommand.setter
def nextCommand( self, nextOffset ):
nc = long( nextOffset )
assert( 0 <= nc <= _ULONG_MAX ), \
"Invalid Related Command Offset: %d." % nc
self._nextCommand = nc
@property
def messageId( self ):
"""Get/set the Message ID value (UINT64).
Errors:
AssertionError - Thrown if the assigned value (after conversion
to a <long>) is either negative, or greater
than (2^64)-1.
[ TypeError, - Either of these may be thrown if the assigned
ValueError ] value cannot be converted into a <long>.
"""
return( self._messageId )
@messageId.setter
def messageId( self, messageId ):
mi = long( messageId )
assert( 0 <= mi <= _UINT64_MAX ), \
"Assigned value (%d) out of range." % mi
self._messageId = mi
@property
def treeId( self ):
"""Get/set the Tree Connect ID (ULONG).
Errors:
AssertionError - Thrown if the assigned value (after conversion
to a <long>) is either negative or greater
than 0xFFFFFFFF.
[ TypeError, - Either of these may be thrown if the assigned
ValueError ] value cannot be converted into a <long>.
"""
return( self._treeId )
@treeId.setter
def treeId( self, treeId ):
tid = long( treeId )
assert( 0 <= tid <= _ULONG_MAX ), \
"Assigned value (%d) out of range." % tid
self._treeId = tid
@property
def asyncId( self ):
"""Get/set the Async Id (UINT64).
Errors:
AssertionError - Thrown if the assigned value (after conversion
to a <long>) is either negative or greater
than (2^64)-1.
[ TypeError, - Either of these may be thrown if the assigned
ValueError ] value cannot be converted into a <long>.
"""
return( self._asyncId )
@asyncId.setter
  def asyncId( self, asyncId ):
ai = long( asyncId )
assert( 0 <= ai <= _UINT64_MAX ), \
"Assigned value (%d) out of range." % ai
self._asyncId = ai
@property
def sessionId( self ):
"""Get/set the Session Id (UINT64).
Errors:
AssertionError - Thrown if the assigned value (after conversion
to a <long>) is either negative or greater
than (2^64)-1.
[ TypeError, - Either of these may be thrown if the assigned
ValueError ] value cannot be converted into a <long>.
"""
return( self._sessionId )
@sessionId.setter
def sessionId( self, sessionId ):
si = long( sessionId )
assert( 0 <= si <= _UINT64_MAX ), \
"Assigned value (%d) out of range." % si
self._sessionId = si
@property
def signature( self ):
"""Get/set the packet signature.
Errors:
AssertionError - Thrown if the string representation of the
assigned value is not exactly 16 bytes.
SyntaxError - Thrown if the assigned value is not of type
<str> and cannot be converted to type <str>.
"""
return( self._signature )
@signature.setter
def signature( self, signature ):
sig = str( signature )
assert( 16 == len( sig ) ), "Exactly 16 bytes required."
self._signature = sig
# Flag bitfield properties.
# _flag[S|G]et() generically handles getting and setting of
# individual flag bits.
def _flagGet( self, flag ):
return( bool( flag & self._flags ) )
def _flagSet( self, flag, bitState ):
if( bitState ):
self._flags |= flag
else:
self._flags &= ~flag
@property
def flagReply( self ):
"""Get/set the SMB2_FLAGS_SERVER_TO_REDIR (Reply) bit.
The assigned value is evaluated as a boolean:
True = set the bit; False = clear it.
"""
return( self._flagGet( SMB2_FLAGS_SERVER_TO_REDIR ) )
@flagReply.setter
def flagReply( self, bitState ):
self._flagSet( SMB2_FLAGS_SERVER_TO_REDIR, bitState )
@property
def flagAsync( self ):
"""Get/set the SMB2_FLAGS_ASYNC_COMMAND (Async) bit.
The assigned value is evaluated as a boolean:
True = set the bit; False = clear it.
"""
return( self._flagGet( SMB2_FLAGS_ASYNC_COMMAND ) )
@flagAsync.setter
def flagAsync( self, bitState ):
self._flagSet( SMB2_FLAGS_ASYNC_COMMAND, bitState )
@property
def flagNext( self ):
"""Get/set the SMB2_FLAGS_RELATED_OPERATIONS (Next) bit.
The assigned value is evaluated as a boolean:
True = set the bit; False = clear it.
"""
return( self._flagGet( SMB2_FLAGS_RELATED_OPERATIONS ) )
@flagNext.setter
def flagNext( self, bitState ):
self._flagSet( SMB2_FLAGS_RELATED_OPERATIONS, bitState )
@property
def flagSigned( self ):
"""Get/set the SMB2_FLAGS_SIGNED (Signed) bit.
The assigned value is evaluated as a boolean:
True = set the bit; False = clear it.
"""
return( self._flagGet( SMB2_FLAGS_SIGNED ) )
@flagSigned.setter
def flagSigned( self, bitState ):
self._flagSet( SMB2_FLAGS_SIGNED, bitState )
@property
def flagDFS( self ):
"""Get/set the SMB2_FLAGS_DFS_OPERATIONS (DFS) bit.
The assigned value is evaluated as a boolean:
True = set the bit; False = clear it.
"""
return( self._flagGet( SMB2_FLAGS_DFS_OPERATIONS ) )
@flagDFS.setter
def flagDFS( self, bitState ):
self._flagSet( SMB2_FLAGS_DFS_OPERATIONS, bitState )
@property
def flagReplay( self ):
"""Get/set the SMB2_FLAGS_REPLAY_OPERATION (Replay) bit.
The assigned value is evaluated as a boolean:
True = set the bit; False = clear it.
"""
return( self._flagGet( SMB2_FLAGS_REPLAY_OPERATION ) )
@flagReplay.setter
def flagReplay( self, bitState ):
self._flagSet( SMB2_FLAGS_REPLAY_OPERATION, bitState )
@property
def flagPriority( self ):
"""Get/set the SMBv3.1.1+ Priority subfield.
This value is actually a 3-bit integer (in the range 0..7).
Errors:
ValueError - Thrown if the assigned value is outside of the
valid range.
"""
return( (self._flags & SMB2_FLAGS_PRIORITY_MASK) >> 4 )
@flagPriority.setter
def flagPriority( self, prioVal ):
if( prioVal not in range( 8 ) ):
raise ValueError( "Assigned value (%d) out of range." % prioVal )
self._flags &= ~SMB2_FLAGS_PRIORITY_MASK
self._flags |= (prioVal << 4)
def dump( self, indent=0 ):
# Produce a nicely formatted dump of the SMB2 header.
#
# Input:
# indent - Number of spaces to indent the formatted output.
#
    # Output: A string, presenting the formatted SMB2 header fields.
#
# Notes: If the message is a request and the dialect is at least
# 0x0300, the ChannelSequence (and a Reserved field) will
# replace the Status field (which would otherwise go unused
# in a request). This is a protocol modification introduced
# with the 3.0 dialect.
#
ind = ' ' * indent
cmdName = self.commandName( self._command )
cmdName = "<unknown>" if( not cmdName ) else cmdName
statName = NTStatus( self._status )
statName = "\n" if( statName is None ) else " [%s]\n" % statName.name
# Stuff...
s = ind + "ProtocolId...: %s\n" % hexstr( self._protocolId[:4] )
s += ind + "StructureSize: 0x{0:04X} ({0:d})\n".format( self._headerSize )
s += ind + "CreditCharge.: 0x{0:04X} ({0:d})\n".format( self._creditCharge )
# Status/Reserved1
if( self.flagReply or self._dialect < SMB2_DIALECT_300 ):
s += ind + "Status.......: 0x{0:08X}".format( self._status ) + statName
else:
s += ind + "ChannelSeq...: 0x{0:04X} ({0:d})\n".format( self._channelSeq )
s += ind + "Reserved1....: 0x{0:04X} ({0:d})\n".format( self._reserved1 )
# More stuff...
s += ind + "Command......: 0x{0:02X} ({0:d})".format( self._command ) \
+ " [{0:s}]\n".format( self.commandName( self._command ) )
s += ind + "CreditReqResp: 0x{0:04X} ({0:d})\n".format( self.creditReqResp )
s += ind + "Flags........: 0x{0:08X} ({0:d})\n".format( self._flags )
# Flag subfields.
s += ind + " Response.....: %s\n" % self.flagReply
s += ind + " Async........: %s\n" % self.flagAsync
s += ind + " Related Op...: %s\n" % self.flagNext
s += ind + " Signed.......: %s\n" % self.flagSigned
if( self._dialect >= SMB2_DIALECT_311 ):
s += ind + " Priority.....: {0:d}\n".format( self.flagPriority )
s += ind + " DFS Operation: %s\n" % self.flagDFS
s += ind + " SMB3.x Replay: %s\n" % self.flagReplay
# Yet more stuff...
s += ind + "NextCommand..: 0x{0:08X} ({0:d})\n".format( self._nextCommand )
s += ind + "MessageId....: 0x{0:016X} ({0:d})\n".format( self._messageId )
# AsyncId/Reserved2+TreeId
if( self.flagAsync ):
s += ind + "AsyncId......: 0x{0:016X} ({0:d})\n".format( self._asyncId )
else:
s += ind + "Reserved2....: 0x{0:08X} ({0:d})\n".format( self._reserved2 )
s += ind + "TreeId.......: 0x{0:08X} ({0:d})\n".format( self._treeId )
# SessionId and Signature
s += ind + "SessionId....: 0x{0:016X} ({0:d})\n".format( self._sessionId )
s += ind + "Signature....: ["
tmp = (16 + indent)
s += ('\n' + (' ' * tmp)).join( hexstrchop( self._signature, 32 ) ) + "]\n"
return( s )
def compose( self ):
# Marshall the SMB2 header fields into a stream of bytes.
#
# Output: A string of bytes; the wire format of the SMB2 header.
#
# Notes: It's probably okay if the dialect version isn't
# specified. The default values of <channelSeq> and
# <reserved1> are zero, so the encoded format would be
# zero for either interpretation.
#
if( self.flagReply or (self._dialect < 0x0300) ):
# Bytes 8..11 are <status>
if( self.flagAsync ):
# Bytes 32..39 are <async>
msg = self._format_SMB2_StatAsync.pack( self._protocolId,
self._headerSize,
self._creditCharge,
self._status,
self._command,
self._creditReqResp,
self._flags,
self._nextCommand,
self._messageId,
self._asyncId,
self._sessionId,
self._signature )
else:
# Bytes 32..39 are <reserved2>/<treeId>
msg = self._format_SMB2_StatTreeId.pack( self._protocolId,
self._headerSize,
self._creditCharge,
self._status,
self._command,
self._creditReqResp,
self._flags,
self._nextCommand,
self._messageId,
self._reserved2,
self._treeId,
self._sessionId,
self._signature )
else:
# Bytes 8..11 are <channelSeq>/<reserved1>
if( self.flagAsync ):
# Bytes 32..39 are <async>
msg = self._format_SMB2_cSeqAsync.pack( self._protocolId,
self._headerSize,
self._creditCharge,
self._channelSeq,
self._reserved1,
self._command,
self._creditReqResp,
self._flags,
self._nextCommand,
self._messageId,
self._asyncId,
self._sessionId,
self._signature )
else:
# Bytes 32..39 are <reserved2>/<treeId>
msg = self._format_SMB2_cSeqTreeId.pack( self._protocolId,
self._headerSize,
self._creditCharge,
self._channelSeq,
self._reserved1,
self._command,
self._creditReqResp,
self._flags,
self._nextCommand,
self._messageId,
self._reserved2,
self._treeId,
self._sessionId,
self._signature )
return( msg )
# Unit Tests ----------------------------------------------------------------- #
#
def _unit_test():
# Module unit tests.
#
"""
Doctest:
>>> _unit_test()
Success
"""
if( __debug__ ):
    # 1. Baseline test.
# Just verify that we can store and retrieve the basic attributes
# of an _SMB2_Header object.
#
hdr = _SMB2_Header( SMB2_COM_LOGOFF, SMB2_DIALECT_302 )
hdr.creditCharge = 213
hdr.channelSeq = 42607
hdr.creditReqResp = 42
hdr.flagReply = False
hdr.flagAsync = False
hdr.flagNext = False
hdr.flagSigned = False
hdr.flagPriority = 5
hdr.flagDFS = True
hdr.flagReplay = False
hdr.nextCommand = 0x87654321
hdr.messageId = _SMB2_Header._format_Q.unpack( "Fooberry" )[0]
hdr.treeId = 0xBEADED
hdr.sessionId = _SMB2_Header._format_Q.unpack( "Icecream" )[0]
hdr.signature = "Reginald".center( 16 )
# Create a header dump, compose a message, then parse the message.
dmp0 = hdr.dump()
msg = hdr.compose()
hdr = _SMB2_Header.parseMsg( msg, SMB2_DIALECT_302 )
# Dump the newly reparsed header, and compare against the original.
dmp1 = hdr.dump()
if( dmp0 != dmp1 ):
print "Failure: Reparsing a composed header resulted in differences."
print "As composed:\n", dmp0
print "As parsed:\n", dmp1
return
    # 2. Add additional tests hereafter.
# Bottom line.
print "Success"
# ============================================================================ #
# Reginald fidgeted uneasily in his seat. "I realize", he said, pensively,
# "that I do have unusually large dorsal fins, for a carrot".
# ============================================================================ #
|
ubiqx-org/Carnaval
|
carnaval/smb/SMB2_Header.py
|
Python
|
agpl-3.0
| 40,520
|
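A minimal round-trip sketch for the header class above, written for Python 2 to match the module. It assumes the module is importable as SMB2_Header and that NTStatus resolves status 0 (both assumptions about the surrounding package).
from SMB2_Header import _SMB2_Header, SMB2_COM_ECHO, SMB2_DIALECT_210
hdr = _SMB2_Header(SMB2_COM_ECHO, SMB2_DIALECT_210)
hdr.messageId = 7
hdr.creditReqResp = 1
wire = hdr.compose()                    # 64-byte wire-format string
echo = _SMB2_Header.parseMsg(wire, SMB2_DIALECT_210)
print(echo.dump(indent=2))              # human-readable field dump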
'''
Created on Nov 10, 2014
@author: lauritz
'''
from mock import Mock
from fakelargefile.segmenttail import OverlapSearcher
def test_index_iter_stop():
os = OverlapSearcher("asdf")
segment = Mock()
segment.start = 11
try:
os.index_iter(segment, stop=10).next()
except ValueError:
assert True
else:
assert False
|
LauritzThaulow/fakelargefile
|
tests/test_segmenttail.py
|
Python
|
agpl-3.0
| 365
|
"""Charm Helpers saltstack - declare the state of your machines.
This helper enables you to declare your machine state, rather than
program it procedurally (and have to test each change to your procedures).
Your install hook can be as simple as:
{{{
from charmhelpers.contrib.saltstack import (
install_salt_support,
update_machine_state,
)
def install():
install_salt_support()
update_machine_state('machine_states/dependencies.yaml')
update_machine_state('machine_states/installed.yaml')
}}}
and won't need to change (nor will its tests) when you change the machine
state.
It uses the salt-minion package, which allows various formats for
specifying resources, such as:
{{{
/srv/{{ basedir }}:
file.directory:
- group: ubunet
- user: ubunet
- require:
- user: ubunet
- recurse:
- user
- group
ubunet:
group.present:
- gid: 1500
user.present:
- uid: 1500
- gid: 1500
- createhome: False
- require:
- group: ubunet
}}}
The docs for all the different state definitions are at:
http://docs.saltstack.com/ref/states/all/
TODO:
* Add test helpers which will ensure that machine state definitions
  are functionally (but not necessarily logically) correct (i.e. getting
  salt to parse all state defs).
* Add a link to a public bootstrap charm example / blogpost.
* Find a way to obviate the need to use the grains['charm_dir'] syntax
in templates.
"""
# Copyright 2013 Canonical Ltd.
#
# Authors:
# Charm Helpers Developers <juju@lists.ubuntu.com>
import subprocess
import charmhelpers.contrib.templating.contexts
import charmhelpers.core.host
import charmhelpers.core.hookenv
import charmhelpers.fetch
salt_grains_path = '/etc/salt/grains'
def install_salt_support(from_ppa=True):
"""Installs the salt-minion helper for machine state.
By default the salt-minion package is installed from
the saltstack PPA. If from_ppa is False you must ensure
that the salt-minion package is available in the apt cache.
"""
if from_ppa:
subprocess.check_call([
'/usr/bin/add-apt-repository',
'--yes',
'ppa:saltstack/salt',
])
subprocess.check_call(['/usr/bin/apt-get', 'update'])
# We install salt-common as salt-minion would run the salt-minion
# daemon.
charmhelpers.fetch.apt_install('salt-common')
def update_machine_state(state_path):
"""Update the machine state using the provided state declaration."""
charmhelpers.contrib.templating.contexts.juju_state_to_yaml(
salt_grains_path)
subprocess.check_call([
'salt-call',
'--local',
'state.template',
state_path,
])
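# Illustrative sketch (not part of this module): other hooks can reuse
# update_machine_state() with their own state files. The path below is
# hypothetical.
#
# def config_changed():
#     update_machine_state('machine_states/config-changed.yaml')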
|
Ubuntu-Solutions-Engineering/glance-simplestreams-sync-charm
|
hooks/charmhelpers/contrib/saltstack/__init__.py
|
Python
|
agpl-3.0
| 2,778
|
# -*- coding: utf-8 -*-
# © <YEAR(S)> <AUTHOR(S)>
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from . import account_invoice
from . import account_invoice_refund
|
Gebesa-Dev/Addons-gebesa
|
account_invoice_refund_mode/models/__init__.py
|
Python
|
agpl-3.0
| 187
|
"""
Tests the crowdsourced hinter xmodule.
"""
from mock import Mock, MagicMock
import unittest
import copy
from xmodule.crowdsource_hinter import CrowdsourceHinterModule
from xmodule.vertical_module import VerticalModule, VerticalDescriptor
from xblock.field_data import DictFieldData
from xblock.fragment import Fragment
from xblock.core import XBlock
from . import get_test_system
import json
class CHModuleFactory(object):
"""
Helps us make a CrowdsourceHinterModule with the specified internal
state.
"""
sample_problem_xml = """
<?xml version="1.0"?>
<crowdsource_hinter>
<problem display_name="Numerical Input" markdown="A numerical input problem accepts a line of text input from the student, and evaluates the input for correctness based on its numerical value. The answer is correct if it is within a specified numerical tolerance of the expected answer. Enter the number of fingers on a human hand: = 5 [explanation] If you look at your hand, you can count that you have five fingers. [explanation] " rerandomize="never" showanswer="finished">
<p>A numerical input problem accepts a line of text input from the student, and evaluates the input for correctness based on its numerical value.</p>
<p>The answer is correct if it is within a specified numerical tolerance of the expected answer.</p>
<p>Enter the number of fingers on a human hand:</p>
<numericalresponse answer="5">
<formulaequationinput/>
</numericalresponse>
<solution>
<div class="detailed-solution">
<p>Explanation</p>
<p>If you look at your hand, you can count that you have five fingers. </p>
</div>
</solution>
</problem>
</crowdsource_hinter>
"""
num = 0
@staticmethod
def next_num():
"""
Helps make unique names for our mock CrowdsourceHinterModule's
"""
CHModuleFactory.num += 1
return CHModuleFactory.num
@staticmethod
def create(hints=None,
previous_answers=None,
user_submissions=None,
user_voted=None,
moderate=None,
mod_queue=None):
"""
A factory method for making CHM's
"""
# Should have a single child, but it doesn't matter what that child is
field_data = {'data': CHModuleFactory.sample_problem_xml, 'children': [None]}
if hints is not None:
field_data['hints'] = hints
else:
field_data['hints'] = {
'24.0': {'0': ['Best hint', 40],
'3': ['Another hint', 30],
'4': ['A third hint', 20],
'6': ['A less popular hint', 3]},
'25.0': {'1': ['Really popular hint', 100]}
}
if mod_queue is not None:
field_data['mod_queue'] = mod_queue
else:
field_data['mod_queue'] = {
'24.0': {'2': ['A non-approved hint']},
'26.0': {'5': ['Another non-approved hint']}
}
if previous_answers is not None:
field_data['previous_answers'] = previous_answers
else:
field_data['previous_answers'] = [
['24.0', [0, 3, 4]],
['29.0', []]
]
if user_submissions is not None:
field_data['user_submissions'] = user_submissions
else:
field_data['user_submissions'] = ['24.0', '29.0']
if user_voted is not None:
field_data['user_voted'] = user_voted
if moderate is not None:
field_data['moderate'] = moderate
descriptor = Mock(weight='1')
# Make the descriptor have a capa problem child.
capa_descriptor = MagicMock()
capa_descriptor.name = 'capa'
capa_descriptor.displayable_items.return_value = [capa_descriptor]
descriptor.get_children.return_value = [capa_descriptor]
# Make a fake capa module.
capa_module = MagicMock()
capa_module.lcp = MagicMock()
responder = MagicMock()
def validate_answer(answer):
""" A mock answer validator - simulates a numerical response"""
try:
float(answer)
return True
except ValueError:
return False
responder.validate_answer = validate_answer
def compare_answer(ans1, ans2):
""" A fake answer comparer """
return ans1 == ans2
responder.compare_answer = compare_answer
capa_module.lcp.responders = {'responder0': responder}
capa_module.displayable_items.return_value = [capa_module]
system = get_test_system()
# Make the system have a marginally-functional get_module
def fake_get_module(descriptor):
"""
A fake module-maker.
"""
return capa_module
system.get_module = fake_get_module
module = CrowdsourceHinterModule(descriptor, system, DictFieldData(field_data), Mock())
system.xmodule_instance = module
return module
class VerticalWithModulesFactory(object):
"""
Makes a vertical with several crowdsourced hinter modules inside.
Used to make sure that several crowdsourced hinter modules can co-exist
on one vertical.
"""
sample_problem_xml = """<?xml version="1.0"?>
<vertical display_name="Test vertical">
<crowdsource_hinter>
<problem display_name="Numerical Input" markdown=" " rerandomize="never" showanswer="finished">
<p>Test numerical problem.</p>
<numericalresponse answer="5">
<formulaequationinput/>
</numericalresponse>
<solution>
<div class="detailed-solution">
<p>Explanation</p>
<p>If you look at your hand, you can count that you have five fingers. </p>
</div>
</solution>
</problem>
</crowdsource_hinter>
<crowdsource_hinter>
<problem display_name="Numerical Input" markdown=" " rerandomize="never" showanswer="finished">
<p>Another test numerical problem.</p>
<numericalresponse answer="5">
<formulaequationinput/>
</numericalresponse>
<solution>
<div class="detailed-solution">
<p>Explanation</p>
<p>If you look at your hand, you can count that you have five fingers. </p>
</div>
</solution>
</problem>
</crowdsource_hinter>
</vertical>
"""
num = 0
@staticmethod
def next_num():
"""Increments a global counter for naming."""
CHModuleFactory.num += 1
return CHModuleFactory.num
@staticmethod
def create():
"""Make a vertical."""
field_data = {'data': VerticalWithModulesFactory.sample_problem_xml}
system = get_test_system()
descriptor = VerticalDescriptor.from_xml(VerticalWithModulesFactory.sample_problem_xml, system)
module = VerticalModule(system, descriptor, field_data)
return module
class FakeChild(XBlock):
"""
A fake Xmodule.
"""
def __init__(self):
self.runtime = get_test_system()
self.student_view = Mock(return_value=Fragment(self.get_html()))
self.save = Mock()
self.id = 'i4x://this/is/a/fake/id'
def get_html(self):
"""
Return a fake html string.
"""
return u'This is supposed to be test html.'
class CrowdsourceHinterTest(unittest.TestCase):
"""
In the below tests, '24.0' represents a wrong answer, and '42.5' represents
a correct answer.
"""
def test_gethtml(self):
"""
A simple test of get_html - make sure it returns the html of the inner
problem.
"""
mock_module = CHModuleFactory.create()
def fake_get_display_items():
"""
A mock of get_display_items
"""
return [FakeChild()]
mock_module.get_display_items = fake_get_display_items
out_html = mock_module.render('student_view').content
self.assertTrue('This is supposed to be test html.' in out_html)
self.assertTrue('i4x://this/is/a/fake/id' in out_html)
def test_gethtml_nochild(self):
"""
get_html, except the module has no child :( Should return a polite
error message.
"""
mock_module = CHModuleFactory.create()
def fake_get_display_items():
"""
Returns no children.
"""
return []
mock_module.get_display_items = fake_get_display_items
out_html = mock_module.render('student_view').content
self.assertTrue('Error in loading crowdsourced hinter' in out_html)
@unittest.skip("Needs to be finished.")
def test_gethtml_multiple(self):
"""
Makes sure that multiple crowdsourced hinters play nice, when get_html
is called.
NOT WORKING RIGHT NOW
"""
mock_module = VerticalWithModulesFactory.create()
out_html = mock_module.render('student_view').content
self.assertTrue('Test numerical problem.' in out_html)
self.assertTrue('Another test numerical problem.' in out_html)
def test_numerical_answer_to_str(self):
"""
Tests the get request to string converter for numerical responses.
"""
mock_module = CHModuleFactory.create()
get = {'response1': '4'}
parsed = mock_module.numerical_answer_to_str(get)
self.assertTrue(parsed == '4')
def test_formula_answer_to_str(self):
"""
Tests the get request to string converter for formula responses.
"""
mock_module = CHModuleFactory.create()
get = {'response1': 'x*y^2'}
parsed = mock_module.formula_answer_to_str(get)
self.assertTrue(parsed == 'x*y^2')
def test_gethint_0hint(self):
"""
Someone asks for a hint, when there's no hint to give.
- Output should be blank.
- New entry should be added to previous_answers
"""
mock_module = CHModuleFactory.create()
json_in = {'problem_name': '26.0'}
out = mock_module.get_hint(json_in)
print mock_module.previous_answers
self.assertTrue(out is None)
self.assertTrue('26.0' in mock_module.user_submissions)
def test_gethint_unparsable(self):
"""
Someone submits an answer that is in the wrong format.
- The answer should not be added to previous_answers.
"""
mock_module = CHModuleFactory.create()
old_answers = copy.deepcopy(mock_module.previous_answers)
json_in = 'blah'
out = mock_module.get_hint(json_in)
self.assertTrue(out is None)
self.assertTrue(mock_module.previous_answers == old_answers)
def test_gethint_signature_error(self):
"""
Someone submits an answer that cannot be calculated as a float.
Nothing should change.
"""
mock_module = CHModuleFactory.create()
old_answers = copy.deepcopy(mock_module.previous_answers)
old_user_submissions = copy.deepcopy(mock_module.user_submissions)
json_in = {'problem1': 'fish'}
out = mock_module.get_hint(json_in)
self.assertTrue(out is None)
self.assertTrue(mock_module.previous_answers == old_answers)
self.assertTrue(mock_module.user_submissions == old_user_submissions)
def test_gethint_1hint(self):
"""
Someone asks for a hint, with exactly one hint in the database.
Output should contain that hint.
"""
mock_module = CHModuleFactory.create()
json_in = {'problem_name': '25.0'}
out = mock_module.get_hint(json_in)
self.assertTrue('Really popular hint' in out['hints'])
# Also make sure that the input gets added to user_submissions,
# and that the hint is logged in previous_answers.
self.assertTrue('25.0' in mock_module.user_submissions)
self.assertTrue(['25.0', ['1']] in mock_module.previous_answers)
def test_gethint_manyhints(self):
"""
Someone asks for a hint, with many matching hints in the database.
- The top-rated hint should be returned.
- Two other random hints should be returned.
Currently, the best hint could be returned twice - need to fix this
in implementation.
"""
mock_module = CHModuleFactory.create()
json_in = {'problem_name': '24.0'}
out = mock_module.get_hint(json_in)
self.assertTrue('Best hint' in out['hints'])
self.assertTrue(len(out['hints']) == 3)
def test_getfeedback_0wronganswers(self):
"""
Someone has gotten the problem correct on the first try.
Output should be empty.
"""
mock_module = CHModuleFactory.create(previous_answers=[], user_submissions=[])
json_in = {'problem_name': '42.5'}
out = mock_module.get_feedback(json_in)
print out
self.assertTrue(out is None)
def test_getfeedback_1wronganswer_nohints(self):
"""
Someone has gotten the problem correct, with one previous wrong
answer. However, we don't actually have hints for this problem.
There should be a dialog to submit a new hint.
"""
mock_module = CHModuleFactory.create(previous_answers=[['26.0', [None, None, None]]])
json_in = {'problem_name': '42.5'}
out = mock_module.get_feedback(json_in)
self.assertTrue(out['answer_to_hints'] == {'26.0': {}})
def test_getfeedback_1wronganswer_withhints(self):
"""
Same as above, except the user did see hints. There should be
a voting dialog, with the correct choices, plus a hint submission
dialog.
"""
mock_module = CHModuleFactory.create(previous_answers=[['24.0', [0, 3, None]]])
json_in = {'problem_name': '42.5'}
out = mock_module.get_feedback(json_in)
self.assertTrue(len(out['answer_to_hints']['24.0']) == 2)
def test_getfeedback_missingkey(self):
"""
Someone gets a problem correct, but one of the hints that he saw
earlier (pk=100) has been deleted. Should just skip that hint.
"""
mock_module = CHModuleFactory.create(
previous_answers=[['24.0', [0, 100, None]]])
json_in = {'problem_name': '42.5'}
out = mock_module.get_feedback(json_in)
self.assertTrue(len(out['answer_to_hints']['24.0']) == 1)
def test_vote_nopermission(self):
"""
A user tries to vote for a hint, but he has already voted!
Should not change any vote tallies.
"""
mock_module = CHModuleFactory.create(user_voted=True)
json_in = {'answer': '24.0', 'hint': 1, 'pk_list': json.dumps([['24.0', 1], ['24.0', 3]])}
old_hints = copy.deepcopy(mock_module.hints)
mock_module.tally_vote(json_in)
self.assertTrue(mock_module.hints == old_hints)
def test_vote_withpermission(self):
"""
A user votes for a hint.
Also tests vote result rendering.
"""
mock_module = CHModuleFactory.create(
previous_answers=[['24.0', [0, 3, None]]])
json_in = {'answer': '24.0', 'hint': 3, 'pk_list': json.dumps([['24.0', 0], ['24.0', 3]])}
dict_out = mock_module.tally_vote(json_in)
self.assertTrue(mock_module.hints['24.0']['0'][1] == 40)
self.assertTrue(mock_module.hints['24.0']['3'][1] == 31)
self.assertTrue(['Best hint', 40] in dict_out['hint_and_votes'])
self.assertTrue(['Another hint', 31] in dict_out['hint_and_votes'])
def test_vote_unparsable(self):
"""
A user somehow votes for an unparsable answer.
Should return a friendly error.
(This is an unusual exception path - I don't know how it occurs,
except if you manually make a post request. But, it seems to happen
occasionally.)
"""
mock_module = CHModuleFactory.create()
# None means that the answer couldn't be parsed.
mock_module.answer_signature = lambda text: None
json_in = {'answer': 'fish', 'hint': 3, 'pk_list': '[]'}
dict_out = mock_module.tally_vote(json_in)
print dict_out
self.assertTrue(dict_out == {'error': 'Failure in voting!'})
def test_vote_nohint(self):
"""
A user somehow votes for a hint that doesn't exist.
Should return a friendly error.
"""
mock_module = CHModuleFactory.create()
json_in = {'answer': '24.0', 'hint': '25', 'pk_list': '[]'}
dict_out = mock_module.tally_vote(json_in)
self.assertTrue(dict_out == {'error': 'Failure in voting!'})
def test_vote_badpklist(self):
"""
Some of the pk's specified in pk_list are invalid.
Should just skip those.
"""
mock_module = CHModuleFactory.create()
json_in = {'answer': '24.0', 'hint': '0', 'pk_list': json.dumps([['24.0', 0], ['24.0', 12]])}
hint_and_votes = mock_module.tally_vote(json_in)['hint_and_votes']
self.assertTrue(['Best hint', 41] in hint_and_votes)
self.assertTrue(len(hint_and_votes) == 1)
def test_submithint_nopermission(self):
"""
A user tries to submit a hint, but he has already voted.
"""
mock_module = CHModuleFactory.create(user_voted=True)
json_in = {'answer': '29.0', 'hint': 'This is a new hint.'}
print mock_module.user_voted
mock_module.submit_hint(json_in)
print mock_module.hints
self.assertTrue('29.0' not in mock_module.hints)
def test_submithint_withpermission_new(self):
"""
A user submits a hint to an answer for which no hints
exist yet.
"""
mock_module = CHModuleFactory.create()
json_in = {'answer': '29.0', 'hint': 'This is a new hint.'}
mock_module.submit_hint(json_in)
self.assertTrue('29.0' in mock_module.hints)
def test_submithint_withpermission_existing(self):
"""
A user submits a hint to an answer that has other hints
already.
"""
mock_module = CHModuleFactory.create(previous_answers=[['25.0', [1, None, None]]])
json_in = {'answer': '25.0', 'hint': 'This is a new hint.'}
mock_module.submit_hint(json_in)
# Make a hint request.
json_in = {'problem_name': '25.0'}
out = mock_module.get_hint(json_in)
self.assertTrue('This is a new hint.' in out['hints'])
def test_submithint_moderate(self):
"""
A user submits a hint, but moderation is on. The hint should
show up in the mod_queue, not the public-facing hints
dict.
"""
mock_module = CHModuleFactory.create(moderate='True')
json_in = {'answer': '29.0', 'hint': 'This is a new hint.'}
mock_module.submit_hint(json_in)
self.assertTrue('29.0' not in mock_module.hints)
self.assertTrue('29.0' in mock_module.mod_queue)
def test_submithint_escape(self):
"""
Make sure that hints are being html-escaped.
"""
mock_module = CHModuleFactory.create()
json_in = {'answer': '29.0', 'hint': '<script> alert("Trololo"); </script>'}
mock_module.submit_hint(json_in)
self.assertTrue(mock_module.hints['29.0']['0'][0] == u'<script> alert("Trololo"); </script>')
def test_submithint_unparsable(self):
mock_module = CHModuleFactory.create()
mock_module.answer_signature = lambda text: None
json_in = {'answer': 'fish', 'hint': 'A hint'}
dict_out = mock_module.submit_hint(json_in)
print dict_out
print mock_module.hints
self.assertTrue('error' in dict_out)
self.assertTrue(None not in mock_module.hints)
self.assertTrue('fish' not in mock_module.hints)
def test_template_gethint(self):
"""
Test the templates for get_hint.
"""
mock_module = CHModuleFactory.create()
def fake_get_hint(_):
"""
Creates a rendering dictionary, with which we can test
the templates.
"""
return {'best_hint': 'This is the best hint.',
'rand_hint_1': 'A random hint',
'rand_hint_2': 'Another random hint',
'answer': '42.5'}
mock_module.get_hint = fake_get_hint
json_in = {'problem_name': '42.5'}
out = json.loads(mock_module.handle_ajax('get_hint', json_in))['contents']
self.assertTrue('This is the best hint.' in out)
self.assertTrue('A random hint' in out)
self.assertTrue('Another random hint' in out)
def test_template_feedback(self):
"""
Test the templates for get_feedback.
NOT FINISHED
from lxml import etree
mock_module = CHModuleFactory.create()
def fake_get_feedback(get):
index_to_answer = {'0': '42.0', '1': '9000.01'}
index_to_hints = {'0': [('A hint for 42', 12),
('Another hint for 42', 14)],
'1': [('A hint for 9000.01', 32)]}
return {'index_to_hints': index_to_hints, 'index_to_answer': index_to_answer}
mock_module.get_feedback = fake_get_feedback
json_in = {'problem_name': '42.5'}
out = json.loads(mock_module.handle_ajax('get_feedback', json_in))['contents']
html_tree = etree.XML(out)
# To be continued...
"""
pass
|
TsinghuaX/edx-platform
|
common/lib/xmodule/xmodule/tests/test_crowdsource_hinter.py
|
Python
|
agpl-3.0
| 22,068
|
from django.urls import path
import users.views
urlpatterns = [
path("settings/", users.views.user_settings, name="settings"),
path("reset_token/", users.views.reset_token, name="reset_token"),
path("panel_hide/", users.views.panel_hide, name="hide_new_panel"),
]
|
UrLab/beta402
|
users/urls.py
|
Python
|
agpl-3.0
| 278
|
# function.py - views for evaluating SQL functions on SQLAlchemy models
#
# Copyright 2011 Lincoln de Sousa <lincoln@comum.org>.
# Copyright 2012, 2013, 2014, 2015, 2016 Jeffrey Finkelstein
# <jeffrey.finkelstein@gmail.com> and contributors.
#
# This file is part of Flask-Restless.
#
# Flask-Restless is distributed under both the GNU Affero General Public
# License version 3 and under the 3-clause BSD license. For more
# information, see LICENSE.AGPL and LICENSE.BSD.
"""Views for evaluating functions on a SQLAlchemy model.
The main class in this module, :class:`FunctionAPI`, is a
:class:`~flask.MethodView` subclass that creates endpoints for fetching
the result of evaluating a SQL function on a SQLAlchemy model.
"""
from flask import json
from flask import request
from sqlalchemy.exc import OperationalError
from .base import error_response
from .base import ModelView
from .helpers import evaluate_functions
class FunctionAPI(ModelView):
"""Provides method-based dispatching for :http:method:`get` requests which
wish to apply SQL functions to all instances of a model.
.. versionadded:: 0.4
"""
def get(self):
"""Returns the result of evaluating the SQL functions specified in the
body of the request.
For a description of the request and response formats, see
:ref:`functionevaluation`.
"""
if 'functions' not in request.args:
detail = 'Must provide `functions` query parameter'
return error_response(400, detail=detail)
functions = request.args.get('functions')
try:
data = json.loads(str(functions)) or []
except (TypeError, ValueError, OverflowError) as exception:
detail = 'Unable to decode JSON in `functions` query parameter'
return error_response(400, cause=exception, detail=detail)
try:
result = evaluate_functions(self.session, self.model, data)
except AttributeError as exception:
detail = 'No such field "{0}"'.format(exception.field)
return error_response(400, cause=exception, detail=detail)
except KeyError as exception:
detail = str(exception)
return error_response(400, cause=exception, detail=detail)
except OperationalError as exception:
detail = 'No such function "{0}"'.format(exception.function)
return error_response(400, cause=exception, detail=detail)
return dict(data=result)
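# Illustrative sketch (not part of this module): a client might issue a
# request such as
#     GET /eval/person?functions=[{"name": "sum", "field": "age"}]
# where the JSON list pairs SQL function names with model fields; the view
# above would respond with a body of the form {"data": [<results>]}. The
# exact endpoint path depends on how the API manager registers this view.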
|
jwg4/flask-restless
|
flask_restless/views/function.py
|
Python
|
agpl-3.0
| 2,512
|
#!/usr/bin/env python
# coding=utf-8
# lachesis automates the segmentation of a transcript into closed captions
#
# Copyright (C) 2016-2017, Alberto Pettarin (www.albertopettarin.it)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
TBW
"""
from __future__ import absolute_import
from __future__ import print_function
from lachesis.elements import Span
from lachesis.elements import Token
from lachesis.language import Language
from lachesis.nlpwrappers.base import BaseWrapper
from lachesis.nlpwrappers.upostags import UniversalPOSTags
class PatternWrapper(BaseWrapper):
"""
TBW
"""
CODE = u"pattern"
LANGUAGES = [
Language.DUTCH,
Language.ENGLISH,
Language.FRENCH,
Language.GERMAN,
Language.ITALIAN,
Language.SPANISH,
]
UPOSTAG_MAP = {
#
# English
#
u"NN": UniversalPOSTags.NOUN,
u"NN-LOC": UniversalPOSTags.PROPN,
u"NN-ORG": UniversalPOSTags.PROPN,
u"NN-PERS": UniversalPOSTags.PROPN,
u"VB": UniversalPOSTags.VERB,
u"JJ": UniversalPOSTags.ADJ,
u"RB": UniversalPOSTags.ADV,
u"PR": UniversalPOSTags.PRON,
u"DT": UniversalPOSTags.DET,
u"PP": UniversalPOSTags.ADP,
u"NO": UniversalPOSTags.NUM,
u"CJ": UniversalPOSTags.CCONJ,
u"UH": UniversalPOSTags.INTJ,
u"PT": UniversalPOSTags.PART,
u".": UniversalPOSTags.PUNCT,
u"X": UniversalPOSTags.X,
#
# Italian
u"J": UniversalPOSTags.ADJ,
}
def __init__(self, language):
super(PatternWrapper, self).__init__(language)
if self.language == Language.ENGLISH:
from pattern.en import parse as func_parse
from pattern.en import split as func_split
elif self.language == Language.ITALIAN:
from pattern.it import parse as func_parse
from pattern.it import split as func_split
elif self.language == Language.SPANISH:
from pattern.es import parse as func_parse
from pattern.es import split as func_split
elif self.language == Language.FRENCH:
from pattern.fr import parse as func_parse
from pattern.fr import split as func_split
elif self.language == Language.GERMAN:
from pattern.de import parse as func_parse
from pattern.de import split as func_split
elif self.language == Language.DUTCH:
from pattern.nl import parse as func_parse
from pattern.nl import split as func_split
else:
raise ValueError(u"No pattern submodule for the given language '%s'." % language)
self.func_parse = func_parse
self.func_split = func_split
#
# From the docs:
# http://www.clips.ua.ac.be/pages/pattern-en#parser
#
# The output of parse() is a subclass of unicode called TaggedString
# whose TaggedString.split() method by default yields a list of sentences,
# where each sentence is a list of tokens,
# where each token is a list of the word + its tags.
#
# parse(string,
# tokenize = True, # Split punctuation marks from words?
# tags = True, # Parse part-of-speech tags? (NN, JJ, ...)
# chunks = True, # Parse chunks? (NP, VP, PNP, ...)
# relations = False, # Parse chunk relations? (-SBJ, -OBJ, ...)
# lemmata = False, # Parse lemmata? (ate => eat)
# encoding = 'utf-8' # Input string encoding.
# tagset = None) # Penn Treebank II (default) or UNIVERSAL.
#
def _analyze(self, doc_string):
sentences = []
tagged_string = self.func_parse(
doc_string,
tokenize=True,
tags=True,
chunks=False,
relations=False,
lemmata=False,
tagset="universal"
)
for lib_sentence in self.func_split(tagged_string):
sentence_tokens = []
for lib_token in lib_sentence:
#
# NOTE: if chunks=True use:
# raw, upos_tag, chunk_tag, pnp_tag = lib_token.tags
# token = Token(
# raw=raw,
# upos_tag=self.UPOSTAG_MAP[upos_tag],
# chunk_tag=chunk_tag,
# pnp_tag=pnp_tag
# )
#
raw, upos_tag = lib_token.tags
# NOTE: pattern replaces "/" with "&slash;"
# so we need to convert it back
raw = raw.replace(u"&slash;", u"/")
token = self._create_token(raw, upos_tag)
sentence_tokens.append(token)
sentences.append(sentence_tokens)
return sentences
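# Illustrative sketch (assumes the `pattern` library is installed): for
# u"I like cheese." the wrapper above yields one sentence, i.e. a list of
# Token objects built by self._create_token(), one per word plus the final
# punctuation mark, each carrying a mapped universal POS tag.
#
# wrapper = PatternWrapper(Language.ENGLISH)
# sentences = wrapper._analyze(u"I like cheese.")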
|
readbeyond/lachesis
|
lachesis/nlpwrappers/pattern.py
|
Python
|
agpl-3.0
| 5,498
|
# -*- coding: utf-8 -*-
# Copyright 2018 Lorenzo Battistini - Agile Business Group
# License LGPL-3.0 or later (https://www.gnu.org/licenses/lgpl).
{
"name": "Causali pagamento per ritenute d'acconto",
"version": "10.0.1.0.0",
"development_status": "Beta",
"category": "Hidden",
"website": "https://github.com/OCA/l10n-italy",
"author": "Agile Business Group, Odoo Community Association (OCA)",
"license": "LGPL-3",
"application": False,
"installable": True,
"depends": [
"l10n_it_withholding_tax",
"l10n_it_causali_pagamento",
],
"data": [
"views/withholding_tax.xml",
],
'auto_install': True,
}
|
linkitspa/l10n-italy
|
l10n_it_withholding_tax_causali/__manifest__.py
|
Python
|
agpl-3.0
| 677
|
# -*- coding: utf-8 -*-
#Copyright (C) 2011 Seán Hayes
#Python imports
import logging
import pdb
#Django imports
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.db import models, IntegrityError
from django.db.models.signals import post_save
from django.utils import safestring
#App imports
from exceptions import TeamAlreadyExistsError, TeamNoLongerExistsError, TeamFullError, NotOnATeamError, TeamAlreadyHasALeaderError, NotOnSameTeamError, NotALeaderError
from managers import *
from swarm_war.core.models import FacebookRequest
from swarm_war.core.managers import FacebookRequestManager
logger = logging.getLogger(__name__)
# Create your models here.
MAX_TEAM_SIZE = 10
class Team(models.Model):
name = models.CharField(unique=True, max_length=100)
def get_leader(self):
leader = None
try:
#TODO: use filter to better tolerate bugs (e.g. more than one leader) that may creep up
leader = self.members.get(leader=True).user
except TeamProfile.DoesNotExist:
pass
except Exception as e:
logger.error(e)
return leader
def html(self):
s = u'<a href="%s">%s</a>' % (reverse('teams_view', args=[self.id]), self.name)
return safestring.mark_safe(s)
def __unicode__(self):
return self.name
class TeamProfile(models.Model):
user = models.OneToOneField(User)
team = models.ForeignKey(Team, null=True, blank=True, related_name="members")
leader = models.BooleanField(default=False)
#TODO: need a leave_team() method that cleans up teams with no members left
def become_leader(self):
if self.team is None:
raise NotOnATeamError(self.user)
elif self.team.get_leader() is not None:
raise TeamAlreadyHasALeaderError()
else:
self.leader = True
self.save()
def create_team(self, name):
try:
team = Team(name=name)
team.save()
except IntegrityError as e:
raise TeamAlreadyExistsError()
self.team = team
self.leader = True
self.save()
return team
def join_team(self, team):
count = team.members.count()
if count < MAX_TEAM_SIZE:
self.team = team
self.leader = False
self.save()
else:
raise TeamFullError()
def kick_out(self, user):
if self.team is None:
raise NotOnATeamError(self.user)
if not self.leader:
raise NotALeaderError()
user_tp = TeamProfile.objects.get(user=user)
if user_tp.team is None:
raise NotOnATeamError(user)
if user_tp.team.id != self.team.id:
raise NotOnSameTeamError()
user_tp.leave_team()
def leave_team(self):
team = self.team
self.team = None
self.leader = False
self.save()
count = team.members.count()
if count == 0:
team.delete()
def __unicode__(self):
return u'%s: %s' % (self.__class__.__name__, self.user)
def create_profile(user):
"""
Called using a post_save trigger on User, so when a new User is added a Profile is created as well.
"""
#create a profile
profile = TeamProfile(user=user)
profile.save()
def user_save_handler(sender, instance, created, **kwargs):
if created:
create_profile(instance)
post_save.connect(user_save_handler, sender=User)
class TeamFacebookRequest(FacebookRequest):
#has to be nullable so that this doesn't get deleted when a related team gets deleted
team = models.ForeignKey(Team, null=True)
objects = FacebookRequestManager()
def html(self):
s = u'%s has invited you to join a Team: %s.' % (self.user.username, self.team.html())
return safestring.mark_safe(s)
def confirm(self, friend):
try:
if self.team is None:
raise TeamNoLongerExistsError()
if self.user.id not in [u.id for u in self.team.members.all()]:
raise Exception('Can\'t join %s because %s isn\'t a member anymore.' % (self.team.name, self.user.username))
friend.teamprofile.join_team(self.team)
finally:
super(TeamFacebookRequest, self).confirm(friend)
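# Illustrative usage sketch (assumes existing User instances `user` and
# `other`; profiles are created by the post_save handler above):
#
# profile = user.teamprofile
# team = profile.create_team(u'Red Team')   # creator becomes leader
# other.teamprofile.join_team(team)         # raises TeamFullError once
#                                           # MAX_TEAM_SIZE is reached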
|
SeanHayes/swarm-war
|
swarm_war/teams/models.py
|
Python
|
agpl-3.0
| 3,862
|
# coding=utf-8
"""
DCRM - Darwin Cydia Repository Manager
Copyright (C) 2017 WU Zheng <i.82@me.com>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published
by the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from django.contrib import admin
from django.contrib.admin.views.decorators import staff_member_required
from django.shortcuts import render
from django.utils.translation import ugettext as _
@staff_member_required
def about_view(request):
"""
:param request: Django Request
:return: Django HttpResponse
:rtype: HttpResponse
"""
context = admin.site.each_context(request)
context.update({
'title': _('About'),
'version': "4.1",
})
template = 'admin/help/about.html'
return render(request, template, context)
|
82Flex/DCRM
|
WEIPDCRM/views/admin/help/about.py
|
Python
|
agpl-3.0
| 1,308
|
##############################################################################
#
# OSIS stands for Open Student Information System. It's an application
# designed to manage the core business of higher education institutions,
# such as universities, faculties, institutes and professional schools.
# The core business involves the administration of students, teachers,
# courses, programs and so on.
#
# Copyright (C) 2015-2021 Université catholique de Louvain (http://www.uclouvain.be)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of this license - GNU General Public License - is available
# at the root of the source code of this program. If not,
# see http://www.gnu.org/licenses/.
#
##############################################################################
from django.http import HttpResponseRedirect
from django.urls import reverse
from django.utils.functional import cached_property
from django.views.generic import TemplateView
from assessments.calendar.scores_exam_submission_calendar import ScoresExamSubmissionCalendar
from base.utils.cache import CacheFilterMixin
from osis_role.contrib.views import PermissionRequiredMixin
class ScoreEncodingProgressOverviewBaseView(PermissionRequiredMixin, CacheFilterMixin, TemplateView):
# PermissionRequiredMixin
permission_required = "assessments.can_access_scoreencoding"
# CacheFilterMixin
timeout = 10800 # seconds = 3 hours
@cached_property
def person(self):
return self.request.user.person
def dispatch(self, request, *args, **kwargs):
opened_calendars = ScoresExamSubmissionCalendar().get_opened_academic_events()
if not opened_calendars:
redirect_url = reverse('outside_scores_encodings_period')
return HttpResponseRedirect(redirect_url)
return super().dispatch(request, *args, **kwargs)
def get_context_data(self, **kwargs):
return {
**super().get_context_data(**kwargs),
'person': self.person
}
def get_permission_object(self):
return None
|
uclouvain/osis
|
assessments/views/common/score_encoding_progress_overview.py
|
Python
|
agpl-3.0
| 2,568
|
"""A wait callback to allow psycopg2 cooperation with gevent.
Use `patch_psycopg()` to enable gevent support in Psycopg.
"""
# Copyright (C) 2010-2012 Daniele Varrazzo <daniele.varrazzo@gmail.com>
# All rights reserved. See COPYING file for details.
from __future__ import absolute_import
import psycopg2
from psycopg2.extras import RealDictConnection
from psycopg2 import extensions
from gevent.coros import Semaphore
from gevent.local import local as gevent_local
from gevent.socket import wait_read, wait_write
def patch_psycopg():
"""Configure Psycopg to be used with gevent in non-blocking way."""
if not hasattr(extensions, 'set_wait_callback'):
raise ImportError(
"support for coroutines not available in this Psycopg version (%s)"
% psycopg2.__version__)
extensions.set_wait_callback(gevent_wait_callback)
def gevent_wait_callback(conn, timeout=None):
"""A wait callback useful to allow gevent to work with Psycopg."""
while 1:
state = conn.poll()
if state == extensions.POLL_OK:
break
elif state == extensions.POLL_READ:
wait_read(conn.fileno(), timeout=timeout)
elif state == extensions.POLL_WRITE:
wait_write(conn.fileno(), timeout=timeout)
else:
raise psycopg2.OperationalError(
"Bad result from poll: %r" % state)
class ConnectionPool(object):
def __init__(self, dsn, max_con=10, max_idle=3,
connection_factory=RealDictConnection):
self.dsn = dsn
self.max_con = max_con
self.max_idle = max_idle
self.connection_factory = connection_factory
self._sem = Semaphore(max_con)
self._free = []
self._local = gevent_local()
def __enter__(self):
self._sem.acquire()
try:
if getattr(self._local, 'con', None) is not None:
raise RuntimeError("Attempting to re-enter connection pool?")
if self._free:
con = self._free.pop()
else:
con = psycopg2.connect(
dsn=self.dsn, connection_factory=self.connection_factory)
self._local.con = con
return con
except StandardError:
self._sem.release()
raise
def __exit__(self, exc_type, exc_value, traceback):
try:
if self._local.con is None:
raise RuntimeError("Exit connection pool with no connection?")
if exc_type is not None:
self.rollback()
else:
self.commit()
if len(self._free) < self.max_idle:
self._free.append(self._local.con)
self._local.con = None
finally:
self._sem.release()
def commit(self):
self._local.con.commit()
def rollback(self):
self._local.con.rollback()
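# Minimal usage sketch (assumes a reachable PostgreSQL instance; the DSN
# below is a placeholder). Note that gevent.coros is the module path for
# the gevent releases this file targets; newer gevent moved Semaphore to
# gevent.lock.
if __name__ == '__main__':
    patch_psycopg()
    pool = ConnectionPool('dbname=test', max_con=4)
    with pool as con:
        cur = con.cursor()
        cur.execute('SELECT 1 AS answer')
        print cur.fetchone()  # RealDictConnection rows print as dicts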
|
funkring/fdoo
|
psycogreen/gevent.py
|
Python
|
agpl-3.0
| 2,927
|
# Copyright 2020 initOS GmbH
# Copyright 2012-2018 Therp BV <https://therp.nl>
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl.html).
{
"name": "LDAP groups assignment",
"version": "11.0.1.0.0",
"depends": ["auth_ldap"],
"author": "initOS GmbH, Therp BV, Odoo Community Association (OCA)",
"website": "https://github.com/OCA/server-tools",
"license": "AGPL-3",
"summary": "Adds user accounts to groups based on rules defined "
"by the administrator.",
"category": "Authentication",
"data": [
'views/base_config_settings.xml',
'security/ir.model.access.csv',
],
"external_dependencies": {
'python': ['ldap'],
},
}
|
brain-tec/server-tools
|
users_ldap_groups/__manifest__.py
|
Python
|
agpl-3.0
| 704
|
from twisted.internet.defer import inlineCallbacks, fail, succeed
from globaleaks import models
from globaleaks.orm import transact
from globaleaks.tests import helpers
from globaleaks.jobs.delivery_sched import DeliverySchedule
from globaleaks.jobs.notification_sched import NotificationSchedule, MailGenerator
class TestNotificationSchedule(helpers.TestGLWithPopulatedDB):
@inlineCallbacks
def setUp(self):
yield helpers.TestGLWithPopulatedDB.setUp(self)
yield self.perform_full_submission_actions()
@transact
def get_scheduled_email_count(self, store):
return store.find(models.Mail).count()
@inlineCallbacks
def test_notification_schedule_success(self):
count = yield self.get_scheduled_email_count()
self.assertEqual(count, 0)
yield DeliverySchedule().run()
notification_schedule = NotificationSchedule()
notification_schedule.skip_sleep = True
yield notification_schedule.run()
count = yield self.get_scheduled_email_count()
self.assertEqual(count, 0)
@inlineCallbacks
def test_notification_schedule_failure(self):
count = yield self.get_scheduled_email_count()
self.assertEqual(count, 0)
yield DeliverySchedule().run()
notification_schedule = NotificationSchedule()
notification_schedule.skip_sleep = True
def sendmail(x, y, z):
return fail(True)
notification_schedule.sendmail = sendmail
for i in range(0, 10):
yield notification_schedule.run()
count = yield self.get_scheduled_email_count()
self.assertEqual(count, 40)
yield notification_schedule.run()
count = yield self.get_scheduled_email_count()
self.assertEqual(count, 0)
|
vodkina/GlobaLeaks
|
backend/globaleaks/tests/jobs/test_notification_sched.py
|
Python
|
agpl-3.0
| 1,811
|
from popit.models import Membership
def main():
memberships = Membership.objects.language("en").all()
for membership in memberships:
if not membership.organization:
if membership.post:
if membership.post.organization:
membership.organization = membership.post.organization
membership.save()
if __name__ == "__main__":
main()
|
Sinar/popit_ng
|
popit/utils/set_membership_org_from_post.py
|
Python
|
agpl-3.0
| 402
|
# ported from gnulib rev be7d73709d2b3bceb987f1be00a049bb7021bf87
#
# Copyright (C) 2014, Mark Laws.
# Copyright (C) 1999, 2002-2003, 2005-2007, 2009-2014 Free Software
# Foundation, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with this program; if not, see <http://www.gnu.org/licenses/>.
import ctypes
from flufl.enum import Enum
sizeof = ctypes.sizeof
Arg_type = Enum('Arg_type', [str(x.strip()) for x in '''
TYPE_NONE
TYPE_SCHAR
TYPE_UCHAR
TYPE_SHORT
TYPE_USHORT
TYPE_INT
TYPE_UINT
TYPE_LONGINT
TYPE_ULONGINT
TYPE_LONGLONGINT
TYPE_ULONGLONGINT
TYPE_DOUBLE
TYPE_LONGDOUBLE
TYPE_CHAR
TYPE_WIDE_CHAR
TYPE_STRING
TYPE_WIDE_STRING
TYPE_POINTER
TYPE_COUNT_SCHAR_POINTER
TYPE_COUNT_SHORT_POINTER
TYPE_COUNT_INT_POINTER
TYPE_COUNT_LONGINT_POINTER
TYPE_COUNT_LONGLONGINT_POINTER
'''.splitlines() if x != ''])
FLAG_GROUP = 1 # ' flag
FLAG_LEFT = 2 # - flag
FLAG_SHOWSIGN = 4 # + flag
FLAG_SPACE = 8 # space flag
FLAG_ALT = 16 # # flag
FLAG_ZERO = 32
# arg_index value indicating that no argument is consumed.
ARG_NONE = ~0
class Argument(object):
__slots__ = ['type', 'data']
class Arguments(object):
__slots__ = ['count', 'arg']
def __init__(self):
self.count = 0
self.arg = []
class Directive(object):
'''A parsed directive.'''
__slots__ = ['dir_start', 'dir_end', 'flags', 'width_start', 'width_end',
'width_arg_index', 'precision_start', 'precision_end',
'precision_arg_index', 'conversion', 'arg_index']
# conversion: d i o u x X f F e E g G a A c s p n U % but not C S
def __init__(self):
self.flags = 0
self.width_start = None
self.width_end = None
self.width_arg_index = ARG_NONE
self.precision_start = None
self.precision_end = None
self.precision_arg_index = ARG_NONE
self.arg_index = ARG_NONE
class Directives(object):
'''A parsed format string.'''
__slots__ = ['count', 'dir', 'max_width_length', 'max_precision_length']
def __init__(self):
self.count = 0
self.dir = []
def REGISTER_ARG(a, index, type):
n = index
while a.count <= n:
try:
a.arg[a.count]
except IndexError:
a.arg.append(Argument())
a.arg[a.count].type = Arg_type.TYPE_NONE
a.count += 1
if a.arg[n].type == Arg_type.TYPE_NONE:
a.arg[n].type = type
elif a.arg[n].type != type:
raise ValueError('ambiguous type for positional argument')
def conv_signed(c, flags):
# If 'long long' exists and is larger than 'long':
if flags >= 16 or flags & 4:
return c, Arg_type.TYPE_LONGLONGINT
else:
# If 'long long' exists and is the same as 'long', we parse "lld" into
# TYPE_LONGINT.
if flags >= 8:
type = Arg_type.TYPE_LONGINT
elif flags & 2:
type = Arg_type.TYPE_SCHAR
elif flags & 1:
type = Arg_type.TYPE_SHORT
else:
type = Arg_type.TYPE_INT
return c, type
def conv_unsigned(c, flags):
# If 'long long' exists and is larger than 'long':
if flags >= 16 or flags & 4:
return c, Arg_type.TYPE_ULONGLONGINT
else:
# If 'unsigned long long' exists and is the same as 'unsigned long', we
# parse "llu" into TYPE_ULONGINT.
if flags >= 8:
type = Arg_type.TYPE_ULONGINT
elif flags & 2:
type = Arg_type.TYPE_UCHAR
elif flags & 1:
type = Arg_type.TYPE_USHORT
else:
type = Arg_type.TYPE_UINT
return c, type
def conv_float(c, flags):
if flags >= 16 or flags & 4:
return c, Arg_type.TYPE_LONGDOUBLE
else:
return c, Arg_type.TYPE_DOUBLE
def conv_char(c, flags):
if flags >= 8:
return c, Arg_type.TYPE_WIDE_CHAR
else:
return c, Arg_type.TYPE_CHAR
def conv_widechar(c, flags):
c = 'c'
return c, Arg_type.TYPE_WIDE_CHAR
def conv_string(c, flags):
if flags >= 8:
return c, Arg_type.TYPE_WIDE_STRING
else:
return c, Arg_type.TYPE_STRING
def conv_widestring(c, flags):
c = 's'
return c, Arg_type.TYPE_WIDE_STRING
def conv_pointer(c, flags):
return c, Arg_type.TYPE_POINTER
def conv_intpointer(c, flags):
# If 'long long' exists and is larger than 'long':
if flags >= 16 or flags & 4:
return c, Arg_type.TYPE_COUNT_LONGLONGINT_POINTER
else:
# If 'long long' exists and is the same as 'long', we parse "lln" into
# TYPE_COUNT_LONGINT_POINTER.
if flags >= 8:
type = Arg_type.TYPE_COUNT_LONGINT_POINTER
elif flags & 2:
type = Arg_type.TYPE_COUNT_SCHAR_POINTER
elif flags & 1:
type = Arg_type.TYPE_COUNT_SHORT_POINTER
else:
type = Arg_type.TYPE_COUNT_INT_POINTER
return c, type
def conv_none(c, flags):
return c, Arg_type.TYPE_NONE
_conv_char = {
'd': conv_signed,
'i': conv_signed,
'o': conv_unsigned,
'u': conv_unsigned,
'x': conv_unsigned,
'X': conv_unsigned,
'f': conv_float,
'F': conv_float,
'e': conv_float,
'E': conv_float,
'g': conv_float,
'G': conv_float,
'a': conv_float,
'A': conv_float,
'c': conv_char,
'C': conv_widechar,
's': conv_string,
'S': conv_widestring,
'p': conv_pointer,
'n': conv_intpointer,
'%': conv_none
}
def printf_parse(fmt):
'''Parses the format string. Fills in the number N of directives, and fills
in directives[0], ..., directives[N-1], and sets directives[N].dir_start to
the end of the format string. Also fills in the arg_type fields of the
arguments and the needed count of arguments.'''
cp = 0 # index into format string
arg_posn = 0 # number of regular arguments consumed
max_width_length = 0
max_precision_length = 0
d = Directives()
a = Arguments()
while True:
try:
c = fmt[cp]
except IndexError:
break
cp += 1
if c == '%':
arg_index = ARG_NONE
d.dir.append(Directive())
dp = d.dir[d.count]
dp.dir_start = cp - 1
# Test for positional argument.
if fmt[cp].isdigit():
np = cp
while fmt[np].isdigit():
np += 1
if fmt[np] == '$':
n = 0
np = cp
while fmt[np].isdigit():
n = n * 10 + (ord(fmt[np]) - ord('0'))
np += 1
if n == 0:
raise ValueError('positional argument 0')
arg_index = n - 1
cp = np + 1
# Read the flags.
while True:
if fmt[cp] == '\'':
dp.flags |= FLAG_GROUP
cp += 1
elif fmt[cp] == '-':
dp.flags |= FLAG_LEFT
cp += 1
elif fmt[cp] == '+':
dp.flags |= FLAG_SHOWSIGN
cp += 1
elif fmt[cp] == ' ':
dp.flags |= FLAG_SPACE
cp += 1
elif fmt[cp] == '#':
dp.flags |= FLAG_ALT
cp += 1
elif fmt[cp] == '0':
dp.flags |= FLAG_ZERO
cp += 1
else:
break
# Parse the field width.
if fmt[cp] == '*':
dp.width_start = cp
cp += 1
dp.width_end = cp
if max_width_length < 1:
max_width_length = 1
# Test for positional argument.
if fmt[cp].isdigit():
np = cp
while fmt[np].isdigit():
np += 1
if fmt[np] == '$':
n = 0
np = cp
while fmt[np].isdigit():
n = n * 10 + (ord(fmt[np]) - ord('0'))
np += 1
if n == 0:
raise ValueError('positional argument 0')
dp.width_arg_index = n - 1
cp = np + 1
if dp.width_arg_index == ARG_NONE:
dp.width_arg_index = arg_posn
arg_posn += 1
REGISTER_ARG(a, dp.width_arg_index, Arg_type.TYPE_INT)
elif fmt[cp].isdigit():
dp.width_start = cp
while fmt[cp].isdigit():
cp += 1
dp.width_end = cp
width_length = dp.width_end - dp.width_start
if max_width_length < width_length:
max_width_length = width_length
# Parse the precision.
if fmt[cp] == '.':
cp += 1
if fmt[cp] == '*':
dp.precision_start = cp - 1
cp += 1
dp.precision_end = cp
if max_precision_length < 2:
max_precision_length = 2
# Test for positional argument.
if fmt[cp].isdigit():
np = cp
while fmt[np].isdigit():
np += 1
if fmt[np] == '$':
n = 0
np = cp
while fmt[np].isdigit():
n = n * 10 + (ord(fmt[np]) - ord('0'))
np += 1
if n == 0:
raise ValueError('positional argument 0')
dp.precision_arg_index = n - 1
cp = np + 1
if dp.precision_arg_index == ARG_NONE:
dp.precision_arg_index = arg_posn
arg_posn += 1
REGISTER_ARG(a, dp.precision_arg_index, Arg_type.TYPE_INT)
else:
dp.precision_start = cp - 1
while fmt[cp].isdigit():
cp += 1
dp.precision_end = cp
precision_length = dp.precision_end - dp.precision_start
if max_precision_length < precision_length:
max_precision_length = precision_length
# Parse argument type/size specifiers.
flags = 0
while True:
if fmt[cp] == 'h':
flags |= (1 << (flags & 1))
cp += 1
elif fmt[cp] == 'L':
flags |= 4
cp += 1
elif fmt[cp] == 'l':
flags += 8
cp += 1
elif fmt[cp] == 'j':
raise ValueError("don't know how to handle intmax_t")
elif fmt[cp] == 'z':
if sizeof(ctypes.c_size_t) > sizeof(ctypes.c_long):
# size_t = long long
flags += 16
elif sizeof(ctypes.c_size_t) > sizeof(ctypes.c_int):
# size_t = long
flags += 8
cp += 1
elif fmt[cp] == 't':
raise ValueError("don't know how to handle ptrdiff_t")
else:
break
# Read the conversion character.
c = fmt[cp]
cp += 1
try:
c, type = _conv_char[c](c, flags)
except KeyError:
raise ValueError('bad conversion character: %%%s' % c)
if type != Arg_type.TYPE_NONE:
dp.arg_index = arg_index
if dp.arg_index == ARG_NONE:
dp.arg_index = arg_posn
arg_posn += 1
REGISTER_ARG(a, dp.arg_index, type)
dp.conversion = c
dp.dir_end = cp
d.count += 1
d.dir.append(Directive())
d.dir[d.count].dir_start = cp
d.max_width_length = max_width_length
d.max_precision_length = max_precision_length
return d, a
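# Minimal usage sketch (behaviour inferred from the parser above): parse a
# format string and inspect the resulting directives and argument types.
if __name__ == '__main__':
    d, a = printf_parse('%d %s')
    assert d.count == 2 and a.count == 2
    assert a.arg[0].type == Arg_type.TYPE_INT
    assert a.arg[1].type == Arg_type.TYPE_STRING
    assert d.dir[0].conversion == 'd' and d.dir[1].conversion == 's'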
|
drvink/pyc-fmtstr-parser
|
pyc_fmtstr_parser/printf_parse.py
|
Python
|
lgpl-2.1
| 13,012
|
"""
Test connecting to a server.
"""
from gabbletest import exec_test
import constants as cs
def test(q, bus, conn, stream):
conn.Connect()
q.expect('dbus-signal', signal='StatusChanged', args=[cs.CONN_STATUS_CONNECTING, cs.CSR_REQUESTED])
q.expect('stream-authenticated')
q.expect('dbus-signal', signal='PresenceUpdate',
args=[{1L: (0L, {u'available': {}})}])
q.expect('dbus-signal', signal='StatusChanged', args=[cs.CONN_STATUS_CONNECTED, cs.CSR_REQUESTED])
if __name__ == '__main__':
exec_test(test, do_connect=False)
|
jku/telepathy-gabble
|
tests/twisted/connect/test-success.py
|
Python
|
lgpl-2.1
| 558
|
#!/usr/bin/python
# Urwid common display code
# Copyright (C) 2004-2011 Ian Ward
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# Urwid web site: http://excess.org/urwid/
from __future__ import division, print_function
import os
import sys
try:
import termios
except ImportError:
pass # windows
from urwid.util import StoppingContext, int_scale
from urwid import signals
from urwid.compat import B, bytes3, xrange, with_metaclass
# for replacing unprintable bytes with '?'
UNPRINTABLE_TRANS_TABLE = B("?") * 32 + bytes3(list(xrange(32,256)))
# signals sent by BaseScreen
UPDATE_PALETTE_ENTRY = "update palette entry"
INPUT_DESCRIPTORS_CHANGED = "input descriptors changed"
# AttrSpec internal values
_BASIC_START = 0 # first index of basic color aliases
_CUBE_START = 16 # first index of color cube
_CUBE_SIZE_256 = 6 # one side of the color cube
_GRAY_SIZE_256 = 24
_GRAY_START_256 = _CUBE_SIZE_256 ** 3 + _CUBE_START
_CUBE_WHITE_256 = _GRAY_START_256 -1
_CUBE_SIZE_88 = 4
_GRAY_SIZE_88 = 8
_GRAY_START_88 = _CUBE_SIZE_88 ** 3 + _CUBE_START
_CUBE_WHITE_88 = _GRAY_START_88 -1
_CUBE_BLACK = _CUBE_START
# values copied from xterm 256colres.h:
_CUBE_STEPS_256 = [0x00, 0x5f, 0x87, 0xaf, 0xd7, 0xff]
_GRAY_STEPS_256 = [0x08, 0x12, 0x1c, 0x26, 0x30, 0x3a, 0x44, 0x4e, 0x58, 0x62,
0x6c, 0x76, 0x80, 0x84, 0x94, 0x9e, 0xa8, 0xb2, 0xbc, 0xc6, 0xd0,
0xda, 0xe4, 0xee]
# values copied from xterm 88colres.h:
_CUBE_STEPS_88 = [0x00, 0x8b, 0xcd, 0xff]
_GRAY_STEPS_88 = [0x2e, 0x5c, 0x73, 0x8b, 0xa2, 0xb9, 0xd0, 0xe7]
# values copied from X11/rgb.txt and XTerm-col.ad:
_BASIC_COLOR_VALUES = [(0,0,0), (205, 0, 0), (0, 205, 0), (205, 205, 0),
(0, 0, 238), (205, 0, 205), (0, 205, 205), (229, 229, 229),
(127, 127, 127), (255, 0, 0), (0, 255, 0), (255, 255, 0),
(0x5c, 0x5c, 0xff), (255, 0, 255), (0, 255, 255), (255, 255, 255)]
_COLOR_VALUES_256 = (_BASIC_COLOR_VALUES +
[(r, g, b) for r in _CUBE_STEPS_256 for g in _CUBE_STEPS_256
for b in _CUBE_STEPS_256] +
[(gr, gr, gr) for gr in _GRAY_STEPS_256])
_COLOR_VALUES_88 = (_BASIC_COLOR_VALUES +
[(r, g, b) for r in _CUBE_STEPS_88 for g in _CUBE_STEPS_88
for b in _CUBE_STEPS_88] +
[(gr, gr, gr) for gr in _GRAY_STEPS_88])
assert len(_COLOR_VALUES_256) == 256
assert len(_COLOR_VALUES_88) == 88
_FG_COLOR_MASK = 0x000000ff
_BG_COLOR_MASK = 0x0000ff00
_FG_BASIC_COLOR = 0x00010000
_FG_HIGH_COLOR = 0x00020000
_BG_BASIC_COLOR = 0x00040000
_BG_HIGH_COLOR = 0x00080000
_BG_SHIFT = 8
_HIGH_88_COLOR = 0x00100000
_STANDOUT = 0x02000000
_UNDERLINE = 0x04000000
_BOLD = 0x08000000
_BLINK = 0x10000000
_ITALICS = 0x20000000
_STRIKETHROUGH = 0x40000000
_FG_MASK = (_FG_COLOR_MASK | _FG_BASIC_COLOR | _FG_HIGH_COLOR |
_STANDOUT | _UNDERLINE | _BLINK | _BOLD | _ITALICS | _STRIKETHROUGH)
_BG_MASK = _BG_COLOR_MASK | _BG_BASIC_COLOR | _BG_HIGH_COLOR
DEFAULT = 'default'
BLACK = 'black'
DARK_RED = 'dark red'
DARK_GREEN = 'dark green'
BROWN = 'brown'
DARK_BLUE = 'dark blue'
DARK_MAGENTA = 'dark magenta'
DARK_CYAN = 'dark cyan'
LIGHT_GRAY = 'light gray'
DARK_GRAY = 'dark gray'
LIGHT_RED = 'light red'
LIGHT_GREEN = 'light green'
YELLOW = 'yellow'
LIGHT_BLUE = 'light blue'
LIGHT_MAGENTA = 'light magenta'
LIGHT_CYAN = 'light cyan'
WHITE = 'white'
_BASIC_COLORS = [
BLACK,
DARK_RED,
DARK_GREEN,
BROWN,
DARK_BLUE,
DARK_MAGENTA,
DARK_CYAN,
LIGHT_GRAY,
DARK_GRAY,
LIGHT_RED,
LIGHT_GREEN,
YELLOW,
LIGHT_BLUE,
LIGHT_MAGENTA,
LIGHT_CYAN,
WHITE,
]
_ATTRIBUTES = {
'bold': _BOLD,
'italics': _ITALICS,
'underline': _UNDERLINE,
'blink': _BLINK,
'standout': _STANDOUT,
'strikethrough': _STRIKETHROUGH,
}
def _value_lookup_table(values, size):
"""
Generate a lookup table for finding the closest item in values.
Lookup returns (index into values)+1
values -- list of values in ascending order, all < size
size -- size of lookup table and maximum value
>>> _value_lookup_table([0, 7, 9], 10)
[0, 0, 0, 0, 1, 1, 1, 1, 2, 2]
"""
middle_values = [0] + [(values[i] + values[i + 1] + 1) // 2
for i in range(len(values) - 1)] + [size]
lookup_table = []
for i in range(len(middle_values)-1):
count = middle_values[i + 1] - middle_values[i]
lookup_table.extend([i] * count)
return lookup_table
_CUBE_256_LOOKUP = _value_lookup_table(_CUBE_STEPS_256, 256)
_GRAY_256_LOOKUP = _value_lookup_table([0] + _GRAY_STEPS_256 + [0xff], 256)
_CUBE_88_LOOKUP = _value_lookup_table(_CUBE_STEPS_88, 256)
_GRAY_88_LOOKUP = _value_lookup_table([0] + _GRAY_STEPS_88 + [0xff], 256)
# convert steps to values that will be used by string versions of the colors
# 1 hex digit for rgb and 0..100 for grayscale
_CUBE_STEPS_256_16 = [int_scale(n, 0x100, 0x10) for n in _CUBE_STEPS_256]
_GRAY_STEPS_256_101 = [int_scale(n, 0x100, 101) for n in _GRAY_STEPS_256]
_CUBE_STEPS_88_16 = [int_scale(n, 0x100, 0x10) for n in _CUBE_STEPS_88]
_GRAY_STEPS_88_101 = [int_scale(n, 0x100, 101) for n in _GRAY_STEPS_88]
# create lookup tables for 1 hex digit rgb and 0..100 for grayscale values
_CUBE_256_LOOKUP_16 = [_CUBE_256_LOOKUP[int_scale(n, 16, 0x100)]
for n in range(16)]
_GRAY_256_LOOKUP_101 = [_GRAY_256_LOOKUP[int_scale(n, 101, 0x100)]
for n in range(101)]
_CUBE_88_LOOKUP_16 = [_CUBE_88_LOOKUP[int_scale(n, 16, 0x100)]
for n in range(16)]
_GRAY_88_LOOKUP_101 = [_GRAY_88_LOOKUP[int_scale(n, 101, 0x100)]
for n in range(101)]
# The functions _gray_num_256() and _gray_num_88() do not include the gray
# values from the color cube so that the gray steps are an even width.
# The color cube grays are available by using the rgb functions. Pure
# white and black are taken from the color cube, since the gray range does
# not include them, and the basic colors are more likely to have been
# customized by an end-user.
def _gray_num_256(gnum):
"""Return ths color number for gray number gnum.
Color cube black and white are returned for 0 and 25 respectively
since those values aren't included in the gray scale.
"""
# grays start from index 1
gnum -= 1
if gnum < 0:
return _CUBE_BLACK
if gnum >= _GRAY_SIZE_256:
return _CUBE_WHITE_256
return _GRAY_START_256 + gnum
def _gray_num_88(gnum):
"""Return ths color number for gray number gnum.
Color cube black and white are returned for 0 and 9 respectively
since those values aren't included in the gray scale.
"""
# gnums start from index 1
gnum -= 1
if gnum < 0:
return _CUBE_BLACK
if gnum >= _GRAY_SIZE_88:
return _CUBE_WHITE_88
return _GRAY_START_88 + gnum
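# For illustration, using the mapping above: _gray_num_256(0) returns the
# color-cube black (16), _gray_num_256(25) returns the color-cube white
# (231), and everything in between lands on the 232..255 gray ramp.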
def _color_desc_256(num):
"""
Return a string description of color number num.
0..15 -> 'h0'..'h15' basic colors (as high-colors)
16..231 -> '#000'..'#fff' color cube colors
232..255 -> 'g3'..'g93' grays
>>> _color_desc_256(15)
'h15'
>>> _color_desc_256(16)
'#000'
>>> _color_desc_256(17)
'#006'
>>> _color_desc_256(230)
'#ffd'
>>> _color_desc_256(233)
'g7'
>>> _color_desc_256(234)
'g11'
"""
assert num >= 0 and num < 256, num
if num < _CUBE_START:
return 'h%d' % num
if num < _GRAY_START_256:
num -= _CUBE_START
b, num = num % _CUBE_SIZE_256, num // _CUBE_SIZE_256
g, num = num % _CUBE_SIZE_256, num // _CUBE_SIZE_256
r = num % _CUBE_SIZE_256
return '#%x%x%x' % (_CUBE_STEPS_256_16[r], _CUBE_STEPS_256_16[g],
_CUBE_STEPS_256_16[b])
return 'g%d' % _GRAY_STEPS_256_101[num - _GRAY_START_256]
def _color_desc_88(num):
"""
Return a string description of color number num.
0..15 -> 'h0'..'h15' basic colors (as high-colors)
16..79 -> '#000'..'#fff' color cube colors
80..87 -> 'g18'..'g90' grays
>>> _color_desc_88(15)
'h15'
>>> _color_desc_88(16)
'#000'
>>> _color_desc_88(17)
'#008'
>>> _color_desc_88(78)
'#ffc'
>>> _color_desc_88(81)
'g36'
>>> _color_desc_88(82)
'g45'
"""
    assert num >= 0 and num < 88, num
if num < _CUBE_START:
return 'h%d' % num
if num < _GRAY_START_88:
num -= _CUBE_START
b, num = num % _CUBE_SIZE_88, num // _CUBE_SIZE_88
        g, r = num % _CUBE_SIZE_88, num // _CUBE_SIZE_88
return '#%x%x%x' % (_CUBE_STEPS_88_16[r], _CUBE_STEPS_88_16[g],
_CUBE_STEPS_88_16[b])
return 'g%d' % _GRAY_STEPS_88_101[num - _GRAY_START_88]
def _parse_color_256(desc):
"""
Return a color number for the description desc.
'h0'..'h255' -> 0..255 actual color number
'#000'..'#fff' -> 16..231 color cube colors
'g0'..'g100' -> 16, 232..255, 231 grays and color cube black/white
'g#00'..'g#ff' -> 16, 232...255, 231 gray and color cube black/white
Returns None if desc is invalid.
>>> _parse_color_256('h142')
142
>>> _parse_color_256('#f00')
196
>>> _parse_color_256('g100')
231
>>> _parse_color_256('g#80')
244
"""
if len(desc) > 4:
# keep the length within reason before parsing
return None
try:
if desc.startswith('h'):
# high-color number
num = int(desc[1:], 10)
if num < 0 or num > 255:
return None
return num
if desc.startswith('#') and len(desc) == 4:
# color-cube coordinates
rgb = int(desc[1:], 16)
if rgb < 0:
return None
b, rgb = rgb % 16, rgb // 16
g, r = rgb % 16, rgb // 16
# find the closest rgb values
r = _CUBE_256_LOOKUP_16[r]
g = _CUBE_256_LOOKUP_16[g]
b = _CUBE_256_LOOKUP_16[b]
return _CUBE_START + (r * _CUBE_SIZE_256 + g) * _CUBE_SIZE_256 + b
# Only remaining possibility is gray value
if desc.startswith('g#'):
# hex value 00..ff
gray = int(desc[2:], 16)
if gray < 0 or gray > 255:
return None
gray = _GRAY_256_LOOKUP[gray]
elif desc.startswith('g'):
# decimal value 0..100
gray = int(desc[1:], 10)
if gray < 0 or gray > 100:
return None
gray = _GRAY_256_LOOKUP_101[gray]
else:
return None
if gray == 0:
return _CUBE_BLACK
gray -= 1
if gray == _GRAY_SIZE_256:
return _CUBE_WHITE_256
return _GRAY_START_256 + gray
except ValueError:
return None
def _parse_color_88(desc):
"""
Return a color number for the description desc.
'h0'..'h87' -> 0..87 actual color number
'#000'..'#fff' -> 16..79 color cube colors
'g0'..'g100' -> 16, 80..87, 79 grays and color cube black/white
'g#00'..'g#ff' -> 16, 80...87, 79 gray and color cube black/white
Returns None if desc is invalid.
>>> _parse_color_88('h142')
>>> _parse_color_88('h42')
42
>>> _parse_color_88('#f00')
64
>>> _parse_color_88('g100')
79
>>> _parse_color_88('g#80')
83
"""
if len(desc) > 4:
# keep the length within reason before parsing
return None
try:
if desc.startswith('h'):
# high-color number
num = int(desc[1:], 10)
if num < 0 or num > 87:
return None
return num
if desc.startswith('#') and len(desc) == 4:
# color-cube coordinates
rgb = int(desc[1:], 16)
if rgb < 0:
return None
b, rgb = rgb % 16, rgb // 16
g, r = rgb % 16, rgb // 16
# find the closest rgb values
r = _CUBE_88_LOOKUP_16[r]
g = _CUBE_88_LOOKUP_16[g]
b = _CUBE_88_LOOKUP_16[b]
return _CUBE_START + (r * _CUBE_SIZE_88 + g) * _CUBE_SIZE_88 + b
# Only remaining possibility is gray value
if desc.startswith('g#'):
# hex value 00..ff
gray = int(desc[2:], 16)
if gray < 0 or gray > 255:
return None
gray = _GRAY_88_LOOKUP[gray]
elif desc.startswith('g'):
# decimal value 0..100
gray = int(desc[1:], 10)
if gray < 0 or gray > 100:
return None
gray = _GRAY_88_LOOKUP_101[gray]
else:
return None
if gray == 0:
return _CUBE_BLACK
gray -= 1
if gray == _GRAY_SIZE_88:
return _CUBE_WHITE_88
return _GRAY_START_88 + gray
except ValueError:
return None
class AttrSpecError(Exception):
pass
class AttrSpec(object):
def __init__(self, fg, bg, colors=256):
"""
fg -- a string containing a comma-separated foreground color
and settings
Color values:
'default' (use the terminal's default foreground),
'black', 'dark red', 'dark green', 'brown', 'dark blue',
'dark magenta', 'dark cyan', 'light gray', 'dark gray',
'light red', 'light green', 'yellow', 'light blue',
'light magenta', 'light cyan', 'white'
High-color example values:
            '#009' (0% red, 0% green, 60% blue, like HTML colors)
'#fcc' (100% red, 80% green, 80% blue)
'g40' (40% gray, decimal), 'g#cc' (80% gray, hex),
'#000', 'g0', 'g#00' (black),
'#fff', 'g100', 'g#ff' (white)
'h8' (color number 8), 'h255' (color number 255)
Setting:
'bold', 'italics', 'underline', 'blink', 'standout',
'strikethrough'
Some terminals use 'bold' for bright colors. Most terminals
ignore the 'blink' setting. If the color is not given then
'default' will be assumed.
bg -- a string containing the background color
Color values:
'default' (use the terminal's default background),
'black', 'dark red', 'dark green', 'brown', 'dark blue',
'dark magenta', 'dark cyan', 'light gray'
        High-color examples:
see fg examples above
An empty string will be treated the same as 'default'.
colors -- the maximum colors available for the specification
Valid values include: 1, 16, 88 and 256. High-color
values are only usable with 88 or 256 colors. With
1 color only the foreground settings may be used.
>>> AttrSpec('dark red', 'light gray', 16)
AttrSpec('dark red', 'light gray')
>>> AttrSpec('yellow, underline, bold', 'dark blue')
AttrSpec('yellow,bold,underline', 'dark blue')
>>> AttrSpec('#ddb', '#004', 256) # closest colors will be found
AttrSpec('#dda', '#006')
>>> AttrSpec('#ddb', '#004', 88)
AttrSpec('#ccc', '#000', colors=88)
"""
if colors not in (1, 16, 88, 256):
raise AttrSpecError('invalid number of colors (%d).' % colors)
self._value = 0 | _HIGH_88_COLOR * (colors == 88)
self.foreground = fg
self.background = bg
if self.colors > colors:
raise AttrSpecError(('foreground/background (%s/%s) require ' +
'more colors than have been specified (%d).') %
(repr(fg), repr(bg), colors))
foreground_basic = property(lambda s: s._value & _FG_BASIC_COLOR != 0)
foreground_high = property(lambda s: s._value & _FG_HIGH_COLOR != 0)
foreground_number = property(lambda s: s._value & _FG_COLOR_MASK)
background_basic = property(lambda s: s._value & _BG_BASIC_COLOR != 0)
background_high = property(lambda s: s._value & _BG_HIGH_COLOR != 0)
background_number = property(lambda s: (s._value & _BG_COLOR_MASK)
>> _BG_SHIFT)
italics = property(lambda s: s._value & _ITALICS != 0)
bold = property(lambda s: s._value & _BOLD != 0)
underline = property(lambda s: s._value & _UNDERLINE != 0)
blink = property(lambda s: s._value & _BLINK != 0)
standout = property(lambda s: s._value & _STANDOUT != 0)
strikethrough = property(lambda s: s._value & _STRIKETHROUGH != 0)
def _colors(self):
"""
Return the maximum colors required for this object.
Returns 256, 88, 16 or 1.
"""
if self._value & _HIGH_88_COLOR:
return 88
if self._value & (_BG_HIGH_COLOR | _FG_HIGH_COLOR):
return 256
        if self._value & (_BG_BASIC_COLOR | _FG_BASIC_COLOR):
return 16
return 1
colors = property(_colors)
def __repr__(self):
"""
Return an executable python representation of the AttrSpec
object.
"""
args = "%r, %r" % (self.foreground, self.background)
if self.colors == 88:
# 88-color mode is the only one that is handled differently
args = args + ", colors=88"
return "%s(%s)" % (self.__class__.__name__, args)
def _foreground_color(self):
"""Return only the color component of the foreground."""
if not (self.foreground_basic or self.foreground_high):
return 'default'
if self.foreground_basic:
return _BASIC_COLORS[self.foreground_number]
if self.colors == 88:
return _color_desc_88(self.foreground_number)
return _color_desc_256(self.foreground_number)
def _foreground(self):
return (self._foreground_color() +
',bold' * self.bold + ',italics' * self.italics +
',standout' * self.standout + ',blink' * self.blink +
',underline' * self.underline + ',strikethrough' * self.strikethrough)
def _set_foreground(self, foreground):
color = None
flags = 0
# handle comma-separated foreground
for part in foreground.split(','):
part = part.strip()
if part in _ATTRIBUTES:
# parse and store "settings"/attributes in flags
if flags & _ATTRIBUTES[part]:
raise AttrSpecError(("Setting %s specified more than" +
"once in foreground (%s)") % (repr(part),
repr(foreground)))
flags |= _ATTRIBUTES[part]
continue
# past this point we must be specifying a color
if part in ('', 'default'):
scolor = 0
elif part in _BASIC_COLORS:
scolor = _BASIC_COLORS.index(part)
flags |= _FG_BASIC_COLOR
elif self._value & _HIGH_88_COLOR:
scolor = _parse_color_88(part)
flags |= _FG_HIGH_COLOR
else:
scolor = _parse_color_256(part)
flags |= _FG_HIGH_COLOR
# _parse_color_*() return None for unrecognised colors
if scolor is None:
raise AttrSpecError(("Unrecognised color specification %s " +
"in foreground (%s)") % (repr(part), repr(foreground)))
if color is not None:
raise AttrSpecError(("More than one color given for " +
"foreground (%s)") % (repr(foreground),))
color = scolor
if color is None:
color = 0
self._value = (self._value & ~_FG_MASK) | color | flags
foreground = property(_foreground, _set_foreground)
def _background(self):
"""Return the background color."""
if not (self.background_basic or self.background_high):
return 'default'
if self.background_basic:
return _BASIC_COLORS[self.background_number]
if self._value & _HIGH_88_COLOR:
return _color_desc_88(self.background_number)
return _color_desc_256(self.background_number)
def _set_background(self, background):
flags = 0
if background in ('', 'default'):
color = 0
elif background in _BASIC_COLORS:
color = _BASIC_COLORS.index(background)
flags |= _BG_BASIC_COLOR
elif self._value & _HIGH_88_COLOR:
color = _parse_color_88(background)
flags |= _BG_HIGH_COLOR
else:
color = _parse_color_256(background)
flags |= _BG_HIGH_COLOR
if color is None:
raise AttrSpecError(("Unrecognised color specification " +
"in background (%s)") % (repr(background),))
self._value = (self._value & ~_BG_MASK) | (color << _BG_SHIFT) | flags
background = property(_background, _set_background)
def get_rgb_values(self):
"""
Return (fg_red, fg_green, fg_blue, bg_red, bg_green, bg_blue) color
components. Each component is in the range 0-255. Values are taken
from the XTerm defaults and may not exactly match the user's terminal.
        If the foreground or background is 'default' then all their components
will be returned as None.
>>> AttrSpec('yellow', '#ccf', colors=88).get_rgb_values()
(255, 255, 0, 205, 205, 255)
>>> AttrSpec('default', 'g92').get_rgb_values()
(None, None, None, 238, 238, 238)
"""
if not (self.foreground_basic or self.foreground_high):
vals = (None, None, None)
elif self.colors == 88:
assert self.foreground_number < 88, "Invalid AttrSpec _value"
vals = _COLOR_VALUES_88[self.foreground_number]
else:
vals = _COLOR_VALUES_256[self.foreground_number]
if not (self.background_basic or self.background_high):
return vals + (None, None, None)
elif self.colors == 88:
assert self.background_number < 88, "Invalid AttrSpec _value"
return vals + _COLOR_VALUES_88[self.background_number]
else:
return vals + _COLOR_VALUES_256[self.background_number]
def __eq__(self, other):
return isinstance(other, AttrSpec) and self._value == other._value
def __ne__(self, other):
return not self == other
__hash__ = object.__hash__
class RealTerminal(object):
def __init__(self):
super(RealTerminal,self).__init__()
self._signal_keys_set = False
self._old_signal_keys = None
def tty_signal_keys(self, intr=None, quit=None, start=None,
stop=None, susp=None, fileno=None):
"""
Read and/or set the tty's signal character settings.
This function returns the current settings as a tuple.
Use the string 'undefined' to unmap keys from their signals.
The value None is used when no change is being made.
        Setting signal keys is done using the integer ASCII
        code for the key, e.g. 3 for CTRL+C.
If this function is called after start() has been called
then the original settings will be restored when stop()
is called.
"""
if fileno is None:
fileno = sys.stdin.fileno()
if not os.isatty(fileno):
return
tattr = termios.tcgetattr(fileno)
sattr = tattr[6]
skeys = (sattr[termios.VINTR], sattr[termios.VQUIT],
sattr[termios.VSTART], sattr[termios.VSTOP],
sattr[termios.VSUSP])
if intr == 'undefined': intr = 0
if quit == 'undefined': quit = 0
if start == 'undefined': start = 0
if stop == 'undefined': stop = 0
if susp == 'undefined': susp = 0
if intr is not None: tattr[6][termios.VINTR] = intr
if quit is not None: tattr[6][termios.VQUIT] = quit
if start is not None: tattr[6][termios.VSTART] = start
if stop is not None: tattr[6][termios.VSTOP] = stop
if susp is not None: tattr[6][termios.VSUSP] = susp
if intr is not None or quit is not None or \
start is not None or stop is not None or \
susp is not None:
termios.tcsetattr(fileno, termios.TCSADRAIN, tattr)
self._signal_keys_set = True
return skeys
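    # A minimal usage sketch (assuming a POSIX tty on stdin; `term` is a
    # hypothetical RealTerminal instance): unmap CTRL+C from SIGINT, then
    # restore the original signal keys afterwards.
    #
    #     term = RealTerminal()
    #     old = term.tty_signal_keys(intr='undefined')
    #     try:
    #         pass  # CTRL+C now arrives as ordinary input
    #     finally:
    #         term.tty_signal_keys(*old)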
class ScreenError(Exception):
pass
class BaseScreen(with_metaclass(signals.MetaSignals, object)):
"""
    Base class for Screen classes (raw_display.Screen, etc.)
"""
signals = [UPDATE_PALETTE_ENTRY, INPUT_DESCRIPTORS_CHANGED]
def __init__(self):
super(BaseScreen,self).__init__()
self._palette = {}
self._started = False
started = property(lambda self: self._started)
def start(self, *args, **kwargs):
"""Set up the screen. If the screen has already been started, does
nothing.
May be used as a context manager, in which case :meth:`stop` will
automatically be called at the end of the block:
with screen.start():
...
You shouldn't override this method in a subclass; instead, override
:meth:`_start`.
"""
if not self._started:
self._start(*args, **kwargs)
self._started = True
return StoppingContext(self)
def _start(self):
pass
def stop(self):
if self._started:
self._stop()
self._started = False
def _stop(self):
pass
def run_wrapper(self, fn, *args, **kwargs):
"""Start the screen, call a function, then stop the screen. Extra
arguments are passed to `start`.
Deprecated in favor of calling `start` as a context manager.
"""
with self.start(*args, **kwargs):
return fn()
def register_palette(self, palette):
"""Register a set of palette entries.
palette -- a list of (name, like_other_name) or
(name, foreground, background, mono, foreground_high,
background_high) tuples
The (name, like_other_name) format will copy the settings
from the palette entry like_other_name, which must appear
before this tuple in the list.
The mono and foreground/background_high values are
optional ie. the second tuple format may have 3, 4 or 6
values. See register_palette_entry() for a description
of the tuple values.
"""
for item in palette:
if len(item) in (3,4,6):
self.register_palette_entry(*item)
continue
if len(item) != 2:
raise ScreenError("Invalid register_palette entry: %s" %
repr(item))
name, like_name = item
if like_name not in self._palette:
raise ScreenError("palette entry '%s' doesn't exist"%like_name)
self._palette[name] = self._palette[like_name]
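    # A minimal palette sketch (entry names are illustrative); note how the
    # 2-tuple form copies the settings of an earlier entry:
    #
    #     screen.register_palette([
    #         ('header', 'white', 'dark blue'),
    #         ('body', 'light gray', 'black'),
    #         ('hint', 'header'),
    #     ])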
def register_palette_entry(self, name, foreground, background,
mono=None, foreground_high=None, background_high=None):
"""Register a single palette entry.
name -- new entry/attribute name
foreground -- a string containing a comma-separated foreground
color and settings
Color values:
'default' (use the terminal's default foreground),
'black', 'dark red', 'dark green', 'brown', 'dark blue',
'dark magenta', 'dark cyan', 'light gray', 'dark gray',
'light red', 'light green', 'yellow', 'light blue',
'light magenta', 'light cyan', 'white'
Settings:
            'bold', 'italics', 'underline', 'blink', 'standout',
            'strikethrough'
Some terminals use 'bold' for bright colors. Most terminals
ignore the 'blink' setting. If the color is not given then
'default' will be assumed.
background -- a string containing the background color
Background color values:
'default' (use the terminal's default background),
'black', 'dark red', 'dark green', 'brown', 'dark blue',
'dark magenta', 'dark cyan', 'light gray'
mono -- a comma-separated string containing monochrome terminal
settings (see "Settings" above.)
None = no terminal settings (same as 'default')
foreground_high -- a string containing a comma-separated
foreground color and settings, standard foreground
colors (see "Color values" above) or high-colors may
be used
High-color example values:
            '#009' (0% red, 0% green, 60% blue, like HTML colors)
'#fcc' (100% red, 80% green, 80% blue)
'g40' (40% gray, decimal), 'g#cc' (80% gray, hex),
'#000', 'g0', 'g#00' (black),
'#fff', 'g100', 'g#ff' (white)
'h8' (color number 8), 'h255' (color number 255)
None = use foreground parameter value
background_high -- a string containing the background color,
standard background colors (see "Background colors" above)
or high-colors (see "High-color example values" above)
may be used
None = use background parameter value
"""
basic = AttrSpec(foreground, background, 16)
if type(mono) == tuple:
# old style of specifying mono attributes was to put them
# in a tuple. convert to comma-separated string
mono = ",".join(mono)
if mono is None:
mono = DEFAULT
mono = AttrSpec(mono, DEFAULT, 1)
if foreground_high is None:
foreground_high = foreground
if background_high is None:
background_high = background
high_256 = AttrSpec(foreground_high, background_high, 256)
# 'hX' where X > 15 are different in 88/256 color, use
# basic colors for 88-color mode if high colors are specified
# in this way (also avoids crash when X > 87)
def large_h(desc):
if not desc.startswith('h'):
return False
if ',' in desc:
desc = desc.split(',',1)[0]
num = int(desc[1:], 10)
return num > 15
if large_h(foreground_high) or large_h(background_high):
high_88 = basic
else:
high_88 = AttrSpec(foreground_high, background_high, 88)
signals.emit_signal(self, UPDATE_PALETTE_ENTRY,
name, basic, mono, high_88, high_256)
self._palette[name] = (basic, mono, high_88, high_256)
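    # A minimal single-entry sketch (values are illustrative); mono and the
    # high-color variants are optional:
    #
    #     screen.register_palette_entry('warning',
    #         'light red', 'black',
    #         mono='bold',
    #         foreground_high='#f86', background_high='g11')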
def _test():
import doctest
doctest.testmod()
if __name__ == '__main__':
_test()
|
rndusr/urwid
|
urwid/display_common.py
|
Python
|
lgpl-2.1
| 31,238
|
#!/usr/bin/python -u
# -*- coding: utf-8 -*-
import libxml2
import time
import traceback
import sys
import logging
from pyxmpp.all import JID,Iq,Presence,Message,StreamError
from pyxmpp.jabber.all import Client
class Disconnected(Exception):
pass
class MyClient(Client):
def session_started(self):
self.stream.send(Presence())
def idle(self):
print "idle"
Client.idle(self)
if self.session_established:
target=JID("jajcus",s.jid.domain)
self.stream.send(Message(to_jid=target,body=unicode("Teścik","utf-8")))
def post_disconnect(self):
print "Disconnected"
raise Disconnected
logger=logging.getLogger()
logger.addHandler(logging.StreamHandler())
logger.setLevel(logging.DEBUG)
libxml2.debugMemory(1)
print "creating stream..."
s=MyClient(jid=JID("test@localhost/Test"),password=u"123",auth_methods=["sasl:DIGEST-MD5","digest"])
print "connecting..."
s.connect()
print "processing..."
try:
try:
s.loop(1)
finally:
s.disconnect()
except KeyboardInterrupt:
traceback.print_exc(file=sys.stderr)
except (StreamError,Disconnected),e:
raise
libxml2.cleanupParser()
if libxml2.debugMemory(1) == 0:
print "OK"
else:
print "Memory leak %d bytes" % (libxml2.debugMemory(1))
libxml2.dumpMemory()
# vi: sts=4 et sw=4
|
Jajcus/pyxmpp
|
examples/c2s_test.py
|
Python
|
lgpl-2.1
| 1,349
|
# Written by Ingar Arntzen
# see LICENSE.txt for license information
"""
This module implements a generic console interface that can
be attached to any runnable python object.
"""
import code
import __builtin__
import threading
import exceptions
##############################################
# OBJECT CONSOLE
##############################################
class ConsoleError(exceptions.Exception):
"""Error associated with the console."""
pass
class ObjectConsole:
"""
This class runs a python console in the main thread, and starts
a given Object in a second thread.
The Object is assumed to implement at least two methods, run() and stop().
- The run() method is the entry point for the thread.
- The stop() method is used by the main thread to request that the
object thread does a controlled shutdown and returns from the run method.
If the worker thread does not return from run() within 2 seconds after stop()
has been invoked, the console terminates the object thread more aggressively.
AttributeNames of Object listed in the provided namespace will be
included in the console namespace.
"""
TIMEOUT = 2
def __init__(self, object_, name_space=None, run='run',
stop='stop', name=""):
self._object = object_
self._object_run = getattr(object_, run)
self._object_stop = getattr(object_, stop)
self._thread = threading.Thread(group=None,
target=self._object_run,
name="ObjectThread")
# Configure Console Namespace
self._name_space = {}
        self._name_space['__builtins__'] = __builtin__
self._name_space['__name__'] = __name__
self._name_space['__doc__'] = __doc__
self._name_space['help'] = self._usage
        if name_space and isinstance(name_space, dict):
self._name_space.update(name_space)
        self._app_name_space = name_space or {}
self._app_name = name
self._usage()
def _usage(self):
"""Print usage information."""
print "\nConsole:", self._app_name
for key in self._app_name_space.keys():
print "- ", key
print "- help"
def run(self):
"""Starts the given runnable object in a thread and
then starts the console."""
self._thread.start()
try:
code.interact("", None, self._name_space)
except KeyboardInterrupt:
pass
self._object_stop()
self._thread.join(ObjectConsole.TIMEOUT)
if self._thread.isAlive():
raise ConsoleError, "Worker Thread still alive"
|
egbertbouman/tribler-g
|
Tribler/UPnP/common/objectconsole.py
|
Python
|
lgpl-2.1
| 2,734
|
import os
from zeroinstall.injector import namespaces
from zeroinstall.injector.reader import InvalidInterface, load_feed
from xml.dom import minidom, Node, XMLNS_NAMESPACE
import tempfile
from logging import warn, info
group_impl_attribs = ['version', 'version-modifier', 'released', 'main', 'stability', 'arch', 'license', 'doc-dir', 'self-test', 'langs', 'local-path']
known_elements = {
'interface' : ['uri', 'min-injector-version', 'main'], # (main is deprecated)
'name' : [],
'summary' : [],
'description' : [],
'needs-terminal' : [],
'homepage' : [],
'category' : ['type'],
'icon' : ['type', 'href'],
'feed' : ['src', 'arch'],
'feed-for' : ['interface'],
'group' : group_impl_attribs,
'implementation' : ['id'] + group_impl_attribs,
'package-implementation' : ['package', 'main', 'distributions'],
'manifest-digest' : ['sha1new', 'sha256'],
'command' : ['name', 'path', 'shell-command'],
'arg' : [],
'archive' : ['href', 'size', 'extract', 'type', 'start-offset'],
'recipe' : [],
'requires' : ['interface', 'use'],
'runner' : ['interface', 'use', 'command'],
'version' : ['not-before', 'before'],
'environment' : ['name', 'insert', 'value', 'default', 'mode'],
'executable-in-var' : ['name', 'command'],
'executable-in-path' : ['name', 'command'],
#'overlay' : ['src', 'mount-point'],
}
def checkElement(elem):
if elem.namespaceURI != namespaces.XMLNS_IFACE:
info("Note: Skipping unknown (but namespaced) element <%s>", elem.localName)
return # Namespaces elements are OK
if elem.localName not in known_elements:
warn("Unknown Zero Install element <%s>.\nNon Zero-Install elements should be namespaced.", elem.localName)
return
known_attrs = known_elements[elem.localName]
for (uri, name), value in elem.attributes.itemsNS():
if uri == XMLNS_NAMESPACE:
continue # Namespace declarations are fine
if uri:
info("Note: Skipping unknown (but namespaced) attribute '%s'", name)
continue
if name not in known_attrs:
warn("Unknown Zero Install attribute '%s' on <%s>.\nNon Zero-Install attributes should be namespaced.",
name, elem.localName)
for child in elem.childNodes:
if child.nodeType == Node.ELEMENT_NODE:
checkElement(child)
def check(data, warnings = True, implementation_id_alg=None, generate_sizes=False):
fd, tmp_name = tempfile.mkstemp(prefix = '0publish-validate-')
os.close(fd)
try:
tmp_file = file(tmp_name, 'w')
tmp_file.write(data)
tmp_file.close()
try:
feed = load_feed(tmp_name, local=True, implementation_id_alg=implementation_id_alg, generate_sizes=generate_sizes)
except InvalidInterface, ex:
raise
except Exception, ex:
warn("Internal error: %s", ex)
raise InvalidInterface(str(ex))
finally:
os.unlink(tmp_name)
if warnings:
doc = minidom.parseString(data)
checkElement(doc.documentElement)
return feed
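# A minimal usage sketch (Python 2, matching this module; the feed path is
# illustrative). check() returns the parsed feed, or raises InvalidInterface
# when the data does not validate:
#
#     data = file('MyApp.xml').read()
#     feed = check(data, warnings=True)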
|
timdiels/0publish
|
validator.py
|
Python
|
lgpl-2.1
| 2,847
|
import requests
params = {'username':'Ryan', 'password':'password'}
r = requests.post("http://pythonscraping.com/pages/cookies/welcome.php", params)
print("Cookie is set to:")
print(r.cookies.get_dict())
print("-------------")
print("Going to profile page...")
r = requests.get("http://pythonscraping.com/pages/cookies/profile.php", cookies = r.cookies)
print(r.text)
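# An equivalent sketch using a Session object, which carries the cookies
# across requests automatically (same URLs as above):
#
#     session = requests.Session()
#     session.post("http://pythonscraping.com/pages/cookies/welcome.php", params)
#     r = session.get("http://pythonscraping.com/pages/cookies/profile.php")
#     print(r.text)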
|
XiangYz/webscraper
|
test10.py
|
Python
|
lgpl-2.1
| 368
|
"""SCons.Tool.tar
Tool-specific initialization for tar.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001 - 2015 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/tar.py rel_2.4.0:3365:9259ea1c13d7 2015/09/21 14:03:43 bdbaddog"
import SCons.Action
import SCons.Builder
import SCons.Defaults
import SCons.Node.FS
import SCons.Util
tars = ['tar', 'gtar']
TarAction = SCons.Action.Action('$TARCOM', '$TARCOMSTR')
TarBuilder = SCons.Builder.Builder(action = TarAction,
source_factory = SCons.Node.FS.Entry,
source_scanner = SCons.Defaults.DirScanner,
suffix = '$TARSUFFIX',
multi = 1)
def generate(env):
"""Add Builders and construction variables for tar to an Environment."""
try:
bld = env['BUILDERS']['Tar']
except KeyError:
bld = TarBuilder
env['BUILDERS']['Tar'] = bld
env['TAR'] = env.Detect(tars) or 'gtar'
env['TARFLAGS'] = SCons.Util.CLVar('-c')
env['TARCOM'] = '$TAR $TARFLAGS -f $TARGET $SOURCES'
env['TARSUFFIX'] = '.tar'
def exists(env):
return env.Detect(tars)
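# A minimal SConstruct sketch using this tool (file names are illustrative;
# the flag tweak shows how gzip compression could be requested):
#
#     env = Environment(tools=['tar'])
#     env.Append(TARFLAGS=' -z')
#     env.Tar('archive.tar.gz', ['src', 'README'])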
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
Uli1/mapnik
|
scons/scons-local-2.4.0/SCons/Tool/tar.py
|
Python
|
lgpl-2.1
| 2,503
|
#!/usr/bin/python
#
# examples/xdamage.py -- demonstrate damage extension
#
# Copyright (C) 2019 Mohit Garg <mrmohitgarg1990@gmail.com>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public License
# as published by the Free Software Foundation; either version 2.1
# of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc.,
# 59 Temple Place,
# Suite 330,
# Boston, MA 02111-1307 USA
# Python 2/3 compatibility.
from __future__ import print_function
import sys
import os
# Change path so we find Xlib
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
from Xlib import display, X, threaded, Xutil
import time
try:
import thread
except ModuleNotFoundError:
import _thread as thread
from Xlib.ext import damage
from PIL import Image, ImageTk
import traceback
def redraw(win, gc):
# win.clear_area()
win.fill_rectangle(gc, 0, 0, 60, 60)
def blink(display, win, gc, cols):
while 1:
time.sleep(2)
print('Changing color', cols[0])
gc.change(foreground = cols[0])
cols = (cols[1], cols[0])
redraw(win, gc)
display.flush()
def get_image_from_win(win, pt_w, pt_h, pt_x=0, pt_y=0):
try:
raw = win.get_image(pt_x, pt_y, pt_w, pt_h, X.ZPixmap, 0xffffffff)
image = Image.frombytes("RGB", (pt_w, pt_h), raw.data, "raw", "BGRX")
return image
except Exception:
traceback.print_exc()
def check_ext(disp):
# Check for extension
if not disp.has_extension('DAMAGE'):
sys.stderr.write('server does not have the DAMAGE extension\n')
sys.stderr.write("\n".join(disp.list_extensions()))
if disp.query_extension('DAMAGE') is None:
sys.exit(1)
else:
r = disp.damage_query_version()
print('DAMAGE version {}.{}'.format(r.major_version, r.minor_version))
def main():
d = display.Display()
root = d.screen().root
check_ext(d)
colormap = d.screen().default_colormap
red = colormap.alloc_named_color("red").pixel
blue = colormap.alloc_named_color("blue").pixel
background = colormap.alloc_named_color("white").pixel
window1 = root.create_window(100, 100, 250, 100, 1,
X.CopyFromParent, X.InputOutput,
X.CopyFromParent,
background_pixel = background,
event_mask = X.StructureNotifyMask | X.ExposureMask)
window1.set_wm_name('Changing Window')
window1.map()
gc = window1.create_gc(foreground = red)
thread.start_new_thread(blink, (d, window1, gc, (blue, red)))
window1.damage_create(damage.DamageReportRawRectangles)
window1.set_wm_normal_hints(
flags=(Xutil.PPosition | Xutil.PSize | Xutil.PMinSize),
min_width=50,
min_height=50
)
window2 = root.create_window(100, 250, 250, 100, 1,
X.CopyFromParent, X.InputOutput,
X.CopyFromParent,
background_pixel = background,
event_mask = X.StructureNotifyMask | X.ExposureMask)
window2.set_wm_normal_hints(
flags=(Xutil.PPosition | Xutil.PSize | Xutil.PMinSize),
min_width=50,
min_height=50
)
window2.set_wm_name('Tracking Window')
window2.map()
while 1:
event = d.next_event()
if event.type == X.Expose:
if event.count == 0:
redraw(window1, gc)
elif event.type == d.extension_event.DamageNotify:
image = get_image_from_win(window1, event.area.width, event.area.height, event.area.x, event.area.y)
bgpm = window2.create_pixmap(image.width, image.height, d.screen().root_depth)
bggc = window2.create_gc(foreground=0, background=0)
bgpm.put_pil_image(bggc, 0, 0, image)
window2.copy_area(bggc, bgpm, 0, 0, image.width, image.height, 0, 0)
# bggc.free()
elif event.type == X.DestroyNotify:
sys.exit(0)
if __name__ == "__main__":
main()
|
python-xlib/python-xlib
|
examples/xdamage.py
|
Python
|
lgpl-2.1
| 4,638
|
import ardurpc
from ardurpc.handler import Handler
class Base(Handler):
"""Handler for the Base Text-LCD type"""
def __init__(self, **kwargs):
Handler.__init__(self, **kwargs)
def getWidth(self):
"""
Get the display width as number of characters.
:return: Width
:rtype: Integer
"""
return self._call(0x01)
def getHeight(self):
"""
Get the display height as number of characters.
:return: Height
:rtype: Integer
"""
return self._call(0x02)
def clear(self):
"""
Clear the LCD screen and set the cursor position to the upper-left corner.
"""
return self._call(0x11)
def home(self):
"""
Set the cursor position to the upper-left corner.
"""
return self._call(0x12)
def setCursor(self, col, row):
"""
Position the cursor.
"""
return self._call(0x13, '>BB', col, row)
def write(self, c):
"""
Print a single character to the LCD.
"""
c = c.encode('ASCII')
return self._call(0x21, '>B', c[0])
def print(self, s):
"""
Print text to the LCD.
"""
s = s.encode('ASCII')
return self._call(0x22, '>B%ds' % len(s), len(s), s)
ardurpc.register(0x0300, Base, mask=8)
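# A minimal usage sketch (hypothetical setup; how a handler instance is
# obtained depends on the ArduRPC connection class in use):
#
#     lcd = ...  # a Base handler bound to a connected ArduRPC device
#     lcd.clear()
#     lcd.setCursor(0, 0)
#     lcd.print('Hello')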
|
DinoTools/ArduRPC-python
|
ardurpc/handler/lcd/__init__.py
|
Python
|
lgpl-3.0
| 1,384
|
from __future__ import division
"""
vaunix.py
part of the CsPyController package for AQuA experiment control by Martin Lichtman
Handles sending commands to Vaunix Lab Brick signal generators through the
vendor-supplied VNX_fmsynth DLL.
created = 2015.07.09
modified >= 2015.07.09
"""
__author__ = 'Martin Lichtman'
import logging
logger = logging.getLogger(__name__)
from atom.api import Bool, Str, Member, Int
from instrument_property import Prop, IntProp, ListProp, FloatProp
from cs_instruments import Instrument
from cs_errors import PauseError
from ctypes import *
class Vaunix(Prop):
isInitialized = Bool(False)
ID = Int()
va = Member()
model = Str()
serial = Int()
frequency = Member()
power = Member()
pulsewidth = Member()
pulserep = Member()
pulseenable = Bool()
startfreq = Member()
endfreq = Member()
sweeptime = Member()
sweepmode = Bool()
sweeptype = Bool()
sweepenable = Bool()
sweepdir = Bool()
internalref = Bool()
useexternalmod = Bool()
rfonoff = Bool()
maxPower = Int()
minPower = Int()
minFreq = Int()
maxFreq = Int()
def __init__(self, name, experiment, description=''):
super(Vaunix, self).__init__(name, experiment, description)
self.frequency = FloatProp('Frequency', experiment, 'Frequency (MHz)', '0')
self.power = FloatProp('Power', experiment, 'Power (dBm)', '0')
self.pulsewidth = FloatProp('PulseWidth', experiment, 'Pulse Width (us)', '0')
self.pulserep = FloatProp('PulseRep', experiment, 'Pulse Rep Time (us)', '0')
self.startfreq = FloatProp('StartFreq', experiment, 'Start Frequency (MHz)', '0')
self.endfreq = FloatProp('EndFreq', experiment, 'End Frequency (MHz)', '0')
self.sweeptime = IntProp('SweepTime', experiment, 'Sweep Time (ms)', '0')
self.properties += ['ID', 'model', 'serial', 'frequency','power','pulsewidth','pulserep','pulseenable','startfreq','endfreq','sweeptime',
'sweepmode', 'sweeptype', 'sweepdir', 'sweepenable', 'internalref', 'useexternalmod', 'rfonoff', 'maxPower']
def initialize(self,va):
self.va = va
errcode = self.va.fnLMS_InitDevice(self.ID)
if (errcode !=0):
errcodereset = self.va.fnLMS_CloseDevice(self.ID)
if (errcodereset != 0): #if device fails to initialize, it may be because it was not closed previously. Try closing and reinitializing it.
logger.error("Failed to initialize Vaunix device {}. Error code {}.".format(self.ID,errcode))
raise PauseError
errcode = self.va.fnLMS_InitDevice(self.ID)
if (errcode != 0):
logger.error("Failed to initialize Vaunix device {}. Error code {}.".format(self.ID,errcode))
raise PauseError
self.maxPower = int(self.va.fnLMS_GetMaxPwr(self.ID)/4)
self.minPower = int(self.va.fnLMS_GetMinPwr(self.ID)/4)
self.minFreq = int(self.va.fnLMS_GetMinFreq(self.ID))
self.maxFreq = int(self.va.fnLMS_GetMaxFreq(self.ID))
return
def freq_unit(self,val):
return int(val*100000)
def power_unit(self,value):
return int((self.maxPower - value)*4)
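    # For illustration: the device expects power as attenuation below
    # maxPower in 0.25 dB steps, so with maxPower = 10 dBm a request for
    # 3 dBm is sent as (10 - 3) * 4 = 28. Frequencies are sent in 10 Hz
    # units, so 100 MHz becomes 100 * 100000 = 10000000.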
def power_sanity_check(self,value):
if (value < self.minPower or value > self.maxPower):
logger.error("Vaunix device {} power ({} dBm) outside min/max range: {} dBm, {} dBm.".format(self.ID,value,self.minPower,self.maxPower))
raise PauseError
return
def freq_sanity_check(self,value):
if (value < self.minFreq or value > self.maxFreq):
logger.error("Vaunix device {} frequency ({} x10 Hz) outside min/max range: {} x10 Hz, {} x10 Hz.".format(self.ID,value,self.minFreq,self.maxFreq))
raise PauseError
return
def update(self):
if (self.rfonoff):
self.freq_sanity_check(self.freq_unit(self.frequency.value))
self.va.fnLMS_SetFrequency(self.ID, self.freq_unit(self.frequency.value))
self.power_sanity_check(self.power.value)
self.va.fnLMS_SetPowerLevel(self.ID, self.power_unit(self.power.value))
if (self.sweepenable):
self.freq_sanity_check(self.freq_unit(self.startfreq.value))
self.va.fnLMS_SetStartFrequency(self.ID, self.freq_unit(self.startfreq.value))
self.freq_sanity_check(self.freq_unit(self.endfreq.value))
self.va.fnLMS_SetEndFrequency(self.ID, self.freq_unit(self.endfreq.value))
self.va.fnLMS_SetSweepTime(self.ID, self.sweeptime.value)
self.va.fnLMS_SetSweepDirection(self.ID, self.sweepdir)
self.va.fnLMS_SetSweepMode(self.ID, self.sweepmode) #True: Repeat Sweep, False: Sweep Once
self.va.fnLMS_SetSweepType(self.ID, self.sweeptype) #True: Bidirectional Sweep, False: Unidirectional Sweep
self.va.fnLMS_StartSweep(self.ID, self.sweepenable)
self.va.fnLMS_SetFastPulsedOutput(self.ID, c_float(self.pulsewidth.value*1e-6), c_float(self.pulserep.value*1e-6), self.pulseenable)
self.va.fnLMS_SetUseExternalPulseMod(self.ID, self.useexternalmod)
self.va.fnLMS_SetUseInternalRef(self.ID, self.internalref) #True: internal ref, False: external ref
self.va.fnLMS_SaveSettings(self.ID)
self.va.fnLMS_SetRFOn(self.ID, self.rfonoff)
self.getparams()
return
def getparams(self):
logger.info("Parameters for Vaunix # {}".format(self.ID))
logger.info("Frequency: {} MHz".format(
self.va.fnLMS_GetFrequency(self.ID)/100000))
logger.info("Power Level: {} dBm".format(
self.va.fnLMS_GetPowerLevel(self.ID)/4))
class Vaunixs(Instrument):
version = '2015.11.19'
motors = Member()
isInitialized = Bool(False)
va = Member()
testMode = Bool(False) #Test mode: Set to False for actual use.
def __init__(self, name, experiment, description=''):
super(Vaunixs, self).__init__(name, experiment, description)
self.motors = ListProp('motors', experiment, 'A list of individual Vaunix signal generators', listElementType=Vaunix,
listElementName='Vaunix')
self.properties += ['version', 'motors']
num = self.initialize()
self.motors.length = num
self.motors.refreshGUI()
#Initialize: loads and initializes DLL
def initialize(self):
num = 0
if self.enable:
CDLL_file = "./vaunix/VNX_fmsynth.dll"
self.va = CDLL(CDLL_file)
if (self.testMode):
logger.warning("Warning: Vaunix in test mode. Set testMode=False in vaunix.py to turn off test mode.")
self.va.fnLMS_SetTestMode(self.testMode) #Test mode... this needs to be set False for actual run. Do not remove this command (default setting is True).
self.isInitialized = True
num = self.detect_generators()
return num
def preExperiment(self, hdf5):
if self.enable:
if (not self.isInitialized):
self.initialize()
for i in self.motors:
#initialize serial connection to each power supply
i.initialize(self.va)
self.isInitialized = True
def preIteration(self, iterationresults, hdf5):
"""
Every iteration, send the motors updated positions.
"""
if self.enable:
msg = ''
try:
for i in self.motors:
i.update()
except Exception as e:
logger.error('Problem updating Vaunix:\n{}\n{}\n'.format(msg, e))
self.isInitialized = False
raise PauseError
def postMeasurement(self, measurementresults, iterationresults, hdf5):
return
def postIteration(self, iterationresults, hdf5):
return
def postExperiment(self, hdf5):
return
def finalize(self,hdf5):
return
#detect_generators: Calls DLL function to check for number of generators and their IDs.
def detect_generators(self):
if (not self.isInitialized): #test if DLL is already loaded. If not, load it.
self.initialize()
num=self.va.fnLMS_GetNumDevices() #ask DLL for the number of connected devices
logger.debug("Number of vaunix devices detected: {}".format(num))
while (num>len(self.motors)): #if num connected devices > number in array, add elements.
self.motors.add()
while (num<len(self.motors)): #if <, subtract elements.
self.motors.pop(self.motors.length-1)
self.motors.length -= 1
devinfotype = c_uint*num
devinfo = devinfotype()
self.va.fnLMS_GetDevInfo(addressof(devinfo)) #get device IDs
for mn, i in enumerate(self.motors):
i.ID = int(devinfo[mn]) #copy device IDs to ID variable
modnumtype = c_char*100
modnum = modnumtype()
self.va.fnLMS_GetModelNameA(i.ID,addressof(modnum)) #get device model names
i.model = modnum.value
serial = c_int()
serial = self.va.fnLMS_GetSerialNumber(i.ID) #get device serial numbers
i.serial = serial
return num
|
QuantumQuadrate/CsPyController
|
python/vaunix.py
|
Python
|
lgpl-3.0
| 9,955
|
# Copyright (c) 2020 Ultimaker B.V.
# Cura is released under the terms of the LGPLv3 or higher.
from typing import List, Optional, Dict, cast
from PyQt5.QtCore import pyqtSignal, QObject, pyqtProperty, QCoreApplication
from UM.FlameProfiler import pyqtSlot
from UM.PluginRegistry import PluginRegistry
from UM.Application import Application
from UM.i18n import i18nCatalog
from UM.Settings.ContainerRegistry import ContainerRegistry
from cura.Settings.GlobalStack import GlobalStack
from .UpdatableMachinesModel import UpdatableMachinesModel
import os
import threading
import time
from cura.CuraApplication import CuraApplication
i18n_catalog = i18nCatalog("cura")
class WorkspaceDialog(QObject):
showDialogSignal = pyqtSignal()
def __init__(self, parent = None):
super().__init__(parent)
self._component = None
self._context = None
self._view = None
self._qml_url = "WorkspaceDialog.qml"
self._lock = threading.Lock()
self._default_strategy = None
self._result = {"machine": self._default_strategy,
"quality_changes": self._default_strategy,
"definition_changes": self._default_strategy,
"material": self._default_strategy}
self._override_machine = None
self._visible = False
self.showDialogSignal.connect(self.__show)
self._has_quality_changes_conflict = False
self._has_definition_changes_conflict = False
self._has_machine_conflict = False
self._has_material_conflict = False
self._has_visible_settings_field = False
self._num_visible_settings = 0
self._num_user_settings = 0
self._active_mode = ""
self._quality_name = ""
self._num_settings_overridden_by_quality_changes = 0
self._quality_type = ""
self._intent_name = ""
self._machine_name = ""
self._machine_type = ""
self._variant_type = ""
self._material_labels = []
self._extruders = []
self._objects_on_plate = False
self._is_printer_group = False
self._updatable_machines_model = UpdatableMachinesModel(self)
machineConflictChanged = pyqtSignal()
qualityChangesConflictChanged = pyqtSignal()
materialConflictChanged = pyqtSignal()
numVisibleSettingsChanged = pyqtSignal()
activeModeChanged = pyqtSignal()
qualityNameChanged = pyqtSignal()
hasVisibleSettingsFieldChanged = pyqtSignal()
numSettingsOverridenByQualityChangesChanged = pyqtSignal()
qualityTypeChanged = pyqtSignal()
intentNameChanged = pyqtSignal()
machineNameChanged = pyqtSignal()
updatableMachinesChanged = pyqtSignal()
materialLabelsChanged = pyqtSignal()
objectsOnPlateChanged = pyqtSignal()
numUserSettingsChanged = pyqtSignal()
machineTypeChanged = pyqtSignal()
variantTypeChanged = pyqtSignal()
extrudersChanged = pyqtSignal()
isPrinterGroupChanged = pyqtSignal()
@pyqtProperty(bool, notify = isPrinterGroupChanged)
def isPrinterGroup(self) -> bool:
return self._is_printer_group
def setIsPrinterGroup(self, value: bool):
if value != self._is_printer_group:
self._is_printer_group = value
self.isPrinterGroupChanged.emit()
@pyqtProperty(str, notify=variantTypeChanged)
def variantType(self) -> str:
return self._variant_type
def setVariantType(self, variant_type: str) -> None:
if self._variant_type != variant_type:
self._variant_type = variant_type
self.variantTypeChanged.emit()
@pyqtProperty(str, notify=machineTypeChanged)
def machineType(self) -> str:
return self._machine_type
def setMachineType(self, machine_type: str) -> None:
self._machine_type = machine_type
self.machineTypeChanged.emit()
def setNumUserSettings(self, num_user_settings: int) -> None:
if self._num_user_settings != num_user_settings:
self._num_user_settings = num_user_settings
self.numVisibleSettingsChanged.emit()
@pyqtProperty(int, notify=numUserSettingsChanged)
def numUserSettings(self) -> int:
return self._num_user_settings
@pyqtProperty(bool, notify=objectsOnPlateChanged)
def hasObjectsOnPlate(self) -> bool:
return self._objects_on_plate
def setHasObjectsOnPlate(self, objects_on_plate):
if self._objects_on_plate != objects_on_plate:
self._objects_on_plate = objects_on_plate
self.objectsOnPlateChanged.emit()
@pyqtProperty("QVariantList", notify = materialLabelsChanged)
def materialLabels(self) -> List[str]:
return self._material_labels
def setMaterialLabels(self, material_labels: List[str]) -> None:
if self._material_labels != material_labels:
self._material_labels = material_labels
self.materialLabelsChanged.emit()
@pyqtProperty("QVariantList", notify=extrudersChanged)
def extruders(self):
return self._extruders
def setExtruders(self, extruders):
if self._extruders != extruders:
self._extruders = extruders
self.extrudersChanged.emit()
@pyqtProperty(str, notify = machineNameChanged)
def machineName(self) -> str:
return self._machine_name
def setMachineName(self, machine_name: str) -> None:
if self._machine_name != machine_name:
self._machine_name = machine_name
self.machineNameChanged.emit()
@pyqtProperty(QObject, notify = updatableMachinesChanged)
def updatableMachinesModel(self) -> UpdatableMachinesModel:
return cast(UpdatableMachinesModel, self._updatable_machines_model)
def setUpdatableMachines(self, updatable_machines: List[GlobalStack]) -> None:
self._updatable_machines_model.update(updatable_machines)
self.updatableMachinesChanged.emit()
@pyqtProperty(str, notify=qualityTypeChanged)
def qualityType(self) -> str:
return self._quality_type
def setQualityType(self, quality_type: str) -> None:
if self._quality_type != quality_type:
self._quality_type = quality_type
self.qualityTypeChanged.emit()
@pyqtProperty(int, notify=numSettingsOverridenByQualityChangesChanged)
def numSettingsOverridenByQualityChanges(self) -> int:
return self._num_settings_overridden_by_quality_changes
def setNumSettingsOverriddenByQualityChanges(self, num_settings_overridden_by_quality_changes: int) -> None:
self._num_settings_overridden_by_quality_changes = num_settings_overridden_by_quality_changes
self.numSettingsOverridenByQualityChangesChanged.emit()
@pyqtProperty(str, notify=qualityNameChanged)
def qualityName(self) -> str:
return self._quality_name
def setQualityName(self, quality_name: str) -> None:
if self._quality_name != quality_name:
self._quality_name = quality_name
self.qualityNameChanged.emit()
@pyqtProperty(str, notify = intentNameChanged)
def intentName(self) -> str:
return self._intent_name
def setIntentName(self, intent_name: str) -> None:
if self._intent_name != intent_name:
self._intent_name = intent_name
self.intentNameChanged.emit()
@pyqtProperty(str, notify=activeModeChanged)
def activeMode(self) -> str:
return self._active_mode
def setActiveMode(self, active_mode: int) -> None:
if active_mode == 0:
self._active_mode = i18n_catalog.i18nc("@title:tab", "Recommended")
else:
self._active_mode = i18n_catalog.i18nc("@title:tab", "Custom")
self.activeModeChanged.emit()
@pyqtProperty(bool, notify = hasVisibleSettingsFieldChanged)
def hasVisibleSettingsField(self) -> bool:
return self._has_visible_settings_field
def setHasVisibleSettingsField(self, has_visible_settings_field: bool) -> None:
self._has_visible_settings_field = has_visible_settings_field
self.hasVisibleSettingsFieldChanged.emit()
@pyqtProperty(int, constant = True)
def totalNumberOfSettings(self) -> int:
general_definition_containers = ContainerRegistry.getInstance().findDefinitionContainers(id = "fdmprinter")
if not general_definition_containers:
return 0
return len(general_definition_containers[0].getAllKeys())
@pyqtProperty(int, notify = numVisibleSettingsChanged)
def numVisibleSettings(self) -> int:
return self._num_visible_settings
def setNumVisibleSettings(self, num_visible_settings: int) -> None:
if self._num_visible_settings != num_visible_settings:
self._num_visible_settings = num_visible_settings
self.numVisibleSettingsChanged.emit()
@pyqtProperty(bool, notify = machineConflictChanged)
def machineConflict(self) -> bool:
return self._has_machine_conflict
@pyqtProperty(bool, notify=qualityChangesConflictChanged)
def qualityChangesConflict(self) -> bool:
return self._has_quality_changes_conflict
@pyqtProperty(bool, notify=materialConflictChanged)
def materialConflict(self) -> bool:
return self._has_material_conflict
@pyqtSlot(str, str)
def setResolveStrategy(self, key: str, strategy: Optional[str]) -> None:
if key in self._result:
self._result[key] = strategy
def getMachineToOverride(self) -> str:
return self._override_machine
@pyqtSlot(str)
def setMachineToOverride(self, machine_name: str) -> None:
self._override_machine = machine_name
@pyqtSlot()
def closeBackend(self) -> None:
"""Close the backend: otherwise one could end up with "Slicing..."""
Application.getInstance().getBackend().close()
def setMaterialConflict(self, material_conflict: bool) -> None:
if self._has_material_conflict != material_conflict:
self._has_material_conflict = material_conflict
self.materialConflictChanged.emit()
def setMachineConflict(self, machine_conflict: bool) -> None:
if self._has_machine_conflict != machine_conflict:
self._has_machine_conflict = machine_conflict
self.machineConflictChanged.emit()
def setQualityChangesConflict(self, quality_changes_conflict: bool) -> None:
if self._has_quality_changes_conflict != quality_changes_conflict:
self._has_quality_changes_conflict = quality_changes_conflict
self.qualityChangesConflictChanged.emit()
def getResult(self) -> Dict[str, Optional[str]]:
if "machine" in self._result and self.updatableMachinesModel.count <= 1:
self._result["machine"] = None
if "quality_changes" in self._result and not self._has_quality_changes_conflict:
self._result["quality_changes"] = None
if "material" in self._result and not self._has_material_conflict:
self._result["material"] = None
        # If the machine needs to be re-created, the definition_changes should also be re-created.
        # If the machine strategy is None, there is no name conflict with an existing machine; in
        # that case new definition changes are created.
if "machine" in self._result:
if self._result["machine"] == "new" or self._result["machine"] is None and self._result["definition_changes"] is None:
self._result["definition_changes"] = "new"
return self._result
def _createViewFromQML(self) -> None:
three_mf_reader_path = PluginRegistry.getInstance().getPluginPath("3MFReader")
if three_mf_reader_path:
path = os.path.join(three_mf_reader_path, self._qml_url)
self._view = CuraApplication.getInstance().createQmlComponent(path, {"manager": self})
def show(self) -> None:
# Emit signal so the right thread actually shows the view.
if threading.current_thread() != threading.main_thread():
self._lock.acquire()
# Reset the result
self._result = {"machine": self._default_strategy,
"quality_changes": self._default_strategy,
"definition_changes": self._default_strategy,
"material": self._default_strategy}
self._visible = True
self.showDialogSignal.emit()
@pyqtSlot()
def notifyClosed(self) -> None:
"""Used to notify the dialog so the lock can be released."""
        self._result = {}  # Clear the result before releasing the lock, since the main thread may read it as soon as the dialog closes
self._visible = False
try:
self._lock.release()
except:
pass
def hide(self) -> None:
self._visible = False
self._view.hide()
try:
self._lock.release()
except:
pass
@pyqtSlot(bool)
def _onVisibilityChanged(self, visible: bool) -> None:
if not visible:
try:
self._lock.release()
except:
pass
@pyqtSlot()
def onOkButtonClicked(self) -> None:
self._view.hide()
self.hide()
@pyqtSlot()
def onCancelButtonClicked(self) -> None:
self._result = {}
self._view.hide()
self.hide()
def waitForClose(self) -> None:
"""Block thread until the dialog is closed."""
if self._visible:
if threading.current_thread() != threading.main_thread():
self._lock.acquire()
self._lock.release()
else:
# If this is not run from a separate thread, we need to ensure that the events are still processed.
while self._visible:
time.sleep(1 / 50)
QCoreApplication.processEvents() # Ensure that the GUI does not freeze.
def __show(self) -> None:
if self._view is None:
self._createViewFromQML()
if self._view:
self._view.show()
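# A minimal usage sketch (hypothetical call site on a non-GUI thread; the
# dialog blocks that thread until the user closes it):
#
#     dialog = WorkspaceDialog()
#     dialog.setMachineConflict(True)
#     dialog.show()
#     dialog.waitForClose()
#     strategies = dialog.getResult()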
|
Ultimaker/Cura
|
plugins/3MFReader/WorkspaceDialog.py
|
Python
|
lgpl-3.0
| 14,135
|
# This file is part of PRAW.
#
# PRAW is free software: you can redistribute it and/or modify it under the
# terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# PRAW is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# PRAW. If not, see <http://www.gnu.org/licenses/>.
"""
Python Reddit API Wrapper.
PRAW, an acronym for "Python Reddit API Wrapper", is a python package that
allows for simple access to reddit's API. PRAW aims to be as easy to use as
possible and is designed to follow all of reddit's API rules. You have to
supply a user agent; everything else is handled by PRAW, so you needn't worry
about violating the API rules.
More information about PRAW can be found at https://github.com/praw-dev/praw
"""
import json
import os
import platform
import re
import requests
import six
import sys
from . import decorators, errors
from .handlers import DefaultHandler
from .helpers import normalize_url
from .internal import (_prepare_request, _raise_redirect_exceptions,
_raise_response_exceptions)
from .settings import CONFIG
from requests.compat import urljoin
from requests import Request
from six.moves import html_entities, http_cookiejar
try:
from update_checker import update_check
uc_disabled = False
except ImportError:
uc_disabled = True
from warnings import simplefilter, warn, warn_explicit
__version__ = '2.1.4'
try:
platform_info = platform.platform(True)
except IOError:
platform_info = ""
UA_STRING = '%%s PRAW/%s Python/%s %s' % (__version__,
sys.version.split()[0],
platform_info)
MIN_IMAGE_SIZE = 128
MAX_IMAGE_SIZE = 512000
JPEG_HEADER = b'\xff\xd8\xff'
PNG_HEADER = b'\x89\x50\x4e\x47\x0d\x0a\x1a\x0a'
# Enable deprecation warnings
simplefilter('default')
# Compatibility
if six.PY3:
CHR = chr
else:
CHR = unichr
class Config(object): # pylint: disable-msg=R0903, R0924
"""A class containing the configuration for a reddit site."""
API_PATHS = {'accept_mod_invite': 'api/accept_moderator_invite',
'access_token_url': 'api/v1/access_token/',
'approve': 'api/approve/',
'authorize': 'api/v1/authorize/',
'banned': 'r/%s/about/banned/',
'by_id': 'by_id/',
'captcha': 'captcha/',
'clearflairtemplates': 'api/clearflairtemplates/',
'comment': 'api/comment/',
'comments': 'comments/',
'compose': 'api/compose/',
'contributors': 'r/%s/about/contributors/',
'controversial': 'controversial/',
'del': 'api/del/',
'deleteflair': 'api/deleteflair',
'delete_sr_header': 'r/%s/api/delete_sr_header',
'delete_sr_image': 'r/%s/api/delete_sr_img',
'distinguish': 'api/distinguish/',
'domain': 'domain/%s/',
'edit': 'api/editusertext/',
'feedback': 'api/feedback/',
'flair': 'api/flair/',
'flairconfig': 'api/flairconfig/',
'flaircsv': 'api/flaircsv/',
'flairlist': 'r/%s/api/flairlist/',
'flairtemplate': 'api/flairtemplate/',
'friend': 'api/friend/',
'friends': 'prefs/friends/',
'help': 'help/',
'hide': 'api/hide/',
'inbox': 'message/inbox/',
'info': 'api/info/',
'login': 'api/login/',
'me': 'api/v1/me',
'mentions': 'message/mentions',
'moderators': 'r/%s/about/moderators/',
'modlog': 'r/%s/about/log/',
'modqueue': 'r/%s/about/modqueue/',
'mod_mail': 'r/%s/message/moderator/',
'morechildren': 'api/morechildren/',
'my_con_subreddits': 'subreddits/mine/contributor/',
'my_mod_subreddits': 'subreddits/mine/moderator/',
'my_subreddits': 'subreddits/mine/subscriber/',
'new': 'new/',
'marknsfw': 'api/marknsfw/',
'popular_subreddits': 'subreddits/popular/',
'read_message': 'api/read_message/',
'reddit_url': '/',
'register': 'api/register/',
'remove': 'api/remove/',
'report': 'api/report/',
'reports': 'r/%s/about/reports/',
'save': 'api/save/',
'saved': 'saved/',
'search': 'r/%s/search/',
'search_reddit_names': 'api/search_reddit_names/',
'sent': 'message/sent/',
'sticky_submission': 'api/set_subreddit_sticky/',
'site_admin': 'api/site_admin/',
'spam': 'r/%s/about/spam/',
'stylesheet': 'r/%s/about/stylesheet/',
'submit': 'api/submit/',
'sub_comments_gilded': 'r/%s/comments/gilded/',
'subreddit': 'r/%s/',
'subreddit_about': 'r/%s/about/',
'subreddit_comments': 'r/%s/comments/',
'subreddit_css': 'api/subreddit_stylesheet/',
'subreddit_random': 'r/%s/random/',
'subreddit_settings': 'r/%s/about/edit/',
'subscribe': 'api/subscribe/',
'top': 'top/',
'unfriend': 'api/unfriend/',
'unhide': 'api/unhide/',
'unmarknsfw': 'api/unmarknsfw/',
'unmoderated': 'r/%s/about/unmoderated/',
'unread': 'message/unread/',
'unread_message': 'api/unread_message/',
'unsave': 'api/unsave/',
'upload_image': 'api/upload_sr_img',
'user': 'user/%s/',
'user_about': 'user/%s/about/',
'username_available': 'api/username_available/',
'vote': 'api/vote/',
'wiki_edit': 'api/wiki/edit/',
'wiki_page': 'r/%s/wiki/%s', # No trailing /
'wiki_pages': 'r/%s/wiki/pages/',
'wiki_banned': 'r/%s/about/wikibanned/',
'wiki_contributors': 'r/%s/about/wikicontributors/'}
SSL_PATHS = ('access_token_url', 'authorize', 'login')
def __init__(self, site_name):
def config_boolean(item):
return item and item.lower() in ('1', 'yes', 'true', 'on')
obj = dict(CONFIG.items(site_name))
self._site_url = 'http://' + obj['domain']
if 'ssl_domain' in obj:
self._ssl_url = 'https://' + obj['ssl_domain']
else:
self._ssl_url = None
if 'oauth_domain' in obj:
if config_boolean(obj['oauth_https']):
self._oauth_url = 'https://' + obj['oauth_domain']
else:
self._oauth_url = 'http://' + obj['oauth_domain']
else:
self._oauth_url = self._ssl_url
self.api_request_delay = float(obj['api_request_delay'])
self.by_kind = {obj['comment_kind']: objects.Comment,
obj['message_kind']: objects.Message,
obj['redditor_kind']: objects.Redditor,
obj['submission_kind']: objects.Submission,
obj['subreddit_kind']: objects.Subreddit,
'modaction': objects.ModAction,
'more': objects.MoreComments,
'wikipage': objects.WikiPage,
'wikipagelisting': objects.WikiPageListing,
'UserList': objects.UserList}
self.by_object = dict((value, key) for (key, value) in
six.iteritems(self.by_kind))
self.by_object[objects.LoggedInRedditor] = obj['redditor_kind']
self.cache_timeout = float(obj['cache_timeout'])
self.check_for_updates = config_boolean(obj['check_for_updates'])
self.decode_html_entities = config_boolean(obj['decode_html_entities'])
self.domain = obj['domain']
self.output_chars_limit = int(obj['output_chars_limit'])
self.log_requests = int(obj['log_requests'])
self.http_proxy = (obj.get('http_proxy') or os.getenv('http_proxy') or
None)
# We use `get(...) or None` because `get` may return an empty string
self.client_id = obj.get('oauth_client_id') or None
self.client_secret = obj.get('oauth_client_secret') or None
self.redirect_uri = obj.get('oauth_redirect_uri') or None
if 'short_domain' in obj:
self._short_domain = 'http://' + obj['short_domain']
else:
self._short_domain = None
self.timeout = float(obj['timeout'])
try:
self.user = obj['user'] if obj['user'] else None
self.pswd = obj['pswd']
except KeyError:
self.user = self.pswd = None
self.is_reddit = obj['domain'].endswith('reddit.com')
def __getitem__(self, key):
"""Return the URL for key."""
if self._ssl_url and key in self.SSL_PATHS:
return urljoin(self._ssl_url, self.API_PATHS[key])
return urljoin(self._site_url, self.API_PATHS[key])
@property
def short_domain(self):
"""Return the short domain of the reddit.
        Used to generate the shortlink. For reddit.com the short_domain is
        redd.it, which generates shortlinks like http://redd.it/y3r8u.
"""
if self._short_domain:
return self._short_domain
else:
raise errors.ClientException('No short domain specified.')
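# Config maps short keys to fully qualified URLs via __getitem__, preferring
# the SSL domain for the paths in SSL_PATHS. A sketch, assuming a stock
# praw.ini (the exact URLs depend on the configured domains):
#
#     config = Config('reddit')
#     config['comments']  # -> 'http://www.reddit.com/comments/'
#     config['login']     # -> 'https://ssl.reddit.com/api/login/'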
class BaseReddit(object):
"""A base class that allows access to reddit's API.
You should **not** directly instantiate instances of this class. Use
:class:`.Reddit` instead.
"""
RETRY_CODES = [502, 503, 504]
update_checked = False
def __init__(self, user_agent, site_name=None, handler=None,
disable_update_check=False):
"""Initialize our connection with a reddit server.
The user_agent is how your application identifies itself. Read the
official API guidelines for user_agents
https://github.com/reddit/reddit/wiki/API. Applications using default
user_agents such as "Python/urllib" are drastically limited.
site_name allows you to specify which reddit you want to connect to.
The installation defaults are reddit.com, if you only need to connect
to reddit.com then you can safely ignore this. If you want to connect
to another reddit, set site_name to the name of that reddit. This must
match with an entry in praw.ini. If site_name is None, then the site
name will be looked for in the environment variable REDDIT_SITE. If it
is not found there, the default site name reddit matching reddit.com
will be used.
disable_update_check allows you to prevent an update check from
occurring in spite of the check_for_updates setting in praw.ini.
"""
if not user_agent or not isinstance(user_agent, six.string_types):
raise TypeError('User agent must be a non-empty string.')
self.config = Config(site_name or os.getenv('REDDIT_SITE') or 'reddit')
if handler:
self.handler = handler
else:
self.handler = DefaultHandler()
self.http = requests.session() # Dummy session
self.http.headers['User-Agent'] = UA_STRING % user_agent
if self.config.http_proxy:
self.http.proxies = {'http': self.config.http_proxy}
self.modhash = None
# Check for updates if permitted and this is the first Reddit instance
if not disable_update_check and not self.update_checked \
and self.config.check_for_updates and not uc_disabled:
update_check(__name__, __version__)
self.update_checked = True
def _request(self, url, params=None, data=None, files=None, auth=None,
timeout=45, raw_response=False):
"""Given a page url and a dict of params, open and return the page.
:param url: the url to grab content from.
:param params: a dictionary containing the GET data to put in the url
:param data: a dictionary containing the extra data to submit
:param files: a dictionary specifying the files to upload
:param auth: Add the HTTP authentication headers (see requests)
:param timeout: Specifies the maximum time that the actual HTTP request
can take.
:param raw_response: return the response object rather than the
response body
:returns: either the response body or the response object
"""
def decode(match):
return CHR(html_entities.name2codepoint[match.group(1)])
def handle_redirect():
response = None
url = request.url
while url: # Manually handle 302 redirects
request.url = url
response = self.handler.request(request=request.prepare(),
proxies=self.http.proxies,
timeout=timeout, **kwargs)
url = _raise_redirect_exceptions(response)
return response
request = _prepare_request(self, url, params, data, auth, files)
timeout = self.config.timeout if timeout is None else timeout
# Prepare extra arguments
key_items = []
oauth = request.headers.get('Authorization', None)
for key_value in (params, data, request.cookies, auth, oauth):
if isinstance(key_value, dict):
key_items.append(tuple(key_value.items()))
elif isinstance(key_value, http_cookiejar.CookieJar):
key_items.append(tuple(key_value.get_dict().items()))
else:
key_items.append(key_value)
cache_key = (normalize_url(request.url), tuple(key_items))
kwargs = {'_rate_domain': self.config.domain,
'_rate_delay': int(self.config.api_request_delay),
'_cache_key': cache_key,
'_cache_ignore': bool(files) or raw_response,
'_cache_timeout': int(self.config.cache_timeout)}
remaining_attempts = 3
while True:
try:
response = handle_redirect()
_raise_response_exceptions(response)
self.http.cookies.update(response.cookies)
if raw_response:
return response
elif self.config.decode_html_entities:
return re.sub('&([^;]+);', decode, response.text)
else:
return response.text
except requests.exceptions.HTTPError as error:
remaining_attempts -= 1
if error.response.status_code not in self.RETRY_CODES or \
remaining_attempts == 0:
raise
except requests.exceptions.RequestException:
remaining_attempts -= 1
if remaining_attempts == 0:
raise
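    # The loop above makes at most three attempts, retrying only transient
    # gateway errors (RETRY_CODES: 502, 503, 504) and connection-level
    # request failures; any other HTTPError propagates immediately.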
def _json_reddit_objecter(self, json_data):
"""Return an appropriate RedditObject from json_data when possible."""
try:
object_class = self.config.by_kind[json_data['kind']]
except KeyError:
if 'json' in json_data:
if len(json_data) != 1:
warn_explicit('Unknown object type: %s' %
json_data, UserWarning, '', 0)
return json_data['json']
else:
return object_class.from_api_response(self, json_data['data'])
return json_data
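    # _json_reddit_objecter is installed as the object_hook of json.loads
    # (see request_json below), so each JSON object in a response may be
    # promoted to a typed object. A sketch, assuming the stock praw.ini
    # kinds ('t1' for comments, 't3' for submissions):
    #
    #     {'kind': 't1', 'data': {...}}  ->  objects.Comment instance
    #     {'kind': 't3', 'data': {...}}  ->  objects.Submission instance
    #     {'json': {...}}                ->  the bare inner dict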
def evict(self, urls):
"""Evict url(s) from the cache."""
if isinstance(urls, six.string_types):
urls = (urls,)
self.handler.evict(urls)
@decorators.oauth_generator
def get_content(self, url, params=None, limit=0, place_holder=None,
root_field='data', thing_field='children',
after_field='after', _use_oauth=False):
"""A generator method to return reddit content from a URL.
Starts at the initial url, and fetches content using the `after`
JSON data until `limit` entries have been fetched, or the
`place_holder` has been reached.
:param url: the url to start fetching content from
:param params: dictionary containing extra GET data to put in the url
:param limit: the number of content entries to fetch. If limit <= 0,
fetch the default for your account (25 for unauthenticated
users). If limit is None, then fetch as many entries as possible
(reddit returns at most 100 per request, however, PRAW will
automatically make additional requests as necessary).
:param place_holder: if not None, the method will fetch `limit`
content, stopping if it finds content with `id` equal to
`place_holder`. The place_holder item is the last item to be
yielded from this generator. Note that the use of `place_holder` is
not 100% reliable as the place holder item may no longer exist due
to being removed or deleted.
:param root_field: indicates the field in the json response that holds
the data. Most objects use 'data', however some (flairlist) don't
have the 'data' object. Use None for the root object.
:param thing_field: indicates the field under the root_field which
contains the list of things. Most objects use 'children'.
:param after_field: indicates the field which holds the after item
element
:type place_holder: a string corresponding to a reddit base36 id
without prefix, e.g. 'asdfasdf'
:returns: a list of reddit content, of type Subreddit, Comment,
Submission or user flair.
"""
objects_found = 0
params = params or {}
fetch_all = fetch_once = False
if limit is None:
fetch_all = True
params['limit'] = 1024 # Just use a big number
elif limit > 0:
params['limit'] = limit
else:
fetch_once = True
# While we still need to fetch more content to reach our limit, do so.
while fetch_once or fetch_all or objects_found < limit:
if _use_oauth: # Set the necessary _use_oauth value
assert self._use_oauth is False
self._use_oauth = _use_oauth # pylint: disable-msg=W0201
try:
page_data = self.request_json(url, params=params)
finally: # Restore _use_oauth value
if _use_oauth:
self._use_oauth = False # pylint: disable-msg=W0201
fetch_once = False
if root_field:
root = page_data[root_field]
else:
root = page_data
for thing in root[thing_field]:
yield thing
objects_found += 1
# Terminate when we've reached the limit, or place holder
if objects_found == limit or (place_holder and
thing.id == place_holder):
return
# Set/update the 'after' parameter for the next iteration
if root.get(after_field):
# We use `root.get` to also test if the value evaluates to True
params['after'] = root[after_field]
else:
return
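    # A pagination sketch: get_content follows the `after` token on the
    # caller's behalf, so iterating is enough (the URL is illustrative):
    #
    #     for link in r.get_content('http://www.reddit.com/new/', limit=250):
    #         process(link)
    #
    # Since reddit returns at most 100 items per request, roughly three
    # requests would be issued behind the scenes to satisfy limit=250.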
@decorators.raise_api_exceptions
def request_json(self, url, params=None, data=None, as_objects=True):
"""Get the JSON processed from a page.
:param url: the url to grab content from.
:param params: a dictionary containing the GET data to put in the url
:param data: a dictionary containing the extra data to submit
:param as_objects: if True return reddit objects else raw json dict.
:returns: JSON processed page
"""
url += '.json'
response = self._request(url, params, data)
if as_objects:
hook = self._json_reddit_objecter
else:
hook = None
# Request url just needs to be available for the objecter to use
self._request_url = url # pylint: disable-msg=W0201
data = json.loads(response, object_hook=hook)
delattr(self, '_request_url')
# Update the modhash
if isinstance(data, dict) and 'data' in data \
and 'modhash' in data['data']:
self.modhash = data['data']['modhash']
return data
class OAuth2Reddit(BaseReddit):
"""Provides functionality for obtaining reddit OAuth2 access tokens.
You should **not** directly instantiate instances of this class. Use
:class:`.Reddit` instead.
"""
def __init__(self, *args, **kwargs):
super(OAuth2Reddit, self).__init__(*args, **kwargs)
self.client_id = self.config.client_id
self.client_secret = self.config.client_secret
self.redirect_uri = self.config.redirect_uri
def _handle_oauth_request(self, data):
auth = (self.client_id, self.client_secret)
url = self.config['access_token_url']
response = self._request(url, auth=auth, data=data, raw_response=True)
if response.status_code != 200:
raise errors.OAuthException('Unexpected OAuthReturn: %d' %
response.status_code, url)
retval = response.json()
if 'error' in retval:
error = retval['error']
if error == 'invalid_grant':
raise errors.OAuthInvalidGrant(error, url)
raise errors.OAuthException(retval['error'], url)
return retval
@decorators.require_oauth
def get_access_information(self, code):
"""Return the access information for an OAuth2 authorization grant.
:param code: the code received in the request from the OAuth2 server
:returns: A dictionary with the key/value pairs for access_token,
            refresh_token and scope. The refresh_token value will be None when
the OAuth2 grant is not refreshable. The scope value will be a set
containing the scopes the tokens are valid for.
"""
data = {'code': code, 'grant_type': 'authorization_code',
'redirect_uri': self.redirect_uri}
retval = self._handle_oauth_request(data)
return {'access_token': retval['access_token'],
'refresh_token': retval.get('refresh_token'),
'scope': set(retval['scope'].split(','))}
@decorators.require_oauth
def get_authorize_url(self, state, scope='identity', refreshable=False):
"""Return the URL to send the user to for OAuth2 authorization.
:param state: a unique key that represents this individual client
:param scope: the reddit scope to ask permissions for. Multiple scopes
can be enabled by passing in a container of strings.
:param refreshable: when True, a permanent "refreshable" token is
issued
"""
params = {'client_id': self.client_id, 'response_type': 'code',
'redirect_uri': self.redirect_uri, 'state': state}
if isinstance(scope, six.string_types):
params['scope'] = scope
else:
params['scope'] = ','.join(scope)
params['duration'] = 'permanent' if refreshable else 'temporary'
request = Request('GET', self.config['authorize'], params=params)
return request.prepare().url
@property
def has_oauth_app_info(self):
"""Return True if all the necessary OAuth settings are set."""
return all((self.client_id, self.client_secret, self.redirect_uri))
@decorators.require_oauth
def refresh_access_information(self, refresh_token):
"""Return updated access information for an OAuth2 authorization grant.
:param refresh_token: the refresh token used to obtain the updated
information
:returns: A dictionary with the key/value pairs for access_token,
            refresh_token and scope. The refresh_token value will be None when
the OAuth2 grant is not refreshable. The scope value will be a set
containing the scopes the tokens are valid for.
"""
data = {'grant_type': 'refresh_token',
'redirect_uri': self.redirect_uri,
'refresh_token': refresh_token}
retval = self._handle_oauth_request(data)
return {'access_token': retval['access_token'],
'refresh_token': refresh_token,
'scope': set(retval['scope'].split(','))}
def set_oauth_app_info(self, client_id, client_secret, redirect_uri):
"""Set the App information to use with OAuth2.
This function need only be called if your praw.ini site configuration
does not already contain the necessary information.
Go to https://ssl.reddit.com/prefs/apps/ to discover the appropriate
values for your application.
:param client_id: the client_id of your application
:param client_secret: the client_secret of your application
:param redirect_uri: the redirect_uri of your application
"""
self.client_id = client_id
self.client_secret = client_secret
self.redirect_uri = redirect_uri
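# A sketch of the OAuth2 flow these methods support; the client id, secret,
# and redirect URI below are placeholders for your app's registered values:
#
#     r = Reddit(user_agent='my_app/0.1 by u/example')
#     r.set_oauth_app_info(client_id='...', client_secret='...',
#                          redirect_uri='http://127.0.0.1:65010/callback')
#     url = r.get_authorize_url('uniqueKey', scope='identity',
#                               refreshable=True)
#     # Send the user to `url`; then exchange the code from the redirect:
#     info = r.get_access_information('code_from_redirect')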
class UnauthenticatedReddit(BaseReddit):
"""This mixin provides bindings for basic functions of reddit's API.
None of these functions require authenticated access to reddit's API.
You should **not** directly instantiate instances of this class. Use
:class:`.Reddit` instead.
"""
def __init__(self, *args, **kwargs):
super(UnauthenticatedReddit, self).__init__(*args, **kwargs)
self._random_count = 0
@decorators.require_captcha
def create_redditor(self, user_name, password, email='', captcha=None):
"""Register a new user.
:returns: The json response from the server.
"""
data = {'email': email,
'passwd': password,
'passwd2': password,
'user': user_name}
if captcha:
data.update(captcha)
return self.request_json(self.config['register'], data=data)
def get_all_comments(self, *args, **kwargs):
"""Return a get_content generator for comments from `all` subreddits.
This is a **deprecated** convenience function for :meth:`.get_comments`
with `all` specified for the subreddit. This function will be removed
in a future version of PRAW.
"""
warn('Please use `get_comments(\'all\', ...)` instead',
DeprecationWarning)
return self.get_comments('all', *args, **kwargs)
@decorators.restrict_access(scope='read')
def get_comments(self, subreddit, gilded_only=False, *args, **kwargs):
"""Return a get_content generator for comments in the given subreddit.
:param gilded_only: If True only return gilded comments.
The additional parameters are passed directly into
:meth:`.get_content`. Note: the `url` parameter cannot be altered.
"""
if gilded_only:
url = self.config['sub_comments_gilded'] % six.text_type(subreddit)
else:
url = self.config['subreddit_comments'] % six.text_type(subreddit)
return self.get_content(url, *args, **kwargs)
@decorators.restrict_access(scope='read')
def get_controversial(self, *args, **kwargs):
"""Return a get_content generator for controversial submissions.
Corresponds to submissions provided by
http://www.reddit.com/controversial/ for the session.
The additional parameters are passed directly into
:meth:`.get_content`. Note: the `url` parameter cannot be altered.
"""
return self.get_content(self.config['controversial'], *args, **kwargs)
@decorators.restrict_access(scope='read')
def get_domain_listing(self, domain, sort='hot', period=None, *args,
**kwargs):
"""Return a get_content generator for submissions by domain.
Corresponds to the submissions provided by
http://www.reddit.com/domain/{domain}.
:param domain: The domain to generate a submission listing for.
:param sort: When provided must be one of 'hot', 'new', 'rising',
            'controversial', or 'top'. Defaults to 'hot'.
:param period: When sort is either 'controversial', or 'top' the period
can be either None (for account default), 'all', 'year', 'month',
'week', 'day', or 'hour'.
The additional parameters are passed directly into
:meth:`.get_content`. Note: the `url` parameter cannot be altered.
"""
# Verify arguments
if sort not in ('controversial', 'hot', 'new', 'rising', 'top'):
raise TypeError('Invalid sort parameter.')
if period not in (None, 'all', 'day', 'hour', 'month', 'week', 'year'):
raise TypeError('Invalid period parameter.')
if sort not in ('controversial', 'top') and period:
raise TypeError('Period cannot be set for that sort argument.')
# Build url
url = self.config['domain'] % domain
if sort != 'hot':
url += sort
if period: # Set or overwrite params 't' parameter
kwargs.setdefault('params', {})['t'] = period
return self.get_content(url, *args, **kwargs)
def get_flair(self, subreddit, redditor):
"""Return the flair for a user on the given subreddit.
:param subreddit: Can be either a Subreddit object or the name of a
subreddit.
:param redditor: Can be either a Redditor object or the name of a
redditor.
:returns: None if the user doesn't exist, otherwise a dictionary
containing the keys `flair_css_class`, `flair_text`, and `user`.
"""
name = six.text_type(redditor)
params = {'name': name}
data = self.request_json(self.config['flairlist'] %
six.text_type(subreddit), params=params)
if not data['users'] or \
data['users'][0]['user'].lower() != name.lower():
return None
return data['users'][0]
@decorators.restrict_access(scope='read')
def get_front_page(self, *args, **kwargs):
"""Return a get_content generator for the front page submissions.
Corresponds to the submissions provided by http://www.reddit.com/ for
the session.
The additional parameters are passed directly into
:meth:`.get_content`. Note: the `url` parameter cannot be altered.
"""
return self.get_content(self.config['reddit_url'], *args, **kwargs)
@decorators.restrict_access(scope='read')
def get_info(self, url=None, thing_id=None, limit=None):
"""Look up existing Submissions by thing_id (fullname) or url.
:param url: The url to lookup.
:param thing_id: The submission to lookup by fullname.
:param limit: The maximum number of Submissions to return when looking
up by url. When None, uses account default settings.
:returns: When thing_id is provided, return the corresponding
Submission object, or None if not found. When url is provided
return a list of Submission objects (up to limit) for the url.
"""
if bool(url) == bool(thing_id):
raise TypeError('Only one of url or thing_id is required!')
elif thing_id and limit:
raise TypeError('Limit keyword is not applicable with thing_id.')
if url:
params = {'url': url}
if limit:
params['limit'] = limit
else:
params = {'id': thing_id}
items = self.request_json(self.config['info'],
params=params)['data']['children']
if url:
return items
elif items:
return items[0]
else:
return None
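    # Both lookup modes, sketched (the id and url are placeholders):
    #
    #     sub = r.get_info(thing_id='t3_exmpl')                  # Submission
    #     subs = r.get_info(url='http://example.com', limit=25)  # a list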
def get_moderators(self, subreddit):
"""Return the list of moderators for the given subreddit."""
return self.request_json(self.config['moderators'] %
six.text_type(subreddit))
@decorators.restrict_access(scope='read')
def get_new(self, *args, **kwargs):
"""Return a get_content generator for new submissions.
Corresponds to the submissions provided by http://www.reddit.com/new/
for the session.
The additional parameters are passed directly into
:meth:`.get_content`. Note: the `url` parameter cannot be altered.
"""
return self.get_content(self.config['new'], *args, **kwargs)
def get_popular_reddits(self, *args, **kwargs):
"""Return a get_content generator for the most active subreddits.
This is a **deprecated** version of :meth:`.get_popular_subreddits`.
"""
warn('Please use `get_popular_subreddits` instead', DeprecationWarning)
return self.get_popular_subreddits(*args, **kwargs)
def get_popular_subreddits(self, *args, **kwargs):
"""Return a get_content generator for the most active subreddits.
The additional parameters are passed directly into
:meth:`.get_content`. Note: the `url` parameter cannot be altered.
"""
url = self.config['popular_subreddits']
return self.get_content(url, *args, **kwargs)
def get_random_subreddit(self):
"""Return a random Subreddit object.
Utilizes the same mechanism as http://www.reddit.com/r/random/.
"""
response = self._request(self.config['subreddit'] % 'random',
raw_response=True)
return self.get_subreddit(response.url.rsplit('/', 2)[-2])
def get_random_submission(self, subreddit='all'):
"""Return a random Submission object.
:param subreddit: Limit the submission to the specified
subreddit(s). Default: all
"""
url = self.config['subreddit_random'] % six.text_type(subreddit)
try:
self._request(url, params={'unique': self._random_count},
raw_response=True)
except errors.RedirectException as exc: # This _should_ occur
self._random_count += 1 # Avoid network-level caching
return self.get_submission(exc.response_url)
raise errors.ClientException('Expected exception not raised.')
def get_redditor(self, user_name, *args, **kwargs):
"""Return a Redditor instance for the user_name specified.
The additional parameters are passed directly into the
:class:`.Redditor` constructor.
"""
return objects.Redditor(self, user_name, *args, **kwargs)
def get_submission(self, url=None, submission_id=None, comment_limit=0,
comment_sort=None):
"""Return a Submission object for the given url or submission_id.
:param comment_limit: The desired number of comments to fetch. If <= 0
fetch the default number for the session's user. If None, fetch the
maximum possible.
:param comment_sort: The sort order for retrieved comments. When None
use the default for the session's user.
"""
if bool(url) == bool(submission_id):
raise TypeError('One (and only one) of id or url is required!')
if submission_id:
url = urljoin(self.config['comments'], submission_id)
return objects.Submission.from_url(self, url,
comment_limit=comment_limit,
comment_sort=comment_sort)
def get_submissions(self, fullnames, *args, **kwargs):
"""Generate Submission objects for each item provided in `fullnames`.
A submission fullname looks like `t3_<base36_id>`. Submissions are
yielded in the same order they appear in `fullnames`.
Up to 100 items are batched at a time -- this happens transparently.
The additional parameters are passed directly into
:meth:`.get_content`. Note: the `url` and `limit` parameters cannot be
altered.
"""
fullnames = fullnames[:]
while fullnames:
cur = fullnames[:100]
fullnames[:100] = []
url = self.config['by_id'] + ','.join(cur)
for item in self.get_content(url, limit=len(cur), *args, **kwargs):
yield item
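    # A batching sketch: 250 fullnames yield three by_id requests (100,
    # 100, and 50 items) while the caller sees a single flat generator:
    #
    #     names = ['t3_' + base36 for base36 in known_ids]  # placeholder ids
    #     for submission in r.get_submissions(names):
    #         process(submission)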
def get_subreddit(self, subreddit_name, *args, **kwargs):
"""Return a Subreddit object for the subreddit_name specified.
The additional parameters are passed directly into the
:class:`.Subreddit` constructor.
"""
if subreddit_name.lower() == 'random':
return self.get_random_subreddit()
return objects.Subreddit(self, subreddit_name, *args, **kwargs)
@decorators.restrict_access(scope='read')
def get_top(self, *args, **kwargs):
"""Return a get_content generator for top submissions.
Corresponds to the submissions provided by http://www.reddit.com/top/
for the session.
The additional parameters are passed directly into
:meth:`.get_content`. Note: the `url` parameter cannot be altered.
"""
return self.get_content(self.config['top'], *args, **kwargs)
def get_wiki_page(self, subreddit, page):
"""Return a WikiPage object for the subreddit and page provided."""
return self.request_json(self.config['wiki_page'] %
(six.text_type(subreddit), page.lower()))
def get_wiki_pages(self, subreddit):
"""Return a list of WikiPage objects for the subreddit."""
return self.request_json(self.config['wiki_pages'] %
six.text_type(subreddit))
def is_username_available(self, username):
"""Return True if username is valid and available, otherwise False."""
params = {'user': username}
try:
result = self.request_json(self.config['username_available'],
params=params)
except errors.APIException as exception:
if exception.error_type == 'BAD_USERNAME':
result = False
else:
raise
return result
def search(self, query, subreddit=None, sort=None, syntax=None,
period=None, *args, **kwargs):
"""Return a generator for submissions that match the search query.
:param query: The query string to search for.
:param subreddit: Limit search results to the subreddit if provided.
:param sort: The sort order of the results.
:param syntax: The syntax of the search query.
:param period: The time period of the results.
The additional parameters are passed directly into
:meth:`.get_content`. Note: the `url` and `param` parameters cannot be
altered.
See http://www.reddit.com/help/search for more information on how to
build a search query.
"""
params = {'q': query}
if sort:
params['sort'] = sort
if syntax:
params['syntax'] = syntax
if period:
params['t'] = period
if subreddit:
params['restrict_sr'] = 'on'
url = self.config['search'] % subreddit
else:
url = self.config['search'] % 'all'
return self.get_content(url, params=params, *args, **kwargs)
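    # A search sketch (the query and subreddit are illustrative):
    #
    #     for result in r.search('praw', subreddit='redditdev',
    #                            sort='new', period='month'):
    #         print(result)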
def search_reddit_names(self, query):
"""Return subreddits whose display name contains the query."""
data = {'query': query}
results = self.request_json(self.config['search_reddit_names'],
data=data)
return [self.get_subreddit(name) for name in results['names']]
@decorators.require_captcha
def send_feedback(self, name, email, message, reason='feedback',
captcha=None):
"""Send feedback to the admins.
Please don't abuse this. Read the send feedback page at
http://www.reddit.com/feedback/ (for reddit.com) before use.
:returns: The json response from the server.
"""
data = {'name': name,
'email': email,
'reason': reason,
'text': message}
if captcha:
data.update(captcha)
return self.request_json(self.config['feedback'], data=data)
class AuthenticatedReddit(OAuth2Reddit, UnauthenticatedReddit):
"""This class adds the methods necessary for authenticating with reddit.
Authentication can either be login based (through login), or OAuth2 based
(via set_access_credentials).
You should **not** directly instantiate instances of this class. Use
:class:`.Reddit` instead.
"""
def __init__(self, *args, **kwargs):
super(AuthenticatedReddit, self).__init__(*args, **kwargs)
# Add variable to distinguish between authentication type
# * None means unauthenticated
        # * True means login authenticated
# * set(...) means OAuth authenticated with the scopes in the set
self._authentication = None
self._use_oauth = False # Updated on a request by request basis
self.access_token = None
self.refresh_token = None
self.user = None
def __str__(self):
if isinstance(self._authentication, set):
return 'OAuth2 reddit session (scopes: {0})'.format(
', '.join(self._authentication))
elif self._authentication:
return 'LoggedIn reddit session (user: {0})'.format(self.user)
else:
            return 'Unauthenticated reddit session'
@decorators.restrict_access(scope=None, login=True)
def accept_moderator_invite(self, subreddit):
"""Accept a moderator invite to the given subreddit.
Callable upon an instance of Subreddit with no arguments.
:returns: The json response from the server.
"""
data = {'r': six.text_type(subreddit)}
# Clear moderated subreddits and cache
self.user._mod_subs = None # pylint: disable-msg=W0212
self.evict(self.config['my_mod_subreddits'])
return self.request_json(self.config['accept_mod_invite'], data=data)
def clear_authentication(self):
"""Clear any existing authentication on the reddit object.
This function is implicitly called on `login` and
`set_access_credentials`.
"""
self._authentication = None
self.access_token = None
self.refresh_token = None
self.http.cookies.clear()
self.user = None
def edit_wiki_page(self, subreddit, page, content, reason=''):
"""Create or edit a wiki page with title `page` for `subreddit`.
:returns: The json response from the server.
"""
data = {'content': content,
'page': page,
'r': six.text_type(subreddit),
'reason': reason}
return self.request_json(self.config['wiki_edit'], data=data)
def get_access_information(self, code, # pylint: disable-msg=W0221
update_session=True):
"""Return the access information for an OAuth2 authorization grant.
:param code: the code received in the request from the OAuth2 server
:param update_session: Update the current session with the retrieved
token(s).
:returns: A dictionary with the key/value pairs for access_token,
            refresh_token and scope. The refresh_token value will be None when
the OAuth2 grant is not refreshable.
"""
retval = super(AuthenticatedReddit, self).get_access_information(code)
if update_session:
self.set_access_credentials(**retval)
return retval
@decorators.restrict_access(scope='identity', oauth_only=True)
def get_me(self):
"""Return a LoggedInRedditor object."""
response = self.request_json(self.config['me'])
user = objects.Redditor(self, response['name'], response)
user.__class__ = objects.LoggedInRedditor
return user
def has_scope(self, scope):
"""Return True if OAuth2 authorized for the passed in scope."""
return self.is_oauth_session() and scope in self._authentication
def is_logged_in(self):
"""Return True when session is authenticated via login."""
return self._authentication is True
def is_oauth_session(self):
"""Return True when the current session is an OAuth2 session."""
return isinstance(self._authentication, set)
def login(self, username=None, password=None):
"""Login to a reddit site.
Look for username first in parameter, then praw.ini and finally if both
were empty get it from stdin. Look for password in parameter, then
praw.ini (but only if username matches that in praw.ini) and finally
if they both are empty get it with getpass. Add the variables user
(username) and pswd (password) to your praw.ini file to allow for auto-
login.
A successful login will overwrite any existing authentication.
"""
if password and not username:
raise Exception('Username must be provided when password is.')
user = username or self.config.user
if not user:
sys.stdout.write('Username: ')
sys.stdout.flush()
user = sys.stdin.readline().strip()
pswd = None
else:
pswd = password or self.config.pswd
if not pswd:
import getpass
pswd = getpass.getpass('Password for %s: ' % user)
data = {'passwd': pswd,
'user': user}
self.clear_authentication()
self.request_json(self.config['login'], data=data)
# Update authentication settings
self._authentication = True
self.user = self.get_redditor(user)
self.user.__class__ = objects.LoggedInRedditor
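    # A login sketch; with `user` and `pswd` set in praw.ini the call takes
    # no arguments, otherwise missing credentials are prompted for:
    #
    #     r = Reddit(user_agent='my_app/0.1 by u/example')  # placeholder UA
    #     r.login('example_user', 'example_password')       # placeholders
    #     assert r.is_logged_in()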
def refresh_access_information(self, # pylint: disable-msg=W0221
refresh_token=None,
update_session=True):
"""Return updated access information for an OAuth2 authorization grant.
:param refresh_token: The refresh token used to obtain the updated
information. When not provided, use the stored refresh_token.
:param update_session: Update the session with the returned data.
:returns: A dictionary with the key/value pairs for access_token,
            refresh_token and scope. The refresh_token value will be None when
the OAuth2 grant is not refreshable. The scope value will be a set
containing the scopes the tokens are valid for.
"""
response = super(AuthenticatedReddit, self).refresh_access_information(
refresh_token=refresh_token or self.refresh_token)
if update_session:
self.set_access_credentials(**response)
return response
@decorators.require_oauth
def set_access_credentials(self, scope, access_token, refresh_token=None,
update_user=True):
"""Set the credentials used for OAuth2 authentication.
Calling this function will overwrite any currently existing access
credentials.
:param scope: A set of reddit scopes the tokens provide access to
:param access_token: the access_token of the authentication
:param refresh_token: the refresh token of the authentication
:param update_user: Whether or not to set the user attribute for
identity scopes
"""
if not isinstance(scope, set):
raise TypeError('`scope` parameter must be a set')
self.clear_authentication()
# Update authentication settings
self._authentication = scope
self.access_token = access_token
self.refresh_token = refresh_token
# Update the user object
if update_user and 'identity' in scope:
self.user = self.get_me()
class ModConfigMixin(AuthenticatedReddit):
"""Adds methods requiring the 'modconfig' scope (or mod access).
You should **not** directly instantiate instances of this class. Use
:class:`.Reddit` instead.
"""
@decorators.restrict_access(scope='modconfig')
def create_subreddit(self, name, title, description='', language='en',
subreddit_type='public', content_options='any',
over_18=False, default_set=True, show_media=False,
domain='', wikimode='disabled'):
"""Create a new subreddit.
:returns: The json response from the server.
"""
data = {'name': name,
'title': title,
'description': description,
'lang': language,
'type': subreddit_type,
'link_type': content_options,
'over_18': 'on' if over_18 else 'off',
'allow_top': 'on' if default_set else 'off',
'show_media': 'on' if show_media else 'off',
'wikimode': wikimode,
'domain': domain}
return self.request_json(self.config['site_admin'], data=data)
@decorators.restrict_access(scope='modconfig')
def delete_image(self, subreddit, name=None, header=False):
"""Delete an image from the subreddit.
:param name: The name of the image if removing a CSS image.
:param header: When true, delete the subreddit header.
:returns: The json response from the server.
"""
if name and header:
raise TypeError('Both name and header cannot be set.')
elif name:
data = {'img_name': name}
url = self.config['delete_sr_image']
self.evict(self.config['stylesheet'] % six.text_type(subreddit))
else:
data = True
url = self.config['delete_sr_header']
return self.request_json(url % six.text_type(subreddit), data=data)
@decorators.restrict_access(scope='modconfig')
def get_settings(self, subreddit):
"""Return the settings for the given subreddit."""
return self.request_json(self.config['subreddit_settings'] %
six.text_type(subreddit))['data']
@decorators.restrict_access(scope='modconfig')
def set_settings(self, subreddit, title, public_description='',
description='', language='en', subreddit_type='public',
content_options='any', over_18=False, default_set=True,
show_media=False, domain='', domain_css=False,
domain_sidebar=False, header_hover_text='',
prev_description_id=None,
prev_public_description_id=None, wikimode='disabled',
wiki_edit_age=30, wiki_edit_karma=100,
submit_link_label='', submit_text_label='',
exclude_banned_modqueue=False, comment_score_hide_mins=0,
public_traffic=False, **kwargs):
"""Set the settings for the given subreddit.
:param subreddit: Must be a subreddit object.
:returns: The json response from the server.
"""
data = {'sr': subreddit.fullname,
'allow_top': 'on' if default_set else 'off',
'comment_score_hide_mins': comment_score_hide_mins,
'description': description,
'domain': domain or '',
'domain_css': 'on' if domain_css else 'off',
'domain_sidebar': 'on' if domain_sidebar else 'off',
'exclude_banned_modqueue': ('on' if exclude_banned_modqueue
else 'off'),
'header-title': header_hover_text or '',
'lang': language,
'link_type': content_options,
'over_18': 'on' if over_18 else 'off',
'public_description': public_description,
'public_traffic': public_traffic,
'show_media': 'on' if show_media else 'off',
'submit_link_label': submit_link_label or '',
'submit_text_label': submit_text_label or '',
'title': title,
'type': subreddit_type,
'wiki_edit_age': six.text_type(wiki_edit_age),
'wiki_edit_karma': six.text_type(wiki_edit_karma),
'wikimode': wikimode}
if prev_description_id is not None:
data['prev_description_id'] = prev_description_id
if prev_public_description_id is not None:
data['prev_public_description_id'] = prev_public_description_id
if kwargs:
msg = 'Extra settings fields: {0}'.format(kwargs.keys())
warn_explicit(msg, UserWarning, '', 0)
data.update(kwargs)
self.evict(self.config['subreddit_settings'] %
six.text_type(subreddit))
return self.request_json(self.config['site_admin'], data=data)
@decorators.restrict_access(scope='modconfig')
def set_stylesheet(self, subreddit, stylesheet, prevstyle=None):
"""Set stylesheet for the given subreddit.
:returns: The json response from the server.
"""
data = {'r': six.text_type(subreddit),
'stylesheet_contents': stylesheet,
'op': 'save'} # Options: save / preview
if prevstyle is not None:
data['prevstyle'] = prevstyle
self.evict(self.config['stylesheet'] % six.text_type(subreddit))
return self.request_json(self.config['subreddit_css'], data=data)
@decorators.restrict_access(scope='modconfig')
def upload_image(self, subreddit, image_path, name=None, header=False):
"""Upload an image to the subreddit.
:param image_path: A path to the jpg or png image you want to upload.
:param name: The name to provide the image. When None the name will be
filename less any extension.
:param header: When True, upload the image as the subreddit header.
:returns: True when the upload was successful. False otherwise. Note
this is subject to change.
"""
if name and header:
raise TypeError('Both name and header cannot be set.')
image_type = None
# Verify image is a jpeg or png and meets size requirements
with open(image_path, 'rb') as image:
size = os.path.getsize(image.name)
if size < MIN_IMAGE_SIZE:
raise errors.ClientException('`image` is not a valid image')
elif size > MAX_IMAGE_SIZE:
raise errors.ClientException('`image` is too big. Max: {0} '
'bytes'.format(MAX_IMAGE_SIZE))
first_bytes = image.read(MIN_IMAGE_SIZE)
image.seek(0)
if first_bytes.startswith(JPEG_HEADER):
image_type = 'jpg'
elif first_bytes.startswith(PNG_HEADER):
image_type = 'png'
else:
raise errors.ClientException('`image` must be either jpg or '
'png.')
data = {'r': six.text_type(subreddit), 'img_type': image_type}
if header:
data['header'] = 1
else:
if not name:
name = os.path.splitext(os.path.basename(image.name))[0]
data['name'] = name
response = self._request(self.config['upload_image'], data=data,
files={'file': image})
# HACK: Until json response, attempt to parse the errors
json_start = response.find('[[')
json_end = response.find(']]')
try:
image_errors = dict(json.loads(response[json_start:json_end + 2]))
except Exception: # pylint: disable-msg=W0703
warn_explicit('image_upload parsing issue', UserWarning, '', 0)
return False
if image_errors['BAD_CSS_NAME']:
raise errors.APIException(image_errors['BAD_CSS_NAME'], None)
elif image_errors['IMAGE_ERROR']:
raise errors.APIException(image_errors['IMAGE_ERROR'], None)
return True
def update_settings(self, subreddit, **kwargs):
"""Update only the given settings for the given subreddit.
The settings to update must be given by keyword and match one of the
parameter names in `set_settings`.
:returns: The json response from the server.
"""
settings = self.get_settings(subreddit)
settings.update(kwargs)
del settings['subreddit_id']
return self.set_settings(subreddit, **settings)
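# A sketch of the fetch-merge-save cycle update_settings performs, which
# avoids clobbering settings that are not passed (the subreddit name is a
# placeholder):
#
#     subreddit = r.get_subreddit('example')
#     r.update_settings(subreddit, description='New sidebar text')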
class ModFlairMixin(AuthenticatedReddit):
"""Adds methods requiring the 'modflair' scope (or mod access).
You should **not** directly instantiate instances of this class. Use
:class:`.Reddit` instead.
"""
@decorators.restrict_access(scope='modflair')
def add_flair_template(self, subreddit, text='', css_class='',
text_editable=False, is_link=False):
"""Add a flair template to the given subreddit.
:returns: The json response from the server.
"""
data = {'r': six.text_type(subreddit),
'text': text,
'css_class': css_class,
'text_editable': six.text_type(text_editable),
'flair_type': 'LINK_FLAIR' if is_link else 'USER_FLAIR'}
return self.request_json(self.config['flairtemplate'], data=data)
@decorators.restrict_access(scope='modflair')
def clear_flair_templates(self, subreddit, is_link=False):
"""Clear flair templates for the given subreddit.
:returns: The json response from the server.
"""
data = {'r': six.text_type(subreddit),
'flair_type': 'LINK_FLAIR' if is_link else 'USER_FLAIR'}
return self.request_json(self.config['clearflairtemplates'], data=data)
@decorators.restrict_access(scope='modflair')
def configure_flair(self, subreddit, flair_enabled=False,
flair_position='right',
flair_self_assign=False,
link_flair_enabled=False,
link_flair_position='left',
link_flair_self_assign=False):
"""Configure the flair setting for the given subreddit.
:returns: The json response from the server.
"""
flair_enabled = 'on' if flair_enabled else 'off'
flair_self_assign = 'on' if flair_self_assign else 'off'
if not link_flair_enabled:
link_flair_position = ''
link_flair_self_assign = 'on' if link_flair_self_assign else 'off'
data = {'r': six.text_type(subreddit),
'flair_enabled': flair_enabled,
'flair_position': flair_position,
'flair_self_assign_enabled': flair_self_assign,
'link_flair_position': link_flair_position,
'link_flair_self_assign_enabled': link_flair_self_assign}
return self.request_json(self.config['flairconfig'], data=data)
@decorators.restrict_access(scope='modflair')
def delete_flair(self, subreddit, user):
"""Delete the flair for the given user on the given subreddit.
:returns: The json response from the server.
"""
data = {'r': six.text_type(subreddit),
'name': six.text_type(user)}
return self.request_json(self.config['deleteflair'], data=data)
@decorators.restrict_access(scope='modflair')
def get_flair_list(self, subreddit, *args, **kwargs):
"""Return a get_content generator of flair mappings.
:param subreddit: Either a Subreddit object or the name of the
subreddit to return the flair list for.
The additional parameters are passed directly into
:meth:`.get_content`. Note: the `url`, `root_field`, `thing_field`, and
`after_field` parameters cannot be altered.
"""
return self.get_content(self.config['flairlist'] %
six.text_type(subreddit), *args,
root_field=None, thing_field='users',
after_field='next', **kwargs)
@decorators.restrict_access(scope='modflair')
def set_flair(self, subreddit, item, flair_text='', flair_css_class=''):
"""Set flair for the user in the given subreddit.
Item can be a string, Redditor object, or Submission object. If item is
a string it will be treated as the name of a Redditor.
:returns: The json response from the server.
"""
data = {'r': six.text_type(subreddit),
'text': flair_text or '',
'css_class': flair_css_class or ''}
if isinstance(item, objects.Submission):
data['link'] = item.fullname
evict = item.permalink
else:
data['name'] = six.text_type(item)
evict = self.config['flairlist'] % six.text_type(subreddit)
response = self.request_json(self.config['flair'], data=data)
self.evict(evict)
return response
@decorators.restrict_access(scope='modflair')
def set_flair_csv(self, subreddit, flair_mapping):
"""Set flair for a group of users in the given subreddit.
flair_mapping should be a list of dictionaries with the following keys:
user: the user name
flair_text: the flair text for the user (optional)
flair_css_class: the flair css class for the user (optional)
:returns: The json response from the server.
"""
if not flair_mapping:
raise errors.ClientException('flair_mapping must be set')
item_order = ['user', 'flair_text', 'flair_css_class']
lines = []
for mapping in flair_mapping:
if 'user' not in mapping:
raise errors.ClientException('flair_mapping must '
'contain `user` key')
lines.append(','.join([mapping.get(x, '') for x in item_order]))
response = []
        while lines:
data = {'r': six.text_type(subreddit),
'flair_csv': '\n'.join(lines[:100])}
response.extend(self.request_json(self.config['flaircsv'],
data=data))
lines = lines[100:]
self.evict(self.config['flairlist'] % six.text_type(subreddit))
return response
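# A flair_mapping sketch for set_flair_csv; rows are submitted in CSV
# batches of 100 per request (the user names are placeholders):
#
#     mapping = [{'user': 'alice', 'flair_text': 'helpful'},
#                {'user': 'bob', 'flair_css_class': 'mod'}]
#     r.set_flair_csv(subreddit, mapping)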
class ModLogMixin(AuthenticatedReddit):
"""Adds methods requiring the 'modlog' scope (or mod access).
You should **not** directly instantiate instances of this class. Use
:class:`.Reddit` instead.
"""
@decorators.restrict_access(scope='modlog')
def get_mod_log(self, subreddit, mod=None, action=None, *args, **kwargs):
"""Return a get_content generator for moderation log items.
:param subreddit: Either a Subreddit object or the name of the
subreddit to return the flair list for.
:param mod: If given, only return the actions made by this moderator.
Both a moderator name or Redditor object can be used here.
:param action: If given, only return entries for the specified action.
The additional parameters are passed directly into
:meth:`.get_content`. Note: the `url` parameter cannot be altered.
"""
params = kwargs.setdefault('params', {})
if mod is not None:
params['mod'] = six.text_type(mod)
if action is not None:
params['type'] = six.text_type(action)
return self.get_content(self.config['modlog'] %
six.text_type(subreddit), *args, **kwargs)
class ModOnlyMixin(AuthenticatedReddit):
"""Adds methods requiring the logged in moderator access.
You should **not** directly instantiate instances of this class. Use
:class:`.Reddit` instead.
"""
@decorators.restrict_access(scope=None, mod=True)
def get_banned(self, subreddit):
"""Return the list of banned users for the given subreddit."""
return self.request_json(self.config['banned'] %
six.text_type(subreddit))
@decorators.restrict_access(scope=None, mod=True)
def get_contributors(self, subreddit):
"""Return the list of contributors for the given subreddit."""
return self.request_json(self.config['contributors'] %
six.text_type(subreddit))
@decorators.restrict_access(scope='privatemessages', mod=True)
def get_mod_mail(self, subreddit='mod', *args, **kwargs):
"""Return a get_content generator for moderator messages.
:param subreddit: Either a Subreddit object or the name of the
subreddit to return the moderator mail from. Defaults to `mod`
which includes items for all the subreddits you moderate.
The additional parameters are passed directly into
:meth:`.get_content`. Note: the `url` parameter cannot be altered.
"""
return self.get_content(self.config['mod_mail'] %
six.text_type(subreddit), *args, **kwargs)
@decorators.restrict_access(scope=None, mod=True)
def get_mod_queue(self, subreddit='mod', *args, **kwargs):
"""Return a get_content_generator for the moderator queue.
:param subreddit: Either a Subreddit object or the name of the
subreddit to return the flair list for. Defaults to `mod` which
includes items for all the subreddits you moderate.
The additional parameters are passed directly into
:meth:`.get_content`. Note: the `url` parameter cannot be altered.
"""
return self.get_content(self.config['modqueue'] %
six.text_type(subreddit), *args, **kwargs)
@decorators.restrict_access(scope=None, mod=True)
def get_reports(self, subreddit='mod', *args, **kwargs):
"""Return a get_content generator of reported submissions.
:param subreddit: Either a Subreddit object or the name of the
subreddit to return the flair list for. Defaults to `mod` which
includes items for all the subreddits you moderate.
The additional parameters are passed directly into
:meth:`.get_content`. Note: the `url` parameter cannot be altered.
"""
return self.get_content(self.config['reports'] %
six.text_type(subreddit), *args, **kwargs)
@decorators.restrict_access(scope=None, mod=True)
def get_spam(self, subreddit='mod', *args, **kwargs):
"""Return a get_content generator of spam-filtered items.
:param subreddit: Either a Subreddit object or the name of the
subreddit to return the flair list for. Defaults to `mod` which
includes items for all the subreddits you moderate.
The additional parameters are passed directly into
:meth:`.get_content`. Note: the `url` parameter cannot be altered.
"""
return self.get_content(self.config['spam'] % six.text_type(subreddit),
*args, **kwargs)
@decorators.restrict_access(scope=None, mod=True)
def get_stylesheet(self, subreddit):
"""Return the stylesheet and images for the given subreddit."""
return self.request_json(self.config['stylesheet'] %
six.text_type(subreddit))['data']
@decorators.restrict_access(scope=None, mod=True)
def get_unmoderated(self, subreddit='mod', *args, **kwargs):
"""Return a get_content generator of unmoderated items.
:param subreddit: Either a Subreddit object or the name of the
subreddit to return the flair list for. Defaults to `mod` which
includes items for all the subreddits you moderate.
The additional parameters are passed directly into
:meth:`.get_content`. Note: the `url` parameter cannot be altered.
"""
return self.get_content(self.config['unmoderated'] %
six.text_type(subreddit), *args, **kwargs)
@decorators.restrict_access(scope=None, mod=True)
def get_wiki_banned(self, subreddit):
"""Return a list of users banned from the wiki."""
return self.request_json(self.config['wiki_banned'] %
six.text_type(subreddit))
@decorators.restrict_access(scope=None, mod=True)
def get_wiki_contributors(self, subreddit):
"""Return a list of users who can contribute to the wiki."""
return self.request_json(self.config['wiki_contributors'] %
six.text_type(subreddit))
class MySubredditsMixin(AuthenticatedReddit):
"""Adds methods requiring the 'mysubreddits' scope (or login).
You should **not** directly instantiate instances of this class. Use
:class:`.Reddit` instead.
"""
@decorators.restrict_access(scope='mysubreddits')
def get_my_contributions(self, *args, **kwargs):
"""Return a get_content generator of subreddits.
The subreddits generated are those where the session's user is a
contributor.
The additional parameters are passed directly into
:meth:`.get_content`. Note: the `url` parameter cannot be altered.
"""
return self.get_content(self.config['my_con_subreddits'], *args,
**kwargs)
@decorators.restrict_access(scope='mysubreddits')
def get_my_moderation(self, *args, **kwargs):
"""Return a get_content generator of subreddits.
The subreddits generated are those where the session's user is a
moderator.
The additional parameters are passed directly into
:meth:`.get_content`. Note: the `url` parameter cannot be altered.
"""
return self.get_content(self.config['my_mod_subreddits'], *args,
**kwargs)
def get_my_reddits(self, *args, **kwargs):
"""Return a get_content generator of subreddits.
This is a **deprecated** version of :meth:`.get_my_subreddits`.
"""
warn('Please use `get_my_subreddits` instead', DeprecationWarning)
return self.get_my_subreddits(*args, **kwargs)
@decorators.restrict_access(scope='mysubreddits')
def get_my_subreddits(self, *args, **kwargs):
"""Return a get_content generator of subreddits.
The subreddits generated are those that the session's user is
subscribed to.
The additional parameters are passed directly into
:meth:`.get_content`. Note: the `url` parameter cannot be altered.
"""
return self.get_content(self.config['my_subreddits'], *args, **kwargs)
class PrivateMessagesMixin(AuthenticatedReddit):
"""Adds methods requiring the 'privatemessages' scope (or login).
You should **not** directly instantiate instances of this class. Use
:class:`.Reddit` instead.
"""
@decorators.restrict_access(scope='privatemessages')
def _mark_as_read(self, thing_ids, unread=False):
"""Mark each of the supplied thing_ids as (un)read.
:returns: The json response from the server.
"""
data = {'id': ','.join(thing_ids)}
key = 'unread_message' if unread else 'read_message'
response = self.request_json(self.config[key], data=data)
self.evict([self.config[x] for x in ['inbox', 'mod_mail', 'unread']])
return response
@decorators.restrict_access(scope='privatemessages')
def get_inbox(self, *args, **kwargs):
"""Return a get_content generator for inbox messages.
The additional parameters are passed directly into
:meth:`.get_content`. Note: the `url` parameter cannot be altered.
"""
return self.get_content(self.config['inbox'], *args, **kwargs)
@decorators.restrict_access(scope='privatemessages')
def get_sent(self, *args, **kwargs):
"""Return a get_content generator for sent messages.
The additional parameters are passed directly into
:meth:`.get_content`. Note: the `url` parameter cannot be altered.
"""
return self.get_content(self.config['sent'], *args, **kwargs)
@decorators.restrict_access(scope='privatemessages')
def get_unread(self, unset_has_mail=False, update_user=False, *args,
**kwargs):
"""Return a get_content generator for unread messages.
:param unset_has_mail: When True, clear the has_mail flag (orangered)
for the user.
        :param update_user: If both `unset_has_mail` and `update_user` are
            True, set the `has_mail` attribute of the logged-in user to False.
The additional parameters are passed directly into
:meth:`.get_content`. Note: the `url` parameter cannot be altered.
"""
params = kwargs.setdefault('params', {})
if unset_has_mail:
params['mark'] = 'true'
if update_user: # Update the user object
# Use setattr to avoid pylint error
setattr(self.user, 'has_mail', False)
return self.get_content(self.config['unread'], *args, **kwargs)
@decorators.restrict_access(scope='privatemessages')
def get_mentions(self, *args, **kwargs):
"""Return a get_content generator for username mentions.
This will only work for users with reddit gold.
The additional parameters are passed directly into
:meth:`.get_content`. Note: the `url` parameter cannot be altered.
"""
return self.get_content(self.config['mentions'], *args, **kwargs)
@decorators.restrict_access(scope='privatemessages')
@decorators.require_captcha
def send_message(self, recipient, subject, message, captcha=None):
"""Send a message to a redditor or a subreddit's moderators (mod mail).
When sending a message to a subreddit the recipient parameter must
either be a subreddit object or the subreddit name needs to be prefixed
with either '/r/' or '#'.
:returns: The json response from the server.
"""
if isinstance(recipient, objects.Subreddit):
recipient = '/r/%s' % six.text_type(recipient)
else:
recipient = six.text_type(recipient)
data = {'text': message,
'subject': subject,
'to': recipient}
if captcha:
data.update(captcha)
response = self.request_json(self.config['compose'], data=data)
self.evict(self.config['sent'])
return response
class SubmitMixin(AuthenticatedReddit):
"""Adds methods requiring the 'submit' scope (or login).
You should **not** directly instantiate instances of this class. Use
:class:`.Reddit` instead.
"""
@decorators.restrict_access(scope='submit')
def _add_comment(self, thing_id, text):
"""Comment on the given thing with the given text.
:returns: A Comment object for the newly created comment.
"""
data = {'thing_id': thing_id,
'text': text}
retval = self.request_json(self.config['comment'], data=data)
# REDDIT: reddit's end should only ever return a single comment
return retval['data']['things'][0]
@decorators.restrict_access(scope='submit')
@decorators.require_captcha
def submit(self, subreddit, title, text=None, url=None, captcha=None):
"""Submit a new link to the given subreddit.
Accepts either a Subreddit object or a str containing the subreddit's
display name.
:returns: The newly created Submission object if the reddit instance
can access it. Otherwise, return the url to the submission.
"""
if bool(text) == bool(url):
raise TypeError('One (and only one) of text or url is required!')
data = {'sr': six.text_type(subreddit),
'title': title}
if text:
data['kind'] = 'self'
data['text'] = text
else:
data['kind'] = 'link'
data['url'] = url
if captcha:
data.update(captcha)
result = self.request_json(self.config['submit'], data=data)
url = result['data']['url']
# Clear the OAuth setting when attempting to fetch the submission
# pylint: disable-msg=W0212
if self._use_oauth:
self._use_oauth = False
# Hack until reddit/627 is resolved
if url.startswith(self.config._oauth_url):
url = self.config._site_url + url[len(self.config._oauth_url):]
# pylint: enable-msg=W0212
try:
return self.get_submission(url)
except requests.exceptions.HTTPError as error:
# The request may still fail if the submission was made to a
# private subreddit.
if error.response.status_code == 403:
return url
raise
class SubscribeMixin(AuthenticatedReddit):
"""Adds methods requiring the 'subscribe' scope (or login).
You should **not** directly instantiate instances of this class. Use
:class:`.Reddit` instead.
"""
@decorators.restrict_access(scope='subscribe')
def subscribe(self, subreddit, unsubscribe=False):
"""Subscribe to the given subreddit.
:param subreddit: Either the subreddit name or a subreddit object.
:param unsubscribe: When True, unsubscribe.
:returns: The json response from the server.
"""
data = {'action': 'unsub' if unsubscribe else 'sub',
'sr_name': six.text_type(subreddit)}
response = self.request_json(self.config['subscribe'], data=data)
self.evict(self.config['my_subreddits'])
return response
def unsubscribe(self, subreddit):
"""Unsubscribe from the given subreddit.
:param subreddit: Either the subreddit name or a subreddit object.
:returns: The json response from the server.
"""
return self.subscribe(subreddit, unsubscribe=True)
class Reddit(ModConfigMixin, ModFlairMixin, ModLogMixin, ModOnlyMixin,
MySubredditsMixin, PrivateMessagesMixin, SubmitMixin,
SubscribeMixin):
"""Provides access to reddit's API.
See :class:`.BaseReddit`'s documentation for descriptions of the
initialization parameters.
"""
# Prevent recursive import
from praw import objects
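# Minimal usage sketch (editorial, not part of the original file; assumes a
# praw 3.x install, valid credentials, and a reachable reddit instance):
#   r = Reddit(user_agent='example-agent/0.1')
#   r.login('username', 'password')
#   for subreddit in r.get_my_subreddits(limit=5):
#       print(subreddit.display_name)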
|
mikeolteanu/livepythonconsole-app-engine
|
boilerplate/external/praw/__init__.py
|
Python
|
lgpl-3.0
| 81,183
|
from six import iteritems
class MaxDisplacement(object):
def __init__(self, data):
self.translations = {}
self.rotations = {}
for line in data:
sid = line[0]
self.translations[sid] = line[1:4]
self.rotations[sid] = line[4:]
def write_f06(self, page_stamp='%i', page_num=1):
msg = ['0 MAXIMUM DISPLACEMENTS',
' SUBCASE/',
' DAREA ID T1 T2 T3 R1 R2 R3']
for sid, trans in sorted(iteritems(self.translations)):
rot = self.rotations[sid]
msg.append('0 %8i %13.8E %13.8E %13.8E %13.8E %13.8E %13.8E' %
(tuple([sid] + trans + rot)))
msg.append(page_stamp % page_num)
return '\n'.join(msg)
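# Usage sketch (editorial): each input row is [subcase_id, T1, T2, T3, R1, R2, R3];
# the values below are made up.
#   disp = MaxDisplacement([[1, 0.1, 0.2, 0.3, 0.0, 0.0, 0.0]])
#   print(disp.write_f06(page_stamp='PAGE %i', page_num=1))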
|
saullocastro/pyNastran
|
pyNastran/f06/dev/f06_classes.py
|
Python
|
lgpl-3.0
| 904
|
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2021 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This file is part of Psi4.
#
# Psi4 is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, version 3.
#
# Psi4 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with Psi4; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
"""
| Database (Hobza) of interaction energies for bimolecular complexes.
| Geometries from <Reference>.
| Reference interaction energies from Rezac and Hobza, JCTC (in press).
- **cp** ``'off'`` || ``'on'``
- **rlxd** ``'off'`` || ``'on'``
- **benchmark**
- ``'<benchmark_name>'`` <Reference>.
- |dl| ``'<default_benchmark_name>'`` |dr| <Reference>.
- **subset**
- ``'small'`` <members_description>
- ``'large'`` <members_description>
- ``'<subset>'`` <members_description>
"""
import re
import qcdb
# <<< A24 Database Module >>>
dbse = 'A24'
# <<< Database Members >>>
HRXN = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24]
HRXN_SM = []
HRXN_LG = []
# <<< Chemical Systems Involved >>>
RXNM = {} # reaction matrix of reagent contributions per reaction
ACTV = {} # order of active reagents per reaction
ACTV_CP = {} # order of active reagents per counterpoise-corrected reaction
ACTV_SA = {} # order of active reagents for non-supermolecular calculations
for rxn in HRXN:
RXNM[ '%s-%s' % (dbse, rxn)] = {'%s-%s-dimer' % (dbse, rxn) : +1,
'%s-%s-monoA-CP' % (dbse, rxn) : -1,
'%s-%s-monoB-CP' % (dbse, rxn) : -1,
'%s-%s-monoA-unCP' % (dbse, rxn) : -1,
'%s-%s-monoB-unCP' % (dbse, rxn) : -1 }
ACTV_SA['%s-%s' % (dbse, rxn)] = ['%s-%s-dimer' % (dbse, rxn) ]
ACTV_CP['%s-%s' % (dbse, rxn)] = ['%s-%s-dimer' % (dbse, rxn),
'%s-%s-monoA-CP' % (dbse, rxn),
'%s-%s-monoB-CP' % (dbse, rxn) ]
ACTV[ '%s-%s' % (dbse, rxn)] = ['%s-%s-dimer' % (dbse, rxn),
'%s-%s-monoA-unCP' % (dbse, rxn),
'%s-%s-monoB-unCP' % (dbse, rxn) ]
# NOTE: the line below appears to deliberately override the uncorrected
# reagent list defined just above so that only the dimer is active (the
# "alt" variant of the A24 database); remove it to restore the full
# dimer/monomer decomposition.
ACTV['%s-%s' % (dbse, rxn)] = ['%s-%s-dimer' % (dbse, rxn) ]
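# Editorial sketch of how these tables combine (not part of the original
# file): given total energies for each reagent, the RXNM coefficients
# assemble the counterpoise-corrected interaction energy. The energy values
# below are hypothetical.
#   energies = {'A24-1-dimer': -132.100,
#               'A24-1-monoA-CP': -76.050,
#               'A24-1-monoB-CP': -56.040}
#   ie = sum(RXNM['A24-1'][rgt] * energies[rgt] for rgt in ACTV_CP['A24-1'])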
# <<< Reference Values [kcal/mol] from Rezac and Hobza dx.doi.org/10.1021/ct400057w >>>
BIND = {}
BIND['%s-%s' % (dbse, 1 )] = -6.524
BIND['%s-%s' % (dbse, 2 )] = -5.014
BIND['%s-%s' % (dbse, 3 )] = -4.749
BIND['%s-%s' % (dbse, 4 )] = -4.572
BIND['%s-%s' % (dbse, 5 )] = -3.157
BIND['%s-%s' % (dbse, 6 )] = -1.679
BIND['%s-%s' % (dbse, 7 )] = -0.779
BIND['%s-%s' % (dbse, 8 )] = -0.672
BIND['%s-%s' % (dbse, 9 )] = -4.474
BIND['%s-%s' % (dbse, 10 )] = -2.578
BIND['%s-%s' % (dbse, 11 )] = -1.629
BIND['%s-%s' % (dbse, 12 )] = -1.537
BIND['%s-%s' % (dbse, 13 )] = -1.389
BIND['%s-%s' % (dbse, 14 )] = -1.110
BIND['%s-%s' % (dbse, 15 )] = -0.514
BIND['%s-%s' % (dbse, 16 )] = -1.518
BIND['%s-%s' % (dbse, 17 )] = -0.837
BIND['%s-%s' % (dbse, 18 )] = -0.615
BIND['%s-%s' % (dbse, 19 )] = -0.538
BIND['%s-%s' % (dbse, 20 )] = -0.408
BIND['%s-%s' % (dbse, 21 )] = -0.370
BIND['%s-%s' % (dbse, 22 )] = 0.784
BIND['%s-%s' % (dbse, 23 )] = 0.897
BIND['%s-%s' % (dbse, 24 )] = 1.075
# <<< Comment Lines >>>
TAGL = {}
TAGL['%s-%s' % (dbse, 1)] = """ water_ammonia_Cs """
TAGL['%s-%s-dimer' % (dbse, 1)] = """Dimer from water_ammonia_Cs """
TAGL['%s-%s-monoA-CP' % (dbse, 1)] = """Monomer A water_ammonia_Cs """
TAGL['%s-%s-monoB-CP' % (dbse, 1)] = """Monomer B water_ammonia_Cs """
TAGL['%s-%s-monoA-unCP' % (dbse, 1)] = """Monomer A water_ammonia_Cs """
TAGL['%s-%s-monoB-unCP' % (dbse, 1)] = """Monomer B water_ammonia_Cs """
TAGL['%s-%s' % (dbse, 2)] = """ water_water_Cs """
TAGL['%s-%s-dimer' % (dbse, 2)] = """Dimer from water_water_Cs """
TAGL['%s-%s-monoA-CP' % (dbse, 2)] = """Monomer A from water_water_Cs """
TAGL['%s-%s-monoB-CP' % (dbse, 2)] = """Monomer B from water_water_Cs """
TAGL['%s-%s-monoA-unCP' % (dbse, 2)] = """Monomer A from water_water_Cs """
TAGL['%s-%s-monoB-unCP' % (dbse, 2)] = """Monomer B from water_water_Cs """
TAGL['%s-%s' % (dbse, 3)] = """ HCN_HCN_Cxv """
TAGL['%s-%s-dimer' % (dbse, 3)] = """Dimer from HCN_HCN_Cxv """
TAGL['%s-%s-monoA-CP' % (dbse, 3)] = """Monomer A from HCN_HCN_Cxv """
TAGL['%s-%s-monoB-CP' % (dbse, 3)] = """Monomer B from HCN_HCN_Cxv """
TAGL['%s-%s-monoA-unCP' % (dbse, 3)] = """Monomer A from HCN_HCN_Cxv """
TAGL['%s-%s-monoB-unCP' % (dbse, 3)] = """Monomer B from HCN_HCN_Cxv """
TAGL['%s-%s' % (dbse, 4)] = """ HF_HF_Cs """
TAGL['%s-%s-dimer' % (dbse, 4)] = """Dimer from HF_HF_Cs """
TAGL['%s-%s-monoA-CP' % (dbse, 4)] = """Monomer A from HF_HF_Cs """
TAGL['%s-%s-monoB-CP' % (dbse, 4)] = """Monomer B from HF_HF_Cs """
TAGL['%s-%s-monoA-unCP' % (dbse, 4)] = """Monomer A from HF_HF_Cs """
TAGL['%s-%s-monoB-unCP' % (dbse, 4)] = """Monomer B from HF_HF_Cs """
TAGL['%s-%s' % (dbse, 5)] = """ ammonia_ammonia_C2h """
TAGL['%s-%s-dimer' % (dbse, 5)] = """Dimer from ammonia_ammonia_C2h """
TAGL['%s-%s-monoA-CP' % (dbse, 5)] = """Monomer A from ammonia_ammonia_C2h """
TAGL['%s-%s-monoB-CP' % (dbse, 5)] = """Monomer B from ammonia_ammonia_C2h """
TAGL['%s-%s-monoA-unCP' % (dbse, 5)] = """Monomer A from ammonia_ammonia_C2h """
TAGL['%s-%s-monoB-unCP' % (dbse, 5)] = """Monomer B from ammonia_ammonia_C2h """
TAGL['%s-%s' % (dbse, 6)] = """ methane_HF_C3v """
TAGL['%s-%s-dimer' % (dbse, 6)] = """Dimer from methane_HF_C3v """
TAGL['%s-%s-monoA-CP' % (dbse, 6)] = """Monomer A from methane_HF_C3v """
TAGL['%s-%s-monoB-CP' % (dbse, 6)] = """Monomer B from methane_HF_C3v """
TAGL['%s-%s-monoA-unCP' % (dbse, 6)] = """Monomer A from methane_HF_C3v """
TAGL['%s-%s-monoB-unCP' % (dbse, 6)] = """Monomer B from methane_HF_C3v """
TAGL['%s-%s' % (dbse, 7)] = """ ammmonia_methane_C3v """
TAGL['%s-%s-dimer' % (dbse, 7)] = """Dimer from ammmonia_methane_C3v """
TAGL['%s-%s-monoA-CP' % (dbse, 7)] = """Monomer A from ammmonia_methane_C3v """
TAGL['%s-%s-monoB-CP' % (dbse, 7)] = """Monomer B from ammmonia_methane_C3v """
TAGL['%s-%s-monoA-unCP' % (dbse, 7)] = """Monomer A from ammmonia_methane_C3v """
TAGL['%s-%s-monoB-unCP' % (dbse, 7)] = """Monomer B from ammmonia_methane_C3v """
TAGL['%s-%s' % (dbse, 8)] = """ methane_water_Cs """
TAGL['%s-%s-dimer' % (dbse, 8)] = """Dimer from methane_water_Cs """
TAGL['%s-%s-monoA-CP' % (dbse, 8)] = """Monomer A from methane_water_Cs """
TAGL['%s-%s-monoB-CP' % (dbse, 8)] = """Monomer B from methane_water_Cs """
TAGL['%s-%s-monoA-unCP' % (dbse, 8)] = """Monomer A from methane_water_Cs """
TAGL['%s-%s-monoB-unCP' % (dbse, 8)] = """Monomer B from methane_water_Cs """
TAGL['%s-%s' % (dbse, 9)] = """ formaldehyde_formaldehyde_Cs """
TAGL['%s-%s-dimer' % (dbse, 9)] = """Dimer from formaldehyde_formaldehyde_Cs """
TAGL['%s-%s-monoA-CP' % (dbse, 9)] = """Monomer A from formaldehyde_formaldehyde_Cs """
TAGL['%s-%s-monoB-CP' % (dbse, 9)] = """Monomer B from formaldehyde_formaldehyde_Cs """
TAGL['%s-%s-monoA-unCP' % (dbse, 9)] = """Monomer A from formaldehyde_formaldehyde_Cs """
TAGL['%s-%s-monoB-unCP' % (dbse, 9)] = """Monomer B from formaldehyde_formaldehyde_Cs """
TAGL['%s-%s' % (dbse, 10)] = """ ethene_wat_Cs """
TAGL['%s-%s-dimer' % (dbse, 10)] = """Dimer from ethene_wat_Cs """
TAGL['%s-%s-monoA-CP' % (dbse, 10)] = """Monomer A from ethene_wat_Cs """
TAGL['%s-%s-monoB-CP' % (dbse, 10)] = """Monomer B from ethene_wat_Cs """
TAGL['%s-%s-monoA-unCP' % (dbse, 10)] = """Monomer A from ethene_wat_Cs """
TAGL['%s-%s-monoB-unCP' % (dbse, 10)] = """Monomer B from ethene_wat_Cs """
TAGL['%s-%s' % (dbse, 11)] = """ ethene_formaldehyde_Cs """
TAGL['%s-%s-dimer' % (dbse, 11)] = """Dimer from ethene_formaldehyde_Cs """
TAGL['%s-%s-monoA-CP' % (dbse, 11)] = """Monomer A from ethene_formaldehyde_Cs """
TAGL['%s-%s-monoB-CP' % (dbse, 11)] = """Monomer B from ethene_formaldehyde_Cs """
TAGL['%s-%s-monoA-unCP' % (dbse, 11)] = """Monomer A from ethene_formaldehyde_Cs """
TAGL['%s-%s-monoB-unCP' % (dbse, 11)] = """Monomer B from ethene_formaldehyde_Cs """
TAGL['%s-%s' % (dbse, 12)] = """ ethyne_ethyne_C2v """
TAGL['%s-%s-dimer' % (dbse, 12)] = """Dimer from ethyne_ethyne_C2v """
TAGL['%s-%s-monoA-CP' % (dbse, 12)] = """Monomer A from ethyne_ethyne_C2v """
TAGL['%s-%s-monoB-CP' % (dbse, 12)] = """Monomer B from ethyne_ethyne_C2v """
TAGL['%s-%s-monoA-unCP' % (dbse, 12)] = """Monomer A from ethyne_ethyne_C2v """
TAGL['%s-%s-monoB-unCP' % (dbse, 12)] = """Monomer B from ethyne_ethyne_C2v """
TAGL['%s-%s' % (dbse, 13)] = """ ethene_ammonia_Cs """
TAGL['%s-%s-dimer' % (dbse, 13)] = """Dimer from ethene_ammonia_Cs """
TAGL['%s-%s-monoA-CP' % (dbse, 13)] = """Monomer A from ethene_ammonia_Cs """
TAGL['%s-%s-monoB-CP' % (dbse, 13)] = """Monomer B from ethene_ammonia_Cs """
TAGL['%s-%s-monoA-unCP' % (dbse, 13)] = """Monomer A from ethene_ammonia_Cs """
TAGL['%s-%s-monoB-unCP' % (dbse, 13)] = """Monomer B from ethene_ammonia_Cs """
TAGL['%s-%s' % (dbse, 14)] = """ ethene_ethene_C2v """
TAGL['%s-%s-dimer' % (dbse, 14)] = """Dimer from ethene_ethene_C2v """
TAGL['%s-%s-monoA-CP' % (dbse, 14)] = """Monomer A from ethene_ethene_C2v """
TAGL['%s-%s-monoB-CP' % (dbse, 14)] = """Monomer B from ethene_ethene_C2v """
TAGL['%s-%s-monoA-unCP' % (dbse, 14)] = """Monomer A from ethene_ethene_C2v """
TAGL['%s-%s-monoB-unCP' % (dbse, 14)] = """Monomer B from ethene_ethene_C2v """
TAGL['%s-%s' % (dbse, 15)] = """ methane_ethene_Cs """
TAGL['%s-%s-dimer' % (dbse, 15)] = """Dimer from methane_ethene_Cs """
TAGL['%s-%s-monoA-CP' % (dbse, 15)] = """Monomer A from methane_ethene_Cs """
TAGL['%s-%s-monoB-CP' % (dbse, 15)] = """Monomer B from methane_ethene_Cs """
TAGL['%s-%s-monoA-unCP' % (dbse, 15)] = """Monomer A from methane_ethene_Cs """
TAGL['%s-%s-monoB-unCP' % (dbse, 15)] = """Monomer B from methane_ethene_Cs """
TAGL['%s-%s' % (dbse, 16)] = """ borane_methane_Cs """
TAGL['%s-%s-dimer' % (dbse, 16)] = """Dimer from borane_methane_Cs """
TAGL['%s-%s-monoA-CP' % (dbse, 16)] = """Monomer A from borane_methane_Cs """
TAGL['%s-%s-monoB-CP' % (dbse, 16)] = """Monomer B from borane_methane_Cs """
TAGL['%s-%s-monoA-unCP' % (dbse, 16)] = """Monomer A from borane_methane_Cs """
TAGL['%s-%s-monoB-unCP' % (dbse, 16)] = """Monomer B from borane_methane_Cs """
TAGL['%s-%s' % (dbse, 17)] = """ methane_ethane_Cs """
TAGL['%s-%s-dimer' % (dbse, 17)] = """Dimer from methane_ethane_Cs """
TAGL['%s-%s-monoA-CP' % (dbse, 17)] = """Monomer A from methane_ethane_Cs """
TAGL['%s-%s-monoB-CP' % (dbse, 17)] = """Monomer B from methane_ethane_Cs """
TAGL['%s-%s-monoA-unCP' % (dbse, 17)] = """Monomer A from methane_ethane_Cs """
TAGL['%s-%s-monoB-unCP' % (dbse, 17)] = """Monomer B from methane_ethane_Cs """
TAGL['%s-%s' % (dbse, 18)] = """ methane_ethane_C3 """
TAGL['%s-%s-dimer' % (dbse, 18)] = """Dimer from methane_ethane_C3 """
TAGL['%s-%s-monoA-CP' % (dbse, 18)] = """Monomer A from methane_ethane_C3 """
TAGL['%s-%s-monoB-CP' % (dbse, 18)] = """Monomer B from methane_ethane_C3 """
TAGL['%s-%s-monoA-unCP' % (dbse, 18)] = """Monomer A from methane_ethane_C3 """
TAGL['%s-%s-monoB-unCP' % (dbse, 18)] = """Monomer B from methane_ethane_C3 """
TAGL['%s-%s' % (dbse, 19)] = """ methane_methane_D3d """
TAGL['%s-%s-dimer' % (dbse, 19)] = """Dimer from methane_methane_D3d """
TAGL['%s-%s-monoA-CP' % (dbse, 19)] = """Monomer A from methane_methane_D3d """
TAGL['%s-%s-monoB-CP' % (dbse, 19)] = """Monomer B from methane_methane_D3d """
TAGL['%s-%s-monoA-unCP' % (dbse, 19)] = """Monomer A from methane_methane_D3d """
TAGL['%s-%s-monoB-unCP' % (dbse, 19)] = """Monomer B from methane_methane_D3d """
TAGL['%s-%s' % (dbse, 20)] = """ methane_Ar_C3v """
TAGL['%s-%s-dimer' % (dbse, 20)] = """Dimer from methane_Ar_C3v """
TAGL['%s-%s-monoA-CP' % (dbse, 20)] = """Monomer A from methane_Ar_C3v """
TAGL['%s-%s-monoB-CP' % (dbse, 20)] = """Monomer B from methane_Ar_C3v """
TAGL['%s-%s-monoA-unCP' % (dbse, 20)] = """Monomer A from methane_Ar_C3v """
TAGL['%s-%s-monoB-unCP' % (dbse, 20)] = """Monomer B from methane_Ar_C3v """
TAGL['%s-%s' % (dbse, 21)] = """ ethene_Ar_C2v """
TAGL['%s-%s-dimer' % (dbse, 21)] = """Dimer from ethene_Ar_C2v """
TAGL['%s-%s-monoA-CP' % (dbse, 21)] = """Monomer A from ethene_Ar_C2v """
TAGL['%s-%s-monoB-CP' % (dbse, 21)] = """Monomer B from ethene_Ar_C2v """
TAGL['%s-%s-monoA-unCP' % (dbse, 21)] = """Monomer A from ethene_Ar_C2v """
TAGL['%s-%s-monoB-unCP' % (dbse, 21)] = """Monomer B from ethene_Ar_C2v """
TAGL['%s-%s' % (dbse, 22)] = """ ethene_ethyne_C2v """
TAGL['%s-%s-dimer' % (dbse, 22)] = """Dimer from ethene_ethyne_C2v """
TAGL['%s-%s-monoA-CP' % (dbse, 22)] = """Monomer A from ethene_ethyne_C2v """
TAGL['%s-%s-monoB-CP' % (dbse, 22)] = """Monomer B from ethene_ethyne_C2v """
TAGL['%s-%s-monoA-unCP' % (dbse, 22)] = """Monomer A from ethene_ethyne_C2v """
TAGL['%s-%s-monoB-unCP' % (dbse, 22)] = """Monomer B from ethene_ethyne_C2v """
TAGL['%s-%s' % (dbse, 23)] = """ ethene_ethene_D2h """
TAGL['%s-%s-dimer' % (dbse, 23)] = """Dimer from ethene_ethene_D2h """
TAGL['%s-%s-monoA-CP' % (dbse, 23)] = """Monomer A from ethene_ethene_D2h """
TAGL['%s-%s-monoB-CP' % (dbse, 23)] = """Monomer B from ethene_ethene_D2h """
TAGL['%s-%s-monoA-unCP' % (dbse, 23)] = """Monomer A from ethene_ethene_D2h """
TAGL['%s-%s-monoB-unCP' % (dbse, 23)] = """Monomer B from ethene_ethene_D2h """
TAGL['%s-%s' % (dbse, 24)] = """ ethyne_ethyne_D2h """
TAGL['%s-%s-dimer' % (dbse, 24)] = """Dimer from ethyne_ethyne_D2h """
TAGL['%s-%s-monoA-CP' % (dbse, 24)] = """Monomer A from ethyne_ethyne_D2h """
TAGL['%s-%s-monoB-CP' % (dbse, 24)] = """Monomer B from ethyne_ethyne_D2h """
TAGL['%s-%s-monoA-unCP' % (dbse, 24)] = """Monomer A from ethyne_ethyne_D2h """
TAGL['%s-%s-monoB-unCP' % (dbse, 24)] = """Monomer B from ethyne_ethyne_D2h """
# <<< Geometry Specification Strings >>>
GEOS = {}
GEOS['%s-%s-dimer' % (dbse, '1')] = qcdb.Molecule("""
0 1
O 0.00000000 -0.05786571 -1.47979303
H 0.00000000 0.82293384 -1.85541474
H 0.00000000 0.07949567 -0.51934253
--
0 1
N 0.00000000 0.01436394 1.46454628
H 0.00000000 -0.98104857 1.65344779
H -0.81348351 0.39876776 1.92934049
H 0.81348351 0.39876776 1.92934049
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '2')] = qcdb.Molecule("""
0 1
O -0.06699914 0.00000000 1.49435474
H 0.81573427 0.00000000 1.86586639
H 0.06885510 0.00000000 0.53914277
--
0 1
O 0.06254775 0.00000000 -1.42263208
H -0.40696540 -0.76017841 -1.77174450
H -0.40696540 0.76017841 -1.77174450
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '3')] = qcdb.Molecule("""
0 1
H 0.00000000 0.00000000 3.85521306
C 0.00000000 0.00000000 2.78649976
N 0.00000000 0.00000000 1.63150791
--
0 1
H 0.00000000 0.00000000 -0.59377492
C 0.00000000 0.00000000 -1.66809824
N 0.00000000 0.00000000 -2.82525056
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '4')] = qcdb.Molecule("""
0 1
H 0.00000000 0.80267982 1.69529329
F 0.00000000 -0.04596666 1.34034818
--
0 1
H 0.00000000 -0.12040787 -0.49082840
F 0.00000000 0.00976945 -1.40424978
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '5')] = qcdb.Molecule("""
0 1
N -0.04998129 -1.58709323 0.00000000
H 0.12296265 -2.16846018 0.81105976
H 0.12296265 -2.16846018 -0.81105976
H 0.65988580 -0.86235298 0.00000000
--
0 1
N 0.04998129 1.58709323 0.00000000
H -0.12296265 2.16846018 0.81105976
H -0.65988580 0.86235298 0.00000000
H -0.12296265 2.16846018 -0.81105976
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '6')] = qcdb.Molecule("""
0 1
C 0.00000000 -0.00000000 1.77071609
H 0.51593378 -0.89362352 1.42025061
H -0.00000000 0.00000000 2.85805859
H 0.51593378 0.89362352 1.42025061
H -1.03186756 0.00000000 1.42025061
--
0 1
H -0.00000000 0.00000000 -0.54877328
F -0.00000000 0.00000000 -1.46803256
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '7')] = qcdb.Molecule("""
0 1
N -0.00000000 0.00000000 1.84833659
H 0.93730979 -0.00000000 2.23206741
H -0.46865489 -0.81173409 2.23206741
H -0.46865489 0.81173409 2.23206741
--
0 1
H 0.00000000 -0.00000000 -0.94497174
C 0.00000000 -0.00000000 -2.03363752
H 0.51251439 0.88770096 -2.40095125
H 0.51251439 -0.88770096 -2.40095125
H -1.02502878 0.00000000 -2.40095125
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '8')] = qcdb.Molecule("""
0 1
C 0.00069016 0.00000000 -1.99985520
H -0.50741740 0.88759452 -2.37290605
H 1.03052749 0.00000000 -2.35282982
H -0.01314396 0.00000000 -0.91190852
H -0.50741740 -0.88759452 -2.37290605
--
0 1
O -0.00472553 0.00000000 1.71597466
H 0.03211863 0.75755459 2.30172044
H 0.03211863 -0.75755459 2.30172044
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '9')] = qcdb.Molecule("""
0 1
C 0.00000000 0.60123980 -1.35383976
O 0.00000000 -0.59301814 -1.55209021
H 0.93542250 1.17427624 -1.26515132
H -0.93542250 1.17427624 -1.26515132
--
0 1
C 0.00000000 -0.60200476 1.55228866
O 0.00000000 0.59238638 1.35511328
H 0.00000000 -1.00937982 2.57524635
H 0.00000000 -1.32002906 0.71694997
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '10')] = qcdb.Molecule("""
0 1
C 0.01058825 -0.66806246 1.29820809
C 0.01058825 0.66806246 1.29820809
H 0.86863216 1.23267933 0.95426815
H -0.84608285 1.23258495 1.64525385
H -0.84608285 -1.23258495 1.64525385
H 0.86863216 -1.23267933 0.95426815
--
0 1
H -0.79685627 0.00000000 -2.50911038
O 0.04347445 0.00000000 -2.04834054
H -0.19067546 0.00000000 -1.11576944
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '11')] = qcdb.Molecule("""
0 1
C 0.00000000 -0.59797089 1.47742864
C 0.00000000 0.42131196 2.33957848
H 0.92113351 -1.02957102 1.10653516
H -0.92113351 -1.02957102 1.10653516
H -0.92393815 0.85124826 2.70694633
H 0.92393815 0.85124826 2.70694633
--
0 1
O 0.00000000 -0.51877334 -1.82845679
C 0.00000000 0.68616220 -1.73709412
H 0.00000000 1.33077474 -2.63186355
H 0.00000000 1.18902807 -0.75645498
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '12')] = qcdb.Molecule("""
0 1
C 0.00000000 0.60356400 -2.18173438
H 0.00000000 1.66847581 -2.18429610
C 0.00000000 -0.60356400 -2.18173438
H 0.00000000 -1.66847581 -2.18429610
--
0 1
C -0.00000000 0.00000000 1.57829513
H -0.00000000 0.00000000 0.51136193
C -0.00000000 0.00000000 2.78576543
H -0.00000000 0.00000000 3.85017859
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '13')] = qcdb.Molecule("""
0 1
C 0.00000000 -0.59662248 1.58722206
C 0.00000000 0.68258238 1.20494642
H 0.92312147 1.22423658 1.04062463
H -0.92312147 1.22423658 1.04062463
H -0.92388993 -1.13738548 1.75121281
H 0.92388993 -1.13738548 1.75121281
--
0 1
N 0.00000000 -0.00401379 -2.31096701
H -0.81122549 -0.45983060 -2.71043881
H 0.00000000 -0.22249432 -1.32128161
H 0.81122549 -0.45983060 -2.71043881
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '14')] = qcdb.Molecule("""
0 1
H 0.92444510 -1.23172221 -1.90619313
H -0.92444510 -1.23172221 -1.90619313
H -0.92444510 1.23172221 -1.90619313
H 0.92444510 1.23172221 -1.90619313
C 0.00000000 0.66728778 -1.90556520
C 0.00000000 -0.66728778 -1.90556520
--
0 1
H -0.00000000 1.23344948 2.82931792
H 0.00000000 1.22547148 0.97776199
H -0.00000000 -1.22547148 0.97776199
H -0.00000000 -1.23344948 2.82931792
C -0.00000000 -0.66711698 1.90601042
C -0.00000000 0.66711698 1.90601042
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '15')] = qcdb.Molecule("""
0 1
C 0.00000000 0.64634385 -1.60849815
C 0.00000000 -0.67914355 -1.45381675
H -0.92399961 -1.24016223 -1.38784883
H 0.92399961 -1.24016223 -1.38784883
H 0.92403607 1.20737602 -1.67357285
H -0.92403607 1.20737602 -1.67357285
--
0 1
H 0.00000000 0.08295411 1.59016711
C 0.00000000 0.02871509 2.67711785
H 0.88825459 0.52261990 3.06664029
H -0.88825459 0.52261990 3.06664029
H 0.00000000 -1.01394800 2.98955227
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '16')] = qcdb.Molecule("""
0 1
C 0.00346000 0.00000000 1.38045208
H 0.84849635 0.00000000 0.68958651
H 0.39513333 0.00000000 2.39584935
H -0.60268447 -0.88994299 1.22482674
H -0.60268447 0.88994299 1.22482674
--
0 1
B -0.00555317 0.00000000 -1.59887976
H 0.58455128 -1.03051800 -1.67949525
H 0.58455128 1.03051800 -1.67949525
H -1.18903148 0.00000000 -1.47677217
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '17')] = qcdb.Molecule("""
0 1
C 0.00000000 -0.06374421 2.42054090
H 0.00000000 1.02169396 2.34238038
H 0.88828307 -0.46131911 1.93307194
H -0.88828307 -0.46131911 1.93307194
H 0.00000000 -0.35363606 3.46945195
--
0 1
C 0.00000000 0.78133572 -1.13543912
H 0.00000000 1.37465349 -2.05114442
H -0.88043002 1.06310554 -0.55580918
C 0.00000000 -0.71332890 -1.44723686
H 0.88043002 1.06310554 -0.55580918
H 0.00000000 -1.30641812 -0.53140693
H -0.88100343 -0.99533072 -2.02587154
H 0.88100343 -0.99533072 -2.02587154
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '18')] = qcdb.Molecule("""
0 1
C -0.00000000 0.00000000 -2.85810471
H 0.39304720 -0.94712229 -2.49369739
H 0.62370837 0.81395000 -2.49369739
H -1.01675556 0.13317229 -2.49369739
H 0.00000000 -0.00000000 -3.94634214
--
0 1
C 0.00000000 -0.00000000 0.76143405
C -0.00000000 -0.00000000 2.28821715
H -0.61711193 -0.80824397 0.36571527
H -0.39140385 0.93855659 0.36571527
H 1.00851577 -0.13031262 0.36571527
H -1.00891703 0.13031295 2.68258296
H 0.39160418 -0.93890425 2.68258296
H 0.61731284 0.80859130 2.68258296
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '19')] = qcdb.Molecule("""
0 1
C -0.00000000 0.00000000 1.81901457
H 0.51274115 0.88809373 1.45476743
H 0.51274115 -0.88809373 1.45476743
H -1.02548230 0.00000000 1.45476743
H 0.00000000 -0.00000000 2.90722072
--
0 1
C 0.00000000 -0.00000000 -1.81901457
H -0.00000000 0.00000000 -2.90722072
H -0.51274115 0.88809373 -1.45476743
H -0.51274115 -0.88809373 -1.45476743
H 1.02548230 -0.00000000 -1.45476743
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '20')] = qcdb.Molecule("""
0 1
C -0.00000000 0.00000000 -2.62458428
H 0.51286762 0.88831278 -2.26110195
H 0.51286762 -0.88831278 -2.26110195
H -0.00000000 0.00000000 -3.71273928
H -1.02573525 0.00000000 -2.26110195
--
0 1
AR -0.00000000 0.00000000 1.05395172
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '21')] = qcdb.Molecule("""
0 1
C 0.00000000 0.66718073 -2.29024825
C 0.00000000 -0.66718073 -2.29024825
H -0.92400768 1.23202333 -2.28975239
H 0.92400768 1.23202333 -2.28975239
H -0.92400768 -1.23202333 -2.28975239
H 0.92400768 -1.23202333 -2.28975239
--
0 1
AR -0.00000000 0.00000000 1.60829261
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '22')] = qcdb.Molecule("""
0 1
H -0.92396100 1.23195600 -1.68478123
H 0.92396100 1.23195600 -1.68478123
H 0.92396100 -1.23195600 -1.68478123
H -0.92396100 -1.23195600 -1.68478123
C 0.00000000 0.66717600 -1.68478123
C 0.00000000 -0.66717600 -1.68478123
--
0 1
H -0.00000000 -1.66786500 1.81521877
H -0.00000000 1.66786500 1.81521877
C -0.00000000 -0.60339700 1.81521877
C -0.00000000 0.60339700 1.81521877
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '23')] = qcdb.Molecule("""
0 1
H -0.92396100 1.23195600 -1.75000000
H 0.92396100 1.23195600 -1.75000000
H 0.92396100 -1.23195600 -1.75000000
H -0.92396100 -1.23195600 -1.75000000
C 0.00000000 0.66717600 -1.75000000
C -0.00000000 -0.66717600 -1.75000000
--
0 1
H -0.92396100 1.23195600 1.75000000
H 0.92396100 1.23195600 1.75000000
H 0.92396100 -1.23195600 1.75000000
H -0.92396100 -1.23195600 1.75000000
C 0.00000000 0.66717600 1.75000000
C -0.00000000 -0.66717600 1.75000000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '24')] = qcdb.Molecule("""
0 1
H -0.00000000 -1.66786500 -1.75000000
H 0.00000000 1.66786500 -1.75000000
C -0.00000000 -0.60339700 -1.75000000
C 0.00000000 0.60339700 -1.75000000
--
0 1
H -0.00000000 -1.66786500 1.75000000
H 0.00000000 1.66786500 1.75000000
C -0.00000000 -0.60339700 1.75000000
C 0.00000000 0.60339700 1.75000000
units angstrom
""")
# <<< Derived Geometry Strings >>>
for rxn in HRXN:
GEOS['%s-%s-monoA-unCP' % (dbse, rxn)] = GEOS['%s-%s-dimer' % (dbse, rxn)].extract_fragments(1)
GEOS['%s-%s-monoB-unCP' % (dbse, rxn)] = GEOS['%s-%s-dimer' % (dbse, rxn)].extract_fragments(2)
GEOS['%s-%s-monoA-CP' % (dbse, rxn)] = GEOS['%s-%s-dimer' % (dbse, rxn)].extract_fragments(1, 2)
GEOS['%s-%s-monoB-CP' % (dbse, rxn)] = GEOS['%s-%s-dimer' % (dbse, rxn)].extract_fragments(2, 1)
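# Example access (editorial): the loop above derives monomer geometries from
# the stored dimers, so after it runs one can fetch, e.g.,
#   mol = GEOS['%s-%s-monoA-CP' % (dbse, 1)]
# which, assuming qcdb's extract_fragments(real, ghost) convention, is
# monomer A with fragment B's atoms ghosted for a counterpoise calculation.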
|
ashutoshvt/psi4
|
psi4/share/psi4/databases/A24alt.py
|
Python
|
lgpl-3.0
| 29,758
|
# -*- coding: utf-8 -*-
# made for python3!
from tkinter import *
from tkinter.ttk import *
class TkWindow():
registers = {}
def __init__(self, parent, title, width=400, height=300):
self.parent = parent #Tk or toplevel
self.w = width
self.h = height
self.make_gui(title)
self.loaded()
def loaded(self):
pass # overload me
"""register another window to receive a signal"""
@classmethod
def register(cls, target, signame):
if not target in cls.registers:
cls.registers[target] = []
cls.registers[target].append(signame)
"""send a signal to all registered windows"""
def send(self, signame, data=None):
cls = self.__class__
for targ, sigs in cls.registers.items():
if sigs is not None and signame in sigs:
targ.receive(self, signame, data)
"""receive a signame"""
def receive(self, sender, signame, data):
print("receive not overloaded but signal registered for <"
+ signame + "> from <"
+ str(sender) + "> with <" + str(data) +">")
# overload me in your receiving window for your application
def make_gui(self, title):
self.parent.title(title)
Style().configure("TFrame", padding=5)
self.frame = Frame(self.parent,
width=self.w,
height=self.h)
def makelabel(self, parent, lcol=0, lrow=0, caption='', **options):
# grid() returns None, so create the widget first and return the widget
label = Label(parent, text=caption, **options)
label.grid(row=lrow, column=lcol, sticky=NE)
return label
"""create a multiline text entry field with a label"""
def maketext(self, parent, lcol=0, lrow=0, erow=0, ecol=1, caption='', width=None, **options):
print(lrow, lcol)
if caption != '':
Label(parent, text=caption).grid(row=lrow, column=lcol, sticky=NE)
entry = Text(parent, **options)
if width:
entry.config(width=width)
entry.grid(row=erow, column=ecol, sticky=W)
return entry
def makeentry(self, parent, lcol=0, lrow=0, erow=0, ecol=1, caption='', width=None, **options):
if caption!='':
Label(parent, text=caption).grid(row=lrow, column=lcol, sticky=E)
entry = Entry(parent, **options)
if width:
entry.config(width=width)
entry.grid(row=erow, column=ecol, sticky=W)
return entry
def setentryvalue(self, entry, value):
entry.delete(0,END)
entry.insert(0, value)
def settextvalue(self, entry, value):
entry.delete('1.0', END)  # Text widget indices start at line 1
entry.insert('1.0', value)
def setbuttontext(self, button, txt):
button['text'] = txt
def makecombo(self, parent, ccol=1, crow=0, lcol=0, lrow=0, caption='',
width=None, **options):
if caption!='':
Label(parent, text=caption).grid(row=lrow, column=lcol, sticky=E)
cbox = Combobox(parent, **options)
if width:
cbox.config(width=width)
cbox.grid(row=crow, column=ccol)
return cbox
def makecheck(self, parent, ecol=0, erow=0, caption='', **options):
cb = Checkbutton(parent, text=caption, **options)
cb.grid(row=erow, column=ecol, sticky=W)
return cb
def makebutton(self, parent, bcol=0, brow=0, caption='Press me', sticky=W, **options):
bu = Button(parent, text=caption, **options)
bu.grid(row=brow, column=bcol, sticky=sticky)
return bu
"""create a list at the givne position"""
def makelist(self, parent, llcol=0, llrow=1, lcol=0, lrow=0,
caption='List', elements=[], mode='v',
lrowspan=1, lcolspan=1,
**options):
frame = Frame(parent)
frame.grid(row=lrow, column=lcol, rowspan=lrowspan, columnspan=lcolspan)
hscroll = vscroll = None
if caption!='':
Label(parent, text=caption).grid(row=llrow, column=llcol, sticky=W)
lb = Listbox(frame, **options)
if 'v' in mode:
vscroll = Scrollbar(frame, orient=VERTICAL)
lb.config(yscrollcommand = vscroll.set)
vscroll.config(command=lb.yview)
vscroll.pack(side=RIGHT, fill=Y)
if 'h' in mode:
hscroll = Scrollbar(frame, orient=HORIZONTAL)
lb.configure(xscrollcommand = hscroll.set)
hscroll.config(command = lb.xview)
hscroll.pack(side=BOTTOM, fill=X)
lb.pack(side=LEFT, fill=BOTH, expand=1)
if len(elements) > 0:
self.setlistelements(lb, elements)  # setlistelements needs the listbox
return lb
def setlistelements(self, lb, elements):
lb.delete(0, END)
for element in elements:
lb.insert(END, element)
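# Minimal usage sketch (editorial, not part of the original file):
#   root = Tk()
#   win = TkWindow(root, "Demo", 300, 200)
#   name = win.makeentry(win.frame, caption="Name:", width=20)
#   win.frame.grid()  # make_gui creates the frame but does not place it
#   root.mainloop()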
|
ManInAGarden/PiADCMeasure
|
tkwindow.py
|
Python
|
lgpl-3.0
| 4,958
|
"""A likelihood function representing a Student-t distribution.
Author:
Ilias Bilionis
Date:
1/21/2013
"""
__all__ = ['StudentTLikelihoodFunction']
import numpy as np
import scipy.linalg  # solve_triangular is used in __call__ below
import math
from . import GaussianLikelihoodFunction
class StudentTLikelihoodFunction(GaussianLikelihoodFunction):
"""An object representing a Student-t likelihood function."""
# The degrees of freedom
_nu = None
@property
def nu(self):
"""Get the degrees of freedom."""
return self._nu
@nu.setter
def nu(self, value):
"""Set the degrees of freedom."""
if not isinstance(value, float):
raise TypeError('nu must be a float.')
self._nu = value
def __init__(self, nu, num_input=None, data=None, mean_function=None, cov=None,
name='Student-t Likelihood Function'):
"""Initialize the object.
Arguments:
nu --- The degrees of freedom of the distribution.
Keyword Arguments
num_input --- The number of inputs. Optional, if
mean_function is a proper Function.
data --- The observed data. A vector. Optional,
if mean_function is a proper Function.
It can be set later.
mean_function --- The mean function. See the super class
for the description.
cov --- The covariance matrix. It can either be
a positive definite matrix or a number.
The data or a proper mean_function is
assumed to be set beforehand.
name --- A name for the likelihood function.
"""
self.nu = nu
super(StudentTLikelihoodFunction, self).__init__(num_input=num_input,
data=data,
mean_function=mean_function,
cov=cov,
name=name)
def __call__(self, x):
"""Evaluate the function at x."""
mu = self.mean_function(x)
y = scipy.linalg.solve_triangular(self.L_cov, self.data - mu)
return (
- 0.5 * (self.nu + self.num_data) * math.log(1. + np.dot(y, y) / self.nu))
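# Editorial note: assuming self.L_cov is the Cholesky factor of the
# covariance Sigma, the value returned above is the Student-t log-density up
# to an additive (nu-dependent) normalization constant:
#   -(nu + N)/2 * log(1 + (d - mu)^T Sigma^{-1} (d - mu) / nu)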
|
ebilionis/py-best
|
best/random/_student_t_likelihood_function.py
|
Python
|
lgpl-3.0
| 2,586
|
import pytest
from argparse import Namespace
from behave_cmdline import environment as env
@pytest.fixture(scope="function")
def dummy_context():
ns = Namespace()
env.before_scenario(ns, None)
yield ns
env.after_scenario(ns, None)
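# Usage sketch (editorial): a test function receives the prepared Namespace,
# with behave's before/after hooks applied around it.
#   def test_something(dummy_context):
#       assert isinstance(dummy_context, Namespace)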
|
buguroo/behave-cmdline
|
tests/unit/conftest.py
|
Python
|
lgpl-3.0
| 249
|
'''
This file is part of GEAR_mc.
GEAR_mc is a fork of Jeremie Passerin's GEAR project.
GEAR is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with this program. If not, see <http://www.gnu.org/licenses/lgpl.html>.
Author: Jeremie Passerin geerem@hotmail.com www.jeremiepasserin.com
Fork Author: Miquel Campos hello@miqueltd.com www.miqueltd.com
Date: 2013 / 08 / 16
'''
## @package gear_transformTools.py
# @author Jeremie Passerin
# @version 1.0
#
##########################################################
# GLOBAL
##########################################################
# Built_in
import gear  # needed for gear.log / gear.sev_error used below
from gear.xsi import xsi, c
import gear.xsi.uitoolkit as uit
import gear.xsi.transform as tra
##########################################################
# XSI LOAD / UNLOAD PLUGIN
##########################################################
# ========================================================
def XSILoadPlugin(in_reg):
in_reg.Author = "Jeremie Passerin, Miquel Campos"
in_reg.Name = "gear_transformTools"
in_reg.Email = "geerem@hotmail.com, hello@miqueltd.com"
in_reg.URL = "http://www.jeremiepasserin.com, http://www.miqueltd.com "
in_reg.Major = 1
in_reg.Minor = 0
# Commands
in_reg.RegisterCommand("gear_MatchSRT","gear_MatchSRT")
in_reg.RegisterCommand("gear_MatchT","gear_MatchT")
in_reg.RegisterCommand("gear_MatchR","gear_MatchR")
in_reg.RegisterCommand("gear_MatchS","gear_MatchS")
in_reg.RegisterCommand("gear_MatchRT","gear_MatchRT")
in_reg.RegisterCommand("gear_MatchSR","gear_MatchSR")
in_reg.RegisterCommand("gear_MatchST","gear_MatchST")
return True
# ========================================================
def XSIUnloadPlugin(in_reg):
strPluginName = in_reg.Name
xsi.LogMessage(str(strPluginName) + str(" has been unloaded."), c.siVerbose)
return True
##########################################################
# MATCH TRANSFORM
##########################################################
# ========================================================
def gear_MatchSRT_Execute():
if not xsi.Selection.Count:
gear.log("No Selection", gear.sev_error)
return
source_object = xsi.Selection(0)
target_object = uit.pickSession()
if not target_object:
return
tra.matchGlobalTransform(source_object, target_object)
# ========================================================
def gear_MatchT_Execute():
if not xsi.Selection.Count:
gear.log("No Selection", gear.sev_error)
return
source_object = xsi.Selection(0)
target_object = uit.pickSession()
if not target_object:
return
tra.matchGlobalTransform(source_object, target_object, True, False, False)
# ========================================================
def gear_MatchR_Execute():
if not xsi.Selection.Count:
gear.log("No Selection", gear.sev_error)
return
source_object = xsi.Selection(0)
target_object = uit.pickSession()
if not target_object:
return
tra.matchGlobalTransform(source_object, target_object, False, True, False)
# ========================================================
def gear_MatchS_Execute():
if not xsi.Selection.Count:
gear.log("No Selection", gear.sev_error)
return
source_object = xsi.Selection(0)
target_object = uit.pickSession()
if not target_object:
return
tra.matchGlobalTransform(source_object, target_object, False, False, True)
# ========================================================
def gear_MatchRT_Execute():
if not xsi.Selection.Count:
gear.log("No Selection", gear.sev_error)
return
source_object = xsi.Selection(0)
target_object = uit.pickSession()
if not target_object:
return
tra.matchGlobalTransform(source_object, target_object, True, True, False)
# ========================================================
def gear_MatchSR_Execute():
if not xsi.Selection.Count:
gear.log("No Selection", gear.sev_error)
return
source_object = xsi.Selection(0)
target_object = uit.pickSession()
if not target_object:
return
tra.matchGlobalTransform(source_object, target_object, False, True, True)
# ========================================================
def gear_MatchST_Execute():
if not xsi.Selection.Count:
gear.log("No Selection", gear.sev_error)
return
source_object = xsi.Selection(0)
target_object = uit.pickSession()
if not target_object:
return
tra.matchGlobalTransform(source_object, target_object, True, False, True)
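# Editorial note: in the calls above, the three booleans passed to
# tra.matchGlobalTransform appear to select translation, rotation and
# scaling, in that order (e.g. gear_MatchT passes True, False, False).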
|
miquelcampos/GEAR_mc
|
workgroup/Addons/gear/Application/Plugins/gear_transformTools.py
|
Python
|
lgpl-3.0
| 5,127
|
"""Various array operations.
Author: Seth Axen
E-mail: seth.axen@gmail.com
"""
import numpy as np
from scipy.spatial.distance import pdist, squareform
QUATERNION_DTYPE = np.float64
X_AXIS, Y_AXIS, Z_AXIS = np.identity(3, dtype=np.float64)
EPS = 1e-12 # epsilon, a number close to 0
# Vector Algebra Methods
def as_unit(v, axis=1):
"""Return array of unit vectors parallel to vectors in `v`.
Parameters
----------
v : ndarray of float
axis : int, optional
Axis along which to normalize length.
Returns
-------
ndarray of float : Unit vector of `v`, i.e. `v` divided by its
magnitude along `axis`.
"""
u = np.array(v, dtype=np.float64, copy=True)
if u.ndim == 1:
sqmag = u.dot(u)
if sqmag >= EPS:
u /= sqmag ** 0.5
else:
if axis == 1:
sqmag = np.einsum("...ij,...ij->...i", u, u)
else:
sqmag = np.einsum("...ij,...ij->...j", u, u)
sqmag[sqmag < EPS] = 1.0
u /= np.expand_dims(np.sqrt(sqmag), axis)
return u
def make_distance_matrix(coords):
"""Build pairwise distance matrix from coordinates.
Parameters
----------
coords : ndarray of float
an Mx3 array of cartesian coordinates.
Returns
-------
ndarray of float : square symmetrical distance matrix
"""
return squareform(pdist(coords))
def make_transform_matrix(center, y=None, z=None):
"""Make 4x4 homogenous transformation matrix.
Given Nx4 array A where A[:, 4] = 1., the transform matrix M should be
used with dot(M, A.T).T. Order of operations is 1. translation, 2. align
`y` x `z` plane to yz-plane 3. align `y` to y-axis.
Parameters
----------
center : 1x3 array of float
Coordinate that should be centered after transformation.
y : None or 1x3 array of float
Vector that should lie on the y-axis after transformation
z : None or 1x3 array of float
Vector that after transformation should lie on yz-plane in direction
of z-axis.
Returns
-------
4x4 array of float
4x4 homogenous transformation matrix.
"""
translate = np.identity(4, dtype=np.float64)
translate[:3, 3] = -np.asarray(center, dtype=np.float64)
if y is not None:
y = np.atleast_2d(y)
if z is None:
rotate = np.identity(4, dtype=np.float64)
rotate[:3, :3] = make_rotation_matrix(y, Y_AXIS)
else:
z = np.atleast_2d(z)
rotate_norm = np.identity(4, dtype=np.float64)
x_unit = as_unit(np.cross(y, z))
rotate_norm[:3, :3] = make_rotation_matrix(x_unit, X_AXIS)
new_y = np.dot(rotate_norm[:3, :3], y.flatten())
rotate_y = np.identity(4, dtype=np.float64)
rotate_y[:3, :3] = make_rotation_matrix(new_y.flatten(), Y_AXIS)
rotate = np.dot(rotate_y, rotate_norm)
transform = np.dot(rotate, translate)
else:
transform = translate
return transform
def make_rotation_matrix(v0, v1):
"""Create 3x3 matrix of rotation from `v0` onto `v1`.
Should be used by dot(R, v0.T).T.
Parameters
----------
v0 : 1x3 array of float
Initial vector before alignment.
v1 : 1x3 array of float
Vector to which to align `v0`.
"""
v0 = as_unit(v0)
v1 = as_unit(v1)
u = np.cross(v0.ravel(), v1.ravel())
if np.all(u == 0.0):
return np.identity(3, dtype=np.float64)
sin_ang = u.dot(u) ** 0.5
u /= sin_ang
cos_ang = np.dot(v0, v1.T)
# fmt: off
ux = np.array([[ 0., -u[2], u[1]],
[ u[2], 0., -u[0]],
[-u[1], u[0], 0.]], dtype=np.float64)
# fmt: on
rot = (
cos_ang * np.identity(3, dtype=np.float64)
+ sin_ang * ux
+ (1 - cos_ang) * np.outer(u, u)
)
return rot
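# Editorial note: the expression above is the Rodrigues rotation formula,
#   R = cos(a)*I + sin(a)*[u]_x + (1 - cos(a))*u*u^T,
# rotating v0 onto v1 about the unit axis u = (v0 x v1)/|v0 x v1|.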
def transform_array(transform_matrix, a):
"""Pad an array with 1s, transform, and return with original dimensions.
Parameters
----------
transform_matrix : 4x4 array of float
4x4 homogenous transformation matrix
a : Nx3 array of float
Array of 3-D coordinates.
Returns
-------
Nx3 array of float : Transformed array
"""
return unpad_array(np.dot(transform_matrix, pad_array(a).T).T)
def pad_array(a, n=1.0, axis=1):
"""Return `a` with row of `n` appended to `axis`.
Parameters
----------
a : ndarray
Array to pad
n : float or int, optional
Value to pad `a` with
axis : int, optional
Axis of `a` to pad with `n`.
Returns
-------
ndarray
Padded array.
"""
if a.ndim == 1:
pad = np.ones(a.shape[0] + 1, dtype=a.dtype) * n
pad[: a.shape[0]] = a
else:
shape = list(a.shape)
shape[axis] += 1
pad = np.ones(shape, dtype=a.dtype)
pad[: a.shape[0], : a.shape[1]] = a
return pad
def unpad_array(a, axis=1):
"""Return `a` with row removed along `axis`.
Parameters
----------
a : ndarray
Array from which to remove row
axis : int, optional
Axis from which to remove row
Returns
-------
ndarray
Unpadded array.
"""
if a.ndim == 1:
return a[:-1]
else:
shape = list(a.shape)
shape[axis] -= 1
return a[: shape[0], : shape[1]]
def project_to_plane(vec_arr, norm):
"""Project array of vectors to plane with normal `norm`.
Parameters
----------
vec_arr : Nx3 array
Array of N 3D vectors.
norm : 1x3 array
Normal vector to plane.
Returns
-------
Nx3 array
Array of vectors projected onto plane.
"""
unit_norm = as_unit(norm).flatten()
mag_on_norm = np.dot(vec_arr, unit_norm)
if vec_arr.ndim == 1:
vec_on_norm = np.array(unit_norm, copy=True)
vec_on_norm *= mag_on_norm
else:
vec_on_norm = np.tile(unit_norm, (vec_arr.shape[0], 1))
vec_on_norm *= mag_on_norm[:, None]
return vec_arr - vec_on_norm
def calculate_angles(vec_arr, ref, ref_norm=None):
"""Calculate angles between vectors in `vec_arr` and `ref` vector.
If `ref_norm` is not provided, angle ranges between 0 and pi. If it is
provided, angle ranges between 0 and 2pi. Note that if `ref_norm` is
orthogonal to `vec_arr` and `ref`, then the angle is rotation around the
axis, but if a non-orthogonal axis is provided, this may not be the case.
Parameters
----------
vec_arr : Nx3 array of float
Array of N 3D vectors.
ref : 1x3 array of float
Reference vector
ref_norm : 1x3 array of float
Normal vector.
Returns
-------
1-D array
Array of N angles
"""
unit_vec_arr = as_unit(vec_arr)
unit_ref = as_unit(ref).flatten()
ang = np.arccos(np.clip(np.dot(unit_vec_arr, unit_ref), -1.0, 1.0))
# handle cases where a vector is the origin
ang[np.all(unit_vec_arr == np.zeros(3), axis=1)] = 0.0
if ref_norm is not None:
sign = np.sign(
np.dot(ref_norm, np.cross(unit_vec_arr, unit_ref).T)
).flatten()
sign[sign == 0] = 1
ang = rotate_angles(sign * ang, 2 * np.pi)
return ang
def rotate_angles(angles, amount):
"""Rotate angles by `amount`, keeping in 0 to 2pi range.
Parameters
----------
angles : 1-D array of float
Angles in radians
amount : float
Amount to rotate angles by
Returns
-------
1-D array of float : Rotated angles
"""
return (angles + amount) % (2 * np.pi)
def quaternion_to_transform_matrix(quaternion, translation=np.zeros(3)):
"""Convert quaternion to homogenous 4x4 transform matrix.
Parameters
----------
quaternion : 4x1 array of float
Quaternion describing rotation after translation.
translation : 3x1 array of float, optional
Translation to be performed before rotation.
"""
q = np.array(quaternion, dtype=np.float64, copy=True)
n = np.linalg.norm(q)
if n < 1e-12:
return np.identity(4, dtype=np.float64)
q /= n
q = 2 * np.outer(q, q)
# fmt: off
transform_mat = np.array(
[[1.-q[2, 2]-q[3, 3], q[1, 2]-q[3, 0], q[1, 3]+q[2, 0], 0.],
[ q[1, 2]+q[3, 0], 1.-q[1, 1]-q[3, 3], q[2, 3]-q[1, 0], 0.],
[ q[1, 3]-q[2, 0], q[2, 3]+q[1, 0], 1.-q[1, 1]-q[2, 2], 0.],
[ 0., 0., 0., 1.]],
dtype=np.float64
)
# fmt: on
transform_mat[:3, 3] = translation
return transform_mat
def transform_matrix_to_quaternion(transform_matrix, dtype=QUATERNION_DTYPE):
"""Convert homogenous 4x4 transform matrix to quaternion.
Parameters
----------
transform_matrix : 4x4 array of float
Homogenous transformation matrix.
dtype : numpy dtype, optional
Datatype for returned quaternion.
"""
T = np.array(transform_matrix, dtype=np.float64)
R = T[:3, :3]
q = np.zeros(4, dtype=dtype)
# NOTE: this branch-free extraction assumes R.trace() > -1 (rotation angle
# strictly less than pi); for a 180-degree rotation q[0] is 0 and the
# division below fails.
q[0] = np.sqrt(1.0 + R.trace()) / 2.0
q[1] = R[2, 1] - R[1, 2]
q[2] = R[0, 2] - R[2, 0]
q[3] = R[1, 0] - R[0, 1]
q[1:4] /= 4.0 * q[0]
return q
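# Minimal usage sketch (editorial, not part of the original module):
#   import numpy as np
#   coords = np.random.rand(5, 3)
#   M = make_transform_matrix(coords.mean(axis=0), y=[0., 1., 0.])
#   centered = transform_array(M, coords)  # coordinates centered at origin
#   q = transform_matrix_to_quaternion(M)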
|
keiserlab/e3fp
|
e3fp/fingerprint/array_ops.py
|
Python
|
lgpl-3.0
| 9,285
|
# -*- coding: utf-8 -*-
"""
Created on Sat Dec 6 17:01:05 2014
@author: remi
@TODO :
in the function train_RForest_with_kfold we should keep all the result probabilities for each class, this could be very interesting.
"""
import numpy as np ; #efficient arrays
import pandas as pd; # data frame to do sql like operation
import sklearn
reload(sklearn)
from sklearn.ensemble import RandomForestClassifier ; #base lib
from sklearn import cross_validation, preprocessing ; #normalizing data, creating kfold validation
def create_test_data(feature_number, data_size, class_list):
"""simple function to emulate input, gid is a unique int, other are features"""
import random ; #used to chose a class randomly
#create test vector
feature = np.random.random_sample((data_size,feature_number)) * 10 ;
gid = np.arange(13,data_size+13) ;
#create ground truth class vector : a 1,N vector containing randomly one of the possible class
ground_truth_class = np.zeros(data_size);
for i,(not_used) in enumerate(ground_truth_class):
ground_truth_class[i] = np.random.choice(class_list) ;
return gid, feature, ground_truth_class ;
def create_label_equivalency(labels_name, labels_number):
"""we create an equivalency list between class name and class number"""
import numpy as np;
labels = np.zeros(len(labels_name), dtype={'names':['class_id', 'class_name']\
, 'formats':['i4','a10']}) ;
for i in np.arange(0,len(labels_name)):
labels['class_id'][i] = labels_number[i]
labels['class_name'][i] = labels_name[i]
return labels;
def preprocess_data(X):
from sklearn import preprocessing ;
scaler = preprocessing.StandardScaler(copy=False,with_std=False);
scaler.fit_transform(X) ;
#scaler.transform(Y);
#scaler.transform(X);
return scaler;
def train_RForest_with_kfold(i,train, test, gid,X,Y,weight,scaler,clf,result,feature_importances,learning_time,predicting_time ):
import datetime;
import time;
# creating data for train and test
X_train, X_test, Y_train, Y_test, Weight_train, Weight_test = X[train],X[test], Y[train], Y[test], weight[train], weight[test] ;
#learning
time_temp = time.clock();
print ' starting learning at \n\t\t\t\t%s' % datetime.datetime.now() ;
clf.fit(X_train,Y_train,Weight_train) ;
learning_time = learning_time+ time.clock() - time_temp;
#predicting
print ' learning finished, starting prediction at \n\t\t\t\t%s' % datetime.datetime.now() ;
time_temp = time.clock();
# NOTE: predict() returns class labels, not probabilities; switching to
# predict_proba() here would keep the per-class probabilities mentioned
# in the module @TODO.
tmp_prob = clf.predict(X_test) ;
predicting_time += time.clock() - time_temp;
print ' prediction finished at \n\t\t\t\t%s' % datetime.datetime.now() ;
#grouping for score per class
proba_class_chosen = np.column_stack( \
(np.array(gid)[test],tmp_prob, Y_test,Weight_test ) ) ;
#constructinig the result data frame
df = pd.DataFrame(proba_class_chosen, columns = ("gid","class_chosen","ground_truth_class" ,"weight")) ;
if (i==0):
result = result.append(df, ignore_index=True) ;
else:
#print 'entering here, df is : ', df
result = result.append( df,ignore_index=True) ;
#plpy.notice("feature used, by importcy");
#plpy.notice(clf.feature_importances_)
#storing how important was each feature to make the prediction
feature_importances.append(clf.feature_importances_) ;
return learning_time,predicting_time,result
def Rforest_learn_predict(gid, X, Y,weight, labels, k_folds, random_forest_trees ,plot_directory):
from sklearn.metrics import classification_report
import datetime;
scaler = preprocess_data(X);
#creating the random forest object
clf = RandomForestClassifier(random_forest_trees, criterion="entropy" ,min_samples_leaf=20) ;
#cutting the set into k_folds pieces, then proposing k_folds partitions of (k_folds-1) training folds + 1 test fold
kf_total = cross_validation.KFold(len(X), n_folds = k_folds, shuffle = True, random_state = 4) ;
result = pd.DataFrame() ;
feature_importances = [] ;
learning_time = 0.0 ;
predicting_time = 0.0 ;
for i ,(train, test) in enumerate(kf_total) :
print ' working on kfold %s , %s' % (i+1,datetime.datetime.now())
learning_time,predicting_time, result = train_RForest_with_kfold(i,train, test,gid,X,Y,weight,scaler,clf,result,feature_importances,learning_time,predicting_time) ;
report = classification_report( result['ground_truth_class'],result['class_chosen'],target_names = labels)#,sample_weight=result['weight']) ;
return np.column_stack((result['gid']
,result['ground_truth_class'].astype(int)
, result['class_chosen'].astype(int)
, np.zeros(len(result['ground_truth_class'])) )),report,feature_importances,learning_time,predicting_time;
def RForest_learn_predict_pg(gids,feature_iar,gt_classes,weight,labels_name,class_list, k_folds,random_forest_ntree, plot_directory):
"""Compute random forest classifiers using feature_iar and gt_classes ground trhuth. Divide the data set into kfolds to perform the operation K times
@param gids is a int[n]
@param feature_iar is a float[m x n], where m is the number of feature, and the matrix is wirtten row by row
@param gt_classes is a int[n] giving the ground truth class for each observation
@param k_folds is a int describing in how much part we should split the data set
@param random_forest_ntree how much tree in the frest?
@param plot_directory is a string like '/tmp', describing the directory where to write the figures generated
"""
#reshape input feature vector into feature matrix
feature_iar = np.array( feature_iar, dtype=np.float)
feature = np.reshape(feature_iar,( len(gids),len(feature_iar)/len(gids) ) ) ;
gids = np.array(gids);
gt_classes = np.array(gt_classes)
#plpy.notice('toto')
feature[np.isnan(feature)]=0 ;
labels = create_label_equivalency(labels_name,class_list )
weight_iar = np.array(weight)
return Rforest_learn_predict(gids
,feature
,gt_classes
,weight_iar
,labels
, k_folds
, random_forest_ntree
,plot_directory) ;
def RForest_learn_predict_pg_test():
#param
nfeature = 3
n_obs = 1000 ;
class_list = [1,2,3,4,5,6,7]
labels = ['FF1', 'FF2', 'FF3', 'FO2', 'FO3', 'LA6', 'NoC']
k_folds = 10
random_forest_ntree = 10;
plot_directory = '/media/sf_E_RemiCura/PROJETS/point_cloud/PC_in_DB/LOD_ordering_for_patches_of_points/result_rforest/vosges';
#creating input of function
gids = np.arange(13,n_obs+13);
feature_iar = np.random.rand(nfeature*n_obs)*10 ;
gt_classes = np.zeros(n_obs);
for i,(not_used) in enumerate(gt_classes):
gt_classes[i] = np.random.choice(class_list) ;
#
gids= [8736, 8737, 8738, 8739, 8742, 8743, 8744, 8746, 8748, 8749]
feature_iar = [0.0, 0.0, 0.0, 0.0, 1.0, 28.0, 2.0, 593.17, 0.0, 2.0, 4.0, 0.0, 0.0, 1.0, 36.511, 1.0, 592.176, 7.52, 0.0, 0.0, 0.0, 0.0, 1.0, 46.0, 1.0, 598.33, 0.0, 4.0, 23.0, 91.0, 347.0, 1.0, 33.2, 1.0, 585.271, 22.89, 6.0, 36.0, 189.0, 517.0, 1.0, 15.42, 2.0, 616.146, 39.41, 7.0, 37.0, 171.0, 497.0, 1.0, 13.532, 2.0, 607.817, 46.73, 6.0, 33.0, 155.0, 360.0, 1.0, 14.62, 2.0, 596.008, 42.09, 3.0, 29.0, 99.0, 255.0, 1.0, 11.295, 2.0, 572.784, 45.55, 3.0, 30.0, 118.0, 274.0, 1.0, 12.154, 2.0, 517.455, 49.62, 3.0, 28.0, 110.0, 278.0, 0.99, 11.016, 2.0, 495.071, 50.03] ;
gt_classes =[4, 4, 4, 4, 3, 3, 3, 2, 1, 1]
labels_name = ['FF1', 'FF2', 'FF3', 'NoC']
class_list =[1, 2, 3, 4]
weight = [0.25, 0.25, 0.25, 0.25, 0.3333, 0.3333, 0.3333, 1.0, 0.5, 0.5]
random_forest_ntree = 10 ;
#launching function
result = RForest_learn_predict_pg(gids,feature_iar,gt_classes,weight,labels_name,class_list,k_folds,random_forest_ntree, plot_directory)
return result ;
#print RForest_learn_predict_pg_test()
|
Remi-C/LOD_ordering_for_patches_of_points
|
script/loading benchmark/rforest_on_patch_lean.py
|
Python
|
lgpl-3.0
| 8,121
|
import gtk
import highgtk.entity
import highgtk.present.default.layout
def add (inquiry):
window = getattr (inquiry, "present_window", None)
if window is None:
inquiry.present_window = gtk.Dialog()
title = getattr (inquiry, "title", None)
if title is None:
root = highgtk.entity.get_root (inquiry)
title = "Inquiry from %s" % root.name
inquiry.present_window.add_button (gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL)
ok_text = getattr (inquiry, "ok_text", gtk.STOCK_OK)
inquiry.present_window.add_button (ok_text, gtk.RESPONSE_OK)
inquiry.present_window.set_default_response (gtk.RESPONSE_OK)
inquiry.present_window.connect ("response", response, inquiry)
inquiry.present_window.connect ("delete_event", delete_event, inquiry)
inquiry.present_window.set_title (title)
inquiry.present_window.set_position (gtk.WIN_POS_CENTER)
inquiry.present_layout = highgtk.present.default.layout.get_layout (inquiry.data)
inquiry.present_layout.build (inquiry.present_window.get_content_area())
inquiry.present_report = gtk.Label()
inquiry.present_report.set_line_wrap (True)
inquiry.present_report.set_alignment (0.0, 0.5)
inquiry.present_window.get_content_area().pack_end (inquiry.present_report)
inquiry.present_window.show_all()
else:
window.present()
def remove (inquiry):
window = getattr (inquiry, "present_window", None)
if window is not None:
window.hide()
del inquiry.present_window
def cancel (inquiry):
method_name = getattr (inquiry, "cancel_method", None)
if method_name is not None:
method = getattr (inquiry.parent, method_name)
method (inquiry)
inquiry.remove (inquiry)
def okay (inquiry):
method_name = getattr (inquiry, "ok_method", "inquiry_okay")
error = inquiry.present_layout.get_error()
if error is not None:
inquiry.error_report.primary = error
inquiry.add (inquiry.error_report)
else:
method = getattr (inquiry.parent, method_name)
method (inquiry, inquiry.present_layout.get_data())
inquiry.remove (inquiry)
def response (widget, response_id, inquiry):
if response_id==gtk.RESPONSE_OK:
okay (inquiry)
elif response_id==gtk.RESPONSE_CANCEL:
cancel (inquiry)
return True
def delete_event (widget, event, inquiry):
cancel (inquiry)
return True
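# Usage sketch (illustrative, not part of this module): the functions above
# operate on an "inquiry" entity from the highgtk framework; the attribute
# names below mirror the getattr defaults used in add() and okay() and are
# assumptions about the calling code rather than APIs defined here.
#
#   inquiry.title = "Confirm removal"     # optional; add() derives one otherwise
#   inquiry.ok_text = gtk.STOCK_DELETE    # optional; defaults to gtk.STOCK_OK
#   inquiry.ok_method = "inquiry_okay"    # looked up on inquiry.parent by okay()
#   add (inquiry)                         # builds and presents the dialog
#   gtk.main()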
|
zinnschlag/high-pygtk
|
highgtk/present/default/inquiry.py
|
Python
|
lgpl-3.0
| 2,487
|
import re
s = open("neurolab/tool.py", "r").read()
s = re.sub('^([ \t\r\f\v]*)(.+?)\.shape = (.*?), (.*?)$', '\g<1>\g<2> = \g<2>.reshape((\g<3>, \g<4>,)) #replacement for \"\\g<0>\"', s, flags=re.MULTILINE)
s = re.sub('^([ \t\r\f\v]*)(.+?)\.shape = (\S+?)$', '\g<1>\g<2> = \g<2>.reshape(\g<3>) #replacement for \"\\g<0>\"', s, flags=re.MULTILINE)
f = open("neurolab/newtool.py", "w")
f.write(s)
f.close()
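# Worked example (illustrative, not part of the original script): given a line
#     out.shape = rows, cols
# the first substitution rewrites it to
#     out = out.reshape((rows, cols,)) #replacement for "out.shape = rows, cols"
# while the second handles the single-argument form, turning "out.shape = n" into
#     out = out.reshape(n) #replacement for "out.shape = n"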
|
inferrna/neurolabcl
|
replacer.py
|
Python
|
lgpl-3.0
| 407
|
from odoo import fields, models
class SaasSubscriptionLog(models.Model):
_name = 'saas_portal.subscription_log'
_order = 'id desc'
client_id = fields.Many2one('saas_portal.client', 'Client')
expiration = fields.Datetime('Previous expiration')
expiration_new = fields.Datetime('New expiration')
reason = fields.Text('Reason')
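# Usage sketch (illustrative; assumes an Odoo environment and hypothetical
# variables):
#   self.env['saas_portal.subscription_log'].create({
#       'client_id': client.id,
#       'expiration': old_expiration,
#       'expiration_new': new_expiration,
#       'reason': 'Subscription extended after payment',
#   })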
|
it-projects-llc/odoo-saas-tools
|
saas_portal_subscription/models/subscription_log.py
|
Python
|
lgpl-3.0
| 352
|
# encoding: utf-8
from __future__ import unicode_literals
import os
import re
import sys
from .common import InfoExtractor
from .youtube import YoutubeIE
from ..compat import (
compat_etree_fromstring,
compat_urllib_parse_unquote,
compat_urlparse,
compat_xml_parse_error,
)
from ..utils import (
determine_ext,
ExtractorError,
float_or_none,
HEADRequest,
is_html,
orderedSet,
sanitized_Request,
smuggle_url,
unescapeHTML,
unified_strdate,
unsmuggle_url,
UnsupportedError,
url_basename,
xpath_text,
)
from .brightcove import (
BrightcoveLegacyIE,
BrightcoveNewIE,
)
from .nbc import NBCSportsVPlayerIE
from .ooyala import OoyalaIE
from .rutv import RUTVIE
from .tvc import TVCIE
from .sportbox import SportBoxEmbedIE
from .smotri import SmotriIE
from .myvi import MyviIE
from .condenast import CondeNastIE
from .udn import UDNEmbedIE
from .senateisvp import SenateISVPIE
from .svt import SVTIE
from .pornhub import PornHubIE
from .xhamster import XHamsterEmbedIE
from .tnaflix import TNAFlixNetworkEmbedIE
from .vimeo import VimeoIE
from .dailymotion import (
DailymotionIE,
DailymotionCloudIE,
)
from .onionstudios import OnionStudiosIE
from .viewlift import ViewLiftEmbedIE
from .screenwavemedia import ScreenwaveMediaIE
from .mtv import MTVServicesEmbeddedIE
from .pladform import PladformIE
from .videomore import VideomoreIE
from .googledrive import GoogleDriveIE
from .jwplatform import JWPlatformIE
from .digiteka import DigitekaIE
from .arkena import ArkenaIE
from .instagram import InstagramIE
from .liveleak import LiveLeakIE
from .threeqsdn import ThreeQSDNIE
from .theplatform import ThePlatformIE
from .vessel import VesselIE
from .kaltura import KalturaIE
from .eagleplatform import EaglePlatformIE
from .facebook import FacebookIE
from .soundcloud import SoundcloudIE
from .vbox7 import Vbox7IE
class GenericIE(InfoExtractor):
IE_DESC = 'Generic downloader that works on some sites'
_VALID_URL = r'.*'
IE_NAME = 'generic'
_TESTS = [
# Direct link to a video
{
'url': 'http://media.w3.org/2010/05/sintel/trailer.mp4',
'md5': '67d406c2bcb6af27fa886f31aa934bbe',
'info_dict': {
'id': 'trailer',
'ext': 'mp4',
'title': 'trailer',
'upload_date': '20100513',
}
},
# Direct link to media delivered compressed (until Accept-Encoding is *)
{
'url': 'http://calimero.tk/muzik/FictionJunction-Parallel_Hearts.flac',
'md5': '128c42e68b13950268b648275386fc74',
'info_dict': {
'id': 'FictionJunction-Parallel_Hearts',
'ext': 'flac',
'title': 'FictionJunction-Parallel_Hearts',
'upload_date': '20140522',
},
'expected_warnings': [
'URL could be a direct video link, returning it as such.'
]
},
# Direct download with broken HEAD
{
'url': 'http://ai-radio.org:8000/radio.opus',
'info_dict': {
'id': 'radio',
'ext': 'opus',
'title': 'radio',
},
'params': {
'skip_download': True, # infinite live stream
},
'expected_warnings': [
r'501.*Not Implemented',
r'400.*Bad Request',
],
},
# Direct link with incorrect MIME type
{
'url': 'http://ftp.nluug.nl/video/nluug/2014-11-20_nj14/zaal-2/5_Lennart_Poettering_-_Systemd.webm',
'md5': '4ccbebe5f36706d85221f204d7eb5913',
'info_dict': {
'url': 'http://ftp.nluug.nl/video/nluug/2014-11-20_nj14/zaal-2/5_Lennart_Poettering_-_Systemd.webm',
'id': '5_Lennart_Poettering_-_Systemd',
'ext': 'webm',
'title': '5_Lennart_Poettering_-_Systemd',
'upload_date': '20141120',
},
'expected_warnings': [
'URL could be a direct video link, returning it as such.'
]
},
# RSS feed
{
'url': 'http://phihag.de/2014/youtube-dl/rss2.xml',
'info_dict': {
'id': 'http://phihag.de/2014/youtube-dl/rss2.xml',
'title': 'Zero Punctuation',
'description': 're:.*groundbreaking video review series.*'
},
'playlist_mincount': 11,
},
# RSS feed with enclosure
{
'url': 'http://podcastfeeds.nbcnews.com/audio/podcast/MSNBC-MADDOW-NETCAST-M4V.xml',
'info_dict': {
'id': 'pdv_maddow_netcast_m4v-02-27-2015-201624',
'ext': 'm4v',
'upload_date': '20150228',
'title': 'pdv_maddow_netcast_m4v-02-27-2015-201624',
}
},
# SMIL from http://videolectures.net/promogram_igor_mekjavic_eng
{
'url': 'http://videolectures.net/promogram_igor_mekjavic_eng/video/1/smil.xml',
'info_dict': {
'id': 'smil',
'ext': 'mp4',
'title': 'Automatics, robotics and biocybernetics',
'description': 'md5:815fc1deb6b3a2bff99de2d5325be482',
'upload_date': '20130627',
'formats': 'mincount:16',
'subtitles': 'mincount:1',
},
'params': {
'force_generic_extractor': True,
'skip_download': True,
},
},
# SMIL from http://www1.wdr.de/mediathek/video/livestream/index.html
{
'url': 'http://metafilegenerator.de/WDR/WDR_FS/hds/hds.smil',
'info_dict': {
'id': 'hds',
'ext': 'flv',
'title': 'hds',
'formats': 'mincount:1',
},
'params': {
'skip_download': True,
},
},
# SMIL from https://www.restudy.dk/video/play/id/1637
{
'url': 'https://www.restudy.dk/awsmedia/SmilDirectory/video_1637.xml',
'info_dict': {
'id': 'video_1637',
'ext': 'flv',
'title': 'video_1637',
'formats': 'mincount:3',
},
'params': {
'skip_download': True,
},
},
# SMIL from http://adventure.howstuffworks.com/5266-cool-jobs-iditarod-musher-video.htm
{
'url': 'http://services.media.howstuffworks.com/videos/450221/smil-service.smil',
'info_dict': {
'id': 'smil-service',
'ext': 'flv',
'title': 'smil-service',
'formats': 'mincount:1',
},
'params': {
'skip_download': True,
},
},
# SMIL from http://new.livestream.com/CoheedandCambria/WebsterHall/videos/4719370
{
'url': 'http://api.new.livestream.com/accounts/1570303/events/1585861/videos/4719370.smil',
'info_dict': {
'id': '4719370',
'ext': 'mp4',
'title': '571de1fd-47bc-48db-abf9-238872a58d1f',
'formats': 'mincount:3',
},
'params': {
'skip_download': True,
},
},
# XSPF playlist from http://www.telegraaf.nl/tv/nieuws/binnenland/24353229/__Tikibad_ontruimd_wegens_brand__.html
{
'url': 'http://www.telegraaf.nl/xml/playlist/2015/8/7/mZlp2ctYIUEB.xspf',
'info_dict': {
'id': 'mZlp2ctYIUEB',
'ext': 'mp4',
'title': 'Tikibad ontruimd wegens brand',
'description': 'md5:05ca046ff47b931f9b04855015e163a4',
'thumbnail': 're:^https?://.*\.jpg$',
'duration': 33,
},
'params': {
'skip_download': True,
},
},
# MPD from http://dash-mse-test.appspot.com/media.html
{
'url': 'http://yt-dash-mse-test.commondatastorage.googleapis.com/media/car-20120827-manifest.mpd',
'md5': '4b57baab2e30d6eb3a6a09f0ba57ef53',
'info_dict': {
'id': 'car-20120827-manifest',
'ext': 'mp4',
'title': 'car-20120827-manifest',
'formats': 'mincount:9',
'upload_date': '20130904',
},
'params': {
'format': 'bestvideo',
},
},
# m3u8 served with Content-Type: audio/x-mpegURL; charset=utf-8
{
'url': 'http://once.unicornmedia.com/now/master/playlist/bb0b18ba-64f5-4b1b-a29f-0ac252f06b68/77a785f3-5188-4806-b788-0893a61634ed/93677179-2d99-4ef4-9e17-fe70d49abfbf/content.m3u8',
'info_dict': {
'id': 'content',
'ext': 'mp4',
'title': 'content',
'formats': 'mincount:8',
},
'params': {
# m3u8 downloads
'skip_download': True,
}
},
# m3u8 served with Content-Type: text/plain
{
'url': 'http://www.nacentapps.com/m3u8/index.m3u8',
'info_dict': {
'id': 'index',
'ext': 'mp4',
'title': 'index',
'upload_date': '20140720',
'formats': 'mincount:11',
},
'params': {
# m3u8 downloads
'skip_download': True,
}
},
# google redirect
{
'url': 'http://www.google.com/url?sa=t&rct=j&q=&esrc=s&source=web&cd=1&cad=rja&ved=0CCUQtwIwAA&url=http%3A%2F%2Fwww.youtube.com%2Fwatch%3Fv%3DcmQHVoWB5FY&ei=F-sNU-LLCaXk4QT52ICQBQ&usg=AFQjCNEw4hL29zgOohLXvpJ-Bdh2bils1Q&bvm=bv.61965928,d.bGE',
'info_dict': {
'id': 'cmQHVoWB5FY',
'ext': 'mp4',
'upload_date': '20130224',
'uploader_id': 'TheVerge',
'description': 're:^Chris Ziegler takes a look at the\.*',
'uploader': 'The Verge',
'title': 'First Firefox OS phones side-by-side',
},
'params': {
'skip_download': False,
}
},
{
# redirect in Refresh HTTP header
'url': 'https://www.facebook.com/l.php?u=https%3A%2F%2Fwww.youtube.com%2Fwatch%3Fv%3DpO8h3EaFRdo&h=TAQHsoToz&enc=AZN16h-b6o4Zq9pZkCCdOLNKMN96BbGMNtcFwHSaazus4JHT_MFYkAA-WARTX2kvsCIdlAIyHZjl6d33ILIJU7Jzwk_K3mcenAXoAzBNoZDI_Q7EXGDJnIhrGkLXo_LJ_pAa2Jzbx17UHMd3jAs--6j2zaeto5w9RTn8T_1kKg3fdC5WPX9Dbb18vzH7YFX0eSJmoa6SP114rvlkw6pkS1-T&s=1',
'info_dict': {
'id': 'pO8h3EaFRdo',
'ext': 'mp4',
'title': 'Tripeo Boiler Room x Dekmantel Festival DJ Set',
'description': 'md5:6294cc1af09c4049e0652b51a2df10d5',
'upload_date': '20150917',
'uploader_id': 'brtvofficial',
'uploader': 'Boiler Room',
},
'params': {
'skip_download': False,
},
},
{
'url': 'http://www.hodiho.fr/2013/02/regis-plante-sa-jeep.html',
'md5': '85b90ccc9d73b4acd9138d3af4c27f89',
'info_dict': {
'id': '13601338388002',
'ext': 'mp4',
'uploader': 'www.hodiho.fr',
'title': 'R\u00e9gis plante sa Jeep',
}
},
# bandcamp page with custom domain
{
'add_ie': ['Bandcamp'],
'url': 'http://bronyrock.com/track/the-pony-mash',
'info_dict': {
'id': '3235767654',
'ext': 'mp3',
'title': 'The Pony Mash',
'uploader': 'M_Pallante',
},
'skip': 'There is a limit of 200 free downloads / month for the test song',
},
# embedded brightcove video
        # it also tests brightcove videos that need the 'Referer' header set in
        # the HTTP requests
{
'add_ie': ['BrightcoveLegacy'],
'url': 'http://www.bfmtv.com/video/bfmbusiness/cours-bourse/cours-bourse-l-analyse-technique-154522/',
'info_dict': {
'id': '2765128793001',
'ext': 'mp4',
'title': 'Le cours de bourse : l’analyse technique',
'description': 'md5:7e9ad046e968cb2d1114004aba466fd9',
'uploader': 'BFM BUSINESS',
},
'params': {
'skip_download': True,
},
},
{
# https://github.com/rg3/youtube-dl/issues/2253
'url': 'http://bcove.me/i6nfkrc3',
'md5': '0ba9446db037002366bab3b3eb30c88c',
'info_dict': {
'id': '3101154703001',
'ext': 'mp4',
'title': 'Still no power',
'uploader': 'thestar.com',
'description': 'Mississauga resident David Farmer is still out of power as a result of the ice storm a month ago. To keep the house warm, Farmer cuts wood from his property for a wood burning stove downstairs.',
},
'add_ie': ['BrightcoveLegacy'],
},
{
'url': 'http://www.championat.com/video/football/v/87/87499.html',
'md5': 'fb973ecf6e4a78a67453647444222983',
'info_dict': {
'id': '3414141473001',
'ext': 'mp4',
'title': 'Видео. Удаление Дзагоева (ЦСКА)',
'description': 'Онлайн-трансляция матча ЦСКА - "Волга"',
'uploader': 'Championat',
},
},
{
# https://github.com/rg3/youtube-dl/issues/3541
'add_ie': ['BrightcoveLegacy'],
'url': 'http://www.kijk.nl/sbs6/leermijvrouwenkennen/videos/jqMiXKAYan2S/aflevering-1',
'info_dict': {
'id': '3866516442001',
'ext': 'mp4',
'title': 'Leer mij vrouwen kennen: Aflevering 1',
'description': 'Leer mij vrouwen kennen: Aflevering 1',
'uploader': 'SBS Broadcasting',
},
'skip': 'Restricted to Netherlands',
'params': {
'skip_download': True, # m3u8 download
},
},
# ooyala video
{
'url': 'http://www.rollingstone.com/music/videos/norwegian-dj-cashmere-cat-goes-spartan-on-with-me-premiere-20131219',
'md5': '166dd577b433b4d4ebfee10b0824d8ff',
'info_dict': {
'id': 'BwY2RxaTrTkslxOfcan0UCf0YqyvWysJ',
'ext': 'mp4',
'title': '2cc213299525360.mov', # that's what we get
'duration': 238.231,
},
'add_ie': ['Ooyala'],
},
{
# ooyala video embedded with http://player.ooyala.com/iframe.js
'url': 'http://www.macrumors.com/2015/07/24/steve-jobs-the-man-in-the-machine-first-trailer/',
'info_dict': {
'id': 'p0MGJndjoG5SOKqO_hZJuZFPB-Tr5VgB',
'ext': 'mp4',
'title': '"Steve Jobs: Man in the Machine" trailer',
'description': 'The first trailer for the Alex Gibney documentary "Steve Jobs: Man in the Machine."',
'duration': 135.427,
},
'params': {
'skip_download': True,
},
},
# embed.ly video
{
'url': 'http://www.tested.com/science/weird/460206-tested-grinding-coffee-2000-frames-second/',
'info_dict': {
'id': '9ODmcdjQcHQ',
'ext': 'mp4',
'title': 'Tested: Grinding Coffee at 2000 Frames Per Second',
'upload_date': '20140225',
'description': 'md5:06a40fbf30b220468f1e0957c0f558ff',
'uploader': 'Tested',
'uploader_id': 'testedcom',
},
# No need to test YoutubeIE here
'params': {
'skip_download': True,
},
},
# funnyordie embed
{
'url': 'http://www.theguardian.com/world/2014/mar/11/obama-zach-galifianakis-between-two-ferns',
'info_dict': {
'id': '18e820ec3f',
'ext': 'mp4',
'title': 'Between Two Ferns with Zach Galifianakis: President Barack Obama',
'description': 'Episode 18: President Barack Obama sits down with Zach Galifianakis for his most memorable interview yet.',
},
},
# RUTV embed
{
'url': 'http://www.rg.ru/2014/03/15/reg-dfo/anklav-anons.html',
'info_dict': {
'id': '776940',
'ext': 'mp4',
'title': 'Охотское море стало целиком российским',
'description': 'md5:5ed62483b14663e2a95ebbe115eb8f43',
},
'params': {
# m3u8 download
'skip_download': True,
},
},
# TVC embed
{
'url': 'http://sch1298sz.mskobr.ru/dou_edu/karamel_ki/filial_galleries/video/iframe_src_http_tvc_ru_video_iframe_id_55304_isplay_false_acc_video_id_channel_brand_id_11_show_episodes_episode_id_32307_frameb/',
'info_dict': {
'id': '55304',
'ext': 'mp4',
'title': 'Дошкольное воспитание',
},
},
# SportBox embed
{
'url': 'http://www.vestifinance.ru/articles/25753',
'info_dict': {
'id': '25753',
'title': 'Прямые трансляции с Форума-выставки "Госзаказ-2013"',
},
'playlist': [{
'info_dict': {
'id': '370908',
'title': 'Госзаказ. День 3',
'ext': 'mp4',
}
}, {
'info_dict': {
'id': '370905',
'title': 'Госзаказ. День 2',
'ext': 'mp4',
}
}, {
'info_dict': {
'id': '370902',
'title': 'Госзаказ. День 1',
'ext': 'mp4',
}
}],
'params': {
# m3u8 download
'skip_download': True,
},
},
# Myvi.ru embed
{
'url': 'http://www.kinomyvi.tv/news/detail/Pervij-dublirovannij-trejler--Uzhastikov-_nOw1',
'info_dict': {
'id': 'f4dafcad-ff21-423d-89b5-146cfd89fa1e',
'ext': 'mp4',
'title': 'Ужастики, русский трейлер (2015)',
'thumbnail': 're:^https?://.*\.jpg$',
'duration': 153,
}
},
# XHamster embed
{
'url': 'http://www.numisc.com/forum/showthread.php?11696-FM15-which-pumiscer-was-this-%28-vid-%29-%28-alfa-as-fuck-srx-%29&s=711f5db534502e22260dec8c5e2d66d8',
'info_dict': {
'id': 'showthread',
'title': '[NSFL] [FM15] which pumiscer was this ( vid ) ( alfa as fuck srx )',
},
'playlist_mincount': 7,
},
# Embedded TED video
{
'url': 'http://en.support.wordpress.com/videos/ted-talks/',
'md5': '65fdff94098e4a607385a60c5177c638',
'info_dict': {
'id': '1969',
'ext': 'mp4',
'title': 'Hidden miracles of the natural world',
'uploader': 'Louie Schwartzberg',
'description': 'md5:8145d19d320ff3e52f28401f4c4283b9',
}
},
# Embedded Ustream video
{
'url': 'http://www.american.edu/spa/pti/nsa-privacy-janus-2014.cfm',
'md5': '27b99cdb639c9b12a79bca876a073417',
'info_dict': {
'id': '45734260',
'ext': 'flv',
'uploader': 'AU SPA: The NSA and Privacy',
'title': 'NSA and Privacy Forum Debate featuring General Hayden and Barton Gellman'
}
},
# nowvideo embed hidden behind percent encoding
{
'url': 'http://www.waoanime.tv/the-super-dimension-fortress-macross-episode-1/',
'md5': '2baf4ddd70f697d94b1c18cf796d5107',
'info_dict': {
'id': '06e53103ca9aa',
'ext': 'flv',
'title': 'Macross Episode 001 Watch Macross Episode 001 onl',
'description': 'No description',
},
},
# arte embed
{
'url': 'http://www.tv-replay.fr/redirection/20-03-14/x-enius-arte-10753389.html',
'md5': '7653032cbb25bf6c80d80f217055fa43',
'info_dict': {
'id': '048195-004_PLUS7-F',
'ext': 'flv',
'title': 'X:enius',
'description': 'md5:d5fdf32ef6613cdbfd516ae658abf168',
'upload_date': '20140320',
},
'params': {
'skip_download': 'Requires rtmpdump'
}
},
# francetv embed
{
'url': 'http://www.tsprod.com/replay-du-concert-alcaline-de-calogero',
'info_dict': {
'id': 'EV_30231',
'ext': 'mp4',
'title': 'Alcaline, le concert avec Calogero',
'description': 'md5:61f08036dcc8f47e9cfc33aed08ffaff',
'upload_date': '20150226',
'timestamp': 1424989860,
'duration': 5400,
},
'params': {
# m3u8 downloads
'skip_download': True,
},
'expected_warnings': [
'Forbidden'
]
},
# Condé Nast embed
{
'url': 'http://www.wired.com/2014/04/honda-asimo/',
'md5': 'ba0dfe966fa007657bd1443ee672db0f',
'info_dict': {
'id': '53501be369702d3275860000',
'ext': 'mp4',
'title': 'Honda’s New Asimo Robot Is More Human Than Ever',
}
},
# Dailymotion embed
{
'url': 'http://www.spi0n.com/zap-spi0n-com-n216/',
'md5': '441aeeb82eb72c422c7f14ec533999cd',
'info_dict': {
'id': 'k2mm4bCdJ6CQ2i7c8o2',
'ext': 'mp4',
'title': 'Le Zap de Spi0n n°216 - Zapping du Web',
'description': 'md5:faf028e48a461b8b7fad38f1e104b119',
'uploader': 'Spi0n',
'uploader_id': 'xgditw',
'upload_date': '20140425',
'timestamp': 1398441542,
},
'add_ie': ['Dailymotion'],
},
# YouTube embed
{
'url': 'http://www.badzine.de/ansicht/datum/2014/06/09/so-funktioniert-die-neue-englische-badminton-liga.html',
'info_dict': {
'id': 'FXRb4ykk4S0',
'ext': 'mp4',
'title': 'The NBL Auction 2014',
'uploader': 'BADMINTON England',
'uploader_id': 'BADMINTONEvents',
'upload_date': '20140603',
'description': 'md5:9ef128a69f1e262a700ed83edb163a73',
},
'add_ie': ['Youtube'],
'params': {
'skip_download': True,
}
},
        # MTVServices embed
{
'url': 'http://www.vulture.com/2016/06/new-key-peele-sketches-released.html',
'md5': 'ca1aef97695ef2c1d6973256a57e5252',
'info_dict': {
'id': '769f7ec0-0692-4d62-9b45-0d88074bffc1',
'ext': 'mp4',
'title': 'Key and Peele|October 10, 2012|2|203|Liam Neesons - Uncensored',
'description': 'Two valets share their love for movie star Liam Neesons.',
'timestamp': 1349922600,
'upload_date': '20121011',
},
},
# YouTube embed via <data-embed-url="">
{
'url': 'https://play.google.com/store/apps/details?id=com.gameloft.android.ANMP.GloftA8HM',
'info_dict': {
'id': '4vAffPZIT44',
'ext': 'mp4',
'title': 'Asphalt 8: Airborne - Update - Welcome to Dubai!',
'uploader': 'Gameloft',
'uploader_id': 'gameloft',
'upload_date': '20140828',
'description': 'md5:c80da9ed3d83ae6d1876c834de03e1c4',
},
'params': {
'skip_download': True,
}
},
# Camtasia studio
{
'url': 'http://www.ll.mit.edu/workshops/education/videocourses/antennas/lecture1/video/',
'playlist': [{
'md5': '0c5e352edabf715d762b0ad4e6d9ee67',
'info_dict': {
'id': 'Fenn-AA_PA_Radar_Course_Lecture_1c_Final',
'title': 'Fenn-AA_PA_Radar_Course_Lecture_1c_Final - video1',
'ext': 'flv',
'duration': 2235.90,
}
}, {
'md5': '10e4bb3aaca9fd630e273ff92d9f3c63',
'info_dict': {
'id': 'Fenn-AA_PA_Radar_Course_Lecture_1c_Final_PIP',
'title': 'Fenn-AA_PA_Radar_Course_Lecture_1c_Final - pip',
'ext': 'flv',
'duration': 2235.93,
}
}],
'info_dict': {
'title': 'Fenn-AA_PA_Radar_Course_Lecture_1c_Final',
}
},
# Flowplayer
{
'url': 'http://www.handjobhub.com/video/busty-blonde-siri-tit-fuck-while-wank-6313.html',
'md5': '9d65602bf31c6e20014319c7d07fba27',
'info_dict': {
'id': '5123ea6d5e5a7',
'ext': 'mp4',
'age_limit': 18,
'uploader': 'www.handjobhub.com',
'title': 'Busty Blonde Siri Tit Fuck While Wank at HandjobHub.com',
}
},
# Multiple brightcove videos
# https://github.com/rg3/youtube-dl/issues/2283
{
'url': 'http://www.newyorker.com/online/blogs/newsdesk/2014/01/always-never-nuclear-command-and-control.html',
'info_dict': {
'id': 'always-never',
'title': 'Always / Never - The New Yorker',
},
'playlist_count': 3,
'params': {
'extract_flat': False,
'skip_download': True,
}
},
# MLB embed
{
'url': 'http://umpire-empire.com/index.php/topic/58125-laz-decides-no-thats-low/',
'md5': '96f09a37e44da40dd083e12d9a683327',
'info_dict': {
'id': '33322633',
'ext': 'mp4',
'title': 'Ump changes call to ball',
'description': 'md5:71c11215384298a172a6dcb4c2e20685',
'duration': 48,
'timestamp': 1401537900,
'upload_date': '20140531',
'thumbnail': 're:^https?://.*\.jpg$',
},
},
# Wistia embed
{
'url': 'http://study.com/academy/lesson/north-american-exploration-failed-colonies-of-spain-france-england.html#lesson',
'md5': '1953f3a698ab51cfc948ed3992a0b7ff',
'info_dict': {
'id': '6e2wtrbdaf',
'ext': 'mov',
'title': 'paywall_north-american-exploration-failed-colonies-of-spain-france-england',
'description': 'a Paywall Videos video from Remilon',
'duration': 644.072,
'uploader': 'study.com',
'timestamp': 1459678540,
'upload_date': '20160403',
'filesize': 24687186,
},
},
{
'url': 'http://thoughtworks.wistia.com/medias/uxjb0lwrcz',
'md5': 'baf49c2baa8a7de5f3fc145a8506dcd4',
'info_dict': {
'id': 'uxjb0lwrcz',
'ext': 'mp4',
'title': 'Conversation about Hexagonal Rails Part 1',
'description': 'a Martin Fowler video from ThoughtWorks',
'duration': 1715.0,
'uploader': 'thoughtworks.wistia.com',
'timestamp': 1401832161,
'upload_date': '20140603',
},
},
# Wistia standard embed (async)
{
'url': 'https://www.getdrip.com/university/brennan-dunn-drip-workshop/',
'info_dict': {
'id': '807fafadvk',
'ext': 'mp4',
'title': 'Drip Brennan Dunn Workshop',
'description': 'a JV Webinars video from getdrip-1',
'duration': 4986.95,
'timestamp': 1463607249,
'upload_date': '20160518',
},
'params': {
'skip_download': True,
}
},
# Soundcloud embed
{
'url': 'http://nakedsecurity.sophos.com/2014/10/29/sscc-171-are-you-sure-that-1234-is-a-bad-password-podcast/',
'info_dict': {
'id': '174391317',
'ext': 'mp3',
'description': 'md5:ff867d6b555488ad3c52572bb33d432c',
'uploader': 'Sophos Security',
'title': 'Chet Chat 171 - Oct 29, 2014',
'upload_date': '20141029',
}
},
# Soundcloud multiple embeds
{
'url': 'http://www.guitarplayer.com/lessons/1014/legato-workout-one-hour-to-more-fluid-performance---tab/52809',
'info_dict': {
'id': '52809',
'title': 'Guitar Essentials: Legato Workout—One-Hour to Fluid Performance | TAB + AUDIO',
},
'playlist_mincount': 7,
},
# Livestream embed
{
'url': 'http://www.esa.int/Our_Activities/Space_Science/Rosetta/Philae_comet_touch-down_webcast',
'info_dict': {
'id': '67864563',
'ext': 'flv',
'upload_date': '20141112',
'title': 'Rosetta #CometLanding webcast HL 10',
}
},
# Another Livestream embed, without 'new.' in URL
{
'url': 'https://www.freespeech.org/',
'info_dict': {
'id': '123537347',
'ext': 'mp4',
'title': 're:^FSTV [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$',
},
'params': {
# Live stream
'skip_download': True,
},
},
# LazyYT
{
'url': 'http://discourse.ubuntu.com/t/unity-8-desktop-mode-windows-on-mir/1986',
'info_dict': {
'id': '1986',
'title': 'Unity 8 desktop-mode windows on Mir! - Ubuntu Discourse',
},
'playlist_mincount': 2,
},
# Cinchcast embed
{
'url': 'http://undergroundwellness.com/podcasts/306-5-steps-to-permanent-gut-healing/',
'info_dict': {
'id': '7141703',
'ext': 'mp3',
'upload_date': '20141126',
'title': 'Jack Tips: 5 Steps to Permanent Gut Healing',
}
},
# Cinerama player
{
'url': 'http://www.abc.net.au/7.30/content/2015/s4164797.htm',
'info_dict': {
'id': '730m_DandD_1901_512k',
'ext': 'mp4',
'uploader': 'www.abc.net.au',
'title': 'Game of Thrones with dice - Dungeons and Dragons fantasy role-playing game gets new life - 19/01/2015',
}
},
# embedded viddler video
{
'url': 'http://deadspin.com/i-cant-stop-watching-john-wall-chop-the-nuggets-with-th-1681801597',
'info_dict': {
'id': '4d03aad9',
'ext': 'mp4',
'uploader': 'deadspin',
'title': 'WALL-TO-GORTAT',
'timestamp': 1422285291,
'upload_date': '20150126',
},
'add_ie': ['Viddler'],
},
# Libsyn embed
{
'url': 'http://thedailyshow.cc.com/podcast/episodetwelve',
'info_dict': {
'id': '3377616',
'ext': 'mp3',
'title': "The Daily Show Podcast without Jon Stewart - Episode 12: Bassem Youssef: Egypt's Jon Stewart",
'description': 'md5:601cb790edd05908957dae8aaa866465',
'upload_date': '20150220',
},
'skip': 'All The Daily Show URLs now redirect to http://www.cc.com/shows/',
},
# jwplayer YouTube
{
'url': 'http://media.nationalarchives.gov.uk/index.php/webinar-using-discovery-national-archives-online-catalogue/',
'info_dict': {
'id': 'Mrj4DVp2zeA',
'ext': 'mp4',
'upload_date': '20150212',
'uploader': 'The National Archives UK',
'description': 'md5:a236581cd2449dd2df4f93412f3f01c6',
'uploader_id': 'NationalArchives08',
'title': 'Webinar: Using Discovery, The National Archives’ online catalogue',
},
},
# rtl.nl embed
{
'url': 'http://www.rtlnieuws.nl/nieuws/buitenland/aanslagen-kopenhagen',
'playlist_mincount': 5,
'info_dict': {
'id': 'aanslagen-kopenhagen',
'title': 'Aanslagen Kopenhagen | RTL Nieuws',
}
},
# Zapiks embed
{
'url': 'http://www.skipass.com/news/116090-bon-appetit-s5ep3-baqueira-mi-cor.html',
'info_dict': {
'id': '118046',
'ext': 'mp4',
'title': 'EP3S5 - Bon Appétit - Baqueira Mi Corazon !',
}
},
# Kaltura embed (different embed code)
{
'url': 'http://www.premierchristianradio.com/Shows/Saturday/Unbelievable/Conference-Videos/Os-Guinness-Is-It-Fools-Talk-Unbelievable-Conference-2014',
'info_dict': {
'id': '1_a52wc67y',
'ext': 'flv',
'upload_date': '20150127',
'uploader_id': 'PremierMedia',
'timestamp': int,
'title': 'Os Guinness // Is It Fools Talk? // Unbelievable? Conference 2014',
},
},
# Kaltura embed protected with referrer
{
'url': 'http://www.disney.nl/disney-channel/filmpjes/achter-de-schermen#/videoId/violetta-achter-de-schermen-ruggero',
'info_dict': {
'id': '1_g4fbemnq',
'ext': 'mp4',
'title': 'Violetta - Achter De Schermen - Ruggero',
'description': 'Achter de schermen met Ruggero',
'timestamp': 1435133761,
'upload_date': '20150624',
'uploader_id': 'echojecka',
},
},
# Kaltura embed with single quotes
{
'url': 'http://fod.infobase.com/p_ViewPlaylist.aspx?AssignmentID=NUN8ZY',
'info_dict': {
'id': '0_izeg5utt',
'ext': 'mp4',
'title': '35871',
'timestamp': 1355743100,
'upload_date': '20121217',
'uploader_id': 'batchUser',
},
'add_ie': ['Kaltura'],
},
{
# Kaltura embedded via quoted entry_id
'url': 'https://www.oreilly.com/ideas/my-cloud-makes-pretty-pictures',
'info_dict': {
'id': '0_utuok90b',
'ext': 'mp4',
'title': '06_matthew_brender_raj_dutt',
'timestamp': 1466638791,
'upload_date': '20160622',
},
'add_ie': ['Kaltura'],
'expected_warnings': [
'Could not send HEAD request'
],
'params': {
'skip_download': True,
}
},
# Eagle.Platform embed (generic URL)
{
'url': 'http://lenta.ru/news/2015/03/06/navalny/',
# Not checking MD5 as sometimes the direct HTTP link results in 404 and HLS is used
'info_dict': {
'id': '227304',
'ext': 'mp4',
'title': 'Навальный вышел на свободу',
'description': 'md5:d97861ac9ae77377f3f20eaf9d04b4f5',
'thumbnail': 're:^https?://.*\.jpg$',
'duration': 87,
'view_count': int,
'age_limit': 0,
},
},
# ClipYou (Eagle.Platform) embed (custom URL)
{
'url': 'http://muz-tv.ru/play/7129/',
# Not checking MD5 as sometimes the direct HTTP link results in 404 and HLS is used
'info_dict': {
'id': '12820',
'ext': 'mp4',
'title': "'O Sole Mio",
'thumbnail': 're:^https?://.*\.jpg$',
'duration': 216,
'view_count': int,
},
},
# Pladform embed
{
'url': 'http://muz-tv.ru/kinozal/view/7400/',
'info_dict': {
'id': '100183293',
'ext': 'mp4',
'title': 'Тайны перевала Дятлова • 1 серия 2 часть',
'description': 'Документальный сериал-расследование одной из самых жутких тайн ХХ века',
'thumbnail': 're:^https?://.*\.jpg$',
'duration': 694,
'age_limit': 0,
},
},
# Playwire embed
{
'url': 'http://www.cinemablend.com/new/First-Joe-Dirt-2-Trailer-Teaser-Stupid-Greatness-70874.html',
'info_dict': {
'id': '3519514',
'ext': 'mp4',
'title': 'Joe Dirt 2 Beautiful Loser Teaser Trailer',
'thumbnail': 're:^https?://.*\.png$',
'duration': 45.115,
},
},
# 5min embed
{
'url': 'http://techcrunch.com/video/facebook-creates-on-this-day-crunch-report/518726732/',
'md5': '4c6f127a30736b59b3e2c19234ee2bf7',
'info_dict': {
'id': '518726732',
'ext': 'mp4',
'title': 'Facebook Creates "On This Day" | Crunch Report',
},
},
# SVT embed
{
'url': 'http://www.svt.se/sport/ishockey/jagr-tacklar-giroux-under-intervjun',
'info_dict': {
'id': '2900353',
'ext': 'flv',
'title': 'Här trycker Jagr till Giroux (under SVT-intervjun)',
'duration': 27,
'age_limit': 0,
},
},
# Crooks and Liars embed
{
'url': 'http://crooksandliars.com/2015/04/fox-friends-says-protecting-atheists',
'info_dict': {
'id': '8RUoRhRi',
'ext': 'mp4',
'title': "Fox & Friends Says Protecting Atheists From Discrimination Is Anti-Christian!",
'description': 'md5:e1a46ad1650e3a5ec7196d432799127f',
'timestamp': 1428207000,
'upload_date': '20150405',
'uploader': 'Heather',
},
},
# Crooks and Liars external embed
{
'url': 'http://theothermccain.com/2010/02/02/video-proves-that-bill-kristol-has-been-watching-glenn-beck/comment-page-1/',
'info_dict': {
'id': 'MTE3MjUtMzQ2MzA',
'ext': 'mp4',
'title': 'md5:5e3662a81a4014d24c250d76d41a08d5',
'description': 'md5:9b8e9542d6c3c5de42d6451b7d780cec',
'timestamp': 1265032391,
'upload_date': '20100201',
'uploader': 'Heather',
},
},
# NBC Sports vplayer embed
{
'url': 'http://www.riderfans.com/forum/showthread.php?121827-Freeman&s=e98fa1ea6dc08e886b1678d35212494a',
'info_dict': {
'id': 'ln7x1qSThw4k',
'ext': 'flv',
'title': "PFT Live: New leader in the 'new-look' defense",
'description': 'md5:65a19b4bbfb3b0c0c5768bed1dfad74e',
'uploader': 'NBCU-SPORTS',
'upload_date': '20140107',
'timestamp': 1389118457,
},
},
# NBC News embed
{
'url': 'http://www.vulture.com/2016/06/letterman-couldnt-care-less-about-late-night.html',
'md5': '1aa589c675898ae6d37a17913cf68d66',
'info_dict': {
'id': '701714499682',
'ext': 'mp4',
'title': 'PREVIEW: On Assignment: David Letterman',
'description': 'A preview of Tom Brokaw\'s interview with David Letterman as part of the On Assignment series powered by Dateline. Airs Sunday June 12 at 7/6c.',
},
},
# UDN embed
{
'url': 'https://video.udn.com/news/300346',
'md5': 'fd2060e988c326991037b9aff9df21a6',
'info_dict': {
'id': '300346',
'ext': 'mp4',
'title': '中一中男師變性 全校師生力挺',
'thumbnail': 're:^https?://.*\.jpg$',
},
'params': {
# m3u8 download
'skip_download': True,
},
},
# Ooyala embed
{
'url': 'http://www.businessinsider.com/excel-index-match-vlookup-video-how-to-2015-2?IR=T',
'info_dict': {
'id': '50YnY4czr4ms1vJ7yz3xzq0excz_pUMs',
'ext': 'mp4',
'description': 'VIDEO: INDEX/MATCH versus VLOOKUP.',
'title': 'This is what separates the Excel masters from the wannabes',
'duration': 191.933,
},
'params': {
# m3u8 downloads
'skip_download': True,
}
},
# Brightcove URL in single quotes
{
'url': 'http://www.sportsnet.ca/baseball/mlb/sn-presents-russell-martin-world-citizen/',
'md5': '4ae374f1f8b91c889c4b9203c8c752af',
'info_dict': {
'id': '4255764656001',
'ext': 'mp4',
'title': 'SN Presents: Russell Martin, World Citizen',
'description': 'To understand why he was the Toronto Blue Jays’ top off-season priority is to appreciate his background and upbringing in Montreal, where he first developed his baseball skills. Written and narrated by Stephen Brunt.',
'uploader': 'Rogers Sportsnet',
'uploader_id': '1704050871',
'upload_date': '20150525',
'timestamp': 1432570283,
},
},
# Dailymotion Cloud video
{
'url': 'http://replay.publicsenat.fr/vod/le-debat/florent-kolandjian,dominique-cena,axel-decourtye,laurence-abeille,bruno-parmentier/175910',
'md5': 'dcaf23ad0c67a256f4278bce6e0bae38',
'info_dict': {
'id': 'x2uy8t3',
'ext': 'mp4',
'title': 'Sauvons les abeilles ! - Le débat',
'description': 'md5:d9082128b1c5277987825d684939ca26',
'thumbnail': 're:^https?://.*\.jpe?g$',
'timestamp': 1434970506,
'upload_date': '20150622',
'uploader': 'Public Sénat',
'uploader_id': 'xa9gza',
}
},
# OnionStudios embed
{
'url': 'http://www.clickhole.com/video/dont-understand-bitcoin-man-will-mumble-explanatio-2537',
'info_dict': {
'id': '2855',
'ext': 'mp4',
'title': 'Don’t Understand Bitcoin? This Man Will Mumble An Explanation At You',
'thumbnail': 're:^https?://.*\.jpe?g$',
'uploader': 'ClickHole',
'uploader_id': 'clickhole',
}
},
# SnagFilms embed
{
'url': 'http://whilewewatch.blogspot.ru/2012/06/whilewewatch-whilewewatch-gripping.html',
'info_dict': {
'id': '74849a00-85a9-11e1-9660-123139220831',
'ext': 'mp4',
'title': '#whilewewatch',
}
},
# AdobeTVVideo embed
{
'url': 'https://helpx.adobe.com/acrobat/how-to/new-experience-acrobat-dc.html?set=acrobat--get-started--essential-beginners',
'md5': '43662b577c018ad707a63766462b1e87',
'info_dict': {
'id': '2456',
'ext': 'mp4',
'title': 'New experience with Acrobat DC',
'description': 'New experience with Acrobat DC',
'duration': 248.667,
},
},
# ScreenwaveMedia embed
{
'url': 'http://www.thecinemasnob.com/the-cinema-snob/a-nightmare-on-elm-street-2-freddys-revenge1',
'md5': '24ace5baba0d35d55c6810b51f34e9e0',
'info_dict': {
'id': 'cinemasnob-55d26273809dd',
'ext': 'mp4',
'title': 'cinemasnob',
},
},
# BrightcoveInPageEmbed embed
{
'url': 'http://www.geekandsundry.com/tabletop-bonus-wils-final-thoughts-on-dread/',
'info_dict': {
'id': '4238694884001',
'ext': 'flv',
'title': 'Tabletop: Dread, Last Thoughts',
'description': 'Tabletop: Dread, Last Thoughts',
'duration': 51690,
},
},
# JWPlayer with M3U8
{
'url': 'http://ren.tv/novosti/2015-09-25/sluchaynyy-prohozhiy-poymal-avtougonshchika-v-murmanske-video',
'info_dict': {
'id': 'playlist',
'ext': 'mp4',
'title': 'Случайный прохожий поймал автоугонщика в Мурманске. ВИДЕО | РЕН ТВ',
'uploader': 'ren.tv',
},
'params': {
# m3u8 downloads
'skip_download': True,
}
},
# Brightcove embed, with no valid 'renditions' but valid 'IOSRenditions'
        # This video can't be played in browsers if Flash is disabled and the UA
        # is set to iPhone, which is actually a false alarm
{
'url': 'https://dl.dropboxusercontent.com/u/29092637/interview.html',
'info_dict': {
'id': '4785848093001',
'ext': 'mp4',
'title': 'The Cardinal Pell Interview',
'description': 'Sky News Contributor Andrew Bolt interviews George Pell in Rome, following the Cardinal\'s evidence before the Royal Commission into Child Abuse. ',
'uploader': 'GlobeCast Australia - GlobeStream',
'uploader_id': '2733773828001',
'upload_date': '20160304',
'timestamp': 1457083087,
},
'params': {
# m3u8 downloads
'skip_download': True,
},
},
# Another form of arte.tv embed
{
'url': 'http://www.tv-replay.fr/redirection/09-04-16/arte-reportage-arte-11508975.html',
'md5': '850bfe45417ddf221288c88a0cffe2e2',
'info_dict': {
'id': '030273-562_PLUS7-F',
'ext': 'mp4',
'title': 'ARTE Reportage - Nulle part, en France',
'description': 'md5:e3a0e8868ed7303ed509b9e3af2b870d',
'upload_date': '20160409',
},
},
# LiveLeak embed
{
'url': 'http://www.wykop.pl/link/3088787/',
'md5': 'ace83b9ed19b21f68e1b50e844fdf95d',
'info_dict': {
'id': '874_1459135191',
'ext': 'mp4',
'title': 'Man shows poor quality of new apartment building',
'description': 'The wall is like a sand pile.',
'uploader': 'Lake8737',
}
},
# Duplicated embedded video URLs
{
'url': 'http://www.hudl.com/athlete/2538180/highlights/149298443',
'info_dict': {
'id': '149298443_480_16c25b74_2',
'ext': 'mp4',
'title': 'vs. Blue Orange Spring Game',
'uploader': 'www.hudl.com',
},
},
# twitter:player:stream embed
{
'url': 'http://www.rtl.be/info/video/589263.aspx?CategoryID=288',
'info_dict': {
'id': 'master',
'ext': 'mp4',
'title': 'Une nouvelle espèce de dinosaure découverte en Argentine',
'uploader': 'www.rtl.be',
},
'params': {
# m3u8 downloads
'skip_download': True,
},
},
# twitter:player embed
{
'url': 'http://www.theatlantic.com/video/index/484130/what-do-black-holes-sound-like/',
'md5': 'a3e0df96369831de324f0778e126653c',
'info_dict': {
'id': '4909620399001',
'ext': 'mp4',
'title': 'What Do Black Holes Sound Like?',
'description': 'what do black holes sound like',
'upload_date': '20160524',
'uploader_id': '29913724001',
'timestamp': 1464107587,
'uploader': 'TheAtlantic',
},
'add_ie': ['BrightcoveLegacy'],
},
# Facebook <iframe> embed
{
'url': 'https://www.hostblogger.de/blog/archives/6181-Auto-jagt-Betonmischer.html',
'md5': 'fbcde74f534176ecb015849146dd3aee',
'info_dict': {
'id': '599637780109885',
'ext': 'mp4',
'title': 'Facebook video #599637780109885',
},
},
# Facebook API embed
{
'url': 'http://www.lothype.com/blue-stars-2016-preview-standstill-full-show/',
'md5': 'a47372ee61b39a7b90287094d447d94e',
'info_dict': {
'id': '10153467542406923',
'ext': 'mp4',
'title': 'Facebook video #10153467542406923',
},
},
# Wordpress "YouTube Video Importer" plugin
{
'url': 'http://www.lothype.com/blue-devils-drumline-stanford-lot-2016/',
'md5': 'd16797741b560b485194eddda8121b48',
'info_dict': {
'id': 'HNTXWDXV9Is',
'ext': 'mp4',
'title': 'Blue Devils Drumline Stanford lot 2016',
'upload_date': '20160627',
'uploader_id': 'GENOCIDE8GENERAL10',
'uploader': 'cylus cyrus',
},
},
{
# video stored on custom kaltura server
'url': 'http://www.expansion.com/multimedia/videos.html?media=EQcM30NHIPv',
'md5': '537617d06e64dfed891fa1593c4b30cc',
'info_dict': {
'id': '0_1iotm5bh',
'ext': 'mp4',
'title': 'Elecciones británicas: 5 lecciones para Rajoy',
'description': 'md5:435a89d68b9760b92ce67ed227055f16',
'uploader_id': 'videos.expansion@el-mundo.net',
'upload_date': '20150429',
'timestamp': 1430303472,
},
'add_ie': ['Kaltura'],
},
{
# Non-standard Vimeo embed
'url': 'https://openclassrooms.com/courses/understanding-the-web',
'md5': '64d86f1c7d369afd9a78b38cbb88d80a',
'info_dict': {
'id': '148867247',
'ext': 'mp4',
'title': 'Understanding the web - Teaser',
'description': 'This is "Understanding the web - Teaser" by openclassrooms on Vimeo, the home for high quality videos and the people who love them.',
'upload_date': '20151214',
'uploader': 'OpenClassrooms',
'uploader_id': 'openclassrooms',
},
'add_ie': ['Vimeo'],
},
{
'url': 'https://support.arkena.com/display/PLAY/Ways+to+embed+your+video',
'md5': 'b96f2f71b359a8ecd05ce4e1daa72365',
'info_dict': {
'id': 'b41dda37-d8e7-4d3f-b1b5-9a9db578bdfe',
'ext': 'mp4',
'title': 'Big Buck Bunny',
'description': 'Royalty free test video',
'timestamp': 1432816365,
'upload_date': '20150528',
'is_live': False,
},
'params': {
'skip_download': True,
},
'add_ie': [ArkenaIE.ie_key()],
},
{
'url': 'http://nova.bg/news/view/2016/08/16/156543/%D0%BD%D0%B0-%D0%BA%D0%BE%D1%81%D1%8A%D0%BC-%D0%BE%D1%82-%D0%B2%D0%B7%D1%80%D0%B8%D0%B2-%D0%BE%D1%82%D1%86%D0%B5%D0%BF%D0%B8%D1%85%D0%B0-%D1%86%D1%8F%D0%BB-%D0%BA%D0%B2%D0%B0%D1%80%D1%82%D0%B0%D0%BB-%D0%B7%D0%B0%D1%80%D0%B0%D0%B4%D0%B8-%D0%B8%D0%B7%D1%82%D0%B8%D1%87%D0%B0%D0%BD%D0%B5-%D0%BD%D0%B0-%D0%B3%D0%B0%D0%B7-%D0%B2-%D0%BF%D0%BB%D0%BE%D0%B2%D0%B4%D0%B8%D0%B2/',
'info_dict': {
'id': '1c7141f46c',
'ext': 'mp4',
'title': 'НА КОСЪМ ОТ ВЗРИВ: Изтичане на газ на бензиностанция в Пловдив',
},
'params': {
'skip_download': True,
},
'add_ie': [Vbox7IE.ie_key()],
},
# {
# # TODO: find another test
# # http://schema.org/VideoObject
# 'url': 'https://flipagram.com/f/nyvTSJMKId',
# 'md5': '888dcf08b7ea671381f00fab74692755',
# 'info_dict': {
# 'id': 'nyvTSJMKId',
# 'ext': 'mp4',
# 'title': 'Flipagram by sjuria101 featuring Midnight Memories by One Direction',
# 'description': '#love for cats.',
# 'timestamp': 1461244995,
# 'upload_date': '20160421',
# },
# 'params': {
# 'force_generic_extractor': True,
# },
# }
]
def report_following_redirect(self, new_url):
"""Report information extraction."""
self._downloader.to_screen('[redirect] Following redirect to %s' % new_url)
def _extract_rss(self, url, video_id, doc):
playlist_title = doc.find('./channel/title').text
playlist_desc_el = doc.find('./channel/description')
playlist_desc = None if playlist_desc_el is None else playlist_desc_el.text
entries = []
for it in doc.findall('./channel/item'):
next_url = xpath_text(it, 'link', fatal=False)
if not next_url:
enclosure_nodes = it.findall('./enclosure')
for e in enclosure_nodes:
next_url = e.attrib.get('url')
if next_url:
break
if not next_url:
continue
entries.append({
'_type': 'url',
'url': next_url,
'title': it.find('title').text,
})
return {
'_type': 'playlist',
'id': url,
'title': playlist_title,
'description': playlist_desc,
'entries': entries,
}
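    # Worked example (illustrative, not from the source): an RSS item such as
    #   <item><title>Ep 1</title><link>http://example.com/ep1</link></item>
    # becomes {'_type': 'url', 'url': 'http://example.com/ep1', 'title': 'Ep 1'};
    # items lacking <link> fall back to the first <enclosure url="..."> instead.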
def _extract_camtasia(self, url, video_id, webpage):
""" Returns None if no camtasia video can be found. """
camtasia_cfg = self._search_regex(
r'fo\.addVariable\(\s*"csConfigFile",\s*"([^"]+)"\s*\);',
webpage, 'camtasia configuration file', default=None)
if camtasia_cfg is None:
return None
title = self._html_search_meta('DC.title', webpage, fatal=True)
camtasia_url = compat_urlparse.urljoin(url, camtasia_cfg)
camtasia_cfg = self._download_xml(
camtasia_url, video_id,
note='Downloading camtasia configuration',
errnote='Failed to download camtasia configuration')
fileset_node = camtasia_cfg.find('./playlist/array/fileset')
entries = []
for n in fileset_node.getchildren():
url_n = n.find('./uri')
if url_n is None:
continue
entries.append({
'id': os.path.splitext(url_n.text.rpartition('/')[2])[0],
'title': '%s - %s' % (title, n.tag),
'url': compat_urlparse.urljoin(url, url_n.text),
'duration': float_or_none(n.find('./duration').text),
})
return {
'_type': 'playlist',
'entries': entries,
'title': title,
}
def _real_extract(self, url):
if url.startswith('//'):
return {
'_type': 'url',
'url': self.http_scheme() + url,
}
parsed_url = compat_urlparse.urlparse(url)
if not parsed_url.scheme:
default_search = self._downloader.params.get('default_search')
if default_search is None:
default_search = 'fixup_error'
if default_search in ('auto', 'auto_warning', 'fixup_error'):
if '/' in url:
self._downloader.report_warning('The url doesn\'t specify the protocol, trying with http')
return self.url_result('http://' + url)
elif default_search != 'fixup_error':
if default_search == 'auto_warning':
if re.match(r'^(?:url|URL)$', url):
raise ExtractorError(
'Invalid URL: %r . Call youtube-dl like this: youtube-dl -v "https://www.youtube.com/watch?v=BaW_jenozKc" ' % url,
expected=True)
else:
self._downloader.report_warning(
'Falling back to youtube search for %s . Set --default-search "auto" to suppress this warning.' % url)
return self.url_result('ytsearch:' + url)
if default_search in ('error', 'fixup_error'):
raise ExtractorError(
'%r is not a valid URL. '
'Set --default-search "ytsearch" (or run youtube-dl "ytsearch:%s" ) to search YouTube'
% (url, url), expected=True)
else:
if ':' not in default_search:
default_search += ':'
return self.url_result(default_search + url)
url, smuggled_data = unsmuggle_url(url)
force_videoid = None
is_intentional = smuggled_data and smuggled_data.get('to_generic')
if smuggled_data and 'force_videoid' in smuggled_data:
force_videoid = smuggled_data['force_videoid']
video_id = force_videoid
else:
video_id = compat_urllib_parse_unquote(os.path.splitext(url.rstrip('/').split('/')[-1])[0])
self.to_screen('%s: Requesting header' % video_id)
head_req = HEADRequest(url)
head_response = self._request_webpage(
head_req, video_id,
note=False, errnote='Could not send HEAD request to %s' % url,
fatal=False)
if head_response is not False:
# Check for redirect
new_url = head_response.geturl()
if url != new_url:
self.report_following_redirect(new_url)
if force_videoid:
new_url = smuggle_url(
new_url, {'force_videoid': force_videoid})
return self.url_result(new_url)
full_response = None
if head_response is False:
request = sanitized_Request(url)
request.add_header('Accept-Encoding', '*')
full_response = self._request_webpage(request, video_id)
head_response = full_response
info_dict = {
'id': video_id,
'title': compat_urllib_parse_unquote(os.path.splitext(url_basename(url))[0]),
'upload_date': unified_strdate(head_response.headers.get('Last-Modified'))
}
# Check for direct link to a video
content_type = head_response.headers.get('Content-Type', '').lower()
m = re.match(r'^(?P<type>audio|video|application(?=/(?:ogg$|(?:vnd\.apple\.|x-)?mpegurl)))/(?P<format_id>[^;\s]+)', content_type)
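        # Illustrative matches (assumed examples): 'video/mp4' yields
        # type='video' and format_id='mp4'; 'audio/x-mpegurl' yields
        # format_id='x-mpegurl', which is handled as m3u8 below; 'text/html'
        # does not match, so extraction falls through to webpage analysis.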
if m:
format_id = m.group('format_id')
if format_id.endswith('mpegurl'):
formats = self._extract_m3u8_formats(url, video_id, 'mp4')
elif format_id == 'f4m':
formats = self._extract_f4m_formats(url, video_id)
else:
formats = [{
'format_id': m.group('format_id'),
'url': url,
'vcodec': 'none' if m.group('type') == 'audio' else None
}]
info_dict['direct'] = True
self._sort_formats(formats)
info_dict['formats'] = formats
return info_dict
if not self._downloader.params.get('test', False) and not is_intentional:
force = self._downloader.params.get('force_generic_extractor', False)
self._downloader.report_warning(
'%s on generic information extractor.' % ('Forcing' if force else 'Falling back'))
if not full_response:
request = sanitized_Request(url)
            # Some webservers serve compressed content of considerable size (e.g. gzipped
            # flac), making it impossible to download only a chunk of the file (yet we need
            # only 512kB to test whether it's HTML or not). With youtube-dl's default
            # Accept-Encoding header that would always result in downloading the whole file,
            # which is not desirable.
            # Therefore, for the extraction pass we override Accept-Encoding to '*' so the
            # server sends raw bytes and we can download only a chunk.
            # It might be better to solve this by checking Content-Type for
            # application/octet-stream after the HEAD request finishes, but it is unclear
            # whether we can rely on that.
request.add_header('Accept-Encoding', '*')
full_response = self._request_webpage(request, video_id)
first_bytes = full_response.read(512)
# Is it an M3U playlist?
if first_bytes.startswith(b'#EXTM3U'):
info_dict['formats'] = self._extract_m3u8_formats(url, video_id, 'mp4')
self._sort_formats(info_dict['formats'])
return info_dict
# Maybe it's a direct link to a video?
# Be careful not to download the whole thing!
if not is_html(first_bytes):
self._downloader.report_warning(
'URL could be a direct video link, returning it as such.')
info_dict.update({
'direct': True,
'url': url,
})
return info_dict
webpage = self._webpage_read_content(
full_response, url, video_id, prefix=first_bytes)
self.report_extraction(video_id)
# Is it an RSS feed, a SMIL file, an XSPF playlist or a MPD manifest?
try:
doc = compat_etree_fromstring(webpage.encode('utf-8'))
if doc.tag == 'rss':
return self._extract_rss(url, video_id, doc)
elif re.match(r'^(?:{[^}]+})?smil$', doc.tag):
smil = self._parse_smil(doc, url, video_id)
self._sort_formats(smil['formats'])
return smil
elif doc.tag == '{http://xspf.org/ns/0/}playlist':
return self.playlist_result(self._parse_xspf(doc, video_id), video_id)
elif re.match(r'(?i)^(?:{[^}]+})?MPD$', doc.tag):
info_dict['formats'] = self._parse_mpd_formats(
doc, video_id, mpd_base_url=url.rpartition('/')[0])
self._sort_formats(info_dict['formats'])
return info_dict
elif re.match(r'^{http://ns\.adobe\.com/f4m/[12]\.0}manifest$', doc.tag):
info_dict['formats'] = self._parse_f4m_formats(doc, url, video_id)
self._sort_formats(info_dict['formats'])
return info_dict
except compat_xml_parse_error:
pass
# Is it a Camtasia project?
camtasia_res = self._extract_camtasia(url, video_id, webpage)
if camtasia_res is not None:
return camtasia_res
        # Sometimes an embedded video player is hidden behind percent encoding
        # (e.g. https://github.com/rg3/youtube-dl/issues/2448);
        # unescaping the whole page lets us handle those cases generically.
webpage = compat_urllib_parse_unquote(webpage)
# it's tempting to parse this further, but you would
# have to take into account all the variations like
# Video Title - Site Name
# Site Name | Video Title
# Video Title - Tagline | Site Name
# and so on and so forth; it's just not practical
video_title = self._og_search_title(
webpage, default=None) or self._html_search_regex(
r'(?s)<title>(.*?)</title>', webpage, 'video title',
default='video')
# Try to detect age limit automatically
age_limit = self._rta_search(webpage)
# And then there are the jokers who advertise that they use RTA,
# but actually don't.
AGE_LIMIT_MARKERS = [
r'Proudly Labeled <a href="http://www.rtalabel.org/" title="Restricted to Adults">RTA</a>',
]
if any(re.search(marker, webpage) for marker in AGE_LIMIT_MARKERS):
age_limit = 18
# video uploader is domain name
video_uploader = self._search_regex(
r'^(?:https?://)?([^/]*)/.*', url, 'video uploader')
video_description = self._og_search_description(webpage, default=None)
video_thumbnail = self._og_search_thumbnail(webpage, default=None)
# Helper method
def _playlist_from_matches(matches, getter=None, ie=None):
urlrs = orderedSet(
self.url_result(self._proto_relative_url(getter(m) if getter else m), ie)
for m in matches)
return self.playlist_result(
urlrs, playlist_id=video_id, playlist_title=video_title)
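        # Illustrative note (assumed example): given matches such as
        # ['//example.com/a', 'http://example.com/b'], the helper wraps each
        # in a url_result (resolving protocol-relative URLs first) and returns
        # a playlist keyed by this page's video_id and video_title.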
# Look for Brightcove Legacy Studio embeds
bc_urls = BrightcoveLegacyIE._extract_brightcove_urls(webpage)
if bc_urls:
self.to_screen('Brightcove video detected.')
entries = [{
'_type': 'url',
'url': smuggle_url(bc_url, {'Referer': url}),
'ie_key': 'BrightcoveLegacy'
} for bc_url in bc_urls]
return {
'_type': 'playlist',
'title': video_title,
'id': video_id,
'entries': entries,
}
# Look for Brightcove New Studio embeds
bc_urls = BrightcoveNewIE._extract_urls(webpage)
if bc_urls:
return _playlist_from_matches(bc_urls, ie='BrightcoveNew')
# Look for ThePlatform embeds
tp_urls = ThePlatformIE._extract_urls(webpage)
if tp_urls:
return _playlist_from_matches(tp_urls, ie='ThePlatform')
# Look for Vessel embeds
vessel_urls = VesselIE._extract_urls(webpage)
if vessel_urls:
return _playlist_from_matches(vessel_urls, ie=VesselIE.ie_key())
# Look for embedded rtl.nl player
matches = re.findall(
r'<iframe[^>]+?src="((?:https?:)?//(?:www\.)?rtl\.nl/system/videoplayer/[^"]+(?:video_)?embed[^"]+)"',
webpage)
if matches:
return _playlist_from_matches(matches, ie='RtlNl')
vimeo_url = VimeoIE._extract_vimeo_url(url, webpage)
if vimeo_url is not None:
return self.url_result(vimeo_url)
vid_me_embed_url = self._search_regex(
r'src=[\'"](https?://vid\.me/[^\'"]+)[\'"]',
webpage, 'vid.me embed', default=None)
if vid_me_embed_url is not None:
return self.url_result(vid_me_embed_url, 'Vidme')
# Look for embedded YouTube player
matches = re.findall(r'''(?x)
(?:
<iframe[^>]+?src=|
data-video-url=|
<embed[^>]+?src=|
embedSWF\(?:\s*|
new\s+SWFObject\(
)
(["\'])
(?P<url>(?:https?:)?//(?:www\.)?youtube(?:-nocookie)?\.com/
(?:embed|v|p)/.+?)
\1''', webpage)
if matches:
return _playlist_from_matches(
matches, lambda m: unescapeHTML(m[1]))
# Look for lazyYT YouTube embed
matches = re.findall(
r'class="lazyYT" data-youtube-id="([^"]+)"', webpage)
if matches:
return _playlist_from_matches(matches, lambda m: unescapeHTML(m))
# Look for Wordpress "YouTube Video Importer" plugin
matches = re.findall(r'''(?x)<div[^>]+
class=(?P<q1>[\'"])[^\'"]*\byvii_single_video_player\b[^\'"]*(?P=q1)[^>]+
data-video_id=(?P<q2>[\'"])([^\'"]+)(?P=q2)''', webpage)
if matches:
return _playlist_from_matches(matches, lambda m: m[-1])
matches = DailymotionIE._extract_urls(webpage)
if matches:
return _playlist_from_matches(matches)
# Look for embedded Dailymotion playlist player (#3822)
m = re.search(
r'<iframe[^>]+?src=(["\'])(?P<url>(?:https?:)?//(?:www\.)?dailymotion\.[a-z]{2,3}/widget/jukebox\?.+?)\1', webpage)
if m:
playlists = re.findall(
r'list\[\]=/playlist/([^/]+)/', unescapeHTML(m.group('url')))
if playlists:
return _playlist_from_matches(
playlists, lambda p: '//dailymotion.com/playlist/%s' % p)
# Look for embedded Wistia player
match = re.search(
r'<(?:meta[^>]+?content|iframe[^>]+?src)=(["\'])(?P<url>(?:https?:)?//(?:fast\.)?wistia\.net/embed/iframe/.+?)\1', webpage)
if match:
embed_url = self._proto_relative_url(
unescapeHTML(match.group('url')))
return {
'_type': 'url_transparent',
'url': embed_url,
'ie_key': 'Wistia',
'uploader': video_uploader,
}
match = re.search(r'(?:id=["\']wistia_|data-wistia-?id=["\']|Wistia\.embed\(["\'])(?P<id>[^"\']+)', webpage)
if match:
return {
'_type': 'url_transparent',
'url': 'wistia:%s' % match.group('id'),
'ie_key': 'Wistia',
'uploader': video_uploader,
}
match = re.search(
r'''(?sx)
<script[^>]+src=(["'])(?:https?:)?//fast\.wistia\.com/assets/external/E-v1\.js\1[^>]*>.*?
<div[^>]+class=(["']).*?\bwistia_async_(?P<id>[a-z0-9]+)\b.*?\2
''', webpage)
if match:
return self.url_result(self._proto_relative_url(
'wistia:%s' % match.group('id')), 'Wistia')
# Look for SVT player
svt_url = SVTIE._extract_url(webpage)
if svt_url:
return self.url_result(svt_url, 'SVT')
# Look for embedded condenast player
matches = re.findall(
r'<iframe\s+(?:[a-zA-Z-]+="[^"]+"\s+)*?src="(https?://player\.cnevids\.com/embed/[^"]+")',
webpage)
if matches:
return {
'_type': 'playlist',
'entries': [{
'_type': 'url',
'ie_key': 'CondeNast',
'url': ma,
} for ma in matches],
'title': video_title,
'id': video_id,
}
# Look for Bandcamp pages with custom domain
mobj = re.search(r'<meta property="og:url"[^>]*?content="(.*?bandcamp\.com.*?)"', webpage)
if mobj is not None:
burl = unescapeHTML(mobj.group(1))
# Don't set the extractor because it can be a track url or an album
return self.url_result(burl)
# Look for embedded Vevo player
mobj = re.search(
r'<iframe[^>]+?src=(["\'])(?P<url>(?:https?:)?//(?:cache\.)?vevo\.com/.+?)\1', webpage)
if mobj is not None:
return self.url_result(mobj.group('url'))
# Look for embedded Viddler player
mobj = re.search(
r'<(?:iframe[^>]+?src|param[^>]+?value)=(["\'])(?P<url>(?:https?:)?//(?:www\.)?viddler\.com/(?:embed|player)/.+?)\1',
webpage)
if mobj is not None:
return self.url_result(mobj.group('url'))
# Look for NYTimes player
mobj = re.search(
r'<iframe[^>]+src=(["\'])(?P<url>(?:https?:)?//graphics8\.nytimes\.com/bcvideo/[^/]+/iframe/embed\.html.+?)\1>',
webpage)
if mobj is not None:
return self.url_result(mobj.group('url'))
# Look for Libsyn player
mobj = re.search(
r'<iframe[^>]+src=(["\'])(?P<url>(?:https?:)?//html5-player\.libsyn\.com/embed/.+?)\1', webpage)
if mobj is not None:
return self.url_result(mobj.group('url'))
# Look for Ooyala videos
mobj = (re.search(r'player\.ooyala\.com/[^"?]+[?#][^"]*?(?:embedCode|ec)=(?P<ec>[^"&]+)', webpage) or
re.search(r'OO\.Player\.create\([\'"].*?[\'"],\s*[\'"](?P<ec>.{32})[\'"]', webpage) or
re.search(r'SBN\.VideoLinkset\.ooyala\([\'"](?P<ec>.{32})[\'"]\)', webpage) or
re.search(r'data-ooyala-video-id\s*=\s*[\'"](?P<ec>.{32})[\'"]', webpage))
if mobj is not None:
return OoyalaIE._build_url_result(smuggle_url(mobj.group('ec'), {'domain': url}))
# Look for multiple Ooyala embeds on SBN network websites
mobj = re.search(r'SBN\.VideoLinkset\.entryGroup\((\[.*?\])', webpage)
if mobj is not None:
embeds = self._parse_json(mobj.group(1), video_id, fatal=False)
if embeds:
return _playlist_from_matches(
embeds, getter=lambda v: OoyalaIE._url_for_embed_code(smuggle_url(v['provider_video_id'], {'domain': url})), ie='Ooyala')
# Look for Aparat videos
mobj = re.search(r'<iframe .*?src="(http://www\.aparat\.com/video/[^"]+)"', webpage)
if mobj is not None:
return self.url_result(mobj.group(1), 'Aparat')
# Look for MPORA videos
mobj = re.search(r'<iframe .*?src="(http://mpora\.(?:com|de)/videos/[^"]+)"', webpage)
if mobj is not None:
return self.url_result(mobj.group(1), 'Mpora')
# Look for embedded NovaMov-based player
mobj = re.search(
r'''(?x)<(?:pagespeed_)?iframe[^>]+?src=(["\'])
(?P<url>http://(?:(?:embed|www)\.)?
(?:novamov\.com|
nowvideo\.(?:ch|sx|eu|at|ag|co)|
videoweed\.(?:es|com)|
movshare\.(?:net|sx|ag)|
divxstage\.(?:eu|net|ch|co|at|ag))
/embed\.php.+?)\1''', webpage)
if mobj is not None:
return self.url_result(mobj.group('url'))
# Look for embedded Facebook player
facebook_url = FacebookIE._extract_url(webpage)
if facebook_url is not None:
return self.url_result(facebook_url, 'Facebook')
# Look for embedded VK player
mobj = re.search(r'<iframe[^>]+?src=(["\'])(?P<url>https?://vk\.com/video_ext\.php.+?)\1', webpage)
if mobj is not None:
return self.url_result(mobj.group('url'), 'VK')
# Look for embedded Odnoklassniki player
mobj = re.search(r'<iframe[^>]+?src=(["\'])(?P<url>https?://(?:odnoklassniki|ok)\.ru/videoembed/.+?)\1', webpage)
if mobj is not None:
return self.url_result(mobj.group('url'), 'Odnoklassniki')
# Look for embedded ivi player
mobj = re.search(r'<embed[^>]+?src=(["\'])(?P<url>https?://(?:www\.)?ivi\.ru/video/player.+?)\1', webpage)
if mobj is not None:
return self.url_result(mobj.group('url'), 'Ivi')
# Look for embedded Huffington Post player
mobj = re.search(
r'<iframe[^>]+?src=(["\'])(?P<url>https?://embed\.live\.huffingtonpost\.com/.+?)\1', webpage)
if mobj is not None:
return self.url_result(mobj.group('url'), 'HuffPost')
# Look for embed.ly
mobj = re.search(r'class=["\']embedly-card["\'][^>]href=["\'](?P<url>[^"\']+)', webpage)
if mobj is not None:
return self.url_result(mobj.group('url'))
mobj = re.search(r'class=["\']embedly-embed["\'][^>]src=["\'][^"\']*url=(?P<url>[^&]+)', webpage)
if mobj is not None:
return self.url_result(compat_urllib_parse_unquote(mobj.group('url')))
# Look for funnyordie embed
matches = re.findall(r'<iframe[^>]+?src="(https?://(?:www\.)?funnyordie\.com/embed/[^"]+)"', webpage)
if matches:
return _playlist_from_matches(
matches, getter=unescapeHTML, ie='FunnyOrDie')
# Look for BBC iPlayer embed
matches = re.findall(r'setPlaylist\("(https?://www\.bbc\.co\.uk/iplayer/[^/]+/[\da-z]{8})"\)', webpage)
if matches:
return _playlist_from_matches(matches, ie='BBCCoUk')
# Look for embedded RUTV player
rutv_url = RUTVIE._extract_url(webpage)
if rutv_url:
return self.url_result(rutv_url, 'RUTV')
# Look for embedded TVC player
tvc_url = TVCIE._extract_url(webpage)
if tvc_url:
return self.url_result(tvc_url, 'TVC')
# Look for embedded SportBox player
sportbox_urls = SportBoxEmbedIE._extract_urls(webpage)
if sportbox_urls:
return _playlist_from_matches(sportbox_urls, ie='SportBoxEmbed')
# Look for embedded PornHub player
pornhub_url = PornHubIE._extract_url(webpage)
if pornhub_url:
return self.url_result(pornhub_url, 'PornHub')
# Look for embedded XHamster player
xhamster_urls = XHamsterEmbedIE._extract_urls(webpage)
if xhamster_urls:
return _playlist_from_matches(xhamster_urls, ie='XHamsterEmbed')
# Look for embedded TNAFlixNetwork player
tnaflix_urls = TNAFlixNetworkEmbedIE._extract_urls(webpage)
if tnaflix_urls:
return _playlist_from_matches(tnaflix_urls, ie=TNAFlixNetworkEmbedIE.ie_key())
# Look for embedded Tvigle player
mobj = re.search(
r'<iframe[^>]+?src=(["\'])(?P<url>(?:https?:)?//cloud\.tvigle\.ru/video/.+?)\1', webpage)
if mobj is not None:
return self.url_result(mobj.group('url'), 'Tvigle')
# Look for embedded TED player
mobj = re.search(
r'<iframe[^>]+?src=(["\'])(?P<url>https?://embed(?:-ssl)?\.ted\.com/.+?)\1', webpage)
if mobj is not None:
return self.url_result(mobj.group('url'), 'TED')
# Look for embedded Ustream videos
mobj = re.search(
r'<iframe[^>]+?src=(["\'])(?P<url>http://www\.ustream\.tv/embed/.+?)\1', webpage)
if mobj is not None:
return self.url_result(mobj.group('url'), 'Ustream')
# Look for embedded arte.tv player
mobj = re.search(
r'<(?:script|iframe) [^>]*?src="(?P<url>http://www\.arte\.tv/(?:playerv2/embed|arte_vp/index)[^"]+)"',
webpage)
if mobj is not None:
return self.url_result(mobj.group('url'), 'ArteTVEmbed')
# Look for embedded francetv player
mobj = re.search(
r'<iframe[^>]+?src=(["\'])(?P<url>(?:https?://)?embed\.francetv\.fr/\?ue=.+?)\1',
webpage)
if mobj is not None:
return self.url_result(mobj.group('url'))
# Look for embedded smotri.com player
smotri_url = SmotriIE._extract_url(webpage)
if smotri_url:
return self.url_result(smotri_url, 'Smotri')
# Look for embedded Myvi.ru player
myvi_url = MyviIE._extract_url(webpage)
if myvi_url:
return self.url_result(myvi_url)
# Look for embedded soundcloud player
soundcloud_urls = SoundcloudIE._extract_urls(webpage)
if soundcloud_urls:
return _playlist_from_matches(soundcloud_urls, getter=unescapeHTML, ie=SoundcloudIE.ie_key())
# Look for embedded mtvservices player
mtvservices_url = MTVServicesEmbeddedIE._extract_url(webpage)
if mtvservices_url:
return self.url_result(mtvservices_url, ie='MTVServicesEmbedded')
# Look for embedded yahoo player
mobj = re.search(
r'<iframe[^>]+?src=(["\'])(?P<url>https?://(?:screen|movies)\.yahoo\.com/.+?\.html\?format=embed)\1',
webpage)
if mobj is not None:
return self.url_result(mobj.group('url'), 'Yahoo')
# Look for embedded sbs.com.au player
mobj = re.search(
r'''(?x)
(?:
<meta\s+property="og:video"\s+content=|
<iframe[^>]+?src=
)
(["\'])(?P<url>https?://(?:www\.)?sbs\.com\.au/ondemand/video/.+?)\1''',
webpage)
if mobj is not None:
return self.url_result(mobj.group('url'), 'SBS')
# Look for embedded Cinchcast player
mobj = re.search(
r'<iframe[^>]+?src=(["\'])(?P<url>https?://player\.cinchcast\.com/.+?)\1',
webpage)
if mobj is not None:
return self.url_result(mobj.group('url'), 'Cinchcast')
mobj = re.search(
r'<iframe[^>]+?src=(["\'])(?P<url>https?://m(?:lb)?\.mlb\.com/shared/video/embed/embed\.html\?.+?)\1',
webpage)
if not mobj:
mobj = re.search(
r'data-video-link=["\'](?P<url>http://m.mlb.com/video/[^"\']+)',
webpage)
if mobj is not None:
return self.url_result(mobj.group('url'), 'MLB')
mobj = re.search(
r'<(?:iframe|script)[^>]+?src=(["\'])(?P<url>%s)\1' % CondeNastIE.EMBED_URL,
webpage)
if mobj is not None:
return self.url_result(self._proto_relative_url(mobj.group('url'), scheme='http:'), 'CondeNast')
mobj = re.search(
r'<iframe[^>]+src="(?P<url>https?://(?:new\.)?livestream\.com/[^"]+/player[^"]+)"',
webpage)
if mobj is not None:
return self.url_result(mobj.group('url'), 'Livestream')
# Look for Zapiks embed
mobj = re.search(
r'<iframe[^>]+src="(?P<url>https?://(?:www\.)?zapiks\.fr/index\.php\?.+?)"', webpage)
if mobj is not None:
return self.url_result(mobj.group('url'), 'Zapiks')
# Look for Kaltura embeds
kaltura_url = KalturaIE._extract_url(webpage)
if kaltura_url:
return self.url_result(smuggle_url(kaltura_url, {'source_url': url}), KalturaIE.ie_key())
# Look for Eagle.Platform embeds
eagleplatform_url = EaglePlatformIE._extract_url(webpage)
if eagleplatform_url:
return self.url_result(eagleplatform_url, EaglePlatformIE.ie_key())
# Look for ClipYou (uses Eagle.Platform) embeds
mobj = re.search(
r'<iframe[^>]+src="https?://(?P<host>media\.clipyou\.ru)/index/player\?.*\brecord_id=(?P<id>\d+).*"', webpage)
if mobj is not None:
return self.url_result('eagleplatform:%(host)s:%(id)s' % mobj.groupdict(), 'EaglePlatform')
# Look for Pladform embeds
pladform_url = PladformIE._extract_url(webpage)
if pladform_url:
return self.url_result(pladform_url)
# Look for Videomore embeds
videomore_url = VideomoreIE._extract_url(webpage)
if videomore_url:
return self.url_result(videomore_url)
# Look for Playwire embeds
mobj = re.search(
r'<script[^>]+data-config=(["\'])(?P<url>(?:https?:)?//config\.playwire\.com/.+?)\1', webpage)
if mobj is not None:
return self.url_result(mobj.group('url'))
# Look for 5min embeds
mobj = re.search(
r'<meta[^>]+property="og:video"[^>]+content="https?://embed\.5min\.com/(?P<id>[0-9]+)/?', webpage)
if mobj is not None:
return self.url_result('5min:%s' % mobj.group('id'), 'FiveMin')
# Look for Crooks and Liars embeds
mobj = re.search(
r'<(?:iframe[^>]+src|param[^>]+value)=(["\'])(?P<url>(?:https?:)?//embed\.crooksandliars\.com/(?:embed|v)/.+?)\1', webpage)
if mobj is not None:
return self.url_result(mobj.group('url'))
# Look for NBC Sports VPlayer embeds
nbc_sports_url = NBCSportsVPlayerIE._extract_url(webpage)
if nbc_sports_url:
return self.url_result(nbc_sports_url, 'NBCSportsVPlayer')
# Look for NBC News embeds
nbc_news_embed_url = re.search(
r'<iframe[^>]+src=(["\'])(?P<url>(?:https?:)?//www\.nbcnews\.com/widget/video-embed/[^"\']+)\1', webpage)
if nbc_news_embed_url:
return self.url_result(nbc_news_embed_url.group('url'), 'NBCNews')
# Look for Google Drive embeds
google_drive_url = GoogleDriveIE._extract_url(webpage)
if google_drive_url:
return self.url_result(google_drive_url, 'GoogleDrive')
# Look for UDN embeds
mobj = re.search(
r'<iframe[^>]+src="(?P<url>%s)"' % UDNEmbedIE._PROTOCOL_RELATIVE_VALID_URL, webpage)
if mobj is not None:
return self.url_result(
compat_urlparse.urljoin(url, mobj.group('url')), 'UDNEmbed')
# Look for Senate ISVP iframe
senate_isvp_url = SenateISVPIE._search_iframe_url(webpage)
if senate_isvp_url:
return self.url_result(senate_isvp_url, 'SenateISVP')
# Look for Dailymotion Cloud videos
dmcloud_url = DailymotionCloudIE._extract_dmcloud_url(webpage)
if dmcloud_url:
return self.url_result(dmcloud_url, 'DailymotionCloud')
# Look for OnionStudios embeds
onionstudios_url = OnionStudiosIE._extract_url(webpage)
if onionstudios_url:
return self.url_result(onionstudios_url)
# Look for ViewLift embeds
viewlift_url = ViewLiftEmbedIE._extract_url(webpage)
if viewlift_url:
return self.url_result(viewlift_url)
# Look for JWPlatform embeds
jwplatform_url = JWPlatformIE._extract_url(webpage)
if jwplatform_url:
return self.url_result(jwplatform_url, 'JWPlatform')
# Look for ScreenwaveMedia embeds
mobj = re.search(ScreenwaveMediaIE.EMBED_PATTERN, webpage)
if mobj is not None:
return self.url_result(unescapeHTML(mobj.group('url')), 'ScreenwaveMedia')
# Look for Digiteka embeds
digiteka_url = DigitekaIE._extract_url(webpage)
if digiteka_url:
return self.url_result(self._proto_relative_url(digiteka_url), DigitekaIE.ie_key())
# Look for Arkena embeds
arkena_url = ArkenaIE._extract_url(webpage)
if arkena_url:
return self.url_result(arkena_url, ArkenaIE.ie_key())
# Look for Limelight embeds
mobj = re.search(r'LimelightPlayer\.doLoad(Media|Channel|ChannelList)\(["\'](?P<id>[a-z0-9]{32})', webpage)
if mobj:
lm = {
'Media': 'media',
'Channel': 'channel',
'ChannelList': 'channel_list',
}
return self.url_result('limelight:%s:%s' % (
lm[mobj.group(1)], mobj.group(2)), 'Limelight%s' % mobj.group(1), mobj.group(2))
# Look for AdobeTVVideo embeds
mobj = re.search(
r'<iframe[^>]+src=[\'"]((?:https?:)?//video\.tv\.adobe\.com/v/\d+[^"]+)[\'"]',
webpage)
if mobj is not None:
return self.url_result(
self._proto_relative_url(unescapeHTML(mobj.group(1))),
'AdobeTVVideo')
# Look for Vine embeds
mobj = re.search(
r'<iframe[^>]+src=[\'"]((?:https?:)?//(?:www\.)?vine\.co/v/[^/]+/embed/(?:simple|postcard))',
webpage)
if mobj is not None:
return self.url_result(
self._proto_relative_url(unescapeHTML(mobj.group(1))), 'Vine')
# Look for VODPlatform embeds
mobj = re.search(
r'<iframe[^>]+src=[\'"]((?:https?:)?//(?:www\.)?vod-platform\.net/embed/[^/?#]+)',
webpage)
if mobj is not None:
return self.url_result(
self._proto_relative_url(unescapeHTML(mobj.group(1))), 'VODPlatform')
# Look for Instagram embeds
instagram_embed_url = InstagramIE._extract_embed_url(webpage)
if instagram_embed_url is not None:
return self.url_result(
self._proto_relative_url(instagram_embed_url), InstagramIE.ie_key())
# Look for LiveLeak embeds
liveleak_url = LiveLeakIE._extract_url(webpage)
if liveleak_url:
return self.url_result(liveleak_url, 'LiveLeak')
# Look for 3Q SDN embeds
threeqsdn_url = ThreeQSDNIE._extract_url(webpage)
if threeqsdn_url:
return {
'_type': 'url_transparent',
'ie_key': ThreeQSDNIE.ie_key(),
'url': self._proto_relative_url(threeqsdn_url),
'title': video_title,
'description': video_description,
'thumbnail': video_thumbnail,
'uploader': video_uploader,
}
# Look for VBOX7 embeds
vbox7_url = Vbox7IE._extract_url(webpage)
if vbox7_url:
return self.url_result(vbox7_url, Vbox7IE.ie_key())
# Looking for http://schema.org/VideoObject
json_ld = self._search_json_ld(
webpage, video_id, default={}, expected_type='VideoObject')
if json_ld.get('url'):
info_dict.update({
'title': video_title or info_dict['title'],
'description': video_description,
'thumbnail': video_thumbnail,
'age_limit': age_limit
})
info_dict.update(json_ld)
return info_dict
def check_video(vurl):
if YoutubeIE.suitable(vurl):
return True
vpath = compat_urlparse.urlparse(vurl).path
vext = determine_ext(vpath)
return '.' in vpath and vext not in ('swf', 'png', 'jpg', 'srt', 'sbv', 'sub', 'vtt', 'ttml')
def filter_video(urls):
return list(filter(check_video, urls))
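        # A few illustrative checks for the filter above (hypothetical URLs):
        #   check_video('http://example.com/clip.mp4')        -> True  ('.mp4' is not in the exclusion list)
        #   check_video('http://example.com/poster.jpg')      -> False ('jpg' is excluded)
        #   check_video('https://www.youtube.com/watch?v=xy') -> True  (YoutubeIE.suitable short-circuits)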
# Start with something easy: JW Player in SWFObject
found = filter_video(re.findall(r'flashvars: [\'"](?:.*&)?file=(http[^\'"&]*)', webpage))
if not found:
# Look for gorilla-vid style embedding
found = filter_video(re.findall(r'''(?sx)
(?:
jw_plugins|
JWPlayerOptions|
jwplayer\s*\(\s*["'][^'"]+["']\s*\)\s*\.setup
)
.*?
['"]?file['"]?\s*:\s*["\'](.*?)["\']''', webpage))
if not found:
# Broaden the search a little bit
found = filter_video(re.findall(r'[^A-Za-z0-9]?(?:file|source)=(http[^\'"&]*)', webpage))
if not found:
# Broaden the findall a little bit: JWPlayer JS loader
found = filter_video(re.findall(
r'[^A-Za-z0-9]?(?:file|video_url)["\']?:\s*["\'](http(?![^\'"]+\.[0-9]+[\'"])[^\'"]+)["\']', webpage))
if not found:
# Flow player
found = filter_video(re.findall(r'''(?xs)
flowplayer\("[^"]+",\s*
\{[^}]+?\}\s*,
\s*\{[^}]+? ["']?clip["']?\s*:\s*\{\s*
["']?url["']?\s*:\s*["']([^"']+)["']
''', webpage))
if not found:
# Cinerama player
found = re.findall(
r"cinerama\.embedPlayer\(\s*\'[^']+\',\s*'([^']+)'", webpage)
if not found:
# Try to find twitter cards info
# twitter:player:stream should be checked before twitter:player since
# it is expected to contain a raw stream (see
# https://dev.twitter.com/cards/types/player#On_twitter.com_via_desktop_browser)
found = filter_video(re.findall(
r'<meta (?:property|name)="twitter:player:stream" (?:content|value)="(.+?)"', webpage))
        if not found:
            # We look for Open Graph info:
            # We have to match any number of spaces between elements, some sites try to align them (e.g. statigr.am)
            m_video_type = re.findall(r'<meta.*?property="og:video:type".*?content="video/(.*?)"', webpage)
            # We only look in og:video if the MIME type is a video; don't try if it's a Flash player.
            # re.findall returns a list, so test it for truthiness rather than against None:
            if m_video_type:
                found = filter_video(re.findall(r'<meta.*?property="og:video".*?content="(.*?)"', webpage))
if not found:
# HTML5 video
found = re.findall(r'(?s)<(?:video|audio)[^<]*(?:>.*?<source[^>]*)?\s+src=["\'](.*?)["\']', webpage)
if not found:
REDIRECT_REGEX = r'[0-9]{,2};\s*(?:URL|url)=\'?([^\'"]+)'
found = re.search(
r'(?i)<meta\s+(?=(?:[a-z-]+="[^"]+"\s+)*http-equiv="refresh")'
r'(?:[a-z-]+="[^"]+"\s+)*?content="%s' % REDIRECT_REGEX,
webpage)
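            # A tag this matches looks like (hypothetical example):
            #   <meta http-equiv="refresh" content="0;URL='http://example.com/video'">
            # REDIRECT_REGEX then captures http://example.com/video as group(1).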
if not found:
# Look also in Refresh HTTP header
refresh_header = head_response.headers.get('Refresh')
if refresh_header:
# In python 2 response HTTP headers are bytestrings
if sys.version_info < (3, 0) and isinstance(refresh_header, str):
refresh_header = refresh_header.decode('iso-8859-1')
found = re.search(REDIRECT_REGEX, refresh_header)
if found:
new_url = compat_urlparse.urljoin(url, unescapeHTML(found.group(1)))
self.report_following_redirect(new_url)
return {
'_type': 'url',
'url': new_url,
}
        if not found:
            # twitter:player is an https URL to an iframe player that may or may not
            # be supported by youtube-dl, so it is checked last (see
            # https://dev.twitter.com/cards/types/player#On_twitter.com_via_desktop_browser)
embed_url = self._html_search_meta('twitter:player', webpage, default=None)
if embed_url:
return self.url_result(embed_url)
if not found:
raise UnsupportedError(url)
entries = []
for video_url in orderedSet(found):
video_url = unescapeHTML(video_url)
video_url = video_url.replace('\\/', '/')
video_url = compat_urlparse.urljoin(url, video_url)
video_id = compat_urllib_parse_unquote(os.path.basename(video_url))
# Sometimes, jwplayer extraction will result in a YouTube URL
if YoutubeIE.suitable(video_url):
entries.append(self.url_result(video_url, 'Youtube'))
continue
# here's a fun little line of code for you:
video_id = os.path.splitext(video_id)[0]
entry_info_dict = {
'id': video_id,
'uploader': video_uploader,
'title': video_title,
'age_limit': age_limit,
}
ext = determine_ext(video_url)
if ext == 'smil':
entry_info_dict['formats'] = self._extract_smil_formats(video_url, video_id)
elif ext == 'xspf':
return self.playlist_result(self._extract_xspf_playlist(video_url, video_id), video_id)
elif ext == 'm3u8':
entry_info_dict['formats'] = self._extract_m3u8_formats(video_url, video_id, ext='mp4')
elif ext == 'mpd':
entry_info_dict['formats'] = self._extract_mpd_formats(video_url, video_id)
elif ext == 'f4m':
entry_info_dict['formats'] = self._extract_f4m_formats(video_url, video_id)
else:
entry_info_dict['url'] = video_url
if entry_info_dict.get('formats'):
self._sort_formats(entry_info_dict['formats'])
entries.append(entry_info_dict)
if len(entries) == 1:
return entries[0]
else:
for num, e in enumerate(entries, start=1):
# 'url' results don't have a title
if e.get('title') is not None:
e['title'] = '%s (%d)' % (e['title'], num)
return {
'_type': 'playlist',
'entries': entries,
}
| Rudloff/youtube-dl | youtube_dl/extractor/generic.py | Python | unlicense | 99,048 |
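A minimal, self-contained sketch of the embed-detection idiom the extractor above repeats: match a quoted iframe src with a backreference so single- and double-quoted attributes both work, then normalize protocol-relative URLs. The page snippet and helper names here are hypothetical stand-ins, not youtube-dl APIs.

import re

def find_embed(webpage):
    # \1 backreferences the opening quote, so src="..." and src='...' both match
    mobj = re.search(
        r'<iframe[^>]+src=(["\'])(?P<url>(?:https?:)?//[^"\']+)\1', webpage)
    return mobj.group('url') if mobj else None

def proto_relative(url, scheme='http:'):
    # mirrors the _proto_relative_url idea used above for scheme-less //host/... URLs
    return scheme + url if url and url.startswith('//') else url

page = '<html><iframe src="//player.example.com/embed/42"></iframe></html>'
print(proto_relative(find_embed(page)))  # -> http://player.example.com/embed/42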
import pyodbc
import config
def main():
    # format() treats '{' specially, so literal braces are escaped by doubling them ('{{' -> '{')
con_str = 'Driver={{Microsoft Access Driver (*.mdb, *.accdb)}};Dbq={0};'.format(config.PATH_ACCDB)
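    # e.g. (illustrative): '{{x}} {0}'.format('y') -> '{x} y'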
conn = pyodbc.connect(con_str)
cur = conn.cursor()
cur.execute("select item_name from item")
for c in cur.fetchall():
        print(c[0])  # => 'ringo', 'みかん'
cur.close()
conn.close()
if __name__ == '__main__':
main()
| thinkAmi-sandbox/python_ms_access_sample | pyodbc_runner.py | Python | unlicense | 487 |
# pattern seems to be multiplying every pair of digits from different numbers and adding them up
from itertools import product
def test_it(a, b):
return sum(int(d1)*int(d2) for d1,d2 in product(str(a), str(b)))
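# Worked example (hypothetical input): test_it(12, 34) pairs the digits as
# (1,3), (1,4), (2,3), (2,4) -> 3 + 4 + 6 + 8 == 21
assert test_it(12, 34) == 21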
| SelvorWhim/competitive | Codewars/ThinkingTestingAB.py | Python | unlicense | 217 |
#!/usr/bin/env python
import pexpect
import traceback
import time
import os
import sys
import re
addr = 'telnet 192.168.99.1 10000'
uname = ['a', 'd', 'm', 'i', 'n']
passwd = ['p', 'a', 's', 's', 'w', 'd']
cmdline = "show statistics traffic 5/1/0-1\n"
qq = ["e", "x", "i", "t", "\n"]
logName = 'Traffic_' + time.strftime("%Y-%m-%d", time.localtime())
if __name__ == '__main__':
with open(logName, 'w') as fd:
fd.writelines(["start at: ", time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()),
"\ndate,time, 5/1/0(Mbit/s), 5/1/1(Mbit/s)\n"])
while True:
try:
                raw_log = open('Telnet_raw.log', 'w')  # separate handle so the outer fd is not clobbered
                child = pexpect.spawn(addr)
                child.logfile_read = raw_log
index = child.expect(['username:', pexpect.EOF, pexpect.TIMEOUT])
if index == 0:
for i in uname:
child.send(i)
child.send('\n.\n')
for i in passwd:
child.send(i)
child.send('\n.\n')
child.sendline(cmdline)
for i in qq:
child.send(i)
child.expect(['#', pexpect.EOF, pexpect.TIMEOUT])
child.close()
timeFin = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
else:
print index
            except Exception:
                traceback.print_exc()
            finally:
                raw_log.close()
with open('Telnet_raw.log', 'r') as fs, open(logName, 'a') as fd:
temp = []
content = fs.readlines()
fd.write(time.strftime("%Y-%m-%d,%H:%M:%S,", time.localtime()))
for line in content:
temp.append(re.split(r'\s+', line))
fd.write(" %s,%s\n" % (temp[11][2], temp[13][2]))
time.sleep(1)
| wbvalid/python2 | telnetFlowMeasure.py | Python | unlicense | 1,829 |
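A minimal sketch of the expect/send loop the script above is built around, against a hypothetical device; the address, prompts, and credentials are placeholders, not values from the file.

import pexpect

child = pexpect.spawn('telnet 192.0.2.1 23')  # TEST-NET-1 address, hypothetical
index = child.expect(['username:', pexpect.EOF, pexpect.TIMEOUT], timeout=10)
if index == 0:  # index 0 means the first pattern (the login prompt) matched
    child.sendline('admin')
    child.expect('password:')
    child.sendline('secret')
    child.sendline('show statistics traffic 5/1/0-1')
    child.expect('#')          # wait for the shell prompt
    print(child.before)        # everything the device printed before the prompt
child.close()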
#!/usr/bin/env python
import warnings as _warnings
_warnings.resetwarnings()
_warnings.filterwarnings('error')
from tdi import html
template = html.from_string("""
<node tdi="item">
<znode tdi="nested" tdi:overlay="foo">
<ynode tdi="subnested"></ynode>
</znode>
<xnode tdi="a"></xnode>
</node>
""".lstrip()).overlay(html.from_string("""
<anode tdi="grumpf" tdi:overlay="foo">
<bnode tdi:overlay="bar"></bnode>
</anode>
<anode tdi="zonk" tdi:overlay="bar">
<bnode tdi="schnick"></bnode>
</anode>
""".lstrip())).overlay(html.from_string("""
<anode tdi="zonk" tdi:overlay="bar">
<bnode tdi="schnick"></bnode>
</anode>
""".lstrip()))
class Model(object):
def render_nested(self, node):
node['been'] = u'here'
def render_schnick(self, node):
node.content = u"something"
model = Model()
template.render(model)
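# What to expect (a sketch, from the render_* naming convention above): tdi looks
# up a render_<name> method on the model for every node marked tdi="<name>", so
# render_nested sets an attribute on the overlaid node and render_schnick replaces
# the node's text content with u"something".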
| ndparker/tdi | tests/template/overlay_nested2.py | Python | apache-2.0 | 865 |
# Copyright (c) 2010-2011 OpenStack, LLC.
# Copyright (c) 2008-2011 Gluster, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Object Server for Swift """
from __future__ import with_statement
import cPickle as pickle
import errno
import os
import time
import traceback
from datetime import datetime
from hashlib import md5
from tempfile import mkstemp
from urllib import unquote
from contextlib import contextmanager
from ConfigParser import ConfigParser
from webob import Request, Response, UTC
from webob.exc import HTTPAccepted, HTTPBadRequest, HTTPCreated, \
HTTPInternalServerError, HTTPNoContent, HTTPNotFound, \
HTTPNotModified, HTTPPreconditionFailed, \
HTTPRequestTimeout, HTTPUnprocessableEntity, HTTPMethodNotAllowed
from xattr import getxattr, setxattr
from eventlet import sleep, Timeout, tpool
from swift.common.utils import mkdirs, normalize_timestamp, \
storage_directory, hash_path, renamer, fallocate, \
split_path, drop_buffer_cache, get_logger, write_pickle, \
plugin_enabled
from swift.common.bufferedhttp import http_connect
if plugin_enabled():
from swift.plugins.constraints import check_object_creation
from swift.plugins.utils import X_TYPE, X_OBJECT_TYPE, FILE, DIR, MARKER_DIR, \
OBJECT, DIR_TYPE, FILE_TYPE
else:
from swift.common.constraints import check_object_creation
from swift.common.constraints import check_mount, check_float, check_utf8
from swift.common.exceptions import ConnectionTimeout, DiskFileError, \
DiskFileNotExist
from swift.obj.replicator import tpooled_get_hashes, invalidate_hash, \
quarantine_renamer
DATADIR = 'objects'
ASYNCDIR = 'async_pending'
PICKLE_PROTOCOL = 2
METADATA_KEY = 'user.swift.metadata'
MAX_OBJECT_NAME_LENGTH = 1024
KEEP_CACHE_SIZE = (5 * 1024 * 1024)
# keep these lower-case
DISALLOWED_HEADERS = set('content-length content-type deleted etag'.split())
def read_metadata(fd):
"""
Helper function to read the pickled metadata from an object file.
:param fd: file descriptor to load the metadata from
:returns: dictionary of metadata
"""
metadata = ''
key = 0
try:
while True:
metadata += getxattr(fd, '%s%s' % (METADATA_KEY, (key or '')))
key += 1
except IOError:
pass
return pickle.loads(metadata)
def write_metadata(fd, metadata):
"""
Helper function to write pickled metadata for an object file.
:param fd: file descriptor to write the metadata
:param metadata: metadata to write
"""
metastr = pickle.dumps(metadata, PICKLE_PROTOCOL)
key = 0
while metastr:
setxattr(fd, '%s%s' % (METADATA_KEY, key or ''), metastr[:254])
metastr = metastr[254:]
key += 1
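# Layout sketch (hypothetical size): a 600-byte pickle is spread across
#   user.swift.metadata   bytes   0..253
#   user.swift.metadata1  bytes 254..507
#   user.swift.metadata2  bytes 508..599
# and read_metadata() above walks the same key sequence until getxattr raises IOError.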
class DiskFile(object):
"""
Manage object files on disk.
:param path: path to devices on the node
:param device: device name
:param partition: partition on the device the object lives in
:param account: account name for the object
:param container: container name for the object
:param obj: object name for the object
:param keep_data_fp: if True, don't close the fp, otherwise close it
    :param disk_chunk_size: size of chunks on file reads
"""
def __init__(self, path, device, partition, account, container, obj,
logger, keep_data_fp=False, disk_chunk_size=65536):
self.disk_chunk_size = disk_chunk_size
self.name = '/' + '/'.join((account, container, obj))
name_hash = hash_path(account, container, obj)
self.datadir = os.path.join(path, device,
storage_directory(DATADIR, partition, name_hash))
self.device_path = os.path.join(path, device)
self.tmpdir = os.path.join(path, device, 'tmp')
self.logger = logger
self.metadata = {}
self.meta_file = None
self.data_file = None
self.fp = None
self.iter_etag = None
self.started_at_0 = False
self.read_to_eof = False
self.quarantined_dir = None
self.keep_cache = False
if not os.path.exists(self.datadir):
return
files = sorted(os.listdir(self.datadir), reverse=True)
for file in files:
if file.endswith('.ts'):
self.data_file = self.meta_file = None
self.metadata = {'deleted': True}
return
if file.endswith('.meta') and not self.meta_file:
self.meta_file = os.path.join(self.datadir, file)
if file.endswith('.data') and not self.data_file:
self.data_file = os.path.join(self.datadir, file)
break
if not self.data_file:
return
self.fp = open(self.data_file, 'rb')
self.metadata = read_metadata(self.fp)
if not keep_data_fp:
self.close(verify_file=False)
if self.meta_file:
with open(self.meta_file) as mfp:
for key in self.metadata.keys():
if key.lower() not in DISALLOWED_HEADERS:
del self.metadata[key]
self.metadata.update(read_metadata(mfp))
def __iter__(self):
"""Returns an iterator over the data file."""
try:
dropped_cache = 0
read = 0
self.started_at_0 = False
self.read_to_eof = False
if self.fp.tell() == 0:
self.started_at_0 = True
self.iter_etag = md5()
while True:
chunk = self.fp.read(self.disk_chunk_size)
if chunk:
if self.iter_etag:
self.iter_etag.update(chunk)
read += len(chunk)
if read - dropped_cache > (1024 * 1024):
self.drop_cache(self.fp.fileno(), dropped_cache,
read - dropped_cache)
dropped_cache = read
yield chunk
else:
self.read_to_eof = True
self.drop_cache(self.fp.fileno(), dropped_cache,
read - dropped_cache)
break
finally:
self.close()
def app_iter_range(self, start, stop):
"""Returns an iterator over the data file for range (start, stop)"""
if start:
self.fp.seek(start)
if stop is not None:
length = stop - start
else:
length = None
for chunk in self:
if length is not None:
length -= len(chunk)
if length < 0:
# Chop off the extra:
yield chunk[:length]
break
yield chunk
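    # Example (hypothetical numbers): start=0, stop=10 over 4-byte chunks yields
    # 4 + 4 + 2 bytes; the last chunk is sliced with chunk[:length] because length
    # has gone negative by exactly the overshoot.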
def _handle_close_quarantine(self):
"""Check if file needs to be quarantined"""
try:
obj_size = self.get_data_file_size()
except DiskFileError, e:
self.quarantine()
return
except DiskFileNotExist:
return
if (self.iter_etag and self.started_at_0 and self.read_to_eof and
'ETag' in self.metadata and
self.iter_etag.hexdigest() != self.metadata.get('ETag')):
self.quarantine()
def close(self, verify_file=True):
"""
Close the file. Will handle quarantining file if necessary.
:param verify_file: Defaults to True. If false, will not check
file to see if it needs quarantining.
"""
if self.fp:
try:
if verify_file:
self._handle_close_quarantine()
except (Exception, Timeout), e:
import traceback
self.logger.error(_('ERROR DiskFile %(data_file)s in '
                    '%(data_dir)s close failure: %(exc)s : %(stack)s'),
{'exc': e, 'stack': ''.join(traceback.format_stack()),
'data_file': self.data_file, 'data_dir': self.datadir})
finally:
self.fp.close()
self.fp = None
def is_deleted(self):
"""
Check if the file is deleted.
:returns: True if the file doesn't exist or has been flagged as
deleted.
"""
return not self.data_file or 'deleted' in self.metadata
@contextmanager
def mkstemp(self):
"""Contextmanager to make a temporary file."""
if not os.path.exists(self.tmpdir):
mkdirs(self.tmpdir)
fd, tmppath = mkstemp(dir=self.tmpdir)
try:
yield fd, tmppath
finally:
try:
os.close(fd)
except OSError:
pass
try:
os.unlink(tmppath)
except OSError:
pass
def put(self, fd, tmppath, metadata, extension='.data'):
"""
Finalize writing the file on disk, and renames it from the temp file to
the real location. This should be called after the data has been
written to the temp file.
:params fd: file descriptor of the temp file
:param tmppath: path to the temporary file being used
:param metadata: dictionary of metadata to be written
        :param extension: extension to be used when making the file
"""
metadata['name'] = self.name
timestamp = normalize_timestamp(metadata['X-Timestamp'])
write_metadata(fd, metadata)
if 'Content-Length' in metadata:
self.drop_cache(fd, 0, int(metadata['Content-Length']))
tpool.execute(os.fsync, fd)
invalidate_hash(os.path.dirname(self.datadir))
renamer(tmppath, os.path.join(self.datadir, timestamp + extension))
self.metadata = metadata
def unlinkold(self, timestamp):
"""
Remove any older versions of the object file. Any file that has an
older timestamp than timestamp will be deleted.
:param timestamp: timestamp to compare with each file
"""
timestamp = normalize_timestamp(timestamp)
for fname in os.listdir(self.datadir):
if fname < timestamp:
try:
os.unlink(os.path.join(self.datadir, fname))
except OSError, err: # pragma: no cover
if err.errno != errno.ENOENT:
raise
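    # Example (hypothetical names): with the datadir holding
    #   1311513013.93000.data and 1311513023.93000.meta,
    # unlinkold('1311513020.00000') removes only the .data file; the zero-padded
    # names produced by normalize_timestamp compare correctly as plain strings.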
def drop_cache(self, fd, offset, length):
"""Method for no-oping buffer cache drop method."""
if not self.keep_cache:
drop_buffer_cache(fd, offset, length)
def quarantine(self):
"""
In the case that a file is corrupted, move it to a quarantined
area to allow replication to fix it.
:returns: if quarantine is successful, path to quarantined
directory otherwise None
"""
if not (self.is_deleted() or self.quarantined_dir):
self.quarantined_dir = quarantine_renamer(self.device_path,
self.data_file)
return self.quarantined_dir
def get_data_file_size(self):
"""
Returns the os.path.getsize for the file. Raises an exception if this
file does not match the Content-Length stored in the metadata. Or if
self.data_file does not exist.
:returns: file size as an int
:raises DiskFileError: on file size mismatch.
:raises DiskFileNotExist: on file not existing (including deleted)
"""
try:
file_size = 0
if self.data_file:
file_size = os.path.getsize(self.data_file)
if 'Content-Length' in self.metadata:
metadata_size = int(self.metadata['Content-Length'])
if file_size != metadata_size:
raise DiskFileError('Content-Length of %s does not '
'match file size of %s' % (metadata_size, file_size))
return file_size
except OSError, err:
if err.errno != errno.ENOENT:
raise
raise DiskFileNotExist('Data File does not exist.')
if plugin_enabled():
from swift.plugins.DiskFile import Gluster_DiskFile
class ObjectController(object):
"""Implements the WSGI application for the Swift Object Server."""
def __init__(self, conf):
"""
Creates a new WSGI application for the Swift Object Server. An
example configuration is given at
<source-dir>/etc/object-server.conf-sample or
/etc/swift/object-server.conf-sample.
"""
self.logger = get_logger(conf, log_route='object-server')
self.devices = conf.get('devices', '/srv/node/')
self.mount_check = conf.get('mount_check', 'true').lower() in \
('true', 't', '1', 'on', 'yes', 'y')
self.node_timeout = int(conf.get('node_timeout', 3))
self.conn_timeout = float(conf.get('conn_timeout', 0.5))
self.disk_chunk_size = int(conf.get('disk_chunk_size', 65536))
self.network_chunk_size = int(conf.get('network_chunk_size', 65536))
self.log_requests = conf.get('log_requests', 't')[:1].lower() == 't'
self.max_upload_time = int(conf.get('max_upload_time', 86400))
self.slow = int(conf.get('slow', 0))
self.bytes_per_sync = int(conf.get('mb_per_sync', 512)) * 1024 * 1024
default_allowed_headers = '''
content-disposition,
content-encoding,
x-delete-at,
x-object-manifest,
'''
self.allowed_headers = set(i.strip().lower() for i in \
conf.get('allowed_headers', \
default_allowed_headers).split(',') if i.strip() and \
i.strip().lower() not in DISALLOWED_HEADERS)
self.expiring_objects_account = \
(conf.get('auto_create_account_prefix') or '.') + \
'expiring_objects'
self.expiring_objects_container_divisor = \
int(conf.get('expiring_objects_container_divisor') or 86400)
self.fs_object = None
def get_DiskFile_obj(self, path, device, partition, account, container, obj,
logger, keep_data_fp=False, disk_chunk_size=65536):
if self.fs_object:
            return Gluster_DiskFile(path, device, partition, account, container,
                                    obj, logger, keep_data_fp,
                                    disk_chunk_size, fs_object=self.fs_object)
else:
return DiskFile(path, device, partition, account, container,
obj, logger, keep_data_fp, disk_chunk_size)
def async_update(self, op, account, container, obj, host, partition,
contdevice, headers_out, objdevice):
"""
Sends or saves an async update.
:param op: operation performed (ex: 'PUT', or 'DELETE')
:param account: account name for the object
:param container: container name for the object
:param obj: object name
:param host: host that the container is on
:param partition: partition that the container is on
:param contdevice: device name that the container is on
:param headers_out: dictionary of headers to send in the container
request
:param objdevice: device name that the object is in
"""
full_path = '/%s/%s/%s' % (account, container, obj)
if all([host, partition, contdevice]):
try:
with ConnectionTimeout(self.conn_timeout):
ip, port = host.rsplit(':', 1)
conn = http_connect(ip, port, contdevice, partition, op,
full_path, headers_out)
with Timeout(self.node_timeout):
response = conn.getresponse()
response.read()
if 200 <= response.status < 300:
return
else:
self.logger.error(_('ERROR Container update failed '
'(saving for async update later): %(status)d '
'response from %(ip)s:%(port)s/%(dev)s'),
{'status': response.status, 'ip': ip, 'port': port,
'dev': contdevice})
except (Exception, Timeout):
self.logger.exception(_('ERROR container update failed with '
'%(ip)s:%(port)s/%(dev)s (saving for async update later)'),
{'ip': ip, 'port': port, 'dev': contdevice})
async_dir = os.path.join(self.devices, objdevice, ASYNCDIR)
ohash = hash_path(account, container, obj)
write_pickle(
{'op': op, 'account': account, 'container': container,
'obj': obj, 'headers': headers_out},
os.path.join(async_dir, ohash[-3:], ohash + '-' +
normalize_timestamp(headers_out['x-timestamp'])),
os.path.join(self.devices, objdevice, 'tmp'))
def container_update(self, op, account, container, obj, headers_in,
headers_out, objdevice):
"""
Update the container when objects are updated.
:param op: operation performed (ex: 'PUT', or 'DELETE')
:param account: account name for the object
:param container: container name for the object
:param obj: object name
:param headers_in: dictionary of headers from the original request
:param headers_out: dictionary of headers to send in the container
request
:param objdevice: device name that the object is in
"""
host = headers_in.get('X-Container-Host', None)
partition = headers_in.get('X-Container-Partition', None)
contdevice = headers_in.get('X-Container-Device', None)
if not all([host, partition, contdevice]):
return
self.async_update(op, account, container, obj, host, partition,
contdevice, headers_out, objdevice)
def delete_at_update(self, op, delete_at, account, container, obj,
headers_in, objdevice):
"""
Update the expiring objects container when objects are updated.
:param op: operation performed (ex: 'PUT', or 'DELETE')
:param account: account name for the object
:param container: container name for the object
:param obj: object name
:param headers_in: dictionary of headers from the original request
:param objdevice: device name that the object is in
"""
host = partition = contdevice = None
headers_out = {'x-timestamp': headers_in['x-timestamp'],
'x-trans-id': headers_in.get('x-trans-id', '-')}
if op != 'DELETE':
host = headers_in.get('X-Delete-At-Host', None)
partition = headers_in.get('X-Delete-At-Partition', None)
contdevice = headers_in.get('X-Delete-At-Device', None)
headers_out['x-size'] = '0'
headers_out['x-content-type'] = 'text/plain'
headers_out['x-etag'] = 'd41d8cd98f00b204e9800998ecf8427e'
self.async_update(op, self.expiring_objects_account,
str(delete_at / self.expiring_objects_container_divisor *
self.expiring_objects_container_divisor),
'%s-%s/%s/%s' % (delete_at, account, container, obj),
host, partition, contdevice, headers_out, objdevice)
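    # Example (hypothetical values): with the default divisor of 86400,
    # delete_at=90000 maps to container str(90000 / 86400 * 86400) == '86400'
    # (Python 2 integer division floors), so expirations are bucketed per day.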
def POST(self, request):
"""Handle HTTP POST requests for the Swift Object Server."""
try:
device, partition, account, container, obj = \
split_path(unquote(request.path), 5, 5, True)
except ValueError, err:
return HTTPBadRequest(body=str(err), request=request,
content_type='text/plain')
if 'x-timestamp' not in request.headers or \
not check_float(request.headers['x-timestamp']):
return HTTPBadRequest(body='Missing timestamp', request=request,
content_type='text/plain')
new_delete_at = int(request.headers.get('X-Delete-At') or 0)
if new_delete_at and new_delete_at < time.time():
return HTTPBadRequest(body='X-Delete-At in past', request=request,
content_type='text/plain')
if self.mount_check and not check_mount(self.devices, device):
return Response(status='507 %s is not mounted' % device)
file = self.get_DiskFile_obj(self.devices, device, partition, account, container,
obj, self.logger, disk_chunk_size=self.disk_chunk_size)
if 'X-Delete-At' in file.metadata and \
int(file.metadata['X-Delete-At']) <= time.time():
return HTTPNotFound(request=request)
if file.is_deleted():
response_class = HTTPNotFound
else:
response_class = HTTPAccepted
try:
file_size = file.get_data_file_size()
except (DiskFileError, DiskFileNotExist):
file.quarantine()
return HTTPNotFound(request=request)
metadata = {'X-Timestamp': request.headers['x-timestamp']}
metadata.update(val for val in request.headers.iteritems()
if val[0].lower().startswith('x-object-meta-'))
for header_key in self.allowed_headers:
if header_key in request.headers:
header_caps = header_key.title()
metadata[header_caps] = request.headers[header_key]
old_delete_at = int(file.metadata.get('X-Delete-At') or 0)
if old_delete_at != new_delete_at:
if new_delete_at:
self.delete_at_update('PUT', new_delete_at, account, container,
obj, request.headers, device)
if old_delete_at:
self.delete_at_update('DELETE', old_delete_at, account,
container, obj, request.headers, device)
with file.mkstemp() as (fd, tmppath):
file.put(fd, tmppath, metadata, extension='.meta')
return response_class(request=request)
def PUT(self, request):
"""Handle HTTP PUT requests for the Swift Object Server."""
try:
device, partition, account, container, obj = \
split_path(unquote(request.path), 5, 5, True)
except ValueError, err:
return HTTPBadRequest(body=str(err), request=request,
content_type='text/plain')
if self.mount_check and not check_mount(self.devices, device):
return Response(status='507 %s is not mounted' % device)
if 'x-timestamp' not in request.headers or \
not check_float(request.headers['x-timestamp']):
return HTTPBadRequest(body='Missing timestamp', request=request,
content_type='text/plain')
error_response = check_object_creation(request, obj)
if error_response:
return error_response
new_delete_at = int(request.headers.get('X-Delete-At') or 0)
if new_delete_at and new_delete_at < time.time():
return HTTPBadRequest(body='X-Delete-At in past', request=request,
content_type='text/plain')
file = self.get_DiskFile_obj(self.devices, device, partition, account, container,
obj, self.logger, disk_chunk_size=self.disk_chunk_size)
orig_timestamp = file.metadata.get('X-Timestamp')
upload_expiration = time.time() + self.max_upload_time
etag = md5()
upload_size = 0
last_sync = 0
with file.mkstemp() as (fd, tmppath):
if 'content-length' in request.headers:
fallocate(fd, int(request.headers['content-length']))
reader = request.environ['wsgi.input'].read
for chunk in iter(lambda: reader(self.network_chunk_size), ''):
upload_size += len(chunk)
if time.time() > upload_expiration:
return HTTPRequestTimeout(request=request)
etag.update(chunk)
while chunk:
written = os.write(fd, chunk)
chunk = chunk[written:]
# For large files sync every 512MB (by default) written
if upload_size - last_sync >= self.bytes_per_sync:
tpool.execute(os.fdatasync, fd)
drop_buffer_cache(fd, last_sync, upload_size - last_sync)
last_sync = upload_size
if 'content-length' in request.headers and \
int(request.headers['content-length']) != upload_size:
return Response(status='499 Client Disconnect')
etag = etag.hexdigest()
if 'etag' in request.headers and \
request.headers['etag'].lower() != etag:
return HTTPUnprocessableEntity(request=request)
content_type = request.headers['content-type']
if self.fs_object and not content_type:
content_type = FILE_TYPE
            if not self.fs_object:
                metadata = {
                    'X-Timestamp': request.headers['x-timestamp'],
                    'Content-Type': content_type,
                    'ETag': etag,
                    'Content-Length': str(os.fstat(fd).st_size),
                }
            else:
                metadata = {
                    'X-Timestamp': request.headers['x-timestamp'],
                    'Content-Type': content_type,
                    'ETag': etag,
                    'Content-Length': str(os.fstat(fd).st_size),
                    X_TYPE: OBJECT,
                    X_OBJECT_TYPE: FILE,
                }
if self.fs_object and \
request.headers['content-type'].lower() == DIR_TYPE:
metadata.update({X_OBJECT_TYPE: MARKER_DIR})
metadata.update(val for val in request.headers.iteritems()
if val[0].lower().startswith('x-object-meta-') and
len(val[0]) > 14)
for header_key in self.allowed_headers:
if header_key in request.headers:
header_caps = header_key.title()
metadata[header_caps] = request.headers[header_key]
old_delete_at = int(file.metadata.get('X-Delete-At') or 0)
if old_delete_at != new_delete_at:
if new_delete_at:
self.delete_at_update('PUT', new_delete_at, account,
container, obj, request.headers, device)
if old_delete_at:
self.delete_at_update('DELETE', old_delete_at, account,
container, obj, request.headers, device)
file.put(fd, tmppath, metadata)
file.unlinkold(metadata['X-Timestamp'])
if not orig_timestamp or \
orig_timestamp < request.headers['x-timestamp']:
self.container_update('PUT', account, container, obj,
request.headers,
{'x-size': file.metadata['Content-Length'],
'x-content-type': file.metadata['Content-Type'],
'x-timestamp': file.metadata['X-Timestamp'],
'x-etag': file.metadata['ETag'],
'x-trans-id': request.headers.get('x-trans-id', '-')},
device)
resp = HTTPCreated(request=request, etag=etag)
return resp
def GET(self, request):
"""Handle HTTP GET requests for the Swift Object Server."""
try:
device, partition, account, container, obj = \
split_path(unquote(request.path), 5, 5, True)
except ValueError, err:
return HTTPBadRequest(body=str(err), request=request,
content_type='text/plain')
if self.mount_check and not check_mount(self.devices, device):
return Response(status='507 %s is not mounted' % device)
file = self.get_DiskFile_obj(self.devices, device, partition, account, container,
obj, self.logger, keep_data_fp=True,
disk_chunk_size=self.disk_chunk_size)
if file.is_deleted() or ('X-Delete-At' in file.metadata and
int(file.metadata['X-Delete-At']) <= time.time()):
if request.headers.get('if-match') == '*':
return HTTPPreconditionFailed(request=request)
else:
return HTTPNotFound(request=request)
try:
file_size = file.get_data_file_size()
except (DiskFileError, DiskFileNotExist):
file.quarantine()
return HTTPNotFound(request=request)
if request.headers.get('if-match') not in (None, '*') and \
file.metadata['ETag'] not in request.if_match:
file.close()
return HTTPPreconditionFailed(request=request)
        if request.headers.get('if-none-match') is not None:
if file.metadata['ETag'] in request.if_none_match:
resp = HTTPNotModified(request=request)
resp.etag = file.metadata['ETag']
file.close()
return resp
try:
if_unmodified_since = request.if_unmodified_since
except (OverflowError, ValueError):
# catches timestamps before the epoch
return HTTPPreconditionFailed(request=request)
if if_unmodified_since and \
datetime.fromtimestamp(float(file.metadata['X-Timestamp']), UTC) > \
if_unmodified_since:
file.close()
return HTTPPreconditionFailed(request=request)
try:
if_modified_since = request.if_modified_since
except (OverflowError, ValueError):
# catches timestamps before the epoch
return HTTPPreconditionFailed(request=request)
if if_modified_since and \
datetime.fromtimestamp(float(file.metadata['X-Timestamp']), UTC) < \
if_modified_since:
file.close()
return HTTPNotModified(request=request)
response = Response(app_iter=file,
request=request, conditional_response=True)
response.headers['Content-Type'] = file.metadata.get('Content-Type',
'application/octet-stream')
for key, value in file.metadata.iteritems():
if key.lower().startswith('x-object-meta-') or \
key.lower() in self.allowed_headers:
response.headers[key] = value
response.etag = file.metadata['ETag']
response.last_modified = float(file.metadata['X-Timestamp'])
response.content_length = file_size
if response.content_length < KEEP_CACHE_SIZE and \
'X-Auth-Token' not in request.headers and \
'X-Storage-Token' not in request.headers:
file.keep_cache = True
if 'Content-Encoding' in file.metadata:
response.content_encoding = file.metadata['Content-Encoding']
response.headers['X-Timestamp'] = file.metadata['X-Timestamp']
return request.get_response(response)
def HEAD(self, request):
"""Handle HTTP HEAD requests for the Swift Object Server."""
try:
device, partition, account, container, obj = \
split_path(unquote(request.path), 5, 5, True)
except ValueError, err:
resp = HTTPBadRequest(request=request)
resp.content_type = 'text/plain'
resp.body = str(err)
return resp
if self.mount_check and not check_mount(self.devices, device):
return Response(status='507 %s is not mounted' % device)
file = self.get_DiskFile_obj(self.devices, device, partition, account, container,
obj, self.logger, disk_chunk_size=self.disk_chunk_size)
if file.is_deleted() or ('X-Delete-At' in file.metadata and
int(file.metadata['X-Delete-At']) <= time.time()):
return HTTPNotFound(request=request)
try:
file_size = file.get_data_file_size()
except (DiskFileError, DiskFileNotExist):
file.quarantine()
return HTTPNotFound(request=request)
response = Response(request=request, conditional_response=True)
response.headers['Content-Type'] = file.metadata.get('Content-Type',
'application/octet-stream')
for key, value in file.metadata.iteritems():
if key.lower().startswith('x-object-meta-') or \
key.lower() in self.allowed_headers:
response.headers[key] = value
response.etag = file.metadata['ETag']
response.last_modified = float(file.metadata['X-Timestamp'])
# Needed for container sync feature
response.headers['X-Timestamp'] = file.metadata['X-Timestamp']
response.content_length = file_size
if 'Content-Encoding' in file.metadata:
response.content_encoding = file.metadata['Content-Encoding']
return response
def DELETE(self, request):
"""Handle HTTP DELETE requests for the Swift Object Server."""
try:
device, partition, account, container, obj = \
split_path(unquote(request.path), 5, 5, True)
except ValueError, e:
return HTTPBadRequest(body=str(e), request=request,
content_type='text/plain')
if 'x-timestamp' not in request.headers or \
not check_float(request.headers['x-timestamp']):
return HTTPBadRequest(body='Missing timestamp', request=request,
content_type='text/plain')
if self.mount_check and not check_mount(self.devices, device):
return Response(status='507 %s is not mounted' % device)
response_class = HTTPNoContent
file = self.get_DiskFile_obj(self.devices, device, partition, account, container,
obj, self.logger, disk_chunk_size=self.disk_chunk_size)
if 'x-if-delete-at' in request.headers and \
int(request.headers['x-if-delete-at']) != \
int(file.metadata.get('X-Delete-At') or 0):
return HTTPPreconditionFailed(request=request,
body='X-If-Delete-At and X-Delete-At do not match')
orig_timestamp = file.metadata.get('X-Timestamp')
if file.is_deleted():
response_class = HTTPNotFound
metadata = {
'X-Timestamp': request.headers['X-Timestamp'], 'deleted': True,
}
with file.mkstemp() as (fd, tmppath):
old_delete_at = int(file.metadata.get('X-Delete-At') or 0)
if old_delete_at:
self.delete_at_update('DELETE', old_delete_at, account,
container, obj, request.headers, device)
file.put(fd, tmppath, metadata, extension='.ts')
file.unlinkold(metadata['X-Timestamp'])
if not orig_timestamp or \
orig_timestamp < request.headers['x-timestamp']:
self.container_update('DELETE', account, container, obj,
request.headers, {'x-timestamp': metadata['X-Timestamp'],
'x-trans-id': request.headers.get('x-trans-id', '-')},
device)
resp = response_class(request=request)
return resp
def REPLICATE(self, request):
"""
Handle REPLICATE requests for the Swift Object Server. This is used
by the object replicator to get hashes for directories.
"""
try:
device, partition, suffix = split_path(
unquote(request.path), 2, 3, True)
except ValueError, e:
return HTTPBadRequest(body=str(e), request=request,
content_type='text/plain')
if self.mount_check and not check_mount(self.devices, device):
return Response(status='507 %s is not mounted' % device)
path = os.path.join(self.devices, device, DATADIR, partition)
if not os.path.exists(path):
mkdirs(path)
suffixes = suffix.split('-') if suffix else []
_junk, hashes = tpool.execute(tpooled_get_hashes, path,
recalculate=suffixes)
# See tpooled_get_hashes "Hack".
if isinstance(hashes, BaseException):
raise hashes
return Response(body=pickle.dumps(hashes))
def plugin(self, env):
if env.get('Gluster_enabled', False):
self.fs_object = env.get('fs_object')
self.devices = env.get('root')
self.mount_check = False
else:
self.fs_object = None
def __call__(self, env, start_response):
"""WSGI Application entry point for the Swift Object Server."""
start_time = time.time()
self.plugin(env)
req = Request(env)
self.logger.txn_id = req.headers.get('x-trans-id', None)
if not check_utf8(req.path_info):
res = HTTPPreconditionFailed(body='Invalid UTF8')
else:
try:
if hasattr(self, req.method):
res = getattr(self, req.method)(req)
else:
res = HTTPMethodNotAllowed()
except (Exception, Timeout):
self.logger.exception(_('ERROR __call__ error with %(method)s'
' %(path)s '), {'method': req.method, 'path': req.path})
res = HTTPInternalServerError(body=traceback.format_exc())
trans_time = time.time() - start_time
if self.log_requests:
log_line = '%s - - [%s] "%s %s" %s %s "%s" "%s" "%s" %.4f' % (
req.remote_addr,
time.strftime('%d/%b/%Y:%H:%M:%S +0000',
time.gmtime()),
req.method, req.path, res.status.split()[0],
res.content_length or '-', req.referer or '-',
req.headers.get('x-trans-id', '-'),
req.user_agent or '-',
trans_time)
if req.method == 'REPLICATE':
self.logger.debug(log_line)
else:
self.logger.info(log_line)
if req.method in ('PUT', 'DELETE'):
slow = self.slow - trans_time
if slow > 0:
sleep(slow)
return res(env, start_response)
def app_factory(global_conf, **local_conf):
"""paste.deploy app factory for creating WSGI object server apps"""
conf = global_conf.copy()
conf.update(local_conf)
return ObjectController(conf)
| mja054/swift_plugin | swift/obj/server.py | Python | apache-2.0 | 39,651 |
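A small runnable sketch of how the ObjectController constructor above derives allowed_headers from a comma-separated conf value; DISALLOWED_HEADERS and the default list are copied from the file, while the conf dict is hypothetical.

DISALLOWED_HEADERS = set('content-length content-type deleted etag'.split())
default_allowed_headers = 'content-disposition, content-encoding, x-delete-at, x-object-manifest'
conf = {'allowed_headers': 'X-Custom-Meta, Content-Type, x-delete-at'}

allowed = set(i.strip().lower()
              for i in conf.get('allowed_headers', default_allowed_headers).split(',')
              if i.strip() and i.strip().lower() not in DISALLOWED_HEADERS)
print(sorted(allowed))  # -> ['x-custom-meta', 'x-delete-at']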
""" Cisco_IOS_XR_ipv4_acl_cfg
This module contains a collection of YANG definitions
for Cisco IOS\-XR ipv4\-acl package configuration.
This module contains definitions
for the following management objects\:
ipv4\-acl\-and\-prefix\-list\: IPv4 ACL configuration data
Copyright (c) 2013\-2016 by Cisco Systems, Inc.
All rights reserved.
"""
import re
import collections
from enum import Enum
from ydk.types import Empty, YList, YLeafList, DELETE, Decimal64, FixedBitsDict
from ydk.errors import YPYError, YPYModelError
class NextHopTypeEnum(Enum):
"""
NextHopTypeEnum
Next\-hop type.
.. data:: none_next_hop = 0
None next-hop.
.. data:: regular_next_hop = 1
Regular next-hop.
.. data:: default_next_hop = 2
Default next-hop.
"""
none_next_hop = 0
regular_next_hop = 1
default_next_hop = 2
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_acl_cfg as meta
return meta._meta_table['NextHopTypeEnum']
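# Usage sketch (hypothetical): the generated class behaves like a standard Python
# Enum, e.g. NextHopTypeEnum.regular_next_hop.value == 1, and its members are what
# get assigned to next-hop leaves in the model classes below.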
class Ipv4AclAndPrefixList(object):
"""
IPv4 ACL configuration data
.. attribute:: accesses
Table of access lists. Entries in this table and the AccessListExistenceTable table must be kept consistent
**type**\: :py:class:`Accesses <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_acl_cfg.Ipv4AclAndPrefixList.Accesses>`
.. attribute:: log_update
Control access lists log updates
**type**\: :py:class:`LogUpdate <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_acl_cfg.Ipv4AclAndPrefixList.LogUpdate>`
.. attribute:: prefixes
Table of ACL prefix lists. Entries in this table and the PrefixListExistenceTable table must be kept consistent
**type**\: :py:class:`Prefixes <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_acl_cfg.Ipv4AclAndPrefixList.Prefixes>`
"""
_prefix = 'ipv4-acl-cfg'
_revision = '2016-11-07'
def __init__(self):
self.accesses = Ipv4AclAndPrefixList.Accesses()
self.accesses.parent = self
self.log_update = Ipv4AclAndPrefixList.LogUpdate()
self.log_update.parent = self
self.prefixes = Ipv4AclAndPrefixList.Prefixes()
self.prefixes.parent = self
class Accesses(object):
"""
Table of access lists. Entries in this table
and the AccessListExistenceTable table must be
kept consistent
.. attribute:: access
An ACL
**type**\: list of :py:class:`Access <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_acl_cfg.Ipv4AclAndPrefixList.Accesses.Access>`
"""
_prefix = 'ipv4-acl-cfg'
_revision = '2016-11-07'
def __init__(self):
self.parent = None
self.access = YList()
self.access.parent = self
self.access.name = 'access'
class Access(object):
"""
An ACL
.. attribute:: access_list_name <key>
Access list name \- 64 characters max
**type**\: str
.. attribute:: access_list_entries
ACL entry table; contains list of ACEs
**type**\: :py:class:`AccessListEntries <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_acl_cfg.Ipv4AclAndPrefixList.Accesses.Access.AccessListEntries>`
"""
_prefix = 'ipv4-acl-cfg'
_revision = '2016-11-07'
def __init__(self):
self.parent = None
self.access_list_name = None
self.access_list_entries = Ipv4AclAndPrefixList.Accesses.Access.AccessListEntries()
self.access_list_entries.parent = self
class AccessListEntries(object):
"""
ACL entry table; contains list of ACEs
.. attribute:: access_list_entry
An ACL entry; either a description (remark) or an ACE to match against
**type**\: list of :py:class:`AccessListEntry <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_acl_cfg.Ipv4AclAndPrefixList.Accesses.Access.AccessListEntries.AccessListEntry>`
"""
_prefix = 'ipv4-acl-cfg'
_revision = '2016-11-07'
def __init__(self):
self.parent = None
self.access_list_entry = YList()
self.access_list_entry.parent = self
self.access_list_entry.name = 'access_list_entry'
class AccessListEntry(object):
"""
An ACL entry; either a description (remark)
or an ACE to match against
.. attribute:: sequence_number <key>
Sequence number for this entry
**type**\: int
**range:** 1..2147483646
.. attribute:: capture
Enable capture
**type**\: bool
.. attribute:: counter_name
Counter name
**type**\: str
.. attribute:: destination_network
Destination network settings
**type**\: :py:class:`DestinationNetwork <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_acl_cfg.Ipv4AclAndPrefixList.Accesses.Access.AccessListEntries.AccessListEntry.DestinationNetwork>`
.. attribute:: destination_port
Destination port settings
**type**\: :py:class:`DestinationPort <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_acl_cfg.Ipv4AclAndPrefixList.Accesses.Access.AccessListEntries.AccessListEntry.DestinationPort>`
.. attribute:: destination_port_group
Destination port object group name
**type**\: str
**length:** 1..64
.. attribute:: destination_prefix_group
IPv4 destination network object group name
**type**\: str
**length:** 1..64
.. attribute:: dscp
DSCP settings
**type**\: :py:class:`Dscp <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_acl_cfg.Ipv4AclAndPrefixList.Accesses.Access.AccessListEntries.AccessListEntry.Dscp>`
.. attribute:: fragment_offset
Fragment\-offset settings
**type**\: :py:class:`FragmentOffset <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_acl_cfg.Ipv4AclAndPrefixList.Accesses.Access.AccessListEntries.AccessListEntry.FragmentOffset>`
.. attribute:: fragments
                    Check non\-initial fragments. Item is mutually exclusive with TCP, SCTP, UDP, IGMP and ICMP comparisons and with logging
**type**\: :py:class:`Empty<ydk.types.Empty>`
.. attribute:: grant
Whether to forward or drop packets matching the ACE
**type**\: :py:class:`Ipv4AclGrantEnumEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_acl_datatypes.Ipv4AclGrantEnumEnum>`
.. attribute:: icmp
ICMP settings
**type**\: :py:class:`Icmp <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_acl_cfg.Ipv4AclAndPrefixList.Accesses.Access.AccessListEntries.AccessListEntry.Icmp>`
.. attribute:: icmp_off
To turn off ICMP generation for deny ACEs
**type**\: :py:class:`Empty<ydk.types.Empty>`
.. attribute:: igmp_message_type
IGMP message type to match. Leave unspecified if no message type comparison is to be done
**type**\: one of the below types:
**type**\: :py:class:`Ipv4AclIgmpNumberEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_acl_datatypes.Ipv4AclIgmpNumberEnum>`
----
**type**\: int
**range:** 0..255
----
.. attribute:: log_option
Whether and how to log matches against this entry
**type**\: :py:class:`Ipv4AclLoggingEnumEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_acl_datatypes.Ipv4AclLoggingEnumEnum>`
.. attribute:: next_hop
Next\-hop settings
**type**\: :py:class:`NextHop <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_acl_cfg.Ipv4AclAndPrefixList.Accesses.Access.AccessListEntries.AccessListEntry.NextHop>`
.. attribute:: packet_length
Packet length settings
**type**\: :py:class:`PacketLength <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_acl_cfg.Ipv4AclAndPrefixList.Accesses.Access.AccessListEntries.AccessListEntry.PacketLength>`
.. attribute:: precedence
                    Precedence value to match (if a protocol was specified), leave unspecified if precedence comparison is not to be performed
**type**\: one of the below types:
**type**\: :py:class:`Ipv4AclPrecedenceNumberEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_acl_datatypes.Ipv4AclPrecedenceNumberEnum>`
----
**type**\: int
**range:** 0..7
----
.. attribute:: protocol
Protocol to match
**type**\: one of the below types:
**type**\: :py:class:`Ipv4AclProtocolNumberEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_acl_datatypes.Ipv4AclProtocolNumberEnum>`
----
**type**\: int
**range:** 0..255
----
.. attribute:: protocol2
Protocol2 to match
**type**\: one of the below types:
**type**\: :py:class:`Ipv4AclProtocolNumberEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_acl_datatypes.Ipv4AclProtocolNumberEnum>`
----
**type**\: int
**range:** 0..255
----
.. attribute:: protocol_operator
Protocol operator. Leave unspecified if no protocol comparison is to be done
**type**\: :py:class:`Ipv4AclOperatorEnumEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_acl_datatypes.Ipv4AclOperatorEnumEnum>`
.. attribute:: qos_group
Set qos\-group number
**type**\: int
**range:** 0..512
.. attribute:: remark
Comments or a description for the access list
**type**\: str
.. attribute:: sequence_str
Sequence String for the ace
**type**\: str
**length:** 1..64
.. attribute:: source_network
Source network settings
**type**\: :py:class:`SourceNetwork <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_acl_cfg.Ipv4AclAndPrefixList.Accesses.Access.AccessListEntries.AccessListEntry.SourceNetwork>`
.. attribute:: source_port
Source port settings
**type**\: :py:class:`SourcePort <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_acl_cfg.Ipv4AclAndPrefixList.Accesses.Access.AccessListEntries.AccessListEntry.SourcePort>`
.. attribute:: source_port_group
Source port object group name
**type**\: str
**length:** 1..64
.. attribute:: source_prefix_group
IPv4 source network object group name
**type**\: str
**length:** 1..64
.. attribute:: tcp
TCP settings
**type**\: :py:class:`Tcp <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_acl_cfg.Ipv4AclAndPrefixList.Accesses.Access.AccessListEntries.AccessListEntry.Tcp>`
.. attribute:: time_to_live
TTL settings
**type**\: :py:class:`TimeToLive <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_acl_cfg.Ipv4AclAndPrefixList.Accesses.Access.AccessListEntries.AccessListEntry.TimeToLive>`
"""
_prefix = 'ipv4-acl-cfg'
_revision = '2016-11-07'
def __init__(self):
self.parent = None
self.sequence_number = None
self.capture = None
self.counter_name = None
self.destination_network = Ipv4AclAndPrefixList.Accesses.Access.AccessListEntries.AccessListEntry.DestinationNetwork()
self.destination_network.parent = self
self.destination_port = Ipv4AclAndPrefixList.Accesses.Access.AccessListEntries.AccessListEntry.DestinationPort()
self.destination_port.parent = self
self.destination_port_group = None
self.destination_prefix_group = None
self.dscp = Ipv4AclAndPrefixList.Accesses.Access.AccessListEntries.AccessListEntry.Dscp()
self.dscp.parent = self
self.fragment_offset = Ipv4AclAndPrefixList.Accesses.Access.AccessListEntries.AccessListEntry.FragmentOffset()
self.fragment_offset.parent = self
self.fragments = None
self.grant = None
self.icmp = Ipv4AclAndPrefixList.Accesses.Access.AccessListEntries.AccessListEntry.Icmp()
self.icmp.parent = self
self.icmp_off = None
self.igmp_message_type = None
self.log_option = None
self.next_hop = Ipv4AclAndPrefixList.Accesses.Access.AccessListEntries.AccessListEntry.NextHop()
self.next_hop.parent = self
self.packet_length = Ipv4AclAndPrefixList.Accesses.Access.AccessListEntries.AccessListEntry.PacketLength()
self.packet_length.parent = self
self.precedence = None
self.protocol = None
self.protocol2 = None
self.protocol_operator = None
self.qos_group = None
self.remark = None
self.sequence_str = None
self.source_network = Ipv4AclAndPrefixList.Accesses.Access.AccessListEntries.AccessListEntry.SourceNetwork()
self.source_network.parent = self
self.source_port = Ipv4AclAndPrefixList.Accesses.Access.AccessListEntries.AccessListEntry.SourcePort()
self.source_port.parent = self
self.source_port_group = None
self.source_prefix_group = None
self.tcp = Ipv4AclAndPrefixList.Accesses.Access.AccessListEntries.AccessListEntry.Tcp()
self.tcp.parent = self
self.time_to_live = Ipv4AclAndPrefixList.Accesses.Access.AccessListEntries.AccessListEntry.TimeToLive()
self.time_to_live.parent = self
class SourceNetwork(object):
"""
Source network settings.
.. attribute:: source_address
Source IPv4 address to match, leave unspecified for any
**type**\: str
**pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
.. attribute:: source_prefix_length
Prefix length to apply to source address (if specified), leave unspecified for no wildcarding
**type**\: int
**range:** 0..32
.. attribute:: source_wild_card_bits
Wildcard bits to apply to source address (if specified), leave unspecified for no wildcarding
**type**\: str
**pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
"""
_prefix = 'ipv4-acl-cfg'
_revision = '2016-11-07'
def __init__(self):
self.parent = None
self.source_address = None
self.source_prefix_length = None
self.source_wild_card_bits = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-ipv4-acl-cfg:source-network'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if self.source_address is not None:
return True
if self.source_prefix_length is not None:
return True
if self.source_wild_card_bits is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_acl_cfg as meta
return meta._meta_table['Ipv4AclAndPrefixList.Accesses.Access.AccessListEntries.AccessListEntry.SourceNetwork']['meta_info']
class DestinationNetwork(object):
"""
Destination network settings.
.. attribute:: destination_address
Destination IPv4 address to match (if a protocol was specified), leave unspecified for any
**type**\: str
**pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
.. attribute:: destination_prefix_length
Prefix length to apply to destination address (if specified), leave unspecified for no wildcarding
**type**\: int
**range:** 0..32
.. attribute:: destination_wild_card_bits
Wildcard bits to apply to destination address (if specified), leave unspecified for no wildcarding
**type**\: str
**pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
"""
_prefix = 'ipv4-acl-cfg'
_revision = '2016-11-07'
def __init__(self):
self.parent = None
self.destination_address = None
self.destination_prefix_length = None
self.destination_wild_card_bits = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-ipv4-acl-cfg:destination-network'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if self.destination_address is not None:
return True
if self.destination_prefix_length is not None:
return True
if self.destination_wild_card_bits is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_acl_cfg as meta
return meta._meta_table['Ipv4AclAndPrefixList.Accesses.Access.AccessListEntries.AccessListEntry.DestinationNetwork']['meta_info']
class SourcePort(object):
"""
Source port settings.
.. attribute:: first_source_port
First source port for comparison, leave unspecified if source port comparison is not to be performed
**type**\: one of the below types:
**type**\: :py:class:`Ipv4AclPortNumberEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_acl_datatypes.Ipv4AclPortNumberEnum>`
----
**type**\: int
**range:** 0..65535
----
.. attribute:: second_source_port
Second source port for comparison, leave unspecified if source port comparison is not to be performed
**type**\: one of the below types:
**type**\: :py:class:`Ipv4AclPortNumberEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_acl_datatypes.Ipv4AclPortNumberEnum>`
----
**type**\: int
**range:** 0..65535
----
.. attribute:: source_operator
Source comparison operator. Leave unspecified if no source port comparison is to be done
**type**\: :py:class:`Ipv4AclOperatorEnumEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_acl_datatypes.Ipv4AclOperatorEnumEnum>`
"""
_prefix = 'ipv4-acl-cfg'
_revision = '2016-11-07'
def __init__(self):
self.parent = None
self.first_source_port = None
self.second_source_port = None
self.source_operator = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-ipv4-acl-cfg:source-port'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if self.first_source_port is not None:
return True
if self.second_source_port is not None:
return True
if self.source_operator is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_acl_cfg as meta
return meta._meta_table['Ipv4AclAndPrefixList.Accesses.Access.AccessListEntries.AccessListEntry.SourcePort']['meta_info']
class DestinationPort(object):
"""
Destination port settings.
.. attribute:: destination_operator
Destination comparison operator. Leave unspecified if no destination port comparison is to be done
**type**\: :py:class:`Ipv4AclOperatorEnumEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_acl_datatypes.Ipv4AclOperatorEnumEnum>`
.. attribute:: first_destination_port
First destination port for comparison, leave unspecified if destination port comparison is not to be performed
**type**\: one of the below types:
**type**\: :py:class:`Ipv4AclPortNumberEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_acl_datatypes.Ipv4AclPortNumberEnum>`
----
**type**\: int
**range:** 0..65535
----
.. attribute:: second_destination_port
Second destination port for comparison, leave unspecified if destination port comparison is not to be performed
**type**\: one of the below types:
**type**\: :py:class:`Ipv4AclPortNumberEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_acl_datatypes.Ipv4AclPortNumberEnum>`
----
**type**\: int
**range:** 0..65535
----
"""
_prefix = 'ipv4-acl-cfg'
_revision = '2016-11-07'
def __init__(self):
self.parent = None
self.destination_operator = None
self.first_destination_port = None
self.second_destination_port = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-ipv4-acl-cfg:destination-port'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if self.destination_operator is not None:
return True
if self.first_destination_port is not None:
return True
if self.second_destination_port is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_acl_cfg as meta
return meta._meta_table['Ipv4AclAndPrefixList.Accesses.Access.AccessListEntries.AccessListEntry.DestinationPort']['meta_info']
class Icmp(object):
"""
ICMP settings.
.. attribute:: icmp_type_code
Well\-known ICMP message code types to match, leave unspecified if ICMP message code type comparison is not to be performed
**type**\: :py:class:`Ipv4AclIcmpTypeCodeEnumEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_acl_datatypes.Ipv4AclIcmpTypeCodeEnumEnum>`
"""
_prefix = 'ipv4-acl-cfg'
_revision = '2016-11-07'
def __init__(self):
self.parent = None
self.icmp_type_code = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-ipv4-acl-cfg:icmp'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if self.icmp_type_code is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_acl_cfg as meta
return meta._meta_table['Ipv4AclAndPrefixList.Accesses.Access.AccessListEntries.AccessListEntry.Icmp']['meta_info']
class Tcp(object):
"""
TCP settings.
.. attribute:: tcp_bits
TCP bits to match. Leave unspecified if comparison of TCP bits is not required
**type**\: one of the below types:
**type**\: :py:class:`Ipv4AclTcpBitsNumberEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_acl_datatypes.Ipv4AclTcpBitsNumberEnum>`
----
**type**\: int
**range:** 0..63
----
.. attribute:: tcp_bits_mask
TCP bits mask to use for flexible TCP matching. Leave unspecified if tcp\-bits\-match\-operator is unspecified
**type**\: one of the below types:
**type**\: :py:class:`Ipv4AclTcpBitsNumberEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_acl_datatypes.Ipv4AclTcpBitsNumberEnum>`
----
**type**\: int
**range:** 0..63
----
.. attribute:: tcp_bits_match_operator
TCP Bits match operator. Leave unspecified if flexible comparison of TCP bits is not required
**type**\: :py:class:`Ipv4AclTcpMatchOperatorEnumEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_acl_datatypes.Ipv4AclTcpMatchOperatorEnumEnum>`
"""
_prefix = 'ipv4-acl-cfg'
_revision = '2016-11-07'
def __init__(self):
self.parent = None
self.tcp_bits = None
self.tcp_bits_mask = None
self.tcp_bits_match_operator = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-ipv4-acl-cfg:tcp'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if self.tcp_bits is not None:
return True
if self.tcp_bits_mask is not None:
return True
if self.tcp_bits_match_operator is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_acl_cfg as meta
return meta._meta_table['Ipv4AclAndPrefixList.Accesses.Access.AccessListEntries.AccessListEntry.Tcp']['meta_info']
class PacketLength(object):
"""
Packet length settings.
.. attribute:: packet_length_max
Maximum packet length for comparison, leave unspecified if packet length comparison is not to be performed or if only the minimum packet length should be considered
**type**\: int
**range:** 0..65535
.. attribute:: packet_length_min
Minimum packet length for comparison, leave unspecified if packet length comparison is not to be performed or if only the maximum packet length should be considered
**type**\: int
**range:** 0..65535
.. attribute:: packet_length_operator
Packet length operator, applicable if packet length is to be compared. Leave unspecified if no packet length comparison is to be done
**type**\: :py:class:`Ipv4AclOperatorEnumEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_acl_datatypes.Ipv4AclOperatorEnumEnum>`
"""
_prefix = 'ipv4-acl-cfg'
_revision = '2016-11-07'
def __init__(self):
self.parent = None
self.packet_length_max = None
self.packet_length_min = None
self.packet_length_operator = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-ipv4-acl-cfg:packet-length'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if self.packet_length_max is not None:
return True
if self.packet_length_min is not None:
return True
if self.packet_length_operator is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_acl_cfg as meta
return meta._meta_table['Ipv4AclAndPrefixList.Accesses.Access.AccessListEntries.AccessListEntry.PacketLength']['meta_info']
class TimeToLive(object):
"""
TTL settings.
.. attribute:: time_to_live_max
Maximum TTL for comparison, leave unspecified if TTL comparison is not to be performed or if only the minimum TTL should be considered
**type**\: int
**range:** 0..255
.. attribute:: time_to_live_min
TTL value for comparison OR minimum TTL value for TTL range comparison, leave unspecified if TTL classification is not required
**type**\: int
**range:** 0..255
.. attribute:: time_to_live_operator
TTL operator is applicable if TTL is to be compared. Leave unspecified if TTL classification is not required
**type**\: :py:class:`Ipv4AclOperatorEnumEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_acl_datatypes.Ipv4AclOperatorEnumEnum>`
"""
_prefix = 'ipv4-acl-cfg'
_revision = '2016-11-07'
def __init__(self):
self.parent = None
self.time_to_live_max = None
self.time_to_live_min = None
self.time_to_live_operator = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-ipv4-acl-cfg:time-to-live'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if self.time_to_live_max is not None:
return True
if self.time_to_live_min is not None:
return True
if self.time_to_live_operator is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_acl_cfg as meta
return meta._meta_table['Ipv4AclAndPrefixList.Accesses.Access.AccessListEntries.AccessListEntry.TimeToLive']['meta_info']
class FragmentOffset(object):
"""
Fragment\-offset settings.
.. attribute:: fragment_offset_1
Fragment\-offset value for comparison or first fragment\-offset value for fragment\-offset range comparison, leave unspecified if fragment\-offset classification is not required
**type**\: int
**range:** 0..8191
.. attribute:: fragment_offset_2
Second fragment\-offset value for comparison, leave unspecified if fragment\-offset comparison is not to be performed or if only the first fragment\-offset should be considered
**type**\: int
**range:** 0..8191
.. attribute:: fragment_offset_operator
Fragment\-offset operator if fragment\-offset is to be compared. Leave unspecified if fragment\-offset classification is not required
**type**\: :py:class:`Ipv4AclOperatorEnumEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_acl_datatypes.Ipv4AclOperatorEnumEnum>`
"""
_prefix = 'ipv4-acl-cfg'
_revision = '2016-11-07'
def __init__(self):
self.parent = None
self.fragment_offset_1 = None
self.fragment_offset_2 = None
self.fragment_offset_operator = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-ipv4-acl-cfg:fragment-offset'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if self.fragment_offset_1 is not None:
return True
if self.fragment_offset_2 is not None:
return True
if self.fragment_offset_operator is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_acl_cfg as meta
return meta._meta_table['Ipv4AclAndPrefixList.Accesses.Access.AccessListEntries.AccessListEntry.FragmentOffset']['meta_info']
class NextHop(object):
"""
Next\-hop settings.
.. attribute:: next_hop_1
The first next\-hop settings
**type**\: :py:class:`NextHop1 <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_acl_cfg.Ipv4AclAndPrefixList.Accesses.Access.AccessListEntries.AccessListEntry.NextHop.NextHop1>`
.. attribute:: next_hop_2
The second next\-hop settings
**type**\: :py:class:`NextHop2 <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_acl_cfg.Ipv4AclAndPrefixList.Accesses.Access.AccessListEntries.AccessListEntry.NextHop.NextHop2>`
.. attribute:: next_hop_3
The third next\-hop settings
**type**\: :py:class:`NextHop3 <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_acl_cfg.Ipv4AclAndPrefixList.Accesses.Access.AccessListEntries.AccessListEntry.NextHop.NextHop3>`
.. attribute:: next_hop_type
The nexthop type
**type**\: :py:class:`NextHopTypeEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_acl_cfg.NextHopTypeEnum>`
"""
_prefix = 'ipv4-acl-cfg'
_revision = '2016-11-07'
def __init__(self):
self.parent = None
self.next_hop_1 = Ipv4AclAndPrefixList.Accesses.Access.AccessListEntries.AccessListEntry.NextHop.NextHop1()
self.next_hop_1.parent = self
self.next_hop_2 = Ipv4AclAndPrefixList.Accesses.Access.AccessListEntries.AccessListEntry.NextHop.NextHop2()
self.next_hop_2.parent = self
self.next_hop_3 = Ipv4AclAndPrefixList.Accesses.Access.AccessListEntries.AccessListEntry.NextHop.NextHop3()
self.next_hop_3.parent = self
self.next_hop_type = None
class NextHop1(object):
"""
The first next\-hop settings.
.. attribute:: next_hop
The IPv4 address of the next\-hop
**type**\: str
**pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
.. attribute:: track_name
The object tracking name for the next\-hop
**type**\: str
.. attribute:: vrf_name
The VRF name of the next\-hop
**type**\: str
"""
_prefix = 'ipv4-acl-cfg'
_revision = '2016-11-07'
def __init__(self):
self.parent = None
self.next_hop = None
self.track_name = None
self.vrf_name = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-ipv4-acl-cfg:next-hop-1'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if self.next_hop is not None:
return True
if self.track_name is not None:
return True
if self.vrf_name is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_acl_cfg as meta
return meta._meta_table['Ipv4AclAndPrefixList.Accesses.Access.AccessListEntries.AccessListEntry.NextHop.NextHop1']['meta_info']
class NextHop2(object):
"""
The second next\-hop settings.
.. attribute:: next_hop
The IPv4 address of the next\-hop
**type**\: str
**pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
.. attribute:: track_name
The object tracking name for the next\-hop
**type**\: str
.. attribute:: vrf_name
The VRF name of the next\-hop
**type**\: str
"""
_prefix = 'ipv4-acl-cfg'
_revision = '2016-11-07'
def __init__(self):
self.parent = None
self.next_hop = None
self.track_name = None
self.vrf_name = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-ipv4-acl-cfg:next-hop-2'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if self.next_hop is not None:
return True
if self.track_name is not None:
return True
if self.vrf_name is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_acl_cfg as meta
return meta._meta_table['Ipv4AclAndPrefixList.Accesses.Access.AccessListEntries.AccessListEntry.NextHop.NextHop2']['meta_info']
class NextHop3(object):
"""
The third next\-hop settings.
.. attribute:: next_hop
The IPv4 address of the next\-hop
**type**\: str
**pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
.. attribute:: track_name
The object tracking name for the next\-hop
**type**\: str
.. attribute:: vrf_name
The VRF name of the next\-hop
**type**\: str
"""
_prefix = 'ipv4-acl-cfg'
_revision = '2016-11-07'
def __init__(self):
self.parent = None
self.next_hop = None
self.track_name = None
self.vrf_name = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-ipv4-acl-cfg:next-hop-3'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if self.next_hop is not None:
return True
if self.track_name is not None:
return True
if self.vrf_name is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_acl_cfg as meta
return meta._meta_table['Ipv4AclAndPrefixList.Accesses.Access.AccessListEntries.AccessListEntry.NextHop.NextHop3']['meta_info']
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-ipv4-acl-cfg:next-hop'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if self.next_hop_1 is not None and self.next_hop_1._has_data():
return True
if self.next_hop_2 is not None and self.next_hop_2._has_data():
return True
if self.next_hop_3 is not None and self.next_hop_3._has_data():
return True
if self.next_hop_type is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_acl_cfg as meta
return meta._meta_table['Ipv4AclAndPrefixList.Accesses.Access.AccessListEntries.AccessListEntry.NextHop']['meta_info']
class Dscp(object):
"""
DSCP settings.
.. attribute:: dscp_max
Maximum DSCP value for comparison, leave unspecified if DSCP comparison is not to be performed or if only the minimum DSCP should be considered
**type**\: one of the below types:
**type**\: :py:class:`Ipv4AclDscpNumberEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_acl_datatypes.Ipv4AclDscpNumberEnum>`
----
**type**\: int
**range:** 0..63
----
.. attribute:: dscp_min
DSCP value to match or minimum DSCP value for DSCP range comparison, leave unspecified if DSCP comparison is not to be performed
**type**\: one of the below types:
**type**\: :py:class:`Ipv4AclDscpNumberEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_acl_datatypes.Ipv4AclDscpNumberEnum>`
----
**type**\: int
**range:** 0..63
----
.. attribute:: dscp_operator
DSCP operator is applicable only when DSCP range is configured. Leave unspecified if DSCP range is not required
**type**\: :py:class:`Ipv4AclOperatorEnumEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_acl_datatypes.Ipv4AclOperatorEnumEnum>`
"""
_prefix = 'ipv4-acl-cfg'
_revision = '2016-11-07'
def __init__(self):
self.parent = None
self.dscp_max = None
self.dscp_min = None
self.dscp_operator = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-ipv4-acl-cfg:dscp'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if self.dscp_max is not None:
return True
if self.dscp_min is not None:
return True
if self.dscp_operator is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_acl_cfg as meta
return meta._meta_table['Ipv4AclAndPrefixList.Accesses.Access.AccessListEntries.AccessListEntry.Dscp']['meta_info']
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
if self.sequence_number is None:
raise YPYModelError('Key property sequence_number is None')
return self.parent._common_path +'/Cisco-IOS-XR-ipv4-acl-cfg:access-list-entry[Cisco-IOS-XR-ipv4-acl-cfg:sequence-number = ' + str(self.sequence_number) + ']'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if self.sequence_number is not None:
return True
if self.capture is not None:
return True
if self.counter_name is not None:
return True
if self.destination_network is not None and self.destination_network._has_data():
return True
if self.destination_port is not None and self.destination_port._has_data():
return True
if self.destination_port_group is not None:
return True
if self.destination_prefix_group is not None:
return True
if self.dscp is not None and self.dscp._has_data():
return True
if self.fragment_offset is not None and self.fragment_offset._has_data():
return True
if self.fragments is not None:
return True
if self.grant is not None:
return True
if self.icmp is not None and self.icmp._has_data():
return True
if self.icmp_off is not None:
return True
if self.igmp_message_type is not None:
return True
if self.log_option is not None:
return True
if self.next_hop is not None and self.next_hop._has_data():
return True
if self.packet_length is not None and self.packet_length._has_data():
return True
if self.precedence is not None:
return True
if self.protocol is not None:
return True
if self.protocol2 is not None:
return True
if self.protocol_operator is not None:
return True
if self.qos_group is not None:
return True
if self.remark is not None:
return True
if self.sequence_str is not None:
return True
if self.source_network is not None and self.source_network._has_data():
return True
if self.source_port is not None and self.source_port._has_data():
return True
if self.source_port_group is not None:
return True
if self.source_prefix_group is not None:
return True
if self.tcp is not None and self.tcp._has_data():
return True
if self.time_to_live is not None and self.time_to_live._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_acl_cfg as meta
return meta._meta_table['Ipv4AclAndPrefixList.Accesses.Access.AccessListEntries.AccessListEntry']['meta_info']
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-ipv4-acl-cfg:access-list-entries'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if self.access_list_entry is not None:
for child_ref in self.access_list_entry:
if child_ref._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_acl_cfg as meta
return meta._meta_table['Ipv4AclAndPrefixList.Accesses.Access.AccessListEntries']['meta_info']
@property
def _common_path(self):
if self.access_list_name is None:
raise YPYModelError('Key property access_list_name is None')
return '/Cisco-IOS-XR-ipv4-acl-cfg:ipv4-acl-and-prefix-list/Cisco-IOS-XR-ipv4-acl-cfg:accesses/Cisco-IOS-XR-ipv4-acl-cfg:access[Cisco-IOS-XR-ipv4-acl-cfg:access-list-name = ' + str(self.access_list_name) + ']'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if self.access_list_name is not None:
return True
if self.access_list_entries is not None and self.access_list_entries._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_acl_cfg as meta
return meta._meta_table['Ipv4AclAndPrefixList.Accesses.Access']['meta_info']
@property
def _common_path(self):
return '/Cisco-IOS-XR-ipv4-acl-cfg:ipv4-acl-and-prefix-list/Cisco-IOS-XR-ipv4-acl-cfg:accesses'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if self.access is not None:
for child_ref in self.access:
if child_ref._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_acl_cfg as meta
return meta._meta_table['Ipv4AclAndPrefixList.Accesses']['meta_info']
class Prefixes(object):
"""
Table of ACL prefix lists. Entries in this
table and the PrefixListExistenceTable table
must be kept consistent
.. attribute:: prefix
Name of a prefix list
**type**\: list of :py:class:`Prefix <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_acl_cfg.Ipv4AclAndPrefixList.Prefixes.Prefix>`
"""
_prefix = 'ipv4-acl-cfg'
_revision = '2016-11-07'
def __init__(self):
self.parent = None
self.prefix = YList()
self.prefix.parent = self
self.prefix.name = 'prefix'
class Prefix(object):
"""
Name of a prefix list
.. attribute:: prefix_list_name <key>
Prefix list name \- max 32 characters
**type**\: str
.. attribute:: prefix_list_entries
Sequence of entries forming a prefix list
**type**\: :py:class:`PrefixListEntries <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_acl_cfg.Ipv4AclAndPrefixList.Prefixes.Prefix.PrefixListEntries>`
**presence node**\: True
"""
_prefix = 'ipv4-acl-cfg'
_revision = '2016-11-07'
def __init__(self):
self.parent = None
self.prefix_list_name = None
self.prefix_list_entries = None
class PrefixListEntries(object):
"""
Sequence of entries forming a prefix list
.. attribute:: prefix_list_entry
A prefix list entry; either a description (remark) or a prefix to match against
**type**\: list of :py:class:`PrefixListEntry <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_acl_cfg.Ipv4AclAndPrefixList.Prefixes.Prefix.PrefixListEntries.PrefixListEntry>`
.. attribute:: _is_presence
Is present if this instance represents presence container else not
**type**\: bool
This class is a :ref:`presence class<presence-class>`
"""
_prefix = 'ipv4-acl-cfg'
_revision = '2016-11-07'
def __init__(self):
self.parent = None
self._is_presence = True
self.prefix_list_entry = YList()
self.prefix_list_entry.parent = self
self.prefix_list_entry.name = 'prefix_list_entry'
class PrefixListEntry(object):
"""
A prefix list entry; either a description
(remark) or a prefix to match against
.. attribute:: sequence_number <key>
Sequence number of prefix list
**type**\: int
**range:** 1..2147483646
.. attribute:: exact_prefix_length
If exact prefix length matching specified, set the length of prefix to be matched
**type**\: int
**range:** 0..32
.. attribute:: grant
Whether to forward or drop packets matching the prefix list
**type**\: :py:class:`Ipv4AclGrantEnumEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_acl_datatypes.Ipv4AclGrantEnumEnum>`
.. attribute:: match_exact_length
Set to perform an exact prefix length match. Item is mutually exclusive with minimum and maximum length match items
**type**\: :py:class:`Empty<ydk.types.Empty>`
.. attribute:: match_max_length
Set to perform a maximum length prefix match. Item is mutually exclusive with exact length match item
**type**\: :py:class:`Empty<ydk.types.Empty>`
.. attribute:: match_min_length
Set to perform a minimum length prefix match. Item is mutually exclusive with exact length match item
**type**\: :py:class:`Empty<ydk.types.Empty>`
.. attribute:: max_prefix_length
If maximum length prefix matching specified, set the maximum length of prefix to be matched
**type**\: int
**range:** 0..32
.. attribute:: min_prefix_length
If minimum length prefix matching specified, set the minimum length of prefix to be matched
**type**\: int
**range:** 0..32
.. attribute:: netmask
Mask of IPv4 address prefix
**type**\: str
**pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
.. attribute:: prefix
IPv4 address prefix to match
**type**\: str
**pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
.. attribute:: remark
Comments or a description for the prefix list. Item is mutually exclusive with all others in the object
**type**\: str
"""
_prefix = 'ipv4-acl-cfg'
_revision = '2016-11-07'
def __init__(self):
self.parent = None
self.sequence_number = None
self.exact_prefix_length = None
self.grant = None
self.match_exact_length = None
self.match_max_length = None
self.match_min_length = None
self.max_prefix_length = None
self.min_prefix_length = None
self.netmask = None
self.prefix = None
self.remark = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
if self.sequence_number is None:
raise YPYModelError('Key property sequence_number is None')
return self.parent._common_path +'/Cisco-IOS-XR-ipv4-acl-cfg:prefix-list-entry[Cisco-IOS-XR-ipv4-acl-cfg:sequence-number = ' + str(self.sequence_number) + ']'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if self.sequence_number is not None:
return True
if self.exact_prefix_length is not None:
return True
if self.grant is not None:
return True
if self.match_exact_length is not None:
return True
if self.match_max_length is not None:
return True
if self.match_min_length is not None:
return True
if self.max_prefix_length is not None:
return True
if self.min_prefix_length is not None:
return True
if self.netmask is not None:
return True
if self.prefix is not None:
return True
if self.remark is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_acl_cfg as meta
return meta._meta_table['Ipv4AclAndPrefixList.Prefixes.Prefix.PrefixListEntries.PrefixListEntry']['meta_info']
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-ipv4-acl-cfg:prefix-list-entries'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if self._is_presence:
return True
if self.prefix_list_entry is not None:
for child_ref in self.prefix_list_entry:
if child_ref._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_acl_cfg as meta
return meta._meta_table['Ipv4AclAndPrefixList.Prefixes.Prefix.PrefixListEntries']['meta_info']
@property
def _common_path(self):
if self.prefix_list_name is None:
raise YPYModelError('Key property prefix_list_name is None')
return '/Cisco-IOS-XR-ipv4-acl-cfg:ipv4-acl-and-prefix-list/Cisco-IOS-XR-ipv4-acl-cfg:prefixes/Cisco-IOS-XR-ipv4-acl-cfg:prefix[Cisco-IOS-XR-ipv4-acl-cfg:prefix-list-name = ' + str(self.prefix_list_name) + ']'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if self.prefix_list_name is not None:
return True
if self.prefix_list_entries is not None and self.prefix_list_entries._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_acl_cfg as meta
return meta._meta_table['Ipv4AclAndPrefixList.Prefixes.Prefix']['meta_info']
@property
def _common_path(self):
return '/Cisco-IOS-XR-ipv4-acl-cfg:ipv4-acl-and-prefix-list/Cisco-IOS-XR-ipv4-acl-cfg:prefixes'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if self.prefix is not None:
for child_ref in self.prefix:
if child_ref._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_acl_cfg as meta
return meta._meta_table['Ipv4AclAndPrefixList.Prefixes']['meta_info']
class LogUpdate(object):
"""
Control access lists log updates
.. attribute:: rate
Log update rate (log msgs per second)
**type**\: int
**range:** 1..1000
.. attribute:: threshold
Log update threshold (number of hits)
**type**\: int
**range:** 1..2147483647
"""
_prefix = 'ipv4-acl-cfg'
_revision = '2016-11-07'
def __init__(self):
self.parent = None
self.rate = None
self.threshold = None
@property
def _common_path(self):
return '/Cisco-IOS-XR-ipv4-acl-cfg:ipv4-acl-and-prefix-list/Cisco-IOS-XR-ipv4-acl-cfg:log-update'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if self.rate is not None:
return True
if self.threshold is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_acl_cfg as meta
return meta._meta_table['Ipv4AclAndPrefixList.LogUpdate']['meta_info']
@property
def _common_path(self):
return '/Cisco-IOS-XR-ipv4-acl-cfg:ipv4-acl-and-prefix-list'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if self.accesses is not None and self.accesses._has_data():
return True
if self.log_update is not None and self.log_update._has_data():
return True
if self.prefixes is not None and self.prefixes._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_acl_cfg as meta
return meta._meta_table['Ipv4AclAndPrefixList']['meta_info']
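# Illustrative usage sketch appended for clarity (not part of the generated
# module): build one permit ACE under an ACL named "DEMO-ACL". Assumptions:
# the Access/AccessListEntries containers are instantiated by __init__ like
# the other containers above, and the grant enum exposes a PERMIT member.
def _demo_permit_acl():
    from ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_acl_datatypes import Ipv4AclGrantEnumEnum
    acl_cfg = Ipv4AclAndPrefixList()
    access = Ipv4AclAndPrefixList.Accesses.Access()
    access.access_list_name = 'DEMO-ACL'  # list key
    ace = Ipv4AclAndPrefixList.Accesses.Access.AccessListEntries.AccessListEntry()
    ace.sequence_number = 10  # list key of the ACE
    ace.grant = Ipv4AclGrantEnumEnum.PERMIT  # assumed enum member name
    ace.source_network.source_address = '10.0.0.0'
    ace.source_network.source_wild_card_bits = '0.0.0.255'
    access.access_list_entries.access_list_entry.append(ace)
    acl_cfg.accesses.access.append(access)
    return acl_cfg  # would then be pushed via a CRUD service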
|
111pontes/ydk-py
|
cisco-ios-xr/ydk/models/cisco_ios_xr/Cisco_IOS_XR_ipv4_acl_cfg.py
|
Python
|
apache-2.0
| 83,998
|
# ==================================================================================================
# Copyright 2011 Twitter, Inc.
# --------------------------------------------------------------------------------------------------
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this work except in compliance with the License.
# You may obtain a copy of the License in the LICENSE file, or at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==================================================================================================
__author__ = 'John Sirois'
from twitter.pants.base.generator import TemplateData
import unittest
class TemplateDataTest(unittest.TestCase):
def setUp(self):
self.data = TemplateData(foo = 'bar', baz = 42)
def test_member_access(self):
try:
self.data.bip
self.fail("Access to undefined template data slots should raise")
except AttributeError:
# expected
pass
def test_member_mutation(self):
try:
self.data.baz = 1 / 137
self.fail("Mutation of a template data's slots should not be allowed")
except AttributeError:
# expected
pass
def test_extend(self):
self.assertEqual(self.data.extend(jake = 0.3), TemplateData(baz = 42, foo = 'bar', jake = 0.3))
def test_equals(self):
self.assertEqual(self.data, TemplateData(baz = 42).extend(foo = 'bar'))
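# Convenience guard added here (not in the original file) so the suite can be
# executed directly with `python test-generator.py` as well as via a runner.
if __name__ == '__main__':
    unittest.main()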
|
foursquare/commons-old
|
tests/python/twitter/pants/base/test-generator.py
|
Python
|
apache-2.0
| 1,738
|
import zstackwoodpecker.test_state as ts_header
TestAction = ts_header.TestAction
def path():
return dict(initial_formation="template4",\
path_list=[[TestAction.delete_volume, "vm1-volume1"], \
[TestAction.reboot_vm, "vm1"], \
[TestAction.create_volume, "volume1", "=scsi"], \
[TestAction.attach_volume, "vm1", "volume1"], \
[TestAction.create_volume_backup, "volume1", "backup1"], \
[TestAction.stop_vm, "vm1"], \
[TestAction.cleanup_ps_cache], \
[TestAction.start_vm, "vm1"], \
[TestAction.create_volume_snapshot, "volume1", 'snapshot1'], \
[TestAction.detach_volume, "volume1"], \
[TestAction.clone_vm, "vm1", "vm2", "=full"], \
[TestAction.attach_volume, "vm1", "volume1"], \
[TestAction.stop_vm, "vm1"], \
[TestAction.use_volume_backup, "backup1"], \
[TestAction.start_vm, "vm1"], \
[TestAction.reboot_vm, "vm1"]])
|
zstackorg/zstack-woodpecker
|
integrationtest/vm/multihosts/volumes/paths/path119.py
|
Python
|
apache-2.0
| 870
|
from servicemanager.actions import actions
from servicemanager.smcontext import SmApplication, SmContext, ServiceManagerException
from servicemanager.smprocess import SmProcess
from servicemanager.service.smplayservice import SmPlayService
from servicemanager.serviceresolver import ServiceResolver
import pytest
from .testbase import TestBase
class TestActions(TestBase):
def test_start_and_stop_one(self):
context = SmContext(SmApplication(self.config_dir_override), None, False, False)
result = actions.start_one(context, "TEST_ONE", False, True, False, None, port=None)
self.assertTrue(result)
self.waitForCondition((lambda: len(context.get_service("TEST_ONE").status())), 1)
context.kill("TEST_ONE", True)
self.assertEqual(context.get_service("TEST_ONE").status(), [])
def test_start_and_stop_one_with_append_args(self):
context = SmContext(SmApplication(self.config_dir_override), None, False, False)
actions.start_one(context, "TEST_FOUR", False, True, False, None, None, ["2"])
self.waitForCondition((lambda: len(context.get_service("TEST_FOUR").status())), 1)
context.kill("TEST_FOUR", True)
self.assertEqual(context.get_service("TEST_FOUR").status(), [])
@pytest.mark.online
def test_dropwizard_from_source(self):
sm_application = SmApplication(self.config_dir_override)
context = SmContext(sm_application, None, False, False)
service_resolver = ServiceResolver(sm_application)
servicetostart = "DROPWIZARD_NEXUS_END_TO_END_TEST"
actions.start_and_wait(
service_resolver,
context,
[servicetostart],
False,
False,
False,
None,
port=None,
seconds_to_wait=90,
append_args=None,
)
self.assertIsNotNone(context.get_service(servicetostart).status())
context.kill(servicetostart, True)
self.assertEqual(context.get_service(servicetostart).status(), [])
def test_dropwizard_from_jar(self):
sm_application = SmApplication(self.config_dir_override)
context = SmContext(sm_application, None, False, False)
service_resolver = ServiceResolver(sm_application)
self.startFakeNexus()
servicetostart = "DROPWIZARD_NEXUS_END_TO_END_TEST"
actions.start_and_wait(
service_resolver,
context,
[servicetostart],
False,
True,
False,
None,
port=None,
seconds_to_wait=90,
append_args=None,
)
self.assertIsNotNone(context.get_service(servicetostart).status())
context.kill(servicetostart, True)
self.assertEqual(context.get_service(servicetostart).status(), [])
@pytest.mark.online
def test_play_from_source(self):
sm_application = SmApplication(self.config_dir_override)
context = SmContext(sm_application, None, False, False)
service_resolver = ServiceResolver(sm_application)
servicetostart = "PLAY_NEXUS_END_TO_END_TEST"
port = None
secondsToWait = 90
append_args = None
actions.start_and_wait(
service_resolver, context, [servicetostart], True, False, False, None, port, secondsToWait, append_args,
)
self.assertIsNotNone(context.get_service(servicetostart).status())
context.kill(servicetostart, True)
self.assertEqual(context.get_service(servicetostart).status(), [])
def test_play_from_default_run_from_source(self):
sm_application = SmApplication(self.config_dir_override)
context = SmContext(sm_application, None, False, False)
service_resolver = ServiceResolver(sm_application)
servicetostart = "PLAY_NEXUS_END_TO_END_DEFAULT_SOURCE_TEST"
port = None
secondsToWait = 90
append_args = None
actions.start_and_wait(
service_resolver, context, [servicetostart], False, False, False, None, port, secondsToWait, append_args,
)
self.assertIsNotNone(context.get_service(servicetostart).status())
context.kill(servicetostart, True)
self.assertEqual(context.get_service(servicetostart).status(), [])
def test_play_from_source_default(self):
sm_application = SmApplication(self.config_dir_override)
context = SmContext(sm_application, None, False, False)
service_resolver = ServiceResolver(sm_application)
servicetostart = "PLAY_NEXUS_END_TO_END_TEST"
port = None
secondsToWait = 90
append_args = None
actions.start_and_wait(
service_resolver, context, [servicetostart], False, False, False, None, port, secondsToWait, append_args,
)
self.assertIsNotNone(context.get_service(servicetostart).status())
context.kill(servicetostart, True)
self.assertEqual(context.get_service(servicetostart).status(), [])
def test_successful_play_from_jar_without_waiting(self):
sm_application = SmApplication(self.config_dir_override)
context = SmContext(sm_application, None, False, False)
service_resolver = ServiceResolver(sm_application)
context.kill_everything(True)
self.startFakeNexus()
fatJar = True
release = False
proxy = None
port = None
seconds_to_wait = None
append_args = None
try:
servicetostart = ["PLAY_NEXUS_END_TO_END_TEST"]
actions.start_and_wait(
service_resolver,
context,
servicetostart,
False,
fatJar,
release,
proxy,
port,
seconds_to_wait,
append_args,
)
finally:
context.kill_everything(True)
def test_successful_play_default_run_from_jar_without_waiting(self):
sm_application = SmApplication(self.config_dir_override)
context = SmContext(sm_application, None, False, False)
service_resolver = ServiceResolver(sm_application)
context.kill_everything(True)
self.startFakeNexus()
source = False
fatJar = True
release = False
proxy = None
port = None
seconds_to_wait = None
append_args = None
try:
servicetostart = ["PLAY_NEXUS_END_TO_END_DEFAULT_JAR_TEST"]
actions.start_and_wait(
service_resolver,
context,
servicetostart,
source,
fatJar,
release,
proxy,
port,
seconds_to_wait,
append_args,
)
finally:
context.kill_everything(True)
def test_successful_play_from_jar_without_waiting_with_append_args(self):
sm_application = SmApplication(self.config_dir_override)
context = SmContext(sm_application, None, False, False)
service_resolver = ServiceResolver(sm_application)
context.kill_everything(True)
self.startFakeNexus()
servicetostart = ["PLAY_NEXUS_END_TO_END_TEST"]
appendArgs = {"PLAY_NEXUS_END_TO_END_TEST": ["-DFoo=Bar"]}
fatJar = True
release = False
proxy = None
port = None
seconds_to_wait = None
actions.start_and_wait(
service_resolver, context, servicetostart, False, fatJar, release, proxy, port, seconds_to_wait, appendArgs,
)
service = SmPlayService(context, "PLAY_NEXUS_END_TO_END_TEST")
self.waitForCondition(lambda: len(SmProcess.processes_matching(service.pattern)), 1)
processes = SmProcess.processes_matching(service.pattern)
self.assertTrue("-DFoo=Bar" in processes[0].args)
def test_failing_play_from_jar(self):
sm_application = SmApplication(self.config_dir_override)
context = SmContext(sm_application, None, False, False)
service_resolver = ServiceResolver(sm_application)
context.kill_everything(True)
self.startFakeNexus()
try:
servicetostart = ["BROKEN_PLAY_PROJECT"]
actions.start_and_wait(
service_resolver,
context,
servicetostart,
source=False,
fatjar=True,
release=False,
proxy=None,
port=None,
seconds_to_wait=2,
append_args=None,
)
self.fail("Did not expect the project to startup.")
except ServiceManagerException as sme:
self.assertEqual("Timed out starting service(s): BROKEN_PLAY_PROJECT", sme.args[0])
finally:
context.kill_everything(True)
def test_start_and_stop_one_duplicate(self):
sm_application = SmApplication(self.config_dir_override)
context = SmContext(sm_application, None, False, False)
service_resolver = ServiceResolver(sm_application)
actions.start_and_wait(
service_resolver,
context,
["TEST_ONE"],
False,
False,
False,
None,
port=None,
seconds_to_wait=90,
append_args=None,
)
self.assertIsNotNone(context.get_service("TEST_ONE").status())
result = actions.start_one(context, "TEST_ONE", False, True, False, None, port=None)
self.assertFalse(result)
context.kill("TEST_ONE", True)
self.assertEqual(context.get_service("TEST_ONE").status(), [])
def test_assets_server(self):
context = SmContext(SmApplication(self.config_dir_override), None, False, False)
context.kill_everything(True)
self.startFakeArtifactory()
actions.start_one(
context, "PYTHON_SIMPLE_SERVER_ASSETS_FRONTEND", False, True, False, None, port=None,
)
self.assertIsNotNone(context.get_service("PYTHON_SIMPLE_SERVER_ASSETS_FRONTEND").status())
context.kill("PYTHON_SIMPLE_SERVER_ASSETS_FRONTEND", wait=True)
self.assertEqual(context.get_service("PYTHON_SIMPLE_SERVER_ASSETS_FRONTEND").status(), [])
def test_wait_on_assets_server(self):
sm_application = SmApplication(self.config_dir_override)
context = SmContext(sm_application, None, False, False)
service_resolver = ServiceResolver(sm_application)
context.kill_everything(True)
self.startFakeArtifactory()
port = None
seconds_to_wait = 5
append_args = None
actions.start_and_wait(
service_resolver,
context,
["PYTHON_SIMPLE_SERVER_ASSETS_FRONTEND"],
False,
True,
False,
None,
port,
seconds_to_wait,
append_args,
)
self.assertIsNotNone(context.get_service("PYTHON_SIMPLE_SERVER_ASSETS_FRONTEND").status())
context.kill("PYTHON_SIMPLE_SERVER_ASSETS_FRONTEND", True)
self.assertEqual(context.get_service("PYTHON_SIMPLE_SERVER_ASSETS_FRONTEND").status(), [])
def test_python_server_offline(self):
context = SmContext(SmApplication(self.config_dir_override), None, True, False)
port = None
append_args = None
actions.start_one(
context, "PYTHON_SIMPLE_SERVER_ASSETS_FRONTEND", False, True, False, None, port, append_args,
)
self.assertIsNotNone(context.get_service("PYTHON_SIMPLE_SERVER_ASSETS_FRONTEND").status())
context.kill("PYTHON_SIMPLE_SERVER_ASSETS_FRONTEND", True)
self.assertEqual(context.get_service("PYTHON_SIMPLE_SERVER_ASSETS_FRONTEND").status(), [])
|
hmrc/service-manager
|
test/it/test_actions.py
|
Python
|
apache-2.0
| 11,909
|
from __future__ import print_function
import os
import sys
from subprocess import Popen, PIPE
from getpass import getpass
from shutil import rmtree
import argparse
# inspired by https://github.com/mitsuhiko/flask/blob/master/scripts/make-release.py
def set_filename_version(filename, version_number):
with open(filename, 'w+') as f:
f.write("version = '{}'\n".format(version_number))
def set_init_version(version_str):
info('Setting version.py version to %s', version_str)
set_filename_version('eralchemy/version.py', version_str)
def rm(filename):
info('Delete {}'.format(filename))
rmtree(filename, ignore_errors=True)
def build_and_upload():
rm('ERAlchemy.egg-info')
rm('build')
rm('dist')
Popen(['pandoc', '--from=markdown', '--to=rst', 'readme.md', '--output=readme.rst'],
stdout=PIPE).wait()
Popen([sys.executable, 'setup.py', 'bdist_wheel', '--universal'], stdout=PIPE).wait()
Popen([sys.executable, 'setup.py', 'sdist'], stdout=PIPE).wait()
pypi_pwd = getpass(prompt='Pypi Password: ')
Popen(['twine', 'upload', 'dist/*', '-u', 'alexis.benoist', '-p', pypi_pwd]).wait()
Popen(['open', 'https://pypi.python.org/pypi/ERAlchemy'])
Popen(['git', 'tag'], stdout=PIPE).communicate()[0].splitlines()
Popen(['git', 'push', '--tags']).wait()
def fail(message, *args):
print('Error:', message % args, file=sys.stderr)
sys.exit(1)
def info(message, *args):
print(message % args, file=sys.stderr)
def git_is_clean():
return Popen(['git', 'diff', '--quiet']).wait() == 0
def make_git_commit(message, *args):
message = message % args
Popen(['git', 'commit', '-am', message]).wait()
def make_git_tag(tag):
info('Tagging "%s"', tag)
Popen(['git', 'tag', tag]).wait()
def version_str_to_lst(v):
return [int(s) for s in v.split('.')]
def version_lst_to_str(v):
return '.'.join(str(n) for n in v)
def parse_args():
""" Parse the args, returns if the type of update:
Major, minor, fix
"""
parser = argparse.ArgumentParser()
parser.add_argument('-M', action='store_true')
parser.add_argument('-m', action='store_true')
parser.add_argument('-f', action='store_true')
args = parser.parse_args()
major, minor, fix = args.M, args.m, args.f
if major + minor + fix != 1:
fail('Please select one and only one action.')
return major, minor, fix
def get_current_version():
with open('eralchemy/version.py') as f:
lines = f.readlines()
namespace = {}
exec(lines[0], namespace)
return version_str_to_lst(namespace['version'])
def get_git_tags():
return set(Popen(['git', 'tag'], stdout=PIPE).communicate()[0].splitlines())
def get_next_version(major, minor, fix, current_version):
if major:
return [current_version[0] + 1, 0, 0]
if minor:
return [current_version[0], current_version[1] + 1, 0]
if fix:
return [current_version[0], current_version[1], current_version[2] + 1]
raise UserWarning()
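# Worked example of the bump logic above, starting from version 1.2.3:
#   -M (major) -> 2.0.0
#   -m (minor) -> 1.3.0
#   -f (fix)   -> 1.2.4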
def main():
os.chdir(os.path.join(os.path.dirname(__file__), '..'))
current_version = get_current_version()
major, minor, fix = parse_args()
next_version = get_next_version(major, minor, fix, current_version)
next_version_str = version_lst_to_str(next_version)
tags = get_git_tags()
if next_version_str in tags:
fail('Version "%s" is already tagged', next_version_str)
if not git_is_clean():
fail('You have uncommitted changes in git')
set_init_version(next_version_str)
make_git_commit('Bump version number to %s', next_version_str)
make_git_tag('v' + next_version_str)
build_and_upload()
if __name__ == '__main__':
main()
|
Alexis-benoist/eralchemy
|
script/make_release.py
|
Python
|
apache-2.0
| 3,766
|
import zstackwoodpecker.operations.host_operations as host_ops
import zstackwoodpecker.operations.resource_operations as res_ops
import zstackwoodpecker.operations.volume_operations as vol_ops
import zstackwoodpecker.test_util as test_util
volume = None
disconnect = False
host = None
def test():
global disconnect, volume, host
# query&get clusters
cond = res_ops.gen_query_conditions('name', '=', "cluster1")
cluster1 = res_ops.query_resource(res_ops.CLUSTER, cond)[0]
cond = res_ops.gen_query_conditions('name', '=', "cluster2")
cluster2 = res_ops.query_resource(res_ops.CLUSTER, cond)[0]
# query&get hosts
cond = res_ops.gen_query_conditions('clusterUuid', '=', cluster1.uuid)
cluster1_host = res_ops.query_resource(res_ops.HOST, cond)
cond = res_ops.gen_query_conditions('clusterUuid', '=', cluster2.uuid)
cluster2_host = res_ops.query_resource(res_ops.HOST, cond)
# disconnect mn_host1
host = cluster1_host[0]
host_ops.update_kvm_host(host.uuid, 'username', "root1")
try:
host_ops.reconnect_host(host.uuid)
    except Exception:
test_util.test_logger("host: [%s] is disconnected" % host.uuid)
disconnect = True
# create_volume on 2 clusters
ps = res_ops.query_resource(res_ops.PRIMARY_STORAGE)[0]
systemtags1 = ["volumeProvisioningStrategy::ThickProvisioning", "capability::virtio-scsi",
"miniStorage::clusterUuid::%s" % cluster1.uuid]
volume_creation_option = test_util.VolumeOption()
volume_creation_option.set_name("cluster1_volume")
volume_creation_option.set_primary_storage_uuid(ps.uuid)
volume_creation_option.set_system_tags(systemtags1)
volume_creation_option.set_diskSize(2 * 1024 * 1024 * 1024)
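    # Negative case: with the cluster1 host left disconnected above, the
    # volume creation below is expected to fail; the except branch restores
    # the host credentials so the environment can recover afterwards.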
try:
volume_inv = vol_ops.create_volume_from_diskSize(volume_creation_option)
except Exception as e:
host_ops.update_kvm_host(host.uuid, 'username', "root")
host_ops.reconnect_host(host.uuid)
        print(e.message.encode("utf-8"))
def error_cleanup():
global host, disconnect
if disconnect:
host_ops.update_kvm_host(host.uuid, 'username', "root")
host_ops.reconnect_host(host.uuid)
disconnect = False
def env_recover():
global host, disconnect
if disconnect:
host_ops.update_kvm_host(host.uuid, 'username', "root")
host_ops.reconnect_host(host.uuid)
disconnect = False
|
zstackio/zstack-woodpecker
|
integrationtest/vm/mini/multiclusters/test_disconnect_host_volume_create_negative1.py
|
Python
|
apache-2.0
| 2,411
|
# -*- coding: utf-8 -*-
class DictDiffer(object):
"""
Calculate the difference between two dictionaries as:
(1) items added
(2) items removed
(3) keys same in both but changed values
(4) keys same in both and unchanged values
"""
def __init__(self, current_dict, past_dict):
self.current_dict = current_dict
self.past_dict = past_dict
self.set_current = set(current_dict.keys())
self.set_past = set(past_dict.keys())
self.intersect = self.set_current.intersection(self.set_past)
def added(self):
return self.set_current - self.intersect
def removed(self):
return self.set_past - self.intersect
def changed(self):
return (set(o for o in self.intersect
if self.past_dict[o] != self.current_dict[o]))
def unchanged(self):
return (set(o for o in self.intersect
if self.past_dict[o] == self.current_dict[o]))
class NmapDiff(DictDiffer):
"""
NmapDiff compares two objects of same type to enable the user to check:
- what has changed
- what has been added
- what has been removed
- what was kept unchanged
    NmapDiff inherits from DictDiffer, which performs the actual comparison.
    The DictDiffer methods used by NmapDiff are the following:
        - NmapDiff.changed()
        - NmapDiff.added()
        - NmapDiff.removed()
        - NmapDiff.unchanged()
    Each of these returns a python set() of the keys that differ between the
    compared objects. To check the different keys that could be returned,
    refer to the get_dict() method of the objects you wish to
    compare (i.e.: libnmap.objects.NmapHost, NmapService, ...).
"""
def __init__(self, nmap_obj1, nmap_obj2):
"""
Constructor of NmapDiff:
- Checks if the two objects are of the same class
- Checks if the objects are "comparable" via a call to id() (dirty)
        - Inherits from DictDiffer, initialized with the dict
          representations of the two objects
        """
if(nmap_obj1.__class__ != nmap_obj2.__class__ or
nmap_obj1.id != nmap_obj2.id):
raise NmapDiffException("Comparing objects with non-matching id")
self.object1 = nmap_obj1.get_dict()
self.object2 = nmap_obj2.get_dict()
DictDiffer.__init__(self, self.object1, self.object2)
def __repr__(self):
return ("added: [{0}] -- changed: [{1}] -- "
"unchanged: [{2}] -- removed [{3}]".format(self.added(),
self.changed(),
self.unchanged(),
self.removed()))
class NmapDiffException(Exception):
    def __init__(self, msg):
        # Pass the message to Exception so str(e) is not empty.
        super(NmapDiffException, self).__init__(msg)
        self.msg = msg
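# Minimal self-check sketch (not part of the original library): exercises
# DictDiffer on two small dicts. The expected sets follow directly from the
# definitions above; running the module directly should raise nothing.
if __name__ == '__main__':
    d = DictDiffer({'a': 1, 'b': 2}, {'b': 3, 'c': 4})
    assert d.added() == {'a'}         # key only in the current dict
    assert d.removed() == {'c'}       # key only in the past dict
    assert d.changed() == {'b'}       # key in both, values differ (2 vs 3)
    assert d.unchanged() == set()     # no key in both with equal values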
|
pyphrb/myweb
|
app/plugin/nmap/libnmap/diff.py
|
Python
|
apache-2.0
| 2,896
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functions for computing and applying pressure."""
from typing import Callable, Optional
import jax.numpy as jnp
import jax.scipy.sparse.linalg
from jax_cfd.base import array_utils
from jax_cfd.base import boundaries
from jax_cfd.base import fast_diagonalization
from jax_cfd.base import finite_differences as fd
from jax_cfd.base import grids
Array = grids.Array
GridArray = grids.GridArray
GridArrayVector = grids.GridArrayVector
GridVariable = grids.GridVariable
GridVariableVector = grids.GridVariableVector
BoundaryConditions = grids.BoundaryConditions
# Specifying the full signatures of Callable would get somewhat onerous
# pylint: disable=g-bare-generic
# TODO(pnorgaard) Implement bicgstab for non-symmetric operators
def solve_cg(v: GridVariableVector,
q0: GridVariable,
rtol: float = 1e-6,
atol: float = 1e-6,
maxiter: Optional[int] = None) -> GridArray:
"""Conjugate gradient solve for the pressure such that continuity is enforced.
Returns a pressure correction `q` such that `div(v - grad(q)) == 0`.
The relationship between `q` and our actual pressure estimate is given by
`p = q * density / dt`.
Args:
v: the velocity field.
q0: an initial value, or "guess" for the pressure correction. A common
choice is the correction from the previous time step. Also specifies the
boundary conditions on `q`.
rtol: relative tolerance for convergence.
atol: absolute tolerance for convergence.
maxiter: optional int, the maximum number of iterations to perform.
Returns:
A pressure correction `q` such that `div(v - grad(q))` is zero.
"""
# TODO(jamieas): add functionality for non-uniform density.
rhs = fd.divergence(v)
def laplacian_with_bcs(array: GridArray) -> GridArray:
variable = grids.GridVariable(array, q0.bc)
return fd.laplacian(variable)
q, _ = jax.scipy.sparse.linalg.cg(
laplacian_with_bcs,
rhs,
x0=q0.array,
tol=rtol,
atol=atol,
maxiter=maxiter)
return q
def solve_fast_diag(v: GridVariableVector,
q0: Optional[GridVariable] = None,
implementation: Optional[str] = None) -> GridArray:
"""Solve for pressure using the fast diagonalization approach."""
del q0 # unused
if not boundaries.has_all_periodic_boundary_conditions(*v):
raise ValueError('solve_fast_diag() expects periodic velocity BC')
grid = grids.consistent_grid(*v)
rhs = fd.divergence(v)
laplacians = list(map(array_utils.laplacian_matrix, grid.shape, grid.step))
  pinv = fast_diagonalization.psuedoinverse(  # [sic]: name as spelled in the library
laplacians, rhs.dtype,
hermitian=True, circulant=True, implementation=implementation)
return grids.applied(pinv)(rhs)
def projection(
v: GridVariableVector,
solve: Callable = solve_fast_diag,
) -> GridVariableVector:
"""Apply pressure projection to make a velocity field divergence free."""
grid = grids.consistent_grid(*v)
pressure_bc = boundaries.get_pressure_bc_from_velocity(v)
q0 = grids.GridArray(jnp.zeros(grid.shape), grid.cell_center, grid)
q0 = grids.GridVariable(q0, pressure_bc)
q = solve(v, q0)
q = grids.GridVariable(q, pressure_bc)
q_grad = fd.forward_difference(q)
v_projected = tuple(
grids.GridVariable(u.array - q_g, u.bc) for u, q_g in zip(v, q_grad))
return v_projected
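# Usage sketch (illustrative, assuming a 2D periodic setup; `velocity_arrays`
# is a placeholder for GridArrays produced elsewhere, not part of this module):
#     grid = grids.Grid((64, 64), domain=((0, 2 * jnp.pi), (0, 2 * jnp.pi)))
#     bc = boundaries.periodic_boundary_conditions(grid.ndim)
#     v = tuple(grids.GridVariable(u, bc) for u in velocity_arrays)
#     v_div_free = projection(v, solve=solve_fast_diag)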
|
google/jax-cfd
|
jax_cfd/base/pressure.py
|
Python
|
apache-2.0
| 3,942
|
import unittest
import graph
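# API assumed by these tests (inferred from the assertions below, not verified
# against the graph module itself): Graph.from_file(path, directed=False)
# builds a graph from an adjacency-list file; BreadthFirstSearch(g, s) exposes
# count(), connected(v), distance(v), and path_to(v), where path_to() returns
# the vertices from v back to the source s, or None if v is unreachable.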
class BreadthFirstSearchTest(unittest.TestCase):
__runSlowTests = False
def testTinyGraph(self):
g = graph.Graph.from_file('tinyG.txt')
bfs = graph.BreadthFirstSearch(g, 0)
self.assertEqual(7, bfs.count())
self.assertFalse(bfs.connected(7))
self.assertIsNone(bfs.path_to(7))
self.assertFalse(bfs.connected(8))
self.assertIsNone(bfs.path_to(8))
self.assertFalse(bfs.connected(9))
self.assertIsNone(bfs.path_to(9))
self.assertFalse(bfs.connected(12))
self.assertIsNone(bfs.path_to(12))
self.assertEqual([2, 0], bfs.path_to(2))
self.assertEqual(1, bfs.distance(2))
self.assertEqual([3, 5, 0], bfs.path_to(3))
self.assertEqual(2, bfs.distance(3))
self.assertEqual([4, 5, 0], bfs.path_to(4))
self.assertEqual(2, bfs.distance(4))
self.assertEqual([5, 0], bfs.path_to(5))
self.assertEqual(1, bfs.distance(5))
def testMedGraph(self):
g = graph.Graph.from_file('mediumG.txt')
bfs = graph.BreadthFirstSearch(g, 0)
self.assertEqual(250, bfs.count())
self.assertTrue(bfs.connected(123))
self.assertEqual(9, bfs.distance(123))
self.assertEqual([123, 246, 244, 207, 122, 92, 171, 165, 68, 0], bfs.path_to(123))
def testTinyDG(self):
g = graph.Graph.from_file('tinyDG.txt', directed=True)
bfs = graph.BreadthFirstSearch(g, 0)
self.assertEqual(6, bfs.count())
self.assertTrue(bfs.connected(4))
self.assertIsNotNone(bfs.path_to(4))
self.assertFalse(bfs.connected(7))
self.assertIsNone(bfs.path_to(7))
self.assertEqual([2, 4, 5, 0], bfs.path_to(2))
self.assertEqual(3, bfs.distance(2))
def testTinyDAG(self):
g = graph.Graph.from_file('tinyDAG.txt', directed=True)
bfs = graph.BreadthFirstSearch(g, 0)
self.assertEqual(9, bfs.count())
self.assertTrue(bfs.connected(4))
self.assertIsNotNone(bfs.path_to(4))
self.assertFalse(bfs.connected(7))
self.assertIsNone(bfs.path_to(7))
self.assertEqual([12, 9, 6, 0], bfs.path_to(12))
self.assertEqual(3, bfs.distance(12))
if __name__ == '__main__':
unittest.main()
|
RobMcZag/python-algorithms3
|
graph/tests/bfs_test.py
|
Python
|
apache-2.0
| 2,306
|
# ----------------------------------------------------------------------------
# Copyright 2015 Nervana Systems Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
"""
Verify that different ways of loading datasets lead to the same result.
This test utility accepts the same command line parameters as neon. It
downloads the CIFAR-10 dataset and saves it as individual JPEG files. It then
proceeds to fit and evaluate a model using two different ways of loading the
data. Macrobatches are written to disk as needed.
run as follows:
python compare.py -e 1 -r 0 -b cpu
"""
import os
import numpy as np
from neon.data import DataIterator
from neon.initializers import Uniform
from neon.layers import Affine, Conv, Pooling, GeneralizedCost
from neon.models import Model
from neon.optimizers import GradientDescentMomentum
from neon.transforms import Misclassification, Rectlin, Softmax, CrossEntropyMulti
from neon.callbacks.callbacks import Callbacks
from neon.util.argparser import NeonArgparser
from neon.data import load_cifar10, ImageLoader
from neon.util.batch_writer import BatchWriter
from PIL import Image
from glob import glob
trainimgs = 'trainimgs'
testimgs = 'testimgs'
def process_dataset(data, labels, inputpath, leafdir):
datadir = os.path.join(inputpath, leafdir)
print('Saving images to %s' % datadir)
os.mkdir(datadir)
ulabels = np.unique(labels)
for ulabel in ulabels:
os.mkdir(os.path.join(datadir, str(ulabel)))
for idx in range(data.shape[0]):
im = data[idx].reshape((3, 32, 32))
im = np.uint8(np.transpose(im, axes=[1, 2, 0]).copy())
im = Image.fromarray(im)
path = os.path.join(datadir, str(labels[idx][0]), str(idx) + '.jpg')
im.save(path, format='JPEG', subsampling=0, quality=95)
def process(inputpath):
(X_train, y_train), (X_test, y_test), nclass = load_cifar10(inputpath,
normalize=False)
process_dataset(X_train, y_train, inputpath, trainimgs)
process_dataset(X_test, y_test, inputpath, testimgs)
def load_dataset(basepath, datadir, shuffle):
path = os.path.join(basepath, datadir)
if not os.path.exists(path):
process(basepath)
subdirs = glob(os.path.join(path, '*'))
labelnames = sorted(map(lambda x: os.path.basename(x), subdirs))
inds = range(len(labelnames))
labeldict = {key: val for key, val in zip(labelnames, inds)}
lines = []
for subdir in subdirs:
subdirlabel = labeldict[os.path.basename(subdir)]
files = glob(os.path.join(subdir, '*.jpg'))
lines += [(filename, subdirlabel) for filename in files]
assert(len(lines) > 0)
data = None
if shuffle:
np.random.seed(0)
np.random.shuffle(lines)
for idx in range(len(lines)):
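        # Load as RGB, reverse to BGR (presumably to match the channel order
        # the macrobatch ImageLoader produces), then lay out as CHW and flatten.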
im = np.asarray(Image.open(lines[idx][0]))[:, :, ::-1]
im = np.transpose(im, axes=[2, 0, 1]).ravel()
if data is None:
data = np.empty((len(lines), im.shape[0]), dtype='float32')
labels = np.empty((len(lines), 1), dtype='int32')
data[idx] = im
labels[idx] = lines[idx][1]
return (data, labels)
def load_cifar10_imgs(path):
(X_train, y_train) = load_dataset(path, trainimgs, shuffle=True)
(X_test, y_test) = load_dataset(path, testimgs, shuffle=False)
return (X_train, y_train), (X_test, y_test), 10
def write_batches(args, macrodir, datadir, val_pct):
if os.path.exists(macrodir):
return
print('Writing batches to %s' % macrodir)
bw = BatchWriter(out_dir=macrodir,
image_dir=os.path.join(args.data_dir, datadir),
target_size=32, macro_size=1024,
file_pattern='*.jpg', validation_pct=val_pct)
bw.run()
def run(args, train, test):
init_uni = Uniform(low=-0.1, high=0.1)
opt_gdm = GradientDescentMomentum(learning_rate=0.01,
momentum_coef=0.9,
stochastic_round=args.rounding)
layers = [Conv((5, 5, 16), init=init_uni, activation=Rectlin(), batch_norm=True),
Pooling((2, 2)),
Conv((5, 5, 32), init=init_uni, activation=Rectlin(), batch_norm=True),
Pooling((2, 2)),
Affine(nout=500, init=init_uni, activation=Rectlin(), batch_norm=True),
Affine(nout=10, init=init_uni, activation=Softmax())]
cost = GeneralizedCost(costfunc=CrossEntropyMulti())
mlp = Model(layers=layers)
callbacks = Callbacks(mlp, train, eval_set=test, **args.callback_args)
mlp.fit(train, optimizer=opt_gdm, num_epochs=args.epochs, cost=cost, callbacks=callbacks)
err = mlp.eval(test, metric=Misclassification())*100
print('Misclassification error = %.2f%%' % err)
return err
def test_iterator():
parser = NeonArgparser(__doc__)
args = parser.parse_args()
(X_train, y_train), (X_test, y_test), nclass = load_cifar10_imgs(path=args.data_dir)
train = DataIterator(X_train, y_train, nclass=nclass, lshape=(3, 32, 32))
test = DataIterator(X_test, y_test, nclass=nclass, lshape=(3, 32, 32))
return run(args, train, test)
def test_loader():
parser = NeonArgparser(__doc__)
args = parser.parse_args()
train_dir = os.path.join(args.data_dir, 'macrotrain')
test_dir = os.path.join(args.data_dir, 'macrotest')
write_batches(args, train_dir, trainimgs, 0)
write_batches(args, test_dir, testimgs, 1)
train = ImageLoader(set_name='train', do_transforms=False, inner_size=32,
repo_dir=train_dir)
test = ImageLoader(set_name='validation', do_transforms=False, inner_size=32,
repo_dir=test_dir)
train.init_batch_provider()
test.init_batch_provider()
err = run(args, train, test)
test.exit_batch_provider()
train.exit_batch_provider()
return err
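# Both loading paths must yield the same misclassification error for this
# comparison to pass.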
assert test_iterator() == test_loader()
|
nhynes/neon
|
neon/data/loader/test/compare.py
|
Python
|
apache-2.0
| 6,514
|