| text (string, lengths 4 to 1.02M) | meta (dict) |
|---|---|
from indico.modules.events.timetable.controllers.display import (RHTimetable, RHTimetableEntryInfo,
RHTimetableExportDefaultPDF, RHTimetableExportPDF)
from indico.modules.events.timetable.controllers.legacy import (RHLegacyTimetableAddBreak,
RHLegacyTimetableAddContribution,
RHLegacyTimetableAddSession,
RHLegacyTimetableAddSessionBlock,
RHLegacyTimetableBreakREST,
RHLegacyTimetableDeleteEntry,
RHLegacyTimetableEditEntry,
RHLegacyTimetableEditEntryDateTime,
RHLegacyTimetableEditEntryTime,
RHLegacyTimetableEditSession, RHLegacyTimetableFitBlock,
RHLegacyTimetableGetUnscheduledContributions,
RHLegacyTimetableMoveEntry, RHLegacyTimetableReschedule,
RHLegacyTimetableScheduleContribution,
RHLegacyTimetableShiftEntries,
RHLegacyTimetableSwapEntries)
from indico.modules.events.timetable.controllers.manage import (RHCloneContribution, RHManageSessionTimetable,
RHManageTimetable, RHManageTimetableEntryInfo,
RHTimetableREST)
from indico.web.flask.util import make_compat_redirect_func
from indico.web.flask.wrappers import IndicoBlueprint
_bp = IndicoBlueprint('timetable', __name__, template_folder='templates', virtual_template_folder='events/timetable',
url_prefix='/event/<int:event_id>')
# Management
_bp.add_url_rule('/manage/timetable/', 'management', RHManageTimetable)
_bp.add_url_rule('/manage/timetable/', 'timetable_rest', RHTimetableREST, methods=('POST',))
_bp.add_url_rule('/manage/timetable/<int:entry_id>', 'timetable_rest', RHTimetableREST, methods=('PATCH', 'DELETE'))
_bp.add_url_rule('/manage/timetable/session/<int:session_id>/', 'manage_session', RHManageSessionTimetable)
# Timetable legacy operations
_bp.add_url_rule('/manage/timetable/add-session', 'add_session', RHLegacyTimetableAddSession, methods=('GET', 'POST'))
_bp.add_url_rule('/manage/timetable/break/<int:break_id>', 'legacy_break_rest', RHLegacyTimetableBreakREST,
methods=('PATCH',))
with _bp.add_prefixed_rules('/manage/timetable/session/<int:session_id>', '/manage/timetable'):
_bp.add_url_rule('/', 'session_rest', RHLegacyTimetableEditSession,
methods=('PATCH',))
_bp.add_url_rule('/entry/<int:entry_id>/info', 'entry_info_manage', RHManageTimetableEntryInfo)
_bp.add_url_rule('/entry/<int:entry_id>/delete', 'delete_entry', RHLegacyTimetableDeleteEntry, methods=('POST',))
_bp.add_url_rule('/entry/<int:entry_id>/move', 'move_entry', RHLegacyTimetableMoveEntry,
methods=('GET', 'POST'))
_bp.add_url_rule('/entry/<int:entry_id>/shift', 'shift_entries', RHLegacyTimetableShiftEntries, methods=('POST',))
_bp.add_url_rule('/entry/<int:entry_id>/swap', 'swap_entries', RHLegacyTimetableSwapEntries, methods=('POST',))
_bp.add_url_rule('/entry/<int:entry_id>/edit/', 'edit_entry', RHLegacyTimetableEditEntry, methods=('GET', 'POST'))
_bp.add_url_rule('/entry/<int:entry_id>/edit/time', 'edit_entry_time', RHLegacyTimetableEditEntryTime,
methods=('GET', 'POST'))
_bp.add_url_rule('/entry/<int:entry_id>/edit/datetime', 'edit_entry_datetime', RHLegacyTimetableEditEntryDateTime,
methods=('POST',))
_bp.add_url_rule('/block/<block_id>/schedule', 'schedule', RHLegacyTimetableScheduleContribution, methods=('POST',))
_bp.add_url_rule('/block/<block_id>/fit', 'fit_session_block', RHLegacyTimetableFitBlock, methods=('POST',))
_bp.add_url_rule('/not-scheduled', 'not_scheduled', RHLegacyTimetableGetUnscheduledContributions)
_bp.add_url_rule('/schedule', 'schedule', RHLegacyTimetableScheduleContribution, methods=('POST',))
_bp.add_url_rule('/reschedule', 'reschedule', RHLegacyTimetableReschedule, methods=('POST',))
_bp.add_url_rule('/add-break', 'add_break', RHLegacyTimetableAddBreak, methods=('GET', 'POST'))
_bp.add_url_rule('/add-contribution', 'add_contribution', RHLegacyTimetableAddContribution, methods=('GET', 'POST'))
_bp.add_url_rule('/add-session-block', 'add_session_block', RHLegacyTimetableAddSessionBlock,
methods=('GET', 'POST'))
_bp.add_url_rule('/clone-contribution', 'clone_contribution', RHCloneContribution, methods=('POST',))
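# Note (assumption about IndicoBlueprint.add_prefixed_rules): the rules registered
# inside the with-block above are presumably exposed under both prefixes, e.g. the
# 'not_scheduled' endpoint would answer at both .../manage/timetable/not-scheduled
# and .../manage/timetable/session/<session_id>/not-scheduled, relative to the
# blueprint's /event/<int:event_id> prefix.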
# Display
_bp.add_url_rule('/timetable/', 'timetable', RHTimetable)
_bp.add_url_rule('/timetable/pdf', 'export_pdf', RHTimetableExportPDF, methods=('GET', 'POST'))
_bp.add_url_rule('/timetable/timetable.pdf', 'export_default_pdf', RHTimetableExportDefaultPDF)
_bp.add_url_rule('/timetable/entry/<int:entry_id>/info', 'entry_info', RHTimetableEntryInfo)
# Legacy URLs
_compat_bp = IndicoBlueprint('compat_timetable', __name__)
_compat_bp.add_url_rule('/conferenceTimeTable.py', 'timetable_modpython', make_compat_redirect_func(_bp, 'timetable'))
|
{
"content_hash": "4e32b2e39f9c24fb45eb90b85db5d874",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 120,
"avg_line_length": 80.51388888888889,
"alnum_prop": 0.6008280144902536,
"repo_name": "pferreir/indico",
"id": "9ad3def4c1773f51302e9afdb0377c79297bc081",
"size": "6011",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "indico/modules/events/timetable/blueprint.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "34704"
},
{
"name": "HTML",
"bytes": "1394116"
},
{
"name": "JavaScript",
"bytes": "2078347"
},
{
"name": "Mako",
"bytes": "1527"
},
{
"name": "Python",
"bytes": "4993798"
},
{
"name": "SCSS",
"bytes": "475126"
},
{
"name": "Shell",
"bytes": "3877"
},
{
"name": "TeX",
"bytes": "23327"
},
{
"name": "XSLT",
"bytes": "1504"
}
],
"symlink_target": ""
}
|
import webapp2

config = {}
config['webapp2_extras.sessions'] = {
    'secret_key': 'my-key'
}


class MainHandler(webapp2.RequestHandler):
    def get(self):
        self.response.write('Hello world!')


app = webapp2.WSGIApplication([
    # Route '/' to the handler defined above. webapp2 also accepts a dotted
    # string path (e.g. 'user_profile.handler.index.index_handler.Index'),
    # but that module is not part of this project.
    webapp2.Route('/', handler=MainHandler),
], debug=True, config=config)
|
{
"content_hash": "9148993dbf467b2abbed264e5ce35a53",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 75,
"avg_line_length": 20.38888888888889,
"alnum_prop": 0.6512261580381471,
"repo_name": "parmardhruv3041/salemybook",
"id": "7559950318a836b54c3553f3528fd86b99995d54",
"size": "367",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "main.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "162"
},
{
"name": "Python",
"bytes": "572"
}
],
"symlink_target": ""
}
|
import e32
import _recorder
ENotReady = _recorder.ENotReady
EOpen = _recorder.EOpen
EPlaying = _recorder.EPlaying
ERecording = _recorder.ERecording
KMdaRepeatForever = _recorder.KMdaRepeatForever
TTS_PREFIX = "(tts)"
class Sound(object):
def __init__(self):
self._player=_recorder.Player()
def open(filename):
def open_cb(previous, current, err):
callback_error[0]=err
lock.signal()
player=Sound()
lock=e32.Ao_lock()
callback_error=[0]
player._player.bind(open_cb)
player._player.open_file(unicode(filename))
lock.wait()
if callback_error[0]:
raise SymbianError,(callback_error[0],
"Error opening file: "+e32.strerror(callback_error[0]))
return player
open=staticmethod(open)
def _say(text):
def say_cb(previous, current, err):
callback_error[0]=err
lock.signal()
player=Sound()
lock=e32.Ao_lock()
callback_error=[0]
player._player.bind(say_cb)
player._player.say(text)
lock.wait()
if callback_error[0]:
raise SymbianError,(callback_error[0],
"Error: "+e32.strerror(callback_error[0]))
_say=staticmethod(_say)
def play(self, times=1, interval=0, callback=None):
def play_cb(previous, current, err):
#This is called first with EPlaying meaning that the playing started
#and with EOpen meaning that the playing stopped.
callback_error[0]=err
if callback!=None:
if (current==EPlaying or current==EOpen):
lock.signal()
callback(previous, current, err)
elif (current==EPlaying or current==EOpen) and callback==None:
lock.signal()
if self.state()!=EOpen:
raise RuntimeError,("Sound not in correct state, state: %d" % (self.state()))
lock=e32.Ao_lock()
callback_error=[0]
self._player.bind(play_cb)
if not times==KMdaRepeatForever:
times=times-1
self._player.play(times, interval)
lock.wait()
if callback_error[0]:
raise SymbianError,(callback_error[0],
"Error playing file: "+e32.strerror(callback_error[0]))
def record(self):
def rec_cb(previous, current, err):
callback_error[0]=err
lock.signal()
if self.state()!=EOpen:
raise RuntimeError,("Sound not in correct state, state: %d" % (self.state()))
lock=e32.Ao_lock()
callback_error=[0]
self._player.bind(rec_cb)
self._player.record()
lock.wait()
if callback_error[0]:
raise SymbianError,(callback_error[0],
"Error while recording: "+e32.strerror(callback_error[0]))
def stop(self):
self._player.stop()
def close(self):
self._player.close_file()
def state(self):
return self._player.state()
def max_volume(self):
return self._player.max_volume()
def set_volume(self, volume):
if volume<0:
volume=0
elif volume>self._player.max_volume():
volume=self._player.max_volume()
return self._player.set_volume(volume)
def current_volume(self):
return self._player.current_volume()
def duration(self):
return self._player.duration()
def set_position(self, position):
self._player.set_position(position)
def current_position(self):
return self._player.current_position()
def say(text, prefix=TTS_PREFIX):
if type(text) is unicode:
text = text.encode('utf8')
return Sound._say(prefix+text)
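# Illustrative usage sketch (assumes a PyS60 device; the file path below is
# hypothetical):
#
#     snd = Sound.open(u'e:\\sounds\\sample.wav')
#     snd.play()            # blocks until playback finishes
#     snd.close()
#     say(u'hello world')   # text-to-speech via the "(tts)" prefix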
|
{
"content_hash": "26d59b4bebc0ed5c97f1838dc3b0442c",
"timestamp": "",
"source": "github",
"line_count": 107,
"max_line_length": 90,
"avg_line_length": 35.626168224299064,
"alnum_prop": 0.5739769150052466,
"repo_name": "pymo/pymo",
"id": "8a3544c1a91abc160f35ef31721902815e33dd99",
"size": "4422",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "symbian/PythonForS60_1.9.6/module-repo/dev-modules/audio/audio.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "3425"
},
{
"name": "C",
"bytes": "581274"
},
{
"name": "C++",
"bytes": "151108"
},
{
"name": "Clarion",
"bytes": "2743"
},
{
"name": "Groff",
"bytes": "13374"
},
{
"name": "HTML",
"bytes": "240526"
},
{
"name": "Java",
"bytes": "153187"
},
{
"name": "Makefile",
"bytes": "144854"
},
{
"name": "PLSQL",
"bytes": "22886"
},
{
"name": "Python",
"bytes": "27673732"
},
{
"name": "R",
"bytes": "1370"
},
{
"name": "Shell",
"bytes": "29384"
},
{
"name": "Visual Basic",
"bytes": "481"
}
],
"symlink_target": ""
}
|
import pytest
from marshmallow_jsonapi import utils
@pytest.mark.parametrize(
"tag,val",
[
("<id>", "id"),
("<author.last_name>", "author.last_name"),
("<comment.author.first_name>", "comment.author.first_name"),
("True", None),
("", None),
],
)
def test_tpl(tag, val):
assert utils.tpl(tag) == val
|
{
"content_hash": "d970636e06e6d736a08d2e509b7f98f4",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 69,
"avg_line_length": 21.058823529411764,
"alnum_prop": 0.547486033519553,
"repo_name": "marshmallow-code/marshmallow-jsonapi",
"id": "2e10ac499186d2cf5a52d798773e7f66ef854ec0",
"size": "358",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "tests/test_utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "104489"
}
],
"symlink_target": ""
}
|
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Invitation.invited_at'
db.add_column('core_invitation', 'invited_at',
self.gf('django.db.models.fields.DateField')(auto_now_add=True, null=True, blank=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Invitation.invited_at'
db.delete_column('core_invitation', 'invited_at')
models = {
'core.invitation': {
'Meta': {'object_name': 'Invitation'},
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True'}),
'invited_at': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'default': "'7666174c-a3b2-4d1b-b94b-81f8b5b1274f'", 'max_length': '36', 'primary_key': 'True'})
}
}
complete_apps = ['core']
|
{
"content_hash": "c841c3378ec4740a2957887b713bed5e",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 158,
"avg_line_length": 39.107142857142854,
"alnum_prop": 0.5926940639269407,
"repo_name": "jacobjbollinger/sorbet",
"id": "ba5265997a6beca5f919367afeb5e1239438baa5",
"size": "1119",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sorbet/core/migrations/0004_auto__add_field_invitation_invited_at.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('push', '0014_auto_20161115_2310'),
]
operations = [
migrations.CreateModel(
name='ApplicationModel',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('application_name', models.CharField(max_length=20)),
('username', models.CharField(max_length=50)),
],
),
migrations.AlterField(
model_name='notificationmodel',
name='execute_datetime',
field=models.CharField(default='2017/01/04 04:52', max_length=16),
),
]
|
{
"content_hash": "1987aa16cc0c162e3bc655b0fae26142",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 114,
"avg_line_length": 30.346153846153847,
"alnum_prop": 0.5754119138149556,
"repo_name": "nnsnodnb/django-mbaas",
"id": "ba91fd1037a66e610b93d06f5b650ac8f87b66b7",
"size": "861",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "push/migrations/0015_auto_20170104_0452.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "32"
},
{
"name": "HTML",
"bytes": "52557"
},
{
"name": "JavaScript",
"bytes": "5938"
},
{
"name": "Python",
"bytes": "65164"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import models, migrations
from decimal import Decimal
import django.db.models.deletion
from django.conf import settings
import django.core.validators
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('crm', '0001_initial'),
('contracts', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Activity',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('code', models.CharField(help_text=b'Enter a short code to describe the type of activity that took place.', unique=True, max_length=5)),
('name', models.CharField(help_text=b'Now enter a more meaningful name for the activity.', max_length=50)),
('billable', models.BooleanField(default=True)),
],
options={
'ordering': ('name',),
'db_table': 'timepiece_activity',
'verbose_name_plural': 'activities',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='ActivityGroup',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(unique=True, max_length=255)),
('activities', models.ManyToManyField(related_name='activity_group', to='entries.Activity')),
],
options={
'db_table': 'timepiece_activitygroup',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Entry',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('status', models.CharField(default=b'unverified', max_length=24, choices=[(b'not-invoiced', b'Not Invoiced'), (b'invoiced', b'Invoiced'), (b'verified', b'Verified'), (b'approved', b'Approved'), (b'unverified', b'Unverified')])),
('start_time', models.DateTimeField()),
('end_time', models.DateTimeField(db_index=True, null=True, blank=True)),
('seconds_paused', models.PositiveIntegerField(default=0)),
('pause_time', models.DateTimeField(null=True, blank=True)),
('comments', models.TextField(blank=True)),
('date_updated', models.DateTimeField(auto_now=True)),
('hours', models.DecimalField(default=0, max_digits=8, decimal_places=2)),
('activity', models.ForeignKey(related_name='entries', to='entries.Activity')),
('entry_group', models.ForeignKey(related_name='entries', on_delete=django.db.models.deletion.SET_NULL, blank=True, to='contracts.EntryGroup', null=True)),
],
options={
'ordering': ('-start_time',),
'db_table': 'timepiece_entry',
'verbose_name_plural': 'entries',
'permissions': (('can_clock_in', 'Can use Pendulum to clock in'), ('can_pause', 'Can pause and unpause log entries'), ('can_clock_out', 'Can use Pendulum to clock out'), ('view_entry_summary', 'Can view entry summary page'), ('view_payroll_summary', 'Can view payroll summary page'), ('approve_timesheet', 'Can approve a verified timesheet')),
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Location',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(unique=True, max_length=255)),
('slug', models.CharField(unique=True, max_length=255)),
],
options={
'db_table': 'timepiece_location',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='ProjectHours',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('week_start', models.DateField(verbose_name=b'start of week')),
('hours', models.DecimalField(default=0, max_digits=8, decimal_places=2, validators=[django.core.validators.MinValueValidator(Decimal('0.01'))])),
('published', models.BooleanField(default=False)),
('project', models.ForeignKey(to='crm.Project')),
('user', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
],
options={
'db_table': 'timepiece_projecthours',
'verbose_name': 'project hours entry',
'verbose_name_plural': 'project hours entries',
},
bases=(models.Model,),
),
migrations.AlterUniqueTogether(
name='projecthours',
unique_together=set([('week_start', 'project', 'user')]),
),
migrations.AddField(
model_name='entry',
name='location',
field=models.ForeignKey(related_name='entries', to='entries.Location'),
preserve_default=True,
),
migrations.AddField(
model_name='entry',
name='project',
field=models.ForeignKey(related_name='entries', to='crm.Project'),
preserve_default=True,
),
migrations.AddField(
model_name='entry',
name='user',
field=models.ForeignKey(related_name='timepiece_entries', to=settings.AUTH_USER_MODEL),
preserve_default=True,
),
]
|
{
"content_hash": "43d396c3dbf4c9e30df9e20e23371731",
"timestamp": "",
"source": "github",
"line_count": 120,
"max_line_length": 359,
"avg_line_length": 48.516666666666666,
"alnum_prop": 0.5623497080041223,
"repo_name": "caktus/django-timepiece",
"id": "51028b07507f3ab84ea80f8b08fcf0711cc75d29",
"size": "5846",
"binary": false,
"copies": "3",
"ref": "refs/heads/develop",
"path": "timepiece/entries/migrations/0001_initial.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "23745"
},
{
"name": "HTML",
"bytes": "235951"
},
{
"name": "JavaScript",
"bytes": "202697"
},
{
"name": "Python",
"bytes": "562382"
}
],
"symlink_target": ""
}
|
"""
An API to insert and retrieve metadata on cloud artifacts.
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: v1alpha1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class ApiPgpSignedAttestation(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'signature': 'str',
'content_type': 'PgpSignedAttestationContentType',
'pgp_key_id': 'str'
}
attribute_map = {
'signature': 'signature',
'content_type': 'content_type',
'pgp_key_id': 'pgp_key_id'
}
def __init__(self, signature=None, content_type=None, pgp_key_id=None): # noqa: E501
"""ApiPgpSignedAttestation - a model defined in Swagger""" # noqa: E501
self._signature = None
self._content_type = None
self._pgp_key_id = None
self.discriminator = None
if signature is not None:
self.signature = signature
if content_type is not None:
self.content_type = content_type
if pgp_key_id is not None:
self.pgp_key_id = pgp_key_id
@property
def signature(self):
"""Gets the signature of this ApiPgpSignedAttestation. # noqa: E501
The raw content of the signature, as output by gpg or equivalent. Since this message only supports attached signatures, the payload that was signed must be attached. While the signature format supported is dependent on the verification implementation, currently only ASCII-armored (`--armor` to gpg), non-clearsigned (`--sign` rather than `--clearsign` to gpg) are supported. Concretely, `gpg --sign --armor --output=signature.gpg payload.json` will create the signature content expected in this field in `signature.gpg` for the `payload.json` attestation payload. # noqa: E501
:return: The signature of this ApiPgpSignedAttestation. # noqa: E501
:rtype: str
"""
return self._signature
@signature.setter
def signature(self, signature):
"""Sets the signature of this ApiPgpSignedAttestation.
The raw content of the signature, as output by gpg or equivalent. Since this message only supports attached signatures, the payload that was signed must be attached. While the signature format supported is dependent on the verification implementation, currently only ASCII-armored (`--armor` to gpg), non-clearsigned (`--sign` rather than `--clearsign` to gpg) are supported. Concretely, `gpg --sign --armor --output=signature.gpg payload.json` will create the signature content expected in this field in `signature.gpg` for the `payload.json` attestation payload. # noqa: E501
:param signature: The signature of this ApiPgpSignedAttestation. # noqa: E501
:type: str
"""
self._signature = signature
@property
def content_type(self):
"""Gets the content_type of this ApiPgpSignedAttestation. # noqa: E501
Type (e.g. schema) of the attestation payload that was signed. The verifier must ensure that the provided type is one that the verifier supports, and that the attestation payload is a valid instantiation of that type (e.g. by validating a JSON schema). # noqa: E501
:return: The content_type of this ApiPgpSignedAttestation. # noqa: E501
:rtype: PgpSignedAttestationContentType
"""
return self._content_type
@content_type.setter
def content_type(self, content_type):
"""Sets the content_type of this ApiPgpSignedAttestation.
Type (e.g. schema) of the attestation payload that was signed. The verifier must ensure that the provided type is one that the verifier supports, and that the attestation payload is a valid instantiation of that type (e.g. by validating a JSON schema). # noqa: E501
:param content_type: The content_type of this ApiPgpSignedAttestation. # noqa: E501
:type: PgpSignedAttestationContentType
"""
self._content_type = content_type
@property
def pgp_key_id(self):
"""Gets the pgp_key_id of this ApiPgpSignedAttestation. # noqa: E501
The cryptographic fingerprint of the key used to generate the signature, as output by, e.g. `gpg --list-keys`. This should be the version 4, full 160-bit fingerprint, expressed as a 40 character hexidecimal string. See https://tools.ietf.org/html/rfc4880#section-12.2 for details. Implementations may choose to acknowledge \"LONG\", \"SHORT\", or other abbreviated key IDs, but only the full fingerprint is guaranteed to work. In gpg, the full fingerprint can be retrieved from the `fpr` field returned when calling --list-keys with --with-colons. For example: ``` gpg --with-colons --with-fingerprint --force-v4-certs \\ --list-keys attester@example.com tru::1:1513631572:0:3:1:5 pub:...<SNIP>... fpr:::::::::24FF6481B76AC91E66A00AC657A93A81EF3AE6FB: ``` Above, the fingerprint is `24FF6481B76AC91E66A00AC657A93A81EF3AE6FB`. # noqa: E501
:return: The pgp_key_id of this ApiPgpSignedAttestation. # noqa: E501
:rtype: str
"""
return self._pgp_key_id
@pgp_key_id.setter
def pgp_key_id(self, pgp_key_id):
"""Sets the pgp_key_id of this ApiPgpSignedAttestation.
The cryptographic fingerprint of the key used to generate the signature, as output by, e.g. `gpg --list-keys`. This should be the version 4, full 160-bit fingerprint, expressed as a 40 character hexidecimal string. See https://tools.ietf.org/html/rfc4880#section-12.2 for details. Implementations may choose to acknowledge \"LONG\", \"SHORT\", or other abbreviated key IDs, but only the full fingerprint is guaranteed to work. In gpg, the full fingerprint can be retrieved from the `fpr` field returned when calling --list-keys with --with-colons. For example: ``` gpg --with-colons --with-fingerprint --force-v4-certs \\ --list-keys attester@example.com tru::1:1513631572:0:3:1:5 pub:...<SNIP>... fpr:::::::::24FF6481B76AC91E66A00AC657A93A81EF3AE6FB: ``` Above, the fingerprint is `24FF6481B76AC91E66A00AC657A93A81EF3AE6FB`. # noqa: E501
:param pgp_key_id: The pgp_key_id of this ApiPgpSignedAttestation. # noqa: E501
:type: str
"""
self._pgp_key_id = pgp_key_id
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(ApiPgpSignedAttestation, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ApiPgpSignedAttestation):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
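# Illustrative sketch (not part of the generated client): build the model and
# serialize it with to_dict(); the signature value below is a placeholder.
if __name__ == '__main__':
    _att = ApiPgpSignedAttestation(
        signature='-----BEGIN PGP SIGNATURE-----\n...\n-----END PGP SIGNATURE-----',
        pgp_key_id='24FF6481B76AC91E66A00AC657A93A81EF3AE6FB')
    print(_att.to_dict())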
|
{
"content_hash": "12cbdf1f5fbd7296cd14711c0abf1843",
"timestamp": "",
"source": "github",
"line_count": 171,
"max_line_length": 852,
"avg_line_length": 48.421052631578945,
"alnum_prop": 0.6526570048309178,
"repo_name": "grafeas/client-python",
"id": "779f18eb24705ad2ad6622ea0bc6c045e042c6ae",
"size": "8297",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "grafeas/models/api_pgp_signed_attestation.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "558375"
},
{
"name": "Shell",
"bytes": "2063"
}
],
"symlink_target": ""
}
|
"""
Utilities and compatibility abstraction.
Licensed under MIT
Copyright (c) 2015 - 2018 Isaac Muse <isaacmuse@gmail.com>
"""
import sys
import struct
PY2 = (2, 0) <= sys.version_info < (3, 0)
PY3 = (3, 0) <= sys.version_info < (4, 0)
PY34 = (3, 4) <= sys.version_info
PY36 = (3, 6) <= sys.version_info
PY37 = (3, 7) <= sys.version_info
FMT_FIELD = 0
FMT_INDEX = 1
FMT_ATTR = 2
FMT_CONV = 3
FMT_SPEC = 4
if PY3:
from functools import lru_cache # noqa F401
import copyreg # noqa F401
string_type = str
binary_type = bytes
unichar = chr
iter_range = range # noqa F821
else:
from backports.functools_lru_cache import lru_cache # noqa F401
import copy_reg as copyreg # noqa F401
string_type = unicode # noqa F821
binary_type = str # noqa F821
unichar = unichr # noqa F821
iter_range = xrange # noqa F821
class StringIter(object):
"""Preprocess replace tokens."""
def __init__(self, text):
"""Initialize."""
self._string = text
self._index = 0
def __iter__(self):
"""Iterate."""
return self
def __next__(self):
"""Python 3 iterator compatible next."""
return self.iternext()
@property
def index(self):
"""Get Index."""
return self._index
def rewind(self, count):
"""Rewind index."""
if count > self._index: # pragma: no cover
raise ValueError("Can't rewind past beginning!")
self._index -= count
def iternext(self):
"""Iterate through characters of the string."""
try:
char = self._string[self._index]
self._index += 1
except IndexError:
raise StopIteration
return char
# Python 2 iterator compatible next.
next = __next__ # noqa A002
def uchr(i):
"""Allow getting Unicode character on narrow python builds."""
try:
return unichar(i)
except ValueError:
return struct.pack('i', i).decode('utf-32')
def uord(c):
"""Get Unicode ordinal."""
if len(c) == 2:
high, low = [ord(p) for p in c]
ordinal = (high - 0xD800) * 0x400 + low - 0xDC00 + 0x10000
else:
ordinal = ord(c)
return ordinal
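# Worked example: on a narrow build U+1F600 is stored as the surrogate pair
# (0xD83D, 0xDE00), so uord(u'\U0001F600') computes
# (0xD83D - 0xD800) * 0x400 + 0xDE00 - 0xDC00 + 0x10000 = 0x1F600.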
def _to_bstr(l):
"""Convert to byte string."""
if isinstance(l, string_type):
l = l.encode('ascii', 'backslashreplace')
elif not isinstance(l, binary_type):
l = string_type(l).encode('ascii', 'backslashreplace')
return l
def format(m, l, capture, binary):
"""Perform a string format."""
for fmt_type, value in capture[1:]:
if fmt_type == FMT_ATTR:
# Attribute
l = getattr(l, value)
elif fmt_type == FMT_INDEX:
# Index
l = l[value]
elif fmt_type == FMT_CONV:
if binary:
# Conversion
if value in ('r', 'a'):
l = repr(l).encode('ascii', 'backslashreplace')
elif value == 's':
# If the object is not string or byte string already
l = _to_bstr(l)
else:
# Conversion
if value == 'a':
l = ascii(l)
elif value == 'r':
l = repr(l)
elif value == 's':
# If the object is not string or byte string already
l = string_type(l)
elif fmt_type == FMT_SPEC:
# Integers and floats don't have an explicit 's' format type.
if value[3] and value[3] == 's':
if isinstance(l, int): # pragma: no cover
raise ValueError("Unknown format code 's' for object of type 'int'")
if isinstance(l, float): # pragma: no cover
raise ValueError("Unknown format code 's' for object of type 'float'")
# Ensure object is a byte string
l = _to_bstr(l) if binary else string_type(l)
spec_type = value[1]
if spec_type == '^':
l = l.center(value[2], value[0])
elif spec_type == ">":
l = l.rjust(value[2], value[0])
else:
l = l.ljust(value[2], value[0])
# Make sure the final object is a byte string
return _to_bstr(l) if binary else string_type(l)
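# Illustrative sketch, assuming the first capture element is the field
# reference (skipped via capture[1:]) and a FMT_SPEC value is a
# (fill, align, width, type) tuple:
#
#     format(None, 'abc', [None, (FMT_SPEC, ('*', '^', 9, 's'))], binary=False)
#     # -> '***abc***'   (i.e. 'abc'.center(9, '*'))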
class Immutable(object):
"""Immutable."""
__slots__ = tuple()
def __init__(self, **kwargs):
"""Initialize."""
for k, v in kwargs.items():
super(Immutable, self).__setattr__(k, v)
def __setattr__(self, name, value):
"""Prevent mutability."""
raise AttributeError('Class is immutable!')
|
{
"content_hash": "da833869050aa8c8d68cda29b20f1a72",
"timestamp": "",
"source": "github",
"line_count": 184,
"max_line_length": 90,
"avg_line_length": 25.75,
"alnum_prop": 0.52701561840439,
"repo_name": "archifix/settings",
"id": "e3c9fa0fd9ea35865affcb269d915e6da7ff229c",
"size": "4738",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "sublime/Packages/backrefs/st3/backrefs/util.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "87"
},
{
"name": "CSS",
"bytes": "7148"
},
{
"name": "JavaScript",
"bytes": "247915"
},
{
"name": "Makefile",
"bytes": "738"
},
{
"name": "PowerShell",
"bytes": "414"
},
{
"name": "Python",
"bytes": "11559828"
},
{
"name": "Shell",
"bytes": "34878"
}
],
"symlink_target": ""
}
|
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "notepad.settings")
application = get_wsgi_application()
|
{
"content_hash": "0f38e5aee8bf98cbf306c0d4a9247e81",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 67,
"avg_line_length": 27.666666666666668,
"alnum_prop": 0.8012048192771084,
"repo_name": "rajujha373/OurNoticeBoardv2.0",
"id": "abca1d58a9c9d2cb8710316547639fdc18db2064",
"size": "166",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "localhost_settings/wsgi.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "21918"
},
{
"name": "Python",
"bytes": "29075"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
from . import tests
import sys
import unittest
from ctypes import Structure, c_float, c_double, c_uint8, CFUNCTYPE
from llvm import core as lc
from llvm import ee as le
from .support import (skip_if_win32, skip_if_not_win32, skip_if_not_32bits,
skip_if_not_64bits, skip_if_not_intel_cpu, TestCase)
class TwoDoubleOneByte(Structure):
_fields_ = ('x', c_double), ('y', c_double), ('z', c_uint8)
def __repr__(self):
return '<x=%f y=%f z=%d>' % (self.x, self.y, self.z)
class TwoDouble(Structure):
_fields_ = ('x', c_double), ('y', c_double)
def __repr__(self):
return '<x=%f y=%f>' % (self.x, self.y)
class TwoFloat(Structure):
_fields_ = ('x', c_float), ('y', c_float)
def __repr__(self):
return '<x=%f y=%f>' % (self.x, self.y)
class OneByte(Structure):
_fields_ = [('x', c_uint8)]
def __repr__(self):
return '<x=%d>' % (self.x,)
@skip_if_not_intel_cpu
@skip_if_win32
class TestStructSystemVABI(TestCase):
'''
    System V (non-Microsoft) calling convention.
'''
#----------------------------------------------------------------------
# 64 bits
@skip_if_not_64bits
def test_bigger_than_two_words_64(self):
m = lc.Module.new('test_struct_arg')
double_type = lc.Type.double()
uint8_type = lc.Type.int(8)
struct_type = lc.Type.struct([double_type, double_type, uint8_type])
struct_ptr_type = lc.Type.pointer(struct_type)
func_type = lc.Type.function(lc.Type.void(),
[struct_ptr_type, struct_ptr_type])
func = m.add_function(func_type, name='foo')
# return value pointer
func.args[0].add_attribute(lc.ATTR_STRUCT_RET)
# pass structure by value
func.args[1].add_attribute(lc.ATTR_BY_VAL)
# define function body
builder = lc.Builder.new(func.append_basic_block(''))
arg = builder.load(func.args[1])
e1, e2, e3 = [builder.extract_value(arg, i) for i in range(3)]
se1 = builder.fmul(e1, e2)
se2 = builder.fdiv(e1, e2)
ret = builder.insert_value(lc.Constant.undef(struct_type), se1, 0)
ret = builder.insert_value(ret, se2, 1)
ret = builder.insert_value(ret, e3, 2)
builder.store(ret, func.args[0])
builder.ret_void()
del builder
# verify
m.verify()
print(m)
# use with ctypes
engine = le.EngineBuilder.new(m).create()
ptr = engine.get_pointer_to_function(func)
cfunctype = CFUNCTYPE(TwoDoubleOneByte, TwoDoubleOneByte)
cfunc = cfunctype(ptr)
arg = TwoDoubleOneByte(x=1.321321, y=6.54352, z=128)
ret = cfunc(arg)
print(arg)
print(ret)
self.assertClose(arg.x * arg.y, ret.x)
self.assertClose(arg.x / arg.y, ret.y)
self.assertEqual(arg.z, ret.z)
@skip_if_not_64bits
def test_just_two_words_64(self):
m = lc.Module.new('test_struct_arg')
double_type = lc.Type.double()
struct_type = lc.Type.struct([double_type, double_type])
func_type = lc.Type.function(struct_type, [struct_type])
func = m.add_function(func_type, name='foo')
# define function body
builder = lc.Builder.new(func.append_basic_block(''))
arg = func.args[0]
e1, e2 = [builder.extract_value(arg, i) for i in range(2)]
se1 = builder.fmul(e1, e2)
se2 = builder.fdiv(e1, e2)
ret = builder.insert_value(lc.Constant.undef(struct_type), se1, 0)
ret = builder.insert_value(ret, se2, 1)
builder.ret(ret)
del builder
# verify
m.verify()
print(m)
# use with ctypes
engine = le.EngineBuilder.new(m).create()
ptr = engine.get_pointer_to_function(func)
cfunctype = CFUNCTYPE(TwoDouble, TwoDouble)
cfunc = cfunctype(ptr)
arg = TwoDouble(x=1.321321, y=6.54352)
ret = cfunc(arg)
print(arg)
print(ret)
self.assertClose(arg.x * arg.y, ret.x)
self.assertClose(arg.x / arg.y, ret.y)
@skip_if_not_64bits
def test_two_halfwords(self):
        '''Arguments smaller than or equal to a word are packed into a word.
        Passing the pair as struct { float, float } would occupy two XMM
        registers instead of one.
        The output must be in an XMM register.
        '''
m = lc.Module.new('test_struct_arg')
float_type = lc.Type.float()
struct_type = lc.Type.vector(float_type, 2)
func_type = lc.Type.function(struct_type, [struct_type])
func = m.add_function(func_type, name='foo')
# define function body
builder = lc.Builder.new(func.append_basic_block(''))
arg = func.args[0]
constint = lambda x: lc.Constant.int(lc.Type.int(), x)
e1, e2 = [builder.extract_element(arg, constint(i))
for i in range(2)]
se1 = builder.fmul(e1, e2)
se2 = builder.fdiv(e1, e2)
ret = builder.insert_element(lc.Constant.undef(struct_type), se1,
constint(0))
ret = builder.insert_element(ret, se2, constint(1))
builder.ret(ret)
del builder
# verify
m.verify()
print(m)
# use with ctypes
engine = le.EngineBuilder.new(m).create()
ptr = engine.get_pointer_to_function(func)
cfunctype = CFUNCTYPE(TwoFloat, TwoFloat)
cfunc = cfunctype(ptr)
arg = TwoFloat(x=1.321321, y=6.54352)
ret = cfunc(arg)
print(arg)
print(ret)
self.assertClose(arg.x * arg.y, ret.x)
self.assertClose(arg.x / arg.y, ret.y)
#----------------------------------------------------------------------
# 32 bits
@skip_if_not_32bits
def test_structure_abi_32_1(self):
        '''x86 is simple: structures are always passed in memory.
'''
m = lc.Module.new('test_struct_arg')
double_type = lc.Type.double()
uint8_type = lc.Type.int(8)
struct_type = lc.Type.struct([double_type, double_type, uint8_type])
struct_ptr_type = lc.Type.pointer(struct_type)
func_type = lc.Type.function(lc.Type.void(),
[struct_ptr_type, struct_ptr_type])
func = m.add_function(func_type, name='foo')
# return value pointer
func.args[0].add_attribute(lc.ATTR_STRUCT_RET)
# pass structure by value
func.args[1].add_attribute(lc.ATTR_BY_VAL)
# define function body
builder = lc.Builder.new(func.append_basic_block(''))
arg = builder.load(func.args[1])
e1, e2, e3 = [builder.extract_value(arg, i) for i in range(3)]
se1 = builder.fmul(e1, e2)
se2 = builder.fdiv(e1, e2)
ret = builder.insert_value(lc.Constant.undef(struct_type), se1, 0)
ret = builder.insert_value(ret, se2, 1)
ret = builder.insert_value(ret, e3, 2)
builder.store(ret, func.args[0])
builder.ret_void()
del builder
# verify
m.verify()
print(m)
# use with ctypes
engine = le.EngineBuilder.new(m).create()
ptr = engine.get_pointer_to_function(func)
cfunctype = CFUNCTYPE(TwoDoubleOneByte, TwoDoubleOneByte)
cfunc = cfunctype(ptr)
arg = TwoDoubleOneByte(x=1.321321, y=6.54352, z=128)
ret = cfunc(arg)
print(arg)
print(ret)
self.assertClose(arg.x * arg.y, ret.x)
self.assertClose(arg.x / arg.y, ret.y)
self.assertEqual(arg.z, ret.z)
@skip_if_not_32bits
def test_structure_abi_32_2(self):
        '''x86 is simple: structures are always passed in memory.
'''
m = lc.Module.new('test_struct_arg')
float_type = lc.Type.float()
struct_type = lc.Type.struct([float_type, float_type])
struct_ptr_type = lc.Type.pointer(struct_type)
func_type = lc.Type.function(lc.Type.void(),
[struct_ptr_type, struct_ptr_type])
func = m.add_function(func_type, name='foo')
# return value pointer
func.args[0].add_attribute(lc.ATTR_STRUCT_RET)
# pass structure by value
func.args[1].add_attribute(lc.ATTR_BY_VAL)
# define function body
builder = lc.Builder.new(func.append_basic_block(''))
arg = builder.load(func.args[1])
e1, e2 = [builder.extract_value(arg, i) for i in range(2)]
se1 = builder.fmul(e1, e2)
se2 = builder.fdiv(e1, e2)
ret = builder.insert_value(lc.Constant.undef(struct_type), se1, 0)
ret = builder.insert_value(ret, se2, 1)
builder.store(ret, func.args[0])
builder.ret_void()
del builder
# verify
m.verify()
print(m)
# use with ctypes
engine = le.EngineBuilder.new(m).create()
ptr = engine.get_pointer_to_function(func)
cfunctype = CFUNCTYPE(TwoFloat, TwoFloat)
cfunc = cfunctype(ptr)
arg = TwoFloat(x=1.321321, y=6.54352)
ret = cfunc(arg)
print(arg)
print(ret)
self.assertClose(arg.x * arg.y, ret.x)
self.assertClose(arg.x / arg.y, ret.y)
@skip_if_not_32bits
def test_structure_abi_32_3(self):
        '''x86 is simple: structures are always passed in memory.
'''
m = lc.Module.new('test_struct_arg')
uint8_type = lc.Type.int(8)
struct_type = lc.Type.struct([uint8_type])
struct_ptr_type = lc.Type.pointer(struct_type)
func_type = lc.Type.function(lc.Type.void(),
[struct_ptr_type, struct_ptr_type])
func = m.add_function(func_type, name='foo')
# return value pointer
func.args[0].add_attribute(lc.ATTR_STRUCT_RET)
# pass structure by value
func.args[1].add_attribute(lc.ATTR_BY_VAL)
# define function body
builder = lc.Builder.new(func.append_basic_block(''))
arg = builder.load(func.args[1])
e1 = builder.extract_value(arg, 0)
se1 = builder.mul(e1, e1)
ret = builder.insert_value(lc.Constant.undef(struct_type), se1, 0)
builder.store(ret, func.args[0])
builder.ret_void()
del builder
# verify
m.verify()
print(m)
# use with ctypes
engine = le.EngineBuilder.new(m).create()
ptr = engine.get_pointer_to_function(func)
cfunctype = CFUNCTYPE(OneByte, OneByte)
cfunc = cfunctype(ptr)
arg = OneByte(x=8)
ret = cfunc(arg)
print(arg)
print(ret)
self.assertEqual(arg.x * arg.x, ret.x)
tests.append(TestStructSystemVABI)
@skip_if_not_intel_cpu
@skip_if_not_win32
class TestStructMicrosoftABI(TestCase):
'''
Microsoft convention
'''
#----------------------------------------------------------------------
# 64 bits
@skip_if_not_64bits
def test_bigger_than_two_words_64(self):
m = lc.Module.new('test_struct_arg')
double_type = lc.Type.double()
uint8_type = lc.Type.int(8)
struct_type = lc.Type.struct([double_type, double_type, uint8_type])
struct_ptr_type = lc.Type.pointer(struct_type)
func_type = lc.Type.function(lc.Type.void(),
[struct_ptr_type, struct_ptr_type])
func = m.add_function(func_type, name='foo')
# return value pointer
func.args[0].add_attribute(lc.ATTR_STRUCT_RET)
# pass structure by value
func.args[1].add_attribute(lc.ATTR_BY_VAL)
# define function body
builder = lc.Builder.new(func.append_basic_block(''))
arg = builder.load(func.args[1])
e1, e2, e3 = [builder.extract_value(arg, i) for i in range(3)]
se1 = builder.fmul(e1, e2)
se2 = builder.fdiv(e1, e2)
ret = builder.insert_value(lc.Constant.undef(struct_type), se1, 0)
ret = builder.insert_value(ret, se2, 1)
ret = builder.insert_value(ret, e3, 2)
builder.store(ret, func.args[0])
builder.ret_void()
del builder
# verify
m.verify()
print(m)
# use with ctypes
engine = le.EngineBuilder.new(m).create()
ptr = engine.get_pointer_to_function(func)
cfunctype = CFUNCTYPE(TwoDoubleOneByte, TwoDoubleOneByte)
cfunc = cfunctype(ptr)
arg = TwoDoubleOneByte(x=1.321321, y=6.54352, z=128)
ret = cfunc(arg)
print(arg)
print(ret)
self.assertClose(arg.x * arg.y, ret.x)
self.assertClose(arg.x / arg.y, ret.y)
self.assertEqual(arg.z, ret.z)
@skip_if_not_64bits
def test_just_two_words_64(self):
m = lc.Module.new('test_struct_arg')
double_type = lc.Type.double()
struct_type = lc.Type.struct([double_type, double_type])
struct_ptr_type = lc.Type.pointer(struct_type)
func_type = lc.Type.function(lc.Type.void(),
[struct_ptr_type, struct_ptr_type])
func = m.add_function(func_type, name='foo')
# return value pointer
func.args[0].add_attribute(lc.ATTR_STRUCT_RET)
# pass structure by value
func.args[1].add_attribute(lc.ATTR_BY_VAL)
# define function body
builder = lc.Builder.new(func.append_basic_block(''))
arg = builder.load(func.args[1])
e1, e2 = [builder.extract_value(arg, i) for i in range(2)]
se1 = builder.fmul(e1, e2)
se2 = builder.fdiv(e1, e2)
ret = builder.insert_value(lc.Constant.undef(struct_type), se1, 0)
ret = builder.insert_value(ret, se2, 1)
builder.store(ret, func.args[0])
builder.ret_void()
del builder
# verify
m.verify()
print(m)
# use with ctypes
engine = le.EngineBuilder.new(m).create()
ptr = engine.get_pointer_to_function(func)
cfunctype = CFUNCTYPE(TwoDouble, TwoDouble)
cfunc = cfunctype(ptr)
arg = TwoDouble(x=1.321321, y=6.54352)
ret = cfunc(arg)
print(arg)
print(ret)
self.assertClose(arg.x * arg.y, ret.x)
self.assertClose(arg.x / arg.y, ret.y)
@skip_if_not_64bits
def test_two_halfwords(self):
        '''Arguments smaller than or equal to a word are packed into a word.
        Float structures are not passed in XMM registers under this convention;
        treat the packed pair as an i64.
        '''
m = lc.Module.new('test_struct_arg')
float_type = lc.Type.float()
struct_type = lc.Type.struct([float_type, float_type])
abi_type = lc.Type.int(64)
func_type = lc.Type.function(abi_type, [abi_type])
func = m.add_function(func_type, name='foo')
# define function body
builder = lc.Builder.new(func.append_basic_block(''))
arg = func.args[0]
struct_ptr = builder.alloca(struct_type)
struct_int_ptr = builder.bitcast(struct_ptr, lc.Type.pointer(abi_type))
builder.store(arg, struct_int_ptr)
arg = builder.load(struct_ptr)
e1, e2 = [builder.extract_value(arg, i) for i in range(2)]
se1 = builder.fmul(e1, e2)
se2 = builder.fdiv(e1, e2)
ret = builder.insert_value(lc.Constant.undef(struct_type), se1, 0)
ret = builder.insert_value(ret, se2, 1)
builder.store(ret, struct_ptr)
ret = builder.load(struct_int_ptr)
builder.ret(ret)
del builder
# verify
m.verify()
print(m)
# use with ctypes
engine = le.EngineBuilder.new(m).create()
ptr = engine.get_pointer_to_function(func)
cfunctype = CFUNCTYPE(TwoFloat, TwoFloat)
cfunc = cfunctype(ptr)
arg = TwoFloat(x=1.321321, y=6.54352)
ret = cfunc(arg)
print(arg)
print(ret)
self.assertClose(arg.x * arg.y, ret.x)
self.assertClose(arg.x / arg.y, ret.y)
#----------------------------------------------------------------------
# 32 bits
@skip_if_not_32bits
def test_one_word_register(self):
        '''The argument is passed in memory.
        The return value is passed in a register.
'''
m = lc.Module.new('test_struct_arg')
uint8_type = lc.Type.int(8)
struct_type = lc.Type.struct([uint8_type])
struct_ptr_type = lc.Type.pointer(struct_type)
func_type = lc.Type.function(struct_type, [struct_ptr_type])
func = m.add_function(func_type, name='foo')
# pass structure by value
func.args[0].add_attribute(lc.ATTR_BY_VAL)
# define function body
builder = lc.Builder.new(func.append_basic_block(''))
arg = builder.load(func.args[0])
e1 = builder.extract_value(arg, 0)
se1 = builder.mul(e1, e1)
ret = builder.insert_value(lc.Constant.undef(struct_type), se1, 0)
builder.ret(ret)
del builder
# verify
m.verify()
print(m)
# use with ctypes
engine = le.EngineBuilder.new(m).create()
ptr = engine.get_pointer_to_function(func)
cfunctype = CFUNCTYPE(OneByte, OneByte)
cfunc = cfunctype(ptr)
arg = OneByte(x=8)
ret = cfunc(arg)
print(arg)
print(ret)
self.assertEqual(arg.x * arg.x, ret.x)
@skip_if_not_32bits
def test_two_floats(self):
        '''The argument is passed by register.
        The return value comes back in two registers.
'''
m = lc.Module.new('test_struct_arg')
float_type = lc.Type.float()
struct_type = lc.Type.struct([float_type, float_type])
abi_type = lc.Type.int(64)
func_type = lc.Type.function(abi_type, [struct_type])
func = m.add_function(func_type, name='foo')
# define function body
builder = lc.Builder.new(func.append_basic_block(''))
out_ptr = builder.alloca(struct_type)
arg = func.args[0]
e1, e2 = [builder.extract_value(arg, i) for i in range(2)]
se1 = builder.fmul(e1, e2)
se2 = builder.fdiv(e1, e2)
ret = builder.insert_value(lc.Constant.undef(struct_type), se1, 0)
ret = builder.insert_value(ret, se2, 1)
builder.store(ret, out_ptr)
out_int_ptr = builder.bitcast(out_ptr, lc.Type.pointer(abi_type))
builder.ret(builder.load(out_int_ptr))
del builder
# verify
m.verify()
print(m)
# use with ctypes
engine = le.EngineBuilder.new(m).create()
ptr = engine.get_pointer_to_function(func)
cfunctype = CFUNCTYPE(TwoFloat, TwoFloat)
cfunc = cfunctype(ptr)
arg = TwoFloat(x=1.321321, y=6.54352)
ret = cfunc(arg)
print(arg)
print(ret)
self.assertClose(arg.x * arg.y, ret.x)
self.assertClose(arg.x / arg.y, ret.y)
@skip_if_not_32bits
def test_bigger_than_two_words(self):
'''Pass in memory.
'''
m = lc.Module.new('test_struct_arg')
double_type = lc.Type.double()
uint8_type = lc.Type.int(8)
struct_type = lc.Type.struct([double_type, double_type, uint8_type])
struct_ptr_type = lc.Type.pointer(struct_type)
func_type = lc.Type.function(lc.Type.void(),
[struct_ptr_type, struct_ptr_type])
func = m.add_function(func_type, name='foo')
# return value pointer
func.args[0].add_attribute(lc.ATTR_STRUCT_RET)
# pass structure by value
func.args[1].add_attribute(lc.ATTR_BY_VAL)
# define function body
builder = lc.Builder.new(func.append_basic_block(''))
arg = builder.load(func.args[1])
e1, e2, e3 = [builder.extract_value(arg, i) for i in range(3)]
se1 = builder.fmul(e1, e2)
se2 = builder.fdiv(e1, e2)
ret = builder.insert_value(lc.Constant.undef(struct_type), se1, 0)
ret = builder.insert_value(ret, se2, 1)
ret = builder.insert_value(ret, e3, 2)
builder.store(ret, func.args[0])
builder.ret_void()
del builder
# verify
m.verify()
print(m)
# use with ctypes
engine = le.EngineBuilder.new(m).create()
ptr = engine.get_pointer_to_function(func)
cfunctype = CFUNCTYPE(TwoDoubleOneByte, TwoDoubleOneByte)
cfunc = cfunctype(ptr)
arg = TwoDoubleOneByte(x=1.321321, y=6.54352, z=128)
ret = cfunc(arg)
print(arg)
print(ret)
self.assertClose(arg.x * arg.y, ret.x)
self.assertClose(arg.x / arg.y, ret.y)
self.assertEqual(arg.z, ret.z)
tests.append(TestStructMicrosoftABI)
if __name__ == "__main__":
unittest.main()
|
{
"content_hash": "0e248650a7737560613545395f025280",
"timestamp": "",
"source": "github",
"line_count": 685,
"max_line_length": 79,
"avg_line_length": 30.363503649635035,
"alnum_prop": 0.5646906101254868,
"repo_name": "llvmpy/llvmpy",
"id": "5e48ea84df740e6a54de0548ea14302f1b82b9ba",
"size": "20799",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "llvm/tests/test_struct_args.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "10456"
},
{
"name": "C++",
"bytes": "58044"
},
{
"name": "CSS",
"bytes": "12590"
},
{
"name": "HTML",
"bytes": "851926"
},
{
"name": "JavaScript",
"bytes": "4102"
},
{
"name": "LLVM",
"bytes": "35445"
},
{
"name": "Makefile",
"bytes": "1862"
},
{
"name": "Python",
"bytes": "720443"
},
{
"name": "Shell",
"bytes": "335"
}
],
"symlink_target": ""
}
|
import logging as _logging
from scli import prompt
from scli import config_file
from scli.constants import ParameterName
from scli.constants import ParameterSource
from scli.constants import EnvironmentHealth
from scli.constants import EnvironmentStatus
from scli.constants import ServiceDefault
from scli.constants import ServiceRegionId
from scli.constants import ValidationSeverity
from scli.exception import EBSCliException
from scli.operation.base import OperationBase
from scli.operation.base import OperationResult
from scli.parameter import Parameter
from scli.resources import CreateEnvironmentOpMessage
from scli.resources import DescribeEnvironmentOpMessage
from scli.resources import TerminateEnvironmentOpMessage
from scli.resources import UpdateEnvironmentOptionSettingOpMessage
from scli.resources import ValidationMessage
from scli.resources import WaitForCreateEnvironmentFinishOpMessage
from scli.resources import WaitForTerminateEnvironmentFinishOpMessage
from scli.resources import WaitForUpdateEnvOptionSettingFinishOpMessage
from scli.terminal.base import TerminalBase
from lib.elasticbeanstalk.exception import AlreadyExistException
from lib.elasticbeanstalk.request import TemplateSpecification
from lib.rds import rds_utils
from lib.utility import misc
log = _logging.getLogger('cli.op')
class DescribeEnvironmentOperation(OperationBase):
_input_parameters = {
ParameterName.AwsAccessKeyId,
ParameterName.AwsSecretAccessKey,
ParameterName.ServiceEndpoint,
ParameterName.ApplicationName,
ParameterName.EnvironmentName,
}
_output_parameters = {
ParameterName.EnvironmentName
}
def execute(self, parameter_pool):
eb_client = self._get_eb_client(parameter_pool)
app_name = parameter_pool.get_value(ParameterName.ApplicationName)
env_name = parameter_pool.get_value(ParameterName.EnvironmentName)
prompt.action(DescribeEnvironmentOpMessage.Start.format(env_name))
response = eb_client.describe_environments(app_name,
env_name,
include_deleted = 'false')
        log.info('Received response for DescribeEnvironments call.')
        self._log_api_result(self.__class__.__name__, 'DescribeEnvironments', response.result)
        if len(response.result) > 0:  # at least one matching environment returned
env_info = response.result[0]
message = DescribeEnvironmentOpMessage.Result.format(env_info.cname,
env_info.status,
env_info.health)
prompt.result(message)
prompt.info(DescribeEnvironmentOpMessage.Detail.format(env_info.environment_name,
env_info.environment_id,
env_info.solution_stack_name,
env_info.version_label,
env_info.date_created,
env_info.date_updated,
env_info.description))
# If not Green, pull the most recent warning and error events
if env_info.health in [EnvironmentHealth.Red, EnvironmentHealth.Yellow] \
or (env_info.status == EnvironmentStatus.Ready \
and env_info.health == EnvironmentHealth.Grey):
events = eb_client.describe_events(app_name,
env_name,
max_records = ServiceDefault.STATUS_EVENT_MAX_NUM,
severity = ServiceDefault.STATUS_EVENT_LEVEL)
if len(events.result) > 0:
# Having one error event
for event in events.result:
msg = '{0}\t{1}\t{2}'.format(event.event_date,
event.severity,
event.message)
log.info('Found last error event: {0}'.format(msg))
prompt.plain(msg)
# Display RDS instance host info
try:
logical_id, rds_property = rds_utils.retrieve_rds_instance_property\
(parameter_pool, env_name)
if rds_property is not None:
prompt.result(DescribeEnvironmentOpMessage.RdsInfo.format\
(logical_id,
rds_property.endpoint.address,
rds_property.endpoint.port))
prompt.info(DescribeEnvironmentOpMessage.RdsDetail.format\
(rds_property.engine + ' ' + rds_property.engine_version,
rds_property.allocated_storage,
rds_property.db_instance_class,
rds_property.multi_az,
rds_property.master_username,
rds_property.instance_create_time,
rds_property.db_instance_status))
except BaseException as ex:
log.error('Encountered error when retrieve environment resources: {0}.'.format(ex))
raise
else:
# No result. Environment not exist.
message = DescribeEnvironmentOpMessage.NoEnvironment.format(env_name)
prompt.result(message)
ret_result = OperationResult(self, response.request_id, message, response.result)
return ret_result
class CreateEnvironmentOperation(OperationBase):
_input_parameters = {
ParameterName.AwsAccessKeyId,
ParameterName.AwsSecretAccessKey,
ParameterName.ServiceEndpoint,
ParameterName.ApplicationName,
ParameterName.ApplicationVersionName,
ParameterName.EnvironmentName,
ParameterName.SolutionStack,
ParameterName.RdsEnabled,
}
_output_parameters = {
ParameterName.EnvironmentName,
ParameterName.EnvironmentId,
ParameterName.CreateEnvironmentRequestID,
}
def execute(self, parameter_pool):
eb_client = self._get_eb_client(parameter_pool)
app_name = parameter_pool.get_value(ParameterName.ApplicationName)
version_name = parameter_pool.get_value(ParameterName.ApplicationVersionName)
env_name = parameter_pool.get_value(ParameterName.EnvironmentName)
stack_name = parameter_pool.get_value(ParameterName.SolutionStack)
# Try load option setting file if exist
option_file_location = parameter_pool.get_value(ParameterName.OptionSettingFile)
option_settings = config_file.load_env_option_setting_file(option_file_location,
quiet = True)
if option_settings is not None and len(option_settings) > 0:
prompt.info(CreateEnvironmentOpMessage.UsingOptionSetting.format(option_file_location))
else:
option_settings = []
option_remove = set()
spec = TemplateSpecification()
rds_utils.rds_handler(parameter_pool, spec, stack_name, option_settings, option_remove)
self._option_setting_handler(option_settings, option_remove)
prompt.action(CreateEnvironmentOpMessage.Start.format(env_name))
try:
response = eb_client.create_environment(application = app_name,
environment = env_name,
solution_stack = stack_name,
version_label = version_name,
option_settings = option_settings,
option_remove = option_remove,
template_specification = spec,
)
except AlreadyExistException:
log.info('Environment "{0}" already exist.'.format(env_name))
prompt.result(CreateEnvironmentOpMessage.AlreadyExist.format(env_name))
ret_result = OperationResult(self,
None,
CreateEnvironmentOpMessage.AlreadyExist.format(env_name),
None)
else:
            log.info('Received response for CreateEnvironment call.')
prompt.info(CreateEnvironmentOpMessage.Succeed)
prompt.result(CreateEnvironmentOpMessage.WaitAfterLaunch.format(env_name))
self._log_api_result(self.__class__.__name__, 'CreateEnvironment', response.result)
parameter_pool.put(Parameter(ParameterName.CreateEnvironmentRequestID,
response.request_id,
ParameterSource.OperationOutput))
ret_result = OperationResult(self,
response.request_id,
CreateEnvironmentOpMessage.Succeed,
response.result)
return ret_result
def _rds_creation(self):
pass
class WaitForCreateEnvironmentFinishOperation(OperationBase):
_input_parameters = {
ParameterName.AwsAccessKeyId,
ParameterName.AwsSecretAccessKey,
ParameterName.ServiceEndpoint,
ParameterName.EnvironmentName,
ParameterName.CreateEnvironmentRequestID,
ParameterName.WaitForFinishTimeout,
ParameterName.PollDelay,
}
_output_parameters = set()
def execute(self, parameter_pool):
eb_client = self._get_eb_client(parameter_pool)
env_name = parameter_pool.get_value(ParameterName.EnvironmentName)
wait_timeout = parameter_pool.get_value(ParameterName.WaitForFinishTimeout)
poll_delay = parameter_pool.get_value(ParameterName.PollDelay)
create_request_id = parameter_pool.get_value(ParameterName.CreateEnvironmentRequestID)\
if parameter_pool.has(ParameterName.CreateEnvironmentRequestID) else None
result = self._wait_for_env_operation_finish(
eb_client = eb_client,
env_name = env_name,
original_request_id = create_request_id,
pending_status = EnvironmentStatus.Launching,
expected_health = None,
operation_name = self.__class__.__name__,
action_name = WaitForCreateEnvironmentFinishOpMessage.Action,
wait_timeout = wait_timeout,
poll_delay = poll_delay,
include_deleted = 'false',
initial_delay = 0)
# After polling
status = result[0].status
health = result[0].health
cname = result[0].cname
log.info('Stopped polling. Environment "{0}" is now {1}, health is {2}.\nURL is "{3}".'.\
format(env_name, status, health, cname))
if status.lower() == EnvironmentStatus.Ready.lower() \
and health.lower() == EnvironmentHealth.Green.lower():
prompt.info(WaitForCreateEnvironmentFinishOpMessage.Succeed.format(env_name))
prompt.result(WaitForCreateEnvironmentFinishOpMessage.Result.format(cname))
else:
prompt.info(WaitForCreateEnvironmentFinishOpMessage.Timeout.format(env_name))
ret_result = OperationResult(self,
None,
WaitForCreateEnvironmentFinishOpMessage.Result.\
format(cname, status, health),
result)
return ret_result
class TerminateEnvironmentOperation(OperationBase):
_input_parameters = {
ParameterName.AwsAccessKeyId,
ParameterName.AwsSecretAccessKey,
ParameterName.ServiceEndpoint,
ParameterName.EnvironmentName,
}
_output_parameters = {
ParameterName.TerminateEnvironmentRequestID,
}
def execute(self, parameter_pool):
eb_client = self._get_eb_client(parameter_pool)
env_name = parameter_pool.get_value(ParameterName.EnvironmentName)
prompt.action(TerminateEnvironmentOpMessage.Start.format(env_name))
try:
response = eb_client.terminate_environment(env_name)
except:
raise
else:
            log.info('Received response for TerminateEnvironment call.')
prompt.result(TerminateEnvironmentOpMessage.Succeed.format(env_name))
self._log_api_result(self.__class__.__name__, 'TerminateEnvironment', response.result)
parameter_pool.put(Parameter(ParameterName.TerminateEnvironmentRequestID,
response.request_id,
ParameterSource.OperationOutput))
ret_result = OperationResult(self,
response.request_id,
TerminateEnvironmentOpMessage.Succeed,
response.result)
return ret_result
class WaitForTerminateEnvironmentFinishOperation(OperationBase):
_input_parameters = {
ParameterName.AwsAccessKeyId,
ParameterName.AwsSecretAccessKey,
ParameterName.ServiceEndpoint,
ParameterName.EnvironmentName,
ParameterName.TerminateEnvironmentRequestID,
ParameterName.WaitForFinishTimeout,
ParameterName.PollDelay,
}
_output_parameters = set()
def execute(self, parameter_pool):
eb_client = self._get_eb_client(parameter_pool)
env_name = parameter_pool.get_value(ParameterName.EnvironmentName)
wait_timeout = parameter_pool.get_value(ParameterName.WaitForFinishTimeout)
poll_delay = parameter_pool.get_value(ParameterName.PollDelay)
terminate_request_id = parameter_pool.get_value(ParameterName.TerminateEnvironmentRequestID)\
if parameter_pool.has(ParameterName.TerminateEnvironmentRequestID) else None
result = self._wait_for_env_operation_finish(
eb_client = eb_client,
env_name = env_name,
original_request_id = terminate_request_id,
pending_status = EnvironmentStatus.Terminating,
expected_health = None,
operation_name = self.__class__.__name__,
action_name = WaitForTerminateEnvironmentFinishOpMessage.Action,
wait_timeout = wait_timeout,
poll_delay = poll_delay,
include_deleted = 'true',
initial_delay = ServiceDefault.TERMINATE_ENV_POLL_DELAY)
# After polling
status = result[0].status
health = result[0].health
log.info('Stopped polling. Environment "{0}" is now {1}, health is {2}.'.format\
(env_name, status, health))
if status.lower() == EnvironmentStatus.Terminated.lower():
prompt.result(WaitForTerminateEnvironmentFinishOpMessage.Succeed.format(env_name))
else:
prompt.result(WaitForTerminateEnvironmentFinishOpMessage.Timeout.format(env_name))
prompt.result(WaitForTerminateEnvironmentFinishOpMessage.Status.format(status, health))
ret_result = OperationResult(self,
None,
WaitForTerminateEnvironmentFinishOpMessage.Result.format(status),
result)
return ret_result
class UpdateEnvOptionSettingOperation(OperationBase):
_input_parameters = {
ParameterName.AwsAccessKeyId,
ParameterName.AwsSecretAccessKey,
ParameterName.ServiceEndpoint,
ParameterName.EnvironmentName,
ParameterName.OptionSettingFile,
ParameterName.RdsEnabled,
}
    _output_parameters = {
        ParameterName.UpdateEnvironmentRequestID,
    }
def execute(self, parameter_pool):
eb_client = self._get_eb_client(parameter_pool)
app_name = parameter_pool.get_value(ParameterName.ApplicationName)
env_name = parameter_pool.get_value(ParameterName.EnvironmentName)
stack_name = parameter_pool.get_value(ParameterName.SolutionStack)
prompt.action(UpdateEnvironmentOptionSettingOpMessage.Start.format(env_name))
location = parameter_pool.get_value(ParameterName.OptionSettingFile)
option_settings = config_file.load_env_option_setting_file(location, quiet = True)
if option_settings is not None and len(option_settings) > 0:
prompt.info(UpdateEnvironmentOptionSettingOpMessage.UsingOptionSetting.format(location))
else:
option_settings = []
option_remove = set()
spec = TemplateSpecification()
rds_utils.rds_handler(parameter_pool, spec, stack_name, option_settings, option_remove)
self._option_setting_handler(option_settings, option_remove)
self._validate_change(parameter_pool, eb_client, app_name, env_name,
option_settings, option_remove, spec)
try:
response = eb_client.update_environment(env_name,
option_settings = option_settings,
option_remove = option_remove,
template_specification = spec)
except:
raise
else:
            log.info('Received response for UpdateEnvironment call.')
prompt.result(UpdateEnvironmentOptionSettingOpMessage.Succeed.format(env_name))
self._log_api_result(self.__class__.__name__, 'UpdateEnvironment', response.result)
parameter_pool.put(Parameter(ParameterName.UpdateEnvironmentRequestID,
response.request_id,
ParameterSource.OperationOutput))
ret_result = OperationResult(self,
response.request_id,
UpdateEnvironmentOptionSettingOpMessage.Succeed.format(env_name),
response.result)
return ret_result
def _validate_change(self, parameter_pool, eb_client, app_name, env_name,
option_settings, option_remove, template_spec):
response = eb_client.validate_configuration_settings(app_name, option_settings,
environment_name = env_name,
option_remove = option_remove,
template_specification = template_spec)
warning_count = 0
error_count = 0
for message in response.result:
if misc.string_equal_ignore_case(message.severity, ValidationSeverity.SeverityError):
error_count = error_count + 1
else:
warning_count = warning_count + 1
prompt.error(ValidationMessage.ValidateSettingError.format\
(message.severity, message.namespace, message.option_name, message.message))
if error_count > 0:
log.info('Validating configuration setting failed. Abort command.')
raise EBSCliException()
elif warning_count > 0:
if parameter_pool.has(ParameterName.Force) \
and parameter_pool.get_value(ParameterName.Force) == ServiceDefault.ENABLED:
pass
elif not TerminalBase.ask_confirmation(UpdateEnvironmentOptionSettingOpMessage.Continue):
log.info('User cancelled command.')
raise EBSCliException()
else:
log.info('Validating configuration setting passed.')
class WaitForUpdateEnvOptionSettingFinishOperation(OperationBase):
_input_parameters = {
ParameterName.AwsAccessKeyId,
ParameterName.AwsSecretAccessKey,
ParameterName.ServiceEndpoint,
ParameterName.EnvironmentName,
ParameterName.WaitForFinishTimeout,
ParameterName.PollDelay,
}
_output_parameters = set()
def execute(self, parameter_pool):
eb_client = self._get_eb_client(parameter_pool)
env_name = parameter_pool.get_value(ParameterName.EnvironmentName)
wait_timeout = parameter_pool.get_value(ParameterName.WaitForUpdateTimeout)
poll_delay = parameter_pool.get_value(ParameterName.PollDelay)
# update_request_id = parameter_pool.get_value(ParameterName.UpdateEnvironmentRequestID)\
# if parameter_pool.has(ParameterName.UpdateEnvironmentRequestID) else None
result = self._wait_for_env_operation_finish(
eb_client = eb_client,
env_name = env_name,
original_request_id = None,
pending_status = EnvironmentStatus.Updating,
expected_health = EnvironmentHealth.Green,
operation_name = self.__class__.__name__,
action_name = WaitForUpdateEnvOptionSettingFinishOpMessage.Action,
wait_timeout = wait_timeout,
poll_delay = poll_delay,
include_deleted = 'false',
initial_delay = ServiceDefault.UPDATE_ENV_POLL_DELAY)
# After polling
status = result[0].status
health = result[0].health
cname = result[0].cname
log.info('Stopped polling. Environment "{0}" is now {1}, health is {2}.\nURL is "{3}".'.\
format(env_name, status, health, cname))
if status.lower() == EnvironmentStatus.Ready.lower() \
and health.lower() == EnvironmentHealth.Green.lower():
prompt.result(WaitForUpdateEnvOptionSettingFinishOpMessage.Succeed.format(env_name))
else:
prompt.result(WaitForUpdateEnvOptionSettingFinishOpMessage.Timeout.format(env_name))
prompt.info(WaitForUpdateEnvOptionSettingFinishOpMessage.Result.\
format(cname, status, health))
ret_result = OperationResult(self,
None,
WaitForUpdateEnvOptionSettingFinishOpMessage.Result.\
format(cname, status, health),
result)
return ret_result
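# A minimal usage sketch (illustrative only; the CLI normally constructs the
# operation queue and parameter pool itself, so the elided constructor
# arguments below are placeholders, not the documented API):
#
#   pool = ...  # parameter pool holding credentials, ApplicationName,
#               # EnvironmentName, SolutionStack, etc.
#   CreateEnvironmentOperation(...).execute(pool)
#   WaitForCreateEnvironmentFinishOperation(...).execute(pool)
#
# The wait operation picks up CreateEnvironmentRequestID from the pool, where
# the create operation stored it as an OperationOutput parameter.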
|
{
"content_hash": "dab3cfc4e0ccddb57af776a677ba68f4",
"timestamp": "",
"source": "github",
"line_count": 499,
"max_line_length": 134,
"avg_line_length": 50.565130260521045,
"alnum_prop": 0.540068167406468,
"repo_name": "JoaoVasques/aws-devtool",
"id": "8ffb18ea535aa4c649847d645c2815ba22a76d2a",
"size": "25965",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "eb/linux/python3/scli/operation/environment_operations.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "PowerShell",
"bytes": "31565"
},
{
"name": "Python",
"bytes": "6266200"
},
{
"name": "Ruby",
"bytes": "159495"
},
{
"name": "Shell",
"bytes": "1895"
}
],
"symlink_target": ""
}
|
import asyncio
import aiobotocore
from datetime import datetime
import io
import umsgpack
_records = []
_max_qlen = 0
_ev_flush = None
def init(loop, opts):
global _max_qlen, _ev_flush
assert opts['codec'] in ('msgpack', 'text')
ev = asyncio.Event(loop=loop)
_ev_flush = ev
interval = opts.get('flush_interval', 30)
if interval > 0:
asyncio.ensure_future(s3_flush_timer(loop, interval, ev), loop=loop)
_max_qlen = opts.get('max_queue_length', 0)
asyncio.ensure_future(s3_flusher(loop, opts, ev), loop=loop)
async def s3_flush_timer(loop, interval, ev):
global _records, _max_qlen
while True:
await asyncio.sleep(interval)
if len(_records) > 0:
ev.set()
async def s3_flusher(loop, opts, ev):
global _records
buffer = io.BytesIO()
part_count = 1
while True:
await ev.wait()
ev.clear()
print('s3: flushing {} entries...'.format(len(_records)))
        if opts['codec'] == 'msgpack':
            # u-msgpack-python serializes one object at a time via packb().
            for rec in _records:
                buffer.write(umsgpack.packb(rec.data))
        elif opts['codec'] == 'text':
            # BytesIO only accepts bytes, so encode each record before writing.
            for rec in _records:
                buffer.write((str(rec) + '\n').encode('utf8'))
_records.clear() # must be cleared before any await
session = aiobotocore.get_session(loop=loop)
client = session.create_client('s3', region_name=opts['region'],
aws_secret_access_key=opts['secret_key'],
aws_access_key_id=opts['access_key'])
now = datetime.now()
ts_month = now.strftime('%Y-%m')
ts_monthday = now.strftime('%Y-%m-%d')
ts_time = now.strftime('%Y-%m-%dT%H.%M.%S')
key = '{}/{}/{}/{}.part{}.msgpack'.format(opts['key_prefix'],
ts_month, ts_monthday, ts_time,
part_count)
resp = await client.put_object(Bucket=opts['bucket'],
Key=key,
Body=buffer.getvalue(),
ACL='private')
buffer.seek(0, io.SEEK_SET)
buffer.truncate(0)
part_count += 1
def enqueue(records):
global _records, _max_qlen
_records.extend(records)
if _max_qlen == 0 or (_max_qlen > 0 and len(_records) >= _max_qlen):
_ev_flush.set()
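# A sketch of the options this output expects; the values are illustrative,
# but the keys are exactly the ones read by init() and s3_flusher() above:
#
#   opts = {
#       'codec': 'msgpack',          # or 'text'
#       'region': 'us-east-1',
#       'access_key': '...',
#       'secret_key': '...',
#       'bucket': 'my-log-bucket',   # hypothetical bucket name
#       'key_prefix': 'logs',
#       'flush_interval': 30,        # seconds; 0 or less disables the timer
#       'max_queue_length': 1000,    # with 0, every enqueue() triggers a flush
#   }
#   init(loop, opts)
#   enqueue(records)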
|
{
"content_hash": "1c13660b3878fa8ea6bd8428ddca400b",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 81,
"avg_line_length": 36.4264705882353,
"alnum_prop": 0.5248284214775939,
"repo_name": "lablup/logger",
"id": "76bf9f2d093ccbcac605eec24b892b6db4118015",
"size": "2502",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "logger/outputs/s3.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "8771"
},
{
"name": "Shell",
"bytes": "705"
}
],
"symlink_target": ""
}
|
import random
import gym
import Traci_1_cross_env.Traci_1_cross_env
import numpy as np
from collections import deque
from sklearn.linear_model import SGDRegressor
from baselines import logger, logger_utils
class LinearQFunction:
def __init__(self, gamma, n_actions):
# Hyperparameters
self.gamma = gamma
# Get a model for each action to take, based upon schema (feature vector)
self.schema = ["right_cars_waiting", "left_cars_waiting", "top_cars_waiting", "bottom_cars_waiting",
"traffic_light_status"]
self.n_actions = n_actions
self.models = [SGDRegressor(loss="squared_loss", penalty="l2", alpha=0.0001,
l1_ratio=0.15, fit_intercept=True, max_iter=None, tol=None,
shuffle=True, verbose=0, epsilon=0.1,
random_state=None, learning_rate="invscaling", eta0=0.001,
power_t=0.25, warm_start=False, average=False, n_iter=None) for _ in
range(self.n_actions)]
base_x = [[1 for _ in self.schema]]
base_y = [0]
for model in self.models:
model.fit(base_x, base_y)
def get_q(self, features):
features = np.array(features)
q = [model.predict([features]) for model in self.models]
return q
def get_best_q(self, features):
q = np.array(self.get_q(features))
#print("q", q)
return int(np.random.choice(np.where(q == q.max())[0])), np.max(q)
def train(self, features, actions, rewards, new_features):
examples = list(zip(features, actions, rewards, new_features))
targets = [
r + self.gamma * self.get_best_q(new_f)[1]
for f, a, r, new_f in examples
]
examples = list(zip(features, actions, targets))
for action in range(self.n_actions):
x, y = [], []
for f, a, t in examples:
if a == action:
x.append(f)
y.append(t)
if len(x) > 0:
x = np.array(x).astype(np.float)
y = np.array(y).astype(np.float)
self.models[action].partial_fit(x, y)
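# train() above computes the standard one-step Q-learning target for every
# sample in the batch:
#
#   target = r + gamma * max_a' Q(s', a')
#
# and then partially fits each action's SGDRegressor on the (features, target)
# pairs belonging to that action.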
env = gym.make('Traci_1_cross_env-v0')
print("made gym")
print_timestep_freq = 100
logger.reset()
logger_path = logger_utils.path_with_date("/tmp/Traci_1_cross_env-v0", "Traci_1_cross_env-v0")
logger.configure(logger_path, ["tensorboard", "stdout"])
gamma = 0.99
n_actions = env.action_space.n
n_episodes = 1000000
epsilon = 0.4
epsilon_decay = 0.9999
print_every = 500
train_every = 100
mini_batch_size = 32
max_batch_size = mini_batch_size*100
batch = deque(maxlen=max_batch_size)
qf = LinearQFunction(gamma=gamma, n_actions=n_actions)
s = env.reset()
for episode in range(1, n_episodes):
a = np.random.randint(0, n_actions) if np.random.rand() < epsilon else qf.get_best_q(s)[0]
sn, r, done, info = env.step(a)
batch.append((s, a, r, sn))
if episode % train_every == 0:
mini_batch = random.sample(batch, mini_batch_size)
batch_unpacked = list(zip(*mini_batch))
qf.train(*batch_unpacked)
s = np.copy(sn)
epsilon *= epsilon_decay
    if done:
        s = env.reset()
if episode % print_every == 0:
print("action taken {}".format(a))
print("epsilon {}".format(epsilon))
print("state {}".format(s))
print("reward", r)
|
{
"content_hash": "b302e8af57d9005293efb5e4d4c433d2",
"timestamp": "",
"source": "github",
"line_count": 102,
"max_line_length": 108,
"avg_line_length": 34.1078431372549,
"alnum_prop": 0.576889910893935,
"repo_name": "peheje/baselines",
"id": "0b387b725fb92b7b1ad4bda89dc88785c728800b",
"size": "3545",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "traci/agents/train_traci_linear_sgd.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "541110"
}
],
"symlink_target": ""
}
|
import sys
import os
import glob
import heapq
import codecs
import cPickle; BINARY=1
import stemmer; _stemmer=stemmer
from math import log, sqrt
from time import time
from random import random, choice
from itertools import izip, chain
from bisect import insort
from StringIO import StringIO
from codecs import open
try:
MODULE = os.path.dirname(__file__)
except:
MODULE = ""
try: from pattern.en.inflect import singularize, conjugate
except:
try:
import sys; sys.path.insert(0, os.path.join(MODULE, ".."))
from en.inflect import singularize, conjugate
except:
singularize = lambda w: w
conjugate = lambda w,t: w
#--- STRING FUNCTIONS ------------------------------------------------------------------------------
def decode_utf8(string):
""" Returns the given string as a unicode string (if possible).
"""
if isinstance(string, str):
for encoding in (("utf-8",), ("windows-1252",), ("utf-8", "ignore")):
try:
return string.decode(*encoding)
except:
pass
return string
return unicode(string)
def encode_utf8(string):
""" Returns the given string as a Python byte string (if possible).
"""
if isinstance(string, unicode):
try:
return string.encode("utf-8")
except:
return string
return str(string)
def lreplace(a, b, string):
""" Replaces the head of the string.
"""
if string.startswith(a):
return b + string[len(a):]
return string
def rreplace(a, b, string):
""" Replaces the tail of the string.
"""
if string.endswith(a):
return string[:len(string)-len(a)] + b
return string
def filename(path, map={"_":" "}):
""" Returns the basename of the file at the given path, without the extension.
For example: /users/tom/desktop/corpus/aesthetics.txt => aesthetics.
"""
f = os.path.splitext(os.path.basename(path))[0]
for k in map:
f = f.replace(k, map[k])
return f
def shi(i, base="0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"):
""" Returns a short string hash for a given int.
"""
s = []
while i > 0:
i, r = divmod(i, len(base))
s.append(base[r])
return "".join(reversed(s))
#--- LIST FUNCTIONS --------------------------------------------------------------------------------
def shuffled(list):
""" Yields a copy of the given list with the items in random order.
"""
return sorted(list, key=lambda x: random())
def chunk(list, n):
""" Yields n successive equal-sized chunks from the given list.
"""
i = 0
for m in xrange(n):
j = i + len(list[m::n])
yield list[i:j]
i=j
#--- READ-ONLY DICTIONARY --------------------------------------------------------------------------
class ReadOnlyError(Exception):
pass
# Read-only dictionary, used for Document.terms and Document.vector.
# These can't be updated because it invalidates the cache.
class readonlydict(dict):
@classmethod
def fromkeys(cls, k, default=None):
d=readonlydict((k, default) for k in k); return d
def __init__(self, *args, **kwargs):
dict.__init__(self, *args, **kwargs)
def __setitem__(self, k, v):
raise ReadOnlyError
def __delitem__(self, k):
raise ReadOnlyError
def pop(self, k, default=None):
raise ReadOnlyError
def popitem(self, kv):
raise ReadOnlyError
def clear(self):
raise ReadOnlyError
def update(self, kv):
raise ReadOnlyError
def setdefault(self, k, default=None):
if k in self:
return self[k]
raise ReadOnlyError
# Read-only list, used for Corpus.documents.
class readonlylist(list):
def __init__(self, *args, **kwargs):
list.__init__(self, *args, **kwargs)
def __setitem__(self, i, v):
raise ReadOnlyError
def __delitem__(self, i):
raise ReadOnlyError
def append(self, v):
raise ReadOnlyError
def insert(self, i, v):
raise ReadOnlyError
def extend(self, v):
raise ReadOnlyError
def remove(self, v):
raise ReadOnlyError
def pop(self, i):
raise ReadOnlyError
#### DOCUMENT ######################################################################################
#--- STOP WORDS ------------------------------------------------------------------------------------
stopwords = _stopwords = dict.fromkeys(
open(os.path.join(MODULE, "stopwords.txt")).read().split(", "), True)
# The following words could also be meaningful nouns:
#for w in ["mine", "us", "will", "can", "may", "might"]:
# stopwords.pop(w)
#--- WORD COUNT ------------------------------------------------------------------------------------
PUNCTUATION = "*#[]():;,.!?\n\r\t\f- "
def words(string, filter=lambda w: w.isalpha() and len(w)>1, punctuation=PUNCTUATION, **kwargs):
""" Returns a list of words from the given string.
Common punctuation marks are stripped from words.
"""
if isinstance(string, unicode):
string = string.replace(u"’", u"'")
words = string.replace("\n", "\n ")
words = (rreplace("'s", "", w.strip(punctuation)) for w in words.split(" "))
words = [w for w in words if filter is None or filter(w) is not False]
return words
PORTER, LEMMA = "porter", "lemma"
def stem(word, stemmer=PORTER, **kwargs):
""" Returns the base form of the word when counting words in count().
With stemmer=PORTER, the Porter2 stemming algorithm is used.
With stemmer=LEMMA, either uses Word.lemma or inflect.singularize().
"""
if isinstance(word, basestring):
word = decode_utf8(word.lower())
if stemmer is None:
return word
if stemmer == PORTER:
return _stemmer.stem(word, **kwargs)
if stemmer == LEMMA:
if word.__class__.__name__ == "Word":
if word.lemma is not None:
return word.lemma
if word.pos == "NNS":
return singularize(word.string.lower())
if word.pos.startswith("VB"):
return conjugate(word.string.lower(), "infinitive") or word
return singularize(word)
if type(stemmer).__name__ == "function":
return decode_utf8(stemmer(word))
return word
def count(words=[], top=None, threshold=0, stemmer=None, exclude=[], stopwords=False, **kwargs):
""" Returns a dictionary of (word, count)-items, in lowercase.
Words in the exclude list and stop words are not counted.
Words whose count falls below (or equals) the given threshold are excluded.
Words that are not in the given top most counted are excluded.
"""
# An optional dict-parameter can be used to specify a subclass of dict,
# e.g., count(words, dict=readonlydict) as used in Document.
count = kwargs.get("dict", dict)()
for w in words:
if w.__class__.__name__ == "Word":
w = w.string.lower()
if isinstance(w, basestring):
w = w.lower()
if (stopwords or not w in _stopwords) and not w in exclude:
if stemmer is not None:
w = stem(w, stemmer, **kwargs)
dict.__setitem__(count, w, (w in count) and count[w]+1 or 1)
for k in count.keys():
if count[k] <= threshold:
dict.__delitem__(count, k)
if top is not None:
count = count.__class__(heapq.nsmallest(top, count.iteritems(), key=lambda (k,v): (-v,k)))
return count
#--- DOCUMENT --------------------------------------------------------------------------------------
# Document is a bag of words in which each word is a feature.
# Document is represented as a vector of weighted (TF-IDF) features.
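# In terms of the methods defined below, the weight of word w in document d
# that belongs to a corpus is:
#   tf(w)     = count of w in d / total number of words in d
#   idf(w)    = log(1 / df(w)), where df(w) is the fraction of documents containing w
#   tf_idf(w) = tf(w) * idf(w)  (falls back to tf(w) when d is not in a corpus)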
__UID = 0
__SESSION = shi(int(time()*1000)) # Avoids collision with pickled documents.
def _uid():
""" Returns a string id, for example: "NPIJYaS-1", "NPIJYaS-2", ...
The string part is based on the current time, the number suffix is auto-incremental.
"""
global __UID; __UID+=1; return __SESSION+"-"+str(__UID)
# Term relevancy weight:
TF, TFIDF, TF_IDF = "tf", "tf-idf", "tf-idf"
class Document(object):
# Document(string="", filter, punctuation, top, threshold, stemmer, exclude, stopwords, name, type)
def __init__(self, string="", **kwargs):
""" A dictionary of (word, count)-items parsed from the string.
Punctuation marks are stripped from the words.
Stop words in the exclude list are excluded from the document.
            Only words whose count exceeds the threshold and that are in the top are included in the document.
"""
kwargs.setdefault("filter", lambda w: w.isalpha() and len(w)>1)
kwargs.setdefault("threshold", 0)
kwargs.setdefault("dict", readonlydict)
# A string of words, map to read-only dict of (word, count)-items.
if isinstance(string, basestring):
w = words(string, **kwargs)
w = count(w, **kwargs)
v = None
# A list of words, map to read-only dict of (word, count)-items.
elif isinstance(string, (list, tuple)) and not string.__class__.__name__ == "Text":
w = string
w = count(w, **kwargs)
v = None
# A Vector of (word, TF weight)-items, copy as document vector.
elif isinstance(string, Vector) and string.weight == TF:
w = string
w = kwargs["dict"](w)
v = Vector(w)
# A Vector of (word, TF-IDF weight)-items, copy as document vector.
elif isinstance(string, Vector) and string.weight == TF_IDF:
w = string
w = kwargs["dict"](w) # XXX term count is lost.
v = Vector(w)
# A dict of (word, count)-items, make read-only.
elif isinstance(string, dict):
w = string
w = kwargs["dict"](w)
v = None
# pattern.en.Sentence with Word objects, can use stemmer=LEMMA.
elif string.__class__.__name__ == "Sentence":
w = string.words
w = [w for w in w if kwargs["filter"](w.string)]
w = count(w, **kwargs)
v = None
# pattern.en.Text with Sentence objects, can use stemmer=LEMMA.
elif string.__class__.__name__ == "Text":
w = []; [w.extend(sentence.words) for sentence in string]
w = [w for w in w if kwargs["filter"](w.string)]
w = count(w, **kwargs)
v = None
else:
raise TypeError, "document string is not str, unicode, list, Vector, Sentence or Text."
self._id = _uid() # Document ID, used when comparing objects.
self._name = kwargs.get("name") # Name that describes the document content.
self._type = kwargs.get("type") # Type that describes the category or class of the document.
self._terms = w # Dictionary of (word, count)-items.
self._vector = v # Cached tf-idf vector.
self._count = None # Total number of words (minus stop words).
self._corpus = None # Corpus this document belongs to.
@classmethod
def open(cls, path, *args, **kwargs):
""" Creates and returns a new document from the given text file path.
"""
s = codecs.open(path, encoding=kwargs.get("encoding", "utf-8")).read()
return cls(s, *args, **kwargs)
@classmethod
def load(cls, path):
""" Returns a new Document from the given text file path.
The given text file must be generated with Document.save().
"""
# Open unicode file.
s = open(path, "rb").read()
s = s.lstrip(codecs.BOM_UTF8)
s = decode_utf8(s)
a = {}
v = {}
# Parse document name and type.
# Parse document terms and frequency.
for s in s.splitlines():
if s.startswith("#"): # comment
pass
elif s.startswith("@name:"):
a["name"] = s[len("@name:")+1:].replace("\\n", "\n")
elif s.startswith("@type:"):
a["type"] = s[len("@type:")+1:].replace("\\n", "\n")
else:
s = s.split(" ")
w, f = " ".join(s[:-1]), s[-1]
if f.isdigit():
v[w] = int(f)
else:
v[w] = float(f)
return cls(v, name=a.get("name"), type=a.get("type"))
def save(self, path):
""" Saves the terms in the document as a text file at the given path.
The advantage is that terms no longer need to be filtered or stemmed in Document.load().
"""
s = []
# Parse document name and type.
for k, v in (("@name:", self.name), ("@type:", self.type)):
if v is not None:
s.append("%s %s" % (k, v.replace("\n", "\\n")))
# Parse document terms and frequency.
for w, f in sorted(self.terms.items()):
if isinstance(f, int):
s.append("%s %i" % (w, f))
if isinstance(f, float):
s.append("%s %.3f" % (w, f))
s = "\n".join(s)
s = encode_utf8(s)
# Save unicode file.
f = open(path, "wb")
f.write(codecs.BOM_UTF8)
f.write(s)
f.close()
def _get_corpus(self):
return self._corpus
def _set_corpus(self, corpus):
self._vector = None
self._corpus and self._corpus._update()
self._corpus = corpus
self._corpus and self._corpus._update()
corpus = property(_get_corpus, _set_corpus)
@property
def id(self):
return self._id
@property
def name(self):
return self._name
@property
def type(self):
return self._type
@property
def terms(self):
return self._terms
words = terms
@property
def features(self):
return self._terms.keys()
@property
def count(self):
# Yields the number of words (excluding stop words) in the document.
# Cache the word count so we can reuse it when calculating tf.
if not self._count: self._count = sum(self.terms.values())
return self._count
def __len__(self):
return len(self.terms)
def __iter__(self):
return iter(self.terms)
def __contains__(self, word):
return word in self.terms
def __getitem__(self, word):
return self.terms.__getitem__(word)
def get(self, word, default=None):
return self.terms.get(word, default)
def term_frequency(self, word):
""" Returns the term frequency of a word in the document.
            tf = number of occurrences of the word / number of words in document.
            The more occurrences of the word, the higher its tf weight.
"""
return float(self.terms.get(word, 0)) / (self.count or 1)
tf = term_frequency
def term_frequency_inverse_document_frequency(self, word, weight=TFIDF):
""" Returns the word relevancy as tf*idf.
The relevancy is a measure of how frequent the word occurs in the document,
compared to its frequency in other documents in the corpus.
If the document is not part of a corpus, returns tf weight.
"""
w = self.tf(word)
if weight == TFIDF:
# Use tf if no corpus, or idf==None (happens when the word is not in the corpus).
w *= self.corpus and self.corpus.idf(word) or 1
return w
tf_idf = tfidf = term_frequency_inverse_document_frequency
@property
def vector(self):
""" Yields a dictionary of (word, relevancy)-items from the document, based on tf-idf.
"""
if not self._vector:
# See the Vector class below = a dict with extra functionality (copy, norm).
# Corpus.weight (TFIDF or TF) determines how the weights will be calculated.
# When a document is added/deleted from a corpus, the cached vector is deleted.
if getattr(self.corpus, "weight", TF) == TFIDF:
w, f = TFIDF, self.tf_idf
else:
w, f = TF, self.tf
self._vector = Vector(((w, f(w)) for w in self.terms), weight=w)
return self._vector
def keywords(self, top=10, normalized=True):
""" Returns a sorted list of (relevancy, word)-tuples that are top keywords in the document.
With normalized=True, weights are normalized between 0.0 and 1.0 (their sum will be 1.0).
"""
n = normalized and sum(self.vector.itervalues()) or 1.0
v = ((f/n, w) for w, f in self.vector.iteritems())
v = heapq.nsmallest(top, v, key=lambda v: (-v[0], v[1]))
return v
def cosine_similarity(self, document):
""" Returns the similarity between the two documents as a number between 0.0-1.0.
If both documents are in the same corpus the calculations are cached for reuse.
"""
if self.corpus:
return self.corpus.cosine_similarity(self, document)
if document.corpus:
return document.corpus.cosine_similarity(self, document)
# Merge terms to ensure that the values are in the same order:
W = set(self.terms.keys()) | set(document.terms.keys())
v1 = [self.terms.get(w, 0) for w in W]
v2 = [document.terms.get(w, 0) for w in W]
return cosine_similarity(v1, v2)
similarity = cosine_similarity
def copy(self):
d = Document(name=self.name, type=self.type); dict.update(d.terms, self.terms); return d
def __eq__(self, document):
return isinstance(document, Document) and self.id == document.id
def __ne__(self, document):
return not self.__eq__(document)
def __repr__(self):
return "Document(id=%s%s)" % (
repr(self._id), self.name and ", name=%s" % repr(self.name) or "")
#--- VECTOR ----------------------------------------------------------------------------------------
# Document vector, using a sparse representation (i.e., dictionary with only features > 0).
# Sparse representation is fast, usually even faster than LSA,
# since LSA creates a dense vector with non-zero values.
# Average feature length:
# sum(len(d.vector) for d in corpus.documents) / float(len(corpus))
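# For example, Vector({"cat": 0.7, "dog": 0.3}) stores just the two non-zero
# features; any word that is absent counts as 0 and drops out of dot products.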
class Vector(readonlydict):
id = 1
def __init__(self, *args, **kwargs):
""" Vector is a dictionary of (word, weight)-items based on the terms in a Document.
"""
self.id = Vector.id; Vector.id+=1
self.weight = kwargs.pop("weight", TFIDF) # Vector weights based on tf or tf-idf?
self._norm = None
readonlydict.__init__(self, *args, **kwargs)
@property
def features(self):
return self.keys()
@property
def l2_norm(self):
""" Yields the Frobenius matrix norm.
n = the square root of the sum of the absolute squares of the values.
The matrix norm is used to normalize (0.0-1.0) cosine similarity between documents.
"""
if not self._norm: self._norm = l2_norm(self.itervalues())
return self._norm
norm = frobenius_norm = l2_norm
def copy(self):
return Vector(self, weight=self.weight)
def __call__(self, vector={}):
if isinstance(vector, (Document, Corpus)):
vector = vector.vector
# Return a copy of the vector, updated with values from the other vector.
# Only keys that appear in this vector will be updated (i.e. no new keys are added).
V = self.copy(); dict.update(V, ((k, v) for k, v in vector.iteritems() if k in V)); return V
# These functions are useful if you work with a bare matrix instead of Document and Corpus.
# Given vectors must be lists of values (not iterators).
def tf_idf(vectors):
idf = []
for i in range(len(vectors[0])):
idf.append(log(len(vectors) / (sum(1.0 for v in vectors if v[i]) or 1)))
return [[v[i] * idf[i] for i in range(len(v))] for v in vectors]
def cosine_similarity(vector1, vector2):
return sum(a*b for a, b in izip(vector1, vector2)) / (l2_norm(vector1) * l2_norm(vector2) or 1)
def l2_norm(vector):
return sum(x**2 for x in vector) ** 0.5
def entropy(vector):
s = float(sum(vector)) or 1
return -sum(x/s * log(x/s, 2) for x in vector if x != 0)
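# Quick sanity checks for the bare-matrix helpers above:
#   l2_norm([3, 4]) == 5.0
#   cosine_similarity([1, 0], [1, 0]) == 1.0
#   entropy([0.5, 0.5]) == 1.0   (maximal uncertainty over two outcomes)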
#### CORPUS ########################################################################################
#--- CORPUS ----------------------------------------------------------------------------------------
# Export formats:
ORANGE, WEKA = "orange", "weka"
# LSA reduction methods:
NORM, TOP300 = "norm", "top300"
# Feature selection methods:
IG = INFOGAIN = "infogain"
KLD = "kullback-leibler"
# Clustering methods:
KMEANS, HIERARCHICAL, ALL = "k-means", "hierarchical", "all"
class Corpus(object):
def __init__(self, documents=[], weight=TFIDF):
""" A corpus is a collection of documents,
where each document is a bag of (word, count)-items.
Documents can be compared for similarity.
"""
self.description = "" # Description of the dataset, author e-mail, etc.
self._documents = readonlylist() # List of documents (read-only).
self._index = {} # Document.name => Document
self._df = {} # Cache of document frequency per word.
self._similarity = {} # Cache of ((D1.id,D2.id), weight)-items (cosine similarity).
self._divergence = {} # Cache of Kullback-leibler divergence per (word1, word2).
self._ig = {} # Cache of (word, information gain)-items.
self._vector = None # Cache of corpus vector with all the words in the corpus.
self._lsa = None # LSA matrix with reduced dimensionality.
self._weight = weight # Weight used in Document.vector (TF-IDF or TF).
self._update()
self.extend(documents)
@property
def documents(self):
return self._documents
@property
def terms(self):
return self.vector.keys()
features = words = terms
@property
def classes(self):
return list(set(d.type for d in self.documents))
def _get_lsa(self):
return self._lsa
def _set_lsa(self, v=None):
self._lsa = v
self._update()
lsa = property(_get_lsa, _set_lsa)
def _get_weight(self):
return self._weight
def _set_weight(self, w):
self._weight = w
self._update() # Clear the cache.
weight = property(_get_weight, _set_weight)
@classmethod
def build(cls, path, *args, **kwargs):
""" Builds the corpus from a folder of text documents (e.g. path="folder/*.txt").
Each file is split into words and the words are counted.
"""
name = kwargs.pop("name", lambda path: None)
documents = []
for f in glob.glob(path):
documents.append(Document.open(f, *args, **kwargs))
documents[-1]._name = name(f)
return cls(documents)
@classmethod
def load(cls, path):
""" Loads the corpus from a pickle file created with Corpus.save().
"""
return cPickle.load(open(path))
def save(self, path, update=False):
""" Saves the corpus as a pickle file at the given path.
It can be loaded with Corpus.load().
This is faster because the words in the documents do not need to be stemmed again,
            and cached vectors and similarities are stored.
"""
if update:
for d1 in self.documents:
for d2 in self.documents:
self.cosine_similarity(d1, d2) # Update the entire cache before saving.
m = dict.fromkeys((d.id for d in self.documents), True)
for id1, id2 in self._similarity.keys():
if id1 not in m \
or id2 not in m:
self._similarity.pop((id1, id2)) # Remove Corpus.search() query cache.
cPickle.dump(self, open(path, "w"), BINARY)
def export(self, path, format=ORANGE, **kwargs):
""" Exports the corpus as a file for other machine learning applications,
e.g., Orange or Weka which both have a GUI and are faster.
"""
# Note: the Document.vector space is exported without cache or LSA concept space.
keys = sorted(self.vector.keys())
s = []
# Orange tab format:
if format == ORANGE:
s.append("\t".join(keys + ["m#name", "c#type"]))
for document in self.documents:
v = document.vector
v = [v.get(k, 0) for k in keys]
v = "\t".join(x==0 and "0" or "%.4f" % x for x in v)
v = "%s\t%s\t%s" % (v, document.name or "", document.type or "")
s.append(v)
# Weka ARFF format:
if format == WEKA:
s.append("@RELATION %s" % kwargs.get("name", hash(self)))
s.append("\n".join("@ATTRIBUTE %s NUMERIC" % k for k in keys))
s.append("@ATTRIBUTE class {%s}" % ",".join(set(d.type or "" for d in self.documents)))
s.append("@DATA")
for document in self.documents:
v = document.vector
v = [v.get(k, 0) for k in keys]
v = ",".join(x==0 and "0" or "%.4f" % x for x in v)
v = "%s,%s" % (v, document.type or "")
s.append(v)
s = "\n".join(s)
f = open(path, "w", encoding="utf-8")
f.write(decode_utf8(s))
f.close()
def _update(self):
# Ensures that all document relevancy vectors are recalculated
# when a document is added or deleted in the corpus (= new words or less words).
self._df = {}
self._similarity = {}
self._divergence = {}
self._ig = {}
self._vector = None
self._lsa = None
for document in self.documents:
document._vector = None
def __len__(self):
return len(self.documents)
def __iter__(self):
return iter(self.documents)
def __getitem__(self, i):
return self.documents.__getitem__(i)
def __delitem__(self, i):
d = list.pop(self.documents, i)
d._corpus = None
self._index.pop(d.name, None)
self._update()
def clear(self):
self._documents = readonlylist()
self._update()
def append(self, document):
""" Appends the given Document to the corpus, setting the corpus as its parent.
The corpus is updated, meaning that the cache of vectors and similarities is cleared
(relevancy and similarity weights will be different now that there is a new document).
"""
if not isinstance(document, Document):
raise TypeError, "Corpus.append() expects a Document."
document._corpus = self
if document.name is not None:
self._index[document.name] = document
list.append(self.documents, document)
self._update()
def extend(self, documents):
""" Extends the corpus with the given list of documents.
Clears the cache of vectors and similarities.
"""
for document in documents:
document._corpus = self
if document.name is not None:
self._index[document.name] = document
list.extend(self.documents, documents)
self._update()
def remove(self, document):
""" Removes the given Document from the corpus (sets Document.corpus=None).
"""
self.__delitem__(self.documents.index(document))
def document(self, name):
""" Returns the Document with the given name.
"""
# This assumes document names are unique.
if name in self._index:
return self._index[name]
def document_frequency(self, word):
""" Returns the document frequency of a word.
Returns 0 if there are no documents in the corpus (e.g. no word frequency).
df = number of documents containing the word / number of documents.
            The more occurrences of the word across the corpus, the higher its df weight.
"""
if len(self.documents) == 0:
return 0
if len(self._df) == 0:
            # Caching the document frequency of each word gives a 300x performance boost
            # (calculate all of them once). Drawback: slower if you only need tf-idf for a single document.
for d in self.documents:
for w in d.terms:
self._df[w] = (w in self._df) and self._df[w]+1 or 1
for w in self._df:
self._df[w] /= float(len(self.documents))
return self._df.get(word, 0.0)
df = document_frequency
def inverse_document_frequency(self, word):
""" Returns the inverse document frequency of a word.
Returns None if the word is not in the corpus, or if there are no documents in the corpus.
Using the natural logarithm:
idf = log(1/df)
            The more occurrences of the word, the lower its idf weight (log() makes it grow slowly).
"""
df = self.df(word)
return df != 0 and log(1.0/df) or None
idf = inverse_document_frequency
@property
def vector(self):
""" Returns a dictionary of (word, 0)-items from the corpus.
It includes all words from all documents (i.e. it is the dimension of the vector space).
If a document is given, sets the document word relevancy values in the vector.
"""
# Note:
# - Corpus.vector is the dictionary of all (word, 0)-items.
# - Corpus.vector(document) returns a copy with the document's word relevancy values in it.
# - This is the full document vector, opposed to the sparse Document.vector.
# Words in a document that are not in the corpus vector are ignored
# (e.g. the document was not in the corpus, this can be the case in Corpus.search() for example).
# See Vector.__call__() why this is possible.
if not self._vector:
self._vector = Vector((w, 0) for w in chain(*(d.terms for d in self.documents)))
return self._vector
@property
def vectors(self):
""" Yields a list of all document vectors.
"""
return [d.vector for d in self.documents]
@property
def density(self):
""" Yields the overall word coverage as a number between 0.0-1.0.
"""
return float(sum(len(d.vector) for d in self.documents)) / len(self.vector)**2
# Following methods rely on Document.vector:
# frequent sets, cosine similarity, nearest neighbors, search, clustering,
# latent semantic analysis, divergence.
def frequent_concept_sets(self, threshold=0.5):
""" Returns a dictionary of (set(words), frequency)
of word combinations with a frequency above the given threshold.
"""
return apriori([d.terms for d in self.documents], support=threshold)
sets = frequent = frequent_concept_sets
def cosine_similarity(self, document1, document2):
""" Returns the similarity between two documents in the corpus as a number between 0.0-1.0.
The weight is based on the document relevancy vectors (i.e. tf-idf of words in the text).
cos = dot(v1,v2) / (norm(v1) * norm(v2))
"""
# If we already calculated the similarity between the given documents,
# it is available in cache for reuse.
id1 = document1.id
id2 = document2.id
if (id1, id2) in self._similarity: return self._similarity[(id1, id2)]
if (id2, id1) in self._similarity: return self._similarity[(id2, id1)]
# Calculate the matrix multiplication of the document vectors.
#v1 = self.vector(document1)
#v2 = self.vector(document2)
#s = cosine_similarity(v1.itervalues(), v2.itervalues()) / (v1.norm * v2.norm or 1)
if not getattr(self, "lsa", None):
# This is exponentially faster for sparse vectors:
v1 = document1.vector
v2 = document2.vector
s = sum(v1.get(w, 0) * f for w, f in v2.iteritems()) / (v1.norm * v2.norm or 1)
else:
# Using LSA concept space:
v1 = id1 in self.lsa and self.lsa[id1] or self._lsa.transform(document1)
v2 = id2 in self.lsa and self.lsa[id2] or self._lsa.transform(document2)
s = sum(a*b for a, b in izip(v1.itervalues(), v2.itervalues())) / (v1.norm * v2.norm or 1)
# Cache the similarity weight for reuse.
self._similarity[(id1, id2)] = s
return s
similarity = cosine_similarity
def nearest_neighbors(self, document, top=10):
""" Returns a list of (weight, document)-tuples in the corpus,
sorted by similarity to the given document.
"""
v = ((self.cosine_similarity(document, d), d) for d in self.documents)
# Filter the input document from the matches.
# Filter documents that scored 0 and return the top.
v = [(w, d) for w, d in v if w > 0 and d.id != document.id]
v = heapq.nsmallest(top, v, key=lambda v: (-v[0],v[1]))
return v
similar = related = neighbors = nn = nearest_neighbors
def vector_space_search(self, words=[], **kwargs):
""" Returns related documents from the corpus, as a list of (weight, document)-tuples.
The given words can be a string (one word), a list or tuple of words, or a Document.
"""
top = kwargs.pop("top", 10)
if not isinstance(words, (list, tuple, Document)):
words = [words]
if not isinstance(words, Document):
kwargs.setdefault("threshold", 0) # Same stemmer as other documents should be given.
words = Document(" ".join(words), **kwargs)
words._corpus = self # So we can calculate tf-idf.
        # A document that is not in the corpus and consists only of words that are
        # not in the corpus has no related documents in the corpus.
if len([True for w in words if w in self.vector]) == 0:
return []
return self.nearest_neighbors(words, top)
search = vector_space_search
def distance(self, document1, document2, *args, **kwargs):
""" Returns the distance (COSINE, EUCLIDEAN, ...) between two document vectors (0.0-1.0).
"""
return distance(document1.vector, document2.vector, *args, **kwargs)
def cluster(self, documents=ALL, method=KMEANS, **kwargs):
""" Clustering is an unsupervised machine learning method for grouping similar documents.
For k-means clustering, returns a list of k clusters (each is a list of documents).
For hierarchical clustering, returns a list of documents and Cluster objects.
A Cluster is a list of documents and other clusters, with a Cluster.flatten() method.
"""
if documents == ALL:
documents = self.documents
if not getattr(self, "lsa", None):
# Using document vectors:
vectors, keys = [d.vector for d in documents], self.vector.keys()
else:
# Using LSA concept space:
vectors, keys = [self.lsa[d.id] for d in documents], range(len(self.lsa))
# Create a dictionary of vector.id => Document.
# We'll need it to map the clustered vectors back to the actual documents.
map = dict((v.id, documents[i]) for i, v in enumerate(vectors))
kw = kwargs.pop
if method in (KMEANS, "kmeans"):
# def cluster(self, method=KMEANS, k=10, iterations=10)
clusters = k_means(vectors, kw("k", 10), kw("iterations", 10), keys=keys, **kwargs)
clusters = [[map[v.id] for v in cluster] for cluster in clusters]
if method == HIERARCHICAL:
# def cluster(self, method=HIERARCHICAL, k=1, iterations=1000)
clusters = hierarchical(vectors, kw("k", 1), kw("iterations", 1000), keys=keys, **kwargs)
clusters.traverse(visit=lambda cluster: \
[cluster.__setitem__(i, map[v.id]) for i, v in enumerate(cluster) if not isinstance(v, Cluster)])
return clusters
def latent_semantic_analysis(self, dimensions=NORM):
""" Creates LSA concept vectors by reducing dimensions.
The concept vectors are then used in Corpus.cosine_similarity() and Corpus.cluster().
The reduction can be undone by setting Corpus.lsa=False.
"""
self._lsa = LSA(self, k=dimensions)
self._similarity = {}
reduce = latent_semantic_analysis
def information_gain(self, word):
""" Returns the information gain for the given feature,
by looking at how much it contributes to each document type (class).
High IG means low entropy (predictability), e.g., interesting for feature selection.
"""
if not self._ig:
# Based on Vincent Van Asch, http://www.clips.ua.ac.be/~vincent/scripts/textgain.py
# For classes {xi...xn} and features {yi...yn}:
# IG(X,Y) = H(X) - H(X|Y)
# H(X) = -sum(p(x) * log2(x) for x in X)
# H(X|Y) = sum(p(y) * H(X|Y=y) for y in Y)
# H(X|Y=y) = -sum(p(x) * log2(x) for x in X if y in x)
# H is the entropy for a list of probabilities.
# Lower entropy indicates predictability, i.e., some values are more probable.
# H([0.50,0.50]) = 1.00
# H([0.75,0.25]) = 0.81
H = entropy
# X = document type (class) distribution.
# "How many documents have class xi?"
X = dict.fromkeys(self.classes, 0)
for d in self.documents:
X[d.type] += 1
# Y = document feature distribution.
# "How many documents have feature yi?"
Y = dict.fromkeys(self.features, 0)
for d in self.documents:
for y, v in d.vector.items():
if v > 0:
Y[y] += 1 # Discrete: feature is present (1) or not (0).
Y = dict((y, Y[y] / float(len(self.documents))) for y in Y)
# XY = features by class distribution.
# "How many documents of class xi have feature yi?"
            XY = dict((y, {}) for y in self.features)  # Each feature needs its own dict; fromkeys() would share one.
for d in self.documents:
for y, v in d.vector.items():
if v != 0:
XY[y][d.type] = XY[y].get(d.type, 0) + 1
# IG.
for y in self.features:
self._ig[y] = H(X.values()) - Y[y] * H(XY[y].values())
return self._ig[word]
IG = ig = infogain = gain = information_gain
def kullback_leibler_divergence(self, word1, word2, cached=True, _vectors=[], _map={}):
""" Returns the difference between two given features (i.e. words from Corpus.terms),
on average over all document vectors, using symmetric Kullback-Leibler divergence.
Higher values represent more distinct features.
"""
if not (word1, word2) in self._divergence:
kl1 = 0
kl2 = 0
# It is not log() that is "slow", but the document.vector getter and dict.__contains__().
# If you use KLD in a loop, collect the vectors once and pass them to _vectors (2x faster),
# or pass a (word, vectors-that-contain-word) dictionary to _map (7x faster).
for v in _map.get(word1) or _vectors or (d.vector for d in self.documents):
if word1 in v:
kl1 += v[word1] * (log(v[word1], 2) - log(v.get(word2, 0.000001), 2))
for v in _map.get(word2) or _vectors or (d.vector for d in self.documents):
if word2 in v:
kl2 += v[word2] * (log(v[word2], 2) - log(v.get(word1, 0.000001), 2))
# Cache the calculations for reuse.
# This is not always possible, e.g., 10,000 features = 5GB cache.
# The measurement is symmetric, so we also know KL(word2, word1).
if cached is True:
self._divergence[(word1, word2)] = \
self._divergence[(word2, word1)] = (kl1 + kl2) / 2
return self._divergence[(word1, word2)]
relative_entropy = kl = kld = kullback_leibler_divergence
def feature_selection(self, top=100, method=INFOGAIN, verbose=False):
""" Returns the top unpredictable ("original") features (terms), using information gain.
This is a subset of Corpus.terms that can be used to build a Classifier
that is faster (less features = less matrix columns) but quite efficient.
"""
if method == IG:
subset = sorted(((self.information_gain(w), w) for w in self.terms), reverse=True)
subset = [w for ig, w in subset[:top]]
return subset
if method == KLD:
v = [d.vector for d in self.documents]
m = dict((w, filter(lambda v: w in v, v)) for w in self.terms)
D = {}
for i, w1 in enumerate(self.terms):
for j, w2 in enumerate(self.terms[i+1:]):
d = self.kullback_leibler_divergence(w1, w2, _vectors=v, _map=m)
D[w1] = w1 in D and D[w1]+d or d
D[w2] = w2 in D and D[w2]+d or d
if verbose:
# kullback-leibler 80.0% (550/700)
print method + " " + ("%.1f"%(float(i) / len(self.terms) * 100)).rjust(4) + "% " \
+ "(%s/%s)" % (i+1, len(self.terms))
subset = sorted(((d, w) for w, d in D.iteritems()), reverse=True)
subset = [w for d, w in subset[:top]]
return subset
def filter(self, features=[]):
""" Returns a new Corpus with documents only containing the given list of words,
for example a subset returned from Corpus.feature_selection().
"""
features = dict.fromkeys(features, True)
corpus = Corpus(weight=self.weight)
corpus.extend([
Document(dict((w, f) for w, f in d.terms.iteritems() if w in features),
name = d.name,
type = d.type) for d in self.documents])
return corpus
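# A minimal usage sketch (the folder path is hypothetical; Corpus.build(),
# Corpus.search() and Document.keywords() are all defined above):
#
#   corpus = Corpus.build("folder/*.txt")
#   print corpus.search("some query words", top=3)    # (weight, Document) tuples
#   print corpus.documents[0].keywords(top=5)         # (relevancy, word) tuples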
#### FREQUENT CONCEPT SETS #########################################################################
# Agrawal R. & Srikant R. (1994), Fast algorithms for mining association rules in large databases.
# Based on: https://gist.github.com/1423287
class Apriori:
def __init__(self):
self._candidates = []
self._support = {}
def C1(self, sets):
""" Returns the unique words from all sets as a list of (hashable) frozensets.
"""
return [frozenset([v]) for v in set(chain(*sets))]
def Ck(self, sets):
""" For the given sets of length k, returns combined candidate sets of length k+1.
"""
Ck = []
for i, s1 in enumerate(sets):
for j, s2 in enumerate(sets[i+1:]):
if set(list(s1)[:-1]) == set(list(s2)[:-1]):
Ck.append(s1 | s2)
return Ck
def Lk(self, sets, candidates, support=0.0):
""" Prunes candidate sets whose frequency < support threshold.
Returns a dictionary of (candidate set, frequency)-items.
"""
Lk, x = {}, 1.0 / (len(sets) or 1) # relative count
for s1 in candidates:
for s2 in sets:
if s1.issubset(s2):
Lk[s1] = s1 in Lk and Lk[s1]+x or x
return dict((s, f) for s, f in Lk.items() if f >= support)
def __call__(self, sets, support=0.5):
""" Returns a dictionary of (set(features), frequency)-items.
The given support (0.0-1.0) is the relative amount of documents
            in which a combination of words must appear.
"""
C1 = self.C1(sets)
L1 = self.Lk(sets, C1, support)
self._candidates = [L1.keys()]
self._support = L1
while True:
# Terminate when no further extensions are found.
if len(self._candidates[-1]) == 0:
break
# Extend frequent subsets one item at a time.
Ck = self.Ck(self._candidates[-1])
Lk = self.Lk(sets, Ck, support)
self._candidates.append(Lk.keys())
self._support.update(Lk)
return self._support
apriori = Apriori()
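# A small worked example: with two "documents" {a, b} and {a, c} and
# support=0.5, the singleton set {a} occurs in every document (frequency 1.0)
# while {b}, {c}, {a, b} and {a, c} each reach exactly 0.5:
#
#   apriori([{"a": 1, "b": 1}, {"a": 1, "c": 1}], support=0.5)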
#### LATENT SEMANTIC ANALYSIS ######################################################################
# Based on:
# http://en.wikipedia.org/wiki/Latent_semantic_analysis
# http://blog.josephwilk.net/projects/latent-semantic-analysis-in-python.html
class LSA:
def __init__(self, corpus, k=NORM):
""" Latent Semantic Analysis is a statistical machine learning method
based on singular value decomposition (SVD).
Related terms in the corpus are grouped into "concepts".
Documents then get a concept vector that is an approximation of the original vector,
but with reduced dimensionality so that cosine similarity and clustering run faster.
"""
import numpy
matrix = [corpus.vector(d).values() for d in corpus.documents]
matrix = numpy.array(matrix)
# Singular value decomposition, where u * sigma * vt = svd(matrix).
# Sigma is the diagonal matrix of singular values,
# u has document rows and concept columns, vt has concept rows and term columns.
u, sigma, vt = numpy.linalg.svd(matrix, full_matrices=False)
# Delete the smallest coefficients in the diagonal matrix (i.e., at the end of the list).
# The difficulty and weakness of LSA is knowing how many dimensions to reduce
# (generally L2-norm is used).
if k == NORM:
k = int(round(numpy.linalg.norm(sigma)))
if k == TOP300:
k = max(0, len(sigma) - 300)
if isinstance(k, int):
k = max(0, len(sigma) - k)
if type(k).__name__ == "function":
k = max(0, int(k(sigma)))
#print numpy.dot(u, numpy.dot(numpy.diag(sigma), vt))
# Apply dimension reduction.
# The maximum length of a concept vector = the number of documents.
assert k < len(corpus.documents), \
"can't create more dimensions than there are documents"
tail = lambda list, i: range(len(list)-i, len(list))
u, sigma, vt = (
numpy.delete(u, tail(u[0], k), axis=1),
numpy.delete(sigma, tail(sigma, k), axis=0),
numpy.delete(vt, tail(vt, k), axis=0)
)
# Store as Python dict and lists so we can cPickle it.
self.corpus = corpus
self._terms = dict(enumerate(corpus.vector().keys())) # Vt-index => word.
self.u, self.sigma, self.vt = (
dict((d.id, Vector((i, float(x)) for i, x in enumerate(v))) for d, v in izip(corpus, u)),
list(sigma),
[[float(x) for x in v] for v in vt]
)
@property
def terms(self):
# Yields a list of all words, identical to LSA.corpus.vector.keys()
return self._terms.values()
features = terms
@property
def concepts(self):
""" Yields a list of all concepts, each a dictionary of (word, weight)-items.
"""
# Round the weight so 9.0649330400000009e-17 becomes a more meaningful 0.0.
return [dict((self._terms[i], round(w, 15)) for i, w in enumerate(concept)) for concept in self.vt]
@property
def vectors(self):
""" Yields a dictionary of (Document.id, concepts),
where each concept is a dictionary of (concept_index, weight)-items.
"""
return self.u
def __getitem__(self, id):
return self.u[id]
def __contains__(self, id):
return id in self.u
def __iter__(self):
return iter(self.u)
def __len__(self):
return len(self.u)
def transform(self, document):
""" Given a document not in the corpus, returns a vector in LSA concept space.
"""
if document.id in self.u:
return self.u[document.id]
if document.id in _lsa_transform_cache:
return _lsa_transform_cache[document.id]
import numpy
v = self.corpus.vector(document)
v = [v[self._terms[i]] for i in range(len(v))]
v = numpy.dot(numpy.dot(numpy.linalg.inv(numpy.diag(self.sigma)), self.vt), v)
v = _lsa_transform_cache[document.id] = Vector(enumerate(v))
return v
# LSA cache for Corpus.search() shouldn't be stored with Corpus.save(),
# so it is a global variable instead of a property of the LSA class.
_lsa_transform_cache = {}
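# Usage sketch: corpus.reduce(dimensions=2) (an alias for
# latent_semantic_analysis) builds a two-dimensional concept space, after
# which Corpus.cosine_similarity() and Corpus.cluster() work on the reduced
# vectors; setting corpus.lsa = False restores the plain tf-idf vectors.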
#def iter2array(iterator, typecode):
# a = numpy.array([iterator.next()], typecode)
# shape0 = a.shape[1:]
# for (i, item) in enumerate(iterator):
# a.resize((i+2,) + shape0)
# a[i+1] = item
# return a
#def filter(matrix, min=0):
# columns = numpy.max(matrix, axis=0)
# columns = [i for i, v in enumerate(columns) if v <= min] # Indices of removed columns.
# matrix = numpy.delete(matrix, columns, axis=1)
# return matrix, columns
#### CLUSTERING ####################################################################################
# Clustering assigns vectors to subsets based on a distance measure,
# which determines how "similar" two vectors are.
# For example, for (x,y) coordinates in 2D we could use Euclidean distance ("as the crow flies");
# for document vectors we could use cosine similarity.
def features(vectors):
""" Returns a set of unique keys from all the given Vectors.
"""
return set(k for k in chain(*vectors))
def mean(iterator, length):
""" Returns the arithmetic mean of the values in the given iterator.
"""
return sum(iterator) / float(length or 1)
def centroid(vectors, keys=[]):
""" Returns the center of the list of vectors
(e.g., the geometric center of a list of (x,y)-coordinates forming a polygon).
Since vectors are sparse, the list of all keys (=Corpus.vector) must be given.
"""
c = []
for v in vectors:
if isinstance(v, Cluster):
c.extend(v.flatten())
elif isinstance(v, Document):
c.append(v.vector)
else:
c.append(v)
if not keys:
keys = features(c)
c = [(k, mean((v.get(k, 0) for v in c), len(c))) for k in keys]
c = Vector((k, v) for k, v in c if v != 0)
return c
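# For example (a small sketch; missing keys count as 0 when averaging sparse vectors):
#v1 = Vector({"x": 1, "y": 2})
#v2 = Vector({"x": 3})
#print centroid([v1, v2]) # => Vector with x = 2.0 and y = 1.0.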
COSINE, EUCLIDEAN, MANHATTAN, HAMMING = \
"cosine", "euclidean", "manhattan", "hamming"
def distance(v1, v2, method=COSINE):
""" Returns the distance between two vectors.
"""
if method == COSINE:
return 1 - sum(v1.get(w,0) * f for w, f in v2.iteritems()) / (v1.norm * v2.norm or 1.0)
if method == EUCLIDEAN: # Squared distance is 1.5x faster.
return sum((v1.get(w,0) - v2.get(w,0))**2 for w in set(chain(v1, v2)))
if method == MANHATTAN:
return sum(abs(v1.get(w,0) - v2.get(w,0)) for w in set(chain(v1, v2)))
if method == HAMMING:
d = sum(not (w in v1 and w in v2 and v1[w] == v2[w]) for w in set(chain(v1, v2)))
d = d / float(max(len(v1), len(v2)) or 1)
return d
if isinstance(method, type(distance)):
# Given method is a function of the form: distance(v1, v2) => float.
return method(v1, v2)
_distance = distance
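# For example (a small sketch; cosine distance is 0.0 for identical vectors, 1.0 for orthogonal ones):
#v1 = Vector(wings=0, beak=0, claws=1, paws=1, fur=1) # cat
#v2 = Vector(wings=1, beak=1, claws=1, paws=0, fur=0) # bird
#print distance(v1, v2, method=COSINE)    # => ~0.67 (only "claws" is shared).
#print distance(v1, v2, method=EUCLIDEAN) # => 4 (squared distance, see above).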
class DistanceMap:
def __init__(self, method=COSINE):
""" A lazy map of cached distances between vectors.
"""
self.method = method
self._cache = {}
def distance(self, v1, v2):
""" Returns the cached distance between two vectors.
"""
try:
d = self._cache[(v1.id, v2.id)]
except KeyError:
d = self._cache[(v1.id, v2.id)] = distance(v1, v2, method=self.method)
return d
#--- K-MEANS ---------------------------------------------------------------------------------------
# Fast, no guarantee of convergence or optimal solution (random starting clusters).
# 3000 vectors with 100 features (LSA, density 1.0): 1 minute with k=100 (20 iterations).
# 3000 vectors with 200 features (LSA, density 1.0): 3 minutes with k=100 (20 iterations).
# Initialization methods:
RANDOM, KMPP = "random", "kmeans++"
def k_means(vectors, k=None, iterations=10, distance=COSINE, **kwargs):
""" Returns a list of k clusters,
where each cluster is a list of similar vectors (Lloyd's algorithm).
There is no guarantee of convergence or optimal solution.
"""
init = kwargs.get("seed", kwargs.get("initialization", RANDOM))
keys = kwargs.get("keys") or list(features(vectors))
if k is None:
k = sqrt(len(vectors) / 2)
if k < 2:
return [[v for v in vectors]]
if init == KMPP:
clusters = kmpp(vectors, k, distance)
else:
clusters = [[] for i in xrange(int(k))]
for i, v in enumerate(sorted(vectors, key=lambda x: random())):
# Randomly partition the vectors across k clusters.
clusters[i % int(k)].append(v)
# Cache the distance calculations between vectors (4x faster).
map = DistanceMap(method=distance); distance = map.distance
converged = False
while not converged and iterations > 0 and k > 0:
# Calculate the center of each cluster.
centroids = [centroid(cluster, keys) for cluster in clusters]
# Triangle inequality: one side is shorter than the sum of the two other sides.
# We can exploit this to avoid costly distance() calls (up to 3x faster).
p = 0.5 * kwargs.get("p", 0.8) # "Relaxed" triangle inequality (cosine distance is a semimetric) 0.25-0.5.
D = {}
for i in range(len(centroids)):
for j in range(i, len(centroids)): # center1–center2 < center1–vector + vector–center2 ?
D[(i,j)] = D[(j,i)] = p * distance(centroids[i], centroids[j])
# For every vector in every cluster,
# check if it is nearer to the center of another cluster (if so, assign it).
# When visualized, this produces a Voronoi diagram.
converged = True
for i in xrange(len(clusters)):
            for v in list(clusters[i]): # Iterate over a copy; v may be moved to another cluster below.
nearest, d1 = i, distance(v, centroids[i])
for j in xrange(len(clusters)):
                    if D[(i,j)] < d1: # Triangle inequality (Elkan, 2003).
                        d2 = distance(v, centroids[j])
                        if d2 < d1:
                            nearest, d1 = j, d2 # Track the nearest center found so far.
if nearest != i: # Other cluster is nearer.
clusters[nearest].append(clusters[i].pop(clusters[i].index(v)))
converged = False
iterations -= 1; #print iterations
return clusters
kmeans = k_means
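# For example (a minimal sketch in the style of the hierarchical() example further below;
# the exact result varies between runs because initialization is random):
#v1 = Vector(wings=0, beak=0, claws=1, paws=1, fur=1) # cat
#v2 = Vector(wings=0, beak=0, claws=0, paws=1, fur=1) # dog
#v3 = Vector(wings=1, beak=1, claws=1, paws=0, fur=0) # bird
#print k_means([v1, v2, v3], k=2, seed=KMPP)
# => usually one cluster with the bird and one with the cat and the dog.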
def kmpp(vectors, k, distance=COSINE):
""" The k-means++ initialization algorithm, with the advantage that:
- it generates better clusterings than standard k-means (RANDOM) on virtually all data sets,
- it runs faster than standard k-means on average,
- it has a theoretical approximation guarantee.
"""
# Cache the distance calculations between vectors (4x faster).
map = DistanceMap(method=distance); distance = map.distance
# David Arthur, 2006, http://theory.stanford.edu/~sergei/slides/BATS-Means.pdf
# Based on:
# http://www.stanford.edu/~darthur/kmpp.zip
# http://yongsun.me/2008/10/k-means-and-k-means-with-python
# Choose one center at random.
# Calculate the distance between each vector and the nearest center.
centroids = [choice(vectors)]
d = [distance(v, centroids[0]) for v in vectors]
s = sum(d)
for _ in range(int(k) - 1):
# Choose a random number y between 0 and d1 + d2 + ... + dn.
        # Find vector i so that: d1 + d2 + ... + di >= y > d1 + d2 + ... + d(i-1).
# Perform a number of local tries so that y yields a small distance sum.
i = 0
for _ in range(int(2 + log(k))):
y = random() * s
for i1, v1 in enumerate(vectors):
if y <= d[i1]:
break
y -= d[i1]
s1 = sum(min(d[j], distance(v1, v2)) for j, v2 in enumerate(vectors))
if s1 < s:
s, i = s1, i1
# Add vector i as a new center.
# Repeat until we have chosen k centers.
centroids.append(vectors[i])
d = [min(d[i], distance(v, centroids[-1])) for i, v in enumerate(vectors)]
s = sum(d)
# Assign points to the nearest center.
clusters = [[] for i in xrange(int(k))]
for v1 in vectors:
d = [distance(v1, v2) for v2 in centroids]
clusters[d.index(min(d))].append(v1)
return clusters
#--- HIERARCHICAL ----------------------------------------------------------------------------------
# Slow, optimal solution guaranteed in O(len(vectors)^3).
# 100 vectors with 6 features (density 1.0): 0.1 seconds.
# 1000 vectors with 6 features (density 1.0): 1 minute.
# 3000 vectors with 6 features (density 1.0): 15 minutes.
class Cluster(list):
def __init__(self, *args, **kwargs):
list.__init__(self, *args, **kwargs)
@property
def depth(self):
""" Yields the maximum depth of nested clusters.
Cluster((1, Cluster((2, Cluster((3, 4)))))).depth => 2.
"""
return max([0] + [1+n.depth for n in self if isinstance(n, Cluster)])
def flatten(self, depth=1000):
""" Flattens nested clusters to a list, down to the given depth.
Cluster((1, Cluster((2, Cluster((3, 4)))))).flatten(1) => [1, 2, Cluster(3, 4)].
"""
a = []
for item in self:
if isinstance(item, Cluster) and depth > 0:
a.extend(item.flatten(depth-1))
else:
a.append(item)
return a
def traverse(self, visit=lambda cluster: None):
""" Calls the visit() function on this and each nested cluster.
"""
visit(self)
for item in self:
if isinstance(item, Cluster):
item.traverse(visit)
def __repr__(self):
return "Cluster(%s)" % list.__repr__(self)[1:-1]
def hierarchical(vectors, k=1, iterations=1000, distance=COSINE, **kwargs):
""" Returns a Cluster containing k items (vectors or clusters with nested items).
With k=1, the top-level cluster contains a single cluster.
"""
keys = kwargs.get("keys", list(features(vectors)))
clusters = Cluster((v for v in sorted(vectors, key=lambda x: random())))
centroids = [(v.id, v) for v in clusters]
map = {}
for _ in range(iterations):
if len(clusters) <= max(k, 1):
break
nearest, d0 = None, None
for i, (id1, v1) in enumerate(centroids):
for j, (id2, v2) in enumerate(centroids[i+1:]):
# Cache the distance calculations between vectors.
# Code is identical to DistanceMap.distance(),
# but it is faster in the inner loop to use it directly.
try:
d = map[(id1, id2)]
except KeyError:
d = map[(id1, id2)] = _distance(v1, v2, method=distance)
if d0 is None or d < d0:
nearest, d0 = (i, j+i+1), d
# Pairs of nearest clusters are merged as we move up the hierarchy:
i, j = nearest
merged = Cluster((clusters[i], clusters[j]))
clusters.pop(j)
clusters.pop(i)
clusters.append(merged)
# Cache the center of the new cluster.
v = centroid(merged.flatten(), keys)
centroids.pop(j)
centroids.pop(i)
centroids.append((v.id, v))
return clusters
#v1 = Vector(wings=0, beak=0, claws=1, paws=1, fur=1) # cat
#v2 = Vector(wings=0, beak=0, claws=0, paws=1, fur=1) # dog
#v3 = Vector(wings=1, beak=1, claws=1, paws=0, fur=0) # bird
#print hierarchical([v1, v2, v3])
#### CLASSIFIER ####################################################################################
#--- CLASSIFIER BASE CLASS -------------------------------------------------------------------------
class Classifier:
def __init__(self):
self._classes = []
self._features = []
@property
def features(self):
return self._features
@property
def terms(self):
return self.features
@property
def classes(self):
return self._classes
@property
def types(self):
return self.classes
@property
def binary(self):
""" Yields True if the classifier has exactly two prediction classes.
"""
return sorted(self.classes) in ([False, True], [0, 1])
def train(self, document, type=None):
# Must be implemented in a subclass.
pass
def classify(self, document):
# Must be implemented in a subclass.
return None
def _vector(self, document, type=None):
""" Returns a (type, Vector)-tuple for the given document.
If the document is part of a LSA-reduced corpus, returns the LSA concept vector.
If the given type is None, returns document.type (if a Document is given).
"""
if isinstance(document, Document):
if type is None:
type = document.type
if document.corpus and document.corpus.lsa:
return type, document.corpus.lsa[document.id] # LSA concept vector.
return type, document.vector
if isinstance(document, dict):
return type, Vector(document)
if isinstance(document, (list, tuple)):
return type, Document(document, filter=None, stopwords=True).vector
if isinstance(document, basestring):
return type, Document(document, filter=None, stopwords=True).vector
@classmethod
def test(cls, corpus=[], d=0.65, folds=1, **kwargs):
""" Returns an (accuracy, precision, recall, F-score)-tuple for the given corpus.
The corpus is a list of documents or (wordlist, type)-tuples.
2/3 of the data will be used as training material and tested against the other 1/3.
With folds > 1, K-fold cross-validation is performed.
For example: in 10-fold cross-validation ten tests are performed,
each using a different 1/10 of the corpus as testing data.
For non-binary classifiers, precision, recall and F-score are None.
"""
corpus = [isinstance(x, Document) and (x, x.type) or x for x in corpus]
corpus = shuffled(corpus) # Avoid a list sorted by type (because we take successive folds).
classes = set(type for document, type in corpus)
binary = len(classes) == 2 and sorted(classes) in ([False,True], [0,1])
m = [0, 0, 0, 0] # accuracy | precision | recall | F1-score.
K = max(folds, 1)
for k in range(K):
classifier = cls(**kwargs)
t = len(corpus) / float(K) # Documents per fold.
i = int(round(k * t)) # Corpus start index.
j = int(round(k * t + t)) # Corpus stop index.
if K == 1:
i = int(len(corpus) * d)
j = int(len(corpus))
for document, type in corpus[:i] + corpus[j:]:
# Train with 9/10 of the corpus, using 1/10 fold for testing.
classifier.train(document, type)
TP = TN = FP = FN = 0
if not binary:
# If the classifier predicts classes other than True/False,
# we can only measure accuracy.
for document, type in corpus[i:j]:
if classifier.classify(document) == type:
TP += 1
m[0] += TP / float(j-i)
else:
# For binary classifiers, calculate the confusion matrix
# to measure precision and recall.
for document, b1 in corpus[i:j]:
b2 = classifier.classify(document)
if b1 and b2:
TP += 1 # true positive
elif not b1 and not b2:
TN += 1 # true negative
elif not b1 and b2:
FP += 1 # false positive (type I error)
elif b1 and not b2:
FN += 1 # false negative (type II error)
#print "%s\t%s\t%s\t%s\t%s\t%s" % (b1, b2, TP, TN, FP, FN)
m[0] += float(TP+TN) / ((TP+TN+FP+FN) or 1)
m[1] += float(TP) / ((TP+FP) or 1)
m[2] += float(TP) / ((TP+FN) or 1)
m = [v/K for v in m]
m[3] = binary and 2 * m[1] * m[2] / ((m[1] + m[2]) or 1) or 0 # F1-score.
return binary and tuple(m) or (m[0], None, None, None)
@classmethod
def k_fold_cross_validation(cls, corpus=[], k=10, **kwargs):
return cls.test(corpus, kwargs.pop("d", 0.65), k, **kwargs)
crossvalidate = cross_validate = k_fold_cross_validation
def save(self, path):
cPickle.dump(self, open(path, "w"), BINARY)
@classmethod
def load(cls, path):
return cPickle.load(open(path))
#--- NAIVE BAYES CLASSIFIER ------------------------------------------------------------------------
# Based on: Magnus Lie Hetland, http://hetland.org/coding/python/nbayes.py
# We can't include these in the NaiveBayes class description,
# because you can't pickle functions:
# NBid1: store word index, used with aligned=True.
# NBid2: ignore word index, used with aligned=False.
NBid1 = lambda type, v, i: (type, v, i)
NBid2 = lambda type, v, i: (type, v, 1)
class NaiveBayes(Classifier):
def __init__(self, aligned=False):
""" Naive Bayes is a simple supervised learning method for text classification.
For example: if we have a set of documents of movie reviews (training data),
and we know the star rating of each document,
we can predict the star rating for other movie review documents.
With aligned=True, the word index is taken into account when training on lists of words.
"""
self._aligned = aligned
self._classes = {} # Frequency of each class (or type).
self._features = {} # Frequency of each feature, as (type, feature, value)-tuples.
self._count = 0 # Number of training instances.
@property
def classes(self):
return self._classes.keys()
@property
def features(self):
return list(set(k[1] for k in self._features.iterkeys()))
def train(self, document, type=None):
""" Trains the classifier with the given document of the given type (i.e., class).
A document can be a Document object or a list of words (or other hashable items).
If no type is given, Document.type will be used instead.
"""
id = self._aligned and NBid1 or NBid2
type, vector = self._vector(document, type=type)
self._classes[type] = self._classes.get(type, 0) + 1
for i, (w, f) in enumerate(vector.iteritems()):
self._features[id(type, w, i)] = self._features.get(id(type, w, i), 0) + f
self._count += 1
def classify(self, document):
""" Returns the type with the highest probability for the given document
(a Document object or a list of words).
If the training documents come from a LSA-reduced corpus,
the given document must be Corpus.lsa.transform(document).
"""
id = self._aligned and NBid1 or NBid2
def g(document, type):
# Bayesian discriminant, proportional to posterior probability.
g = 1.0 * self._classes[type] / self._count
for i, (w, f) in enumerate(self._vector(document)[1].iteritems()):
g /= self._classes[type]
g *= self._features.get(id(type, w, i), 0)
g *= f
return g
try:
return max((g(document, type), type) for type in self._classes)[1]
except ValueError: # max() arg is an empty sequence
return None
Bayes = NaiveBayes
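# For example (a minimal sketch in the style of the kNN example further below):
#nb = NaiveBayes()
#nb.train(Document("cats purr and drink milk", type="cat", threshold=0, stemmer=None))
#nb.train(Document("dogs bark and chase cats", type="dog", threshold=0, stemmer=None))
#print nb.classify(Document("milk", threshold=0, stemmer=None)) # => "cat"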
#--- SUPPORT VECTOR MACHINE ------------------------------------------------------------------------
# pattern.vector comes bundled with LIBSVM 3.11.
# http://www.csie.ntu.edu.tw/~cjlin/libsvm/
#
# Precompiled binaries for 32-bit Windows and Mac OS X, and 64-bit Mac OS X and Ubuntu are included.
# - If these don't work, you need to download and compile LIBSVM from source.
# - Mac OS X may complain, if so, rename "-soname" to "-install_name" in libsvm/Makefile.
# - Put the shared library (i.e., "libsvm.dll", "libsvm.so") in pattern/vector/svm/.
# - If the shared library is named "libsvm.so.2", strip the ".2".
# SVM type.
SVC = CLASSIFICATION = 0
SVR = REGRESSION = 3
SVO = DETECTION = 2 # One-class SVM: X belongs to the class or not?
# SVM kernel functions.
# The simplest way to divide two clusters is a straight line.
# If the clusters are separated by a curved line,
# separation may be easier in higher dimensions (using a kernel).
LINEAR = 0 # Straight line => u' * v
POLYNOMIAL = 1 # Curved line => (gamma * u' * v + coef0) ** degree
RADIAL = RBF = 2 # Curved path => exp(-gamma * |u-v| ** 2)
class SVM(Classifier):
def __init__(self, *args, **kwargs):
""" Support Vector Machine is a supervised learning method, where
training documents are represented as points in an n-dimensional space.
The SVM constructs a number of "hyperplanes" that subdivide the space.
Optional parameters include:
type=CLASSIFICATION, kernel=LINEAR,
degree=3, gamma=1/len(SVM.features), coeff0=0,
cost=1, epsilon=0.01,
cache=100,
probability=False,
debug=False
"""
import svm
self._libsvm = svm
self._vectors = []
self._model = None
if len(args) > 0:
kwargs.setdefault("type", args[0])
if len(args) > 1:
kwargs.setdefault("kernel", args[1])
for k, v in (
( "type", CLASSIFICATION),
( "kernel", LINEAR),
( "degree", 3),
( "gamma", 0),
( "coeff0", 0),
( "cost", 1),
( "epsilon", 0.1),
( "nu", 0.5),
( "cache", 100),
("probability", False),
( "debug", False)): setattr(self, k, kwargs.get(k, v))
@property
def classes(self):
return list(set(type for type, v in self._vectors))
@property
def features(self):
return list(features(v for type, v in self._vectors))
@property
def support_vectors(self):
return self._model and self._model[0].get_SV() or []
sv = support_vectors
def _libsvm_train(self):
""" Calls libsvm.svm_train() to create a model.
Vector classes and features (i.e., words) are mapped to integers.
"""
M = [v for type, v in self._vectors] # List of vectors.
H1 = dict((w, i) for i, w in enumerate(self.features)) # Feature => integer hash.
H2 = dict((w, i) for i, w in enumerate(self.classes)) # Class => integer hash.
H3 = dict((i, w) for i, w in enumerate(self.classes)) # Class reversed hash.
x = [dict((H1[k], v) for k, v in v.items()) for v in M] # Hashed vectors.
y = [H2[type] for type, v in self._vectors] # Hashed classes.
o = "-s %s -t %s -d %s -g %s -r %s -c %s -p %s -n %s -m %s -b %s %s" % (
self.type, self.kernel, self.degree, self.gamma, self.coeff0, self.cost, self.epsilon, self.nu,
self.cache,
self.probability is True and 1 or 0,
self.debug is False and "-q" or ""
)
# Cache the model and the feature hash.
# SVM.train() will remove the cached model (since it needs to be retrained).
self._model = (self._libsvm.svm_train(y, x, o), H1, H2, H3)
def _libsvm_predict(self, document):
""" Calls libsvm.svm_predict() with the cached model.
For CLASSIFICATION, returns a predicted class.
For CLASSIFICATION with probability=True, returns a list of (weight, class)-tuples.
For REGRESSION, returns a float.
"""
if self._model is None:
return None
if self.debug is False:
# Redirect stdout to a file stream.
so, sys.stdout = sys.stdout, StringIO()
M = self._model[0]
H1 = self._model[1]
H2 = self._model[2]
H3 = self._model[3]
v = self._vector(document)[1]
v = dict((H1.get(k, len(H1)+i), v) for i, (k,v) in enumerate(v.items()))
p = self._libsvm.svm_predict([0], [v], M, "-b %s" % int(self.probability))
t = M.get_svm_type()
if self.debug is False:
sys.stdout = so
if t == CLASSIFICATION and self.probability is True:
return [(H3[i], w) for i, w in enumerate(p[2][0])]
if t == CLASSIFICATION:
return H3.get(int(p[0][0]))
if t == REGRESSION:
return p[0][0]
if t == DETECTION:
return p[0][0] > 0 # -1 = outlier => return False
return p[0][0]
def train(self, document, type=None):
""" Trains the classifier with the given document of the given type (i.e., class).
A document can be a Document object or a list of words (or other hashable items).
If no type is given, Document.type will be used instead.
"""
self._model = None
self._vectors.append(self._vector(document, type=type))
def classify(self, document):
""" Returns the type with the highest probability for the given document
(a Document object or a list of words).
"""
if self._model is None:
self._libsvm_train()
return self._libsvm_predict(document)
def save(self, path):
tmp = (self._libsvm, self._model)
self._libsvm = None
self._model = None # Retrains after Classifier.load().
Classifier.save(self, path)
self._libsvm, \
self._model = tmp
@classmethod
def load(cls, path):
import svm
classifier = cPickle.load(open(path))
classifier._libsvm = svm
classifier._libsvm_train()
return classifier
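# For example (a minimal sketch; it assumes the bundled LIBSVM shared library can be loaded, see above):
#classifier = SVM(type=CLASSIFICATION, kernel=LINEAR)
#classifier.train(Document("cats purr and drink milk", type="cat", threshold=0, stemmer=None))
#classifier.train(Document("dogs bark and chase cats", type="dog", threshold=0, stemmer=None))
#print classifier.classify(Document("milk", threshold=0, stemmer=None)) # => most likely "cat".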
#--- K-NEAREST NEIGHBOR CLASSIFIER -----------------------------------------------------------------
class NearestNeighbor(Classifier):
def __init__(self, k=10, distance=COSINE):
""" k-nearest neighbor (kNN) is a simple supervised learning method for text classification.
Documents are classified by a majority vote of nearest neighbors (cosine distance)
in the training corpus.
"""
self.k = k # Number of nearest neighbors to observe.
self.distance = distance # COSINE, EUCLIDEAN, ...
self._vectors = [] # Training instances.
self._kdtree = None
@property
def classes(self):
return list(set(type for type, v in self._vectors))
@property
def features(self):
return list(features(v for type, v in self._vectors))
def train(self, document, type=None):
""" Trains the classifier with the given document of the given type (i.e., class).
A document can be a Document object or a list of words (or other hashable items).
If no type is given, Document.type will be used instead.
"""
self._vectors.append(self._vector(document, type=type))
def classify(self, document):
""" Returns the type with the highest probability for the given document
(a Document object or a list of words).
"""
# Basic majority voting.
# Distance is calculated between the document vector and all training instances.
# This will make NearestNeighbor.test() slow in higher dimensions.
classes = {}
v1 = self._vector(document)[1]
# k-d trees are slower than brute-force for vectors with high dimensionality:
#if self._kdtree is None:
# self._kdtree = kdtree((v for type, v in self._vectors))
# self._kdtree.map = dict((v.id, type) for type, v in self._vectors)
#D = self._kdtree.nearest_neighbors(v1, self.k, self.distance)
D = ((distance(v1, v2, method=self.distance), type) for type, v2 in self._vectors)
D = ((d, type) for d, type in D if d < 1) # Nothing in common if distance=1.0.
D = heapq.nsmallest(self.k, D) # k-least distant.
for d, type in D:
classes.setdefault(type, 0)
classes[type] += 1 / (d or 0.0000000001)
try:
# Pick random winner if several candidates have equal highest score.
return choice([k for k, v in classes.iteritems() if v == max(classes.values()) > 0])
except IndexError:
return None
kNN = KNN = NearestNeighbor
#d1 = Document("cats have stripes, purr and drink milk", type="cat", threshold=0, stemmer=None)
#d2 = Document("cows are black and white, they moo and give milk", type="cow", threshold=0, stemmer=None)
#d3 = Document("birds have wings and can fly", type="bird", threshold=0, stemmer=None)
#knn = kNN()
#for d in (d1,d2,d3):
# knn.train(d)
#print knn.binary
#print knn.classes
#print knn.classify(Document("something that can fly", threshold=0, stemmer=None))
#print NearestNeighbor.test((d1,d2,d3), folds=2)
#### K-D TREE ######################################################################################
class KDTree:
_v1 = Vector({0:0})
_v2 = Vector({0:0})
    def __init__(self, vectors, map=None):
""" A partitioned vector space that is (sometimes) faster for nearest neighbor search.
A k-d tree is an extension of a binary tree for k-dimensional data,
where every vector generates a hyperplane that splits the space into two subspaces.
            The given list can contain Document or Vector objects.
"""
class Node:
def __init__(self, vector, left, right, axis):
self.vector, self.left, self.right, self.axis = vector, left, right, axis
def balance(vectors, depth=0, keys=None):
# Based on: http://en.wikipedia.org/wiki/Kd-tree
if not vectors:
return None
if not keys:
keys = sorted(features(vectors))
a = keys[depth % len(keys)] # splitting axis
v = sorted(vectors, key=lambda v: v.get(a, 0))
m = len(v) // 2 # median pivot
return Node(
vector = v[m],
left = balance(v[:m], depth+1, keys),
right = balance(v[m+1:], depth+1, keys),
axis = a)
        self.map = map if map is not None else {} # Don't share a mutable default dict across trees.
self.root = balance([self._vector(v) for v in vectors])
def _vector(self, v):
""" Returns a Vector for the given document or vector.
"""
if isinstance(v, Document):
self.map.setdefault(v.vector.id, v); return v.vector
return v
def nearest_neighbors(self, vector, k=10, distance=COSINE):
""" Returns a list of (distance, vector)-tuples from the search space,
sorted nearest-first to the given vector.
"""
class NN(list):
def update(self, v1, v2):
d = _distance(v1, v2, method=distance)
if len(self) < k or self[-1][0] > d:
# Add nearer vectors to the sorted list.
insort(self, (d, v1))
def search(self, vector, k, best=NN()):
# Based on: http://code.google.com/p/python-kdtree/
if self is None:
return best
if self.left is self.right is None: # leaf
best.update(self.vector, vector)
return best
# Compare points in current dimension to select near and far subtree.
# We may need to search the far subtree too (see below).
if vector.get(self.axis) < self.vector.get(self.axis):
near, far = self.left, self.right
else:
near, far = self.right, self.left
# Recursively search near subtree down to leaf.
best = search(near, vector, k, best)
best.update(self.vector, vector)
# It's faster to reuse two Vectors than to create them:
dict.__setitem__(KDTree._v1, 0, self.vector.get(self.axis, 0))
dict.__setitem__(KDTree._v2, 0, vector.get(self.axis, 0))
KDTree._v1._norm = None # clear norm cache
KDTree._v2._norm = None
# If the hypersphere crosses the plane,
# there could be nearer points on the far side of the plane.
if _distance(KDTree._v1, KDTree._v2, method=distance) <= best[-1][0]:
best = search(far, vector, k, best)
return best
n = search(self.root, self._vector(vector), k+1)
n = [(d, self.map.get(v.id, v)) for d, v in n]
n = [(d, v) for d, v in n if v != vector][:k]
return n
nn = nearest_neighbors
kdtree = KDTree
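# For example (a minimal sketch; for sparse, high-dimensional document vectors the brute-force
# search in NearestNeighbor above is often faster):
#v1 = Vector({"x": 1.0, "y": 1.0})
#v2 = Vector({"x": 2.0, "y": 2.0})
#v3 = Vector({"x": 9.0, "y": 9.0})
#tree = KDTree([v1, v2, v3])
#print tree.nearest_neighbors(Vector({"x": 1.5, "y": 1.5}), k=2, distance=EUCLIDEAN)
# => [(distance, v1), (distance, v2)], the two nearest vectors.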
#### GENETIC ALGORITHM #############################################################################
class GeneticAlgorithm:
def __init__(self, candidates=[], **kwargs):
""" A genetic algorithm is a stochastic search method based on natural selection.
Each generation, the fittest candidates are selected and recombined into a new generation.
With each new generation the system converges towards an optimal fitness.
"""
self.population = candidates
self.generation = 0
self._avg = None # Average fitness for this generation.
# GeneticAlgorithm.fitness(), crossover(), mutate() can be given as functions:
for f in ("fitness", "crossover", "mutate"):
if f in kwargs:
setattr(self, f, kwargs[f])
def fitness(self, candidate):
# Must be implemented in a subclass, returns 0.0-1.0.
return 1.0
def crossover(self, candidate1, candidate2, d=0.5):
# Must be implemented in a subclass.
return None
def mutate(self, candidate, d=0.1):
# Must be implemented in a subclass.
return None or candidate
def update(self, top=0.7, crossover=0.5, mutation=0.1, d=0.9):
""" Updates the population by selecting the top fittest candidates,
and recombining them into a new generation.
"""
# Selection.
p = sorted((self.fitness(x), x) for x in self.population) # Weakest-first.
a = self._avg = float(sum(f for f, x in p)) / len(p)
x = min(f for f, x in p)
y = max(f for f, x in p)
i = 0
while len(p) > len(self.population) * top:
# Weaker candidates have a higher chance of being removed,
# chance being equal to (1-fitness), starting with the weakest.
if x + (y-x) * random() >= p[i][0]:
p.pop(i)
else:
i = (i+1) % len(p)
# Reproduction.
g = []
while len(g) < len(self.population):
# Choose randomly between recombination of parents or mutation.
# Mutation avoids local optima by maintaining genetic diversity.
if random() < d:
i = int(round(random() * (len(p)-1)))
j = choice(range(0,i) + range(i+1, len(p)))
g.append(self.crossover(p[i][1], p[j][1], d=crossover))
else:
g.append(self.mutate(choice(p)[1], d=mutation))
self.population = g
self.generation += 1
@property
def avg(self):
# Average fitness is supposed to increase each generation.
if not self._avg: self._avg = float(sum(map(self.fitness, self.population))) / len(self.population)
return self._avg
average_fitness = avg
GA = GeneticAlgorithm
# GA for floats between 0.0-1.0 that prefers higher numbers:
#class HighFloatGA(GeneticAlgorithm):
# def fitness(self, x):
# return x
# def crossover(self, x, y, d=0.5):
# return (x+y) / 2
# def mutate(self, x, d=0.1):
# return min(1, max(0, x + random()*0.2-0.1))
#
#ga = HighFloatGA([random() for i in range(100)])
#for i in range(100):
# ga.update()
# print ga.average_fitness
#print ga.population
|
{
"content_hash": "91f84c7faaa88b744b2067a53ed62727",
"timestamp": "",
"source": "github",
"line_count": 2051,
"max_line_length": 114,
"avg_line_length": 41.90443686006826,
"alnum_prop": 0.5602122262816187,
"repo_name": "decebel/dataAtom_alpha",
"id": "49eb7ceef65df97b733f9699a18fe31df068401c",
"size": "86911",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bin/plug/py/external/pattern/vector/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "485271"
},
{
"name": "C++",
"bytes": "797264"
},
{
"name": "JavaScript",
"bytes": "192237"
},
{
"name": "Objective-C",
"bytes": "13917"
},
{
"name": "Python",
"bytes": "1608265"
}
],
"symlink_target": ""
}
|
import _plotly_utils.basevalidators
class TickfontValidator(_plotly_utils.basevalidators.CompoundValidator):
def __init__(
self,
plotly_name="tickfont",
parent_name="scatterternary.marker.colorbar",
**kwargs,
):
super(TickfontValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
data_class_str=kwargs.pop("data_class_str", "Tickfont"),
data_docs=kwargs.pop(
"data_docs",
"""
color
family
HTML font family - the typeface that will be
applied by the web browser. The web browser
will only be able to apply a font if it is
available on the system which it operates.
Provide multiple font families, separated by
commas, to indicate the preference in which to
apply fonts if they aren't available on the
system. The Chart Studio Cloud (at
https://chart-studio.plotly.com or on-premise)
generates images on a server, where only a
select number of fonts are installed and
supported. These include "Arial", "Balto",
"Courier New", "Droid Sans",, "Droid Serif",
"Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
size
""",
),
**kwargs,
)
|
{
"content_hash": "f9b87e32f114a1e63a0051dc087e6845",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 72,
"avg_line_length": 37.73809523809524,
"alnum_prop": 0.5356466876971608,
"repo_name": "plotly/plotly.py",
"id": "5857416b58f5290c747a0531d305260407fc0dce",
"size": "1585",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/scatterternary/marker/colorbar/_tickfont.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "545"
},
{
"name": "JavaScript",
"bytes": "2074"
},
{
"name": "PostScript",
"bytes": "565328"
},
{
"name": "Python",
"bytes": "31506317"
},
{
"name": "TypeScript",
"bytes": "71337"
}
],
"symlink_target": ""
}
|
import ast
import logging
import threading
import time
import unittest
import six
from plop.collector import Collector, PlopFormatter
class CollectorTest(unittest.TestCase):
def filter_stacks(self, collector):
# Kind of hacky, but this is the simplest way to keep the tests
# working after the internals of the collector changed to support
# multiple formatters.
stack_counts = ast.literal_eval(PlopFormatter().format(collector))
counts = {}
for stack, count in six.iteritems(stack_counts):
filtered_stack = [frame[2] for frame in stack
if frame[0].endswith('collector_test.py')]
if filtered_stack:
counts[tuple(filtered_stack)] = count
return counts
def check_counts(self, counts, expected):
failed = False
output = []
for stack, count in six.iteritems(expected):
# every expected frame should appear in the data, but
# the inverse is not true if the signal catches us between
# calls.
self.assertTrue(stack in counts)
ratio = float(counts[stack])/float(count)
output.append('%s: expected %s, got %s (%s)' %
(stack, count, counts[stack], ratio))
if not (0.70 <= ratio <= 1.25):
failed = True
if failed:
for line in output:
logging.warning(line)
for key in set(counts.keys()) - set(expected.keys()):
logging.warning('unexpected key: %s: got %s' % (key, counts[key]))
self.fail("collected data did not meet expectations")
def test_collector(self):
start = time.time()
def a(end):
while time.time() < end: pass
c(time.time() + 0.1)
def b(end):
while time.time() < end: pass
c(time.time() + 0.1)
def c(end):
while time.time() < end: pass
collector = Collector(interval=0.01, mode='prof')
collector.start()
a(time.time() + 0.1)
b(time.time() + 0.2)
c(time.time() + 0.3)
end = time.time()
collector.stop()
elapsed = end - start
self.assertTrue(0.8 < elapsed < 0.9, elapsed)
counts = self.filter_stacks(collector)
expected = {
('a', 'test_collector'): 10,
('c', 'a', 'test_collector'): 10,
('b', 'test_collector'): 20,
('c', 'b', 'test_collector'): 10,
('c', 'test_collector'): 30,
}
self.check_counts(counts, expected)
# cost depends on stack depth; for this tiny test I see 40-80usec
time_per_sample = float(collector.sample_time) / collector.samples_taken
self.assertTrue(time_per_sample < 0.000100, time_per_sample)
def test_collect_threads(self):
start = time.time()
def a(end):
while time.time() < end: pass
def thread1_func():
a(time.time() + 0.2)
def thread2_func():
a(time.time() + 0.3)
collector = Collector(interval=0.01, mode='prof')
collector.start()
thread1 = threading.Thread(target=thread1_func)
thread2 = threading.Thread(target=thread2_func)
thread1.start()
thread2.start()
a(time.time() + 0.1)
while thread1.isAlive(): pass
while thread2.isAlive(): pass
thread1.join()
thread2.join()
end = time.time()
collector.stop()
elapsed = end - start
self.assertTrue(0.3 < elapsed < 0.4, elapsed)
counts = self.filter_stacks(collector)
expected = {
('a', 'test_collect_threads'): 10,
('a', 'thread1_func'): 20,
('a', 'thread2_func'): 30,
}
self.check_counts(counts, expected)
|
{
"content_hash": "12863103d2c5e3dfe1e7c914a9e94524",
"timestamp": "",
"source": "github",
"line_count": 110,
"max_line_length": 82,
"avg_line_length": 35.61818181818182,
"alnum_prop": 0.5421133231240429,
"repo_name": "omidraha/plop",
"id": "d5e22c7cd0bb034ed5da73ad0d54793e7ab520a2",
"size": "3918",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "plop/test/collector_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "39"
},
{
"name": "HTML",
"bytes": "1834"
},
{
"name": "JavaScript",
"bytes": "2953"
},
{
"name": "Python",
"bytes": "25082"
},
{
"name": "Ruby",
"bytes": "300"
},
{
"name": "Shell",
"bytes": "1396"
}
],
"symlink_target": ""
}
|
import importlib.metadata
import os
# Sphinx extensions
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.viewcode",
"sphinx.ext.napoleon",
"sphinx.ext.intersphinx",
"sphinxarg.ext",
]
intersphinx_mapping = {
"numpy": ("https://numpy.org/doc/stable/", None),
"pandas": ("https://pandas.pydata.org/docs/", None),
"proj": ("https://proj.org/", None),
"python": ("https://docs.python.org/3", None),
"shapely": ("https://shapely.readthedocs.io/en/stable/", None),
"xarray": ("https://docs.xarray.dev/en/stable/", None),
}
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = ".rst"
# The master toctree document.
master_doc = "index"
# General information about the project.
project = "pyproj"
copyright = "2006-2018, Jeffrey Whitaker; 2019-2022, Open source contributors"
author = "Jeffrey Whitaker"
version = release = importlib.metadata.version("pyproj")
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store", "**.ipynb_checkpoints"]
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "material"
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output -------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = os.getenv("PYPROJ_HTML_THEME", "furo")
html_logo = "media/logo.png"
html_favicon = "media/icon.png"
# Add any paths that contain custom static files (such as style sheets) here,
# html_static_path = ["_static"]
# -- Options for manual page output ------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [("cli", "pyproj", "pyproj CLI", [author], 1)]
|
{
"content_hash": "699edb1d3180269bc33b0e4a23f66020",
"timestamp": "",
"source": "github",
"line_count": 67,
"max_line_length": 79,
"avg_line_length": 30.313432835820894,
"alnum_prop": 0.6597735105859183,
"repo_name": "ocefpaf/pyproj",
"id": "66ba52e34eca2c551d92f342ab27e8c855f17611",
"size": "2031",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "docs/conf.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Cython",
"bytes": "194556"
},
{
"name": "Makefile",
"bytes": "2671"
},
{
"name": "Python",
"bytes": "649387"
},
{
"name": "Shell",
"bytes": "10347"
}
],
"symlink_target": ""
}
|
import polyadcirc.run_framework.domain as dom
import polyadcirc.run_framework.random_wall as rmw
import numpy as np
adcirc_dir = '/work/01837/lcgraham/v50_subdomain/work'
grid_dir = adcirc_dir + '/ADCIRC_landuse/Inlet/inputs/poly_walls'
save_dir = adcirc_dir + '/ADCIRC_landuse/Inlet/runs/few_walls'
basis_dir = adcirc_dir +'/ADCIRC_landuse/Inlet/landuse_basis/gap/beach_walls_2lands'
script = "runWALLrun.sh"
# setup and save to shelf
# set up saving
save_file = 'py_save_file'
timeseries_files = ["fort.63"]
nontimeseries_files = ["tinun.63", "maxvel.63", "maxele.63", "timemax63"]
# NoN x 12 / TpN, where NoN is the number of nodes and TpN is the tasks per node
# (12 is the number of cores per node). See the -pe line in submission_script:
# <TpN>way<NoN x 12>.
nprocs = 2 # number of processors per PADCIRC run
ppnode = 12
NoN = 44
num_of_parallel_runs = (ppnode*NoN)/nprocs # procs_pnode * NoN / nproc
main_run = rmw.runSet(grid_dir, save_dir, basis_dir, num_of_parallel_runs,
base_dir=adcirc_dir, script_name=script)
main_run.initialize_random_field_directories(num_procs=nprocs)
domain = dom.domain(grid_dir)
domain.update()
# Set samples
lam_domain = np.array([[.07, .15], [.1, .2]])
lam1 = np.linspace(lam_domain[0, 0], lam_domain[0, 1], 20)
lam2 = np.linspace(lam_domain[1, 0], lam_domain[1, 1], 20)
lam4 = 0.012
lam1, lam2, lam4 = np.meshgrid(lam1, lam2, lam4)
lam_samples = np.column_stack((lam1.ravel(), lam2.ravel(), lam4.ravel()))
mann_pts = lam_samples.transpose()
num_walls = 6
ymin = np.linspace(1500, -1500, num_walls)
xmin = 1420*np.ones(ymin.shape)
xmax = 1580*np.ones(ymin.shape)
ymax = 1500*np.ones(ymin.shape)
wall_height = -2.5*np.ones(ymax.shape)
# box_limits [xmin, xmax, ymin, ymax, wall_height]
wall_points = np.column_stack((xmin, xmax, ymin, ymax, wall_height))
wall_points = wall_points.transpose()
mann_pts = np.tile(mann_pts, num_walls)
# Run experiments
# MainFile_RandomMann
main_run.run_points(domain, wall_points, mann_pts, save_file,
num_procs=nprocs, procs_pnode=ppnode, ts_names=timeseries_files,
nts_names=nontimeseries_files)
|
{
"content_hash": "7b5e2fcf991fec75d5a80ad042deb16b",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 84,
"avg_line_length": 30.808823529411764,
"alnum_prop": 0.7107398568019093,
"repo_name": "UT-CHG/PolyADCIRC",
"id": "441b6b4f55a385e22118a3de6c4501c6f69b4d05",
"size": "2145",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/run_framework/random_wall.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "FORTRAN",
"bytes": "92757"
},
{
"name": "Python",
"bytes": "314633"
}
],
"symlink_target": ""
}
|
import glob
import optparse
import re
def ParseTest(lines):
r"""Parses section-based test.
Args:
lines: list of \n-terminated strings.
Returns:
List of string pairs (field name, field content) in order. Field content is
concatenation of \n-terminated lines, so it's either empty or ends with \n.
"""
fields = []
field_data = {}
current_field = None
for line in lines:
if line.startswith(' '):
assert current_field is not None, line
field_data[current_field].append(line[2:])
else:
match = re.match('@(\S+):$', line)
if match is None:
raise Exception('Bad line: %r' % line)
current_field = match.group(1)
assert current_field not in field_data, current_field
field_data[current_field] = []
fields.append(current_field)
return [(field, ''.join(field_data[field])) for field in fields]
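# For example (a sketch of the section format described above): the lines
#
#   @hex:
#     90 90
#   @dis:
#     nop
#
# parse to [('hex', '90 90\n'), ('dis', 'nop\n')]
# (field lines start at column 0, content lines are indented by two spaces).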
def SplitLines(lines, separator_regex):
"""Split sequence of lines into sequence of list of lines.
Args:
lines: sequence of strings.
separator_regex: separator regex.
Yields:
Nonempty sequence of (possibly empty) lists of strings. Separator lines
are not included.
"""
part = []
for line in lines:
if re.match(separator_regex, line):
yield part
part = []
else:
part.append(line)
yield part
def LoadTestFile(filename):
r"""Loads and parses .test file.
Args:
filename: filename.
Returns:
List of tests (see ParseTest).
"""
with open(filename) as file_in:
return map(ParseTest, SplitLines(file_in, r'-{3,}\s*$'))
def UnparseTest(items_list):
"""Convert test to sequence of \n-terminated strings
Args:
items_list: list of string pairs (see ParseTest).
Yields:
Sequence of \n-terminated strings.
"""
for field, content in items_list:
yield '@%s:\n' % field
if content == '':
continue
assert content.endswith('\n')
content = content[:-1]
for line in content.split('\n'):
yield ' %s\n' % line
def SaveTestFile(tests, filename):
r"""Saves .test file
Args:
tests: list of tests (see ParseTest).
filename: filename.
Returns:
None.
"""
with open(filename, 'w') as file_out:
first = True
for test in tests:
if not first:
file_out.write('-' * 70 + '\n')
first = False
for line in UnparseTest(test):
file_out.write(line)
def ParseHex(hex_content):
"""Parse content of @hex section and return binary data
Args:
hex_content: Content of @hex section as a string.
Yields:
Chunks of binary data corresponding to lines of given @hex section (as
strings). If line ends with r'\\', chunk is continued on the following line.
"""
bytes = []
for line in hex_content.split('\n'):
line, sep, comment = line.partition('#')
line = line.strip()
if line == '':
continue
if line.endswith(r'\\'):
line = line[:-2]
continuation = True
else:
continuation = False
for byte in line.split():
assert len(byte) == 2
bytes.append(chr(int(byte, 16)))
if not continuation:
assert len(bytes) > 0
yield ''.join(bytes)
bytes = []
assert bytes == [], r'r"\\" should not appear on the last line'
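# For example (a sketch; '#' comments are stripped and a trailing r'\\' continues a chunk):
#
#   list(ParseHex('90 90  # two nops\n48 89 e5'))  =>  ['\x90\x90', 'H\x89\xe5']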
def AssertEquals(actual, expected):
if actual != expected:
raise AssertionError('\nEXPECTED:\n"""\n%s"""\n\nACTUAL:\n"""\n%s"""'
% (expected, actual))
class TestRunner(object):
SECTION_NAME = None
def CommandLineOptions(self, parser):
pass
def GetSectionContent(self, options, sections):
raise NotImplementedError()
def Test(self, options, items_list):
info = dict(items_list)
assert self.SECTION_NAME in info
content = self.GetSectionContent(options, info)
print ' Checking %s field...' % self.SECTION_NAME
if options.update:
if content != info[self.SECTION_NAME]:
print ' Updating %s field...' % self.SECTION_NAME
info[self.SECTION_NAME] = content
else:
AssertEquals(content, info[self.SECTION_NAME])
# Update field values, but preserve their order.
items_list = [(field, info[field]) for field, _ in items_list]
return items_list
def Run(self, argv):
parser = optparse.OptionParser()
parser.add_option('--bits',
type=int,
help='The subarchitecture to run tests against: 32 or 64')
parser.add_option('--update',
default=False,
action='store_true',
help='Regenerate golden fields instead of testing')
self.CommandLineOptions(parser)
options, args = parser.parse_args(argv)
if options.bits not in [32, 64]:
parser.error('specify --bits 32 or --bits 64')
if len(args) == 0:
parser.error('No test files specified')
processed = 0
for glob_expr in args:
test_files = sorted(glob.glob(glob_expr))
if len(test_files) == 0:
raise AssertionError(
'%r matched no files, which was probably not intended' % glob_expr)
for test_file in test_files:
print 'Testing %s...' % test_file
tests = LoadTestFile(test_file)
tests = [self.Test(options, test) for test in tests]
if options.update:
SaveTestFile(tests, test_file)
processed += 1
print '%s test files were processed.' % processed
|
{
"content_hash": "a74ccb8a12c0323b6c80abf51ac34fe6",
"timestamp": "",
"source": "github",
"line_count": 211,
"max_line_length": 80,
"avg_line_length": 25.635071090047393,
"alnum_prop": 0.6169347383989647,
"repo_name": "CTSRD-SOAAP/chromium-42.0.2311.135",
"id": "341d1f3366e4e063b8ca72b81117531fc847bd17",
"size": "5581",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "native_client/src/trusted/validator_ragel/test_format.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "AppleScript",
"bytes": "8402"
},
{
"name": "Assembly",
"bytes": "241154"
},
{
"name": "C",
"bytes": "12370053"
},
{
"name": "C++",
"bytes": "266788423"
},
{
"name": "CMake",
"bytes": "27829"
},
{
"name": "CSS",
"bytes": "813488"
},
{
"name": "Emacs Lisp",
"bytes": "2360"
},
{
"name": "Go",
"bytes": "13628"
},
{
"name": "Groff",
"bytes": "5283"
},
{
"name": "HTML",
"bytes": "20131029"
},
{
"name": "Java",
"bytes": "8495790"
},
{
"name": "JavaScript",
"bytes": "12980966"
},
{
"name": "LLVM",
"bytes": "1169"
},
{
"name": "Logos",
"bytes": "6893"
},
{
"name": "Lua",
"bytes": "16189"
},
{
"name": "Makefile",
"bytes": "208709"
},
{
"name": "Objective-C",
"bytes": "1509363"
},
{
"name": "Objective-C++",
"bytes": "7960581"
},
{
"name": "PLpgSQL",
"bytes": "215882"
},
{
"name": "Perl",
"bytes": "63937"
},
{
"name": "Protocol Buffer",
"bytes": "432373"
},
{
"name": "Python",
"bytes": "11147426"
},
{
"name": "Ragel in Ruby Host",
"bytes": "104923"
},
{
"name": "Scheme",
"bytes": "10604"
},
{
"name": "Shell",
"bytes": "1207731"
},
{
"name": "Standard ML",
"bytes": "4965"
},
{
"name": "VimL",
"bytes": "4075"
},
{
"name": "nesC",
"bytes": "18347"
}
],
"symlink_target": ""
}
|
__author__ = "Simone Campagna"
__all__ = [
'create_file_dir',
]
import os
def create_file_dir(filename):
dirname, filename = os.path.split(os.path.abspath(filename))
if not os.path.isdir(dirname):
os.makedirs(dirname)
return dirname
|
{
"content_hash": "b640f053a764bcd813837260f57e6818",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 63,
"avg_line_length": 21.25,
"alnum_prop": 0.6588235294117647,
"repo_name": "simone-campagna/sheru",
"id": "dff57dede037a9424378c66f26dc734af5a57069",
"size": "863",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/sheru/files.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "53950"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
"""
Misc. functions/utils
"""
import pandas as pd
import numpy as np
from hub.structures.frame import OdhType, OdhSeries
from hub.odhql.functions.core import VectorizedFunction
class NVL(VectorizedFunction):
"""
    Returns the second argument if the first one is NULL.
    Parameters
    - `a`: column or value to be checked for NULL
    - `b`: column or value to be used as a replacement
    Example
    .. code:: sql
        NVL(ODH12.title, '') as title
"""
name = 'NVL'
def apply(self, a, b):
return a.where(~pd.isnull(a), b)
class Round(VectorizedFunction):
"""
    Rounds to the given number of decimal places.
    Parameters
    - `col`: column or value to be rounded. Must be of data type FLOAT.
    - `decimals`: number of decimal places to round to.
    Example
    .. code:: sql
        ROUND(ODH20.fraction, 4) AS fraction
"""
name = 'ROUND'
def apply(self, col, decimals):
self.assert_float('column', col)
self.assert_int('decimals', decimals)
self.assert_value()
return self.expand(col).round(decimals)
class Cast(VectorizedFunction):
"""
    Converts the data type of a column or of a single value.
    Parameters
    - `values`: column or value to be converted.
    - `datatype`: valid ODHQL data type
    Example
    .. code:: sql
        CAST(ODH42.age, 'INTEGER') AS age
"""
name = 'CAST'
def apply(self, values, datatype):
datatype = datatype.upper()
self.assert_in('type', datatype.upper(), OdhType.by_name.keys())
odh_type = OdhType.by_name[datatype]
with self.errorhandler('Unable to cast ({exception})'):
return odh_type.convert(self.expand(values))
class ToDate(VectorizedFunction):
"""
    Converts a date in TEXT form to DATETIME.
    Parameters
    - `values`: column or value to be converted.
    - `format`: format specification. See the `documentation <https://docs.python.org/2/library/time.html#time.strftime>`_.
    Example
    .. code:: sql
        TO_DATE(ODH5.baubeginn, '%d%m%Y') AS baubeginn
"""
name = 'TO_DATE'
def apply(self, values, format=None):
values = self.expand(values)
self.assert_str(0, values)
with self.errorhandler('Unable to parse datetimes ({exception})'):
return pd.to_datetime(values, format=format, infer_datetime_format=True, coerce=True)
class Range(VectorizedFunction):
"""
    Returns a sequence of integers, suitable e.g. for creating artificial IDs.
    Parameters
    - `start`: first value of the sequence.
    - `step`: spacing between the integers.
    Example
    .. code:: sql
        RANGE() AS id
"""
name = 'RANGE'
def apply(self, start=1, step=1):
self.assert_value('start', start)
self.assert_value('step', step)
self.assert_int('start', start)
self.assert_int('step', step)
stop = (start + self.num_rows) * step
return OdhSeries(np.arange(start, stop, step))
|
{
"content_hash": "253e2f73c88dfddd91ef421f57956a59",
"timestamp": "",
"source": "github",
"line_count": 121,
"max_line_length": 118,
"avg_line_length": 26.826446280991735,
"alnum_prop": 0.6275415896487985,
"repo_name": "hsr-ba-fs15-dat/opendatahub",
"id": "82bd35f8b54b0ec8dd2e499ac74183760cb23d99",
"size": "3274",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/main/python/hub/odhql/functions/misc.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "25670"
},
{
"name": "HTML",
"bytes": "75529"
},
{
"name": "JavaScript",
"bytes": "24398"
},
{
"name": "Makefile",
"bytes": "1995"
},
{
"name": "PLSQL",
"bytes": "9414"
},
{
"name": "Python",
"bytes": "334952"
},
{
"name": "Shell",
"bytes": "280"
},
{
"name": "TypeScript",
"bytes": "111755"
}
],
"symlink_target": ""
}
|
import itertools
import toyplot
import numpy as np
from .Render import split_rgba_style
from .NodeAssist import NodeAssist
from .TreeStyle import COLORS1
from .utils import ToytreeError
ITERABLE = (list, tuple, np.ndarray)
"""
Checks style dictionaries for types and size and builds out arguments
into arrays for variable styles.
"""
class StyleChecker:
"""
Checks for allowed styles in style dictionaries and expands args
for individuals styles into arrays while checking types.
"""
def __init__(self, ttree, style):
# input objects
self.ttree = ttree
self.style = style
# dimensions
self.ntips = self.ttree.ntips
self.nnodes = self.ttree.nnodes
self.nedges = self.ttree._coords.edges.shape[0]
# check dictionary inputs
self.check_dicts()
# expand to arrays and sometimes set 'fill' or 'stroke' in dicts.
self.expand_vars()
def check_dicts(self):
"""
Checks style dicts for allowable styles.
TODO: check for None and convert to "none"?
TODO: convert all fill/stroke to rgb and stroke-opacity.
"""
# check for allowable types
self.style.node_style = toyplot.style.require(
self.style.node_style, toyplot.style.allowed.marker)
self.style.edge_style = toyplot.style.require(
self.style.edge_style, toyplot.style.allowed.line)
self.style.edge_align_style = toyplot.style.require(
self.style.edge_align_style, toyplot.style.allowed.line)
self.style.tip_labels_style = toyplot.style.require(
self.style.tip_labels_style, toyplot.style.allowed.text)
self.style.node_labels_style = toyplot.style.require(
self.style.node_labels_style, toyplot.style.allowed.text)
sdicts = [
self.style.node_style,
self.style.edge_style,
self.style.edge_align_style,
self.style.tip_labels_style,
self.style.node_labels_style,
]
# check for None and split colors
for sdict in sdicts:
            # Next, set the fill or stroke by splitting rgba into rgb and opacity,
            # and set the corresponding -opacity unless the user explicitly set it already.
for coltype in ['fill', 'stroke']:
if sdict.get(coltype):
# convert to css color if not already
color = sdict[coltype]
try:
color = toyplot.color.css(color)
except Exception:
pass
# split the color
coldict = split_rgba_style({coltype: color})
sdict[coltype] = coldict[coltype]
# set the opacity of this type unless already set
okey = "{}-opacity".format(coltype)
if not sdict.get(okey):
sdict[okey] = coldict[okey]
def expand_vars(self):
"""
Expand args for individual styles into arrays of correct length. If
an indiv style was set with only one value then we instead set the
style dict fill or stroke (and opacity) with all properly using rgb
and opacity settings.
"""
self._assign_node_labels()
self._assign_node_colors()
self._assign_node_sizes()
self._assign_node_shapes()
self._assign_node_hover()
self._assign_tip_colors()
self._assign_tip_labels()
self._assign_tip_labels_angles()
self._assign_edge_colors()
self._assign_edge_widths()
self._assign_admixture_idxs()
def _assign_tip_colors(self):
"""
Sets .tip_colors as an array of length ntips. Only fills vals if there
is variation among tips, otherwise leaves empty to use style fill.
tip_colors=None | [None, None, ...]
tip_colors=ITERABLE | [col0, col1, col2, ...]
tip_colors=STR | [None, None, ...] & tip_labels_style['fill']
"""
arg = self.style.tip_labels_colors
# set it all empty
if arg is None:
self.style.tip_labels_colors = toyplot.broadcast.pyobject(None, self.ntips)
else:
self.style.tip_labels_colors = toyplot.color.broadcast(arg, self.ntips, None)
# if all the same then reset to None
if len(set([str(i) for i in self.style.tip_labels_colors])) == 1:
# save the fixed color and set to None
color = self.style.tip_labels_colors[0]
self.style.tip_labels_colors = toyplot.broadcast.pyobject(None, self.ntips)
# set edge_style stroke and stroke-opacity
sub = split_rgba_style({'fill': color})
self.style.tip_labels_style['fill'] = sub["fill"]
self.style.tip_labels_style['fill-opacity'] = sub["fill-opacity"]
else:
# reorder array for fixed if needed
if self.style.fixed_order:
self.style.tip_labels_colors = self.style.tip_labels_colors[self.ttree._fixed_idx]
def _assign_tip_labels(self):
"""
Sets .tip_labels as an array of length ntips.
tip_labels=None | ['a', 'b', 'c'] or fixed_order
tip_labels=True | ['a', 'b', 'c'] or fixed_order
tip_labels=False | [None, None, None]
tip_labels=['a', 'c', 'b'] | ['a', 'c', 'b']
"""
arg = self.style.tip_labels
# set it
if arg is False:
self.style.tip_labels = None
elif isinstance(arg, ITERABLE):
pass
else: # arg in [None, True]:
self.style.tip_labels = self.ttree.get_tip_labels()
# check length
self.style.tip_labels = toyplot.broadcast.pyobject(
self.style.tip_labels, self.ntips)
def _assign_tip_labels_angles(self):
"""
Sets .tip_labels_angles as an array of length ntips. No user
args for this one, it inherits from the layout type.
"""
# set circular layout
if self.style.layout == 'c':
# define range of tip radians
tip_radians = np.linspace(0, -np.pi * 2, self.ntips + 1)[:-1]
angles = np.array([np.rad2deg(abs(i)) for i in tip_radians]) * -1
elif self.style.layout == "u":
angles = -90
elif self.style.layout == "d":
angles = -90
elif self.style.layout == "l":
angles = 0
else:
angles = 0
self.style.__dict__['tip_labels_angles'] = toyplot.broadcast.scalar(angles, self.ntips)
def _assign_node_labels(self):
"""
Sets .node_labels array of length nnodes. In addition to being the
text labels on nodes these features can also affect whether node
marks will be shown. Suppressed markers are recorded with nan as
the value label. To show the node mark but with empty labels a None
value is used.
node_labels=None | [None, None, None,]
node_labels=False | [None, None, None,]
node_labels=True | [0, 1, 2, 3]
node_labels=ITERABLE | [100, 90, 95, ...]
node_labels=STR in .feat, e.g., "idx" | [nan, nan, 2, ..., nan]
node_labels=TUPLE, e.g., ("idx", 1, 0) | [0, 1, 2, ..., nan]
"""
arg = self.style.node_labels
# set it.
if arg is True:
self.style.node_labels = self.ttree.get_node_values("idx", 1, 1)[::-1]
elif arg is False:
self.style.node_labels = None
elif arg is None:
self.style.node_labels = None
elif isinstance(arg, (str, bytes)):
if arg in self.ttree.features:
self.style.node_labels = self.ttree.get_node_values(arg, 0, 0)[::-1]
elif isinstance(arg, tuple) and (len(arg) == 3):
if arg[0] in self.ttree.features:
self.style.node_labels = self.ttree.get_node_values(*arg)[::-1]
elif isinstance(arg, ITERABLE):
self.style.node_labels = arg[::-1]
else:
raise ToytreeError(
"node_labels argument not recognized: {}".format(arg))
# check length
self.style.node_labels = toyplot.broadcast.pyobject(
self.style.node_labels, self.nnodes)
def _assign_node_shapes(self):
"""
        Sets .node_markers as an array of length nnodes in levelorder
node_markers=None | ['o', 'o', 'o', ...]
node_markers=STR | ['o', 'o', 'o', ...]
        node_markers=ITERABLE | ['o', 's', 'o', ...]
"""
arg = self.style.node_markers
if isinstance(arg, ITERABLE):
self.style.node_markers = arg[::-1]
elif isinstance(arg, (bytes, str)):
self.style.node_markers = arg
else:
self.style.node_markers = 'o'
# project to size
self.style.node_markers = toyplot.broadcast.pyobject(
self.style.node_markers, self.nnodes)
def _assign_node_sizes(self):
"""
Sets .node_sizes as an array of length nnodes in levelorder
node_sizes=None | [None, None, None, ...]
node_sizes=INT | [12, 12, 12, 12, ...]
node_sizes=ITERABLE | [5, 12, 20, 5, ...]
"""
#
arg = self.style.node_sizes
if isinstance(arg, ITERABLE):
self.style.node_sizes = arg[::-1]
elif isinstance(arg, (int, float)):
self.style.node_sizes = arg
else:
self.style.node_sizes = None
# project to size
self.style.node_sizes = toyplot.broadcast.pyobject(
self.style.node_sizes, self.nnodes)
# special: hide nodes with labels that are (TODO: nan) for now ("")
        # but only if arg was None or INT, not if it was explicitly a list type.
if isinstance(arg, (int, float)):
self.style.node_sizes[self.style.node_labels == ""] = 0
def _assign_node_hover(self):
"""
Sets .node_hover as an array of length nnodes in levelorder.
node_hover=None | [None, None, None, ...]
node_hover=True | [node_idx=31\nname=blah\nfeat=x, ...]
node_hover="idx" | [31, 30, 29, 28, ...]
node_hover=ITERABLE | ['a', 'b', 'c', 'd', ...]
"""
#
arg = self.style.node_hover
if isinstance(arg, ITERABLE):
self.style.node_hover = arg[::-1]
elif arg is None:
self.style.node_hover = None
elif arg is False:
self.style.node_hover = None
elif arg is True:
ordered_features = ["idx", "dist", "support", "height"]
lfeatures = list(set(self.ttree.features) - set(ordered_features))
ordered_features += lfeatures
# build list of hoverstrings in order of idxs
self.style.node_hover = [" "] * self.ttree.nnodes
for idx in self.ttree.idx_dict:
feats = []
node = self.ttree.idx_dict[idx]
for feature in ordered_features:
val = getattr(node, feature)
if isinstance(val, float):
feats.append("{}: {:.4f}".format(feature, val))
else:
feats.append("{}: {}".format(feature, val))
self.style.node_hover[idx] = "\n".join(feats)
# already in levelorder from idx_dict iter
self.style.node_hover = self.style.node_hover
# project to size
self.style.node_hover = toyplot.broadcast.pyobject(
self.style.node_hover, self.nnodes)
# special: hide nodes with labels that are (TODO: nan) for now ("")
# node_labels is already in levelorder.
self.style.node_hover[self.style.node_labels == ""] = None
def _assign_node_colors(self):
"""
        Sets .node_colors as an array of length nnodes. Default is to
        leave the list as None so that the default style fill applies to all.
node_colors=None | [None, None, None, ...]
node_colors='red' | [None, None, ...] & sets node_style['fill']
node_colors=ITERABLE | ['red', 'blue', 'green']
"""
arg = self.style.node_colors
# set to empty
if arg is None:
self.style.node_colors = toyplot.broadcast.pyobject(arg, self.nnodes)
# fill array with whatever was provided
else:
self.style.node_colors = toyplot.color.broadcast(arg, self.nnodes)[::-1]
# if all the same then reset to None
if len(set([str(i) for i in self.style.node_colors])) == 1:
# save the fixed color and set to None
color = self.style.node_colors[0]
self.style.node_colors = toyplot.broadcast.pyobject(None, self.nnodes)
            # set node_style fill and fill-opacity
sub = split_rgba_style({'fill': color})
self.style.node_style['fill'] = sub["fill"]
self.style.node_style['fill-opacity'] = sub["fill-opacity"]
def _assign_edge_colors(self):
"""
Sets .edge_colors as an array of length nedges. Default is to
leave the list as None so that the default style stroke fills all.
edge_colors=None | [None, None, None, ...]
        edge_colors='red' | [None, None, ...] & sets edge_style['stroke']
edge_colors=ITERABLE | ['red', 'blue', 'green']
"""
arg = self.style.edge_colors
# set to empty
if arg is None:
self.style.edge_colors = toyplot.broadcast.pyobject(arg, self.nedges)
# fill array with whatever was provided
else:
self.style.edge_colors = toyplot.color.broadcast(arg, self.nedges)[::-1]
# if all the same then reset to None
if len(set([str(i) for i in self.style.edge_colors])) == 1:
# save the fixed color and set to None
color = self.style.edge_colors[0]
self.style.edge_colors = toyplot.broadcast.pyobject(None, self.nedges)
# set edge_style stroke and stroke-opacity
sub = split_rgba_style({'stroke': color})
self.style.edge_style['stroke'] = sub["stroke"]
# only set sub opacity if it is not already at 1, otherwise
# keep the edgestyledict opacity b/c user probably set it.
if self.style.edge_style["stroke-opacity"] == 1:
self.style.edge_style['stroke-opacity'] = sub["stroke-opacity"]
def _assign_edge_widths(self):
"""
Sets .edge_widths as an array of length nedges. Default is to
leave the list as None so that the default style stroke-width sets
        all values. There is a special option for the entry "Ne" which will
        auto-scale edge widths by the range of Ne values on the TreeNodes.
edge_widths=None | [None, None, None, ...]
edge_widths=2 | [None, None, ...] & edge_style[stroke-width]
edge_widths=ITERABLE | [2, 3, 3, 2, ...]
edge_widths='Ne' | [2, 2.5, 5, 2, ...]
"""
arg = self.style.edge_widths
# set to empty
if arg is None:
self.style.edge_widths = toyplot.broadcast.pyobject(arg, self.nedges)
# fill array with whatever was provided
else:
if str(arg) == "Ne":
try:
arg = self.ttree.get_edge_values("Ne", normalize=True)
except Exception:
arg = np.repeat(2, self.nedges)
self.style.edge_widths = toyplot.broadcast.pyobject(arg, self.nedges)[::-1]
# if all the same then reset to None
if len(set([str(i) for i in self.style.edge_widths])) == 1:
            # save the fixed width and set to None
width = self.style.edge_widths[0]
self.style.edge_widths = toyplot.broadcast.pyobject(None, self.nedges)
            # set edge_style stroke-width
self.style.edge_style['stroke-width'] = width
def _assign_admixture_idxs(self):
"""
Check the input admixture arg list and expand/check so
that final format is:
admixture_edges = [
(src_idx, dest_idx, (src_time, dest_time), styledict, label)
]
"""
arg = self.style.admixture_edges
# bail if empty
if arg is None:
return
# if tuple then expand into a list
if isinstance(arg, tuple):
arg = [arg]
colors = itertools.cycle(COLORS1)
next(colors)
admix_tuples = []
for atup in arg:
# required src, dest
if isinstance(atup[0], (str, list, tuple)):
nas = NodeAssist(self.ttree, atup[0], None, None)
node = nas.get_mrca()
if not node.is_root():
src = node.idx
else:
nas.match_reciprocal()
src = nas.get_mrca().idx
else:
src = int(atup[0])
if isinstance(atup[1], (str, list, tuple)):
nas = NodeAssist(self.ttree, atup[1], None, None)
node = nas.get_mrca()
if not node.is_root():
dest = node.idx
else:
nas.match_reciprocal()
dest = nas.get_mrca().idx
else:
dest = int(atup[1])
# optional: proportion on edges
if len(atup) > 2:
if isinstance(atup[2], (int, float)):
prop = float(atup[2])
else:
prop = (float(atup[2][0]), float(atup[2][1]))
else:
prop = 0.5
# optional: style dictionary
if len(atup) > 3:
style = dict(atup[3])
else:
style = {}
# optional label on midpoint
if len(atup) > 4:
label = str(atup[4])
else:
label = None
# color setting and correction
if "stroke" not in style:
style['stroke'] = next(colors)
if "stroke-opacity" not in style:
style['stroke-opacity'] = '0.7'
# colorfix to edge styles
colorfix = split_rgba_style(style.copy())
style['stroke'] = colorfix['stroke']
# check styledict colors, etc
admix_tuples.append((src, dest, prop, style, label))
self.style.admixture_edges = admix_tuples
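    # Accepted admixture_edges input forms (the node indices, taxon names and
    # style values below are illustrative only):
    #   admixture_edges = (3, 5)                            # src, dest only
    #   admixture_edges = [("taxonA", "taxonB", 0.5,
    #                       {"stroke": "red"}, "migrant")]  # full tuple form
    # After expansion each entry is (src_idx, dest_idx, prop, style, label),
    # where prop may be a single float or a (src_time, dest_time)-style pair.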
|
{
"content_hash": "3a35dfefee8bb4054101396410f58efb",
"timestamp": "",
"source": "github",
"line_count": 532,
"max_line_length": 102,
"avg_line_length": 35.9078947368421,
"alnum_prop": 0.5341569387007277,
"repo_name": "eaton-lab/toytree",
"id": "63879548afb57d615e04174431bb4a6c001ff5e0",
"size": "19126",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "toytree/StyleChecker.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "10062307"
},
{
"name": "Python",
"bytes": "436929"
},
{
"name": "TeX",
"bytes": "52374"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('articles', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='article',
name='owner',
field=models.ForeignKey(blank=True, to=settings.AUTH_USER_MODEL, null=True),
),
migrations.AddField(
model_name='article',
name='published',
field=models.BooleanField(default=True),
),
]
|
{
"content_hash": "3dd4ba80543d4b542264dc8b0e91787a",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 88,
"avg_line_length": 26.44,
"alnum_prop": 0.6081694402420574,
"repo_name": "wahuneke/django-skd-smoke",
"id": "a065ffd48b38e255590fa5480ba61742abdcef4d",
"size": "685",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "example_project/articles/migrations/0002_auto_20151009_1404.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "2178"
},
{
"name": "Python",
"bytes": "67851"
}
],
"symlink_target": ""
}
|
"""
On the Subject of Wires
:Copyright: 2015 Jochen Kupperschmidt
:License: MIT, see LICENSE for details.
"""
from ..templating import render_template
def render_page():
return render_template('wires')
|
{
"content_hash": "c2ffd048c46a77ade4cee1487413a415",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 40,
"avg_line_length": 17.5,
"alnum_prop": 0.7285714285714285,
"repo_name": "homeworkprod/better-bomb-defusal-manual",
"id": "46306def7698e812422ca2e05467d6c58c60dd91",
"size": "235",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bombdefusalmanual/pages/wires.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "15194"
},
{
"name": "Python",
"bytes": "38038"
}
],
"symlink_target": ""
}
|
class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution:
# @param preorder, a list of integers
# @param inorder, a list of integers
# @return a tree node
def buildTree(self, preorder, inorder):
lookup = {}
for i, num in enumerate(inorder):
lookup[num] = i
return self.buildTreeRecu(lookup, preorder, inorder, 0, 0, len(inorder))
def buildTreeRecu(self, lookup, preorder, inorder, pre_start, in_start, in_end):
if in_start == in_end:
return None
node = TreeNode(preorder[pre_start])
i = lookup[preorder[pre_start]]
node.left = self.buildTreeRecu(lookup, preorder, inorder, pre_start + 1, in_start, i)
node.right = self.buildTreeRecu(lookup, preorder, inorder, pre_start + 1 + i - in_start, i + 1, in_end)
return node
if __name__ == "__main__":
preorder = [1, 2, 3]
inorder = [2, 1, 3]
result = Solution().buildTree(preorder, inorder)
print result.val
print result.left.val
print result.right.val
|
{
"content_hash": "1e9eb762ce5a7b2b0cc52d67e7d7c36c",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 111,
"avg_line_length": 34.90625,
"alnum_prop": 0.5989256938227395,
"repo_name": "jaredkoontz/leetcode",
"id": "e3196454d25ea8180353da88821cc51cff142b03",
"size": "1334",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "Python/construct-binary-tree-from-preorder-and-inorder-traversal.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "1443094"
},
{
"name": "Go",
"bytes": "2824"
},
{
"name": "Java",
"bytes": "477246"
},
{
"name": "Python",
"bytes": "833880"
},
{
"name": "SQLPL",
"bytes": "822"
},
{
"name": "Shell",
"bytes": "13635"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
import annoying.fields
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('ERP', '0024_auto_20170328_1757'),
]
operations = [
migrations.AlterField(
model_name='modelgridfield',
name='columns',
field=annoying.fields.JSONField(blank=True, default=None, null=True, verbose_name='\u8868\u5934'),
),
migrations.AlterField(
model_name='modelgridfield',
name='searches',
field=annoying.fields.JSONField(blank=True, default=None, null=True, verbose_name='\u641c\u7d22\u9879'),
),
]
|
{
"content_hash": "175e6491bb9566ae43659dce2cec7c4e",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 116,
"avg_line_length": 28.625,
"alnum_prop": 0.61863173216885,
"repo_name": "Pandaaaa906/ChemErpSystem",
"id": "eca746229b4e31cce5e0227d92ebde8538ef44ed",
"size": "760",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ERP/migrations/0025_auto_20170328_1758.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "101"
},
{
"name": "CSS",
"bytes": "889524"
},
{
"name": "HTML",
"bytes": "1572825"
},
{
"name": "JavaScript",
"bytes": "7586850"
},
{
"name": "PHP",
"bytes": "496"
},
{
"name": "Python",
"bytes": "216883"
},
{
"name": "Shell",
"bytes": "291"
}
],
"symlink_target": ""
}
|
from supybot.test import *
class NetworkTestCase(PluginTestCase):
plugins = ['Network', 'Utilities']
def testNetworks(self):
self.assertNotError('networks')
def testCommand(self):
self.assertResponse('network command %s echo 1' % self.irc.network,
'1')
# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79:
|
{
"content_hash": "52d36880f0593582ccfa7aa6679a2d02",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 75,
"avg_line_length": 26.785714285714285,
"alnum_prop": 0.648,
"repo_name": "buildbot/supybot",
"id": "bd0ab11bec917c24b9ccac95b73f2bd8199f188e",
"size": "1964",
"binary": false,
"copies": "20",
"ref": "refs/heads/master",
"path": "plugins/Network/test.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "2026939"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
import six
from .initializer import Initializer, Xavier, Constant
from .regularizer import WeightDecayRegularizer
__all__ = [
'ParamAttr',
'WeightNormParamAttr',
]
class ParamAttr(object):
"""
Parameter attributes object. To fine-tuning network training process, user
can set parameter's attributes to control training details. Such as learning rate,
regularization, trainable, do_model_average and the method to initialize param.
Args:
name(str): The parameter's name. Default None.
initializer(Initializer): The method to initial this parameter. Default None.
learning_rate(float): The parameter's learning rate. The learning rate when
optimize is :math:`global\_lr * parameter\_lr * scheduler\_factor`.
Default 1.0.
regularizer(WeightDecayRegularizer): Regularization factor. Default None.
trainable(bool): Whether this parameter is trainable. Default True.
gradient_clip(BaseGradientClipAttr): The method to clip this parameter's
gradient. Default None.
do_model_average(bool): Whether this parameter should do model average.
Default False.
Examples:
.. code-block:: python
w_param_attrs = fluid.ParamAttr(name="fc_weight",
learning_rate=0.5,
regularizer=fluid.L2Decay(1.0),
trainable=True)
y_predict = fluid.layers.fc(input=x, size=10, param_attr=w_param_attrs)
"""
def __init__(self,
name=None,
initializer=None,
learning_rate=1.0,
regularizer=None,
trainable=True,
gradient_clip=None,
do_model_average=False):
self.name = name
self.initializer = initializer
self.learning_rate = learning_rate
self.regularizer = regularizer
self.trainable = trainable
self.gradient_clip = gradient_clip
self.model_average = do_model_average
def _set_default_initializer(self, initializer):
"""
Set the default initializer, the initializer should be Constant,
Uniform, Normal, Xavier, MSRA.
Args:
initializer(Initializer): the initializer to set.
Returns:
None
"""
if initializer is None:
if self.initializer is None:
raise ValueError("ParamAttr.initializer is not set")
return
if self.initializer is not None:
return
self.initializer = initializer
def _set_default_param_initializer(self):
"""
Set the default initializer for the parameter with Xavier.
Args:
None.
Returns:
None.
"""
self._set_default_initializer(Xavier())
def _set_default_bias_initializer(self):
"""
Set the default initializer for the bias with Constant(0.0).
Args:
None.
Returns:
None.
"""
self._set_default_initializer(Constant(0.0))
@staticmethod
def _to_attr(arg):
"""
Create ParamAttr[s].
Args:
arg: Arguments to initialize ParamAttr[s]. arg's type can be
str, Initializer, float, WeightDecayRegularizer, BaseGradientClipAttr,
bool, ParamAttr, or a list of above type.
Returns:
ParamAttr[s]: ParamAttr[s] initialized with arg.
Raises:
            TypeError: if arg cannot be cast to a ParamAttr.
"""
if arg is None:
return ParamAttr()
elif isinstance(arg, list) or isinstance(arg, tuple):
return [ParamAttr._to_attr(a) for a in arg]
elif isinstance(arg, ParamAttr):
return arg
elif isinstance(arg, six.string_types):
return ParamAttr(name=arg)
elif isinstance(arg, Initializer):
return ParamAttr(initializer=arg)
elif isinstance(arg, WeightDecayRegularizer):
return ParamAttr(regularizer=arg)
elif isinstance(arg, bool):
return ParamAttr._to_attr(None) if arg else False
else:
            raise TypeError("{0} cannot be cast to ParamAttr".format(type(arg)))
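    # A few illustrative conversions performed by _to_attr (the parameter
    # names used here are hypothetical):
    #   ParamAttr._to_attr("fc_w")          -> ParamAttr(name="fc_w")
    #   ParamAttr._to_attr(Xavier())        -> ParamAttr(initializer=Xavier())
    #   ParamAttr._to_attr([None, "fc_b"])  -> [ParamAttr(), ParamAttr(name="fc_b")]
    #   ParamAttr._to_attr(False)           -> False (parameter disabled)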
def _to_kwargs(self, with_initializer=False):
"""
Returns the attributes of this parameter.
Args:
with_initializer(bool): Whether to add initializer attr.
Returns:
Parameter attributes(map): The attributes of this parameter.
"""
kwargs = {
'name': self.name,
'optimize_attr': {
'learning_rate': self.learning_rate
},
'regularizer': self.regularizer,
'trainable': self.trainable,
'gradient_clip_attr': self.gradient_clip,
'model_average': self.model_average
}
if with_initializer:
kwargs['initializer'] = self.initializer
return kwargs
class WeightNormParamAttr(ParamAttr):
"""
Used for weight Norm. Weight Norm is a reparameterization of the weight vectors
in a neural network that decouples the length of those weight vectors from
their direction. Weight Norm has been implemented as discussed in this
paper: `Weight Normalization: A Simple Reparameterization to Accelerate
Training of Deep Neural Networks
<https://arxiv.org/pdf/1602.07868.pdf>`_.
Args:
        dim(int): The dimension over which to compute the weight norm. Default None.
kwargs: Any field in ParamAttr. Default None.
Examples:
.. code-block:: python
data = fluid.layers.data(name="data", shape=[3, 32, 32], dtype="float32")
fc = fluid.layers.fc(input=data,
size=1000,
param_attr=WeightNormParamAttr(
dim=None,
name='weight_norm_param'))
"""
# List to record the parameters reparameterized by weight normalization.
# If these parameters are treated as Variable rather than Parameter,
# it can be used to discriminate these parameters and help to serialize
    # these parameters for inference.
params_with_weight_norm = []
def __init__(self, dim=None, **kwargs):
super(WeightNormParamAttr, self).__init__(**kwargs)
self.dim = dim
|
{
"content_hash": "b5eb5a4d6d7b4d6b8466bd730c9f8854",
"timestamp": "",
"source": "github",
"line_count": 195,
"max_line_length": 86,
"avg_line_length": 33.63076923076923,
"alnum_prop": 0.5867642573955474,
"repo_name": "QiJune/Paddle",
"id": "f0be794327f51cbbc4202b8b7b401b712b6d66a3",
"size": "7171",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/paddle/fluid/param_attr.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "274815"
},
{
"name": "C++",
"bytes": "8855056"
},
{
"name": "CMake",
"bytes": "304904"
},
{
"name": "Cuda",
"bytes": "1181169"
},
{
"name": "Dockerfile",
"bytes": "8142"
},
{
"name": "Go",
"bytes": "109508"
},
{
"name": "Perl",
"bytes": "11456"
},
{
"name": "Python",
"bytes": "4527408"
},
{
"name": "Shell",
"bytes": "165381"
}
],
"symlink_target": ""
}
|
import simtk.unit as units
from intermol.decorators import accepts_compatible_units
from intermol.forces.abstract_bond_type import AbstractBondType
class G96BondType(AbstractBondType):
__slots__ = ['length', 'k', 'order', 'c']
@accepts_compatible_units(None, None,
length=units.nanometers,
k=units.kilojoules_per_mole * units.nanometers ** (-4),
order=None,
c=None)
def __init__(self, bondingtype1, bondingtype2,
length=0.0 * units.nanometers,
k=0.0 * units.kilojoules_per_mole * units.nanometers ** (-4),
order=1, c=False):
AbstractBondType.__init__(self, bondingtype1, bondingtype2, order, c)
self.length = length
self.k = k
class G96Bond(G96BondType):
"""
stub documentation
"""
def __init__(self, atom1, atom2, bondingtype1=None, bondingtype2=None,
length=0.0 * units.nanometers,
k=0.0 * units.kilojoules_per_mole * units.nanometers ** (-4),
order=1, c=False):
self.atom1 = atom1
self.atom2 = atom2
G96BondType.__init__(self, bondingtype1, bondingtype2,
length=length,
k=k,
order=order, c=c)
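# Minimal usage sketch; the atom indices, bonding types and force-field values
# below are hypothetical and only illustrate the expected units:
#   bond = G96Bond(1, 2,
#                  bondingtype1='CH1', bondingtype2='CH2',
#                  length=0.153 * units.nanometers,
#                  k=7.15e6 * units.kilojoules_per_mole * units.nanometers ** (-4))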
|
{
"content_hash": "3c686aa11ac07705fcb0281ac34a77df",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 85,
"avg_line_length": 36.513513513513516,
"alnum_prop": 0.5440414507772021,
"repo_name": "ctk3b/InterMol",
"id": "6adfadd978c0451b8d1d5b142aa9deac89d7a263",
"size": "1351",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "intermol/forces/g96_bond_type.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "156"
},
{
"name": "Python",
"bytes": "510036"
},
{
"name": "Shell",
"bytes": "795"
}
],
"symlink_target": ""
}
|
'''serializer.py: common python serializer for heron'''
from abc import abstractmethod
try:
import cPickle as pickle
except ImportError:
import pickle
import heronpy.api.cloudpickle as cloudpickle
class IHeronSerializer(object):
"""Serializer interface for Heron"""
@abstractmethod
def initialize(self, config):
"""Initializes the serializer"""
pass
@abstractmethod
def serialize(self, obj):
"""Serialize an object
:param obj: The object to be serialized
:returns: Serialized object as byte string
"""
pass
@abstractmethod
def deserialize(self, input_str):
"""Deserialize an object
:param input_str: Serialized object as byte string
:returns: Deserialized object
"""
pass
class PythonSerializer(IHeronSerializer):
"""Default serializer"""
def initialize(self, config=None):
pass
def serialize(self, obj):
return cloudpickle.dumps(obj)
def deserialize(self, input_str):
return pickle.loads(input_str)
default_serializer = PythonSerializer()
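# A minimal round-trip sketch using the default serializer defined above; the
# payload contents are illustrative only.
if __name__ == "__main__":
    payload = {"stream": "spout-1", "values": [1, 2, 3]}
    data = default_serializer.serialize(payload)
    assert default_serializer.deserialize(data) == payload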
|
{
"content_hash": "fa5d9589a7b1a640a525f58a6d170fba",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 55,
"avg_line_length": 21.80851063829787,
"alnum_prop": 0.7141463414634146,
"repo_name": "ashvina/heron",
"id": "6a922f63b8ad915d5af9f1d6ab4e6ec9cbfca806",
"size": "1874",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "heronpy/api/serializer.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "14063"
},
{
"name": "C++",
"bytes": "1710172"
},
{
"name": "CSS",
"bytes": "109554"
},
{
"name": "HCL",
"bytes": "2114"
},
{
"name": "HTML",
"bytes": "156836"
},
{
"name": "Java",
"bytes": "4695767"
},
{
"name": "JavaScript",
"bytes": "1112006"
},
{
"name": "M4",
"bytes": "17941"
},
{
"name": "Makefile",
"bytes": "1046"
},
{
"name": "Objective-C",
"bytes": "2143"
},
{
"name": "Perl",
"bytes": "9298"
},
{
"name": "Python",
"bytes": "1635113"
},
{
"name": "Ruby",
"bytes": "1930"
},
{
"name": "Scala",
"bytes": "83215"
},
{
"name": "Shell",
"bytes": "162792"
},
{
"name": "Smarty",
"bytes": "528"
}
],
"symlink_target": ""
}
|
"""
This example is using the XKCD downloader (https://github.com/1995eaton/xkcd_downloader)
"""
import os
import numpy as np
import scipy.misc
import plusminus
import glob
# Basic example with black theme
class Basic(plusminus.PlusMinus):
def __init__(self):
plusminus.PlusMinus.__init__(self)
self.height = 1200 # window height in px
self.id, self.image = self.get_random_image() # initial image
self.status = None # initial status
self.leftmsg.emit("XKCD %d" %self.id)
def get_random_image(self):
os.system('./xkcd_downloader/xkcd_downloader.py --random 1 -d')
try:
filename = glob.glob('./*.png')[0]
except IndexError:
return self.get_random_image()
img = scipy.misc.imread(filename)
os.system('rm *.png')
if img.ndim == 3:
img = img[:,:,0]
return int(filename[2:-4]), np.rot90(np.invert(img).astype(np.float), -1)/255.
def first(self):
self.update.emit() # update canvas and other GUI elements
def next(self):
self.id, self.image = self.get_random_image() # next image
self.status = None # next status
self.leftmsg.emit("XKCD %d" %self.id)
self.update.emit() # update canvas and other GUI elements
def prev(self):
self.id, self.image = self.get_random_image() # prev image
self.status = None # prev status
self.leftmsg.emit("XKCD %d" %self.id)
self.update.emit() # update canvas and other GUI elements
def plus(self):
self.status = True # plus status
# Do Something here....
def minus(self):
self.status = False # minus status
# Do something here....
# Start plusminus GUI with a random XKCD image
basic = Basic()
plusminus.opengui(basic, theme='black')
# Remove files
os.system('rm *.png')
os.system('rm *.jpg')
|
{
"content_hash": "d08cfb20579ed053fb4feb06b150a6b3",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 88,
"avg_line_length": 32.21666666666667,
"alnum_prop": 0.6032074495602691,
"repo_name": "daurer/plusminus",
"id": "d05c6da812bcbb9998120c9f9f0ed0b207d83508",
"size": "1955",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/xkcd.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "8449"
}
],
"symlink_target": ""
}
|
c.auto_save.session = True
# Enable smooth scrolling for web pages. Note smooth scrolling does not
# work with the `:scroll-px` command.
# Type: Bool
c.scrolling.smooth = True
## The editor (and arguments) to use for the `open-editor` command. `{}`
## gets replaced by the filename of the file to be edited.
## Type: ShellCommand
# c.editor.command = ['gvim', '-f', '{file}', '-c', 'normal {line}G{column0}l']
c.editor.command = ["nvim-gui", "-f", "{file}", "-c", "'normal {line}G{column0}l'"]
## Open new tabs (middleclick/ctrl+click) in the background.
## Type: Bool
c.tabs.background = True
## Which tab to select when the focused tab is removed.
## Type: SelectOnRemove
## Valid values:
## - prev: Select the tab which came before the closed one (left in horizontal, above in vertical).
## - next: Select the tab which came after the closed one (right in horizontal, below in vertical).
## - last-used: Select the previously selected tab.
c.tabs.select_on_remove = 'last-used'
|
{
"content_hash": "c86a1dc4c31a4444dd866b4c93efb287",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 101,
"avg_line_length": 38.23076923076923,
"alnum_prop": 0.6911468812877264,
"repo_name": "mifix/dotfiles",
"id": "5fc2cea1e39046f2e88425fd1f21283d5f2c5bb3",
"size": "1099",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "qutebrowser/config.d/gerneral.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1634"
},
{
"name": "Makefile",
"bytes": "605"
},
{
"name": "Python",
"bytes": "147120"
},
{
"name": "Shell",
"bytes": "116973"
},
{
"name": "Vim script",
"bytes": "451462"
}
],
"symlink_target": ""
}
|
import pdfkit
import os
from pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter
from pdfminer.converter import TextConverter
from pdfminer.layout import LAParams
from pdfminer.pdfpage import PDFPage
from cStringIO import StringIO
def convert_pdf_to_txt(path):
rsrcmgr = PDFResourceManager()
retstr = StringIO()
codec = 'utf-8'
laparams = LAParams()
device = TextConverter(rsrcmgr, retstr, codec=codec, laparams=laparams)
fp = file(path, 'rb')
interpreter = PDFPageInterpreter(rsrcmgr, device)
password = ""
maxpages = 0
caching = True
pagenos=set()
for page in PDFPage.get_pages(fp, pagenos, maxpages=maxpages, password=password,caching=caching, check_extractable=True):
interpreter.process_page(page)
text = retstr.getvalue()
fp.close()
device.close()
retstr.close()
return text
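# Usage sketch: extract plain text from a previously generated PDF, e.g.
#   text = convert_pdf_to_txt('output.pdf')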
def show_menu():
print("I want to ")
print("1. input URL of the webpage")
print("2. input location (local) of an HTML file")
print("3. use default URL - http://computemagazine.com/man-who-invented-world-wide-web-gives-new-definition/\n")
def show_error():
print("You may not have a working internet connection :(")
def clear_screen():
if os.name == 'nt':
os.system('cls')
else:
os.system('clear')
def user_input(url):
clear_screen()
pdfkit.from_url(url, 'output.pdf')
print convert_pdf_to_txt('output.pdf')
def default():
url = "http://computemagazine.com/man-who-invented-world-wide-web-gives-new-definition/"
user_input(url)
def from_file(location):
clear_screen()
if location[-5:] != '.html':
print("Extension of the file should be .html -_-")
else:
pdfkit.from_file(location, 'output.pdf')
print convert_pdf_to_txt('output.pdf')
clear_screen()
show_menu()
choice = int(input("Enter choice (1, 2, 3) - "))
while choice not in {1, 2, 3}:
clear_screen()
print("Option not available! Please try again :)\n")
show_menu()
choice = int(input("Enter choice (1, 2, 3) - "))
clear_screen()
if choice == 1:
link = input("Input URL - ")
user_input(link)
elif choice == 2:
loc = input("Input file location - ")
from_file(loc)
elif choice == 3:
default()
|
{
"content_hash": "6ba0f12985cce04debb0c7f85f33a506",
"timestamp": "",
"source": "github",
"line_count": 96,
"max_line_length": 125,
"avg_line_length": 23.875,
"alnum_prop": 0.6509598603839442,
"repo_name": "samkit-jain/Textractor",
"id": "8d6259159123b3d1ec4b4720c68746062cf2717f",
"size": "2292",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/Extractor.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "2292"
}
],
"symlink_target": ""
}
|
def configuration(parent_package='',top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration('numpy',parent_package,top_path)
config.add_subpackage('distutils')
config.add_subpackage('testing')
config.add_subpackage('f2py')
config.add_subpackage('core')
config.add_subpackage('lib')
config.add_subpackage('oldnumeric')
config.add_subpackage('numarray')
config.add_subpackage('fft')
config.add_subpackage('linalg')
config.add_subpackage('random')
config.add_subpackage('ma')
config.add_subpackage('matrixlib')
config.add_subpackage('doc')
config.add_data_dir('doc')
config.add_data_dir('tests')
config.make_config_py() # installs __config__.py
return config
if __name__ == '__main__':
print 'This is the wrong setup.py file to run'
|
{
"content_hash": "4047f58331fda074c6f0f556adce4b73",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 59,
"avg_line_length": 36.78260869565217,
"alnum_prop": 0.6867612293144209,
"repo_name": "chadnetzer/numpy-gaurdro",
"id": "52580ca4163751919f019d5af42252b0fca30843",
"size": "869",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "numpy/setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "4500127"
},
{
"name": "C++",
"bytes": "22396"
},
{
"name": "FORTRAN",
"bytes": "8946"
},
{
"name": "Python",
"bytes": "3741035"
},
{
"name": "Shell",
"bytes": "2035"
}
],
"symlink_target": ""
}
|
from flask import session
from flask_yoloapi import endpoint, parameter
from findex_gui.web import app
KEYS = [
"search_display_view"
]
VALUES = [
"table", "fancy"
]
@app.route("/api/v2/session/set", methods=["POST"])
@endpoint.api(
parameter("key", type=str, required=True),
parameter("val", type=str, required=False)
)
def api_session_set(key, val):
if key not in KEYS:
return Exception("key \"%s\" doesn't exist" % key)
if val not in VALUES:
return Exception("could not set val \"%s\" - doesn't exist" % val)
session[key] = val
return "session key set"
@app.route("/api/v2/session/get", methods=["POST"])
@endpoint.api(
parameter("key", type=str, required=True),
parameter("val", type=str, required=True)
)
def api_session_get(key, val):
if key not in KEYS:
return Exception("key \"%s\" doesn't exist" % key)
if key not in session:
session[key] = "fancy"
return session[key]
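# Usage sketch (the base URL is an assumption; the key/value come from the
# KEYS/VALUES lists above):
#   import requests
#   requests.post("http://localhost:8080/api/v2/session/set",
#                 data={"key": "search_display_view", "val": "table"})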
|
{
"content_hash": "5eefdd364c573dd37a4f408384359d1e",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 74,
"avg_line_length": 24.846153846153847,
"alnum_prop": 0.6357069143446853,
"repo_name": "skftn/findex-gui",
"id": "3ef4ea1660af83b5f69cad63963b32cdfd581d1b",
"size": "969",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "findex_gui/controllers/session/api.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "344290"
},
{
"name": "HTML",
"bytes": "256465"
},
{
"name": "JavaScript",
"bytes": "690059"
},
{
"name": "Python",
"bytes": "249228"
}
],
"symlink_target": ""
}
|
"""Validate the locations in the downloaded checklists.
Validation Tests:
Location:
1. the location is a dict.
LocationIdentifier (optional):
1. identifier is a string.
2. identifier is set.
3. identifier does not have leading/trailing whitespace.
LocationName:
1. name is a string.
2. name is set.
3. name does not have leading/trailing whitespace.
LocationCounty (optional):
1. county is a string.
LocationRegion (optional):
1. region is a string.
LocationCountry:
1. country is a string.
LocationCoordinates
1. latitude and longitude are floats.
"""
from checklists_scrapers.tests.validation import checklists, ValidationTestCase
class Location(ValidationTestCase):
"""Validate the locations in the downloaded checklists."""
def setUp(self):
"""Initialize the test."""
self.locations = [(checklist['location'], checklist['source'])
for checklist in checklists]
def test_location_type(self):
"""Verify the locations field contains a dict."""
for location, source in self.locations:
self.assertIsInstance(location, dict, msg=source)
class LocationIdentifier(ValidationTestCase):
"""Validate the location identifier in the downloaded checklists.
This field is optional.
"""
def setUp(self):
"""Initialize the test."""
self.identifiers = [
(checklist['location']['identifier'], checklist['source'])
for checklist in checklists
if 'identifier' in checklist['location']
]
def test_identifier_type(self):
"""Verify the location identifier is a unicode string."""
for identifier, source in self.identifiers:
self.assertIsInstance(identifier, unicode, msg=source)
def test_identifier_set(self):
"""Verify the location identifier is set."""
for identifier, source in self.identifiers:
self.assertTrue(identifier, msg=source)
def test_identifier_stripped(self):
"""Verify the location identifier has no extra whitespace."""
for identifier, source in self.identifiers:
self.assertStripped(identifier, msg=source)
class LocationName(ValidationTestCase):
"""Validate the location name in the downloaded checklists."""
def setUp(self):
"""Initialize the test."""
self.locations = [(checklist['location'], checklist['source'])
for checklist in checklists]
def test_name_type(self):
"""Verify the location name is a unicode string."""
for location, source in self.locations:
self.assertIsInstance(location['name'], unicode, msg=source)
def test_name_set(self):
"""Verify the location name is set."""
for location, source in self.locations:
self.assertTrue(location['name'], msg=source)
def test_name_stripped(self):
"""Verify the location name has no extra whitespace."""
for location, source in self.locations:
self.assertStripped(location['name'], msg=source)
class LocationCounty(ValidationTestCase):
"""Validate the location county name in the downloaded checklists.
This field is optional.
"""
def setUp(self):
"""Initialize the test."""
self.counties = [(checklist['location']['county'], checklist['source'])
for checklist in checklists
if 'county' in checklist['location']]
def test_county_type(self):
"""Verify the location county is a unicode string."""
for county, source in self.counties:
self.assertIsInstance(county, unicode, msg=source)
class LocationRegion(ValidationTestCase):
"""Validate the location region name in the downloaded checklists.
This field is optional.
"""
def setUp(self):
"""Initialize the test."""
self.regions = [(checklist['location']['region'], checklist['source'])
for checklist in checklists
if 'region' in checklist['location']]
def test_region_type(self):
"""Verify the location county is a unicode string."""
for region, source in self.regions:
self.assertIsInstance(region, unicode, msg=source)
class LocationCountry(ValidationTestCase):
"""Validate the location country name in the downloaded checklists."""
def setUp(self):
"""Initialize the test."""
self.locations = [(checklist['location'], checklist['source'])
for checklist in checklists]
def test_country_type(self):
"""Verify the location country is a unicode string."""
for location, source in self.locations:
self.assertIsInstance(location['country'], unicode, msg=source)
class LocationCoordinates(ValidationTestCase):
"""Validate the latitude and longitude fields."""
def setUp(self):
"""Initialize the test."""
self.locations = [(checklist['location'], checklist['source'])
for checklist in checklists]
def test_latitude(self):
"""Verify the location latitude is a unicode string."""
for location, source in self.locations:
self.assertIsInstance(location['lat'], float, msg=source)
def test_longitude(self):
"""Verify the location longitude is a unicode string."""
for location, source in self.locations:
self.assertIsInstance(location['lon'], float, msg=source)
|
{
"content_hash": "e8aec4f1349c96916ca015efd1b6dc63",
"timestamp": "",
"source": "github",
"line_count": 168,
"max_line_length": 79,
"avg_line_length": 33.45238095238095,
"alnum_prop": 0.6366548042704626,
"repo_name": "StuartMacKay/checklists_scrapers",
"id": "340bcf887b75a3baee9bcfff571167cddd4096c9",
"size": "5620",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "checklists_scrapers/tests/validation/test_location.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "173637"
},
{
"name": "Shell",
"bytes": "6728"
}
],
"symlink_target": ""
}
|
"""Setup script for PyGreSQL version 4.0
Authors and history:
* PyGreSQL written 1997 by D'Arcy J.M. Cain <darcy@druid.net>
* based on code written 1995 by Pascal Andre <andre@chimay.via.ecp.fr>
* setup script created 2000/04 Mark Alexander <mwa@gate.net>
* tweaked 2000/05 Jeremy Hylton <jeremy@cnri.reston.va.us>
* win32 support 2001/01 by Gerhard Haering <gerhard@bigfoot.de>
* tweaked 2006/02 and 2008/11 by Christoph Zwerschke <cito@online.de>
Prerequisites to be installed:
* Python including devel package (header files and distutils)
* PostgreSQL libs and devel packages (header files of client and server)
* PostgreSQL pg_config tool (usually included in the devel package)
(the Windows installer has it as part of the database server feature)
Tested with Python 2.5.2 and PostgreSQL 8.3.5. Older versions should work
as well, but you will need at least Python 2.3 and PostgreSQL 7.4.
Use as follows:
python setup.py build # to build the module
python setup.py install # to install it
You should use MinGW (www.mingw.org) for building on Win32:
python setup.py build -c mingw32 install # use MinGW
Note that Python versions newer than 2.3 use msvcr71 instead of msvcrt
as their common runtime library. So, if you are using MinGW to build PyGreSQL,
you should edit the file "%MinGWpath%/lib/gcc/%MinGWversion%/specs"
and change the entry that reads -lmsvcrt to -lmsvcr71.
See docs.python.org/doc/install/ for more information on
using distutils to install Python programs.
"""
version = "4.0"
import sys
import os
if not (2, 2) < sys.version_info[:2] < (3, 0):
raise Exception("PyGreSQL %s requires a Python 2 version"
" newer than 2.2." % version)
import os
from distutils.core import setup
from distutils.extension import Extension
def pg_config(s):
"""Retrieve information about installed version of PostgreSQL."""
if os.path.exists("../../../../src/bin/pg_config/pg_config"):
f = os.popen("../../../../src/bin/pg_config/pg_config --%s" % s)
    else:
        # If a VPATH build, it might not be there. Look in other places.
        # It should be the one in the path, because the makefile includes
        # greenplum_path.sh.
        f = os.popen("pg_config --%s" % s)
d = f.readline().strip()
if f.close() is not None:
raise Exception("pg_config tool is not available.")
if not d:
raise Exception("Could not get %s information." % s)
return os.getenv('DESTDIR','')+d
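# Illustrative call (the returned path depends on the local installation):
#   pg_config('includedir') might return e.g. '/usr/include/postgresql',
#   with DESTDIR prepended when that environment variable is set.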
def mk_include():
"""Create a temporary local include directory.
The directory will contain a copy of the PostgreSQL server header files,
where all features which are not necessary for PyGreSQL are disabled.
"""
os.mkdir('include')
for f in os.listdir(pg_include_dir_server):
if not f.endswith('.h'):
continue
d = open(os.path.join(pg_include_dir_server, f)).read()
if f == 'pg_config.h':
d += '\n'.join(('',
'#undef ENABLE_NLS',
'#undef USE_REPL_SNPRINTF',
'#undef USE_SSL',
'#undef USE_ZLIB',
'#undef HAVE_STDINT_H',
'#undef HAVE_SYS_TIME_H',
'#undef HAVE_UNISTD_H',
'#define _CRT_SECURE_NO_WARNINGS 1',
'#define _USE_32BIT_TIME_T 1',
''))
open(os.path.join('include', f), 'w').write(d)
def rm_include():
"""Remove the temporary local include directory."""
if os.path.exists('include'):
for f in os.listdir('include'):
os.remove(os.path.join('include', f))
os.rmdir('include')
pg_include_dir = pg_config('includedir')
pg_include_dir_server = pg_config('includedir-server')
rm_include()
mk_include()
include_dirs = ['include', pg_include_dir, pg_include_dir_server]
pg_libdir = pg_config('libdir')
library_dirs = [pg_libdir]
libraries = ['pq']
extra_compile_args = ['-O2']
if sys.platform == "win32":
include_dirs.append(os.path.join(pg_include_dir_server, 'port/win32'))
elif sys.platform == 'darwin' and sys.maxsize > 2**32:
extra_compile_args.extend(['-arch', 'x86_64'])
setup(
name="PyGreSQL",
version=version,
description="Python PostgreSQL Interfaces",
long_description = ("PyGreSQL is an open-source Python module"
" that interfaces to a PostgreSQL database."
" It embeds the PostgreSQL query library to allow easy use"
" of the powerful PostgreSQL features from a Python script."),
keywords="postgresql database api dbapi",
author="D'Arcy J. M. Cain",
author_email="darcy@PyGreSQL.org",
url="http://www.pygresql.org",
download_url = "ftp://ftp.pygresql.org/pub/distrib/",
platforms = ["any"],
license="Python",
py_modules=['pg', 'pgdb'],
ext_modules=[Extension(
'_pg', ['pgmodule.c'],
include_dirs = include_dirs,
library_dirs = library_dirs,
libraries = libraries,
extra_compile_args = extra_compile_args
)],
classifiers=[
"Development Status :: 6 - Mature",
"Intended Audience :: Developers",
"License :: OSI Approved :: Python Software Foundation License",
"Operating System :: OS Independent",
"Programming Language :: C",
"Programming Language :: Python",
"Topic :: Database",
"Topic :: Database :: Front-Ends",
"Topic :: Software Development :: Libraries :: Python Modules"
]
)
rm_include()
|
{
"content_hash": "c7ed44980b1129cde798d1a0d9ced757",
"timestamp": "",
"source": "github",
"line_count": 153,
"max_line_length": 96,
"avg_line_length": 35.666666666666664,
"alnum_prop": 0.6457760674363203,
"repo_name": "ashwinstar/gpdb",
"id": "205fd0df65e65dc9641c684527debc8a0b99bd6d",
"size": "5534",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "gpMgmt/bin/pythonSrc/PyGreSQL-4.0/setup.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "3724"
},
{
"name": "Awk",
"bytes": "836"
},
{
"name": "Batchfile",
"bytes": "12768"
},
{
"name": "C",
"bytes": "42705726"
},
{
"name": "C++",
"bytes": "2839973"
},
{
"name": "CMake",
"bytes": "3425"
},
{
"name": "CSS",
"bytes": "7407"
},
{
"name": "Csound Score",
"bytes": "223"
},
{
"name": "DTrace",
"bytes": "3873"
},
{
"name": "Dockerfile",
"bytes": "11990"
},
{
"name": "Emacs Lisp",
"bytes": "3488"
},
{
"name": "Fortran",
"bytes": "14863"
},
{
"name": "GDB",
"bytes": "576"
},
{
"name": "Gherkin",
"bytes": "342783"
},
{
"name": "HTML",
"bytes": "653351"
},
{
"name": "JavaScript",
"bytes": "23969"
},
{
"name": "Lex",
"bytes": "229553"
},
{
"name": "M4",
"bytes": "114378"
},
{
"name": "Makefile",
"bytes": "455445"
},
{
"name": "Objective-C",
"bytes": "38376"
},
{
"name": "PLSQL",
"bytes": "160856"
},
{
"name": "PLpgSQL",
"bytes": "5722287"
},
{
"name": "Perl",
"bytes": "798287"
},
{
"name": "PowerShell",
"bytes": "422"
},
{
"name": "Python",
"bytes": "3267988"
},
{
"name": "Raku",
"bytes": "698"
},
{
"name": "Roff",
"bytes": "32437"
},
{
"name": "Ruby",
"bytes": "81695"
},
{
"name": "SQLPL",
"bytes": "313387"
},
{
"name": "Shell",
"bytes": "453847"
},
{
"name": "TSQL",
"bytes": "3294076"
},
{
"name": "XS",
"bytes": "6983"
},
{
"name": "Yacc",
"bytes": "672568"
},
{
"name": "sed",
"bytes": "1231"
}
],
"symlink_target": ""
}
|
from datetime import datetime
from django.conf import settings
from django.core import exceptions
from django.db import models
from django.db.models import permalink
from django.utils.translation import ugettext_lazy as _
from django.utils.functional import cached_property
from model_utils.fields import MonitorField
from model_utils.models import TimeStampedModel
from dateutil.relativedelta import relativedelta
from audit_log.models import AuthStampedModel
from contacts.models import Person
from ananta.models import NextPrevMixin
from currencies.utils import (get_currency_choices, get_currency_words,
get_foreign_currency_choices)
class Source(models.Model):
name = models.CharField("Name", max_length=255)
def __str__(self):
return self.name
class Meta:
ordering = ['name']
class SourceMixin(models.Model):
SOURCE_CHOICES = (
(u'ahmedabad-1', _('Ahmedabad 1')),
(u'ahmedabad-2', _('Ahmedabad 2')),
(u'tovp-f-mayapur', _('TOVP Fundraising Mayapur')),
(u'tovp-exhibits', _('TOVP Exhibits')),
(u'baroda', _('Baroda')),
(u'bcs-vp-2015', _('BCS Vyasa Puja 2015')),
(u'bhagavata-saptaha-2015', _('Bhagavata Saptaha 2015')),
(u'bhakti-vriksa-kolkata-2016', _('Bhakti Vriksa Kolkata 2016')),
(u'braj-mohan-mumbai', _('Braj Mohan (Mumbai)')),
(u'delhi-vidyanagar-2015', _('Delhi Vidyanagar 2015')),
(u'gkg-vp-2015', _('GKG Vyasa Puja 2015')),
(u'gkg-vp-2016', _('GKG Vyasa Puja 2016')),
(u'ils-2016', _('ILS 2016')),
(u'iskcon-silicon-valley', _('ISKCON Silicon Valley (ISV)')),
(u'j-w-marriot', _('J W Marriot')),
(u'jps-office', _('JPS Office')),
(u'jps-others', _('JPS Others')),
(u'kolkata-nityananda-yatra-2017', _('Kolkata Nityananda Yatra 2017')),
(u'kanjurmarg-mumbai-2015', _('Kanjurmarg Mumbai 2015')),
(u'lm-reception', _('Life Membership Reception')),
(u'mayapur-community', _('Mayapur Community')),
(u'mso', _('MSO')),
(u'mumbai-yatra-2016', _('Mumbai Yatra 2016')),
(u'namahatta', _('JPS Namahatta')),
(u'botswana-2016', _('Botswana 2016')),
(u'south-afrika-2016', _('South Afrika 2016')),
(u'neel-vasan-das', _('Neel Vasan Das')),
(u'nigdi-2016.', _('Nigdi 2016.')),
(u'nityananda', _('Nityananda Tour')),
(u'nvs', _('Nava Yogendra Swami')),
(u'other', _('Other')),
(u'prabhupada-currency-inr', _('Prabhupada Currency INR')),
(u'pune-group-mayapur-2015', _('Pune Group Mayapur 2015')),
(u'pune-yatra-2016', _('Pune Yatra 2016')),
(u'rns-kartik-yatra', _('RNS Kartik Yatra')),
(u'rohini-narayani', _('Rohini (Sri Narayani Devi Dasi)')),
(u'surat-1', _('Surat 1')),
(u'surat-2', _('Surat 2')),
(u'vallabha-vidyanagar', _('Vallabha Vidyanagar')),
(u'vrindavan-booth', _('Vrindavan Booth 2015')),
(u'vrindavan-booth-2016', _('Vrindavan Booth 2016')),
(u'vvps-vp-2015', _('Vedavyasapriya Swami Vyasa Puja 2015')),
)
source_old = models.CharField("Source", max_length=30, default='',
choices=SOURCE_CHOICES, blank=True)
source = models.ForeignKey(Source, verbose_name="Source", blank=True, null=True)
class Meta:
abstract = True
class Pledge(TimeStampedModel, AuthStampedModel, NextPrevMixin, SourceMixin):
"""
    Through the Pledge model you can keep track of all of a pledge's
    contributions and associated promotions. Contributions and promotions
    cannot exist without their pledge.
"""
NEXT_PAYMENT_GRACE_PERIOD = 14
def reindex_related(self):
"""
Function which returns list models to be reindex when model is updated.
"""
related = []
for contribution in self.contributions.all():
related.append(contribution)
for promotion in self.assigned_promotions:
related.append(promotion)
return related
# Foreign key to Person (Contact)
person = models.ForeignKey(Person, verbose_name="Person", blank=True,
related_name='pledges')
# Foreign key for user who is assigned for Follow Up of this Pledge
followed_by = models.ForeignKey(settings.AUTH_USER_MODEL, null=True,
blank=True, related_name='pledges')
amount = models.DecimalField(_('Amount'), max_digits=20, decimal_places=2)
amount_paid = models.DecimalField(_('Amount Paid'), max_digits=20,
default=0, decimal_places=2,
null=True, blank=True)
currency = models.CharField("Currency", max_length=6,
choices=get_currency_choices(), default="INR")
    # Date from which the person is supposed to start paying for this pledge.
    # Helps us recognize which pledges are late with payments. It is used,
    # for example, as a filter on the Follow Up page.
payments_start_date = models.DateField(
_("Payments Start"), null=True, blank=True, default=datetime.now,
help_text=_('Date of first expected payment for this pledge.'),
)
# Interval helps us to determine expected date of next payment
INTERVAL_CHOICES = (
(u'1', _('1 month')),
(u'2', _('2 months')),
(u'3', _('3 months')),
(u'4', _('4 months')),
(u'6', _('6 months')),
(u'12', _('12 months')),
)
interval = models.CharField(
_("Payments Interval"), max_length=30, choices=INTERVAL_CHOICES,
default=u'1', help_text=_("Enter planned interval of payments "
"(e.g. 1 month)"),
)
# Todo: Number of instalments - Currently doesn't do anything.
number_of_instalments = models.IntegerField(
_('Number of instalments'), default=1,
        help_text=_('If somebody knows in how many instalments they would like '
                    'to pay the pledge.'))
# Status of the pledge
STATUS_CHOICES = (
# New pledge without any payments
(u'pending', _('Pledged')),
        # Pledge is partially paid (0% < paid < 100%)
(u'partial', _('Partially Paid')),
# Fully paid pledge (paid >= 100%)
(u'completed', _('Completed')),
(u'failed', _('Shadow')),
(u'canceled', _('Canceled')),
)
status = models.CharField("Status", max_length=30, default='pending',
choices=STATUS_CHOICES, blank=True)
    # Keeps track of when the Status field was last changed
status_changed = MonitorField(monitor='status')
# Expected date for next payment
next_payment_date = models.DateField(
_("Next Payment Date"), null=True, blank=True,
help_text=_('Date of next expected payment.'),
)
@cached_property
def assigned_promotions(self):
"""
        Returns all promotions (e.g. Golden Brick, Silver Coin) assigned
        to this pledge.
"""
from promotions.models import promotions
assigned_promotions = []
for promotion_class in promotions:
for promotion in promotion_class.objects.all(). \
filter(pledge=self):
assigned_promotions.append(promotion)
self.cache_assigned_promotions = assigned_promotions
return self.cache_assigned_promotions
@property
def progress(self):
"""
Return pledge's payment progress in percents.
"""
if self.amount_paid and self.amount:
return self.amount_paid / self.amount * 100
return 0
@permalink
def get_absolute_url(self):
return ('contributions:pledge:detail', None, {
'person_id': self.person.pk,
'pk': self.pk})
def info(self):
"""
        Generates a one-line summary of the pledge.
"""
return 'Pledged {amount}{currency} - {progress:.2f}% completed'. \
format(amount=self.amount, currency=self.get_currency_display(),
progress=self.progress, status=self.get_status_display())
def _calculate_amount_paid(self):
"""
Calculates how much of pledge is paid.
"""
total = 0
for contribution in self.contributions.all(). \
filter(status='completed'):
total += contribution.amount
self.amount_paid = total
def update_next_payment_date(self):
"""
Update pledge's next_payment_date based on latest contribution and
payment interval plus grace period (NEXT_PAYMENT_GRACE_PERIOD).
"""
latest = self.contributions.all().order_by('-cleared_on')[:1]
interval = int(self.interval)
# if there is any contribution dated after payment start
if latest.count() and latest[0].cleared_on and (latest[0].cleared_on > self.payments_start_date):
self.next_payment_date = latest[0].cleared_on + \
relativedelta(months=interval)
else:
self.next_payment_date = self.payments_start_date + \
relativedelta(months=interval) + \
relativedelta(days=self.NEXT_PAYMENT_GRACE_PERIOD)
return self.next_payment_date
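    # Worked example: with interval='3' and a latest completed contribution
    # cleared on 2016-01-10 (after payments_start_date), the next payment is
    # expected on 2016-04-10; with no such contribution and a
    # payments_start_date of 2016-01-10, it is 2016-04-10 plus the 14-day
    # grace period, i.e. 2016-04-24. The dates are illustrative only.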
def has_late_payment(self):
"""
Returns True if pledge is in late payment
"""
self.update_next_payment_date()
if self.next_payment_date > datetime.date(datetime.now()):
return False
return True
def save(self, **kwargs):
self._calculate_amount_paid()
        # set pledge's status depending on amount paid
if not self.amount_paid:
self.status = 'pending'
elif self.amount_paid < self.amount:
self.status = 'partial'
else:
self.status = 'completed'
        # set when the next payment should be expected
self.update_next_payment_date()
super(Pledge, self).save()
def __str__(self):
return '{amount}{currency} ({progress:.2f}%)'.format(
amount=self.amount, currency=self.get_currency_display(),
progress=self.progress)
def can_user_delete(self, user):
"""
Check if user is able to delete the pledge. Some users can delete any
pledge, some for safety can delete only pledges which has no assigned
contributions.
"""
if user.has_perm('contributions.delete_pledge'):
return True
if (user.has_perm('contributions.can_delete_if_no_contributions') and
not self.contributions.all().count()):
return True
return False
def assign_follow_up(self, user):
"""
        Assigns the user for Follow Up of the pledge, or unsets the follow up
        if this user already follows it. Does nothing if the pledge is
        followed by another user.
"""
if not (self.followed_by and self.followed_by != user):
if self.followed_by:
self.followed_by = None
else:
self.followed_by = user
self.save()
class Meta:
permissions = (("can_delete_if_no_contributions",
"Can delete if no contributions"),)
class BaseContributionMixin(TimeStampedModel, AuthStampedModel, NextPrevMixin,
SourceMixin):
"""
Mixin to define base fields which can be shared for Contributions and
BulkPayments.
"""
tmp_serial = None
    # fields to save the serial number for contributions which are not from
    # the books/slips and for which we should generate a receipt
serial_year = models.CharField(
_('Serial Number Year'), max_length=5, blank=True,
help_text=_('Serial Number Year of this contribution.'))
serial_number = models.CharField(
_('Serial Number'), max_length=5, blank=True,
help_text=_('Serial Number of this contribution for financial year.'))
# serial_number_int = models.IntegerField(
# _('Serial Number Int'), null=True, blank=True, default=None,
# help_text=_('Serial Number of this contribution for financial year.'))
amount = models.DecimalField(_('Amount'), max_digits=20, decimal_places=2)
currency = models.CharField(
"Currency", max_length=6, choices=get_currency_choices(), default="INR")
    # Whenever cash is coming from a foreign currency (other than INR) we
    # note it here for reference
foreign_amount = models.DecimalField(_('Foreign Amount'), max_digits=20,
decimal_places=2, blank=True,
null=True)
foreign_currency = models.CharField(
"Foreign Currency", max_length=6, choices=get_foreign_currency_choices(),
default="INR", help_text=_('Please fill if donation is coming from'
'foreign currency.'))
PAYMENT_METHOD_CHOICES = (
(u'cashl', _('Cash (Indian)')),
(u'cashf', _('Cash (Foreign)')),
(u'cashd', _('Cash Deposit')),
(u'ccdcsl', _('Credit/Debit Card Swipe Local')),
(u'ccdcsf', _('Credit/Debit Card Swipe Foreign')),
(u'neftl', _('NEFT (Indian)')),
(u'neftf', _('NEFT/Bank Transfer (Foreign)')),
(u'chequel', _('Cheque (Indian)')),
(u'chequef', _('Cheque (Foreign)')),
(u'chequed', _('Cheque Deposit')),
(u'paypal', _('Paypal')),
(u'axis', _('Gateway Axis (Internet)')),
(u'treasury', _('ISKCON Treasury')),
(u'bulk', _('Part of the Bulk Payment')),
)
payment_method = models.CharField(
"Payment Method", max_length=16, choices=PAYMENT_METHOD_CHOICES)
transaction_id = models.CharField(
_('Transaction ID or Cheque No'), max_length=100, blank=True,
help_text=_('Transaction ID of this contribution or cheque number.'))
bank = models.CharField(
_('Bank'), max_length=100, blank=True,
help_text=_('Write bank name (and possible branch or location) '
'for cheque'))
dated = models.DateField(
_("Dated"), null=True, blank=True,
help_text=_('Enter date on the cheque')
)
receipt_date = models.DateField(
_("Receipt Date"), null=True, blank=True,
help_text=_("Enter date which should be on the receipt.")
)
cleared_on = models.DateField(
_("Cleared On"), null=True, blank=True,
help_text=_('Enter date when transaction was completed '
'(money came to TOVP)')
)
STATUS_CHOICES = (
(u'pending', _('Pending')),
(u'completed', _('Completed')),
(u'failed', _('Failed')),
(u'canceled', _('Canceled')),
)
status = models.CharField("Status", max_length=30, choices=STATUS_CHOICES)
status_changed = MonitorField(monitor='status')
    # If the contribution is entered from a receipt book we enter the book number
book_number = models.CharField(
_('Book Number'), max_length=20, blank=True,
help_text=_('Enter if you are entering contribution from book'))
# and slip number
slip_number = models.CharField(
_('Slip Number'), max_length=20, blank=True,
help_text=_('Enter if you are entering contribution from slip'))
    # If the contribution is paid on behalf of somebody we may need to
    # overwrite the name, address or PAN card shown on the receipt instead of
    # the Contact's details
overwrite_name = models.CharField(
_("Name who pays on behalf of main contact"), max_length=255,
blank=True)
overwrite_address = models.CharField(
_("Address who pays on behalf of main contact"), max_length=255,
blank=True)
overwrite_pan_card = models.CharField(
_('Overwrite PAN card number'), max_length=50, blank=True, null=True)
note = models.TextField(_("Note"), max_length=255, blank=True)
def __init__(self, *args, **kwargs):
super(BaseContributionMixin, self).__init__(*args, **kwargs)
if self.serial_year:
self._serial_year = self.serial_year
self._serial_number = self.serial_number
else:
self._serial_year = None
self._serial_number = None
@classmethod
def get_serial_number_prefix(cls, completed=None):
"""
Generates serial_number_prefix based on variables from Model class
"""
if completed:
return cls.serial_number_prefix_completed
return cls.serial_number_prefix_temporary
def get_serial_number(self):
"""
Generates full serial number.
"""
if self.book_number:
return '{book}/{slip}'.format(book=self.book_number,
slip=self.slip_number)
elif self.serial_year and self.serial_number:
number = '%05d' % int(self.serial_number)
if self.status == 'completed':
prefix = self.get_serial_number_prefix(completed=True)
else:
prefix = self.get_serial_number_prefix(completed=None)
atg = ''
if self.status == 'completed' and self.overwrite_pan_card != 'hide' and (self.overwrite_pan_card or self.pledge.person.pan_card_number):
atg = '80G/'
return '{prefix}/{year}/{atg}{number}'.format(prefix=prefix,
year=self.serial_year,
atg=atg,
number=number)
return ''
def generate_serial_year(self):
"""
        The serial number year is generated based on the Indian financial
        year (April - March).
"""
if self.receipt_date:
date = self.receipt_date
if date.month < 4:
year = date.year - 2001
else:
year = date.year - 2000
return '%d-%d' % (year, year + 1)
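    # Illustration (not in the original source): a receipt_date of 2015-02-10
    # falls before April, so the method returns '14-15'; a receipt_date of
    # 2015-04-10 returns '15-16'.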
def clean(self):
errors = {}
# Strip all whitespace
for field in ['transaction_id']:
if self.__dict__[field]:
self.__dict__[field] = self.__dict__[field].strip()
if self.status == 'completed' and not self.cleared_on:
msg = _("There must be date for completed transaction")
errors['cleared_on'] = [msg]
if (not (self.receipt_date or self.cleared_on) and
self.payment_method != 'paypal'):
msg = _("You have to fill this when there is no Cleared On date.")
errors['receipt_date'] = [msg]
        # transaction id is required for cheque or credit/debit card payments
        if not self.transaction_id:
            if self.payment_method in ['chequel', 'chequef', 'chequed',
                                       'ccdcsl', 'ccdcsf']:
                if self.payment_method in ['ccdcsl', 'ccdcsf']:
                    msg = _("You have to fill Transaction ID for Credit/Debit "
                            "card payment.")
                    errors['transaction_id'] = [msg]
                if self.payment_method in ['chequel', 'chequef', 'chequed']:
                    msg = _("You have to fill Cheque Number")
                    errors['transaction_id'] = [msg]
self.ensure_book_and_slip_number()
self.ensure_serial_number_not_generated()
if errors:
raise exceptions.ValidationError(errors)
def ensure_book_and_slip_number(self):
"""
        Make sure that either both or neither of book number and slip number
        are entered.
"""
if bool(self.book_number) != bool(self.slip_number):
msg = _("There must be both book number and slip number")
if self.book_number:
raise exceptions.ValidationError({'slip_number': [msg]})
else:
raise exceptions.ValidationError({'book_number': [msg]})
@property
def currency_words(self):
return get_currency_words(self.currency)
def info(self, show_name=None):
"""
Generates one line summary
"""
field_values = [
'#' + str(self.pk),
str(self.amount),
self.currency,
'(%s)' % self.get_payment_method_display(),
self.get_status_display()
]
return ' - '.join(filter(bool, field_values))
def save(self, **kwargs):
if self.cleared_on and not self.receipt_date:
self.receipt_date = self.cleared_on
if not self.receipt_date:
if self.pk:
self.receipt_date = self.created
else:
self.receipt_date = datetime.now()
if self._serial_year and not self.tmp_serial:
self.serial_year = self._serial_year
self.serial_number = self._serial_number
super(BaseContributionMixin, self).save()
class Meta:
abstract = True
class BulkPayment(BaseContributionMixin):
"""
BulkPayment is used for collectors, like when somebody brings cash for
    multiple people, or makes a bank transfer on behalf of other people.
"""
person = models.ForeignKey(Person, verbose_name="Person", blank=True,
related_name='bulk_payments')
# There are two types of receipts:
#
    # Official receipt: collector gets an official receipt which can be used
    # for accounting purposes. Donors under such bulk payment will not be able
    # to receive official receipts, only an acknowledgement. Official receipts
    # will most probably be used for everything other than cash.
    #
    # (e.g.: Collector transfers 1,00,000 by bank on behalf of 2 donors of
    # 50000 each. Collector gets an official receipt based on the bank
    # transfer and thus individual donors cannot get an official receipt
    # anymore)
    #
    # Acknowledgement: confirmation for the collector that he gave funds to us
    # on behalf of donors, but it cannot be used for accounting purposes as
    # each donor under the bulk payment will get his own official receipt.
    #
    # (e.g: Collector brings 1,00,000 in cash on behalf of 2 donors of 50000
    # each. Collector gets an acknowledgement so he has proof he gave cash to
    # us, but he cannot use it in his accounting for tax benefits, and we
    # generate official receipts for both donors for 50000 each which they can
    # then use in their accounting)
RECEIPT_TYPE_CHOICES = (
('official', _('Official')),
('acknowledgement', _('Not official / Acknowledgement')),
)
receipt_type = models.CharField(
"Receipt Type", max_length=100, choices=RECEIPT_TYPE_CHOICES)
def get_serial_number_prefix(self, completed=None):
"""
        Generates the serial_number_prefix based on the receipt type and
        status of the payment.
"""
if self.receipt_type == 'official':
if completed:
return 'BULK-TOVP'
return 'BULK-TMP'
else:
return 'BULK-INTRA'
def get_deposit_status(self):
"""
        Generates info about how many of the associated contributions are
        deposited. Used for cash.
"""
status = ''
deposited = 0
not_deposited = 0
for contribution in self.contributions.all():
if contribution.deposited_status == 'deposited':
deposited += 1
else:
not_deposited += 1
if deposited and not not_deposited:
status = 'success'
else:
status = 'danger'
return '<div class="btn btn-%s btn-xs">%d of %d deposited</div>' % (status, deposited, deposited + not_deposited)
@permalink
def get_absolute_url(self):
return ('contributions:bulk_payment:detail', None, {
'person_id': self.person.pk,
'pk': self.pk})
def get_serial_number(self):
"""
Generates full serial number.
"""
if self.book_number:
return '{book}/{slip}'.format(book=self.book_number,
slip=self.slip_number)
elif self.serial_year and self.serial_number:
number = '%05d' % int(self.serial_number)
if self.status == 'completed':
prefix = self.get_serial_number_prefix(completed=True)
else:
prefix = self.get_serial_number_prefix(completed=None)
atg = ''
if self.status == 'completed' and self.overwrite_pan_card != 'hide' and (self.overwrite_pan_card or self.person.pan_card_number):
atg = '80G/'
return '{prefix}/{year}/{atg}{number}'.format(prefix=prefix,
year=self.serial_year,
atg=atg,
number=number)
return ''
def ensure_serial_number_not_generated(self):
"""
        Used for validation. Once a contribution has a serial number, we
        cannot switch to using book and slip numbers. It is done like this to
        ensure there are no missing serial numbers/receipts for the audit and
        to make sure that after giving the receipt to the donor we have the
        same serial in our database.
"""
if self._serial_number and self.book_number:
msg = _("This contribution has already serial number generated, "
"You cannot add book and slip numbers anymore.")
raise exceptions.ValidationError({'book_number': [msg]})
def __str__(self):
field_values = (
'#' + str(self.pk),
self.person.mixed_name,
str(self.amount),
str(self.receipt_date),
# '(%s)' % self.get_payment_method_display()
)
return ' - '.join(filter(bool, field_values))
def save(self, **kwargs):
if not (self.book_number or self.serial_number):
if self.receipt_date:
date = self.receipt_date
year = date.year
if date.month < 4:
year -= 1
if year > 2014:
self.serial_year = self.generate_serial_year()
                    self.serial_number = self.__class__.objects.filter(
                        receipt_type=self.receipt_type,
                        serial_year=self.serial_year).count() + 1
super(BulkPayment, self).save()
class Contribution(BaseContributionMixin):
"""
Contribution is used for individual contributions for donors.
"""
serial_number_prefix_completed = 'TOVP'
serial_number_prefix_temporary = 'TMP'
# Foreign key to pledge
pledge = models.ForeignKey(Pledge, verbose_name="Pledge",
related_name='contributions')
# We have various receipt types:
#
# Mayapur Receipt: for transaction in India where Mayapur TOVP office gives
# official receipt which can be used for accounting purposes/tax deduction
# in India.
#
    # USA Receipt: for transactions which come through the USA office, can be
    # used for accounting purposes/tax deduction in the USA.
    #
    # External Receipt: for any transactions where the receipt is given by
    # others than the above two.
    #
    # Examples for external receipts:
    # - when an official receipt was given by ISKCON Tirupati, we cannot
    #   produce another official receipt here as there would be a duplicate
    #   donation with only one payment
    # - when the contribution is part of a bulk payment (with official
    #   receipt), e.g: when the collector transferred the payment by bank, he
    #   is the one who got the official receipt and individual donors for each
    #   contribution cannot get an official receipt anymore, only an
    #   acknowledgement
RECEIPT_TYPE_CHOICES = (
('mayapur-receipt', _('Mayapur Receipt')),
('usa-receipt', _('USA Receipt')),
('external-receipt', _('External / Non Receipt')),
)
receipt_type = models.CharField(
"Receipt Type", max_length=100, choices=RECEIPT_TYPE_CHOICES,
default='external-receipt',
)
collector = models.ForeignKey(
Person, verbose_name="Collector", blank=True, null=True,
related_name='collector_contributions',
        help_text='If this comes through a collector.')
# In case contribution is part of bulk payment we will use foreign key to
# specific bulk payment
bulk_payment = models.ForeignKey(
BulkPayment, verbose_name="Bulk Payment", blank=True, null=True,
related_name='contributions',
        help_text=_('If this contribution is part of a bulk payment please '
                    'choose it here.'))
    # Keeps track of the deposited status, where the first logical state is
    # 'Not deposited' and the final state is 'Deposited'. Used to keep track
    # of which cash transactions need to be deposited in the treasury.
DEPOSITED_STATUS_CHOICES = (
('not-deposited', _('Not deposited')),
('ready-to-deposit', _('Ready to deposit')),
('deposited', _('Deposited')),
)
deposited_status = models.CharField(
"Is Deposited", max_length=20, choices=DEPOSITED_STATUS_CHOICES,
default="not-deposited")
deposited_status_changed = MonitorField(monitor='deposited_status')
# Deposited status flow used by widget for changing status state.
DEPOSITED_STATUS_FLOW = {
'not-deposited': 'ready-to-deposit',
'ready-to-deposit': 'deposited',
'deposited': 'not-deposited',
}
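    # Illustrative walk through the flow above (not in the original source):
    # a cash contribution starts as 'not-deposited', is marked
    # 'ready-to-deposit', then 'deposited', and clicking again cycles it back
    # to 'not-deposited'; the permission checks in change_deposited_status
    # below gate these transitions.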
def info(self, show_name=None):
field_values = [
'#' + str(self.pk),
self.receipt_date.strftime("%B %-d, %Y"),
str(self.amount),
self.currency,
'(%s)' % self.get_payment_method_display(),
self.get_status_display()
]
if self.bulk_payment:
field_values.append('[%s]' % self.get_deposited_status_display())
return ' - '.join(filter(bool, field_values))
def change_deposited_status(self, user):
"""
        Changes the deposit status if the user is capable of doing so, based
        on permissions.
"""
next_status = self.DEPOSITED_STATUS_FLOW[self.deposited_status]
if next_status == 'deposited':
if user.has_perm('contributions.can_deposit'):
self.deposited_status = next_status
else:
self.deposited_status = 'not-deposited'
if next_status == 'ready-to-deposit':
self.deposited_status = next_status
if next_status == 'not-deposited':
if user.has_perm('contributions.can_deposit'):
self.deposited_status = next_status
self.save()
    # TODO: it seems possible to remove is_external from everywhere as it has
    # been replaced by receipt_type
is_external = models.BooleanField(
_('Non Mayapur TOVP receipt'), default=False, db_index=True,
help_text='This MUST be checked if other than India TOVP receipt '
'was given.')
def __init__(self, *args, **kwargs):
super(Contribution, self).__init__(*args, **kwargs)
if self.pk:
self._original_pledge = self.pledge
else:
self._original_pledge = None
def reindex_related(self):
"""
        Returns a list of models to be reindexed when this model is updated.
"""
related = []
if self.bulk_payment:
related.append(self.bulk_payment)
return related
def ensure_serial_number_not_generated(self):
if self._serial_number and self.book_number:
msg = _("This contribution has already serial number generated, "
"You cannot add book and slip numbers anymore.")
raise exceptions.ValidationError({'book_number': [msg]})
if self._serial_number and self.receipt_type == 'external-receipt':
msg = _("This contribution has already serial number generated, "
"You cannot set is as external anymore.")
raise exceptions.ValidationError({'receipt_type': [msg]})
@permalink
def get_absolute_url(self):
return ('contributions:contribution:detail', None, {
'person_id': self.pledge.person.pk,
'pk': self.pk})
def save(self, **kwargs):
# set contribution to external if bulk payment is official receipt type
if self.bulk_payment and self.bulk_payment.receipt_type == 'official':
self.receipt_type = 'external-receipt'
if (self.receipt_type == 'mayapur-receipt'
and not (self.book_number or self.serial_number)):
if self.receipt_date:
date = self.receipt_date
year = date.year
if date.month < 4:
year -= 1
if year > 2014:
self.serial_year = self.generate_serial_year()
last_serial = 0
try:
last_serial = self.__class__.objects.all(). \
filter(serial_year=self.serial_year). \
order_by('-created')[0].serial_number
                    except IndexError:
                        pass
self.serial_number = str(int(last_serial) + 1)
super(Contribution, self).save()
# if contribution pledge changed save original pledge first, so its
# amount_paid is updated correctly
if self._original_pledge and (self._original_pledge != self.pledge):
self._original_pledge.save()
# save pledge to update its amount_paid
if self.pledge:
self.pledge.save()
def delete(self, **kwargs):
super(Contribution, self).delete()
# save pledge to update its amount_paid
if self.pledge:
self.pledge.save()
def __str__(self):
field_values = (
self.pledge.person.full_name,
str(self.amount),
'(%s)' % self.get_payment_method_display()
)
return ' - '.join(filter(bool, field_values))
class Meta:
permissions = (("can_edit_completed", "Can edit completed"),
("can_change_deposit_status", "Can change deposit status"),
("can_do_follow_up", "Can do follow up"),
("can_deposit", "Can mark as deposited"),
("can_move_contribution", "Can move contribution"))
class FollowUp(TimeStampedModel, AuthStampedModel):
"""
    Keeps track of follow ups. After any contact with a donor during follow up
    we should create a new follow up record, so others can see the history of
    the follow up.
"""
pledge = models.ForeignKey(Pledge, verbose_name="Pledge",
related_name='follow_ups')
STATUS_CHOICES = (
('wrong-contact', _('Wrong contact')),
('could-not-reach', _('Could not reach')),
('waiting-reply', _('Waiting for reply')),
('agreed-to-pay', _('Agreed to pay')),
('see-note', _('See note')),
('will-not-pay', _('Will not pay')),
)
status = models.CharField("Status", max_length=30, choices=STATUS_CHOICES)
note = models.TextField(_("Note"), blank=True)
@permalink
def get_absolute_url(self):
return ('contributions:follow_up:detail', None, {
'pledge_id': self.pledge.pk,
'pk': self.pk})
|
{
"content_hash": "436444c9a75891d983c2dd152097b79f",
"timestamp": "",
"source": "github",
"line_count": 894,
"max_line_length": 148,
"avg_line_length": 39.727069351230426,
"alnum_prop": 0.5795697713706498,
"repo_name": "phani00/tovp",
"id": "1563ca7031e93d135d228ba4d07bf363c4846b26",
"size": "35516",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tovp/contributions/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "190528"
},
{
"name": "HTML",
"bytes": "288449"
},
{
"name": "JavaScript",
"bytes": "2887"
},
{
"name": "Python",
"bytes": "514452"
}
],
"symlink_target": ""
}
|
from functools import wraps
import re
import socket
import struct
import time
import gevent
import requests
def tz_hours():
delta = time.localtime().tm_hour - time.gmtime().tm_hour
sign = '-' if delta < 0 else ''
return "%s%02d.00" % (sign, abs(delta))
def is_dst():
return 1 if time.localtime().tm_isdst else 0
def get_timesync():
timesync = """
<?xml version="1.0" encoding="utf-8"?>
<s:Envelope xmlns:s="http://schemas.xmlsoap.org/soap/envelope/" s:encodingStyle="http://schemas.xmlsoap.org/soap/encoding/">
<s:Body>
<u:TimeSync xmlns:u="urn:Belkin:service:timesync:1">
<UTC>{utc}</UTC>
<TimeZone>{tz}</TimeZone>
<dst>{dst}</dst>
<DstSupported>{dstsupported}</DstSupported>
</u:TimeSync>
</s:Body>
</s:Envelope>""".format(
utc=int(time.time()),
tz=tz_hours(),
dst=is_dst(),
dstsupported=is_dst()).strip()
return timesync
def get_ip_address():
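    # Connecting a UDP socket to an arbitrary address sends no packets, but it
    # lets the OS pick the outgoing interface, whose address we then read back.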
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
try:
s.connect(('1.2.3.4', 9))
return s.getsockname()[0]
except socket.error:
return None
finally:
del s
def matcher(match_string):
pattern = re.compile('.*?'.join(re.escape(c) for c in match_string.lower()))
def matches(s):
return pattern.search(s.lower()) is not None
return matches
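# Example (not part of the original module): matcher('lvr')('Living Room')
# returns True because 'l', 'v' and 'r' appear in that order in the string,
# while matcher('xyz')('Living Room') returns False.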
# This is pretty arbitrary. I'm choosing, for no real reason, the length of
# a subscription.
_RETRIES = 1801/60
def get_retries():
return _RETRIES
def retry_with_delay(f, delay=60):
"""
Retry the wrapped requests.request function in case of ConnectionError.
Optionally limit the number of retries or set the delay between retries.
"""
@wraps(f)
def inner(*args, **kwargs):
kwargs['timeout'] = 5
remaining = get_retries() + 1
while remaining:
remaining -= 1
try:
return f(*args, **kwargs)
except requests.ConnectionError:
if not remaining:
raise
gevent.sleep(delay)
return inner
requests_get = retry_with_delay(requests.get)
requests_post = retry_with_delay(requests.post)
requests_request = retry_with_delay(requests.request)
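# Minimal usage sketch (not part of the original module; the URL is
# hypothetical): the wrapped helpers behave like their requests counterparts
# but retry on requests.ConnectionError roughly 30 times with a 60 second
# delay between attempts, forcing a 5 second timeout on each request.
#
#     resp = requests_get('http://192.168.1.50:49153/setup.xml')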
|
{
"content_hash": "614e92682b54391eaa2460bdc2b372eb",
"timestamp": "",
"source": "github",
"line_count": 90,
"max_line_length": 124,
"avg_line_length": 24.8,
"alnum_prop": 0.6209677419354839,
"repo_name": "fritz-fritz/ouimeaux",
"id": "8d57675367dbc64766056af08ec00d121a82c6fc",
"size": "2232",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ouimeaux/utils.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "387"
},
{
"name": "HTML",
"bytes": "3772"
},
{
"name": "JavaScript",
"bytes": "103426"
},
{
"name": "Makefile",
"bytes": "1186"
},
{
"name": "Python",
"bytes": "202240"
}
],
"symlink_target": ""
}
|
""" Simulation class that implements n-population discrete-time replicator dynamics
Classes:
:py:class:`NPopDiscreteReplicatorDynamics`
implements n-population discrete time replicator dynamics
Functions:
:py:func:`stable_state_handler`
Default handler for 'stable state' and 'force stop' events in n
populations
"""
import numpy as np
import numpy.random as rand
import simulations.dynamics.replicator_fastfuncs as fastfuncs
from simulations.dynamics.discrete_replicator import DiscreteReplicatorDynamics
class NPopDiscreteReplicatorDynamics(DiscreteReplicatorDynamics):
""" Implements n-population discrete time replicator dynamics
Keyword Parameters:
effective_zero
The effective zero value for floating-point comparisons
(default 1e-10)
types
A list of names for the possible types (used to calculate
dimensionality, defaults to the return value of :py:meth:`~NPopDiscreteReplicatorDynamics._default_types`)
background_rate
The natural rate of reproduction (parameter in the dynamics,
default 0.)
Methods to Implement:
:py:meth:`~NPopDiscreteReplicatorDynamics._profile_payoffs`
Returns the payoff for a type given a strategy profile
Events:
force stop(this, genct, finalgen, prevgen, firstgen)
emitted when the generation iteration is broken by a forced stop
condition (instead of stable state event)
generation(this, genct, thisgen, lastgen)
emitted when a generation is complete
initial set(this, initial_pop)
emitted when the initial population is set up
stable state(this, genct, finalgen, prevgen, firstgen)
emitted when a stable state is reached
"""
def __init__(self, *args, **kwdargs):
""" Checks for kwdargs parameters and then delegates to the parent.
"""
super(NPopDiscreteReplicatorDynamics, self).__init__(*args, **kwdargs)
self._one_or_many = self.TYPE_MANY
def _add_default_listeners(self):
""" Sets up default event listeners for various events
Handlers:
- stable state - :py:func:`stable_state_handler`
- force stop - :py:func:`stable_state_handler`
"""
super(NPopDiscreteReplicatorDynamics, self)._add_default_listeners()
self.on('stable state', stable_state_handler)
self.on('force stop', stable_state_handler)
def _default_types(self):
""" Returns the default types if none are given to the constructor
"""
return [
['A', 'B'],
['C', 'D']
]
def _random_population(self):
""" Generate a set of random population on the unit simplex of
appropriate dimensionalities
"""
rand.seed()
samples = [rand.dirichlet([1] * len(self.types[i]))
for i in xrange(len(self.types))]
type_cts = [len(i) for i in self.types]
max_type_ct = max(type_cts)
initpop = np.zeros([len(self.types), max_type_ct], dtype=np.float64)
for i, sample in enumerate(samples):
initpop[i, :type_cts[i]] = samples[i]
return initpop
def _null_population(self):
""" Generates a population that will not be equal to any initial population
"""
type_cts = [len(i) for i in self.types]
max_type_ct = max(type_cts)
initpop = np.zeros([max_type_ct] * len(self.types), dtype=np.float64)
return initpop
def _profile_payoffs(self, profile):
""" You should implement this method
Parameters:
profile
the strategy profile that is being played (tuple of integers)
"""
return [1, 1]
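    # A minimal sketch of an override in a subclass (illustrative only, not in
    # the original source): ``profile`` is a tuple of type indices, one per
    # population, and the return value gives one payoff per population.
    #
    #     def _profile_payoffs(self, profile):
    #         payoff_matrix = [[(2., 2.), (0., 3.)],
    #                          [(3., 0.), (1., 1.)]]
    #         return list(payoff_matrix[profile[0]][profile[1]])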
def _create_caches(self):
self._profiles_cache = fastfuncs.generate_profiles(np.array([np.int(len(i))
for i in self.types]))
self._payoffs_cache = np.array([np.array(self._profile_payoffs(c), dtype=np.float64)
for c in self._profiles_cache])
def stable_state_handler(this, genct, thisgen, lastgen, firstgen):
""" Print out a report when a stable state is reached.
Parameters:
this
a reference to the simulation
genct
the number of generations
thisgen
the stable state population
lastgen
the previous population
firstgen
the initial population
"""
for k in xrange(len(thisgen)):
print >> this.out, "\tPopulation {0}:".format(k)
print >> this.out, "\t{0}".format(thisgen[k])
for i, pop in enumerate(thisgen[k]):
if abs(pop - 0.) > this.effective_zero:
fstr3 = "\t\t{0:>5}: {1:>20}: {2}"
print >> this.out, fstr3.format(i, this.types[k][i], pop)
print >> this.out
|
{
"content_hash": "e3f099b1ccd5437a78647b5fdd8a0abc",
"timestamp": "",
"source": "github",
"line_count": 176,
"max_line_length": 116,
"avg_line_length": 28.551136363636363,
"alnum_prop": 0.6075621890547264,
"repo_name": "gsmcwhirter/simulations",
"id": "256c01aee09dbb2b29968371e9d6affa1374c93f",
"size": "5025",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "src/simulations/dynamics/npop_discrete_replicator.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "378173"
},
{
"name": "Python",
"bytes": "158691"
},
{
"name": "Shell",
"bytes": "5121"
}
],
"symlink_target": ""
}
|
from asic_la.sharded_probability_function.utils import (
permute,
relative_permutation,
invert_permutation,
remove_and_reduce,
send_to_left_side,
send_to_right_side,
)
from asic_la.sharded_probability_function.sharded_probability_function import (
ShardedProbabilityFunction,
)
from asic_la.sharded_probability_function.sharded_discrete_probability_function import (
ShardedDiscretedProbabilityFunction,
)
|
{
"content_hash": "f054a87d339a0b0a641b61a9fc139ab4",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 88,
"avg_line_length": 31.285714285714285,
"alnum_prop": 0.773972602739726,
"repo_name": "google/distla_core",
"id": "02c1beba64e06d469912ff6127a433b07751a256",
"size": "1122",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "distla/distla_core/asic_tests/sharded_probability_function/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1317325"
},
{
"name": "Shell",
"bytes": "5454"
}
],
"symlink_target": ""
}
|
from sicpythontask.PythonTaskInfo import PythonTaskInfo
from sicpythontask.PythonTask import PythonTask
from sicpythontask.InputPort import InputPort
from sicpythontask.OutputPort import OutputPort
from sicpythontask.data.Int32 import Int32
from sicpythontask.data.Control import Control
@PythonTaskInfo(generator=True)
class GeneratorAtomicWith2SIn1AIn(PythonTask):
def __init_ports__(self):
self.in1 = InputPort(name="in1", data_type=Int32)
self.in2 = InputPort(name="in2", data_type=Int32)
self.in3 = InputPort(name="in3", data_type=Control, asynchronous=True)
self.out = OutputPort(name="out", data_type=Int32)
def runner_start(self):
self.on = True
def execute_async(self, async_in):
self.on = not self.on
def generate(self):
if self.on:
self.out.put_data(Int32(637343730))
self.sleep(500)
def execute(self, grp):
self.out.put_data(Int32(self.in1.get_data(Int32).values[0] + self.in2.get_data(Int32).values[0]))
|
{
"content_hash": "a942f8f712d1d9ec2b561e6e2e23d90f",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 105,
"avg_line_length": 35.41379310344828,
"alnum_prop": 0.7049659201557936,
"repo_name": "systemincloud/sic-examples",
"id": "e3175828e131dae55e00b68ba6827e55d4d0dab6",
"size": "1027",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "com.systemincloud.examples.tasks.pythontask/src/test/py/tasks/generator/GeneratorAtomicWith2SIn1AIn.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Java",
"bytes": "84659"
},
{
"name": "Python",
"bytes": "36262"
}
],
"symlink_target": ""
}
|
import json
import os
import string
from runtime.diagnostics import SQLFlowDiagnostic
from runtime.pai import pai_model
JOB_ARCHIVE_FILE = "job.tar.gz"
PARAMS_FILE = "params.txt"
ENTRY_FILE = "entry.py"
def get_pai_tf_cmd(cluster_config, tarball, params_file, entry_file,
model_name, oss_model_path, train_table, val_table,
res_table, project):
"""Get PAI-TF cmd for training
Args:
cluster_config: PAI cluster config
tarball: the zipped resource name
params_file: PAI param file name
entry_file: entry file in the tarball
model_name: trained model name
oss_model_path: path to save the model
train_table: train data table
val_table: evaluate data table
res_table: table to save train model, if given
project: current odps project
    Returns:
The cmd to run on PAI
"""
job_name = "_".join(["sqlflow", model_name]).replace(".", "_")
cf_quote = json.dumps(cluster_config).replace("\"", "\\\"")
    # submit tables should be formatted as: odps://<project>/tables/<table>,
    # odps://<project>/tables/<table> ...
submit_tables = _max_compute_table_url(train_table)
if train_table != val_table and val_table:
val_table = _max_compute_table_url(val_table)
submit_tables = "%s,%s" % (submit_tables, val_table)
output_tables = ""
if res_table != "":
table = _max_compute_table_url(res_table)
output_tables = "-Doutputs=%s" % table
    # NOTE(typhoonzero): use -DhyperParameters to define flags for passing
    # OSS credentials.
# TODO(typhoonzero): need to find a more secure way to pass credentials.
cmd = ("pai -name tensorflow1150 -project algo_public_dev "
"-DmaxHungTimeBeforeGCInSeconds=0 -DjobName=%s -Dtags=dnn "
"-Dscript=%s -DentryFile=%s -Dtables=%s %s -DhyperParameters='%s'"
) % (job_name, tarball, entry_file, submit_tables, output_tables,
params_file)
# format the oss checkpoint path with ARN authorization, should use eval
# because we use '''json''' in the workflow yaml file.
oss_checkpoint_configs = eval(os.getenv("SQLFLOW_OSS_CHECKPOINT_CONFIG"))
if not oss_checkpoint_configs:
raise SQLFlowDiagnostic(
"need to configure SQLFLOW_OSS_CHECKPOINT_CONFIG when "
"submitting to PAI")
if isinstance(oss_checkpoint_configs, dict):
ckpt_conf = oss_checkpoint_configs
else:
ckpt_conf = json.loads(oss_checkpoint_configs)
model_url = pai_model.get_oss_model_url(oss_model_path)
role_name = _get_project_role_name(project)
# format the oss checkpoint path with ARN authorization.
oss_checkpoint_path = "%s/?role_arn=%s/%s&host=%s" % (
model_url, ckpt_conf["arn"], role_name, ckpt_conf["host"])
cmd = "%s -DcheckpointDir='%s'" % (cmd, oss_checkpoint_path)
if cluster_config["worker"]["count"] > 1:
cmd = "%s -Dcluster=\"%s\"" % (cmd, cf_quote)
else:
cmd = "%s -DgpuRequired='%d'" % (cmd, cluster_config["worker"]["gpu"])
return cmd
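# Rough shape of the command produced above (illustrative only; the table and
# file names are hypothetical, and the trailing cluster/GPU flag is omitted):
#
#   pai -name tensorflow1150 -project algo_public_dev
#       -DmaxHungTimeBeforeGCInSeconds=0 -DjobName=sqlflow_my_model -Dtags=dnn
#       -Dscript=job.tar.gz -DentryFile=entry.py
#       -Dtables=odps://my_project/tables/train,odps://my_project/tables/val
#       -DhyperParameters='params.txt' -DcheckpointDir='oss://...'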
def _get_project_role_name(project):
"""Get oss role form project name.
A valid role name contains letters and numbers only.
The prefix 'pai2oss' of the role name denotes PAI access OS
Args:
project: string
project name
Returns:
role name for the project
"""
return "pai2oss" + "".join(x for x in project.lower()
if x in string.ascii_lowercase + string.digits)
def _max_compute_table_url(table):
parts = table.split(".")
if len(parts) != 2:
raise SQLFlowDiagnostic("odps table: %s should be format db.table" %
table)
return "odps://%s/tables/%s" % (parts[0], parts[1])
|
{
"content_hash": "046837200cabfacb4968ec1ecd3bdac4",
"timestamp": "",
"source": "github",
"line_count": 104,
"max_line_length": 78,
"avg_line_length": 36.875,
"alnum_prop": 0.6211212516297262,
"repo_name": "sql-machine-learning/sqlflow",
"id": "d5ad79e53a6412d8b07686ae925a98373997d9fe",
"size": "4435",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "python/runtime/pai/get_pai_tf_cmd.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "13031"
},
{
"name": "Go",
"bytes": "996957"
},
{
"name": "HTML",
"bytes": "5632"
},
{
"name": "Java",
"bytes": "48951"
},
{
"name": "JavaScript",
"bytes": "22018"
},
{
"name": "Python",
"bytes": "809068"
},
{
"name": "Shell",
"bytes": "82143"
},
{
"name": "Yacc",
"bytes": "15317"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import tensorflow.contrib.slim as slim
from tensorflow.contrib.slim import losses
from tensorflow.contrib.slim import arg_scope
import numpy as np
from layer_utils.snippets import generate_anchors_pre
from layer_utils.proposal_layer import proposal_layer
from layer_utils.proposal_top_layer import proposal_top_layer
from layer_utils.anchor_target_layer import anchor_target_layer
from layer_utils.proposal_target_layer import proposal_target_layer
from model.config import cfg
class Network(object):
def __init__(self, batch_size=1):
self._feat_stride = [16, ]
self._feat_compress = [1. / 16., ]
self._batch_size = batch_size
self._predictions = {}
self._losses = {}
self._anchor_targets = {}
self._proposal_targets = {}
self._layers = {}
self._act_summaries = []
self._score_summaries = {}
self._train_summaries = []
self._event_summaries = {}
self._variables_to_fix = {}
def _add_image_summary(self, image, boxes):
# add back mean
image += cfg.PIXEL_MEANS
# bgr to rgb (opencv uses bgr)
    channels = tf.unstack(image, axis=-1)
    image = tf.stack([channels[2], channels[1], channels[0]], axis=-1)
# dims for normalization
width = tf.to_float(tf.shape(image)[2])
height = tf.to_float(tf.shape(image)[1])
    # from [x1, y1, x2, y2, cls] to normalized [y1, x1, y2, x2]
cols = tf.unstack(boxes, axis=1)
boxes = tf.stack([cols[1] / height,
cols[0] / width,
cols[3] / height,
cols[2] / width], axis=1)
# add batch dimension (assume batch_size==1)
assert image.get_shape()[0] == 1
boxes = tf.expand_dims(boxes, dim=0)
image = tf.image.draw_bounding_boxes(image, boxes)
return tf.summary.image('ground_truth', image)
def _add_act_summary(self, tensor):
tf.summary.histogram('ACT/' + tensor.op.name + '/activations', tensor)
tf.summary.scalar('ACT/' + tensor.op.name + '/zero_fraction',
tf.nn.zero_fraction(tensor))
def _add_score_summary(self, key, tensor):
tf.summary.histogram('SCORE/' + tensor.op.name + '/' + key + '/scores', tensor)
def _add_train_summary(self, var):
tf.summary.histogram('TRAIN/' + var.op.name, var)
def _reshape_layer(self, bottom, num_dim, name):
input_shape = tf.shape(bottom)
with tf.variable_scope(name) as scope:
# change the channel to the caffe format
to_caffe = tf.transpose(bottom, [0, 3, 1, 2])
# then force it to have channel 2
reshaped = tf.reshape(to_caffe,
tf.concat(axis=0, values=[[self._batch_size], [num_dim, -1], [input_shape[2]]]))
# then swap the channel back
to_tf = tf.transpose(reshaped, [0, 2, 3, 1])
return to_tf
def _softmax_layer(self, bottom, name):
if name == 'rpn_cls_prob_reshape':
input_shape = tf.shape(bottom)
bottom_reshaped = tf.reshape(bottom, [-1, input_shape[-1]])
reshaped_score = tf.nn.softmax(bottom_reshaped, name=name)
return tf.reshape(reshaped_score, input_shape)
return tf.nn.softmax(bottom, name=name)
def _proposal_top_layer(self, rpn_cls_prob, rpn_bbox_pred, name):
with tf.variable_scope(name) as scope:
rois, rpn_scores = tf.py_func(proposal_top_layer,
[rpn_cls_prob, rpn_bbox_pred, self._im_info,
self._feat_stride, self._anchors, self._num_anchors],
[tf.float32, tf.float32])
rois.set_shape([cfg.TEST.RPN_TOP_N, 5])
rpn_scores.set_shape([cfg.TEST.RPN_TOP_N, 1])
return rois, rpn_scores
def _proposal_layer(self, rpn_cls_prob, rpn_bbox_pred, name):
with tf.variable_scope(name) as scope:
rois, rpn_scores = tf.py_func(proposal_layer,
[rpn_cls_prob, rpn_bbox_pred, self._im_info, self._mode,
self._feat_stride, self._anchors, self._num_anchors],
[tf.float32, tf.float32])
rois.set_shape([None, 5])
rpn_scores.set_shape([None, 1])
return rois, rpn_scores
# Only use it if you have roi_pooling op written in tf.image
  def _roi_pool_layer(self, bottom, rois, name):
    with tf.variable_scope(name) as scope:
      return tf.image.roi_pooling(bottom, rois,
pooled_height=cfg.POOLING_SIZE,
pooled_width=cfg.POOLING_SIZE,
spatial_scale=1. / 16.)[0]
def _crop_pool_layer(self, bottom, rois, name):
with tf.variable_scope(name) as scope:
batch_ids = tf.squeeze(tf.slice(rois, [0, 0], [-1, 1], name="batch_id"), [1])
# Get the normalized coordinates of bboxes
bottom_shape = tf.shape(bottom)
height = (tf.to_float(bottom_shape[1]) - 1.) * np.float32(self._feat_stride[0])
width = (tf.to_float(bottom_shape[2]) - 1.) * np.float32(self._feat_stride[0])
x1 = tf.slice(rois, [0, 1], [-1, 1], name="x1") / width
y1 = tf.slice(rois, [0, 2], [-1, 1], name="y1") / height
x2 = tf.slice(rois, [0, 3], [-1, 1], name="x2") / width
y2 = tf.slice(rois, [0, 4], [-1, 1], name="y2") / height
# Won't be backpropagated to rois anyway, but to save time
bboxes = tf.stop_gradient(tf.concat([y1, x1, y2, x2], axis=1))
pre_pool_size = cfg.POOLING_SIZE * 2
crops = tf.image.crop_and_resize(bottom, bboxes, tf.to_int32(batch_ids), [pre_pool_size, pre_pool_size], name="crops")
return slim.max_pool2d(crops, [2, 2], padding='SAME')
def _dropout_layer(self, bottom, name, ratio=0.5):
return tf.nn.dropout(bottom, ratio, name=name)
def _anchor_target_layer(self, rpn_cls_score, name):
with tf.variable_scope(name) as scope:
rpn_labels, rpn_bbox_targets, rpn_bbox_inside_weights, rpn_bbox_outside_weights = tf.py_func(
anchor_target_layer,
[rpn_cls_score, self._gt_boxes, self._im_info, self._feat_stride, self._anchors, self._num_anchors],
[tf.float32, tf.float32, tf.float32, tf.float32])
rpn_labels.set_shape([1, 1, None, None])
rpn_bbox_targets.set_shape([1, None, None, self._num_anchors * 4])
rpn_bbox_inside_weights.set_shape([1, None, None, self._num_anchors * 4])
rpn_bbox_outside_weights.set_shape([1, None, None, self._num_anchors * 4])
rpn_labels = tf.to_int32(rpn_labels, name="to_int32")
self._anchor_targets['rpn_labels'] = rpn_labels
self._anchor_targets['rpn_bbox_targets'] = rpn_bbox_targets
self._anchor_targets['rpn_bbox_inside_weights'] = rpn_bbox_inside_weights
self._anchor_targets['rpn_bbox_outside_weights'] = rpn_bbox_outside_weights
self._score_summaries.update(self._anchor_targets)
return rpn_labels
def _proposal_target_layer(self, rois, roi_scores, name):
with tf.variable_scope(name) as scope:
rois, roi_scores, labels, bbox_targets, bbox_inside_weights, bbox_outside_weights = tf.py_func(
proposal_target_layer,
[rois, roi_scores, self._gt_boxes, self._num_classes],
[tf.float32, tf.float32, tf.float32, tf.float32, tf.float32, tf.float32])
rois.set_shape([cfg.TRAIN.BATCH_SIZE, 5])
roi_scores.set_shape([cfg.TRAIN.BATCH_SIZE])
labels.set_shape([cfg.TRAIN.BATCH_SIZE, 1])
bbox_targets.set_shape([cfg.TRAIN.BATCH_SIZE, self._num_classes * 4])
bbox_inside_weights.set_shape([cfg.TRAIN.BATCH_SIZE, self._num_classes * 4])
bbox_outside_weights.set_shape([cfg.TRAIN.BATCH_SIZE, self._num_classes * 4])
self._proposal_targets['rois'] = rois
self._proposal_targets['labels'] = tf.to_int32(labels, name="to_int32")
self._proposal_targets['bbox_targets'] = bbox_targets
self._proposal_targets['bbox_inside_weights'] = bbox_inside_weights
self._proposal_targets['bbox_outside_weights'] = bbox_outside_weights
self._score_summaries.update(self._proposal_targets)
return rois, roi_scores
def _anchor_component(self):
with tf.variable_scope('ANCHOR_' + self._tag) as scope:
# just to get the shape right
height = tf.to_int32(tf.ceil(self._im_info[0, 0] / np.float32(self._feat_stride[0])))
width = tf.to_int32(tf.ceil(self._im_info[0, 1] / np.float32(self._feat_stride[0])))
anchors, anchor_length = tf.py_func(generate_anchors_pre,
[height, width,
self._feat_stride, self._anchor_scales, self._anchor_ratios],
[tf.float32, tf.int32], name="generate_anchors")
anchors.set_shape([None, 4])
anchor_length.set_shape([])
self._anchors = anchors
self._anchor_length = anchor_length
def build_network(self, sess, is_training=True):
raise NotImplementedError
def _smooth_l1_loss(self, bbox_pred, bbox_targets, bbox_inside_weights, bbox_outside_weights, sigma=1.0, dim=[1]):
sigma_2 = sigma ** 2
box_diff = bbox_pred - bbox_targets
in_box_diff = bbox_inside_weights * box_diff
abs_in_box_diff = tf.abs(in_box_diff)
smoothL1_sign = tf.stop_gradient(tf.to_float(tf.less(abs_in_box_diff, 1. / sigma_2)))
in_loss_box = tf.pow(in_box_diff, 2) * (sigma_2 / 2.) * smoothL1_sign \
+ (abs_in_box_diff - (0.5 / sigma_2)) * (1. - smoothL1_sign)
out_loss_box = bbox_outside_weights * in_loss_box
loss_box = tf.reduce_mean(tf.reduce_sum(
out_loss_box,
axis=dim
))
return loss_box
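  # The computation above implements the standard smooth L1 loss: with
  # x = bbox_inside_weights * (bbox_pred - bbox_targets),
  #   loss(x) = 0.5 * (sigma * x)^2        if |x| < 1 / sigma^2
  #           = |x| - 0.5 / sigma^2        otherwise,
  # weighted by bbox_outside_weights, summed over `dim` and averaged over the
  # remaining axes.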
def _add_losses(self, sigma_rpn=3.0):
with tf.variable_scope('loss_' + self._tag) as scope:
# RPN, class loss
rpn_cls_score = tf.reshape(self._predictions['rpn_cls_score_reshape'], [-1, 2])
rpn_label = tf.reshape(self._anchor_targets['rpn_labels'], [-1])
rpn_select = tf.where(tf.not_equal(rpn_label, -1))
rpn_cls_score = tf.reshape(tf.gather(rpn_cls_score, rpn_select), [-1, 2])
rpn_label = tf.reshape(tf.gather(rpn_label, rpn_select), [-1])
rpn_cross_entropy = tf.reduce_mean(
tf.nn.sparse_softmax_cross_entropy_with_logits(logits=rpn_cls_score, labels=rpn_label))
# RPN, bbox loss
rpn_bbox_pred = self._predictions['rpn_bbox_pred']
rpn_bbox_targets = self._anchor_targets['rpn_bbox_targets']
rpn_bbox_inside_weights = self._anchor_targets['rpn_bbox_inside_weights']
rpn_bbox_outside_weights = self._anchor_targets['rpn_bbox_outside_weights']
rpn_loss_box = self._smooth_l1_loss(rpn_bbox_pred, rpn_bbox_targets, rpn_bbox_inside_weights,
rpn_bbox_outside_weights, sigma=sigma_rpn, dim=[1, 2, 3])
# RCNN, class loss
cls_score = self._predictions["cls_score"]
label = tf.reshape(self._proposal_targets["labels"], [-1])
cross_entropy = tf.reduce_mean(
tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=tf.reshape(cls_score, [-1, self._num_classes]), labels=label))
# RCNN, bbox loss
bbox_pred = self._predictions['bbox_pred']
bbox_targets = self._proposal_targets['bbox_targets']
bbox_inside_weights = self._proposal_targets['bbox_inside_weights']
bbox_outside_weights = self._proposal_targets['bbox_outside_weights']
loss_box = self._smooth_l1_loss(bbox_pred, bbox_targets, bbox_inside_weights, bbox_outside_weights)
self._losses['cross_entropy'] = cross_entropy
self._losses['loss_box'] = loss_box
self._losses['rpn_cross_entropy'] = rpn_cross_entropy
self._losses['rpn_loss_box'] = rpn_loss_box
loss = cross_entropy + loss_box + rpn_cross_entropy + rpn_loss_box
self._losses['total_loss'] = loss
self._event_summaries.update(self._losses)
return loss
def create_architecture(self, sess, mode, num_classes, tag=None,
anchor_scales=(8, 16, 32), anchor_ratios=(0.5, 1, 2)):
self._image = tf.placeholder(tf.float32, shape=[self._batch_size, None, None, 3])
self._im_info = tf.placeholder(tf.float32, shape=[self._batch_size, 3])
self._gt_boxes = tf.placeholder(tf.float32, shape=[None, 5])
self._tag = tag
self._num_classes = num_classes
self._mode = mode
self._anchor_scales = anchor_scales
self._num_scales = len(anchor_scales)
self._anchor_ratios = anchor_ratios
self._num_ratios = len(anchor_ratios)
self._num_anchors = self._num_scales * self._num_ratios
training = mode == 'TRAIN'
testing = mode == 'TEST'
    assert tag is not None
# handle most of the regularizers here
weights_regularizer = tf.contrib.layers.l2_regularizer(cfg.TRAIN.WEIGHT_DECAY)
if cfg.TRAIN.BIAS_DECAY:
biases_regularizer = weights_regularizer
else:
biases_regularizer = tf.no_regularizer
# list as many types of layers as possible, even if they are not used now
with arg_scope([slim.conv2d, slim.conv2d_in_plane, \
slim.conv2d_transpose, slim.separable_conv2d, slim.fully_connected],
weights_regularizer=weights_regularizer,
biases_regularizer=biases_regularizer,
biases_initializer=tf.constant_initializer(0.0)):
rois, cls_prob, bbox_pred = self.build_network(sess, training)
layers_to_output = {'rois': rois}
layers_to_output.update(self._predictions)
for var in tf.trainable_variables():
self._train_summaries.append(var)
if mode == 'TEST':
stds = np.tile(np.array(cfg.TRAIN.BBOX_NORMALIZE_STDS), (self._num_classes))
means = np.tile(np.array(cfg.TRAIN.BBOX_NORMALIZE_MEANS), (self._num_classes))
self._predictions["bbox_pred"] *= stds
self._predictions["bbox_pred"] += means
else:
self._add_losses()
layers_to_output.update(self._losses)
val_summaries = []
with tf.device("/cpu:0"):
val_summaries.append(self._add_image_summary(self._image, self._gt_boxes))
for key, var in self._event_summaries.items():
val_summaries.append(tf.summary.scalar(key, var))
for key, var in self._score_summaries.items():
self._add_score_summary(key, var)
for var in self._act_summaries:
self._add_act_summary(var)
for var in self._train_summaries:
self._add_train_summary(var)
self._summary_op = tf.summary.merge_all()
if not testing:
self._summary_op_val = tf.summary.merge(val_summaries)
return layers_to_output
def get_variables_to_restore(self, variables, var_keep_dic):
raise NotImplementedError
def fix_variables(self, sess, pretrained_model):
raise NotImplementedError
# Extract the head feature maps, for example for vgg16 it is conv5_3
# only useful during testing mode
def extract_head(self, sess, image):
feed_dict = {self._image: image}
feat = sess.run(self._layers["head"], feed_dict=feed_dict)
return feat
# only useful during testing mode
def test_image(self, sess, image, im_info):
feed_dict = {self._image: image,
self._im_info: im_info}
cls_score, cls_prob, bbox_pred, rois = sess.run([self._predictions["cls_score"],
self._predictions['cls_prob'],
self._predictions['bbox_pred'],
self._predictions['rois']],
feed_dict=feed_dict)
return cls_score, cls_prob, bbox_pred, rois
def get_summary(self, sess, blobs):
feed_dict = {self._image: blobs['data'], self._im_info: blobs['im_info'],
self._gt_boxes: blobs['gt_boxes']}
summary = sess.run(self._summary_op_val, feed_dict=feed_dict)
return summary
def train_step(self, sess, blobs, train_op):
feed_dict = {self._image: blobs['data'], self._im_info: blobs['im_info'],
self._gt_boxes: blobs['gt_boxes']}
rpn_loss_cls, rpn_loss_box, loss_cls, loss_box, loss, _ = sess.run([self._losses["rpn_cross_entropy"],
self._losses['rpn_loss_box'],
self._losses['cross_entropy'],
self._losses['loss_box'],
self._losses['total_loss'],
train_op],
feed_dict=feed_dict)
return rpn_loss_cls, rpn_loss_box, loss_cls, loss_box, loss
def train_step_with_summary(self, sess, blobs, train_op):
feed_dict = {self._image: blobs['data'], self._im_info: blobs['im_info'],
self._gt_boxes: blobs['gt_boxes']}
rpn_loss_cls, rpn_loss_box, loss_cls, loss_box, loss, summary, _ = sess.run([self._losses["rpn_cross_entropy"],
self._losses['rpn_loss_box'],
self._losses['cross_entropy'],
self._losses['loss_box'],
self._losses['total_loss'],
self._summary_op,
train_op],
feed_dict=feed_dict)
return rpn_loss_cls, rpn_loss_box, loss_cls, loss_box, loss, summary
def train_step_no_return(self, sess, blobs, train_op):
feed_dict = {self._image: blobs['data'], self._im_info: blobs['im_info'],
self._gt_boxes: blobs['gt_boxes']}
sess.run([train_op], feed_dict=feed_dict)
|
{
"content_hash": "0d8c55f827c46c12acfe0be7dd6c502b",
"timestamp": "",
"source": "github",
"line_count": 396,
"max_line_length": 124,
"avg_line_length": 45.89393939393939,
"alnum_prop": 0.5862220754924617,
"repo_name": "junranhe/tf-faster-rcnn",
"id": "648585635bb6f8d105c105ea5a12dc9c915581db",
"size": "18402",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/nets/network.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "146"
},
{
"name": "Cuda",
"bytes": "5064"
},
{
"name": "Makefile",
"bytes": "94"
},
{
"name": "Matlab",
"bytes": "1821"
},
{
"name": "Python",
"bytes": "232492"
},
{
"name": "Roff",
"bytes": "1195"
},
{
"name": "Shell",
"bytes": "10039"
}
],
"symlink_target": ""
}
|
import json
import select
import subprocess
import time
import uuid
from apiclient.discovery import build
from airflow.contrib.hooks.gcp_api_base_hook import GoogleCloudBaseHook
from airflow.utils.log.logging_mixin import LoggingMixin
# This is the default location
# https://cloud.google.com/dataflow/pipelines/specifying-exec-params
DEFAULT_DATAFLOW_LOCATION = 'us-central1'
class _DataflowJob(LoggingMixin):
def __init__(self, dataflow, project_number, name, location, poll_sleep=10):
self._dataflow = dataflow
self._project_number = project_number
self._job_name = name
self._job_location = location
self._job_id = None
self._job = self._get_job()
self._poll_sleep = poll_sleep
def _get_job_id_from_name(self):
jobs = self._dataflow.projects().locations().jobs().list(
projectId=self._project_number,
location=self._job_location
).execute()
for job in jobs['jobs']:
if job['name'] == self._job_name:
self._job_id = job['id']
return job
return None
def _get_job(self):
if self._job_name:
job = self._get_job_id_from_name()
else:
job = self._dataflow.projects().jobs().get(
projectId=self._project_number,
jobId=self._job_id
).execute()
if job and 'currentState' in job:
self.log.info(
'Google Cloud DataFlow job %s is %s',
job['name'], job['currentState']
)
elif job:
self.log.info(
'Google Cloud DataFlow with job_id %s has name %s',
self._job_id, job['name']
)
else:
self.log.info(
'Google Cloud DataFlow job not available yet..'
)
return job
def wait_for_done(self):
while True:
if self._job and 'currentState' in self._job:
if 'JOB_STATE_DONE' == self._job['currentState']:
return True
elif 'JOB_STATE_RUNNING' == self._job['currentState'] and \
'JOB_TYPE_STREAMING' == self._job['type']:
return True
elif 'JOB_STATE_FAILED' == self._job['currentState']:
raise Exception("Google Cloud Dataflow job {} has failed.".format(
self._job['name']))
elif 'JOB_STATE_CANCELLED' == self._job['currentState']:
raise Exception("Google Cloud Dataflow job {} was cancelled.".format(
self._job['name']))
elif 'JOB_STATE_RUNNING' == self._job['currentState']:
time.sleep(self._poll_sleep)
elif 'JOB_STATE_PENDING' == self._job['currentState']:
time.sleep(15)
else:
self.log.debug(str(self._job))
raise Exception(
"Google Cloud Dataflow job {} was unknown state: {}".format(
self._job['name'], self._job['currentState']))
else:
time.sleep(15)
self._job = self._get_job()
def get(self):
return self._job
class _Dataflow(LoggingMixin):
def __init__(self, cmd):
self.log.info("Running command: %s", ' '.join(cmd))
self._proc = subprocess.Popen(
cmd,
shell=False,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
close_fds=True)
def _line(self, fd):
if fd == self._proc.stderr.fileno():
lines = self._proc.stderr.readlines()
for line in lines:
self.log.warning(line[:-1])
if lines:
return lines[-1]
if fd == self._proc.stdout.fileno():
line = self._proc.stdout.readline()
return line
@staticmethod
def _extract_job(line):
if line is not None:
if line.startswith("Submitted job: "):
return line[15:-1]
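    # Example (illustrative): a line of the form
    # "Submitted job: 2017-10-01_12_34_56-1234567890123456789\n" yields the
    # bare job id "2017-10-01_12_34_56-1234567890123456789".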
def wait_for_done(self):
reads = [self._proc.stderr.fileno(), self._proc.stdout.fileno()]
self.log.info("Start waiting for DataFlow process to complete.")
while self._proc.poll() is None:
ret = select.select(reads, [], [], 5)
if ret is not None:
for fd in ret[0]:
line = self._line(fd)
if line:
self.log.debug(line[:-1])
else:
self.log.info("Waiting for DataFlow process to complete.")
        if self._proc.returncode != 0:
raise Exception("DataFlow failed with return code {}".format(
self._proc.returncode))
class DataFlowHook(GoogleCloudBaseHook):
def __init__(self,
gcp_conn_id='google_cloud_default',
delegate_to=None,
poll_sleep=10):
self.poll_sleep = poll_sleep
super(DataFlowHook, self).__init__(gcp_conn_id, delegate_to)
def get_conn(self):
"""
        Returns a Google Cloud Dataflow service object.
"""
http_authorized = self._authorize()
return build('dataflow', 'v1b3', http=http_authorized)
def _start_dataflow(self, task_id, variables, name,
command_prefix, label_formatter):
variables = self._set_variables(variables)
cmd = command_prefix + self._build_cmd(task_id, variables,
label_formatter)
_Dataflow(cmd).wait_for_done()
_DataflowJob(self.get_conn(), variables['project'], name,
variables['region'], self.poll_sleep).wait_for_done()
@staticmethod
def _set_variables(variables):
if variables['project'] is None:
raise Exception('Project not specified')
if 'region' not in variables.keys():
variables['region'] = DEFAULT_DATAFLOW_LOCATION
return variables
def start_java_dataflow(self, task_id, variables, dataflow, job_class=None,
append_job_name=True):
if append_job_name:
name = task_id + "-" + str(uuid.uuid1())[:8]
else:
name = task_id
variables['jobName'] = name
def label_formatter(labels_dict):
return ['--labels={}'.format(
json.dumps(labels_dict).replace(' ', ''))]
command_prefix = (["java", "-cp", dataflow, job_class] if job_class
else ["java", "-jar", dataflow])
self._start_dataflow(task_id, variables, name,
command_prefix, label_formatter)
def start_template_dataflow(self, task_id, variables, parameters, dataflow_template,
append_job_name=True):
if append_job_name:
name = task_id + "-" + str(uuid.uuid1())[:8]
else:
name = task_id
self._start_template_dataflow(
name, variables, parameters, dataflow_template)
def start_python_dataflow(self, task_id, variables, dataflow, py_options,
append_job_name=True):
if append_job_name:
name = task_id + "-" + str(uuid.uuid1())[:8]
else:
name = task_id
variables['job_name'] = name
def label_formatter(labels_dict):
return ['--labels={}={}'.format(key, value)
for key, value in labels_dict.items()]
self._start_dataflow(task_id, variables, name,
["python"] + py_options + [dataflow],
label_formatter)
def _build_cmd(self, task_id, variables, label_formatter):
command = ["--runner=DataflowRunner"]
if variables is not None:
for attr, value in variables.items():
if attr == 'labels':
command += label_formatter(value)
elif value is None or value.__len__() < 1:
command.append("--" + attr)
else:
command.append("--" + attr + "=" + value)
return command
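    # Illustrative only (not part of the original hook): a variables dict like
    # {'project': 'my-project', 'region': 'us-central1',
    #  'labels': {'airflow-version': 'v1-9'}} is rendered (with the Python
    # label formatter) into something like
    # ['--runner=DataflowRunner', '--project=my-project',
    #  '--region=us-central1', '--labels=airflow-version=v1-9'].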
def _start_template_dataflow(self, name, variables, parameters, dataflow_template):
# Builds RuntimeEnvironment from variables dictionary
# https://cloud.google.com/dataflow/docs/reference/rest/v1b3/RuntimeEnvironment
environment = {}
for key in ['maxWorkers', 'zone', 'serviceAccountEmail', 'tempLocation',
'bypassTempDirValidation', 'machineType']:
if key in variables:
environment.update({key: variables[key]})
body = {"jobName": name,
"parameters": parameters,
"environment": environment}
service = self.get_conn()
request = service.projects().templates().launch(projectId=variables['project'],
gcsPath=dataflow_template,
body=body)
response = request.execute()
variables = self._set_variables(variables)
_DataflowJob(self.get_conn(), variables['project'], name, variables['region'],
self.poll_sleep).wait_for_done()
return response
|
{
"content_hash": "8f10bd117beaeb9f6a54c219f2427712",
"timestamp": "",
"source": "github",
"line_count": 244,
"max_line_length": 89,
"avg_line_length": 38.64754098360656,
"alnum_prop": 0.5270413573700954,
"repo_name": "yk5/incubator-airflow",
"id": "a7c75e133bca72f9cdafab61abd152da299b7d55",
"size": "10241",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "airflow/contrib/hooks/gcp_dataflow_hook.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "109698"
},
{
"name": "HTML",
"bytes": "274912"
},
{
"name": "JavaScript",
"bytes": "1988427"
},
{
"name": "Mako",
"bytes": "1284"
},
{
"name": "Python",
"bytes": "3877246"
},
{
"name": "Shell",
"bytes": "47007"
}
],
"symlink_target": ""
}
|
import unittest
from io import StringIO
from ..helperfunctions import _xml_to_list
from ...worksheet import Worksheet
from ...format import Format
from ...sharedstrings import SharedStringTable
class TestAssembleWorksheet(unittest.TestCase):
"""
Test assembling a complete Worksheet file.
"""
def test_assemble_xml_file(self):
"""Test merged cell range"""
self.maxDiff = None
fh = StringIO()
worksheet = Worksheet()
worksheet._set_filehandle(fh)
worksheet.str_table = SharedStringTable()
worksheet.select()
cell_format = Format({'xf_index': 1})
worksheet.merge_range('B3:C3', 'Foo', cell_format)
worksheet.select()
worksheet._assemble_xml_file()
exp = _xml_to_list("""
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<worksheet xmlns="http://schemas.openxmlformats.org/spreadsheetml/2006/main" xmlns:r="http://schemas.openxmlformats.org/officeDocument/2006/relationships">
<dimension ref="B3:C3"/>
<sheetViews>
<sheetView tabSelected="1" workbookViewId="0"/>
</sheetViews>
<sheetFormatPr defaultRowHeight="15"/>
<sheetData>
<row r="3" spans="2:3">
<c r="B3" s="1" t="s">
<v>0</v>
</c>
<c r="C3" s="1"/>
</row>
</sheetData>
<mergeCells count="1">
<mergeCell ref="B3:C3"/>
</mergeCells>
<pageMargins left="0.7" right="0.7" top="0.75" bottom="0.75" header="0.3" footer="0.3"/>
</worksheet>
""")
got = _xml_to_list(fh.getvalue())
self.assertEqual(got, exp)
def test_assemble_xml_file_write(self):
"""Test writing a worksheet with a blank cell with write() method."""
self.maxDiff = None
fh = StringIO()
worksheet = Worksheet()
worksheet._set_filehandle(fh)
cell_format = Format({'xf_index': 1})
# No format. Should be ignored.
worksheet.write(0, 0, None)
worksheet.write(1, 2, None, cell_format)
worksheet.select()
worksheet._assemble_xml_file()
exp = _xml_to_list("""
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<worksheet xmlns="http://schemas.openxmlformats.org/spreadsheetml/2006/main" xmlns:r="http://schemas.openxmlformats.org/officeDocument/2006/relationships">
<dimension ref="C2"/>
<sheetViews>
<sheetView tabSelected="1" workbookViewId="0"/>
</sheetViews>
<sheetFormatPr defaultRowHeight="15"/>
<sheetData>
<row r="2" spans="3:3">
<c r="C2" s="1"/>
</row>
</sheetData>
<pageMargins left="0.7" right="0.7" top="0.75" bottom="0.75" header="0.3" footer="0.3"/>
</worksheet>
""")
got = _xml_to_list(fh.getvalue())
self.assertEqual(got, exp)
def test_assemble_xml_file_A1(self):
"""Test writing a worksheet with a blank cell with A1 notation."""
self.maxDiff = None
fh = StringIO()
worksheet = Worksheet()
worksheet._set_filehandle(fh)
cell_format = Format({'xf_index': 1})
# No format. Should be ignored.
worksheet.write_blank('A1', None)
worksheet.write_blank('C2', None, cell_format)
worksheet.select()
worksheet._assemble_xml_file()
exp = _xml_to_list("""
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<worksheet xmlns="http://schemas.openxmlformats.org/spreadsheetml/2006/main" xmlns:r="http://schemas.openxmlformats.org/officeDocument/2006/relationships">
<dimension ref="C2"/>
<sheetViews>
<sheetView tabSelected="1" workbookViewId="0"/>
</sheetViews>
<sheetFormatPr defaultRowHeight="15"/>
<sheetData>
<row r="2" spans="3:3">
<c r="C2" s="1"/>
</row>
</sheetData>
<pageMargins left="0.7" right="0.7" top="0.75" bottom="0.75" header="0.3" footer="0.3"/>
</worksheet>
""")
got = _xml_to_list(fh.getvalue())
self.assertEqual(got, exp)
|
{
"content_hash": "02eb59a8232177a87a21b99ffd2da0a0",
"timestamp": "",
"source": "github",
"line_count": 131,
"max_line_length": 171,
"avg_line_length": 35.847328244274806,
"alnum_prop": 0.5106473594548552,
"repo_name": "jmcnamara/XlsxWriter",
"id": "8ae573318cc530a1ecfeeb44fa9df2d50b68fea1",
"size": "4909",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "xlsxwriter/test/worksheet/test_merge_range01.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Batchfile",
"bytes": "5113"
},
{
"name": "CSS",
"bytes": "16544"
},
{
"name": "HTML",
"bytes": "13100"
},
{
"name": "Makefile",
"bytes": "7748"
},
{
"name": "Perl",
"bytes": "3503"
},
{
"name": "Python",
"bytes": "2807230"
},
{
"name": "Shell",
"bytes": "7964"
}
],
"symlink_target": ""
}
|
"""
sentry.db.models
~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
import logging
from django.db import models
from django.db.models import signals
from .fields.bounded import BoundedBigAutoField
from .manager import BaseManager
from .query import update
__all__ = ('BaseModel', 'Model', 'sane_repr')
UNSAVED = object()
def sane_repr(*attrs):
if 'id' not in attrs and 'pk' not in attrs:
attrs = ('id',) + attrs
def _repr(self):
cls = type(self).__name__
pairs = (
'%s=%s' % (a, repr(getattr(self, a, None)))
for a in attrs)
return u'<%s at 0x%x: %s>' % (cls, id(self), ', '.join(pairs))
return _repr
class BaseModel(models.Model):
class Meta:
abstract = True
objects = BaseManager()
update = update
def __init__(self, *args, **kwargs):
super(BaseModel, self).__init__(*args, **kwargs)
self._update_tracked_data()
def __getstate__(self):
d = self.__dict__.copy()
# we can't serialize weakrefs
d.pop('_Model__data', None)
return d
def __reduce__(self):
(model_unpickle, stuff, _) = super(BaseModel, self).__reduce__()
return (model_unpickle, stuff, self.__getstate__())
def __setstate__(self, state):
self.__dict__.update(state)
self._update_tracked_data()
def __get_field_value(self, field):
if isinstance(field, models.ForeignKey):
return getattr(self, field.column, None)
return getattr(self, field.name, None)
def _update_tracked_data(self):
"Updates a local copy of attributes values"
if self.id:
data = {}
for f in self._meta.fields:
try:
data[f.column] = self.__get_field_value(f)
except AttributeError as e:
# this case can come up from pickling
logging.exception(unicode(e))
self.__data = data
else:
self.__data = UNSAVED
def has_changed(self, field_name):
"Returns ``True`` if ``field`` has changed since initialization."
if self.__data is UNSAVED:
return False
field = self._meta.get_field(field_name)
return self.__data.get(field_name) != self.__get_field_value(field)
def old_value(self, field_name):
"Returns the previous value of ``field``"
if self.__data is UNSAVED:
return None
return self.__data.get(field_name)
def __model_post_save(instance, **kwargs):
if not isinstance(instance, BaseModel):
return
instance._update_tracked_data()
class Model(BaseModel):
id = BoundedBigAutoField(primary_key=True)
class Meta:
abstract = True
__repr__ = sane_repr('id')
signals.post_save.connect(__model_post_save)
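# --- Illustrative sketch (editorial addition, not part of the upstream module) ---
# A hypothetical model built on the helpers above. It is declared abstract so
# the example imports cleanly without a configured Django app; a concrete
# subclass would get the change tracking shown in the trailing comments.
class ExampleProject(Model):
    name = models.CharField(max_length=64)

    class Meta:
        abstract = True

    __repr__ = sane_repr('name')

# With a concrete subclass, the locally tracked data lets callers diff the
# current attribute values against what was loaded from the database:
#
#     project = ConcreteProject.objects.get(id=1)
#     project.name = 'renamed'
#     project.has_changed('name')   # True
#     project.old_value('name')     # the value as originally loaded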
|
{
"content_hash": "788f94cacb9db139a6e74d7a08cc26e3",
"timestamp": "",
"source": "github",
"line_count": 115,
"max_line_length": 75,
"avg_line_length": 26.017391304347825,
"alnum_prop": 0.5778743315508021,
"repo_name": "daevaorn/sentry",
"id": "2a6e6a861b3cecf262ca7e8609d3f6a0959513f6",
"size": "2992",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/sentry/db/models/base.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "174905"
},
{
"name": "HTML",
"bytes": "200247"
},
{
"name": "JavaScript",
"bytes": "618375"
},
{
"name": "Lua",
"bytes": "21966"
},
{
"name": "Makefile",
"bytes": "5052"
},
{
"name": "Python",
"bytes": "8680827"
},
{
"name": "Shell",
"bytes": "746"
}
],
"symlink_target": ""
}
|
import copy
import inspect
import logging
from django.core import urlresolvers
from django import forms
from django.forms.forms import NON_FIELD_ERRORS # noqa
from django import template
from django.template.defaultfilters import linebreaks # noqa
from django.template.defaultfilters import safe # noqa
from django.template.defaultfilters import slugify # noqa
from django.utils.encoding import force_unicode # noqa
from django.utils.importlib import import_module # noqa
from django.utils.translation import ugettext_lazy as _ # noqa
from horizon import base
from horizon import exceptions
from horizon.templatetags.horizon import has_permissions # noqa
from horizon.utils import html
LOG = logging.getLogger(__name__)
class WorkflowContext(dict):
def __init__(self, workflow, *args, **kwargs):
super(WorkflowContext, self).__init__(*args, **kwargs)
self._workflow = workflow
def __setitem__(self, key, val):
super(WorkflowContext, self).__setitem__(key, val)
return self._workflow._trigger_handlers(key)
def __delitem__(self, key):
return self.__setitem__(key, None)
def set(self, key, val):
return self.__setitem__(key, val)
def unset(self, key):
return self.__delitem__(key)
class ActionMetaclass(forms.forms.DeclarativeFieldsMetaclass):
def __new__(mcs, name, bases, attrs):
# Pop Meta for later processing
opts = attrs.pop("Meta", None)
# Create our new class
cls = super(ActionMetaclass, mcs).__new__(mcs, name, bases, attrs)
# Process options from Meta
cls.name = getattr(opts, "name", name)
cls.slug = getattr(opts, "slug", slugify(name))
cls.permissions = getattr(opts, "permissions", ())
cls.progress_message = getattr(opts,
"progress_message",
_("Processing..."))
cls.help_text = getattr(opts, "help_text", "")
cls.help_text_template = getattr(opts, "help_text_template", None)
return cls
class Action(forms.Form):
"""
An ``Action`` represents an atomic logical interaction you can have with
the system. This is easier to understand with a conceptual example: in the
context of a "launch instance" workflow, actions would include "naming
the instance", "selecting an image", and ultimately "launching the
instance".
Because ``Actions`` are always interactive, they always provide form
controls, and thus inherit from Django's ``Form`` class. However, they
have some additional intelligence added to them:
* ``Actions`` are aware of the permissions required to complete them.
* ``Actions`` have a meta-level concept of "help text" which is meant to be
displayed in such a way as to give context to the action regardless of
where the action is presented in a site or workflow.
* ``Actions`` understand how to handle their inputs and produce outputs,
much like :class:`~horizon.forms.SelfHandlingForm` does now.
``Action`` classes may define the following attributes in a ``Meta``
class within them:
.. attribute:: name
The verbose name for this action. Defaults to the name of the class.
.. attribute:: slug
A semi-unique slug for this action. Defaults to the "slugified" name
of the class.
.. attribute:: permissions
A list of permission names which this action requires in order to be
completed. Defaults to an empty list (``[]``).
.. attribute:: help_text
A string of simple help text to be displayed alongside the Action's
fields.
.. attribute:: help_text_template
A path to a template which contains more complex help text to be
displayed alongside the Action's fields. In conjunction with
:meth:`~horizon.workflows.Action.get_help_text` method you can
customize your help text template to display practically anything.
"""
__metaclass__ = ActionMetaclass
def __init__(self, request, context, *args, **kwargs):
if request.method == "POST":
super(Action, self).__init__(request.POST, initial=context)
else:
super(Action, self).__init__(initial=context)
if not hasattr(self, "handle"):
raise AttributeError("The action %s must define a handle method."
% self.__class__.__name__)
self.request = request
self._populate_choices(request, context)
self.required_css_class = 'required'
def __unicode__(self):
return force_unicode(self.name)
def __repr__(self):
return "<%s: %s>" % (self.__class__.__name__, self.slug)
def _populate_choices(self, request, context):
for field_name, bound_field in self.fields.items():
meth = getattr(self, "populate_%s_choices" % field_name, None)
if meth is not None and callable(meth):
bound_field.choices = meth(request, context)
def get_help_text(self, extra_context=None):
""" Returns the help text for this step. """
text = ""
extra_context = extra_context or {}
if self.help_text_template:
tmpl = template.loader.get_template(self.help_text_template)
context = template.RequestContext(self.request, extra_context)
text += tmpl.render(context)
else:
text += linebreaks(force_unicode(self.help_text))
return safe(text)
def add_error(self, message):
"""
Adds an error to the Action's Step based on API issues.
"""
self._get_errors()[NON_FIELD_ERRORS] = self.error_class([message])
def handle(self, request, context):
"""
Handles any requisite processing for this action. The method should
return either ``None`` or a dictionary of data to be passed to
:meth:`~horizon.workflows.Step.contribute`.
Returns ``None`` by default, effectively making it a no-op.
"""
return None
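# Illustrative sketch (editorial addition, not part of the upstream module):
# a hypothetical Action subclass showing how the ``Meta`` options and the
# ``populate_<field>_choices``/``handle`` hooks described above fit together.
class ExampleLaunchAction(Action):
    resource_name = forms.CharField(max_length=255, label=_("Resource Name"))
    flavor = forms.ChoiceField(label=_("Flavor"))

    class Meta:
        name = _("Details")
        slug = "example_details"
        help_text = _("Describe the resource you want to create.")

    def populate_flavor_choices(self, request, context):
        # Picked up by _populate_choices() purely via its naming convention.
        return [("small", _("Small")), ("large", _("Large"))]

    def handle(self, request, context):
        # The returned dict is handed on to Step.contribute().
        return {"resource_name": self.cleaned_data.get("resource_name"),
                "flavor": self.cleaned_data.get("flavor")}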
class MembershipAction(Action):
"""
An action that allows a user to add/remove members from a group.
Extend the Action class with additional helper method for membership
management.
"""
def get_default_role_field_name(self):
return "default_" + self.slug + "_role"
def get_member_field_name(self, role_id):
return self.slug + "_role_" + role_id
class Step(object):
"""
A step is a wrapper around an action which defines its context in a
workflow. It knows about details such as:
* The workflow's context data (data passed from step to step).
* The data which must be present in the context to begin this step (the
step's dependencies).
* The keys which will be added to the context data upon completion of the
step.
* The connections between this step's fields and changes in the context
data (e.g. if that piece of data changes, what needs to be updated in
this step).
A ``Step`` class has the following attributes:
.. attribute:: action
The :class:`~horizon.workflows.Action` class which this step wraps.
.. attribute:: depends_on
A list of context data keys which this step requires in order to
begin interaction.
.. attribute:: contributes
A list of keys which this step will contribute to the workflow's
context data. Optional keys should still be listed, even if their
values may be set to ``None``.
.. attribute:: connections
A dictionary which maps context data key names to lists of callbacks.
The callbacks may be functions, dotted python paths to functions
which may be imported, or dotted strings beginning with ``"self"``
to indicate methods on the current ``Step`` instance.
.. attribute:: before
Another ``Step`` class. This optional attribute is used to provide
control over workflow ordering when steps are dynamically added to
workflows. The workflow mechanism will attempt to place the current
step before the step specified in the attribute.
.. attribute:: after
Another ``Step`` class. This attribute has the same purpose as
:meth:`~horizon.workflows.Step.before` except that it will instead
attempt to place the current step after the given step.
.. attribute:: help_text
A string of simple help text which will be prepended to the ``Action``
class' help text if desired.
.. attribute:: template_name
A path to a template which will be used to render this step. In
general the default common template should be used. Default:
``"horizon/common/_workflow_step.html"``.
.. attribute:: has_errors
A boolean value which indicates whether or not this step has any
errors on the action within it or in the scope of the workflow. This
attribute will only accurately reflect this status after validation
has occurred.
.. attribute:: slug
Inherited from the ``Action`` class.
.. attribute:: name
Inherited from the ``Action`` class.
.. attribute:: permissions
Inherited from the ``Action`` class.
"""
action_class = None
depends_on = ()
contributes = ()
connections = None
before = None
after = None
help_text = ""
template_name = "horizon/common/_workflow_step.html"
def __repr__(self):
return "<%s: %s>" % (self.__class__.__name__, self.slug)
def __unicode__(self):
return force_unicode(self.name)
def __init__(self, workflow):
super(Step, self).__init__()
self.workflow = workflow
cls = self.__class__.__name__
if not (self.action_class and issubclass(self.action_class, Action)):
raise AttributeError("You must specify an action for %s." % cls)
self.slug = self.action_class.slug
self.name = self.action_class.name
self.permissions = self.action_class.permissions
self.has_errors = False
self._handlers = {}
if self.connections is None:
# We want a dict, but don't want to declare a mutable type on the
# class directly.
self.connections = {}
# Gather our connection handlers and make sure they exist.
for key, handlers in self.connections.items():
self._handlers[key] = []
# TODO(gabriel): This is a poor substitute for broader handling
if not isinstance(handlers, (list, tuple)):
raise TypeError("The connection handlers for %s must be a "
"list or tuple." % cls)
for possible_handler in handlers:
if callable(possible_handler):
# If it's callable we know the function exists and is valid
self._handlers[key].append(possible_handler)
continue
elif not isinstance(possible_handler, basestring):
raise TypeError("Connection handlers must be either "
"callables or strings.")
bits = possible_handler.split(".")
if bits[0] == "self":
root = self
for bit in bits[1:]:
try:
root = getattr(root, bit)
except AttributeError:
raise AttributeError("The connection handler %s "
"could not be found on %s."
% (possible_handler, cls))
handler = root
elif len(bits) == 1:
# Import by name from local module not supported
raise ValueError("Importing a local function as a string "
"is not supported for the connection "
"handler %s on %s."
% (possible_handler, cls))
else:
# Try a general import
module_name = ".".join(bits[:-1])
try:
mod = import_module(module_name)
handler = getattr(mod, bits[-1])
except ImportError:
raise ImportError("Could not import %s from the "
"module %s as a connection "
"handler on %s."
% (bits[-1], module_name, cls))
except AttributeError:
raise AttributeError("Could not import %s from the "
"module %s as a connection "
"handler on %s."
% (bits[-1], module_name, cls))
self._handlers[key].append(handler)
@property
def action(self):
if not getattr(self, "_action", None):
try:
# Hook in the action context customization.
workflow_context = dict(self.workflow.context)
context = self.prepare_action_context(self.workflow.request,
workflow_context)
self._action = self.action_class(self.workflow.request,
context)
except Exception:
LOG.exception("Problem instantiating action class.")
raise
return self._action
def prepare_action_context(self, request, context):
"""
Allows for customization of how the workflow context is passed to the
action; this is the reverse of what "contribute" does to make the
action outputs sane for the workflow. Changes to the context are not
saved globally here. They are localized to the action.
Simply returns the unaltered context by default.
"""
return context
def get_id(self):
""" Returns the ID for this step. Suitable for use in HTML markup. """
return "%s__%s" % (self.workflow.slug, self.slug)
def _verify_contributions(self, context):
for key in self.contributes:
# Make sure we don't skip steps based on weird behavior of
# POST query dicts.
field = self.action.fields.get(key, None)
if field and field.required and not context.get(key):
context.pop(key, None)
failed_to_contribute = set(self.contributes)
failed_to_contribute -= set(context.keys())
if failed_to_contribute:
raise exceptions.WorkflowError("The following expected data was "
"not added to the workflow context "
"by the step %s: %s."
% (self.__class__,
failed_to_contribute))
return True
def contribute(self, data, context):
"""
Adds the data listed in ``contributes`` to the workflow's shared
context. By default, the context is simply updated with all the data
returned by the action.
Note that even if the value of one of the ``contributes`` keys is
not present (e.g. optional) the key should still be added to the
context with a value of ``None``.
"""
if data:
for key in self.contributes:
context[key] = data.get(key, None)
return context
def render(self):
""" Renders the step. """
step_template = template.loader.get_template(self.template_name)
extra_context = {"form": self.action,
"step": self}
context = template.RequestContext(self.workflow.request, extra_context)
return step_template.render(context)
def get_help_text(self):
""" Returns the help text for this step. """
text = linebreaks(force_unicode(self.help_text))
text += self.action.get_help_text()
return safe(text)
def add_error(self, message):
"""
Adds an error to the Step based on API issues.
"""
self.action.add_error(message)
def has_required_fields(self):
"""
Returns True if action contains any required fields
"""
for key in self.contributes:
field = self.action.fields.get(key, None)
if (field and field.required):
return True
return False
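# Illustrative sketch (editorial addition, not part of the upstream module):
# a hypothetical Step wrapping the example action above. ``contributes`` and
# ``connections`` are the attributes documented in the Step docstring; the
# "self.<method>" string form is resolved in __init__ when the step is built.
class ExampleLaunchStep(Step):
    action_class = ExampleLaunchAction
    contributes = ("resource_name", "flavor")
    connections = {"flavor": ("self.handle_flavor_change",)}

    def handle_flavor_change(self, request, context):
        # Invoked whenever "flavor" changes in the shared workflow context.
        return context.get("flavor")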
class WorkflowMetaclass(type):
def __new__(mcs, name, bases, attrs):
super(WorkflowMetaclass, mcs).__new__(mcs, name, bases, attrs)
attrs["_cls_registry"] = set([])
return type.__new__(mcs, name, bases, attrs)
class UpdateMembersStep(Step):
"""A step that allows a user to add/remove members from a group.
.. attribute:: show_roles
Set to False to disable the display of the roles dropdown.
.. attribute:: available_list_title
The title used for the available list column.
.. attribute:: members_list_title
The title used for the members list column.
.. attribute:: no_available_text
The placeholder text used when the available list is empty.
.. attribute:: no_members_text
The placeholder text used when the members list is empty.
"""
template_name = "horizon/common/_workflow_step_update_members.html"
show_roles = True
available_list_title = _("All available")
members_list_title = _("Members")
no_available_text = _("None available.")
no_members_text = _("No members.")
def get_member_field_name(self, role_id):
if issubclass(self.action_class, MembershipAction):
return self.action.get_member_field_name(role_id)
else:
return self.slug + "_role_" + role_id
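# Illustrative sketch (editorial addition, not part of the upstream module):
# a hypothetical membership step overriding the display attributes documented
# above; the membership action it would wrap is assumed to exist elsewhere.
#
#     class ExampleMembersStep(UpdateMembersStep):
#         action_class = ExampleMembershipAction
#         available_list_title = _("All users")
#         members_list_title = _("Project members")
#         no_available_text = _("No users found.")
#         no_members_text = _("No members yet.")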
class Workflow(html.HTMLElement):
"""
A Workflow is a collection of Steps. Its interface is very
straightforward, but it is responsible for handling some very
important tasks such as:
* Handling the injection, removal, and ordering of arbitrary steps.
* Determining if the workflow can be completed by a given user at runtime
based on all available information.
* Dispatching connections between steps to ensure that when context data
changes all the applicable callback functions are executed.
* Verifying/validating the overall data integrity and subsequently
triggering the final method to complete the workflow.
The ``Workflow`` class has the following attributes:
.. attribute:: name
The verbose name for this workflow which will be displayed to the user.
Defaults to the class name.
.. attribute:: slug
The unique slug for this workflow. Required.
.. attribute:: steps
Read-only access to the final ordered set of step instances for
this workflow.
.. attribute:: default_steps
A list of :class:`~horizon.workflows.Step` classes which serve as the
starting point for this workflow's ordered steps. Defaults to an empty
list (``[]``).
.. attribute:: finalize_button_name
The name which will appear on the submit button for the workflow's
form. Defaults to ``"Save"``.
.. attribute:: success_message
A string which will be displayed to the user upon successful completion
of the workflow. Defaults to
``"{{ workflow.name }} completed successfully."``
.. attribute:: failure_message
A string which will be displayed to the user upon failure to complete
the workflow. Defaults to ``"{{ workflow.name }} did not complete."``
.. attribute:: depends_on
A roll-up list of all the ``depends_on`` values compiled from the
workflow's steps.
.. attribute:: contributions
A roll-up list of all the ``contributes`` values compiled from the
workflow's steps.
.. attribute:: template_name
Path to the template which should be used to render this workflow.
In general the default common template should be used. Default:
``"horizon/common/_workflow.html"``.
.. attribute:: entry_point
The slug of the step which should initially be active when the
workflow is rendered. This can be passed in upon initialization of
the workflow, or set anytime after initialization but before calling
either ``get_entry_point`` or ``render``.
.. attribute:: redirect_param_name
The name of a parameter used for tracking the URL to redirect to upon
completion of the workflow. Defaults to ``"next"``.
.. attribute:: object
The object (if any) which this workflow relates to. In the case of
a workflow which creates a new resource the object would be the created
resource after the relevant creation steps have been undertaken. In
the case of a workflow which updates a resource it would be the
resource being updated after it has been retrieved.
"""
__metaclass__ = WorkflowMetaclass
slug = None
default_steps = ()
template_name = "horizon/common/_workflow.html"
finalize_button_name = _("Save")
success_message = _("%s completed successfully.")
failure_message = _("%s did not complete.")
redirect_param_name = "next"
multipart = False
_registerable_class = Step
def __unicode__(self):
return self.name
def __repr__(self):
return "<%s: %s>" % (self.__class__.__name__, self.slug)
def __init__(self, request=None, context_seed=None, entry_point=None,
*args, **kwargs):
super(Workflow, self).__init__(*args, **kwargs)
if self.slug is None:
raise AttributeError("The workflow %s must have a slug."
% self.__class__.__name__)
self.name = getattr(self, "name", self.__class__.__name__)
self.request = request
self.depends_on = set([])
self.contributions = set([])
self.entry_point = entry_point
self.object = None
# Put together our steps in order. Note that we pre-register
# non-default steps so that we can identify them and subsequently
# insert them in order correctly.
self._registry = dict([(step_class, step_class(self)) for step_class
in self.__class__._cls_registry
if step_class not in self.default_steps])
self._gather_steps()
# Determine all the context data we need to end up with.
for step in self.steps:
self.depends_on = self.depends_on | set(step.depends_on)
self.contributions = self.contributions | set(step.contributes)
# Initialize our context. For ease we can preseed it with a
# regular dictionary. This should happen after steps have been
# registered and ordered.
self.context = WorkflowContext(self)
context_seed = context_seed or {}
clean_seed = dict([(key, val)
for key, val in context_seed.items()
if key in self.contributions | self.depends_on])
self.context_seed = clean_seed
self.context.update(clean_seed)
if request and request.method == "POST":
for step in self.steps:
valid = step.action.is_valid()
# Be sure to use the CLEANED data if the workflow is valid.
if valid:
data = step.action.cleaned_data
else:
data = request.POST
self.context = step.contribute(data, self.context)
@property
def steps(self):
if getattr(self, "_ordered_steps", None) is None:
self._gather_steps()
return self._ordered_steps
def get_step(self, slug):
""" Returns the instantiated step matching the given slug. """
for step in self.steps:
if step.slug == slug:
return step
def _gather_steps(self):
ordered_step_classes = self._order_steps()
for default_step in self.default_steps:
self.register(default_step)
self._registry[default_step] = default_step(self)
self._ordered_steps = [self._registry[step_class]
for step_class in ordered_step_classes
if has_permissions(self.request.user,
self._registry[step_class])]
def _order_steps(self):
steps = list(copy.copy(self.default_steps))
additional = self._registry.keys()
for step in additional:
try:
min_pos = steps.index(step.after)
except ValueError:
min_pos = 0
try:
max_pos = steps.index(step.before)
except ValueError:
max_pos = len(steps)
if min_pos > max_pos:
raise exceptions.WorkflowError("The step %(new)s can't be "
"placed between the steps "
"%(after)s and %(before)s; the "
"step %(before)s comes before "
"%(after)s."
% {"new": additional,
"after": step.after,
"before": step.before})
steps.insert(max_pos, step)
return steps
def get_entry_point(self):
"""
Returns the slug of the step which the workflow should begin on.
This method takes into account both already-available data and errors
within the steps.
"""
# If we have a valid specified entry point, use it.
if self.entry_point:
if self.get_step(self.entry_point):
return self.entry_point
# Otherwise fall back to calculating the appropriate entry point.
for step in self.steps:
if step.has_errors:
return step.slug
try:
step._verify_contributions(self.context)
except exceptions.WorkflowError:
return step.slug
# If nothing else, just return the first step.
return self.steps[0].slug
def _trigger_handlers(self, key):
responses = []
handlers = [(step.slug, f) for step in self.steps
for f in step._handlers.get(key, [])]
for slug, handler in handlers:
responses.append((slug, handler(self.request, self.context)))
return responses
@classmethod
def register(cls, step_class):
""" Registers a :class:`~horizon.workflows.Step` with the workflow. """
if not inspect.isclass(step_class):
raise ValueError('Only classes may be registered.')
elif not issubclass(step_class, cls._registerable_class):
raise ValueError('Only %s classes or subclasses may be registered.'
% cls._registerable_class.__name__)
if step_class in cls._cls_registry:
return False
else:
cls._cls_registry.add(step_class)
return True
@classmethod
def unregister(cls, step_class):
"""
Unregisters a :class:`~horizon.workflows.Step` from the workflow.
"""
try:
cls._cls_registry.remove(step_class)
except KeyError:
raise base.NotRegistered('%s is not registered' % cls)
return cls._unregister(step_class)
def validate(self, context):
"""
Hook for custom context data validation. Should return a boolean
value or raise :class:`~horizon.exceptions.WorkflowValidationError`.
"""
return True
def is_valid(self):
"""
Verifies that all required data is present in the context and
calls the ``validate`` method to allow for finer-grained checks
on the context data.
"""
missing = self.depends_on - set(self.context.keys())
if missing:
raise exceptions.WorkflowValidationError(
"Unable to complete the workflow. The values %s are "
"required but not present." % ", ".join(missing))
# Validate each step. Cycle through all of them to catch all errors
# in one pass before returning.
steps_valid = True
for step in self.steps:
if not step.action.is_valid():
steps_valid = False
step.has_errors = True
if not steps_valid:
return steps_valid
return self.validate(self.context)
def finalize(self):
"""
Finalizes a workflow by running through all the actions in order
and calling their ``handle`` methods. Returns ``True`` on full success,
or ``False`` for a partial success, e.g. there were non-critical
errors. (If it failed completely the function wouldn't return.)
"""
partial = False
for step in self.steps:
try:
data = step.action.handle(self.request, self.context)
if data is True or data is None:
continue
elif data is False:
partial = True
else:
self.context = step.contribute(data or {}, self.context)
except Exception:
partial = True
exceptions.handle(self.request)
if not self.handle(self.request, self.context):
partial = True
return not partial
def handle(self, request, context):
"""
Handles any final processing for this workflow. Should return a boolean
value indicating success.
"""
return True
def get_success_url(self):
"""
Returns a URL to redirect the user to upon completion. By default it
will attempt to parse a ``success_url`` attribute on the workflow,
which can take the form of a reversible URL pattern name, or a
standard HTTP URL.
"""
try:
return urlresolvers.reverse(self.success_url)
except urlresolvers.NoReverseMatch:
return self.success_url
def format_status_message(self, message):
"""
Hook to allow customization of the message returned to the user
upon successful or unsuccessful completion of the workflow.
By default it simply inserts the workflow's name into the message
string.
"""
if "%s" in message:
return message % self.name
else:
return message
def render(self):
""" Renders the workflow. """
workflow_template = template.loader.get_template(self.template_name)
extra_context = {"workflow": self}
if self.request.is_ajax():
extra_context['modal'] = True
context = template.RequestContext(self.request, extra_context)
return workflow_template.render(context)
def get_absolute_url(self):
""" Returns the canonical URL for this workflow.
This is used for the POST action attribute on the form element
wrapping the workflow.
For convenience it defaults to the value of
``request.get_full_path()`` with any query string stripped off,
e.g. the path at which the workflow was requested.
"""
return self.request.get_full_path().partition('?')[0]
def add_error_to_step(self, message, slug):
"""
Adds an error to the workflow's Step with the
specified slug based on API issues. This is useful
when you wish for API errors to appear as errors on
the form rather than using the messages framework.
"""
step = self.get_step(slug)
if step:
step.add_error(message)
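# Illustrative sketch (editorial addition, not part of the upstream module):
# a hypothetical Workflow tying the example step above to the attributes
# documented in the Workflow docstring. ``success_url`` is assumed to be a
# reversible URL pattern name in the hosting project.
class ExampleLaunchWorkflow(Workflow):
    slug = "example_launch"
    name = _("Launch Example Resource")
    finalize_button_name = _("Launch")
    success_message = _('Created resource "%s".')
    failure_message = _('Unable to create resource "%s".')
    success_url = "horizon:index"
    default_steps = (ExampleLaunchStep,)

    def handle(self, request, context):
        # Final processing once every step's action has run; returning True
        # lets finalize() report the workflow as fully successful.
        return True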
|
{
"content_hash": "ee67c3a91e23405eaa8b34ecf7fb938a",
"timestamp": "",
"source": "github",
"line_count": 862,
"max_line_length": 79,
"avg_line_length": 37.75986078886311,
"alnum_prop": 0.5869611969645765,
"repo_name": "openstack-ja/horizon",
"id": "9412a35241bc962db3669e722c14ff6e99b2eef6",
"size": "33199",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "horizon/workflows/base.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "160649"
},
{
"name": "JavaScript",
"bytes": "421267"
},
{
"name": "Python",
"bytes": "2777515"
},
{
"name": "Shell",
"bytes": "13001"
}
],
"symlink_target": ""
}
|
from datetime import datetime
from django import forms
from django.contrib import admin, messages
from django.contrib.admin import SimpleListFilter
from django.utils.translation import ugettext_lazy as _
from treeadmin.admin import TreeAdmin
from arkestra_utilities.admin_mixins import GenericModelAdmin, GenericModelForm, HostedByFilter, fieldsets
from links.admin import ObjectLinkInline
from models import NewsArticle, NewsSource, Event, EventType
class NewsAndEventsForm(GenericModelForm):
# a shared form for news and events
pass
class NewsAndEventsAdmin(GenericModelAdmin):
exclude = ('content',)
list_display = ['short_title', 'date', 'hosted_by', 'published', 'in_lists']
list_editable = ['published', 'in_lists']
related_search_fields = ['hosted_by', 'external_url',]
prepopulated_fields = {'slug': ['title']}
list_max_show_all = 1000
list_per_page = 1000
# this or something like it can be enabled when the
# autocomplete-stops-working-after-adding-an-inline
# bug has been addressed
# it will hugely speed up loading of news, events etc with lots of people in the m2m
# class NewsPersonInline(AutocompleteMixin, admin.TabularInline):
# model = NewsArticle.please_contact.through
# related_search_fields = ["person", ]
# extra = 1
# def _media(self):
# return super(AutocompleteMixin, self).media
# media = property(_media)
class NewsArticleForm(NewsAndEventsForm):
class Meta(NewsAndEventsForm.Meta):
model = NewsArticle
def clean(self):
super(NewsArticleForm, self).clean()
# sticky_until value must be greater (i.e. later) than the date
date = datetime.date(self.cleaned_data['date'])
self.cleaned_data['sticky_until'] = self.cleaned_data.get('sticky_until', date)
# if importance = 0, it's not sticky
self.cleaned_data['sticky_until'] = self.cleaned_data['sticky_until'] or datetime.date(self.cleaned_data['date'])
if self.cleaned_data['importance'] == 0:
self.cleaned_data['sticky_until'] = None
elif self.cleaned_data['sticky_until'] < datetime.date(self.cleaned_data['date']):
self.cleaned_data['sticky_until'] = datetime.date(self.cleaned_data['date'])
return self.cleaned_data
class NewsArticleAdmin(NewsAndEventsAdmin):
# some general settings
form = NewsArticleForm
list_filter = ('date', HostedByFilter)
read_only_fields = ('sticky_until',)
# inlines = [MembershipInline,]
fieldset_stickiness = ('How this item should behave in lists', {'fields': ('sticky_until', 'is_sticky_everywhere',)})
tabs = (
('Basic', {'fieldsets': (fieldsets["basic"], fieldsets["host"], fieldsets["image"], fieldsets["publishing_control"],),}),
('Date & significance', {'fieldsets': (fieldsets["date"], fieldsets["importance"], fieldset_stickiness)}),
('Body', {'fieldsets': (fieldsets["body"],)}),
('Where to Publish', {'fieldsets': (fieldsets["where_to_publish"],)}),
('Related people', {'fieldsets': (fieldsets["people"],)}),
('Links', {'inlines': [ObjectLinkInline]}),
('Advanced Options', {'fieldsets': (fieldsets["url"], fieldsets["slug"],)}),
)
class EventForm(NewsAndEventsForm):
class Meta(NewsAndEventsForm.Meta):
model = Event
def clean(self):
# if any fields are invalid, give up now
if not self.is_valid():
return self.cleaned_data
# 1. obtain missing information from parent
parent = self.cleaned_data['parent']
if parent:
# the many-to-many fields can be inherited
m2m_fields = ['publish_to', ] #organisers ,'enquiries', 'registration_enquiries',
for field_name in m2m_fields:
self.cleaned_data[field_name] = self.cleaned_data[field_name] or list(getattr(parent,field_name).all())
# other fields
attribute_list = ['building', 'precise_location', 'hosted_by', 'access_note']
for field_name in attribute_list:
self.cleaned_data[field_name] = self.cleaned_data[field_name] or getattr(parent,field_name)
# if parent is single day event, and this one has no date set, inherit the parent's
if not self.cleaned_data["date"]:
if parent.single_day_event:
self.cleaned_data["date"] = self.cleaned_data["end_date"] = parent.date
self.cleaned_data["single_day_event"] = True
message = u"You didn't say, but I am guessing that this is a single-day event on " + unicode(self.cleaned_data["date"]) + u"."
messages.add_message(self.request, messages.INFO, message)
else:
raise forms.ValidationError(u"I'm terribly sorry, I can't work out when this event is supposed to start. You'll have to enter that information yourself.")
# 2. go and do the checks in the parent class
super(EventForm, self).clean()
# 3. check dates
if self.cleaned_data["date"]:
if self.cleaned_data["series"]:
raise forms.ValidationError("An event with a start date can't also be a series of events. Please correct this.")
elif self.cleaned_data["end_date"] == self.cleaned_data["date"]:
self.cleaned_data["single_day_event"] = True
elif not self.cleaned_data["end_date"]:
self.cleaned_data["single_day_event"] = True
message = u"You didn't enter an end date, so I have assumed this is a single-day event"
messages.add_message(self.request, messages.INFO, message)
elif not self.cleaned_data["single_day_event"]:
if self.cleaned_data["end_date"] < self.cleaned_data["date"]:
raise forms.ValidationError('This event appears to end before it starts, which is very silly. Please correct the dates.')
if not self.cleaned_data["start_time"] and self.cleaned_data["end_time"]:
self.cleaned_data["end_time"] = None
message = u"You didn't enter a start time, so I deleted the end time. I hope that's OK."
messages.add_message(self.request, messages.WARNING, message)
if self.cleaned_data["single_day_event"]:
self.cleaned_data["end_date"] = self.cleaned_data["date"]
if not self.cleaned_data["start_time"]:
message = u"You have a lovely smile."
messages.add_message(self.request, messages.INFO, message)
self.cleaned_data["end_time"] = None
elif self.cleaned_data["end_time"] and self.cleaned_data["end_time"] < self.cleaned_data["start_time"]:
raise forms.ValidationError('This event appears to end before it starts, which is very silly. Please correct the times.')
self.cleaned_data['jumps_queue_on'] = self.cleaned_data['jumps_queue_on'] or self.cleaned_data['date']
if self.cleaned_data['importance'] == 0:
self.cleaned_data['jumps_queue_on'] = None
elif self.cleaned_data['jumps_queue_on'] > self.cleaned_data['date']:
self.cleaned_data['jumps_queue_on'] = self.cleaned_data['date']
# an event without a start date can be assumed to be a series of events
else:
self.cleaned_data["series"] = True
message = u"You didn't enter a start date, so I will assume this is a series of events."
messages.add_message(self.request, messages.INFO, message)
self.cleaned_data['date'] = self.cleaned_data['end_date'] = self.cleaned_data['start_time'] = self.cleaned_data['end_time'] = None
self.cleaned_data['single_day_event'] = False
self.cleaned_data['jumps_queue_on'] = None
self.cleaned_data['importance'] = 0
return self.cleaned_data
'''
def clean_enquiries(self):
data = self.cleaned_data['enquiries']
parent = self.cleaned_data['parent']
print "cleaning enquiries: %s (%s) parent: %s (%s)" % (data,type(data), parent, type(parent))
if not data and parent:
print " getting defaultdata from parent"
data = list(parent.enquiries.all())
return data
'''
class EventIsSeries(SimpleListFilter):
title = _('actual/series')
parameter_name = 'series'
def lookups(self, request, model_admin):
return (
('actual', _('Actual')),
('series', _('Series')),
)
def queryset(self, request, queryset):
if self.value() == 'actual':
return queryset.filter(series=False)
if self.value() == 'series':
return queryset.filter(series=True)
class EventAdmin(NewsAndEventsAdmin, TreeAdmin):
# some general settings
form = EventForm
filter_horizontal = (
'please_contact',
'publish_to',
'registration_enquiries',
'featuring',
)
ordering = ['type',]
list_filter = (EventIsSeries, 'date', HostedByFilter)
save_as = True
filter_include_ancestors = True
# autocomplete fields
related_search_fields = ['hosted_by','parent','building', 'external_url']
# the tabs
fieldset_type = ('Type', {'fields': ('type',)},)
fieldset_building = ('Building', {'fields': ('building',)},)
fieldset_when = ('When', {'fields': ('series', 'single_day_event', ('date', 'start_time'), ('end_date', 'end_time'))})
fieldsets_relationships = (
('Parent & children', {
'fields': ('parent', 'child_list_heading',),},),
('When displaying the children of this item in lists', {
'fields': ('show_titles', 'display_series_summary',),},),
)
fieldset_registration = ('Registration enquiries', {'fields': ('registration_enquiries',)})
fieldset_featuring = ('Featured people', {'fields': ('featuring',)})
fieldset_jumpiness = ('How this item should behave in lists', {'fields': ('jumps_queue_on', 'jumps_queue_everywhere')})
tabs = (
('Basic', {'fieldsets': (fieldsets["basic"], fieldset_type, fieldsets["host"], fieldsets["image"], fieldsets["publishing_control"],)}),
('Date & significance', {'fieldsets':
(
fieldset_when,
fieldsets["importance"],
fieldset_jumpiness,)}
),
('Location', {'fieldsets': (fieldset_building, fieldsets["location"],)}),
('Parent & children', {'fieldsets': fieldsets_relationships}),
('Body', {'fieldsets': (fieldsets["body"],)}),
('Where to Publish', {'fieldsets': (fieldsets["where_to_publish"],)}),
('People', {'fieldsets': (fieldset_featuring, fieldsets["people"], fieldset_registration)}),
('Links', {'inlines': (ObjectLinkInline,),}),
('Advanced Options', {'fieldsets': (fieldsets["url"], fieldsets["slug"],)}),
)
class EventTypeAdmin(admin.ModelAdmin):
pass
class NewsSourceAdmin(admin.ModelAdmin):
pass
admin.site.register(Event,EventAdmin)
admin.site.register(NewsSource,NewsSourceAdmin)
admin.site.register(EventType,EventTypeAdmin)
admin.site.register(NewsArticle,NewsArticleAdmin)
|
{
"content_hash": "06ba4834a1c6f0578c5cab81625f7091",
"timestamp": "",
"source": "github",
"line_count": 251,
"max_line_length": 174,
"avg_line_length": 45.37051792828685,
"alnum_prop": 0.6171408500175624,
"repo_name": "evildmp/Arkestra",
"id": "710c280f4b8fc9a104c84343620aa0b32edd0e7b",
"size": "11388",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "news_and_events/admin.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "161712"
},
{
"name": "HTML",
"bytes": "724671"
},
{
"name": "JavaScript",
"bytes": "656447"
},
{
"name": "Makefile",
"bytes": "220"
},
{
"name": "Python",
"bytes": "1524510"
}
],
"symlink_target": ""
}
|
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Ambari Agent
"""
import os
import sys
from resource_management import *
from resource_management.libraries.functions import stack_select
from resource_management.libraries.functions import StackFeature
from resource_management.libraries.functions.version import compare_versions, format_stack_version
from resource_management.libraries.functions.format import format
from resource_management.libraries.functions.stack_features import check_stack_feature
from ambari_commons import OSConst
from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
from zookeeper import zookeeper
from resource_management.core.exceptions import ClientComponentHasNoStatus
class ZookeeperClient(Script):
def configure(self, env):
import params
env.set_params(params)
zookeeper(type='client')
pass
def start(self, env, upgrade_type=None):
import params
env.set_params(params)
self.configure(env)
pass
def stop(self, env, upgrade_type=None):
import params
env.set_params(params)
pass
def status(self, env):
raise ClientComponentHasNoStatus()
@OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
class ZookeeperClientLinux(ZookeeperClient):
def install(self, env):
self.install_packages(env)
self.configure(env)
def pre_upgrade_restart(self, env, upgrade_type=None):
Logger.info("Executing Stack Upgrade pre-restart")
import params
env.set_params(params)
if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE, format_stack_version(params.version)):
stack_select.select_packages(params.version)
@OsFamilyImpl(os_family=OSConst.WINSRV_FAMILY)
class ZookeeperClientWindows(ZookeeperClient):
def install(self, env):
# client checks env var to determine if it is installed
if not os.environ.has_key("ZOOKEEPER_HOME"):
self.install_packages(env)
self.configure(env)
if __name__ == "__main__":
ZookeeperClient().execute()
|
{
"content_hash": "024d67bbb576e677920d1223545afeb6",
"timestamp": "",
"source": "github",
"line_count": 79,
"max_line_length": 114,
"avg_line_length": 34.10126582278481,
"alnum_prop": 0.7754268745360059,
"repo_name": "arenadata/ambari",
"id": "bca007b467382c103cdc6d880caf25b53a6481d1",
"size": "2694",
"binary": false,
"copies": "2",
"ref": "refs/heads/branch-adh-1.6",
"path": "ambari-server/src/main/resources/stacks/ADH/1.0/services/ZOOKEEPER/package/scripts/zookeeper_client.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "46700"
},
{
"name": "C",
"bytes": "331204"
},
{
"name": "C#",
"bytes": "215907"
},
{
"name": "C++",
"bytes": "257"
},
{
"name": "CSS",
"bytes": "343739"
},
{
"name": "CoffeeScript",
"bytes": "8465"
},
{
"name": "Dockerfile",
"bytes": "6387"
},
{
"name": "EJS",
"bytes": "777"
},
{
"name": "FreeMarker",
"bytes": "2654"
},
{
"name": "Gherkin",
"bytes": "990"
},
{
"name": "Groovy",
"bytes": "15882"
},
{
"name": "HTML",
"bytes": "717983"
},
{
"name": "Handlebars",
"bytes": "1819641"
},
{
"name": "Java",
"bytes": "29172298"
},
{
"name": "JavaScript",
"bytes": "18571926"
},
{
"name": "Jinja",
"bytes": "1490416"
},
{
"name": "Less",
"bytes": "412933"
},
{
"name": "Makefile",
"bytes": "11111"
},
{
"name": "PHP",
"bytes": "149648"
},
{
"name": "PLpgSQL",
"bytes": "287501"
},
{
"name": "PowerShell",
"bytes": "2090340"
},
{
"name": "Python",
"bytes": "18507704"
},
{
"name": "R",
"bytes": "3943"
},
{
"name": "Ruby",
"bytes": "38590"
},
{
"name": "SCSS",
"bytes": "40072"
},
{
"name": "Shell",
"bytes": "924115"
},
{
"name": "Stylus",
"bytes": "820"
},
{
"name": "TSQL",
"bytes": "42351"
},
{
"name": "Vim script",
"bytes": "5813"
},
{
"name": "sed",
"bytes": "2303"
}
],
"symlink_target": ""
}
|
import gevent.monkey
gevent.monkey.patch_all() # noqa
import logging
from cfgm_common import BGP_RTGT_MIN_ID
from vnc_cfg_api_server.tests import test_case
from vnc_cfg_api_server.vnc_cfg_types import RouteTargetServer
logger = logging.getLogger(__name__)
class TestRouteTargetBase(test_case.ApiServerTestCase):
@classmethod
def setUpClass(cls, *args, **kwargs):
cls.console_handler = logging.StreamHandler()
cls.console_handler.setLevel(logging.DEBUG)
logger.addHandler(cls.console_handler)
super(TestRouteTargetBase, cls).setUpClass(*args, **kwargs)
@classmethod
def tearDownClass(cls, *args, **kwargs):
logger.removeHandler(cls.console_handler)
super(TestRouteTargetBase, cls).tearDownClass(*args, **kwargs)
@property
def api(self):
return self._vnc_lib
def test_route_target_format_name(self):
# list of tested route target names with succeed or not and parsed
# result
tested_values = [
(':::', False, None),
('not-enough-part-in-rt', False, None),
('to:much:part:in:rt', False, None),
('bad-prefix:1:1', False, None),
('target:non-digit-asn:1', False, None),
('target:1:non-digit-target', False, None),
('target:1:1', True, (1, 1)),
('target:1.1.1.1:1', True, ('1.1.1.1', 1)),
(['target', '1.1.1.1', '1'], True, ('1.1.1.1', 1)),
(['target', '1', 1], True, (1, 1)),
]
for rt_name, expected_succeed, expected_result in tested_values:
succeed, result = RouteTargetServer._parse_route_target_name(
rt_name)
if expected_succeed:
if not succeed:
self.fail("Cannot parse route target '%s'" % rt_name)
self.assertEqual(result, expected_result)
if not expected_succeed and succeed:
self.fail("Succeed to parse route target '%s'" % rt_name)
def test_route_target_is_in_system_range(self):
# RT name, global ASN, expected result
tested_values = [
('target:1:1', 42, True),
('target:42:1', 42, True),
('target:42:%d' % (BGP_RTGT_MIN_ID + 1000), 42, False),
]
for rt_name, global_asn, expected_result in tested_values:
ok, result = RouteTargetServer.is_user_defined(rt_name, global_asn)
if not ok:
self.fail("Cannot determine if it is a user defined route "
"target: %s", result[1])
self.assertEqual(result, expected_result,
"Route target: %s and global ASN: %d" %
(rt_name, global_asn))
|
{
"content_hash": "2abc3177362d399a722efbe3b3ef6e61",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 79,
"avg_line_length": 38.943661971830984,
"alnum_prop": 0.5674502712477396,
"repo_name": "eonpatapon/contrail-controller",
"id": "79b15679878b5ee40edf15b54b6880d360e20ad7",
"size": "2834",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/config/api-server/vnc_cfg_api_server/tests/test_route_target.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "722794"
},
{
"name": "C++",
"bytes": "22097574"
},
{
"name": "GDB",
"bytes": "39260"
},
{
"name": "Go",
"bytes": "47213"
},
{
"name": "Java",
"bytes": "91653"
},
{
"name": "Lua",
"bytes": "13345"
},
{
"name": "PowerShell",
"bytes": "1810"
},
{
"name": "Python",
"bytes": "7240671"
},
{
"name": "Roff",
"bytes": "41295"
},
{
"name": "Ruby",
"bytes": "13596"
},
{
"name": "Shell",
"bytes": "53994"
}
],
"symlink_target": ""
}
|
import clr
clr.AddReference('RevitAPI')
from Autodesk.Revit.DB import *
clr.AddReference("RevitServices")
import RevitServices
from RevitServices.Persistence import DocumentManager
doc = DocumentManager.Instance.CurrentDBDocument
dynunittype = IN[0]
if IN[1] > 2021:
if str(dynunittype.GetType()) == "Autodesk.Revit.DB.ForgeTypeId":
unittype = dynunittype
elif str(dynunittype) == 'DynamoUnits.Area':
unittype = ForgeTypeId('autodesk.spec.aec:area-2.0.0')
elif str(dynunittype) == 'DynamoUnits.Length':
unittype = ForgeTypeId('autodesk.spec.aec:length-2.0.0')
elif str(dynunittype) == 'DynamoUnits.Volume':
unittype = ForgeTypeId('autodesk.spec.aec:volume-2.0.0')
else:
unittype = None
formatoptions = doc.GetUnits().GetFormatOptions(unittype)
dispunits = formatoptions.GetUnitTypeId()
symtype = formatoptions.GetSymbolTypeId()
if symtype.TypeId == '': dispsym = None
else: dispsym = LabelUtils.GetLabelForSymbol(symtype)
else:
if str(dynunittype.GetType()) == "Autodesk.Revit.DB.UnitType":
unittype = dynunittype
elif str(dynunittype) == 'DynamoUnits.Area':
unittype = UnitType.UT_Area
elif str(dynunittype) == 'DynamoUnits.Length':
unittype = UnitType.UT_Length
elif str(dynunittype) == 'DynamoUnits.Volume':
unittype = UnitType.UT_Volume
else:
unittype = None
formatoptions = doc.GetUnits().GetFormatOptions(unittype)
dispunits = formatoptions.DisplayUnits
symtype = formatoptions.UnitSymbol
if symtype == UnitSymbolType.UST_NONE: dispsym = None
else: dispsym = LabelUtils.GetLabelFor(symtype)
OUT = (dispunits,dispsym)
|
{
"content_hash": "a37a759147efb17435b55423e7f3903d",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 66,
"avg_line_length": 36.372093023255815,
"alnum_prop": 0.7615089514066496,
"repo_name": "andydandy74/ClockworkForDynamo",
"id": "0afe12d7d3c0c832a8985e62c21f05af66d13b2b",
"size": "1564",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nodes/2.x/python/UnitType.DisplayUnitType.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "717382"
}
],
"symlink_target": ""
}
|
import requests
import json
from collections import OrderedDict
from test_framework.test_framework import OpenBazaarTestFramework, TestFailure
class UploadListingTest(OpenBazaarTestFramework):
def __init__(self):
super().__init__()
self.num_nodes = 1
def setup_network(self):
self.setup_nodes()
def run_test(self):
with open('testdata/listing.json') as listing_file:
listing_json = json.load(listing_file, object_pairs_hook=OrderedDict)
if self.bitcoincash:
listing_json["metadata"]["pricingCurrency"] = "tbch"
api_url = self.nodes[0]["gateway_url"] + "ob/listing"
r = requests.post(api_url, data=json.dumps(listing_json, indent=4))
if r.status_code == 404:
raise TestFailure("UploadListingTest - FAIL: Listing post endpoint not found")
elif r.status_code != 200:
resp = json.loads(r.text)
raise TestFailure("UploadListingTest - FAIL: Listing POST failed. Reason: %s", resp["reason"])
api_url = self.nodes[0]["gateway_url"] + "ob/inventory"
r = requests.get(api_url)
if r.status_code == 200:
resp = json.loads(r.text)
inv = resp["ron-swanson-tshirt"]
if inv is None:
raise TestFailure("UploadListingTest - FAIL: Did not return inventory for listing")
if inv["inventory"] != 213:
raise TestFailure("UploadListingTest - FAIL: Returned incorrect amount of inventory: %d", inv["inventory"])
elif r.status_code == 404:
raise TestFailure("UploadListingTest - FAIL: Listing post endpoint not found")
else:
resp = json.loads(r.text)
raise TestFailure("UploadListingTest - FAIL: Listing POST failed. Reason: %s", resp["reason"])
print("UploadListingTest - PASS")
if __name__ == '__main__':
print("Running UploadListingTest")
UploadListingTest().main()
|
{
"content_hash": "d440da3d4a28e2792961f6cd4c2d0535",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 123,
"avg_line_length": 41.829787234042556,
"alnum_prop": 0.6230925737538149,
"repo_name": "gubatron/openbazaar-go",
"id": "f607d64aa0db1545a6f1ef949d8ab6fcc1e059e8",
"size": "1966",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "qa/upload_listing.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "348"
},
{
"name": "Go",
"bytes": "1426347"
},
{
"name": "Makefile",
"bytes": "633"
},
{
"name": "Python",
"bytes": "279318"
},
{
"name": "Shell",
"bytes": "2516"
}
],
"symlink_target": ""
}
|
import unittest
from PyQt4.Qt import *
import sys
from eigen import *
from atom import *
from bond import *
from molecule import *
from fragment import *
from cube import *
from residue import *
from mesh import *
from primitivelist import *
from pluginmanager import *
from toolgroup import *
from tool import *
from color import *
from engine import *
from extension import *
from glwidget import *
from camera import *
if __name__ == "__main__":
suite0 = unittest.TestLoader().loadTestsFromTestCase(TestEigen)
suite1 = unittest.TestLoader().loadTestsFromTestCase(TestAtom)
suite2 = unittest.TestLoader().loadTestsFromTestCase(TestBond)
suite3 = unittest.TestLoader().loadTestsFromTestCase(TestMolecule)
suite4 = unittest.TestLoader().loadTestsFromTestCase(TestFragment)
suite5 = unittest.TestLoader().loadTestsFromTestCase(TestCube)
suite6 = unittest.TestLoader().loadTestsFromTestCase(TestResidue)
suite7 = unittest.TestLoader().loadTestsFromTestCase(TestMesh)
suite8 = unittest.TestLoader().loadTestsFromTestCase(TestPrimitiveList)
suite9 = unittest.TestLoader().loadTestsFromTestCase(TestPluginManager)
suite10 = unittest.TestLoader().loadTestsFromTestCase(TestToolGroup)
suite12 = unittest.TestLoader().loadTestsFromTestCase(TestTool)
suite13 = unittest.TestLoader().loadTestsFromTestCase(TestColor)
suite14 = unittest.TestLoader().loadTestsFromTestCase(TestEngine)
suite15 = unittest.TestLoader().loadTestsFromTestCase(TestExtension)
suite16 = unittest.TestLoader().loadTestsFromTestCase(TestGLWidget)
suite17 = unittest.TestLoader().loadTestsFromTestCase(TestCamera)
alltests = unittest.TestSuite([suite1, suite2, suite3, suite4, suite5, suite6, suite7, suite8, suite9, suite10,
suite12, suite13, suite14, suite15, suite16, suite17, suite0])
app = QApplication(sys.argv)
unittest.TextTestRunner(verbosity=2).run(alltests)
#sys.exit(app.exec_())
|
{
"content_hash": "9972fdcb63d1f8e2ece05c302ff8990e",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 114,
"avg_line_length": 38.32,
"alnum_prop": 0.7865344467640919,
"repo_name": "jjenki11/blaze-chem-rendering",
"id": "7475cc9e9a902a7514bc3c80abe58e5e1ee6f58f",
"size": "1916",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "avogadro/avogadro-1.1.1/libavogadro/src/python/unittest/suite.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Shell",
"bytes": "2476"
}
],
"symlink_target": ""
}
|
"""Module for testing the update network command."""
import unittest
if __name__ == "__main__":
import utils
utils.import_depends()
from brokertest import TestBrokerCommand
class TestUpdateNetwork(TestBrokerCommand):
def test_100_update(self):
command = ["update", "network", "--network", "excx-net",
"--network_environment", "excx",
"--building", "ut", "--type", "dmz-net",
"--side", "b", "--comments", "New network comments"]
self.noouttest(command)
def test_110_verify(self):
command = ["show", "network", "--network", "excx-net",
"--network_environment", "excx"]
out = self.commandtest(command)
self.matchoutput(out, "Comments: New network comments", command)
self.matchoutput(out, "Sysloc: ut.ny.na", command)
self.matchoutput(out, "Network Type: dmz-net", command)
self.matchoutput(out, "Side: b", command)
def test_120_update_rename(self):
command = ["update", "network", "--network", "netsvcmap",
"--rename_to", "rename-test", "--comments", "New comment"]
self.noouttest(command)
def test_121_update_rename_verify(self):
command = ["show", "network", "--network", "rename-test"]
out = self.commandtest(command)
self.matchoutput(out, "Network: rename-test", command)
self.matchoutput(out, "Comments: New comment", command)
def test_122_update_rename_existing(self):
net = self.net["np06bals03_v103"]
command = ["update", "network", "--network", "rename-test",
"--rename_to", "np06bals03_v103"]
out,err = self.successtest(command)
self.matchoutput(err, "WARNING: Network name {} is already used for address {}/{}."
.format("np06bals03_v103", net.ip, net.prefixlen), command)
command = ["update", "network", "--ip", net.ip, "--rename_to", "netsvcmap"]
self.noouttest(command)
def test_200_update_utdmz1(self):
net = self.net["ut_dmz1"]
command = ["update_network",
"--ip=%s" % net.ip,
"--network_compartment="]
self.noouttest(command)
def test_201_verify_utdmz1(self):
command = ["search", "network", "--network_compartment", "perimeter.ut"]
self.noouttest(command)
# There should be a test_constraint_network.py one day...
def test_900_delinuse(self):
net = self.net["unknown0"]
command = ["del", "network", "--ip", net.ip]
out = self.badrequesttest(command)
self.matchoutput(out, "Network %s [%s] is still in use" %
(net.name, net), command)
if __name__ == '__main__':
suite = unittest.TestLoader().loadTestsFromTestCase(TestUpdateNetwork)
unittest.TextTestRunner(verbosity=2).run(suite)
|
{
"content_hash": "d4ba7966e7a275f4c70977aa061646bf",
"timestamp": "",
"source": "github",
"line_count": 74,
"max_line_length": 91,
"avg_line_length": 39.21621621621622,
"alnum_prop": 0.5809786354238456,
"repo_name": "quattor/aquilon",
"id": "2a301554d3f4cdd611875b204bbd419a618c977d",
"size": "3647",
"binary": false,
"copies": "1",
"ref": "refs/heads/upstream",
"path": "tests/broker/test_update_network.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "DIGITAL Command Language",
"bytes": "1823"
},
{
"name": "Makefile",
"bytes": "5732"
},
{
"name": "Mako",
"bytes": "4178"
},
{
"name": "PLSQL",
"bytes": "102109"
},
{
"name": "PLpgSQL",
"bytes": "8091"
},
{
"name": "Pan",
"bytes": "1058"
},
{
"name": "Perl",
"bytes": "6057"
},
{
"name": "Python",
"bytes": "5884984"
},
{
"name": "SQLPL",
"bytes": "869"
},
{
"name": "Shell",
"bytes": "33547"
},
{
"name": "Smarty",
"bytes": "4603"
}
],
"symlink_target": ""
}
|
import copy
from decimal import Decimal
import inspect
from urlparse import urljoin
from jfr_playoff.dto import Match, Team
from jfr_playoff.logger import PlayoffLogger
class ResultInfoClient(object):
def __init__(self, settings, database=None):
self.settings = settings
self.database = database
@property
def priority(self):
return 0
def is_capable(self):
return False
def get_exceptions(self, method):
pass
class ResultInfo(object):
def __init__(self, *args):
self.clients = self._fill_client_list(*args)
@property
def submodule_path(self):
raise NotImplementedError()
@property
def _client_classes(self):
module = __import__(self.submodule_path, fromlist=[''])
for submodule_path in module.CLIENTS:
submodule = __import__(submodule_path, fromlist=[''])
for member in inspect.getmembers(submodule, inspect.isclass):
if member[1].__module__ == submodule_path:
yield member[1]
def _fill_client_list(self, *args):
all_clients = [c(*args) for c in self._client_classes]
clients = [c for c in all_clients if c.is_capable()]
return sorted(clients, key=lambda c: c.priority, reverse=True)
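    # call_client() below tries each capable client in priority order (highest
    # first); exception types a client declares via get_exceptions(), plus
    # NotImplementedError, mean "fall through to the next client", anything
    # else propagates, and the supplied default is returned if every client fails.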
def call_client(self, method, default, *args):
PlayoffLogger.get('resultinfo').info(
'calling %s on result info clients', method)
for client in self.clients:
try:
ret = getattr(client, method)(*args)
PlayoffLogger.get('resultinfo').info(
'%s.%s method returned %s',
client.__class__.__name__, method, ret)
return ret
except Exception as e:
if type(e) \
in client.get_exceptions(method) + (NotImplementedError,):
PlayoffLogger.get('resultinfo').warning(
'%s.%s method raised %s(%s)',
client.__class__.__name__, method,
type(e).__name__, str(e))
else:
raise
PlayoffLogger.get('resultinfo').info(
'%s method returning default: %s', method, default)
return default
class TournamentInfo(ResultInfo):
def __init__(self, settings, database):
ResultInfo.__init__(self, settings, database)
self.final_positions = settings.get('final_positions', [])
@property
def submodule_path(self):
return 'jfr_playoff.data.tournament'
def get_tournament_results(self):
teams = self.call_client('get_tournament_results', [])
if self.is_finished():
PlayoffLogger.get('tournamentinfo').info(
'setting final positions from tournament results: %s',
self.final_positions)
for position in self.final_positions:
if len(teams) >= position:
teams[position-1] = (teams[position-1] + [None] * 4)[0:4]
teams[position-1][3] = position
return teams
def is_finished(self):
return self.call_client('is_finished', True)
def get_results_link(self, suffix='leaderb.html'):
return self.call_client('get_results_link', None, suffix)
class MatchInfo(ResultInfo):
matches = {}
def __init__(self, match_config, teams, database,
aliases=None, starting_positions_certain=True, auto_carryover=False):
ResultInfo.__init__(self, match_config, database)
self.config = match_config
self.teams = teams
self.teams_by_name = { team[0]: team for team in self.teams }
self.database = database
self.aliases = {}
if aliases:
for team, team_aliases in aliases.iteritems():
for alias in team_aliases:
self.aliases[alias] = team
self._starting_positions_certain = starting_positions_certain
self._auto_carryover = auto_carryover
self.info = Match()
self._init_info()
self._fetch_match_link()
@property
def submodule_path(self):
return 'jfr_playoff.data.match'
def _init_info(self):
self.info.id = self.config['id']
MatchInfo.matches[self.info.id] = self.info
self.info.running = 0
self.info.winner_matches = []
self.info.loser_matches = []
for i in range(0, 2):
if 'winner' in self.config['teams'][i]:
self.info.winner_matches += self.config['teams'][i]['winner']
if 'loser' in self.config['teams'][i]:
self.info.loser_matches += self.config['teams'][i]['loser']
self.info.winner_matches = list(set(self.info.winner_matches))
self.info.loser_matches = list(set(self.info.loser_matches))
self.info.winner_place = self.config.get('winner', [])
self.info.loser_place = self.config.get('loser', [])
self.info.teams = []
def _fetch_match_link(self):
link = self.call_client('get_match_link', None)
if link is not None:
self.info.link = link
else:
PlayoffLogger.get('matchinfo').info(
'match #%d link empty', self.info.id)
def _get_predefined_scores(self):
teams = [Team(), Team()]
scores_fetched = False
teams_fetched = False
if 'score' in self.config:
i = 0
for score in self.config['score']:
if isinstance(self.config['score'], dict):
teams[i].score = self.config['score'][score]
try:
team_no = int(score)
teams[i].name = [self.teams[team_no-1][0]]
except ValueError:
teams[i].name = [score]
teams_fetched = True
else:
teams[i].score = score
i += 1
if i == 2:
break
scores_fetched = True
PlayoffLogger.get('matchinfo').info(
'pre-defined scores for match #%d: %s',
self.info.id, teams)
return scores_fetched, teams_fetched, teams
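    # Illustrative 'score' configs accepted by _get_predefined_scores() above
    # (team names and values are examples only):
    #   score: {1: 155.0, 2: 131.5}          - keyed by 1-based team number
    #   score: {'Team A': 10, 'Team B': 8}   - keyed by team name
    #   score: [10, 8]                       - positional scores only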
def _get_config_teams(self, teams):
for i in range(0, 2):
match_teams = []
possible_teams = []
if isinstance(self.config['teams'][i], basestring):
match_teams = [self.config['teams'][i]]
elif isinstance(self.config['teams'][i], list):
match_teams = self.config['teams'][i]
else:
if 'winner' in self.config['teams'][i]:
match_teams += [
MatchInfo.matches[winner_match].winner
for winner_match in self.config['teams'][i]['winner']]
possible_teams += [
MatchInfo.matches[winner_match].possible_winner
for winner_match in self.config['teams'][i]['winner']]
if 'loser' in self.config['teams'][i]:
match_teams += [
MatchInfo.matches[loser_match].loser
for loser_match in self.config['teams'][i]['loser']]
possible_teams += [
MatchInfo.matches[loser_match].possible_loser
for loser_match in self.config['teams'][i]['loser']]
if 'place' in self.config['teams'][i]:
placed_teams = [
self.teams[place-1][0]
for place in self.config['teams'][i]['place']]
if self._starting_positions_certain:
match_teams += placed_teams
possible_teams = [None] * len(placed_teams)
else:
possible_teams += placed_teams
match_teams = [None] * len(placed_teams)
teams[i].name = match_teams
teams[i].possible_name = possible_teams
teams[i].selected_team = self.config['selected_teams'][i] \
if 'selected_teams' in self.config else -1
teams[i].known_teams = 1 if teams[i].selected_team >= 0 else len([
team for team in match_teams if team is not None])
PlayoffLogger.get('matchinfo').info(
'config scores for match #%d: %s',
self.info.id, teams)
return teams
def _resolve_team_aliases(self, teams):
return [
self.aliases[team]
if team in self.aliases
else team
for team in teams]
def _fetch_teams_with_scores(self):
(scores_fetched, teams_fetched, self.info.teams) = \
self._get_predefined_scores()
if scores_fetched:
self.info.running = int(self.config.get('running', -1))
if not teams_fetched:
teams = self.call_client(
'fetch_teams', None,
copy.deepcopy(self.info.teams))
if teams is None:
PlayoffLogger.get('matchinfo').warning(
'fetching teams for match #%d failed, reverting to config',
self.info.id)
self.info.teams = self._get_config_teams(self.info.teams)
else:
self.info.teams = teams
for team in range(0, len(self.info.teams)):
if isinstance(self.config['teams'][team], dict):
self.info.teams[team].place = self.config['teams'][team].get(
'place', self.info.teams[team].place)
self.info.teams[team].name = self._resolve_team_aliases(
self.info.teams[team].name)
PlayoffLogger.get('matchinfo').info(
'team list after resolving aliases: %s',
self.info.teams[team].name)
self.info.teams[team].possible_name = self._resolve_team_aliases(
self.info.teams[team].possible_name)
PlayoffLogger.get('matchinfo').info(
'predicted team list after resolving aliases: %s',
self.info.teams[team].possible_name)
def _fetch_board_count(self):
boards_played, boards_to_play = self.call_client(
'board_count', (0, 0))
if boards_played > 0:
self.info.running = -1 \
if boards_played >= boards_to_play \
else boards_played
def _determine_outcome(self):
if (self.info.teams[0].known_teams == 1) \
and (self.info.teams[1].known_teams == 1):
teams = [
team.name[max(0, team.selected_team)]
for team in self.info.teams
]
if self.info.running == -1:
if self.info.teams[0].score > self.info.teams[1].score:
self.info.winner = teams[0]
self.info.loser = teams[1]
else:
self.info.loser = teams[0]
self.info.winner = teams[1]
elif self.info.running > 0:
if self.info.teams[0].score > self.info.teams[1].score:
self.info.possible_winner = teams[0]
self.info.possible_loser = teams[1]
elif self.info.teams[0].score < self.info.teams[1].score:
self.info.possible_loser = teams[0]
self.info.possible_winner = teams[1]
elif self.info.running == 0:
if self._auto_carryover:
team_data = [self.teams_by_name.get(team, []) for team in teams]
if len(team_data[0]) > 4 and len(team_data[1]) > 4:
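                        # Assumption: index 4 of a configured team entry holds
                        # its result from the qualifying stage; the carry-over
                        # is _auto_carryover percent of the difference.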
carry_over = self._auto_carryover / Decimal(100.0) * (team_data[0][4] - team_data[1][4])
if carry_over > 0:
self.info.teams[0].score = carry_over
self.info.teams[1].score = 0.0
else:
self.info.teams[0].score = 0.0
self.info.teams[1].score = -carry_over
PlayoffLogger.get('matchinfo').info(
'calculated carry-over for match #%d: %s, team data: %s',
self.info.id, carry_over, self.info.teams)
def _determine_running_link(self):
if self.info.link is None:
return
self.info.link = self.call_client('running_link', self.info.link)
def set_phase_link(self, phase_link):
prev_link = self.info.link
if self.info.link is None:
self.info.link = phase_link
else:
if self.info.link != '#':
self.info.link = urljoin(phase_link, self.info.link)
PlayoffLogger.get('matchinfo').info(
'applying phase link %s to match #%d: %s',
phase_link, self.info.id, self.info.link)
# re-init result info clients
if (prev_link != self.info.link) and (self.info.link is not None):
PlayoffLogger.get('matchinfo').info(
'config link changed, re-initializing result info client list')
self.config['link'] = self.info.link
ResultInfo.__init__(self, self.config, self.database)
def get_info(self):
self._fetch_teams_with_scores()
self._fetch_board_count()
self._determine_outcome()
if self.info.running > 0:
self._determine_running_link()
return self.info
|
{
"content_hash": "d483545180076ecba6c621fd02aa4657",
"timestamp": "",
"source": "github",
"line_count": 332,
"max_line_length": 112,
"avg_line_length": 41.147590361445786,
"alnum_prop": 0.5261693873069322,
"repo_name": "emkael/jfrteamy-playoff",
"id": "6e6f36c8814ca9aa5f84472ff79f2cd952ac8409",
"size": "13661",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "jfr_playoff/data/info.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "247"
},
{
"name": "JavaScript",
"bytes": "10761"
},
{
"name": "Python",
"bytes": "239353"
}
],
"symlink_target": ""
}
|
import unittest
import os
from conans.test.utils.tools import TestClient, TestBufferConanOutput
from conans.test.utils.test_files import hello_source_files
from conans.client.manager import CONANFILE
from conans.model.ref import ConanFileReference, PackageReference
import shutil
from conans.paths import CONANINFO
from conans.client.packager import create_package
from conans.client.loader import ConanFileLoader
from conans.model.settings import Settings
from conans.client.output import ScopedOutput
from conans.model.profile import Profile
myconan1 = """
from conans import ConanFile
import platform
class HelloConan(ConanFile):
name = "Hello"
version = "1.2.1"
files = '*'
def package(self):
self.copy("*", "include", "include/math")
self.copy("include/physics/*.hpp")
self.copy("contrib/*", "include")
self.copy("include/opencv/*")
self.copy("include/opencv2/*")
self.copy("*", "include", "modules/simu/include")
self.copy("*", "include", "modules/3D/include")
self.copy("*", "include", "modules/dev/include")
self.copy("*.a", "lib/my_lib", "my_lib/debug")
self.copy("*.txt", "res/shares", "my_data")
"""
class ExporterTest(unittest.TestCase):
def complete_test(self):
""" basic installation of a new conans
"""
client = TestClient()
client.init_dynamic_vars()
files = hello_source_files()
conan_ref = ConanFileReference.loads("Hello/1.2.1/frodo/stable")
reg_folder = client.paths.export(conan_ref)
client.save(files, path=reg_folder)
client.save({CONANFILE: myconan1,
"infos/%s" % CONANINFO: "//empty",
"include/no_copy/lib0.h": "NO copy",
"include/math/lib1.h": "copy",
"include/math/lib2.h": "copy",
"include/physics/lib.hpp": "copy",
"my_lib/debug/libd.a": "copy",
"my_data/readme.txt": "copy",
"my_data/readme.md": "NO copy",
"contrib/math/math.h": "copy",
"contrib/physics/gravity.h": "copy",
"contrib/contrib.h": "copy",
"include/opencv/opencv.hpp": "copy",
"include/opencv2/opencv2.hpp": "copy",
"modules/simu/src/simu.cpp": "NO copy",
"modules/simu/include/opencv2/simu/simu.hpp": "copy",
"modules/3D/doc/readme.md": "NO copy",
"modules/3D/include/opencv2/3D/3D.hpp": "copy",
"modules/dev/src/dev.cpp": "NO copy",
"modules/dev/include/opencv2/dev/dev.hpp": "copy",
"modules/opencv_mod.hpp": "copy"}, path=reg_folder)
conanfile_path = os.path.join(reg_folder, CONANFILE)
package_ref = PackageReference(conan_ref, "myfakeid")
build_folder = client.paths.build(package_ref)
package_folder = client.paths.package(package_ref)
install_folder = os.path.join(build_folder, "infos")
shutil.copytree(reg_folder, build_folder)
output = ScopedOutput("", TestBufferConanOutput())
loader = ConanFileLoader(None, Settings(), Profile())
conanfile = loader.load_conan(conanfile_path, None)
create_package(conanfile, build_folder, build_folder, package_folder, install_folder,
output, copy_info=True)
# test build folder
self.assertTrue(os.path.exists(build_folder))
self.assertTrue(os.path.exists(os.path.join(package_folder, CONANINFO)))
# test pack folder
self.assertTrue(os.path.exists(package_folder))
def exist(rel_path):
return os.path.exists(os.path.join(package_folder, rel_path))
# Expected files
self.assertTrue(exist("include/lib1.h"))
self.assertTrue(exist("include/lib2.h"))
self.assertTrue(exist("include/physics/lib.hpp"))
self.assertTrue(exist("include/contrib/math/math.h"))
self.assertTrue(exist("include/contrib/physics/gravity.h"))
self.assertTrue(exist("include/contrib/contrib.h"))
self.assertTrue(exist("include/opencv/opencv.hpp"))
self.assertTrue(exist("include/opencv2/opencv2.hpp"))
self.assertTrue(exist("include/opencv2/simu/simu.hpp"))
self.assertTrue(exist("include/opencv2/3D/3D.hpp"))
self.assertTrue(exist("include/opencv2/dev/dev.hpp"))
self.assertTrue(exist("lib/my_lib/libd.a"))
self.assertTrue(exist("res/shares/readme.txt"))
# Not expected files
self.assertFalse(exist("include/opencv2/opencv_mod.hpp"))
self.assertFalse(exist("include/opencv2/simu.hpp"))
self.assertFalse(exist("include/opencv2/3D.hpp"))
self.assertFalse(exist("include/opencv2/dev.hpp"))
self.assertFalse(exist("include/modules/simu/src/simu.cpp"))
self.assertFalse(exist("include/modules/3D/doc/readme.md"))
self.assertFalse(exist("include/modules/dev/src/dev.cpp"))
self.assertFalse(exist("include/opencv2/opencv_mod.hpp"))
self.assertFalse(exist("include/include/no_copy/lib0.h"))
self.assertFalse(exist("res/my_data/readme.md"))
|
{
"content_hash": "5d7c84a585de678a43aa803efd2394c5",
"timestamp": "",
"source": "github",
"line_count": 125,
"max_line_length": 93,
"avg_line_length": 45.208,
"alnum_prop": 0.5793664838081756,
"repo_name": "tivek/conan",
"id": "decbec13bc26538e0a2afb561cc378405e0d2b98",
"size": "5651",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "conans/test/functional/create_package_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1100"
},
{
"name": "Groovy",
"bytes": "6080"
},
{
"name": "Python",
"bytes": "2456395"
},
{
"name": "Shell",
"bytes": "1864"
}
],
"symlink_target": ""
}
|
"""
error types
Copyright (c) 2010-2012 Mika Eloranta
See LICENSE for details.
"""
class Error(Exception):
"""error"""
class InvalidProperty(Error):
"""invalid property"""
class MissingProperty(Error):
"""missing property"""
class UserError(Error):
"""user error"""
class InvalidRange(Error):
"""invalid range"""
class SettingsError(Error):
"""settings error"""
class VerifyError(Error):
"""verify error"""
class TemplateError(Error):
"""template rendering error"""
class CloudError(Error):
"""cloud error"""
class RemoteError(Error):
"""remote error"""
class RemoteFileDoesNotExist(RemoteError):
"""remote file does not exist"""
class RepoError(Error):
"""repository error"""
class ImporterError(Error):
"""importer error"""
class MissingLibraryError(Error):
"""missing library error"""
class RequirementError(Error):
"""requirement error"""
class ControlError(Error):
"""control error"""
class OperationError(Error):
"""operation error"""
|
{
"content_hash": "9b8d4b0b7b2cc4bd26f352b0663886b9",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 42,
"avg_line_length": 17.724137931034484,
"alnum_prop": 0.6741245136186771,
"repo_name": "ohmu/poni",
"id": "ad6814fac263f5b76918b8ac263abe0db67b26c3",
"size": "1028",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "poni/errors.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "1979"
},
{
"name": "Puppet",
"bytes": "363"
},
{
"name": "Python",
"bytes": "356805"
},
{
"name": "Shell",
"bytes": "4337"
}
],
"symlink_target": ""
}
|
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = """
---
module: vertica_configuration
version_added: '2.0'
short_description: Updates Vertica configuration parameters.
description:
- Updates Vertica configuration parameters.
options:
name:
description:
- Name of the parameter to update.
required: true
value:
description:
- Value of the parameter to be set.
required: true
db:
description:
- Name of the Vertica database.
required: false
default: null
cluster:
description:
- Name of the Vertica cluster.
required: false
default: localhost
port:
description:
- Vertica cluster port to connect to.
required: false
default: 5433
login_user:
description:
- The username used to authenticate with.
required: false
default: dbadmin
login_password:
description:
- The password used to authenticate with.
required: false
default: null
notes:
- The default authentication assumes that you are either logging in as or sudo'ing
to the C(dbadmin) account on the host.
- This module uses C(pyodbc), a Python ODBC database adapter. You must ensure
that C(unixODBC) and C(pyodbc) is installed on the host and properly configured.
- Configuring C(unixODBC) for Vertica requires C(Driver = /opt/vertica/lib64/libverticaodbc.so)
to be added to the C(Vertica) section of either C(/etc/odbcinst.ini) or C($HOME/.odbcinst.ini)
and both C(ErrorMessagesPath = /opt/vertica/lib64) and C(DriverManagerEncoding = UTF-16)
to be added to the C(Driver) section of either C(/etc/vertica.ini) or C($HOME/.vertica.ini).
requirements: [ 'unixODBC', 'pyodbc' ]
author: "Dariusz Owczarek (@dareko)"
"""
EXAMPLES = """
- name: updating load_balance_policy
vertica_configuration: name=failovertostandbyafter value='8 hours'
"""
try:
import pyodbc
except ImportError:
pyodbc_found = False
else:
pyodbc_found = True
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.pycompat24 import get_exception
class NotSupportedError(Exception):
pass
class CannotDropError(Exception):
pass
# module specific functions
def get_configuration_facts(cursor, parameter_name=''):
facts = {}
cursor.execute("""
select c.parameter_name, c.current_value, c.default_value
from configuration_parameters c
where c.node_name = 'ALL'
and (? = '' or c.parameter_name ilike ?)
""", parameter_name, parameter_name)
while True:
rows = cursor.fetchmany(100)
if not rows:
break
for row in rows:
facts[row.parameter_name.lower()] = {
'parameter_name': row.parameter_name,
'current_value': row.current_value,
'default_value': row.default_value}
return facts
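# get_configuration_facts() returns a dict shaped like this (illustrative
# parameter name and values only):
#   {'maxclientsessions': {'parameter_name': 'MaxClientSessions',
#                          'current_value': '100', 'default_value': '50'}}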
def check(configuration_facts, parameter_name, current_value):
parameter_key = parameter_name.lower()
if current_value and current_value.lower() != configuration_facts[parameter_key]['current_value'].lower():
return False
return True
def present(configuration_facts, cursor, parameter_name, current_value):
parameter_key = parameter_name.lower()
changed = False
if current_value and current_value.lower() != configuration_facts[parameter_key]['current_value'].lower():
cursor.execute("select set_config_parameter('{0}', '{1}')".format(parameter_name, current_value))
changed = True
if changed:
configuration_facts.update(get_configuration_facts(cursor, parameter_name))
return changed
# module logic
def main():
module = AnsibleModule(
argument_spec=dict(
parameter=dict(required=True, aliases=['name']),
value=dict(default=None),
db=dict(default=None),
cluster=dict(default='localhost'),
port=dict(default='5433'),
login_user=dict(default='dbadmin'),
login_password=dict(default=None),
), supports_check_mode = True)
if not pyodbc_found:
module.fail_json(msg="The python pyodbc module is required.")
parameter_name = module.params['parameter']
current_value = module.params['value']
db = ''
if module.params['db']:
db = module.params['db']
changed = False
try:
dsn = (
"Driver=Vertica;"
"Server={0};"
"Port={1};"
"Database={2};"
"User={3};"
"Password={4};"
"ConnectionLoadBalance={5}"
).format(module.params['cluster'], module.params['port'], db,
module.params['login_user'], module.params['login_password'], 'true')
db_conn = pyodbc.connect(dsn, autocommit=True)
cursor = db_conn.cursor()
except Exception:
e = get_exception()
module.fail_json(msg="Unable to connect to database: {0}.".format(e))
try:
configuration_facts = get_configuration_facts(cursor)
if module.check_mode:
changed = not check(configuration_facts, parameter_name, current_value)
else:
try:
changed = present(configuration_facts, cursor, parameter_name, current_value)
except pyodbc.Error:
e = get_exception()
module.fail_json(msg=str(e))
except NotSupportedError:
e = get_exception()
module.fail_json(msg=str(e), ansible_facts={'vertica_configuration': configuration_facts})
except CannotDropError:
e = get_exception()
module.fail_json(msg=str(e), ansible_facts={'vertica_configuration': configuration_facts})
except SystemExit:
# avoid catching this on python 2.4
raise
except Exception:
e = get_exception()
module.fail_json(msg=e)
module.exit_json(changed=changed, parameter=parameter_name, ansible_facts={'vertica_configuration': configuration_facts})
if __name__ == '__main__':
main()
|
{
"content_hash": "944fc68f4d25f2536d5b3379e8a196bc",
"timestamp": "",
"source": "github",
"line_count": 188,
"max_line_length": 125,
"avg_line_length": 32.53191489361702,
"alnum_prop": 0.6362001308044474,
"repo_name": "nwiizo/workspace_2017",
"id": "c99627a021d7fd241d919b5db80c668f1ec8a858",
"size": "6812",
"binary": false,
"copies": "22",
"ref": "refs/heads/master",
"path": "ansible-modules-extras/database/vertica/vertica_configuration.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "173"
},
{
"name": "C++",
"bytes": "7105"
},
{
"name": "CSS",
"bytes": "50021"
},
{
"name": "Go",
"bytes": "112005"
},
{
"name": "HTML",
"bytes": "66435"
},
{
"name": "JavaScript",
"bytes": "73266"
},
{
"name": "Makefile",
"bytes": "1227"
},
{
"name": "PHP",
"bytes": "3916"
},
{
"name": "PowerShell",
"bytes": "277598"
},
{
"name": "Python",
"bytes": "11925958"
},
{
"name": "Ruby",
"bytes": "3779"
},
{
"name": "Rust",
"bytes": "1484076"
},
{
"name": "Shell",
"bytes": "86558"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals, division, absolute_import
from builtins import * # noqa pylint: disable=unused-import, redefined-builtin
import re
import logging
from flexget import plugin
from flexget.event import event
from flexget.components.sites.urlrewriting import UrlRewritingError
from flexget.utils.soup import get_soup
log = logging.getLogger('FTDB')
class UrlRewriteFTDB(object):
"""FTDB RSS url_rewrite"""
def url_rewritable(self, task, entry):
# url = entry['url']
if re.match(r'^http://www\.frenchtorrentdb\.com/[^/]+(?!/)[^/]+&rss=1', entry['url']):
return True
return False
def url_rewrite(self, task, entry):
old_url = entry['url']
page_url = old_url.replace('DOWNLOAD', 'INFOS')
page_url = page_url.replace('&rss=1', '')
new_url = self.parse_download_page(page_url, task.requests)
log.debug('PAGE URL NEEDED : %s' % page_url)
log.debug('%s OLD is rewrited to NEW %s' % (old_url, new_url))
entry['url'] = new_url
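    # Illustrative url_rewrite() flow (the URL shape is an example, not taken
    # from the site): an RSS entry such as
    #   http://www.frenchtorrentdb.com/?section=DOWNLOAD&id=123&rss=1
    # is turned into the matching ...?section=INFOS&id=123 page, which is then
    # scraped by parse_download_page() for the actual torrent link.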
def parse_download_page(self, page_url, requests):
page = requests.get(page_url)
try:
soup = get_soup(page.text)
except Exception as e:
raise UrlRewritingError(e)
tag_a = soup.find("a", {"class": "dl_link"})
if not tag_a:
if soup.findAll(text="Connexion ?"):
raise UrlRewritingError(
'You are not logged in,\
check if your cookie for\
authentication is up to date'
)
else:
raise UrlRewritingError(
'You have reached your download\
limit per 24hours, so I cannot\
get the torrent'
)
torrent_url = "http://www.frenchtorrentdb.com" + tag_a.get('href') + "&js=1"
log.debug('TORRENT URL is : %s' % torrent_url)
return torrent_url
@event('plugin.register')
def register_plugin():
plugin.register(UrlRewriteFTDB, 'frenchtorrentdb', interfaces=['urlrewriter'], api_ver=2)
|
{
"content_hash": "819b5d67de0a6fdd7ea49d45229706f7",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 94,
"avg_line_length": 36.14754098360656,
"alnum_prop": 0.5619047619047619,
"repo_name": "tobinjt/Flexget",
"id": "3c08027be8d270915ee92d81f2c64f43f906fea6",
"size": "2205",
"binary": false,
"copies": "4",
"ref": "refs/heads/develop",
"path": "flexget/components/sites/sites/frenchtorrentdb.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "11875"
},
{
"name": "Dockerfile",
"bytes": "2338"
},
{
"name": "HTML",
"bytes": "79800"
},
{
"name": "JavaScript",
"bytes": "263723"
},
{
"name": "Python",
"bytes": "3492888"
},
{
"name": "SRecode Template",
"bytes": "3"
},
{
"name": "Shell",
"bytes": "1576"
}
],
"symlink_target": ""
}
|
"""
VCAP Services module
This module initializes the database connection string
from VCAP_SERVICES in Bluemix, if found
"""
import os
import json
def get_database_uri():
"""
    Initializes the MySQL database connection string
    This method will work in the following conditions:
      1) In Bluemix with ClearDB MySQL bound through VCAP_SERVICES
2) With MySQL running on the local server as with Travis CI
"""
# Get the credentials from the Bluemix environment
if 'VCAP_SERVICES' in os.environ:
vcap_services = os.environ['VCAP_SERVICES']
services = json.loads(vcap_services)
creds = services['cleardb'][0]['credentials']
#uri = creds["uri"]
username = creds["username"]
password = creds["password"]
hostname = creds["hostname"]
port = creds["port"]
name = creds["name"]
else:
username = 'root'
password = ''
hostname = 'localhost'
port = '3306'
name = 'shopcarts'
connect_string = 'mysql+pymysql://{}:{}@{}:{}/{}'
return connect_string.format(username, password, hostname, port, name)
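# Minimal sketch for local experimentation (assumes VCAP_SERVICES is not set,
# so the hard-coded local defaults above are used):
if __name__ == '__main__':
    # prints: mysql+pymysql://root:@localhost:3306/shopcarts
    print(get_database_uri())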
|
{
"content_hash": "6ff9eb030736c36dba361910b1c0341c",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 74,
"avg_line_length": 30.243243243243242,
"alnum_prop": 0.6291331546023236,
"repo_name": "nyu-devops-echo/shopcarts",
"id": "8a1a063e408efa6468517c21836a3d4dd0cc86d7",
"size": "1724",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/vcap_services.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Gherkin",
"bytes": "3513"
},
{
"name": "HTML",
"bytes": "583"
},
{
"name": "JavaScript",
"bytes": "1672"
},
{
"name": "Python",
"bytes": "67489"
},
{
"name": "Shell",
"bytes": "1009"
},
{
"name": "Vue",
"bytes": "11447"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import, division, print_function, with_statement
import tornado.escape
from tornado.escape import utf8, xhtml_escape, xhtml_unescape, url_escape, url_unescape, to_unicode, json_decode, json_encode
from tornado.util import u, unicode_type, bytes_type
from tornado.test.util import unittest
linkify_tests = [
# (input, linkify_kwargs, expected_output)
("hello http://world.com/!", {},
u('hello <a href="http://world.com/">http://world.com/</a>!')),
("hello http://world.com/with?param=true&stuff=yes", {},
     u('hello <a href="http://world.com/with?param=true&amp;stuff=yes">http://world.com/with?param=true&amp;stuff=yes</a>')),
# an opened paren followed by many chars killed Gruber's regex
("http://url.com/w(aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", {},
u('<a href="http://url.com/w">http://url.com/w</a>(aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa')),
# as did too many dots at the end
("http://url.com/withmany.......................................", {},
u('<a href="http://url.com/withmany">http://url.com/withmany</a>.......................................')),
("http://url.com/withmany((((((((((((((((((((((((((((((((((a)", {},
u('<a href="http://url.com/withmany">http://url.com/withmany</a>((((((((((((((((((((((((((((((((((a)')),
# some examples from http://daringfireball.net/2009/11/liberal_regex_for_matching_urls
# plus a fex extras (such as multiple parentheses).
("http://foo.com/blah_blah", {},
u('<a href="http://foo.com/blah_blah">http://foo.com/blah_blah</a>')),
("http://foo.com/blah_blah/", {},
u('<a href="http://foo.com/blah_blah/">http://foo.com/blah_blah/</a>')),
("(Something like http://foo.com/blah_blah)", {},
u('(Something like <a href="http://foo.com/blah_blah">http://foo.com/blah_blah</a>)')),
("http://foo.com/blah_blah_(wikipedia)", {},
u('<a href="http://foo.com/blah_blah_(wikipedia)">http://foo.com/blah_blah_(wikipedia)</a>')),
("http://foo.com/blah_(blah)_(wikipedia)_blah", {},
u('<a href="http://foo.com/blah_(blah)_(wikipedia)_blah">http://foo.com/blah_(blah)_(wikipedia)_blah</a>')),
("(Something like http://foo.com/blah_blah_(wikipedia))", {},
u('(Something like <a href="http://foo.com/blah_blah_(wikipedia)">http://foo.com/blah_blah_(wikipedia)</a>)')),
("http://foo.com/blah_blah.", {},
u('<a href="http://foo.com/blah_blah">http://foo.com/blah_blah</a>.')),
("http://foo.com/blah_blah/.", {},
u('<a href="http://foo.com/blah_blah/">http://foo.com/blah_blah/</a>.')),
("<http://foo.com/blah_blah>", {},
     u('&lt;<a href="http://foo.com/blah_blah">http://foo.com/blah_blah</a>&gt;')),
("<http://foo.com/blah_blah/>", {},
     u('&lt;<a href="http://foo.com/blah_blah/">http://foo.com/blah_blah/</a>&gt;')),
("http://foo.com/blah_blah,", {},
u('<a href="http://foo.com/blah_blah">http://foo.com/blah_blah</a>,')),
("http://www.example.com/wpstyle/?p=364.", {},
u('<a href="http://www.example.com/wpstyle/?p=364">http://www.example.com/wpstyle/?p=364</a>.')),
("rdar://1234",
{"permitted_protocols": ["http", "rdar"]},
u('<a href="rdar://1234">rdar://1234</a>')),
("rdar:/1234",
{"permitted_protocols": ["rdar"]},
u('<a href="rdar:/1234">rdar:/1234</a>')),
("http://userid:password@example.com:8080", {},
u('<a href="http://userid:password@example.com:8080">http://userid:password@example.com:8080</a>')),
("http://userid@example.com", {},
u('<a href="http://userid@example.com">http://userid@example.com</a>')),
("http://userid@example.com:8080", {},
u('<a href="http://userid@example.com:8080">http://userid@example.com:8080</a>')),
("http://userid:password@example.com", {},
u('<a href="http://userid:password@example.com">http://userid:password@example.com</a>')),
("message://%3c330e7f8409726r6a4ba78dkf1fd71420c1bf6ff@mail.gmail.com%3e",
{"permitted_protocols": ["http", "message"]},
u('<a href="message://%3c330e7f8409726r6a4ba78dkf1fd71420c1bf6ff@mail.gmail.com%3e">message://%3c330e7f8409726r6a4ba78dkf1fd71420c1bf6ff@mail.gmail.com%3e</a>')),
(u("http://\u27a1.ws/\u4a39"), {},
u('<a href="http://\u27a1.ws/\u4a39">http://\u27a1.ws/\u4a39</a>')),
("<tag>http://example.com</tag>", {},
     u('&lt;tag&gt;<a href="http://example.com">http://example.com</a>&lt;/tag&gt;')),
("Just a www.example.com link.", {},
u('Just a <a href="http://www.example.com">www.example.com</a> link.')),
("Just a www.example.com link.",
{"require_protocol": True},
u('Just a www.example.com link.')),
("A http://reallylong.com/link/that/exceedsthelenglimit.html",
{"require_protocol": True, "shorten": True},
u('A <a href="http://reallylong.com/link/that/exceedsthelenglimit.html" title="http://reallylong.com/link/that/exceedsthelenglimit.html">http://reallylong.com/link...</a>')),
("A http://reallylongdomainnamethatwillbetoolong.com/hi!",
{"shorten": True},
u('A <a href="http://reallylongdomainnamethatwillbetoolong.com/hi" title="http://reallylongdomainnamethatwillbetoolong.com/hi">http://reallylongdomainnametha...</a>!')),
("A file:///passwords.txt and http://web.com link", {},
u('A file:///passwords.txt and <a href="http://web.com">http://web.com</a> link')),
("A file:///passwords.txt and http://web.com link",
{"permitted_protocols": ["file"]},
u('A <a href="file:///passwords.txt">file:///passwords.txt</a> and http://web.com link')),
("www.external-link.com",
{"extra_params": 'rel="nofollow" class="external"'},
u('<a href="http://www.external-link.com" rel="nofollow" class="external">www.external-link.com</a>')),
("www.external-link.com and www.internal-link.com/blogs extra",
{"extra_params": lambda href: 'class="internal"' if href.startswith("http://www.internal-link.com") else 'rel="nofollow" class="external"'},
u('<a href="http://www.external-link.com" rel="nofollow" class="external">www.external-link.com</a> and <a href="http://www.internal-link.com/blogs" class="internal">www.internal-link.com/blogs</a> extra')),
("www.external-link.com",
{"extra_params": lambda href: ' rel="nofollow" class="external" '},
u('<a href="http://www.external-link.com" rel="nofollow" class="external">www.external-link.com</a>')),
]
class EscapeTestCase(unittest.TestCase):
def test_linkify(self):
for text, kwargs, html in linkify_tests:
linked = tornado.escape.linkify(text, **kwargs)
self.assertEqual(linked, html)
def test_xhtml_escape(self):
tests = [
("<foo>", "<foo>"),
(u("<foo>"), u("<foo>")),
(b"<foo>", b"<foo>"),
("<>&\"", "<>&""),
("&", "&amp;"),
(u("<\u00e9>"), u("<\u00e9>")),
(b"<\xc3\xa9>", b"<\xc3\xa9>"),
]
for unescaped, escaped in tests:
self.assertEqual(utf8(xhtml_escape(unescaped)), utf8(escaped))
self.assertEqual(utf8(unescaped), utf8(xhtml_unescape(escaped)))
def test_url_escape_unicode(self):
tests = [
# byte strings are passed through as-is
(u('\u00e9').encode('utf8'), '%C3%A9'),
(u('\u00e9').encode('latin1'), '%E9'),
# unicode strings become utf8
(u('\u00e9'), '%C3%A9'),
]
for unescaped, escaped in tests:
self.assertEqual(url_escape(unescaped), escaped)
def test_url_unescape_unicode(self):
tests = [
('%C3%A9', u('\u00e9'), 'utf8'),
('%C3%A9', u('\u00c3\u00a9'), 'latin1'),
('%C3%A9', utf8(u('\u00e9')), None),
]
for escaped, unescaped, encoding in tests:
# input strings to url_unescape should only contain ascii
# characters, but make sure the function accepts both byte
# and unicode strings.
self.assertEqual(url_unescape(to_unicode(escaped), encoding), unescaped)
self.assertEqual(url_unescape(utf8(escaped), encoding), unescaped)
def test_url_escape_quote_plus(self):
unescaped = '+ #%'
plus_escaped = '%2B+%23%25'
escaped = '%2B%20%23%25'
self.assertEqual(url_escape(unescaped), plus_escaped)
self.assertEqual(url_escape(unescaped, plus=False), escaped)
self.assertEqual(url_unescape(plus_escaped), unescaped)
self.assertEqual(url_unescape(escaped, plus=False), unescaped)
self.assertEqual(url_unescape(plus_escaped, encoding=None),
utf8(unescaped))
self.assertEqual(url_unescape(escaped, encoding=None, plus=False),
utf8(unescaped))
def test_escape_return_types(self):
# On python2 the escape methods should generally return the same
# type as their argument
self.assertEqual(type(xhtml_escape("foo")), str)
self.assertEqual(type(xhtml_escape(u("foo"))), unicode_type)
def test_json_decode(self):
# json_decode accepts both bytes and unicode, but strings it returns
# are always unicode.
self.assertEqual(json_decode(b'"foo"'), u("foo"))
self.assertEqual(json_decode(u('"foo"')), u("foo"))
# Non-ascii bytes are interpreted as utf8
self.assertEqual(json_decode(utf8(u('"\u00e9"'))), u("\u00e9"))
def test_json_encode(self):
# json deals with strings, not bytes. On python 2 byte strings will
# convert automatically if they are utf8; on python 3 byte strings
# are not allowed.
self.assertEqual(json_decode(json_encode(u("\u00e9"))), u("\u00e9"))
if bytes_type is str:
self.assertEqual(json_decode(json_encode(utf8(u("\u00e9")))), u("\u00e9"))
self.assertRaises(UnicodeDecodeError, json_encode, b"\xe9")
|
{
"content_hash": "a905b206a3dec7f7b055e9572ad9bca0",
"timestamp": "",
"source": "github",
"line_count": 214,
"max_line_length": 212,
"avg_line_length": 46.700934579439256,
"alnum_prop": 0.5952571542925755,
"repo_name": "andr-kun/ncss-murder",
"id": "0370d77dada9b2dddd5ca8b1a7039de8c6117aaa",
"size": "10018",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "tornado/test/escape_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "12907"
},
{
"name": "HTML",
"bytes": "33994"
},
{
"name": "JavaScript",
"bytes": "127375"
},
{
"name": "Python",
"bytes": "1029285"
}
],
"symlink_target": ""
}
|
try:
from calendarserver.tools.agent import AgentRealm
from calendarserver.tools.agent import InactivityDetector
from twistedcaldav.test.util import TestCase
from twisted.internet.task import Clock
from twisted.web.resource import IResource
from twisted.web.resource import ForbiddenResource
except ImportError:
pass
else:
class FakeRecord(object):
def __init__(self, shortName):
self.shortNames = [shortName]
class AgentTestCase(TestCase):
def test_AgentRealm(self):
realm = AgentRealm("root", ["abc"])
# Valid avatar
_ignore_interface, resource, ignored = realm.requestAvatar(
FakeRecord("abc"), None, IResource
)
self.assertEquals(resource, "root")
# Not allowed avatar
_ignore_interface, resource, ignored = realm.requestAvatar(
FakeRecord("def"), None, IResource
)
self.assertTrue(isinstance(resource, ForbiddenResource))
# Interface unhandled
try:
realm.requestAvatar(FakeRecord("def"), None, None)
except NotImplementedError:
pass
else:
self.fail("Didn't raise NotImplementedError")
class InactivityDectectorTestCase(TestCase):
def test_inactivity(self):
clock = Clock()
self.inactivityReached = False
def becameInactive():
self.inactivityReached = True
id = InactivityDetector(clock, 5, becameInactive)
# After 3 seconds, not inactive
clock.advance(3)
self.assertFalse(self.inactivityReached)
# Activity happens, pushing out the inactivity threshold
id.activity()
clock.advance(3)
self.assertFalse(self.inactivityReached)
# Time passes without activity
clock.advance(3)
self.assertTrue(self.inactivityReached)
id.stop()
# Verify a timeout of 0 does not ever fire
id = InactivityDetector(clock, 0, becameInactive)
self.assertEquals(clock.getDelayedCalls(), [])
class FakeRequest(object):
def getClientIP(self):
return "127.0.0.1"
class FakeOpenDirectory(object):
def returnThisRecord(self, response):
self.recordResponse = response
def getUserRecord(self, ignored, username):
return self.recordResponse
def returnThisAuthResponse(self, response):
self.authResponse = response
def authenticateUserDigest(
self, ignored, node, username, challenge, response, method
):
return self.authResponse
ODNSerror = "Error"
class FakeCredentials(object):
def __init__(self, username, fields):
self.username = username
self.fields = fields
self.method = "POST"
|
{
"content_hash": "d9d6573382ed863fb3e28d7322e304c6",
"timestamp": "",
"source": "github",
"line_count": 102,
"max_line_length": 71,
"avg_line_length": 29.50980392156863,
"alnum_prop": 0.5993355481727575,
"repo_name": "macosforge/ccs-calendarserver",
"id": "69d82427e61c906f2f3e315c00ff50f4cd5c87f7",
"size": "3617",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "calendarserver/tools/test/test_agent.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
}
|
import base64
import logging
import netaddr
from ryu.ofproto import ether
from ryu.ofproto import inet
from ryu.ofproto import ofproto_v1_3
from ryu.ofproto import ofproto_v1_3_parser
from ryu.lib import hub
LOG = logging.getLogger('ryu.lib.ofctl_v1_3')
DEFAULT_TIMEOUT = 1.0
def str_to_int(src):
if isinstance(src, str):
if src.startswith("0x") or src.startswith("0X"):
dst = int(src, 16)
else:
dst = int(src)
else:
dst = src
return dst
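# Examples: str_to_int("0x1f") -> 31, str_to_int("31") -> 31, str_to_int(31) -> 31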
def to_action(dp, dic):
ofp = dp.ofproto
parser = dp.ofproto_parser
action_type = dic.get('type')
if action_type == 'OUTPUT':
out_port = int(dic.get('port', ofp.OFPP_ANY))
max_len = int(dic.get('max_len', ofp.OFPCML_MAX))
result = parser.OFPActionOutput(out_port, max_len)
elif action_type == 'COPY_TTL_OUT':
result = parser.OFPActionCopyTtlOut()
elif action_type == 'COPY_TTL_IN':
result = parser.OFPActionCopyTtlIn()
elif action_type == 'SET_MPLS_TTL':
mpls_ttl = int(dic.get('mpls_ttl'))
result = parser.OFPActionSetMplsTtl(mpls_ttl)
elif action_type == 'DEC_MPLS_TTL':
result = parser.OFPActionDecMplsTtl()
elif action_type == 'PUSH_VLAN':
ethertype = int(dic.get('ethertype'))
result = parser.OFPActionPushVlan(ethertype)
elif action_type == 'POP_VLAN':
result = parser.OFPActionPopVlan()
elif action_type == 'PUSH_MPLS':
ethertype = int(dic.get('ethertype'))
result = parser.OFPActionPushMpls(ethertype)
elif action_type == 'POP_MPLS':
ethertype = int(dic.get('ethertype'))
result = parser.OFPActionPopMpls(ethertype)
elif action_type == 'SET_QUEUE':
queue_id = int(dic.get('queue_id'))
result = parser.OFPActionSetQueue(queue_id)
elif action_type == 'GROUP':
group_id = int(dic.get('group_id'))
result = parser.OFPActionGroup(group_id)
elif action_type == 'SET_NW_TTL':
nw_ttl = int(dic.get('nw_ttl'))
result = parser.OFPActionSetNwTtl(nw_ttl)
elif action_type == 'DEC_NW_TTL':
result = parser.OFPActionDecNwTtl()
elif action_type == 'SET_FIELD':
field = dic.get('field')
value = dic.get('value')
result = parser.OFPActionSetField(**{field: value})
elif action_type == 'PUSH_PBB':
ethertype = int(dic.get('ethertype'))
result = parser.OFPActionPushPbb(ethertype)
elif action_type == 'POP_PBB':
result = parser.OFPActionPopPbb()
else:
result = None
return result
def to_actions(dp, acts):
inst = []
actions = []
ofp = dp.ofproto
parser = dp.ofproto_parser
for a in acts:
action = to_action(dp, a)
if action is not None:
actions.append(action)
else:
action_type = a.get('type')
if action_type == 'WRITE_ACTIONS':
write_actions = []
write_acts = a.get('actions')
for a in write_acts:
action = to_action(dp, a)
if action is not None:
write_actions.append(action)
else:
LOG.error('Unknown action type: %s', action_type)
if write_actions:
inst.append(parser.OFPInstructionActions(ofp.OFPIT_WRITE_ACTIONS,
write_actions))
elif action_type == 'CLEAR_ACTIONS':
inst.append(parser.OFPInstructionActions(
ofp.OFPIT_CLEAR_ACTIONS, []))
elif action_type == 'GOTO_TABLE':
table_id = int(a.get('table_id'))
inst.append(parser.OFPInstructionGotoTable(table_id))
elif action_type == 'WRITE_METADATA':
metadata = str_to_int(a.get('metadata'))
metadata_mask = (str_to_int(a['metadata_mask'])
if 'metadata_mask' in a
else parser.UINT64_MAX)
inst.append(
parser.OFPInstructionWriteMetadata(
metadata, metadata_mask))
elif action_type == 'METER':
meter_id = int(a.get('meter_id'))
inst.append(parser.OFPInstructionMeter(meter_id))
else:
LOG.error('Unknown action type: %s', action_type)
if actions:
inst.append(parser.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS,
actions))
return inst
def action_to_str(act):
action_type = act.cls_action_type
if action_type == ofproto_v1_3.OFPAT_OUTPUT:
buf = 'OUTPUT:' + str(act.port)
elif action_type == ofproto_v1_3.OFPAT_COPY_TTL_OUT:
buf = 'COPY_TTL_OUT'
elif action_type == ofproto_v1_3.OFPAT_COPY_TTL_IN:
buf = 'COPY_TTL_IN'
elif action_type == ofproto_v1_3.OFPAT_SET_MPLS_TTL:
buf = 'SET_MPLS_TTL:' + str(act.mpls_ttl)
elif action_type == ofproto_v1_3.OFPAT_DEC_MPLS_TTL:
buf = 'DEC_MPLS_TTL'
elif action_type == ofproto_v1_3.OFPAT_PUSH_VLAN:
buf = 'PUSH_VLAN:' + str(act.ethertype)
elif action_type == ofproto_v1_3.OFPAT_POP_VLAN:
buf = 'POP_VLAN'
elif action_type == ofproto_v1_3.OFPAT_PUSH_MPLS:
buf = 'PUSH_MPLS:' + str(act.ethertype)
elif action_type == ofproto_v1_3.OFPAT_POP_MPLS:
buf = 'POP_MPLS:' + str(act.ethertype)
elif action_type == ofproto_v1_3.OFPAT_SET_QUEUE:
buf = 'SET_QUEUE:' + str(act.queue_id)
elif action_type == ofproto_v1_3.OFPAT_GROUP:
buf = 'GROUP:' + str(act.group_id)
elif action_type == ofproto_v1_3.OFPAT_SET_NW_TTL:
buf = 'SET_NW_TTL:' + str(act.nw_ttl)
elif action_type == ofproto_v1_3.OFPAT_DEC_NW_TTL:
buf = 'DEC_NW_TTL'
elif action_type == ofproto_v1_3.OFPAT_SET_FIELD:
buf = 'SET_FIELD: {%s:%s}' % (act.key, act.value)
elif action_type == ofproto_v1_3.OFPAT_PUSH_PBB:
buf = 'PUSH_PBB:' + str(act.ethertype)
elif action_type == ofproto_v1_3.OFPAT_POP_PBB:
buf = 'POP_PBB'
else:
buf = 'UNKNOWN'
return buf
def actions_to_str(instructions):
actions = []
for instruction in instructions:
if isinstance(instruction,
ofproto_v1_3_parser.OFPInstructionActions):
if instruction.type == ofproto_v1_3.OFPIT_APPLY_ACTIONS:
for a in instruction.actions:
actions.append(action_to_str(a))
elif instruction.type == ofproto_v1_3.OFPIT_WRITE_ACTIONS:
write_actions = []
for a in instruction.actions:
write_actions.append(action_to_str(a))
if write_actions:
actions.append({'WRITE_ACTIONS': write_actions})
elif instruction.type == ofproto_v1_3.OFPIT_CLEAR_ACTIONS:
actions.append('CLEAR_ACTIONS')
else:
actions.append('UNKNOWN')
elif isinstance(instruction,
ofproto_v1_3_parser.OFPInstructionGotoTable):
buf = 'GOTO_TABLE:' + str(instruction.table_id)
actions.append(buf)
elif isinstance(instruction,
ofproto_v1_3_parser.OFPInstructionWriteMetadata):
buf = ('WRITE_METADATA:0x%x/0x%x' % (instruction.metadata,
instruction.metadata_mask)
if instruction.metadata_mask
else 'WRITE_METADATA:0x%x' % instruction.metadata)
actions.append(buf)
elif isinstance(instruction,
ofproto_v1_3_parser.OFPInstructionMeter):
buf = 'METER:' + str(instruction.meter_id)
actions.append(buf)
else:
continue
return actions
def to_match(dp, attrs):
convert = {'in_port': int,
'in_phy_port': int,
'metadata': to_match_masked_int,
'dl_dst': to_match_eth,
'dl_src': to_match_eth,
'eth_dst': to_match_eth,
'eth_src': to_match_eth,
'dl_type': int,
'eth_type': int,
'dl_vlan': to_match_vid,
'vlan_vid': to_match_vid,
'vlan_pcp': int,
'ip_dscp': int,
'ip_ecn': int,
'nw_proto': int,
'ip_proto': int,
'nw_src': to_match_ip,
'nw_dst': to_match_ip,
'ipv4_src': to_match_ip,
'ipv4_dst': to_match_ip,
'tp_src': int,
'tp_dst': int,
'tcp_src': int,
'tcp_dst': int,
'udp_src': int,
'udp_dst': int,
'sctp_src': int,
'sctp_dst': int,
'icmpv4_type': int,
'icmpv4_code': int,
'arp_op': int,
'arp_spa': to_match_ip,
'arp_tpa': to_match_ip,
'arp_sha': to_match_eth,
'arp_tha': to_match_eth,
'ipv6_src': to_match_ip,
'ipv6_dst': to_match_ip,
'ipv6_flabel': int,
'icmpv6_type': int,
'icmpv6_code': int,
'ipv6_nd_target': to_match_ip,
'ipv6_nd_sll': to_match_eth,
'ipv6_nd_tll': to_match_eth,
'mpls_label': int,
'mpls_tc': int,
'mpls_bos': int,
'pbb_isid': to_match_masked_int,
'tunnel_id': to_match_masked_int,
'ipv6_exthdr': to_match_masked_int}
keys = {'dl_dst': 'eth_dst',
'dl_src': 'eth_src',
'dl_type': 'eth_type',
'dl_vlan': 'vlan_vid',
'nw_src': 'ipv4_src',
'nw_dst': 'ipv4_dst',
'nw_proto': 'ip_proto'}
if attrs.get('dl_type') == ether.ETH_TYPE_ARP or \
attrs.get('eth_type') == ether.ETH_TYPE_ARP:
if 'nw_src' in attrs and 'arp_spa' not in attrs:
attrs['arp_spa'] = attrs['nw_src']
del attrs['nw_src']
if 'nw_dst' in attrs and 'arp_tpa' not in attrs:
attrs['arp_tpa'] = attrs['nw_dst']
del attrs['nw_dst']
kwargs = {}
for key, value in attrs.items():
if key in keys:
# For old field name
key = keys[key]
if key in convert:
value = convert[key](value)
if key == 'tp_src' or key == 'tp_dst':
# TCP/UDP port
conv = {inet.IPPROTO_TCP: {'tp_src': 'tcp_src',
'tp_dst': 'tcp_dst'},
inet.IPPROTO_UDP: {'tp_src': 'udp_src',
'tp_dst': 'udp_dst'}}
ip_proto = attrs.get('nw_proto', attrs.get('ip_proto', 0))
key = conv[ip_proto][key]
kwargs[key] = value
else:
# others
kwargs[key] = value
else:
LOG.error('Unknown match field: %s', key)
return dp.ofproto_parser.OFPMatch(**kwargs)
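# Illustrative call for to_match() above:
#   to_match(dp, {'dl_type': 2048, 'nw_proto': 6,
#                 'nw_dst': '10.0.0.0/255.255.255.0', 'tp_dst': 80})
# builds an OFPMatch with eth_type=2048, ip_proto=6,
# ipv4_dst=('10.0.0.0', '255.255.255.0') and tcp_dst=80 (tp_dst is mapped to
# tcp_dst because nw_proto is TCP).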
def to_match_eth(value):
if '/' in value:
value = value.split('/')
return value[0], value[1]
else:
return value
def to_match_ip(value):
if '/' in value:
(ip_addr, ip_mask) = value.split('/')
if ip_mask.isdigit():
ip = netaddr.ip.IPNetwork(value)
ip_addr = str(ip.ip)
ip_mask = str(ip.netmask)
return ip_addr, ip_mask
else:
return value
def to_match_vid(value):
# NOTE: If "vlan_id/dl_vlan" field is described as decimal int value
# (and decimal string value), it is treated as values of
# VLAN tag, and OFPVID_PRESENT(0x1000) bit is automatically
# applied. OTOH, If it is described as hexadecimal string,
# treated as values of oxm_value (including OFPVID_PRESENT
# bit), and OFPVID_PRESENT bit is NOT automatically applied.
if isinstance(value, int):
# described as decimal int value
return value | ofproto_v1_3.OFPVID_PRESENT
else:
if '/' in value:
val = value.split('/')
return int(val[0], 0), int(val[1], 0)
else:
if value.isdigit():
# described as decimal string value
return int(value, 10) | ofproto_v1_3.OFPVID_PRESENT
else:
return int(value, 0)
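# Examples for to_match_vid() above:
#   to_match_vid(100)             -> 0x1064 (OFPVID_PRESENT applied)
#   to_match_vid("100")           -> 0x1064 (decimal string, OFPVID_PRESENT applied)
#   to_match_vid("0x1064")        -> 0x1064 (hex string used as-is)
#   to_match_vid("0x1000/0x1000") -> (0x1000, 0x1000) value/mask tuple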
def to_match_masked_int(value):
if isinstance(value, str) and '/' in value:
value = value.split('/')
return str_to_int(value[0]), str_to_int(value[1])
else:
return str_to_int(value)
def match_to_str(ofmatch):
keys = {'eth_src': 'dl_src',
'eth_dst': 'dl_dst',
'eth_type': 'dl_type',
'vlan_vid': 'dl_vlan',
'ipv4_src': 'nw_src',
'ipv4_dst': 'nw_dst',
'ip_proto': 'nw_proto',
'tcp_src': 'tp_src',
'tcp_dst': 'tp_dst',
'udp_src': 'tp_src',
'udp_dst': 'tp_dst'
}
match = {}
ofmatch = ofmatch.to_jsondict()['OFPMatch']
ofmatch = ofmatch['oxm_fields']
for match_field in ofmatch:
key = match_field['OXMTlv']['field']
if key in keys:
key = keys[key]
mask = match_field['OXMTlv']['mask']
value = match_field['OXMTlv']['value']
if key == 'dl_vlan':
value = match_vid_to_str(value, mask)
else:
if mask is not None:
value = str(value) + '/' + str(mask)
match.setdefault(key, value)
return match
def match_vid_to_str(value, mask):
if mask is not None:
value = '0x%04x/0x%04x' % (value, mask)
else:
if value & ofproto_v1_3.OFPVID_PRESENT:
value = str(value & ~ofproto_v1_3.OFPVID_PRESENT)
else:
value = '0x%04x' % value
return value
def send_stats_request(dp, stats, waiters, msgs):
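    # A (lock, msgs) pair is registered under the request xid; the OpenFlow
    # reply handler (registered elsewhere in the application) is expected to
    # append each multipart reply to msgs and set the lock.  The loop below
    # keeps waiting as long as new replies keep arriving within DEFAULT_TIMEOUT,
    # and the waiter is dropped if no reply ever completed the request.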
dp.set_xid(stats)
waiters_per_dp = waiters.setdefault(dp.id, {})
lock = hub.Event()
previous_msg_len = len(msgs)
waiters_per_dp[stats.xid] = (lock, msgs)
dp.send_msg(stats)
lock.wait(timeout=DEFAULT_TIMEOUT)
current_msg_len = len(msgs)
while current_msg_len > previous_msg_len:
previous_msg_len = current_msg_len
lock.wait(timeout=DEFAULT_TIMEOUT)
current_msg_len = len(msgs)
if not lock.is_set():
del waiters_per_dp[stats.xid]
def get_desc_stats(dp, waiters):
stats = dp.ofproto_parser.OFPDescStatsRequest(dp, 0)
msgs = []
send_stats_request(dp, stats, waiters, msgs)
s = {}
for msg in msgs:
stats = msg.body
s = {'mfr_desc': stats.mfr_desc,
'hw_desc': stats.hw_desc,
'sw_desc': stats.sw_desc,
'serial_num': stats.serial_num,
'dp_desc': stats.dp_desc}
desc = {str(dp.id): s}
return desc
def get_queue_stats(dp, waiters):
ofp = dp.ofproto
stats = dp.ofproto_parser.OFPQueueStatsRequest(dp, 0, ofp.OFPP_ANY,
ofp.OFPQ_ALL)
msgs = []
send_stats_request(dp, stats, waiters, msgs)
s = []
for msg in msgs:
stats = msg.body
for stat in stats:
s.append({'duration_nsec': stat.duration_nsec,
'duration_sec': stat.duration_sec,
'port_no': stat.port_no,
'queue_id': stat.queue_id,
'tx_bytes': stat.tx_bytes,
'tx_errors': stat.tx_errors,
'tx_packets': stat.tx_packets})
desc = {str(dp.id): s}
return desc
def get_queue_config(dp, port, waiters):
ofp = dp.ofproto
stats = dp.ofproto_parser.OFPQueueGetConfigRequest(dp, port)
msgs = []
send_stats_request(dp, stats, waiters, msgs)
prop_type = {dp.ofproto.OFPQT_MIN_RATE: 'MIN_RATE',
dp.ofproto.OFPQT_MAX_RATE: 'MAX_RATE',
dp.ofproto.OFPQT_EXPERIMENTER: 'EXPERIMENTER',
}
configs = []
for config in msgs:
queue_list = []
for queue in config.queues:
prop_list = []
for prop in queue.properties:
p = {'property': prop_type.get(prop.property, 'UNKNOWN')}
if prop.property == dp.ofproto.OFPQT_MIN_RATE or \
prop.property == dp.ofproto.OFPQT_MAX_RATE:
p['rate'] = prop.rate
elif prop.property == dp.ofproto.OFPQT_EXPERIMENTER:
p['experimenter'] = prop.experimenter
p['data'] = prop.data
prop_list.append(p)
q = {'port': queue.port,
'properties': prop_list,
'queue_id': queue.queue_id}
queue_list.append(q)
c = {'port': config.port,
'queues': queue_list}
configs.append(c)
configs = {str(dp.id): configs}
return configs
def get_flow_stats(dp, waiters, flow={}):
table_id = int(flow.get('table_id', dp.ofproto.OFPTT_ALL))
flags = int(flow.get('flags', 0))
out_port = int(flow.get('out_port', dp.ofproto.OFPP_ANY))
out_group = int(flow.get('out_group', dp.ofproto.OFPG_ANY))
cookie = int(flow.get('cookie', 0))
cookie_mask = int(flow.get('cookie_mask', 0))
match = to_match(dp, flow.get('match', {}))
stats = dp.ofproto_parser.OFPFlowStatsRequest(
dp, flags, table_id, out_port, out_group, cookie, cookie_mask,
match)
msgs = []
send_stats_request(dp, stats, waiters, msgs)
flows = []
for msg in msgs:
for stats in msg.body:
actions = actions_to_str(stats.instructions)
match = match_to_str(stats.match)
s = {'priority': stats.priority,
'cookie': stats.cookie,
'idle_timeout': stats.idle_timeout,
'hard_timeout': stats.hard_timeout,
'actions': actions,
'match': match,
'byte_count': stats.byte_count,
'duration_sec': stats.duration_sec,
'duration_nsec': stats.duration_nsec,
'packet_count': stats.packet_count,
'table_id': stats.table_id,
'length': stats.length,
'flags': stats.flags}
flows.append(s)
flows = {str(dp.id): flows}
return flows
def get_aggregate_flow_stats(dp, waiters, flow={}):
table_id = int(flow.get('table_id', dp.ofproto.OFPTT_ALL))
flags = int(flow.get('flags', 0))
out_port = int(flow.get('out_port', dp.ofproto.OFPP_ANY))
out_group = int(flow.get('out_group', dp.ofproto.OFPG_ANY))
cookie = int(flow.get('cookie', 0))
cookie_mask = int(flow.get('cookie_mask', 0))
match = to_match(dp, flow.get('match', {}))
stats = dp.ofproto_parser.OFPAggregateStatsRequest(
dp, flags, table_id, out_port, out_group, cookie, cookie_mask,
match)
msgs = []
send_stats_request(dp, stats, waiters, msgs)
flows = []
for msg in msgs:
stats = msg.body
s = {'packet_count': stats.packet_count,
'byte_count': stats.byte_count,
'flow_count': stats.flow_count}
flows.append(s)
flows = {str(dp.id): flows}
return flows
def get_table_stats(dp, waiters):
stats = dp.ofproto_parser.OFPTableStatsRequest(dp, 0)
msgs = []
send_stats_request(dp, stats, waiters, msgs)
tables = []
for msg in msgs:
stats = msg.body
for stat in stats:
s = {'table_id': stat.table_id,
'active_count': stat.active_count,
'lookup_count': stat.lookup_count,
'matched_count': stat.matched_count}
tables.append(s)
desc = {str(dp.id): tables}
return desc
def get_table_features(dp, waiters):
stats = dp.ofproto_parser.OFPTableFeaturesStatsRequest(dp, 0, [])
msgs = []
ofproto = dp.ofproto
send_stats_request(dp, stats, waiters, msgs)
prop_type = {ofproto.OFPTFPT_INSTRUCTIONS: 'INSTRUCTIONS',
ofproto.OFPTFPT_INSTRUCTIONS_MISS: 'INSTRUCTIONS_MISS',
ofproto.OFPTFPT_NEXT_TABLES: 'NEXT_TABLES',
ofproto.OFPTFPT_NEXT_TABLES_MISS: 'NEXT_TABLES_MISS',
ofproto.OFPTFPT_WRITE_ACTIONS: 'WRITE_ACTIONS',
ofproto.OFPTFPT_WRITE_ACTIONS_MISS: 'WRITE_ACTIONS_MISS',
ofproto.OFPTFPT_APPLY_ACTIONS: 'APPLY_ACTIONS',
ofproto.OFPTFPT_APPLY_ACTIONS_MISS: 'APPLY_ACTIONS_MISS',
ofproto.OFPTFPT_MATCH: 'MATCH',
ofproto.OFPTFPT_WILDCARDS: 'WILDCARDS',
ofproto.OFPTFPT_WRITE_SETFIELD: 'WRITE_SETFIELD',
ofproto.OFPTFPT_WRITE_SETFIELD_MISS: 'WRITE_SETFIELD_MISS',
ofproto.OFPTFPT_APPLY_SETFIELD: 'APPLY_SETFIELD',
ofproto.OFPTFPT_APPLY_SETFIELD_MISS: 'APPLY_SETFIELD_MISS',
ofproto.OFPTFPT_EXPERIMENTER: 'EXPERIMENTER',
ofproto.OFPTFPT_EXPERIMENTER_MISS: 'EXPERIMENTER_MISS'
}
p_type_instructions = [ofproto.OFPTFPT_INSTRUCTIONS,
ofproto.OFPTFPT_INSTRUCTIONS_MISS]
p_type_next_tables = [ofproto.OFPTFPT_NEXT_TABLES,
ofproto.OFPTFPT_NEXT_TABLES_MISS]
p_type_actions = [ofproto.OFPTFPT_WRITE_ACTIONS,
ofproto.OFPTFPT_WRITE_ACTIONS_MISS,
ofproto.OFPTFPT_APPLY_ACTIONS,
ofproto.OFPTFPT_APPLY_ACTIONS_MISS]
p_type_oxms = [ofproto.OFPTFPT_MATCH,
ofproto.OFPTFPT_WILDCARDS,
ofproto.OFPTFPT_WRITE_SETFIELD,
ofproto.OFPTFPT_WRITE_SETFIELD_MISS,
ofproto.OFPTFPT_APPLY_SETFIELD,
ofproto.OFPTFPT_APPLY_SETFIELD_MISS]
p_type_experimenter = [ofproto.OFPTFPT_EXPERIMENTER,
ofproto.OFPTFPT_EXPERIMENTER_MISS]
tables = []
for msg in msgs:
stats = msg.body
for stat in stats:
properties = []
for prop in stat.properties:
p = {'type': prop_type.get(prop.type, 'UNKNOWN')}
if prop.type in p_type_instructions:
instruction_ids = []
for id in prop.instruction_ids:
i = {'len': id.len,
'type': id.type}
instruction_ids.append(i)
p['instruction_ids'] = instruction_ids
elif prop.type in p_type_next_tables:
table_ids = []
for id in prop.table_ids:
table_ids.append(id)
p['table_ids'] = table_ids
elif prop.type in p_type_actions:
action_ids = []
for id in prop.action_ids:
i = {'len': id.len,
'type': id.type}
action_ids.append(i)
p['action_ids'] = action_ids
elif prop.type in p_type_oxms:
oxm_ids = []
for id in prop.oxm_ids:
i = {'hasmask': id.hasmask,
'length': id.length,
'type': id.type}
oxm_ids.append(i)
p['oxm_ids'] = oxm_ids
elif prop.type in p_type_experimenter:
pass
properties.append(p)
s = {'table_id': stat.table_id,
'name': stat.name,
'metadata_match': stat.metadata_match,
'metadata_write': stat.metadata_write,
'config': stat.config,
'max_entries': stat.max_entries,
'properties': properties,
}
tables.append(s)
desc = {str(dp.id): tables}
return desc
def get_port_stats(dp, waiters):
stats = dp.ofproto_parser.OFPPortStatsRequest(
dp, 0, dp.ofproto.OFPP_ANY)
msgs = []
send_stats_request(dp, stats, waiters, msgs)
ports = []
for msg in msgs:
for stats in msg.body:
s = {'port_no': stats.port_no,
'rx_packets': stats.rx_packets,
'tx_packets': stats.tx_packets,
'rx_bytes': stats.rx_bytes,
'tx_bytes': stats.tx_bytes,
'rx_dropped': stats.rx_dropped,
'tx_dropped': stats.tx_dropped,
'rx_errors': stats.rx_errors,
'tx_errors': stats.tx_errors,
'rx_frame_err': stats.rx_frame_err,
'rx_over_err': stats.rx_over_err,
'rx_crc_err': stats.rx_crc_err,
'collisions': stats.collisions,
'duration_sec': stats.duration_sec,
'duration_nsec': stats.duration_nsec}
ports.append(s)
ports = {str(dp.id): ports}
return ports
def get_meter_stats(dp, waiters):
stats = dp.ofproto_parser.OFPMeterStatsRequest(
dp, 0, dp.ofproto.OFPM_ALL)
msgs = []
send_stats_request(dp, stats, waiters, msgs)
meters = []
for msg in msgs:
for stats in msg.body:
bands = []
for band in stats.band_stats:
b = {'packet_band_count': band.packet_band_count,
'byte_band_count': band.byte_band_count}
bands.append(b)
s = {'meter_id': stats.meter_id,
'len': stats.len,
'flow_count': stats.flow_count,
'packet_in_count': stats.packet_in_count,
'byte_in_count': stats.byte_in_count,
'duration_sec': stats.duration_sec,
'duration_nsec': stats.duration_nsec,
'band_stats': bands}
meters.append(s)
meters = {str(dp.id): meters}
return meters
def get_meter_features(dp, waiters):
ofp = dp.ofproto
type_convert = {ofp.OFPMBT_DROP: 'DROP',
ofp.OFPMBT_DSCP_REMARK: 'DSCP_REMARK'}
capa_convert = {ofp.OFPMF_KBPS: 'KBPS',
ofp.OFPMF_PKTPS: 'PKTPS',
ofp.OFPMF_BURST: 'BURST',
ofp.OFPMF_STATS: 'STATS'}
stats = dp.ofproto_parser.OFPMeterFeaturesStatsRequest(dp, 0)
msgs = []
send_stats_request(dp, stats, waiters, msgs)
features = []
for msg in msgs:
for feature in msg.body:
band_types = []
for k, v in type_convert.items():
if (1 << k) & feature.band_types:
band_types.append(v)
capabilities = []
for k, v in capa_convert.items():
if k & feature.capabilities:
capabilities.append(v)
f = {'max_meter': feature.max_meter,
'band_types': band_types,
'capabilities': capabilities,
'max_bands': feature.max_bands,
'max_color': feature.max_color}
features.append(f)
features = {str(dp.id): features}
return features
def get_meter_config(dp, waiters):
flags = {dp.ofproto.OFPMF_KBPS: 'KBPS',
dp.ofproto.OFPMF_PKTPS: 'PKTPS',
dp.ofproto.OFPMF_BURST: 'BURST',
dp.ofproto.OFPMF_STATS: 'STATS'}
band_type = {dp.ofproto.OFPMBT_DROP: 'DROP',
dp.ofproto.OFPMBT_DSCP_REMARK: 'DSCP_REMARK',
dp.ofproto.OFPMBT_EXPERIMENTER: 'EXPERIMENTER'}
stats = dp.ofproto_parser.OFPMeterConfigStatsRequest(
dp, 0, dp.ofproto.OFPM_ALL)
msgs = []
send_stats_request(dp, stats, waiters, msgs)
configs = []
for msg in msgs:
for config in msg.body:
bands = []
for band in config.bands:
b = {'type': band_type.get(band.type, ''),
'rate': band.rate,
'burst_size': band.burst_size}
if band.type == dp.ofproto.OFPMBT_DSCP_REMARK:
b['prec_level'] = band.prec_level
elif band.type == dp.ofproto.OFPMBT_EXPERIMENTER:
b['experimenter'] = band.experimenter
bands.append(b)
c_flags = []
for k, v in flags.items():
if k & config.flags:
c_flags.append(v)
c = {'flags': c_flags,
'meter_id': config.meter_id,
'bands': bands}
configs.append(c)
configs = {str(dp.id): configs}
return configs
def get_group_stats(dp, waiters):
stats = dp.ofproto_parser.OFPGroupStatsRequest(
dp, 0, dp.ofproto.OFPG_ALL)
msgs = []
send_stats_request(dp, stats, waiters, msgs)
groups = []
for msg in msgs:
for stats in msg.body:
bucket_stats = []
for bucket_stat in stats.bucket_stats:
c = {'packet_count': bucket_stat.packet_count,
'byte_count': bucket_stat.byte_count}
bucket_stats.append(c)
g = {'length': stats.length,
'group_id': stats.group_id,
'ref_count': stats.ref_count,
'packet_count': stats.packet_count,
'byte_count': stats.byte_count,
'duration_sec': stats.duration_sec,
'duration_nsec': stats.duration_nsec,
'bucket_stats': bucket_stats}
groups.append(g)
groups = {str(dp.id): groups}
return groups
def get_group_features(dp, waiters):
ofp = dp.ofproto
type_convert = {ofp.OFPGT_ALL: 'ALL',
ofp.OFPGT_SELECT: 'SELECT',
ofp.OFPGT_INDIRECT: 'INDIRECT',
ofp.OFPGT_FF: 'FF'}
cap_convert = {ofp.OFPGFC_SELECT_WEIGHT: 'SELECT_WEIGHT',
ofp.OFPGFC_SELECT_LIVENESS: 'SELECT_LIVENESS',
ofp.OFPGFC_CHAINING: 'CHAINING',
ofp.OFPGFC_CHAINING_CHECKS: 'CHAINING_CHECKS'}
act_convert = {ofp.OFPAT_OUTPUT: 'OUTPUT',
ofp.OFPAT_COPY_TTL_OUT: 'COPY_TTL_OUT',
ofp.OFPAT_COPY_TTL_IN: 'COPY_TTL_IN',
ofp.OFPAT_SET_MPLS_TTL: 'SET_MPLS_TTL',
ofp.OFPAT_DEC_MPLS_TTL: 'DEC_MPLS_TTL',
ofp.OFPAT_PUSH_VLAN: 'PUSH_VLAN',
ofp.OFPAT_POP_VLAN: 'POP_VLAN',
ofp.OFPAT_PUSH_MPLS: 'PUSH_MPLS',
ofp.OFPAT_POP_MPLS: 'POP_MPLS',
ofp.OFPAT_SET_QUEUE: 'SET_QUEUE',
ofp.OFPAT_GROUP: 'GROUP',
ofp.OFPAT_SET_NW_TTL: 'SET_NW_TTL',
ofp.OFPAT_DEC_NW_TTL: 'DEC_NW_TTL',
ofp.OFPAT_SET_FIELD: 'SET_FIELD',
ofp.OFPAT_PUSH_PBB: 'PUSH_PBB',
ofp.OFPAT_POP_PBB: 'POP_PBB'}
stats = dp.ofproto_parser.OFPGroupFeaturesStatsRequest(dp, 0)
msgs = []
send_stats_request(dp, stats, waiters, msgs)
features = []
for msg in msgs:
feature = msg.body
types = []
for k, v in type_convert.items():
if (1 << k) & feature.types:
types.append(v)
capabilities = []
for k, v in cap_convert.items():
if k & feature.capabilities:
capabilities.append(v)
max_groups = []
for k, v in type_convert.items():
max_groups.append({v: feature.max_groups[k]})
actions = []
for k1, v1 in type_convert.items():
acts = []
for k2, v2 in act_convert.items():
if (1 << k2) & feature.actions[k1]:
acts.append(v2)
actions.append({v1: acts})
f = {'types': types,
'capabilities': capabilities,
'max_groups': max_groups,
'actions': actions}
features.append(f)
features = {str(dp.id): features}
return features
def get_group_desc(dp, waiters):
type_convert = {dp.ofproto.OFPGT_ALL: 'ALL',
dp.ofproto.OFPGT_SELECT: 'SELECT',
dp.ofproto.OFPGT_INDIRECT: 'INDIRECT',
dp.ofproto.OFPGT_FF: 'FF'}
stats = dp.ofproto_parser.OFPGroupDescStatsRequest(dp, 0)
msgs = []
send_stats_request(dp, stats, waiters, msgs)
descs = []
for msg in msgs:
for stats in msg.body:
buckets = []
for bucket in stats.buckets:
actions = []
for action in bucket.actions:
actions.append(action_to_str(action))
b = {'weight': bucket.weight,
'watch_port': bucket.watch_port,
'watch_group': bucket.watch_group,
'actions': actions}
buckets.append(b)
d = {'type': type_convert.get(stats.type),
'group_id': stats.group_id,
'buckets': buckets}
descs.append(d)
descs = {str(dp.id): descs}
return descs
def get_port_desc(dp, waiters):
stats = dp.ofproto_parser.OFPPortDescStatsRequest(dp, 0)
msgs = []
send_stats_request(dp, stats, waiters, msgs)
descs = []
for msg in msgs:
stats = msg.body
for stat in stats:
d = {'port_no': stat.port_no,
'hw_addr': stat.hw_addr,
'name': stat.name,
'config': stat.config,
'state': stat.state,
'curr': stat.curr,
'advertised': stat.advertised,
'supported': stat.supported,
'peer': stat.peer,
'curr_speed': stat.curr_speed,
'max_speed': stat.max_speed}
descs.append(d)
descs = {str(dp.id): descs}
return descs
def mod_flow_entry(dp, flow, cmd):
cookie = int(flow.get('cookie', 0))
cookie_mask = int(flow.get('cookie_mask', 0))
table_id = int(flow.get('table_id', 0))
idle_timeout = int(flow.get('idle_timeout', 0))
hard_timeout = int(flow.get('hard_timeout', 0))
priority = int(flow.get('priority', 0))
buffer_id = int(flow.get('buffer_id', dp.ofproto.OFP_NO_BUFFER))
out_port = int(flow.get('out_port', dp.ofproto.OFPP_ANY))
out_group = int(flow.get('out_group', dp.ofproto.OFPG_ANY))
flags = int(flow.get('flags', 0))
match = to_match(dp, flow.get('match', {}))
inst = to_actions(dp, flow.get('actions', []))
flow_mod = dp.ofproto_parser.OFPFlowMod(
dp, cookie, cookie_mask, table_id, cmd, idle_timeout,
hard_timeout, priority, buffer_id, out_port, out_group,
flags, match, inst)
dp.send_msg(flow_mod)
def mod_meter_entry(dp, flow, cmd):
flags_convert = {'KBPS': dp.ofproto.OFPMF_KBPS,
'PKTPS': dp.ofproto.OFPMF_PKTPS,
'BURST': dp.ofproto.OFPMF_BURST,
'STATS': dp.ofproto.OFPMF_STATS}
flags = 0
if 'flags' in flow:
flow_flags = flow['flags']
if not isinstance(flow_flags, list):
flow_flags = [flow_flags]
for flag in flow_flags:
if flag not in flags_convert:
LOG.error('Unknown flag: %s', flag)
continue
flags |= flags_convert.get(flag)
meter_id = int(flow.get('meter_id', 0))
bands = []
for band in flow.get('bands', []):
band_type = band.get('type')
rate = int(band.get('rate', 0))
burst_size = int(band.get('burst_size', 0))
if band_type == 'DROP':
bands.append(
dp.ofproto_parser.OFPMeterBandDrop(rate, burst_size))
elif band_type == 'DSCP_REMARK':
prec_level = int(band.get('prec_level', 0))
bands.append(
dp.ofproto_parser.OFPMeterBandDscpRemark(
rate, burst_size, prec_level))
elif band_type == 'EXPERIMENTER':
experimenter = int(band.get('experimenter', 0))
bands.append(
dp.ofproto_parser.OFPMeterBandExperimenter(
rate, burst_size, experimenter))
else:
LOG.error('Unknown band type: %s', band_type)
meter_mod = dp.ofproto_parser.OFPMeterMod(
dp, cmd, flags, meter_id, bands)
dp.send_msg(meter_mod)
def mod_group_entry(dp, group, cmd):
type_convert = {'ALL': dp.ofproto.OFPGT_ALL,
'SELECT': dp.ofproto.OFPGT_SELECT,
'INDIRECT': dp.ofproto.OFPGT_INDIRECT,
'FF': dp.ofproto.OFPGT_FF}
type_ = type_convert.get(group.get('type', 'ALL'))
if type_ is None:
LOG.error('Unknown type: %s', group.get('type'))
group_id = int(group.get('group_id', 0))
buckets = []
for bucket in group.get('buckets', []):
weight = int(bucket.get('weight', 0))
watch_port = int(bucket.get('watch_port', dp.ofproto.OFPP_ANY))
watch_group = int(bucket.get('watch_group', dp.ofproto.OFPG_ANY))
actions = []
for dic in bucket.get('actions', []):
action = to_action(dp, dic)
if action is not None:
actions.append(action)
buckets.append(dp.ofproto_parser.OFPBucket(
weight, watch_port, watch_group, actions))
group_mod = dp.ofproto_parser.OFPGroupMod(
dp, cmd, type_, group_id, buckets)
dp.send_msg(group_mod)
def mod_port_behavior(dp, port_config):
port_no = int(port_config.get('port_no', 0))
hw_addr = port_config.get('hw_addr')
config = int(port_config.get('config', 0))
mask = int(port_config.get('mask', 0))
    advertise = int(port_config.get('advertise', 0))
port_mod = dp.ofproto_parser.OFPPortMod(
dp, port_no, hw_addr, config, mask, advertise)
dp.send_msg(port_mod)
def send_experimenter(dp, exp):
experimenter = exp.get('experimenter', 0)
exp_type = exp.get('exp_type', 0)
data_type = exp.get('data_type', 'ascii')
if data_type != 'ascii' and data_type != 'base64':
LOG.error('Unknown data type: %s', data_type)
data = exp.get('data', '')
if data_type == 'base64':
data = base64.b64decode(data)
expmsg = dp.ofproto_parser.OFPExperimenter(
dp, experimenter, exp_type, data)
dp.send_msg(expmsg)
|
{
"content_hash": "f8a134aa6bc8c285a8677ee19cce9226",
"timestamp": "",
"source": "github",
"line_count": 1094,
"max_line_length": 85,
"avg_line_length": 35.40036563071298,
"alnum_prop": 0.5213540590787028,
"repo_name": "ttsubo/ryu",
"id": "94a3a3384f53b3e25409d692d3ca41e249c6ea67",
"size": "39338",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ryu/lib/ofctl_v1_3.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "26231"
},
{
"name": "CSS",
"bytes": "306"
},
{
"name": "Erlang",
"bytes": "872503"
},
{
"name": "Gnuplot",
"bytes": "1094"
},
{
"name": "HTML",
"bytes": "306"
},
{
"name": "JavaScript",
"bytes": "8436"
},
{
"name": "Makefile",
"bytes": "88"
},
{
"name": "Python",
"bytes": "5243536"
},
{
"name": "Shell",
"bytes": "14253"
}
],
"symlink_target": ""
}
|
'''
See also:
'''
import re, os, time
import calendar
from datetime import date
from dateutil.relativedelta import relativedelta
from itertools import *
from wsgiref.util import shift_path_info, request_uri
from dateutil.parser import parse
import amara
from amara.lib.iri import join
from akara.services import simple_service
from akara import request
from akara import logger
from string import Template
CAL_TEMPLATE = Template('''
<table class="akaraCalCalendar" xmlns="http://www.w3.org/1999/xhtml">
<thead>
<tr class="akaraCalCalendarTopHeaders">
$prevmonth<th colspan="5">$monthname, $year</th>$nextmonth
</tr>
<tr class="akaraCalCalendarWeekHeaders">
$dayheaders
</tr>
</thead>
<tbody>
$month
</tbody>
</table>
''')
SERVICE_ID = 'http://purl.org/xml3k/akara/services/demo/calendar'
@simple_service('GET', SERVICE_ID, 'akara.calendar', 'text/html') #application/xhtml+xml
def akara_calendar(highlight=None):
'''
Return a calendar in HTML
Generates a calendar along the lines of:
< January, 2007 >
Mo Tu We Th Fr Sa Su
1 2 3 4 5
6 7 8 9 10 11 12
13 14 15 16 17 18 19
20 21 22 23 24 25 26
27 28 29 30 31
Marks present date and those that have entries with archive links
Defines the following classes (for use in CSS customization):
- akaraCalCalendar
- calendar table (note: month/year header e.g. January 2007 is in table/th)
- akaraCalCalendarWeekHeaders
- week header (Su, Mo, Tu, ...)
- akaraCalCalendarEmpty
- filler cell (e.g. days after Jan 31)
- akaraCalCalendarLive
- day for which there is an entry (also has links to that day's archives)
And the following IDs:
- akaraCalCalendarToday
- today's calendar day
- akaraCalCalendarSpecificDay
- specific day being rendered (if any)
Some ideas (e.g. CSS styling of the table) from pycalendar.py by Will Guaraldi
Sample request:
curl http://localhost:8880/akara.calendar
curl http://localhost:8880/akara.calendar/2008/12
curl http://localhost:8880/akara.calendar/2008/12?highlight=2008-12-03
'''
baseuri = request.environ['SCRIPT_NAME'] + '/'
today = date.today()
year = shift_path_info(request.environ)
month = shift_path_info(request.environ)
if highlight:
#Fun axiom: date(*map(int, date.today().isoformat().split('-')))
highlight = date(*map(int, highlight.split('-')))
if year and month:
#Use specified year & month
year, month = int(year), int(month)
if (year, month) == (today.year, today.month):
present_day = today.day
else:
present_day = None
else:
        #XXX We might want to return Bad Request if they specified year but not month
#Use present year & month
year, month = today.year, today.month
present_day = today.day
#logger.debug("year: " + repr(year))
dayheaders = ''.join(
['<td>%s</td>' % dh
for dh in calendar.weekheader(3).split()]
)
monthcal = calendar.monthcalendar(year, month)
c = []
for wk in monthcal:
c.append('<tr>\n')
for d in wk:
d_int = int(d)
attrs = ''
if d_int < 1:
d = ' '
fulldate = date.max #never to be found in archives
attrs += ' class="akaraCalCalendarEmpty"'
else:
fulldate = date(year, month, d_int)
# "today" trumps "specific day"
if d_int == present_day:
attrs += ' id="akaraCalCalendarToday"'
elif highlight and d_int == highlight.day:
attrs += ' id="akaraCalCalendarSpecificDay"'
#if fulldate in archives:
# attrs += ' class="akaraCalCalendarLive"'
#d = '<a href="%s%i/%i/%s/">%s</a>'%(self.weblog_base_url, year, month, d, d)
# d = '%s'%(d)
c.append('\t<td%s>%s</td>\n' % (attrs, d))
c.append('\n</tr>\n')
monthname = calendar.month_name[month]
prevmonth = date(year, month, 1) + relativedelta(months=-1)
nextmonth = date(year, month, 1) + relativedelta(months=+1)
#Yes, even checking if prevmonth > today, so if someone surfs
    #3 months in the future, there will be no month nav links
if prevmonth > today:
prevmonth = ''
else:
#prevmonth = '<th><a href="%s%i/%i/"><<</a></th>'%(self.weblog_base_url, prevmonth.year, prevmonth.month)
prevmonth = '<th><a href="%s"><<</a></th>'%(join(baseuri, str(prevmonth.year), str(prevmonth.month)))
if nextmonth > today:
nextmonth = ''
else:
nextmonth = '<th><a href="%s">>></a></th>'%(join(baseuri, str(nextmonth.year), str(nextmonth.month)))
month = ''.join(c)
cal = CAL_TEMPLATE.safe_substitute(locals())
return cal
|
{
"content_hash": "939f923fb9d9f89ccb04e60ef40232da",
"timestamp": "",
"source": "github",
"line_count": 146,
"max_line_length": 119,
"avg_line_length": 34,
"alnum_prop": 0.5997179693795326,
"repo_name": "uogbuji/akara",
"id": "8cb965d226ab4c31f1db1764c95ab46d70ca3262",
"size": "4990",
"binary": false,
"copies": "1",
"ref": "refs/heads/pregithub",
"path": "lib/demo/calweb.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "723024"
}
],
"symlink_target": ""
}
|
from django.core.urlresolvers import reverse
from seahub.test_utils import BaseTestCase
class ConvertCmmtDescLinkTest(BaseTestCase):
def setUp(self):
self.login_as(self.user)
def test_can_render(self):
pass
# resp = self.client.get(reverse('convert_cmmt_desc_link') + '?repo_id=' + self.repo.id + '&cmmt_id=xxx' + '&nm=foo')
# self.assertEqual(200, resp.status_code)
|
{
"content_hash": "91dae7cfbad58c5432e4ba6c839f21d9",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 125,
"avg_line_length": 31.615384615384617,
"alnum_prop": 0.6715328467153284,
"repo_name": "saukrIppl/seahub",
"id": "398eb091d4742ff10dc8aad01a03f2297cd34d17",
"size": "411",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/seahub/views/init/test_convert_cmmt_desc_link.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "329387"
},
{
"name": "CoffeeScript",
"bytes": "21"
},
{
"name": "HTML",
"bytes": "722728"
},
{
"name": "Java",
"bytes": "307193"
},
{
"name": "JavaScript",
"bytes": "7293422"
},
{
"name": "Makefile",
"bytes": "1097"
},
{
"name": "PLpgSQL",
"bytes": "19598"
},
{
"name": "Python",
"bytes": "9050702"
},
{
"name": "Shell",
"bytes": "9695"
}
],
"symlink_target": ""
}
|
import os
import random
import logging
class LocalFileShuffle:
initialized = False
@classmethod
def initializeIfNeeded(cls):
if cls.initialized:
return
localDirRoot = "/tmp/mdpark/"
tries = 0
foundLocalDir = False
        while tries < 10:
            tries += 1
            path = os.path.join(localDirRoot, str(random.randint(1, 100)))
            if not os.path.exists(path):
                foundLocalDir = True
                os.makedirs(path)
                break
if not foundLocalDir:
raise Exception("no dir")
localDir = path
shuffleDir = os.path.join(localDir, "shuffle")
os.makedirs(shuffleDir)
logging.info("shuffle dir: %s", shuffleDir)
cls.shuffleDir = shuffleDir
cls.serverUri = "file:///" + shuffleDir
cls.initialized = True
@classmethod
def getOutputFile(cls, shuffleId, inputId, outputId):
cls.initializeIfNeeded()
path = os.path.join(cls.shuffleDir, str(shuffleId), str(inputId))
if not os.path.exists(path):
os.makedirs(path)
return os.path.join(path, str(outputId))
@classmethod
def getServerUri(cls):
cls.initializeIfNeeded()
return cls.serverUri
nextShuffleId = 0
@classmethod
def newShuffleId(cls):
        cls.nextShuffleId += 1
return cls.nextShuffleId
|
{
"content_hash": "3b1849426ae05d621cc0b7574a1a8e6f",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 74,
"avg_line_length": 26.30188679245283,
"alnum_prop": 0.5946915351506457,
"repo_name": "zzl0/mdpark",
"id": "bc343f70f86ed73da8065cde3f49982449eb0c73",
"size": "1411",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "mdpark/shuffle.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "36874"
}
],
"symlink_target": ""
}
|
from django.test import TestCase, Client
from django.core.urlresolvers import reverse
from django.utils import timezone
from time import sleep
from geoposition import Geoposition
from seshdash.data.db.influx import Influx
from seshdash.models import Sesh_Site, Sesh_User
class DataAnalysisTestCase(TestCase):
"""
Testing the functions that help in analysing and presenting
the Data sesh collects
"""
def setUp(self):
"""
Initializing
"""
self.client = Client()
self.location = Geoposition(52.5,24.3)
# Setup Influx
self._influx_db_name = 'test_db'
self.i = Influx(database=self._influx_db_name)
try:
self.i.create_database(self._influx_db_name)
except:
self.i.delete_database(self._influx_db_name)
sleep(1)
self.i.create_database(self._influx_db_name)
pass
self.site = Sesh_Site.objects.create(site_name=u"Test site",
comission_date=timezone.datetime(2015, 12, 11, 22, 0),
location_city=u"kigali",
location_country=u"rwanda",
installed_kw=123.0,
position=self.location,
system_voltage=24,
number_of_panels=12,
vrm_site_id=213,
battery_bank_capacity=12321,
has_genset=True,
has_grid=True,
)
self.test_user = Sesh_User.objects.create_user(
username="johndoe",
email="alp@gle.solar",
password="asdasd12345")
def test_get_csv_data(self):
"""
Testing the functions that return csv files
for a given measuremnt
"""
data = {
'measurement': 'battery_voltage',
'start-time': '2015-01-01',
'end-time': '2017-01-01',
'site-name': self.site.site_name
}
self.client.login(username='johndoe',password='asdasd12345')
response = self.client.post(reverse('export-csv-data', args=[self.site.id]), data)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.get('Content-Type'), 'text/csv')
|
{
"content_hash": "77ba60d96daf62ee93d64d7e1c12e4ee",
"timestamp": "",
"source": "github",
"line_count": 70,
"max_line_length": 99,
"avg_line_length": 36.542857142857144,
"alnum_prop": 0.49452697419859265,
"repo_name": "GreatLakesEnergy/sesh-dash-beta",
"id": "b6ef2cbfc58466c1360bc3d9a19ec8d018266e03",
"size": "2568",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "seshdash/tests/test_data_analysis.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "119593"
},
{
"name": "HTML",
"bytes": "78888"
},
{
"name": "JavaScript",
"bytes": "125120"
},
{
"name": "PLpgSQL",
"bytes": "133"
},
{
"name": "Python",
"bytes": "407393"
},
{
"name": "Shell",
"bytes": "1549"
}
],
"symlink_target": ""
}
|
"""Graphs the age of the newest index segment on a nutch instance.
Needs statistics.jsp installed on the nutch server."""
# Created 2009 by Olaf Wozniak for Hudora
import sys
import httplib2
import re
import datetime
if len(sys.argv) == 2 and sys.argv[1] == "config":
print """graph_title Last Modified
graph_vlabel lastmodified
lastmodified.label lastmodified"""
exit()
h = httplib2.Http()
resp, content = h.request("http://nutch.hudora.biz/statistics.jsp")
lines = content.split("\n")
for line in lines:
if line.strip().startswith("<li>lastModified: <strong>"):
date_modified = re.findall(r"<strong>(?P<number>.+)</strong>", line)[0]
date_modified_tshift = date_modified[-8:]
date_now = datetime.datetime.now()
date_modified = datetime.datetime.strptime(date_modified, "%Y-%m-%dT%H:%M:%S."+date_modified_tshift)
date_diff = date_now - date_modified
print "lastmodified.value %s" % int(date_diff.seconds/60)
exit()
|
{
"content_hash": "d0f13d9b958583c54c1f9d1b433a9205",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 108,
"avg_line_length": 31.28125,
"alnum_prop": 0.6703296703296703,
"repo_name": "mdornseif/hd-munin-plugins",
"id": "f859578922624434f1ca42e8ed901484d916346e",
"size": "1042",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nutch_last_modified.py",
"mode": "33261",
"license": "bsd-2-clause",
"language": [],
"symlink_target": ""
}
|
'''
This script defines colors and rcParams for matplotlib.pyplot .
It is imported by the figure generation scripts to set the general style of the plots.
'''
import matplotlib.pyplot as plt
# define some gray levels
black = (0.0,0.0,0.0)
darkgray = (0.25, 0.25, 0.25)
midgray = (0.5, 0.5, 0.5)
lightgray = (0.75, 0.75, 0.75)
white = (1.0, 1.0, 1.0)
# define some red levels
darkred = (0.3, 0., 0.)
midred = (0.5, 0., 0.)
lightred = (0.7, 0., 0.)
red = (1., 0., 0.)
# INM colors
myblue = (0., 64./255., 192./255.)
myred = (192./255., 64./255., 0.)
mygreen = (0., 192./255., 64./255.)
myorange = (0.5, 0.25, 0.25)
mypink = (0.75, 0.25, 0.75)
myblue2 = (0., 128./255., 192./255.)
myred2 = (245./255., 157./255., 115./255.)
# coolers colors
myred_hex = '#931621'
myyellow_hex ='#B67431'
myblue1_hex = '#2B4162'
myblue2_hex = '#2C8C99'
mygreen_hex = '#0B6E4F'
# Sanzo Wanda's collection 264
sw_pink = '#ffa6d9'
sw_red = '#e81900'
sw_light_green = '#b3d9a3'
sw_dark_green = '#29bdad'
# Okabe and Ito's colorblind-friendly palette
oi_black = (0., 0., 0.)
oi_orange = (230./255, 159./255, 0.)
oi_light_blue = (86./255, 180./255, 233./255)
oi_green = (0., 158./255, 115./255)
oi_yellow = (240./255, 228./255, 66./255)
oi_blue = (0., 114./255, 178./255)
oi_dark_orange = (213./255, 94./255, 0.)
oi_purple = (204./255, 121./255, 167./255)
# custom densely dashed linestyle
densely_dashed = (0, (5, 1))
#panel_wh_ratio = (1. + np.sqrt(5)) / 2. # golden ratio
class visualization():
def __init__(self):
'''
Initialize an instance of this class to set the pyplot rcParams.
'''
# figure dims
self.SCIwidth = 5.5 # in inches
self.SCIheight2row= 5.5
self.SCIheight1row= 2.75
#self.inchpercm = 2.54
width = self.SCIwidth
height = self.SCIheight2row # width / panel_wh_ratio
scale = 1.2
plt.rcParams['figure.figsize'] = (width, height)
# resolution of figures in dpi
# does not influence eps output
plt.rcParams['figure.dpi'] = 600
# font
plt.rcParams['font.size'] = scale*8
plt.rcParams['axes.titlesize'] = scale*8
plt.rcParams['legend.fontsize'] = scale*6
plt.rcParams['font.family'] = "helvetica"
plt.rcParams['lines.linewidth'] = scale*1.0
# size of markers (points in point plots)
plt.rcParams['lines.markersize'] = scale * 1. #2.5
plt.rcParams['patch.linewidth'] = scale * 1.0
plt.rcParams['axes.linewidth'] = scale * 0.2 # edge linewidth
# ticks distances
plt.rcParams['xtick.major.size'] = scale * 1.5 # major tick size in points
plt.rcParams['xtick.minor.size'] = scale * 1.5 # minor tick size in points
plt.rcParams['lines.markeredgewidth'] = scale * 0.5 # line width of ticks
plt.rcParams['grid.linewidth'] = scale * 0.5
plt.rcParams['xtick.major.pad'] = scale * 2 # distance to major tick label in points
plt.rcParams['xtick.minor.pad'] = scale * 2 # distance to the minor tick label in points
plt.rcParams['ytick.major.size'] = scale * 1.5 # major tick size in points
plt.rcParams['ytick.minor.size'] = scale * 1.5 # minor tick size in points
plt.rcParams['ytick.major.width'] = scale * 0.2 # major tick size in points
plt.rcParams['ytick.minor.width'] = scale * 0.2 # minor tick size in points
plt.rcParams['xtick.major.width'] = scale * 0.2 # major tick size in points
plt.rcParams['xtick.minor.width'] = scale * 0.2 # minor tick size in points
plt.rcParams['ytick.major.pad'] = scale * 2 # distance to major tick label in points
plt.rcParams['ytick.minor.pad'] = scale * 2 # distance to the minor tick label in points
# ticks textsize
plt.rcParams['ytick.labelsize'] = scale * 8
plt.rcParams['xtick.labelsize'] = scale * 8
# use latex to generate the labels in plots
# not needed anymore in newer versions
# using this, font detection fails on adobe illustrator 2010-07-20
plt.rcParams['text.usetex'] = True
plt.rcParams['ps.useafm'] = False # use of afm fonts, results in small files
plt.rcParams['ps.fonttype'] = 3 # Output Type 3 (Type3) or Type 42 (TrueType)
##################
### DIMENSIONS ###
##################
def set_SCI_1row_fig_style(self): #, ratio=panel_wh_ratio):
'''figure size corresponding to one row of usually 3 panels'''
plt.rcParams.update({
'figure.figsize' : [self.SCIwidth,self.SCIheight1row],
})
def set_SCI_2row_fig_style(self):
'''figure size corresponding to two rows of usually 3 panels'''
plt.rcParams.update({
'figure.figsize' : [self.SCIwidth,self.SCIheight2row],
})
############
### MISC ###
############
    def remove_ticks(self, ax):
''' small function to remove all ticks from a panel '''
ax.set_yticks([])
ax.set_xticks([])
def remove_axis_junk(self, ax, which=['right', 'top']):
'''remove upper and right axis'''
# for loc, spine in ax.spines.iteritems():
# if loc in which:
# spine.set_color('none')
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
def make_axis_cross(self,ax):
        # Move left y-axis and bottom x-axis to centre, passing through (0,0)
ax.spines['left'].set_position('center')
#ax.spines['bottom'].set_position('center')
# Eliminate upper and right axes
ax.spines['right'].set_color('none')
ax.spines['top'].set_color('none')
# Show ticks in the left and lower axes only
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
return ax
def legend(self, ax, on=True, loc=1):
plt.sca(ax)
if on:
plt.legend(loc=loc)
return ax
def title(self, ax, title='', verticalalignment='top'):
plt.sca(ax)
plt.suptitle(title)
return ax
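# Hedged usage sketch (not part of the original module): how a figure script
# might apply this style after importing it. The plotted values and the output
# filename below are assumptions made purely for illustration.
if __name__ == '__main__':
    viz = visualization()            # applies the rcParams on construction
    viz.set_SCI_1row_fig_style()     # switch to the one-row figure size
    fig, ax = plt.subplots()
    ax.plot([0, 1, 2, 3], [0, 1, 4, 9], color=oi_blue, label='example')
    viz.remove_axis_junk(ax)         # ticks on the bottom/left axes only
    viz.legend(ax, on=True, loc=2)
    fig.savefig('example_panel.pdf')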
|
{
"content_hash": "c227b0ac44001c703e042a3f90fa866c",
"timestamp": "",
"source": "github",
"line_count": 189,
"max_line_length": 101,
"avg_line_length": 34.03703703703704,
"alnum_prop": 0.5625680087051143,
"repo_name": "INM-6/Python-Module-of-the-Week",
"id": "c56d6077c9dbb46567c7d6a1cc2948b0d5e31f45",
"size": "6433",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "session41_plotting_discussion/visualization.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "795"
},
{
"name": "C++",
"bytes": "678"
},
{
"name": "CSS",
"bytes": "17737"
},
{
"name": "Cython",
"bytes": "792"
},
{
"name": "HTML",
"bytes": "4241166"
},
{
"name": "Jupyter Notebook",
"bytes": "6418232"
},
{
"name": "Makefile",
"bytes": "634"
},
{
"name": "Python",
"bytes": "85871"
},
{
"name": "Ruby",
"bytes": "149"
},
{
"name": "TeX",
"bytes": "11069"
},
{
"name": "Vim script",
"bytes": "10526"
}
],
"symlink_target": ""
}
|
import json
import cli
(options, args) = cli.getOptions()
hadcrut4 = open(options.source, "r")
output = {}
output['name'] = options.name
hadcrutData = []
output['data'] = hadcrutData
for line in hadcrut4.readlines():
currentMonth = {}
data = line.split()
yearMonth = data[0].split('/')
year = int(yearMonth[0])
if(len(yearMonth) == 2):
month = int(yearMonth[1])
currentMonth['month'] = month
currentMonth['year'] = year
currentMonth['mean'] = float(data[1])
currentMonth['lowerBoundBias'] = float(data[2])
currentMonth['upperBoundBias'] = float(data[3])
currentMonth['lowerBoundMeasurement'] = float(data[4])
currentMonth['upperBoundMeasurement'] = float(data[5])
currentMonth['lowerBoundCoverage'] = float(data[6])
currentMonth['upperBoundCoverage'] = float(data[7])
currentMonth['lowerBoundCombination'] = float(data[8])
currentMonth['upperBoundCombination'] = float(data[9])
currentMonth['lowerBoundCombinedAll'] = float(data[10])
currentMonth['upperBoundCombinedAll'] = float(data[11])
hadcrutData.append(currentMonth)
if(options.outputFile):
with open(options.outputFile, 'w') as outfile:
json.dump(output, outfile)
if(options.verbose):
print(json.dumps(output, indent=2))
hadcrut4.close()
|
{
"content_hash": "e075b152ac8547a5082bff96dd270bcd",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 59,
"avg_line_length": 32.6,
"alnum_prop": 0.6809815950920245,
"repo_name": "Penson122/Third-Year-Project",
"id": "9eb0e35d7d70ecacc7b3140d368dffe9e248601b",
"size": "1304",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scrapers/hadcrut4.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "431"
},
{
"name": "HTML",
"bytes": "1600"
},
{
"name": "JavaScript",
"bytes": "50159"
},
{
"name": "Python",
"bytes": "4893"
}
],
"symlink_target": ""
}
|
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import unittest
from pants.build_graph.address import Address
from pants.engine.exp.objects import ValidationError
from pants.engine.exp.struct import Struct
class StructTest(unittest.TestCase):
def test_address_no_name(self):
config = Struct(address=Address.parse('a:b'))
self.assertEqual('b', config.name)
def test_address_name_conflict(self):
with self.assertRaises(ValidationError):
Struct(name='a', address=Address.parse('a:b'))
def test_type_alias(self):
self.assertEqual('Struct', Struct().type_alias)
self.assertEqual('aliased', Struct(type_alias='aliased').type_alias)
class Subclass(Struct):
pass
self.assertEqual('Subclass', Subclass().type_alias)
self.assertEqual('aliased_subclass', Subclass(type_alias='aliased_subclass').type_alias)
def test_extend(self):
extends = Struct(age=32, label='green', items=[],
extends=Struct(age=42, other=True, items=[1, 2]))
# Extension is lazy, so we don't pick up the other field yet.
self.assertNotEqual(Struct(age=32, label='green', items=[], other=True), extends)
# But we do pick it up now.
self.assertEqual(Struct(age=32, label='green', items=[], other=True), extends.create())
def test_merge(self):
merges = Struct(age=32, items=[3], knobs={'b': False},
merges=[Struct(age=42,
other=True,
items=[1, 2],
knobs={'a': True, 'b': True})])
# Merging is lazy, so we don't pick up the other field yet.
self.assertNotEqual(Struct(age=32,
items=[1, 2, 3],
knobs={'a': True, 'b': False},
other=True),
merges)
# But we do pick it up now.
self.assertEqual(Struct(age=32,
items=[3, 1, 2],
knobs={'a': True, 'b': True},
other=True),
merges.create())
def test_extend_and_merge(self):
extends_and_merges = Struct(age=32, label='green', items=[5],
extends=Struct(age=42,
other=True,
knobs={'a': True},
items=[1, 2]),
merges=[Struct(age=52,
other=False,
items=[1, 3, 4],
knobs={'a': False, 'b': True}),
Struct(items=[2])])
self.assertEqual(Struct(age=32,
label='green',
other=True,
items=[5, 1, 3, 4, 2],
knobs={'a': False, 'b': True}),
extends_and_merges.create())
def test_validate_concrete(self):
class Subclass(Struct):
def validate_concrete(self):
if self.name != 'jake':
self.report_validation_error('There is only one true good name.')
# A valid name.
jake = Subclass(name='jake')
jake.validate()
# An invalid name, but we're abstract, so don't validate yet.
jack = Subclass(name='jack', abstract=True)
jack.validate()
# An invalid name in a concrete instance, this should raise.
jeb = Subclass(name='jeb')
with self.assertRaises(ValidationError):
jeb.validate()
|
{
"content_hash": "ef36b28269ee1ca12739328934201e42",
"timestamp": "",
"source": "github",
"line_count": 96,
"max_line_length": 93,
"avg_line_length": 39.322916666666664,
"alnum_prop": 0.5035761589403973,
"repo_name": "dturner-tw/pants",
"id": "53dcfdae02dfccca224649c5afdf48394593671d",
"size": "3922",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/python/pants_test/engine/exp/test_struct.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "781"
},
{
"name": "CSS",
"bytes": "11538"
},
{
"name": "Cucumber",
"bytes": "919"
},
{
"name": "GAP",
"bytes": "2459"
},
{
"name": "Go",
"bytes": "1849"
},
{
"name": "HTML",
"bytes": "70358"
},
{
"name": "Java",
"bytes": "293253"
},
{
"name": "JavaScript",
"bytes": "31042"
},
{
"name": "Protocol Buffer",
"bytes": "3783"
},
{
"name": "Python",
"bytes": "4404984"
},
{
"name": "Scala",
"bytes": "85217"
},
{
"name": "Shell",
"bytes": "50774"
},
{
"name": "Thrift",
"bytes": "2919"
}
],
"symlink_target": ""
}
|
import mock
from heat.common import template_format
from heat.engine.clients.os import senlin
from heat.engine.resources.openstack.senlin import profile as sp
from heat.engine import scheduler
from heat.tests import common
from heat.tests import utils
profile_stack_template = """
heat_template_version: 2016-04-08
description: Senlin Profile Template
resources:
senlin-profile:
type: OS::Senlin::Profile
properties:
name: SenlinProfile
type: os.heat.stack-1.0
properties:
template:
heat_template_version: 2014-10-16
resources:
random:
type: OS::Heat::RandomString
"""
profile_spec = {
'type': 'os.heat.stack',
'version': '1.0',
'properties': {
'template': {
'heat_template_version': '2014-10-16',
'resources': {
'random': {
'type': 'OS::Heat::RandomString'
}
}
}
}
}
class FakeProfile(object):
def __init__(self, id='some_id', spec=None):
self.id = id
self.name = "SenlinProfile"
self.metadata = {}
self.spec = spec or profile_spec
class SenlinProfileTest(common.HeatTestCase):
def setUp(self):
super(SenlinProfileTest, self).setUp()
self.senlin_mock = mock.MagicMock()
self.patchobject(sp.Profile, 'client', return_value=self.senlin_mock)
self.patchobject(senlin.ProfileTypeConstraint, 'validate',
return_value=True)
self.fake_p = FakeProfile()
self.t = template_format.parse(profile_stack_template)
def _init_profile(self, template):
self.stack = utils.parse_stack(template)
profile = self.stack['senlin-profile']
return profile
def _create_profile(self, template):
profile = self._init_profile(template)
self.senlin_mock.create_profile.return_value = self.fake_p
scheduler.TaskRunner(profile.create)()
self.assertEqual((profile.CREATE, profile.COMPLETE),
profile.state)
self.assertEqual(self.fake_p.id, profile.resource_id)
return profile
def test_profile_create(self):
self._create_profile(self.t)
expect_kwargs = {
'name': 'SenlinProfile',
'metadata': None,
'spec': profile_spec
}
self.senlin_mock.create_profile.assert_called_once_with(
**expect_kwargs)
def test_profile_delete(self):
self.senlin_mock.delete_profile.return_value = None
profile = self._create_profile(self.t)
scheduler.TaskRunner(profile.delete)()
self.senlin_mock.delete_profile.assert_called_once_with(
profile.resource_id)
def test_profile_update(self):
profile = self._create_profile(self.t)
prop_diff = {'metadata': {'foo': 'bar'}}
self.senlin_mock.get_profile.return_value = self.fake_p
profile.handle_update(json_snippet=None,
tmpl_diff=None,
prop_diff=prop_diff)
self.senlin_mock.update_profile.assert_called_once_with(
self.fake_p, **prop_diff)
|
{
"content_hash": "e8b9f49e47995232311090f23d0ffd07",
"timestamp": "",
"source": "github",
"line_count": 101,
"max_line_length": 77,
"avg_line_length": 31.77227722772277,
"alnum_prop": 0.6011218448114678,
"repo_name": "noironetworks/heat",
"id": "2a0001a9d3fffc26ac1f19894832d5b9e2d11d82",
"size": "3784",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "heat/tests/openstack/senlin/test_profile.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "8804896"
},
{
"name": "Shell",
"bytes": "64533"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('taskmanager', '0002_auto_20170704_2106'),
]
operations = [
migrations.CreateModel(
name='Tag',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100, verbose_name='name')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='tags', to='taskmanager.Profile', verbose_name='user')),
],
options={
'verbose_name': 'Tag',
'verbose_name_plural': 'Tags',
'ordering': ('user', 'name'),
},
),
migrations.AlterUniqueTogether(
name='tag',
unique_together=set([('user', 'name')]),
),
]
|
{
"content_hash": "fa2a57e7de0e44ce1ecb046db7ee5786",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 157,
"avg_line_length": 32.774193548387096,
"alnum_prop": 0.5492125984251969,
"repo_name": "caithess/taskbuster",
"id": "7f0d353c40f720c855b49d75cf31249b90b79e8e",
"size": "1089",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "taskbuster/apps/taskmanager/migrations/0003_auto_20170704_2124.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "255"
},
{
"name": "HTML",
"bytes": "7124"
},
{
"name": "JavaScript",
"bytes": "1"
},
{
"name": "Python",
"bytes": "22767"
}
],
"symlink_target": ""
}
|
from openerp import SUPERUSER_ID
from openerp.osv import osv
class mail_thread(osv.AbstractModel):
""" Update of mail_mail class, to add the signin URL to notifications. """
_inherit = 'mail.thread'
def _get_inbox_action_xml_id(self, cr, uid, context=None):
""" For a given message, return an action that either
- opens the form view of the related document if model, res_id, and
read access to the document
- opens the Inbox with a default search on the conversation if model,
res_id
- opens the Inbox with context propagated
"""
cur_user = self.pool.get('res.users').browse(cr, SUPERUSER_ID, uid, context=context)
# if uid is a portal user -> action is different
if any(group.is_portal for group in cur_user.groups_id):
return ('portal', 'action_mail_inbox_feeds_portal')
else:
return super(mail_thread, self)._get_inbox_action_xml_id(cr, uid, context=context)
|
{
"content_hash": "65614716bc641fb3d86c2f7aa21d1bb2",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 94,
"avg_line_length": 46.09090909090909,
"alnum_prop": 0.6380670611439843,
"repo_name": "cristianquaglio/odoo",
"id": "44bc4d0bde8875889c59ad63d1f226b4828cb408",
"size": "2004",
"binary": false,
"copies": "383",
"ref": "refs/heads/master",
"path": "addons/portal/mail_thread.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "9611"
},
{
"name": "C++",
"bytes": "108790"
},
{
"name": "CSS",
"bytes": "671328"
},
{
"name": "HTML",
"bytes": "212829"
},
{
"name": "JavaScript",
"bytes": "5984109"
},
{
"name": "Makefile",
"bytes": "12332"
},
{
"name": "Mako",
"bytes": "561"
},
{
"name": "PHP",
"bytes": "14033"
},
{
"name": "Python",
"bytes": "8366254"
},
{
"name": "Ruby",
"bytes": "220"
},
{
"name": "Shell",
"bytes": "19163"
},
{
"name": "Vim script",
"bytes": "406"
},
{
"name": "XSLT",
"bytes": "92945"
}
],
"symlink_target": ""
}
|
"""Utility methods for working with WSGI servers."""
import copy
import os
import socket
import eventlet
import eventlet.wsgi
import greenlet
from paste import deploy
import routes.middleware
import webob.dec
import webob.exc
from oslo_log import log as logging
from oslo_service._i18n import _
from oslo_service import _options
from oslo_service import service
from oslo_service import sslutils
LOG = logging.getLogger(__name__)
def list_opts():
"""Entry point for oslo-config-generator."""
return [(None, copy.deepcopy(_options.wsgi_opts))]
def register_opts(conf):
"""Registers WSGI config options."""
return conf.register_opts(_options.wsgi_opts)
class InvalidInput(Exception):
message = _("Invalid input received: "
"Unexpected argument for periodic task creation: %(arg)s.")
class Server(service.ServiceBase):
"""Server class to manage a WSGI server, serving a WSGI application."""
# TODO(eezhova): Consider changing the default host value to prevent
# possible binding to all interfaces. The most appropriate value seems
# to be 127.0.0.1, but it has to be verified that the change wouldn't
# break any consuming project.
def __init__(self, conf, name, app, host='0.0.0.0', port=0, # nosec
pool_size=None, protocol=eventlet.wsgi.HttpProtocol,
backlog=128, use_ssl=False, max_url_len=None,
logger_name='eventlet.wsgi.server',
socket_family=None, socket_file=None, socket_mode=None):
"""Initialize, but do not start, a WSGI server.
:param conf: Instance of ConfigOpts.
:param name: Pretty name for logging.
:param app: The WSGI application to serve.
:param host: IP address to serve the application.
:param port: Port number to server the application.
:param pool_size: Maximum number of eventlets to spawn concurrently.
:param protocol: Protocol class.
:param backlog: Maximum number of queued connections.
:param use_ssl: Wraps the socket in an SSL context if True.
:param max_url_len: Maximum length of permitted URLs.
:param logger_name: The name for the logger.
:param socket_family: Socket family.
:param socket_file: location of UNIX socket.
:param socket_mode: UNIX socket mode.
:returns: None
:raises: InvalidInput
:raises: EnvironmentError
"""
self.conf = conf
self.conf.register_opts(_options.wsgi_opts)
self.default_pool_size = self.conf.wsgi_default_pool_size
# Allow operators to customize http requests max header line size.
eventlet.wsgi.MAX_HEADER_LINE = conf.max_header_line
self.name = name
self.app = app
self._server = None
self._protocol = protocol
self.pool_size = pool_size or self.default_pool_size
self._pool = eventlet.GreenPool(self.pool_size)
self._logger = logging.getLogger(logger_name)
self._use_ssl = use_ssl
self._max_url_len = max_url_len
self.client_socket_timeout = conf.client_socket_timeout or None
if backlog < 1:
            raise InvalidInput(_('The backlog must be more than 0'))
if not socket_family or socket_family in [socket.AF_INET,
socket.AF_INET6]:
self.socket = self._get_socket(host, port, backlog)
elif hasattr(socket, "AF_UNIX") and socket_family == socket.AF_UNIX:
self.socket = self._get_unix_socket(socket_file, socket_mode,
backlog)
else:
raise ValueError(_("Unsupported socket family: %s"), socket_family)
(self.host, self.port) = self.socket.getsockname()[0:2]
if self._use_ssl:
sslutils.is_enabled(conf)
def _get_socket(self, host, port, backlog):
bind_addr = (host, port)
# TODO(dims): eventlet's green dns/socket module does not actually
# support IPv6 in getaddrinfo(). We need to get around this in the
# future or monitor upstream for a fix
try:
info = socket.getaddrinfo(bind_addr[0],
bind_addr[1],
socket.AF_UNSPEC,
socket.SOCK_STREAM)[0]
family = info[0]
bind_addr = info[-1]
except Exception:
family = socket.AF_INET
try:
sock = eventlet.listen(bind_addr, family, backlog=backlog)
except EnvironmentError:
LOG.error("Could not bind to %(host)s:%(port)s",
{'host': host, 'port': port})
raise
sock = self._set_socket_opts(sock)
LOG.info("%(name)s listening on %(host)s:%(port)s",
{'name': self.name, 'host': host, 'port': port})
return sock
def _get_unix_socket(self, socket_file, socket_mode, backlog):
sock = eventlet.listen(socket_file, family=socket.AF_UNIX,
backlog=backlog)
if socket_mode is not None:
os.chmod(socket_file, socket_mode)
LOG.info("%(name)s listening on %(socket_file)s:",
{'name': self.name, 'socket_file': socket_file})
return sock
def start(self):
"""Start serving a WSGI application.
:returns: None
"""
# The server socket object will be closed after server exits,
# but the underlying file descriptor will remain open, and will
# give bad file descriptor error. So duplicating the socket object,
# to keep file descriptor usable.
self.dup_socket = self.socket.dup()
if self._use_ssl:
self.dup_socket = sslutils.wrap(self.conf, self.dup_socket)
wsgi_kwargs = {
'func': eventlet.wsgi.server,
'sock': self.dup_socket,
'site': self.app,
'protocol': self._protocol,
'custom_pool': self._pool,
'log': self._logger,
'log_format': self.conf.wsgi_log_format,
'debug': self.conf.wsgi_server_debug,
'keepalive': self.conf.wsgi_keep_alive,
'socket_timeout': self.client_socket_timeout
}
if self._max_url_len:
wsgi_kwargs['url_length_limit'] = self._max_url_len
self._server = eventlet.spawn(**wsgi_kwargs)
def _set_socket_opts(self, _socket):
_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
# sockets can hang around forever without keepalive
_socket.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
# This option isn't available in the OS X version of eventlet
if hasattr(socket, 'TCP_KEEPIDLE'):
_socket.setsockopt(socket.IPPROTO_TCP,
socket.TCP_KEEPIDLE,
self.conf.tcp_keepidle)
return _socket
def reset(self):
"""Reset server greenpool size to default.
:returns: None
"""
self._pool.resize(self.pool_size)
def stop(self):
"""Stops eventlet server. Doesn't allow accept new connecting.
:returns: None
"""
LOG.info("Stopping WSGI server.")
if self._server is not None:
# let eventlet close socket
self._pool.resize(0)
self._server.kill()
def wait(self):
"""Block, until the server has stopped.
Waits on the server's eventlet to finish, then returns.
:returns: None
"""
try:
if self._server is not None:
num = self._pool.running()
LOG.debug("Waiting WSGI server to finish %d requests.", num)
self._pool.waitall()
except greenlet.GreenletExit:
LOG.info("WSGI server has stopped.")
class Request(webob.Request):
pass
class Router(object):
"""WSGI middleware that maps incoming requests to WSGI apps."""
def __init__(self, mapper):
"""Create a router for the given routes.Mapper.
Each route in `mapper` must specify a 'controller', which is a
WSGI app to call. You'll probably want to specify an 'action' as
well and have your controller be an object that can route
the request to the action-specific method.
Examples:
mapper = routes.Mapper()
sc = ServerController()
# Explicit mapping of one route to a controller+action
mapper.connect(None, '/svrlist', controller=sc, action='list')
# Actions are all implicitly defined
mapper.resource('server', 'servers', controller=sc)
# Pointing to an arbitrary WSGI app. You can specify the
# {path_info:.*} parameter so the target app can be handed just that
# section of the URL.
mapper.connect(None, '/v1.0/{path_info:.*}', controller=BlogApp())
"""
self.map = mapper
self._router = routes.middleware.RoutesMiddleware(self._dispatch,
self.map)
@webob.dec.wsgify(RequestClass=Request)
def __call__(self, req):
"""Route the incoming request to a controller based on self.map.
If no match, return a 404.
"""
return self._router
@staticmethod
@webob.dec.wsgify(RequestClass=Request)
def _dispatch(req):
"""Dispatch the request to the appropriate controller.
Called by self._router after matching the incoming request to a route
and putting the information into req.environ. Either returns 404
or the routed WSGI app's response.
"""
match = req.environ['wsgiorg.routing_args'][1]
if not match:
return webob.exc.HTTPNotFound()
app = match['controller']
return app
class ConfigNotFound(Exception):
def __init__(self, path):
msg = _('Could not find config at %(path)s') % {'path': path}
super(ConfigNotFound, self).__init__(msg)
class PasteAppNotFound(Exception):
def __init__(self, name, path):
msg = (_("Could not load paste app '%(name)s' from %(path)s") %
{'name': name, 'path': path})
super(PasteAppNotFound, self).__init__(msg)
class Loader(object):
"""Used to load WSGI applications from paste configurations."""
def __init__(self, conf):
"""Initialize the loader, and attempt to find the config.
:param conf: Application config
:returns: None
"""
conf.register_opts(_options.wsgi_opts)
self.config_path = None
config_path = conf.api_paste_config
if not os.path.isabs(config_path):
self.config_path = conf.find_file(config_path)
elif os.path.exists(config_path):
self.config_path = config_path
if not self.config_path:
raise ConfigNotFound(path=config_path)
def load_app(self, name):
"""Return the paste URLMap wrapped WSGI application.
:param name: Name of the application to load.
:returns: Paste URLMap object wrapping the requested application.
:raises: PasteAppNotFound
"""
try:
LOG.debug("Loading app %(name)s from %(path)s",
{'name': name, 'path': self.config_path})
return deploy.loadapp("config:%s" % self.config_path, name=name)
except LookupError:
LOG.exception("Couldn't lookup app: %s", name)
raise PasteAppNotFound(name=name, path=self.config_path)
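# Hedged usage sketch (not part of the original module): how a consuming
# service might wire Loader and Server together. The paste app name 'osapi'
# and the bind address below are assumptions for illustration only.
def _example_usage(conf):
    """Load a paste-deployed WSGI app and serve it until shutdown."""
    register_opts(conf)
    loader = Loader(conf)
    app = loader.load_app('osapi')
    server = Server(conf, 'osapi', app, host='127.0.0.1', port=8080)
    server.start()
    server.wait()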
|
{
"content_hash": "1677d72cb66af528bf04810e0ce4da00",
"timestamp": "",
"source": "github",
"line_count": 337,
"max_line_length": 79,
"avg_line_length": 34.795252225519285,
"alnum_prop": 0.5937233498209108,
"repo_name": "openstack/oslo.service",
"id": "8b518bd295f61528092413eb2b1a1767d4de6491",
"size": "12496",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "oslo_service/wsgi.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "225398"
}
],
"symlink_target": ""
}
|
import sys
from PIL import Image
import os
try:
impath = sys.argv[1]
size_x = int(sys.argv[2])
size_y = int(sys.argv[3])
except Exception as e:
print "Usage: purgeImageSize.py [directory] [min x size] [min y size]"
imagelist = [i for i in os.listdir(impath) if (i.endswith((".jpg",".png",".JPG",".PNG")))]
count = 0
for image in imagelist:
with Image.open(os.path.join(impath,image)) as img:
width, height = img.size
if(width < size_x or height < size_y):
print "Image: " + image + "; Size: (" + str(width) + "," + str(height) + ")"
os.remove(os.path.join(impath,image))
count += 1
print "\nPurged %d images" % count
|
{
"content_hash": "d8485998cf6aba40fff876938513d4d2",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 90,
"avg_line_length": 27.8,
"alnum_prop": 0.5884892086330935,
"repo_name": "SezLux/earthporn-wallpaper-manager",
"id": "ff155be1b9d6378e7dc905e6fb92a50c2ab71022",
"size": "713",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "purgeImageSize.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7445"
}
],
"symlink_target": ""
}
|
"""Tests for Vanderbilt SPC binary sensor platform."""
from homeassistant.components.binary_sensor import spc
async def test_setup_platform(hass):
"""Test autodiscovery of supported device types."""
added_entities = []
zone_defs = [{
'id': '1',
'type': '3',
'zone_name': 'Kitchen smoke',
'area': '1',
'area_name': 'House',
'input': '0',
'status': '0',
}, {
'id': '3',
'type': '0',
'zone_name': 'Hallway PIR',
'area': '1',
'area_name': 'House',
'input': '0',
'status': '0',
}, {
'id': '5',
'type': '1',
'zone_name': 'Front door',
'area': '1',
'area_name': 'House',
'input': '1',
'status': '0',
}]
def add_entities(entities):
nonlocal added_entities
added_entities = list(entities)
from pyspcwebgw import Zone
zones = [Zone(area=None, spc_zone=z) for z in zone_defs]
await spc.async_setup_platform(hass=hass,
config={},
async_add_entities=add_entities,
discovery_info={'devices': zones})
assert len(added_entities) == 3
assert added_entities[0].device_class == 'smoke'
assert added_entities[0].state == 'off'
assert added_entities[1].device_class == 'motion'
assert added_entities[1].state == 'off'
assert added_entities[2].device_class == 'opening'
assert added_entities[2].state == 'on'
assert all(d.hidden for d in added_entities)
|
{
"content_hash": "ed1c264869dadcbcc2e5f422e6dd277f",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 69,
"avg_line_length": 29.10909090909091,
"alnum_prop": 0.5153029356652092,
"repo_name": "persandstrom/home-assistant",
"id": "ec0886aeed8307a98c93ecfdc389fe3eb58f0e4e",
"size": "1601",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/components/binary_sensor/test_spc.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1067"
},
{
"name": "Python",
"bytes": "11745210"
},
{
"name": "Ruby",
"bytes": "518"
},
{
"name": "Shell",
"bytes": "16652"
}
],
"symlink_target": ""
}
|
from typing import Union
class WorksContainer:
"""
WorksContainer: Class for working with works results
:rtype: list
Usage::
from habanero import Crossref, WorksContainer
cr = Crossref()
res = cr.works(ids=['10.1136/jclinpath-2020-206745', '10.1136/esmoopen-2020-000776'])
x = WorksContainer(res)
x
x.works
x.doi
x.license
x.title
x.abstract
res2 = cr.works(limit = 2)
x = WorksContainer(res2)
x
x.works
x.doi
x.license
x.title
x.abstract
res3 = cr.members(ids = 98, works = True, limit = 5)
x = WorksContainer(res3)
x
x.works
x.doi
x.license
x.title
x.abstract
"""
def __init__(self, input) -> None:
super(WorksContainer, self).__init__()
if not input:
raise ValueError("input len must be > zero")
self.__input = input
self.works = self.works_handler(self.__input)
keys = list(self.works[0].keys())
for key in keys:
values = [work.get(key, None) for work in self.works]
setattr(self, key.lower().replace("-", "_"), values)
def __repr__(self) -> str:
return """<%s: No. works: %s>""" % (
type(self).__name__,
len(self.works),
)
def works_handler(self, x: Union[list, dict]) -> list:
message_type = (
[w["message-type"] for w in x][0]
if isinstance(x, list)
else x["message-type"]
)
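        # Accept either a single Crossref response dict or a list of them and
        # flatten 'work' / 'work-list' messages into a plain list of work dicts.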
if isinstance(x, list):
if x[0]["message-type"] == "work":
x = list(filter(lambda w: w["message-type"] == "work", x))
return [w["message"] for w in x]
elif x[0]["message-type"] == "work-list":
x = list(filter(lambda w: w["message-type"] == "work-list", x))
items = [w["message"]["items"] for w in x]
return [z for sublist in items for z in sublist]
else:
raise TypeError(
f"can only handle Crossref message-type 'work' & 'work-list', got: '{message_type}'"
)
elif isinstance(x, dict) and x["message-type"] == "work-list":
return x["message"]["items"]
elif isinstance(x, dict) and x["message-type"] == "work":
return [x["message"]]
else:
raise TypeError(
f"can only handle Crossref message-type 'work' & 'work-list', got: '{message_type}'"
)
|
{
"content_hash": "c6d32828cc0416ffbde2fef6ded09d37",
"timestamp": "",
"source": "github",
"line_count": 86,
"max_line_length": 104,
"avg_line_length": 31.61627906976744,
"alnum_prop": 0.47811695476278043,
"repo_name": "sckott/habanero",
"id": "c1e255e10c8a97bfea388813da72450e3795528b",
"size": "2719",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "habanero/crossref/workscontainer.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "625"
},
{
"name": "Python",
"bytes": "135763"
}
],
"symlink_target": ""
}
|
"""
The MIT License (MIT)
Copyright (c) 2015-present Rapptz
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
from __future__ import annotations
import asyncio
from collections import deque, OrderedDict
import copy
import datetime
import itertools
import logging
from typing import (
Dict,
Optional,
TYPE_CHECKING,
Union,
Callable,
Any,
List,
TypeVar,
Coroutine,
Sequence,
Tuple,
Deque,
Literal,
overload,
)
import weakref
import inspect
import os
from .guild import Guild
from .activity import BaseActivity
from .user import User, ClientUser
from .emoji import Emoji
from .mentions import AllowedMentions
from .partial_emoji import PartialEmoji
from .message import Message
from .channel import *
from .channel import _channel_factory
from .raw_models import *
from .member import Member
from .role import Role
from .enums import ChannelType, try_enum, Status
from . import utils
from .flags import ApplicationFlags, Intents, MemberCacheFlags
from .object import Object
from .invite import Invite
from .integrations import _integration_factory
from .interactions import Interaction
from .ui.view import ViewStore, View
from .scheduled_event import ScheduledEvent
from .stage_instance import StageInstance
from .threads import Thread, ThreadMember
from .sticker import GuildSticker
if TYPE_CHECKING:
from .abc import PrivateChannel
from .message import MessageableChannel
from .guild import GuildChannel, VocalGuildChannel
from .http import HTTPClient
from .voice_client import VoiceProtocol
from .client import Client
from .gateway import DiscordWebSocket
from .app_commands import CommandTree
from .types.snowflake import Snowflake
from .types.activity import Activity as ActivityPayload
from .types.channel import DMChannel as DMChannelPayload
from .types.user import User as UserPayload, PartialUser as PartialUserPayload
from .types.emoji import Emoji as EmojiPayload, PartialEmoji as PartialEmojiPayload
from .types.sticker import GuildSticker as GuildStickerPayload
from .types.guild import Guild as GuildPayload
from .types.message import Message as MessagePayload, PartialMessage as PartialMessagePayload
from .types import gateway as gw
T = TypeVar('T')
Channel = Union[GuildChannel, VocalGuildChannel, PrivateChannel, PartialMessageable]
class ChunkRequest:
def __init__(
self,
guild_id: int,
loop: asyncio.AbstractEventLoop,
resolver: Callable[[int], Any],
*,
cache: bool = True,
) -> None:
self.guild_id: int = guild_id
self.resolver: Callable[[int], Any] = resolver
self.loop: asyncio.AbstractEventLoop = loop
self.cache: bool = cache
self.nonce: str = os.urandom(16).hex()
self.buffer: List[Member] = []
self.waiters: List[asyncio.Future[List[Member]]] = []
def add_members(self, members: List[Member]) -> None:
self.buffer.extend(members)
if self.cache:
guild = self.resolver(self.guild_id)
if guild is None:
return
for member in members:
existing = guild.get_member(member.id)
if existing is None or existing.joined_at is None:
guild._add_member(member)
async def wait(self) -> List[Member]:
future = self.loop.create_future()
self.waiters.append(future)
try:
return await future
finally:
self.waiters.remove(future)
def get_future(self) -> asyncio.Future[List[Member]]:
future = self.loop.create_future()
self.waiters.append(future)
return future
def done(self) -> None:
for future in self.waiters:
if not future.done():
future.set_result(self.buffer)
_log = logging.getLogger(__name__)
async def logging_coroutine(coroutine: Coroutine[Any, Any, T], *, info: str) -> Optional[T]:
try:
await coroutine
except Exception:
_log.exception('Exception occurred during %s', info)
class ConnectionState:
if TYPE_CHECKING:
_get_websocket: Callable[..., DiscordWebSocket]
_get_client: Callable[..., Client]
_parsers: Dict[str, Callable[[Dict[str, Any]], None]]
def __init__(
self,
*,
dispatch: Callable[..., Any],
handlers: Dict[str, Callable[..., Any]],
hooks: Dict[str, Callable[..., Coroutine[Any, Any, Any]]],
http: HTTPClient,
**options: Any,
) -> None:
# Set later, after Client.login
self.loop: asyncio.AbstractEventLoop = utils.MISSING
self.http: HTTPClient = http
self.max_messages: Optional[int] = options.get('max_messages', 1000)
if self.max_messages is not None and self.max_messages <= 0:
self.max_messages = 1000
self.dispatch: Callable[..., Any] = dispatch
self.handlers: Dict[str, Callable[..., Any]] = handlers
self.hooks: Dict[str, Callable[..., Coroutine[Any, Any, Any]]] = hooks
self.shard_count: Optional[int] = None
self._ready_task: Optional[asyncio.Task] = None
self.application_id: Optional[int] = utils._get_as_snowflake(options, 'application_id')
self.heartbeat_timeout: float = options.get('heartbeat_timeout', 60.0)
self.guild_ready_timeout: float = options.get('guild_ready_timeout', 2.0)
if self.guild_ready_timeout < 0:
raise ValueError('guild_ready_timeout cannot be negative')
allowed_mentions = options.get('allowed_mentions')
if allowed_mentions is not None and not isinstance(allowed_mentions, AllowedMentions):
raise TypeError('allowed_mentions parameter must be AllowedMentions')
self.allowed_mentions: Optional[AllowedMentions] = allowed_mentions
self._chunk_requests: Dict[Union[int, str], ChunkRequest] = {}
activity = options.get('activity', None)
if activity:
if not isinstance(activity, BaseActivity):
raise TypeError('activity parameter must derive from BaseActivity.')
activity = activity.to_dict()
status = options.get('status', None)
if status:
if status is Status.offline:
status = 'invisible'
else:
status = str(status)
intents = options.get('intents', None)
if intents is not None:
if not isinstance(intents, Intents):
                raise TypeError(f'intents parameter must be Intents not {type(intents)!r}')
else:
intents = Intents.default()
if not intents.guilds:
_log.warning('Guilds intent seems to be disabled. This may cause state related issues.')
self._chunk_guilds: bool = options.get('chunk_guilds_at_startup', intents.members)
# Ensure these two are set properly
if not intents.members and self._chunk_guilds:
raise ValueError('Intents.members must be enabled to chunk guilds at startup.')
cache_flags = options.get('member_cache_flags', None)
if cache_flags is None:
cache_flags = MemberCacheFlags.from_intents(intents)
else:
if not isinstance(cache_flags, MemberCacheFlags):
raise TypeError(f'member_cache_flags parameter must be MemberCacheFlags not {type(cache_flags)!r}')
cache_flags._verify_intents(intents)
self.member_cache_flags: MemberCacheFlags = cache_flags
self._activity: Optional[ActivityPayload] = activity
self._status: Optional[str] = status
self._intents: Intents = intents
self._command_tree: Optional[CommandTree] = None
if not intents.members or cache_flags._empty:
self.store_user = self.store_user_no_intents # type: ignore # This reassignment is on purpose
self.parsers: Dict[str, Callable[[Any], None]]
self.parsers = parsers = {}
for attr, func in inspect.getmembers(self):
if attr.startswith('parse_'):
parsers[attr[6:].upper()] = func
self.clear()
def clear(self, *, views: bool = True) -> None:
self.user: Optional[ClientUser] = None
self._users: weakref.WeakValueDictionary[int, User] = weakref.WeakValueDictionary()
self._emojis: Dict[int, Emoji] = {}
self._stickers: Dict[int, GuildSticker] = {}
self._guilds: Dict[int, Guild] = {}
if views:
self._view_store: ViewStore = ViewStore(self)
self._voice_clients: Dict[int, VoiceProtocol] = {}
# LRU of max size 128
self._private_channels: OrderedDict[int, PrivateChannel] = OrderedDict()
# extra dict to look up private channels by user id
self._private_channels_by_user: Dict[int, DMChannel] = {}
if self.max_messages is not None:
self._messages: Optional[Deque[Message]] = deque(maxlen=self.max_messages)
else:
self._messages: Optional[Deque[Message]] = None
def process_chunk_requests(self, guild_id: int, nonce: Optional[str], members: List[Member], complete: bool) -> None:
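        # Route incoming member chunks to the pending request matching this guild and
        # nonce, resolving and discarding the request once the final chunk arrives.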
removed = []
for key, request in self._chunk_requests.items():
if request.guild_id == guild_id and request.nonce == nonce:
request.add_members(members)
if complete:
request.done()
removed.append(key)
for key in removed:
del self._chunk_requests[key]
def call_handlers(self, key: str, *args: Any, **kwargs: Any) -> None:
try:
func = self.handlers[key]
except KeyError:
pass
else:
func(*args, **kwargs)
async def call_hooks(self, key: str, *args: Any, **kwargs: Any) -> None:
try:
coro = self.hooks[key]
except KeyError:
pass
else:
await coro(*args, **kwargs)
async def async_setup(self) -> None:
pass
@property
def self_id(self) -> Optional[int]:
u = self.user
return u.id if u else None
@property
def intents(self) -> Intents:
ret = Intents.none()
ret.value = self._intents.value
return ret
@property
def voice_clients(self) -> List[VoiceProtocol]:
return list(self._voice_clients.values())
def _get_voice_client(self, guild_id: Optional[int]) -> Optional[VoiceProtocol]:
# the keys of self._voice_clients are ints
return self._voice_clients.get(guild_id) # type: ignore
def _add_voice_client(self, guild_id: int, voice: VoiceProtocol) -> None:
self._voice_clients[guild_id] = voice
def _remove_voice_client(self, guild_id: int) -> None:
self._voice_clients.pop(guild_id, None)
def _update_references(self, ws: DiscordWebSocket) -> None:
for vc in self.voice_clients:
vc.main_ws = ws # type: ignore # Silencing the unknown attribute (ok at runtime).
def store_user(self, data: Union[UserPayload, PartialUserPayload]) -> User:
# this way is 300% faster than `dict.setdefault`.
user_id = int(data['id'])
try:
return self._users[user_id]
except KeyError:
user = User(state=self, data=data)
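            # Users with the sentinel '0000' discriminator (e.g. webhook authors) are not cached.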
if user.discriminator != '0000':
self._users[user_id] = user
return user
def store_user_no_intents(self, data: Union[UserPayload, PartialUserPayload]) -> User:
return User(state=self, data=data)
def create_user(self, data: Union[UserPayload, PartialUserPayload]) -> User:
return User(state=self, data=data)
def get_user(self, id: int) -> Optional[User]:
return self._users.get(id)
def store_emoji(self, guild: Guild, data: EmojiPayload) -> Emoji:
# the id will be present here
emoji_id = int(data['id']) # type: ignore
self._emojis[emoji_id] = emoji = Emoji(guild=guild, state=self, data=data)
return emoji
def store_sticker(self, guild: Guild, data: GuildStickerPayload) -> GuildSticker:
sticker_id = int(data['id'])
self._stickers[sticker_id] = sticker = GuildSticker(state=self, data=data)
return sticker
def store_view(self, view: View, message_id: Optional[int] = None) -> None:
self._view_store.add_view(view, message_id)
def prevent_view_updates_for(self, message_id: int) -> Optional[View]:
return self._view_store.remove_message_tracking(message_id)
@property
def persistent_views(self) -> Sequence[View]:
return self._view_store.persistent_views
@property
def guilds(self) -> List[Guild]:
return list(self._guilds.values())
def _get_guild(self, guild_id: Optional[int]) -> Optional[Guild]:
# the keys of self._guilds are ints
return self._guilds.get(guild_id) # type: ignore
def _add_guild(self, guild: Guild) -> None:
self._guilds[guild.id] = guild
def _remove_guild(self, guild: Guild) -> None:
self._guilds.pop(guild.id, None)
for emoji in guild.emojis:
self._emojis.pop(emoji.id, None)
for sticker in guild.stickers:
self._stickers.pop(sticker.id, None)
del guild
@property
def emojis(self) -> List[Emoji]:
return list(self._emojis.values())
@property
def stickers(self) -> List[GuildSticker]:
return list(self._stickers.values())
def get_emoji(self, emoji_id: Optional[int]) -> Optional[Emoji]:
# the keys of self._emojis are ints
return self._emojis.get(emoji_id) # type: ignore
def get_sticker(self, sticker_id: Optional[int]) -> Optional[GuildSticker]:
# the keys of self._stickers are ints
return self._stickers.get(sticker_id) # type: ignore
@property
def private_channels(self) -> List[PrivateChannel]:
return list(self._private_channels.values())
def _get_private_channel(self, channel_id: Optional[int]) -> Optional[PrivateChannel]:
try:
# the keys of self._private_channels are ints
value = self._private_channels[channel_id] # type: ignore
except KeyError:
return None
else:
# Type narrowing can't figure out that channel_id isn't None here
self._private_channels.move_to_end(channel_id) # type: ignore
return value
def _get_private_channel_by_user(self, user_id: Optional[int]) -> Optional[DMChannel]:
# the keys of self._private_channels are ints
return self._private_channels_by_user.get(user_id) # type: ignore
def _add_private_channel(self, channel: PrivateChannel) -> None:
channel_id = channel.id
self._private_channels[channel_id] = channel
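        # Evict the least-recently-used private channel once the 128-entry cap is exceeded.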
if len(self._private_channels) > 128:
_, to_remove = self._private_channels.popitem(last=False)
if isinstance(to_remove, DMChannel) and to_remove.recipient:
self._private_channels_by_user.pop(to_remove.recipient.id, None)
if isinstance(channel, DMChannel) and channel.recipient:
self._private_channels_by_user[channel.recipient.id] = channel
def add_dm_channel(self, data: DMChannelPayload) -> DMChannel:
# self.user is *always* cached when this is called
channel = DMChannel(me=self.user, state=self, data=data) # type: ignore
self._add_private_channel(channel)
return channel
def _remove_private_channel(self, channel: PrivateChannel) -> None:
self._private_channels.pop(channel.id, None)
if isinstance(channel, DMChannel):
recipient = channel.recipient
if recipient is not None:
self._private_channels_by_user.pop(recipient.id, None)
def _get_message(self, msg_id: Optional[int]) -> Optional[Message]:
return utils.find(lambda m: m.id == msg_id, reversed(self._messages)) if self._messages else None
def _add_guild_from_data(self, data: GuildPayload) -> Guild:
guild = Guild(data=data, state=self)
self._add_guild(guild)
return guild
def _guild_needs_chunking(self, guild: Guild) -> bool:
# If presences are enabled then we get back the old guild.large behaviour
return self._chunk_guilds and not guild.chunked and not (self._intents.presences and not guild.large)
def _get_guild_channel(
self, data: PartialMessagePayload, guild_id: Optional[int] = None
) -> Tuple[Union[Channel, Thread], Optional[Guild]]:
channel_id = int(data['channel_id'])
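        # A payload without 'guild_id' comes from a DM; fall back to a DM or partial channel.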
try:
guild_id = guild_id or int(data['guild_id'])
guild = self._get_guild(guild_id)
except KeyError:
channel = DMChannel._from_message(self, channel_id)
guild = None
else:
channel = guild and guild._resolve_channel(channel_id)
return channel or PartialMessageable(state=self, id=channel_id), guild
async def chunker(
self, guild_id: int, query: str = '', limit: int = 0, presences: bool = False, *, nonce: Optional[str] = None
) -> None:
ws = self._get_websocket(guild_id) # This is ignored upstream
await ws.request_chunks(guild_id, query=query, limit=limit, presences=presences, nonce=nonce)
async def query_members(
self, guild: Guild, query: Optional[str], limit: int, user_ids: Optional[List[int]], cache: bool, presences: bool
) -> List[Member]:
guild_id = guild.id
ws = self._get_websocket(guild_id)
if ws is None:
raise RuntimeError('Somehow do not have a websocket for this guild_id')
request = ChunkRequest(guild.id, self.loop, self._get_guild, cache=cache)
self._chunk_requests[request.nonce] = request
try:
# start the query operation
await ws.request_chunks(
guild_id, query=query, limit=limit, user_ids=user_ids, presences=presences, nonce=request.nonce
)
return await asyncio.wait_for(request.wait(), timeout=30.0)
except asyncio.TimeoutError:
_log.warning('Timed out waiting for chunks with query %r and limit %d for guild_id %d', query, limit, guild_id)
raise
async def _delay_ready(self) -> None:
try:
states = []
while True:
# this snippet of code is basically waiting N seconds
# until the last GUILD_CREATE was sent
try:
guild = await asyncio.wait_for(self._ready_state.get(), timeout=self.guild_ready_timeout)
except asyncio.TimeoutError:
break
else:
if self._guild_needs_chunking(guild):
future = await self.chunk_guild(guild, wait=False)
states.append((guild, future))
else:
if guild.unavailable is False:
self.dispatch('guild_available', guild)
else:
self.dispatch('guild_join', guild)
for guild, future in states:
try:
await asyncio.wait_for(future, timeout=5.0)
except asyncio.TimeoutError:
_log.warning('Shard ID %s timed out waiting for chunks for guild_id %s.', guild.shard_id, guild.id)
if guild.unavailable is False:
self.dispatch('guild_available', guild)
else:
self.dispatch('guild_join', guild)
# remove the state
try:
del self._ready_state
except AttributeError:
pass # already been deleted somehow
except asyncio.CancelledError:
pass
else:
# dispatch the event
self.call_handlers('ready')
self.dispatch('ready')
finally:
self._ready_task = None
def parse_ready(self, data: gw.ReadyEvent) -> None:
if self._ready_task is not None:
self._ready_task.cancel()
self._ready_state = asyncio.Queue()
self.clear(views=False)
self.user = user = ClientUser(state=self, data=data['user'])
self._users[user.id] = user # type: ignore
if self.application_id is None:
try:
application = data['application']
except KeyError:
pass
else:
self.application_id = utils._get_as_snowflake(application, 'id')
self.application_flags: ApplicationFlags = ApplicationFlags._from_value(application['flags'])
for guild_data in data['guilds']:
self._add_guild_from_data(guild_data) # type: ignore
self.dispatch('connect')
self._ready_task = asyncio.create_task(self._delay_ready())
def parse_resumed(self, data: gw.ResumedEvent) -> None:
self.dispatch('resumed')
def parse_message_create(self, data: gw.MessageCreateEvent) -> None:
channel, _ = self._get_guild_channel(data)
# channel would be the correct type here
message = Message(channel=channel, data=data, state=self) # type: ignore
self.dispatch('message', message)
if self._messages is not None:
self._messages.append(message)
# we ensure that the channel is either a TextChannel, VoiceChannel, or Thread
if channel and channel.__class__ in (TextChannel, VoiceChannel, Thread):
channel.last_message_id = message.id # type: ignore
def parse_message_delete(self, data: gw.MessageDeleteEvent) -> None:
raw = RawMessageDeleteEvent(data)
found = self._get_message(raw.message_id)
raw.cached_message = found
self.dispatch('raw_message_delete', raw)
if self._messages is not None and found is not None:
self.dispatch('message_delete', found)
self._messages.remove(found)
def parse_message_delete_bulk(self, data: gw.MessageDeleteBulkEvent) -> None:
raw = RawBulkMessageDeleteEvent(data)
if self._messages:
found_messages = [message for message in self._messages if message.id in raw.message_ids]
else:
found_messages = []
raw.cached_messages = found_messages
self.dispatch('raw_bulk_message_delete', raw)
if found_messages:
self.dispatch('bulk_message_delete', found_messages)
for msg in found_messages:
# self._messages won't be None here
self._messages.remove(msg) # type: ignore
def parse_message_update(self, data: gw.MessageUpdateEvent) -> None:
raw = RawMessageUpdateEvent(data)
message = self._get_message(raw.message_id)
if message is not None:
older_message = copy.copy(message)
raw.cached_message = older_message
self.dispatch('raw_message_edit', raw)
message._update(data)
# Coerce the `after` parameter to take the new updated Member
# ref: #5999
older_message.author = message.author
self.dispatch('message_edit', older_message, message)
else:
self.dispatch('raw_message_edit', raw)
if 'components' in data and self._view_store.is_message_tracked(raw.message_id):
self._view_store.update_from_message(raw.message_id, data['components'])
def parse_message_reaction_add(self, data: gw.MessageReactionAddEvent) -> None:
emoji = PartialEmoji.from_dict(data['emoji'])
emoji._state = self
raw = RawReactionActionEvent(data, emoji, 'REACTION_ADD')
member_data = data.get('member')
if member_data:
guild = self._get_guild(raw.guild_id)
if guild is not None:
raw.member = Member(data=member_data, guild=guild, state=self)
else:
raw.member = None
else:
raw.member = None
self.dispatch('raw_reaction_add', raw)
# rich interface here
message = self._get_message(raw.message_id)
if message is not None:
emoji = self._upgrade_partial_emoji(emoji)
reaction = message._add_reaction(data, emoji, raw.user_id)
user = raw.member or self._get_reaction_user(message.channel, raw.user_id)
if user:
self.dispatch('reaction_add', reaction, user)
def parse_message_reaction_remove_all(self, data: gw.MessageReactionRemoveAllEvent) -> None:
raw = RawReactionClearEvent(data)
self.dispatch('raw_reaction_clear', raw)
message = self._get_message(raw.message_id)
if message is not None:
old_reactions = message.reactions.copy()
message.reactions.clear()
self.dispatch('reaction_clear', message, old_reactions)
def parse_message_reaction_remove(self, data: gw.MessageReactionRemoveEvent) -> None:
emoji = PartialEmoji.from_dict(data['emoji'])
emoji._state = self
raw = RawReactionActionEvent(data, emoji, 'REACTION_REMOVE')
self.dispatch('raw_reaction_remove', raw)
message = self._get_message(raw.message_id)
if message is not None:
emoji = self._upgrade_partial_emoji(emoji)
try:
reaction = message._remove_reaction(data, emoji, raw.user_id)
except (AttributeError, ValueError): # eventual consistency lol
pass
else:
user = self._get_reaction_user(message.channel, raw.user_id)
if user:
self.dispatch('reaction_remove', reaction, user)
def parse_message_reaction_remove_emoji(self, data: gw.MessageReactionRemoveEmojiEvent) -> None:
emoji = PartialEmoji.from_dict(data['emoji'])
emoji._state = self
raw = RawReactionClearEmojiEvent(data, emoji)
self.dispatch('raw_reaction_clear_emoji', raw)
message = self._get_message(raw.message_id)
if message is not None:
try:
reaction = message._clear_emoji(emoji)
except (AttributeError, ValueError): # eventual consistency lol
pass
else:
if reaction:
self.dispatch('reaction_clear_emoji', reaction)
def parse_interaction_create(self, data: gw.InteractionCreateEvent) -> None:
interaction = Interaction(data=data, state=self)
if data['type'] in (2, 4) and self._command_tree: # application command and auto complete
self._command_tree._from_interaction(interaction)
elif data['type'] == 3: # interaction component
# These keys are always there for this interaction type
inner_data = data['data']
custom_id = inner_data['custom_id']
component_type = inner_data['component_type']
self._view_store.dispatch_view(component_type, custom_id, interaction)
elif data['type'] == 5: # modal submit
# These keys are always there for this interaction type
inner_data = data['data']
custom_id = inner_data['custom_id']
components = inner_data['components']
self._view_store.dispatch_modal(custom_id, interaction, components) # type: ignore
self.dispatch('interaction', interaction)
def parse_presence_update(self, data: gw.PresenceUpdateEvent) -> None:
guild_id = utils._get_as_snowflake(data, 'guild_id')
# guild_id won't be None here
guild = self._get_guild(guild_id)
if guild is None:
_log.debug('PRESENCE_UPDATE referencing an unknown guild ID: %s. Discarding.', guild_id)
return
user = data['user']
member_id = int(user['id'])
member = guild.get_member(member_id)
if member is None:
_log.debug('PRESENCE_UPDATE referencing an unknown member ID: %s. Discarding', member_id)
return
old_member = Member._copy(member)
user_update = member._presence_update(data=data, user=user)
if user_update:
self.dispatch('user_update', user_update[0], user_update[1])
self.dispatch('presence_update', old_member, member)
def parse_user_update(self, data: gw.UserUpdateEvent) -> None:
if self.user:
self.user._update(data)
def parse_invite_create(self, data: gw.InviteCreateEvent) -> None:
invite = Invite.from_gateway(state=self, data=data)
self.dispatch('invite_create', invite)
def parse_invite_delete(self, data: gw.InviteDeleteEvent) -> None:
invite = Invite.from_gateway(state=self, data=data)
self.dispatch('invite_delete', invite)
def parse_channel_delete(self, data: gw.ChannelDeleteEvent) -> None:
guild = self._get_guild(utils._get_as_snowflake(data, 'guild_id'))
channel_id = int(data['id'])
if guild is not None:
channel = guild.get_channel(channel_id)
if channel is not None:
guild._remove_channel(channel)
self.dispatch('guild_channel_delete', channel)
if channel.type in (ChannelType.voice, ChannelType.stage_voice):
for s in guild.scheduled_events:
if s.channel_id == channel.id:
guild._scheduled_events.pop(s.id)
self.dispatch('scheduled_event_delete', guild, s)
def parse_channel_update(self, data: gw.ChannelUpdateEvent) -> None:
channel_type = try_enum(ChannelType, data.get('type'))
channel_id = int(data['id'])
if channel_type is ChannelType.group:
channel = self._get_private_channel(channel_id)
if channel is not None:
old_channel = copy.copy(channel)
# the channel is a GroupChannel rather than PrivateChannel
channel._update_group(data) # type: ignore
self.dispatch('private_channel_update', old_channel, channel)
return
else:
_log.debug('CHANNEL_UPDATE referencing an unknown channel ID: %s. Discarding.', channel_id)
guild_id = utils._get_as_snowflake(data, 'guild_id')
guild = self._get_guild(guild_id)
if guild is not None:
channel = guild.get_channel(channel_id)
if channel is not None:
old_channel = copy.copy(channel)
channel._update(guild, data) # type: ignore # the data payload varies based on the channel type.
self.dispatch('guild_channel_update', old_channel, channel)
else:
_log.debug('CHANNEL_UPDATE referencing an unknown channel ID: %s. Discarding.', channel_id)
else:
_log.debug('CHANNEL_UPDATE referencing an unknown guild ID: %s. Discarding.', guild_id)
def parse_channel_create(self, data: gw.ChannelCreateEvent) -> None:
factory, ch_type = _channel_factory(data['type'])
if factory is None:
_log.debug('CHANNEL_CREATE referencing an unknown channel type %s. Discarding.', data['type'])
return
guild_id = utils._get_as_snowflake(data, 'guild_id')
guild = self._get_guild(guild_id)
if guild is not None:
# the factory can't be a DMChannel or GroupChannel here
channel = factory(guild=guild, state=self, data=data) # type: ignore
guild._add_channel(channel) # type: ignore
self.dispatch('guild_channel_create', channel)
else:
_log.debug('CHANNEL_CREATE referencing an unknown guild ID: %s. Discarding.', guild_id)
return
def parse_channel_pins_update(self, data: gw.ChannelPinsUpdateEvent) -> None:
channel_id = int(data['channel_id'])
try:
guild = self._get_guild(int(data['guild_id']))
except KeyError:
guild = None
channel = self._get_private_channel(channel_id)
else:
channel = guild and guild._resolve_channel(channel_id)
if channel is None:
_log.debug('CHANNEL_PINS_UPDATE referencing an unknown channel ID: %s. Discarding.', channel_id)
return
last_pin = utils.parse_time(data.get('last_pin_timestamp'))
if guild is None:
self.dispatch('private_channel_pins_update', channel, last_pin)
else:
self.dispatch('guild_channel_pins_update', channel, last_pin)
def parse_thread_create(self, data: gw.ThreadCreateEvent) -> None:
guild_id = int(data['guild_id'])
guild: Optional[Guild] = self._get_guild(guild_id)
if guild is None:
_log.debug('THREAD_CREATE referencing an unknown guild ID: %s. Discarding', guild_id)
return
thread = Thread(guild=guild, state=guild._state, data=data)
has_thread = guild.get_thread(thread.id)
guild._add_thread(thread)
if not has_thread:
if data.get('newly_created'):
if thread.parent.__class__ is ForumChannel:
thread.parent.last_message_id = thread.id # type: ignore
self.dispatch('thread_create', thread)
else:
self.dispatch('thread_join', thread)
def parse_thread_update(self, data: gw.ThreadUpdateEvent) -> None:
guild_id = int(data['guild_id'])
guild = self._get_guild(guild_id)
if guild is None:
_log.debug('THREAD_UPDATE referencing an unknown guild ID: %s. Discarding', guild_id)
return
thread_id = int(data['id'])
thread = guild.get_thread(thread_id)
if thread is not None:
old = copy.copy(thread)
thread._update(data)
if thread.archived:
guild._remove_thread(thread)
self.dispatch('thread_update', old, thread)
else:
thread = Thread(guild=guild, state=guild._state, data=data)
if not thread.archived:
guild._add_thread(thread)
self.dispatch('thread_join', thread)
def parse_thread_delete(self, data: gw.ThreadDeleteEvent) -> None:
guild_id = int(data['guild_id'])
guild = self._get_guild(guild_id)
if guild is None:
_log.debug('THREAD_DELETE referencing an unknown guild ID: %s. Discarding', guild_id)
return
thread_id = int(data['id'])
thread = guild.get_thread(thread_id)
if thread is not None:
guild._remove_thread(thread)
self.dispatch('thread_delete', thread)
def parse_thread_list_sync(self, data: gw.ThreadListSyncEvent) -> None:
guild_id = int(data['guild_id'])
guild: Optional[Guild] = self._get_guild(guild_id)
if guild is None:
_log.debug('THREAD_LIST_SYNC referencing an unknown guild ID: %s. Discarding', guild_id)
return
try:
channel_ids = {int(i) for i in data['channel_ids']}
except KeyError:
# If not provided, then the entire guild is being synced
# So all previous thread data should be overwritten
previous_threads = guild._threads.copy()
guild._clear_threads()
else:
previous_threads = guild._filter_threads(channel_ids)
threads = {d['id']: guild._store_thread(d) for d in data.get('threads', [])}
for member in data.get('members', []):
try:
# note: member['id'] is the thread_id
thread = threads[member['id']]
except KeyError:
continue
else:
thread._add_member(ThreadMember(thread, member))
for thread in threads.values():
old = previous_threads.pop(thread.id, None)
if old is None:
self.dispatch('thread_join', thread)
for thread in previous_threads.values():
self.dispatch('thread_remove', thread)
def parse_thread_member_update(self, data: gw.ThreadMemberUpdate) -> None:
guild_id = int(data['guild_id'])
guild: Optional[Guild] = self._get_guild(guild_id)
if guild is None:
_log.debug('THREAD_MEMBER_UPDATE referencing an unknown guild ID: %s. Discarding', guild_id)
return
thread_id = int(data['id'])
thread: Optional[Thread] = guild.get_thread(thread_id)
if thread is None:
_log.debug('THREAD_MEMBER_UPDATE referencing an unknown thread ID: %s. Discarding', thread_id)
return
member = ThreadMember(thread, data)
thread.me = member
def parse_thread_members_update(self, data: gw.ThreadMembersUpdate) -> None:
guild_id = int(data['guild_id'])
guild: Optional[Guild] = self._get_guild(guild_id)
if guild is None:
_log.debug('THREAD_MEMBERS_UPDATE referencing an unknown guild ID: %s. Discarding', guild_id)
return
thread_id = int(data['id'])
thread: Optional[Thread] = guild.get_thread(thread_id)
if thread is None:
_log.debug('THREAD_MEMBERS_UPDATE referencing an unknown thread ID: %s. Discarding', thread_id)
return
added_members = [ThreadMember(thread, d) for d in data.get('added_members', [])]
removed_member_ids = [int(x) for x in data.get('removed_member_ids', [])]
self_id = self.self_id
for member in added_members:
if member.id != self_id:
thread._add_member(member)
self.dispatch('thread_member_join', member)
else:
thread.me = member
self.dispatch('thread_join', thread)
for member_id in removed_member_ids:
if member_id != self_id:
member = thread._pop_member(member_id)
if member is not None:
self.dispatch('thread_member_remove', member)
else:
self.dispatch('thread_remove', thread)
def parse_guild_member_add(self, data: gw.GuildMemberAddEvent) -> None:
guild = self._get_guild(int(data['guild_id']))
if guild is None:
_log.debug('GUILD_MEMBER_ADD referencing an unknown guild ID: %s. Discarding.', data['guild_id'])
return
member = Member(guild=guild, data=data, state=self)
if self.member_cache_flags.joined:
guild._add_member(member)
if guild._member_count is not None:
guild._member_count += 1
self.dispatch('member_join', member)
def parse_guild_member_remove(self, data: gw.GuildMemberRemoveEvent) -> None:
guild = self._get_guild(int(data['guild_id']))
if guild is not None:
if guild._member_count is not None:
guild._member_count -= 1
user_id = int(data['user']['id'])
member = guild.get_member(user_id)
if member is not None:
guild._remove_member(member)
self.dispatch('member_remove', member)
else:
_log.debug('GUILD_MEMBER_REMOVE referencing an unknown guild ID: %s. Discarding.', data['guild_id'])
def parse_guild_member_update(self, data: gw.GuildMemberUpdateEvent) -> None:
guild = self._get_guild(int(data['guild_id']))
user = data['user']
user_id = int(user['id'])
if guild is None:
_log.debug('GUILD_MEMBER_UPDATE referencing an unknown guild ID: %s. Discarding.', data['guild_id'])
return
member = guild.get_member(user_id)
if member is not None:
old_member = Member._copy(member)
member._update(data)
user_update = member._update_inner_user(user)
if user_update:
self.dispatch('user_update', user_update[0], user_update[1])
self.dispatch('member_update', old_member, member)
else:
if self.member_cache_flags.joined:
member = Member(data=data, guild=guild, state=self) # type: ignore # the data is not complete, contains a delta of values
# Force an update on the inner user if necessary
user_update = member._update_inner_user(user)
if user_update:
self.dispatch('user_update', user_update[0], user_update[1])
guild._add_member(member)
_log.debug('GUILD_MEMBER_UPDATE referencing an unknown member ID: %s. Discarding.', user_id)
def parse_guild_emojis_update(self, data: gw.GuildEmojisUpdateEvent) -> None:
guild = self._get_guild(int(data['guild_id']))
if guild is None:
_log.debug('GUILD_EMOJIS_UPDATE referencing an unknown guild ID: %s. Discarding.', data['guild_id'])
return
before_emojis = guild.emojis
for emoji in before_emojis:
self._emojis.pop(emoji.id, None)
# guild won't be None here
guild.emojis = tuple(map(lambda d: self.store_emoji(guild, d), data['emojis']))
self.dispatch('guild_emojis_update', guild, before_emojis, guild.emojis)
def parse_guild_stickers_update(self, data: gw.GuildStickersUpdateEvent) -> None:
guild = self._get_guild(int(data['guild_id']))
if guild is None:
_log.debug('GUILD_STICKERS_UPDATE referencing an unknown guild ID: %s. Discarding.', data['guild_id'])
return
before_stickers = guild.stickers
        for sticker in before_stickers:
            self._stickers.pop(sticker.id, None)
guild.stickers = tuple(map(lambda d: self.store_sticker(guild, d), data['stickers']))
self.dispatch('guild_stickers_update', guild, before_stickers, guild.stickers)
def _get_create_guild(self, data: gw.GuildCreateEvent) -> Guild:
if data.get('unavailable') is False:
# GUILD_CREATE with unavailable in the response
# usually means that the guild has become available
# and is therefore in the cache
guild = self._get_guild(int(data['id']))
if guild is not None:
guild.unavailable = False
guild._from_data(data)
return guild
return self._add_guild_from_data(data)
def is_guild_evicted(self, guild: Guild) -> bool:
return guild.id not in self._guilds
@overload
async def chunk_guild(self, guild: Guild, *, wait: Literal[True] = ..., cache: Optional[bool] = ...) -> List[Member]:
...
@overload
async def chunk_guild(
self, guild: Guild, *, wait: Literal[False] = ..., cache: Optional[bool] = ...
) -> asyncio.Future[List[Member]]:
...
async def chunk_guild(
self, guild: Guild, *, wait: bool = True, cache: Optional[bool] = None
) -> Union[List[Member], asyncio.Future[List[Member]]]:
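        # Reuse any in-flight chunk request for this guild so concurrent callers
        # share a single gateway query instead of issuing duplicates.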
cache = cache or self.member_cache_flags.joined
request = self._chunk_requests.get(guild.id)
if request is None:
self._chunk_requests[guild.id] = request = ChunkRequest(guild.id, self.loop, self._get_guild, cache=cache)
await self.chunker(guild.id, nonce=request.nonce)
if wait:
return await request.wait()
return request.get_future()
async def _chunk_and_dispatch(self, guild, unavailable):
try:
await asyncio.wait_for(self.chunk_guild(guild), timeout=60.0)
except asyncio.TimeoutError:
_log.info('Somehow timed out waiting for chunks.')
if unavailable is False:
self.dispatch('guild_available', guild)
else:
self.dispatch('guild_join', guild)
def parse_guild_create(self, data: gw.GuildCreateEvent) -> None:
unavailable = data.get('unavailable')
if unavailable is True:
# joined a guild with unavailable == True so..
return
guild = self._get_create_guild(data)
try:
# Notify the on_ready state, if any, that this guild is complete.
self._ready_state.put_nowait(guild)
except AttributeError:
pass
else:
# If we're waiting for the event, put the rest on hold
return
# check if it requires chunking
if self._guild_needs_chunking(guild):
asyncio.create_task(self._chunk_and_dispatch(guild, unavailable))
return
# Dispatch available if newly available
if unavailable is False:
self.dispatch('guild_available', guild)
else:
self.dispatch('guild_join', guild)
def parse_guild_update(self, data: gw.GuildUpdateEvent) -> None:
guild = self._get_guild(int(data['id']))
if guild is not None:
old_guild = copy.copy(guild)
guild._from_data(data)
self.dispatch('guild_update', old_guild, guild)
else:
_log.debug('GUILD_UPDATE referencing an unknown guild ID: %s. Discarding.', data['id'])
def parse_guild_delete(self, data: gw.GuildDeleteEvent) -> None:
guild = self._get_guild(int(data['id']))
if guild is None:
_log.debug('GUILD_DELETE referencing an unknown guild ID: %s. Discarding.', data['id'])
return
if data.get('unavailable', False):
# GUILD_DELETE with unavailable being True means that the
# guild that was available is now currently unavailable
guild.unavailable = True
self.dispatch('guild_unavailable', guild)
return
# do a cleanup of the messages cache
if self._messages is not None:
self._messages: Optional[Deque[Message]] = deque(
(msg for msg in self._messages if msg.guild != guild), maxlen=self.max_messages
)
self._remove_guild(guild)
self.dispatch('guild_remove', guild)
def parse_guild_ban_add(self, data: gw.GuildBanAddEvent) -> None:
# we make the assumption that GUILD_BAN_ADD is done
# before GUILD_MEMBER_REMOVE is called
# hence we don't remove it from cache or do anything
# strange with it, the main purpose of this event
# is mainly to dispatch to another event worth listening to for logging
guild = self._get_guild(int(data['guild_id']))
if guild is not None:
try:
user = User(data=data['user'], state=self)
except KeyError:
pass
else:
member = guild.get_member(user.id) or user
self.dispatch('member_ban', guild, member)
def parse_guild_ban_remove(self, data: gw.GuildBanRemoveEvent) -> None:
guild = self._get_guild(int(data['guild_id']))
if guild is not None and 'user' in data:
user = self.store_user(data['user'])
self.dispatch('member_unban', guild, user)
def parse_guild_role_create(self, data: gw.GuildRoleCreateEvent) -> None:
guild = self._get_guild(int(data['guild_id']))
if guild is None:
_log.debug('GUILD_ROLE_CREATE referencing an unknown guild ID: %s. Discarding.', data['guild_id'])
return
role_data = data['role']
role = Role(guild=guild, data=role_data, state=self)
guild._add_role(role)
self.dispatch('guild_role_create', role)
def parse_guild_role_delete(self, data: gw.GuildRoleDeleteEvent) -> None:
guild = self._get_guild(int(data['guild_id']))
if guild is not None:
role_id = int(data['role_id'])
try:
role = guild._remove_role(role_id)
except KeyError:
return
else:
self.dispatch('guild_role_delete', role)
else:
_log.debug('GUILD_ROLE_DELETE referencing an unknown guild ID: %s. Discarding.', data['guild_id'])
def parse_guild_role_update(self, data: gw.GuildRoleUpdateEvent) -> None:
guild = self._get_guild(int(data['guild_id']))
if guild is not None:
role_data = data['role']
role_id = int(role_data['id'])
role = guild.get_role(role_id)
if role is not None:
old_role = copy.copy(role)
role._update(role_data)
self.dispatch('guild_role_update', old_role, role)
else:
_log.debug('GUILD_ROLE_UPDATE referencing an unknown guild ID: %s. Discarding.', data['guild_id'])
def parse_guild_members_chunk(self, data: gw.GuildMembersChunkEvent) -> None:
guild_id = int(data['guild_id'])
guild = self._get_guild(guild_id)
presences = data.get('presences', [])
if guild is None:
return
members = [Member(guild=guild, data=member, state=self) for member in data.get('members', [])]
_log.debug('Processed a chunk for %s members in guild ID %s.', len(members), guild_id)
if presences:
member_dict: Dict[Snowflake, Member] = {str(member.id): member for member in members}
for presence in presences:
user = presence['user']
member_id = user['id']
member = member_dict.get(member_id)
if member is not None:
member._presence_update(presence, user)
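        # The final chunk satisfies chunk_index == chunk_count - 1.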
complete = data.get('chunk_index', 0) + 1 == data.get('chunk_count')
self.process_chunk_requests(guild_id, data.get('nonce'), members, complete)
def parse_guild_integrations_update(self, data: gw.GuildIntegrationsUpdateEvent) -> None:
guild = self._get_guild(int(data['guild_id']))
if guild is not None:
self.dispatch('guild_integrations_update', guild)
else:
_log.debug('GUILD_INTEGRATIONS_UPDATE referencing an unknown guild ID: %s. Discarding.', data['guild_id'])
def parse_integration_create(self, data: gw.IntegrationCreateEvent) -> None:
guild_id = int(data.pop('guild_id'))
guild = self._get_guild(guild_id)
if guild is not None:
cls, _ = _integration_factory(data['type'])
integration = cls(data=data, guild=guild)
self.dispatch('integration_create', integration)
else:
_log.debug('INTEGRATION_CREATE referencing an unknown guild ID: %s. Discarding.', guild_id)
def parse_integration_update(self, data: gw.IntegrationUpdateEvent) -> None:
guild_id = int(data.pop('guild_id'))
guild = self._get_guild(guild_id)
if guild is not None:
cls, _ = _integration_factory(data['type'])
integration = cls(data=data, guild=guild)
self.dispatch('integration_update', integration)
else:
_log.debug('INTEGRATION_UPDATE referencing an unknown guild ID: %s. Discarding.', guild_id)
def parse_integration_delete(self, data: gw.IntegrationDeleteEvent) -> None:
guild_id = int(data['guild_id'])
guild = self._get_guild(guild_id)
if guild is not None:
raw = RawIntegrationDeleteEvent(data)
self.dispatch('raw_integration_delete', raw)
else:
_log.debug('INTEGRATION_DELETE referencing an unknown guild ID: %s. Discarding.', guild_id)
def parse_webhooks_update(self, data: gw.WebhooksUpdateEvent) -> None:
guild = self._get_guild(int(data['guild_id']))
if guild is None:
_log.debug('WEBHOOKS_UPDATE referencing an unknown guild ID: %s. Discarding', data['guild_id'])
return
channel_id = utils._get_as_snowflake(data, 'channel_id')
channel = guild.get_channel(channel_id) # type: ignore # None is okay here
if channel is not None:
self.dispatch('webhooks_update', channel)
else:
_log.debug('WEBHOOKS_UPDATE referencing an unknown channel ID: %s. Discarding.', data['channel_id'])
def parse_stage_instance_create(self, data: gw.StageInstanceCreateEvent) -> None:
guild = self._get_guild(int(data['guild_id']))
if guild is not None:
stage_instance = StageInstance(guild=guild, state=self, data=data)
guild._stage_instances[stage_instance.id] = stage_instance
self.dispatch('stage_instance_create', stage_instance)
else:
_log.debug('STAGE_INSTANCE_CREATE referencing unknown guild ID: %s. Discarding.', data['guild_id'])
def parse_stage_instance_update(self, data: gw.StageInstanceUpdateEvent) -> None:
guild = self._get_guild(int(data['guild_id']))
if guild is not None:
stage_instance = guild._stage_instances.get(int(data['id']))
if stage_instance is not None:
old_stage_instance = copy.copy(stage_instance)
stage_instance._update(data)
self.dispatch('stage_instance_update', old_stage_instance, stage_instance)
else:
_log.debug('STAGE_INSTANCE_UPDATE referencing unknown stage instance ID: %s. Discarding.', data['id'])
else:
_log.debug('STAGE_INSTANCE_UPDATE referencing unknown guild ID: %s. Discarding.', data['guild_id'])
def parse_stage_instance_delete(self, data: gw.StageInstanceDeleteEvent) -> None:
guild = self._get_guild(int(data['guild_id']))
if guild is not None:
try:
stage_instance = guild._stage_instances.pop(int(data['id']))
except KeyError:
pass
else:
self.dispatch('stage_instance_delete', stage_instance)
else:
_log.debug('STAGE_INSTANCE_DELETE referencing unknown guild ID: %s. Discarding.', data['guild_id'])
def parse_guild_scheduled_event_create(self, data: gw.GuildScheduledEventCreateEvent) -> None:
guild = self._get_guild(int(data['guild_id']))
if guild is not None:
scheduled_event = ScheduledEvent(state=self, data=data)
guild._scheduled_events[scheduled_event.id] = scheduled_event
self.dispatch('scheduled_event_create', scheduled_event)
else:
_log.debug('SCHEDULED_EVENT_CREATE referencing unknown guild ID: %s. Discarding.', data['guild_id'])
def parse_guild_scheduled_event_update(self, data: gw.GuildScheduledEventUpdateEvent) -> None:
guild = self._get_guild(int(data['guild_id']))
if guild is not None:
scheduled_event = guild._scheduled_events.get(int(data['id']))
if scheduled_event is not None:
old_scheduled_event = copy.copy(scheduled_event)
scheduled_event._update(data)
self.dispatch('scheduled_event_update', old_scheduled_event, scheduled_event)
else:
_log.debug('SCHEDULED_EVENT_UPDATE referencing unknown scheduled event ID: %s. Discarding.', data['id'])
else:
_log.debug('SCHEDULED_EVENT_UPDATE referencing unknown guild ID: %s. Discarding.', data['guild_id'])
def parse_guild_scheduled_event_delete(self, data: gw.GuildScheduledEventDeleteEvent) -> None:
guild = self._get_guild(int(data['guild_id']))
if guild is not None:
try:
scheduled_event = guild._scheduled_events.pop(int(data['id']))
except KeyError:
pass
else:
self.dispatch('scheduled_event_delete', scheduled_event)
else:
_log.debug('SCHEDULED_EVENT_DELETE referencing unknown guild ID: %s. Discarding.', data['guild_id'])
def parse_guild_scheduled_event_user_add(self, data: gw.GuildScheduledEventUserAdd) -> None:
guild = self._get_guild(int(data['guild_id']))
if guild is not None:
scheduled_event = guild._scheduled_events.get(int(data['guild_scheduled_event_id']))
if scheduled_event is not None:
user = self.get_user(int(data['user_id']))
if user is not None:
scheduled_event._add_user(user)
self.dispatch('scheduled_event_user_add', scheduled_event, user)
else:
_log.debug('SCHEDULED_EVENT_USER_ADD referencing unknown user ID: %s. Discarding.', data['user_id'])
else:
_log.debug(
'SCHEDULED_EVENT_USER_ADD referencing unknown scheduled event ID: %s. Discarding.',
data['guild_scheduled_event_id'],
)
else:
_log.debug('SCHEDULED_EVENT_USER_ADD referencing unknown guild ID: %s. Discarding.', data['guild_id'])
def parse_guild_scheduled_event_user_remove(self, data: gw.GuildScheduledEventUserRemove) -> None:
guild = self._get_guild(int(data['guild_id']))
if guild is not None:
scheduled_event = guild._scheduled_events.get(int(data['guild_scheduled_event_id']))
if scheduled_event is not None:
user = self.get_user(int(data['user_id']))
if user is not None:
scheduled_event._pop_user(user.id)
self.dispatch('scheduled_event_user_remove', scheduled_event, user)
else:
_log.debug('SCHEDULED_EVENT_USER_REMOVE referencing unknown user ID: %s. Discarding.', data['user_id'])
else:
_log.debug(
'SCHEDULED_EVENT_USER_REMOVE referencing unknown scheduled event ID: %s. Discarding.',
data['guild_scheduled_event_id'],
)
else:
_log.debug('SCHEDULED_EVENT_USER_REMOVE referencing unknown guild ID: %s. Discarding.', data['guild_id'])
def parse_voice_state_update(self, data: gw.VoiceStateUpdateEvent) -> None:
guild = self._get_guild(utils._get_as_snowflake(data, 'guild_id'))
channel_id = utils._get_as_snowflake(data, 'channel_id')
flags = self.member_cache_flags
# self.user is *always* cached when this is called
self_id = self.user.id # type: ignore
if guild is not None:
if int(data['user_id']) == self_id:
voice = self._get_voice_client(guild.id)
if voice is not None:
coro = voice.on_voice_state_update(data)
asyncio.create_task(logging_coroutine(coro, info='Voice Protocol voice state update handler'))
member, before, after = guild._update_voice_state(data, channel_id) # type: ignore
if member is not None:
if flags.voice:
if channel_id is None and flags._voice_only and member.id != self_id:
# Only remove from cache if we only have the voice flag enabled
guild._remove_member(member)
elif channel_id is not None:
guild._add_member(member)
self.dispatch('voice_state_update', member, before, after)
else:
_log.debug('VOICE_STATE_UPDATE referencing an unknown member ID: %s. Discarding.', data['user_id'])
def parse_voice_server_update(self, data: gw.VoiceServerUpdateEvent) -> None:
key_id = int(data['guild_id'])
vc = self._get_voice_client(key_id)
if vc is not None:
coro = vc.on_voice_server_update(data)
asyncio.create_task(logging_coroutine(coro, info='Voice Protocol voice server update handler'))
def parse_typing_start(self, data: gw.TypingStartEvent) -> None:
channel, guild = self._get_guild_channel(data)
if channel is not None:
member = None
user_id = int(data['user_id'])
if isinstance(channel, DMChannel):
member = channel.recipient
elif isinstance(channel, (Thread, TextChannel)) and guild is not None:
member = guild.get_member(user_id)
if member is None:
member_data = data.get('member')
if member_data:
member = Member(data=member_data, state=self, guild=guild)
elif isinstance(channel, GroupChannel):
member = utils.find(lambda x: x.id == user_id, channel.recipients)
if member is not None:
timestamp = datetime.datetime.fromtimestamp(data['timestamp'], tz=datetime.timezone.utc)
self.dispatch('typing', channel, member, timestamp)
def _get_reaction_user(self, channel: MessageableChannel, user_id: int) -> Optional[Union[User, Member]]:
if isinstance(channel, TextChannel):
return channel.guild.get_member(user_id)
return self.get_user(user_id)
def get_reaction_emoji(self, data: PartialEmojiPayload) -> Union[Emoji, PartialEmoji, str]:
emoji_id = utils._get_as_snowflake(data, 'id')
if not emoji_id:
# the name key will be a str
return data['name'] # type: ignore
try:
return self._emojis[emoji_id]
except KeyError:
return PartialEmoji.with_state(
self, animated=data.get('animated', False), id=emoji_id, name=data['name'] # type: ignore
)
def _upgrade_partial_emoji(self, emoji: PartialEmoji) -> Union[Emoji, PartialEmoji, str]:
emoji_id = emoji.id
if not emoji_id:
return emoji.name
try:
return self._emojis[emoji_id]
except KeyError:
return emoji
def get_channel(self, id: Optional[int]) -> Optional[Union[Channel, Thread]]:
if id is None:
return None
pm = self._get_private_channel(id)
if pm is not None:
return pm
for guild in self.guilds:
channel = guild._resolve_channel(id)
if channel is not None:
return channel
def create_message(self, *, channel: MessageableChannel, data: MessagePayload) -> Message:
return Message(state=self, channel=channel, data=data)
class AutoShardedConnectionState(ConnectionState):
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
self.shard_ids: Union[List[int], range] = []
def _update_message_references(self) -> None:
# self._messages won't be None when this is called
for msg in self._messages: # type: ignore
if not msg.guild:
continue
new_guild = self._get_guild(msg.guild.id)
if new_guild is not None and new_guild is not msg.guild:
channel_id = msg.channel.id
channel = new_guild._resolve_channel(channel_id) or Object(id=channel_id)
# channel will either be a TextChannel, Thread or Object
msg._rebind_cached_references(new_guild, channel) # type: ignore
async def async_setup(self) -> None:
self.shards_launched: asyncio.Event = asyncio.Event()
async def chunker(
self,
guild_id: int,
query: str = '',
limit: int = 0,
presences: bool = False,
*,
shard_id: Optional[int] = None,
nonce: Optional[str] = None,
) -> None:
ws = self._get_websocket(guild_id, shard_id=shard_id)
await ws.request_chunks(guild_id, query=query, limit=limit, presences=presences, nonce=nonce)
async def _delay_ready(self) -> None:
await self.shards_launched.wait()
processed = []
max_concurrency = len(self.shard_ids) * 2
current_bucket = []
while True:
# this snippet of code is basically waiting N seconds
# until the last GUILD_CREATE was sent
try:
guild = await asyncio.wait_for(self._ready_state.get(), timeout=self.guild_ready_timeout)
except asyncio.TimeoutError:
break
else:
if self._guild_needs_chunking(guild):
_log.debug('Guild ID %d requires chunking, will be done in the background.', guild.id)
if len(current_bucket) >= max_concurrency:
try:
await utils.sane_wait_for(current_bucket, timeout=max_concurrency * 70.0)
except asyncio.TimeoutError:
fmt = 'Shard ID %s failed to wait for chunks from a sub-bucket with length %d'
_log.warning(fmt, guild.shard_id, len(current_bucket))
finally:
current_bucket = []
# Chunk the guild in the background while we wait for GUILD_CREATE streaming
future = asyncio.ensure_future(self.chunk_guild(guild))
current_bucket.append(future)
else:
future = self.loop.create_future()
future.set_result([])
processed.append((guild, future))
guilds = sorted(processed, key=lambda g: g[0].shard_id)
for shard_id, info in itertools.groupby(guilds, key=lambda g: g[0].shard_id):
children, futures = zip(*info)
# 110 reqs/minute w/ 1 req/guild plus some buffer
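            # e.g. a bucket of 220 guilds gives 61 * (220 / 110) = 122 seconds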
timeout = 61 * (len(children) / 110)
try:
await utils.sane_wait_for(futures, timeout=timeout)
except asyncio.TimeoutError:
_log.warning(
'Shard ID %s failed to wait for chunks (timeout=%.2f) for %d guilds', shard_id, timeout, len(guilds)
)
for guild in children:
if guild.unavailable is False:
self.dispatch('guild_available', guild)
else:
self.dispatch('guild_join', guild)
self.dispatch('shard_ready', shard_id)
# remove the state
try:
del self._ready_state
except AttributeError:
pass # already been deleted somehow
# regular users cannot shard so we won't worry about it here.
# clear the current task
self._ready_task = None
# dispatch the event
self.call_handlers('ready')
self.dispatch('ready')
def parse_ready(self, data: gw.ReadyEvent) -> None:
if not hasattr(self, '_ready_state'):
self._ready_state = asyncio.Queue()
self.user: Optional[ClientUser]
self.user = user = ClientUser(state=self, data=data['user'])
        # self._users maps user IDs to User objects; we're storing a ClientUser here
self._users[user.id] = user # type: ignore
if self.application_id is None:
try:
application = data['application']
except KeyError:
pass
else:
self.application_id: Optional[int] = utils._get_as_snowflake(application, 'id')
self.application_flags: ApplicationFlags = ApplicationFlags._from_value(application['flags'])
for guild_data in data['guilds']:
self._add_guild_from_data(guild_data) # type: ignore # _add_guild_from_data requires a complete Guild payload
if self._messages:
self._update_message_references()
self.dispatch('connect')
self.dispatch('shard_connect', data['__shard_id__']) # type: ignore # This is an internal discord.py key
if self._ready_task is None:
self._ready_task = asyncio.create_task(self._delay_ready())
def parse_resumed(self, data: gw.ResumedEvent) -> None:
self.dispatch('resumed')
self.dispatch('shard_resumed', data['__shard_id__']) # type: ignore # This is an internal discord.py key
|
{
"content_hash": "775cf1dbf687fb02076f79162a0cd1c4",
"timestamp": "",
"source": "github",
"line_count": 1644,
"max_line_length": 138,
"avg_line_length": 42.02068126520681,
"alnum_prop": 0.6004313714136823,
"repo_name": "Harmon758/discord.py",
"id": "3ec0d93f2f0697674046948b21840f9f5a26aa7a",
"size": "69082",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "discord/state.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "782"
},
{
"name": "Python",
"bytes": "2149050"
}
],
"symlink_target": ""
}
|
'''
Production Configurations
- Use djangosecure
- Use Amazon's S3 for storing static files and uploaded media
- Use sendgrid to send emails
- Use MEMCACHIER on Heroku
'''
from __future__ import absolute_import, unicode_literals
from django.utils import six
from .common import * # noqa
# SECRET CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
# Raises ImproperlyConfigured exception if DJANGO_SECRET_KEY not in os.environ
SECRET_KEY = env("DJANGO_SECRET_KEY")
# This ensures that Django will be able to detect a secure connection
# properly on Heroku.
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
# django-secure
# ------------------------------------------------------------------------------
INSTALLED_APPS += ("djangosecure", )
MIDDLEWARE_CLASSES = (
# Make sure djangosecure.middleware.SecurityMiddleware is listed first
'djangosecure.middleware.SecurityMiddleware',
) + MIDDLEWARE_CLASSES
# set this to 60 seconds and then to 518400 when you can prove it works
SECURE_HSTS_SECONDS = 60
SECURE_HSTS_INCLUDE_SUBDOMAINS = env.bool("DJANGO_SECURE_HSTS_INCLUDE_SUBDOMAINS", default=True)
SECURE_FRAME_DENY = env.bool("DJANGO_SECURE_FRAME_DENY", default=True)
SECURE_CONTENT_TYPE_NOSNIFF = env.bool("DJANGO_SECURE_CONTENT_TYPE_NOSNIFF", default=True)
SECURE_BROWSER_XSS_FILTER = True
SESSION_COOKIE_SECURE = False
SESSION_COOKIE_HTTPONLY = True
SECURE_SSL_REDIRECT = env.bool("DJANGO_SECURE_SSL_REDIRECT", default=True)
# SITE CONFIGURATION
# ------------------------------------------------------------------------------
# Hosts/domain names that are valid for this site
# See https://docs.djangoproject.com/en/1.6/ref/settings/#allowed-hosts
ALLOWED_HOSTS = ["*"]
# END SITE CONFIGURATION
INSTALLED_APPS += ("gunicorn", )
# Static Assets
# ------------------------
STATICFILES_STORAGE = 'whitenoise.django.GzipManifestStaticFilesStorage'
# EMAIL
# ------------------------------------------------------------------------------
# DEFAULT_FROM_EMAIL = env('DJANGO_DEFAULT_FROM_EMAIL',
# default='Robotix <noreply@robotix.in>')
# EMAIL_HOST = env("DJANGO_EMAIL_HOST", default='smtp.sendgrid.com')
# EMAIL_HOST_PASSWORD = env("SENDGRID_PASSWORD")
# EMAIL_HOST_USER = env('SENDGRID_USERNAME')
# EMAIL_PORT = env.int("EMAIL_PORT", default=587)
# EMAIL_SUBJECT_PREFIX = env("EMAIL_SUBJECT_PREFIX", default='[Robotix] ')
# EMAIL_USE_TLS = True
# SERVER_EMAIL = EMAIL_HOST_USER
# TEMPLATE CONFIGURATION
# ------------------------------------------------------------------------------
# See:https://docs.djangoproject.com/en/dev/ref/templates/api/#django.template.loaders.cached.Loader
TEMPLATES[0]['OPTIONS']['loaders'] = [
('django.template.loaders.cached.Loader', [
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
]),
]
# DATABASE CONFIGURATION
# ------------------------------------------------------------------------------
# Raises ImproperlyConfigured exception if DATABASE_URL not in os.environ
DATABASES['default'] = env.db("DATABASE_URL")
# CACHING
# ------------------------------------------------------------------------------
try:
    # Only do this here because, thanks to django-pylibmc-sasl and pylibmc,
    # memcacheify is painful to install on Windows.
# See: https://github.com/rdegges/django-heroku-memcacheify
from memcacheify import memcacheify
CACHES = memcacheify()
except ImportError:
CACHES = {
'default': env.cache_url("DJANGO_CACHE_URL", default="memcache://127.0.0.1:11211"),
}
# Your production stuff: Below this line define 3rd party library settings
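# A hedged illustration (not part of the original settings module): django-environ
# resolves the values above from the process environment. Example variables
# (all values are made up):
#
#   DJANGO_SECRET_KEY=change-me
#   DATABASE_URL=postgres://user:pass@localhost:5432/robotix
#   DJANGO_CACHE_URL=memcache://127.0.0.1:11211
#
# env("DJANGO_SECRET_KEY") raises ImproperlyConfigured when the variable is
# missing, while calls such as env.bool("DJANGO_SECURE_SSL_REDIRECT", default=True)
# fall back to their defaults.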
|
{
"content_hash": "ac7d80aa036a7c09ec31ab3839a4bc85",
"timestamp": "",
"source": "github",
"line_count": 98,
"max_line_length": 100,
"avg_line_length": 38.3469387755102,
"alnum_prop": 0.6221394358701436,
"repo_name": "narayanaditya95/Robotix",
"id": "69af85ae295ece9eeac00cd9036d649cc1be1749",
"size": "3782",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "config/settings/production.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "443687"
},
{
"name": "HTML",
"bytes": "30557"
},
{
"name": "JavaScript",
"bytes": "11382"
},
{
"name": "Makefile",
"bytes": "933"
},
{
"name": "Python",
"bytes": "29848"
},
{
"name": "Shell",
"bytes": "3620"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
import sys
sys.path.insert(1,"../../../")
from tests import pyunit_utils
import h2o
import threading
from h2o.utils.typechecks import assert_is_type
def h2ocluster_shutdown():
"""
Python API test: h2o.cluster().shutdown(prompt=False)
"""
try:
bthread = threading.Thread(target=call_badshutdown())
bthread.daemon=True
bthread.start()
bthread.join(1.0)
except Exception as e:
print("*** Error in thread is caught=> ")
        print(e)  # if this message is printed, the error was caught correctly
assert_is_type(e, TypeError)
assert "badparam" in e.args[0], "h2o.shutdown() command is not working."
thread = threading.Thread(target=call_shutdown)
thread.daemon =True
thread.start()
thread.join(1.0)
def call_shutdown():
h2o.cluster().shutdown(prompt=True) # call shutdown but do not actually shut anything down.
def call_badshutdown():
h2o.cluster().shutdown(badparam=1, prompt=True) # call shutdown but do not actually shut anything down.
if __name__ == "__main__":
pyunit_utils.standalone_test(h2ocluster_shutdown)
else:
h2ocluster_shutdown()
|
{
"content_hash": "ea5451cfe8fb5ef9cab5a2d34fc260a5",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 109,
"avg_line_length": 30.871794871794872,
"alnum_prop": 0.6694352159468439,
"repo_name": "spennihana/h2o-3",
"id": "65caf0b07441f74daadddbd1d4fbe9a875c28032",
"size": "1204",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "h2o-py/tests/testdir_apis/H2O_Module/pyunit_h2ocluster_shutdown.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "12629"
},
{
"name": "CSS",
"bytes": "257122"
},
{
"name": "CoffeeScript",
"bytes": "273112"
},
{
"name": "Emacs Lisp",
"bytes": "2226"
},
{
"name": "Groovy",
"bytes": "125187"
},
{
"name": "HTML",
"bytes": "2111506"
},
{
"name": "Java",
"bytes": "9481047"
},
{
"name": "JavaScript",
"bytes": "87944"
},
{
"name": "Jupyter Notebook",
"bytes": "6165027"
},
{
"name": "Makefile",
"bytes": "42233"
},
{
"name": "Python",
"bytes": "4982123"
},
{
"name": "R",
"bytes": "2699289"
},
{
"name": "Ruby",
"bytes": "3506"
},
{
"name": "Scala",
"bytes": "32768"
},
{
"name": "Shell",
"bytes": "179758"
},
{
"name": "TeX",
"bytes": "657375"
}
],
"symlink_target": ""
}
|
from .base import *
from .controller import *
|
{
"content_hash": "0649aecfc22c3329724dcd29442d3fc1",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 25,
"avg_line_length": 23,
"alnum_prop": 0.7391304347826086,
"repo_name": "altio/foundation",
"id": "a1be7bfe56aed3d3015c982e453bf68c61da8311",
"size": "46",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "foundation/backend/views/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1700"
},
{
"name": "HTML",
"bytes": "60043"
},
{
"name": "JavaScript",
"bytes": "6017"
},
{
"name": "Python",
"bytes": "223531"
}
],
"symlink_target": ""
}
|
import os
from generic import obj
from simulation import Simulation,SimulationInput,SimulationAnalyzer
from debug import *
class Convert4qmcInput(SimulationInput):
def __init__(self,
app_name = 'convert4qmc',
prefix = None,
gamess_ascii = None,
ci = None,
read_initial_guess = None,
natural_orbitals = None,
threshold = None,
zero_ci = False,
add_3body_J = False
):
self.prefix = prefix
self.app_name = app_name
self.gamess_ascii = gamess_ascii
self.ci = ci
self.read_initial_guess = read_initial_guess
self.natural_orbitals = natural_orbitals
self.threshold = threshold
self.zero_ci = zero_ci
self.add_3body_J = add_3body_J
#end def __init__
def set_app_name(self,app_name):
self.app_name = app_name
#end def set_app_name
def app_command(self):
c = self.app_name
if self.prefix!=None:
c += ' -prefix '+self.prefix
#end if
if self.gamess_ascii!=None:
c += ' -gamessAscii '+self.gamess_ascii
#end if
if self.ci!=None:
c += ' -ci '+self.ci
#end if
if self.threshold!=None:
c += ' -threshold '+str(self.threshold)
#end if
if self.zero_ci:
c += ' -zeroCi'
#end if
if self.read_initial_guess!=None:
c += ' -readInitialGuess '+str(self.read_initial_guess)
#end if
if self.natural_orbitals!=None:
c += ' -NaturalOrbitals '+str(self.natural_orbitals)
#end if
if self.add_3body_J:
c += ' -add3BodyJ'
#end if
return c
#end def app_command
def read(self,filepath):
None
#end def read
def write_contents(self):
return self.app_command()
#end def write_contents
def output_files(self):
prefix = 'sample'
if self.prefix!=None:
prefix = self.prefix
#end if
wfn_file = prefix+'.Gaussian-G2.xml'
ptcl_file = prefix+'.Gaussian-G2.ptcl.xml'
return wfn_file,ptcl_file
#end def output_files
#end class Convert4qmcInput
def generate_convert4qmc_input(**kwargs):
return Convert4qmcInput(**kwargs)
#end def generate_convert4qmc_input
class Convert4qmcAnalyzer(SimulationAnalyzer):
def __init__(self,arg0):
if isinstance(arg0,Simulation):
self.infile = arg0.infile
else:
self.infile = arg0
#end if
#end def __init__
def analyze(self):
None
#end def analyze
#end class Convert4qmcAnalyzer
class Convert4qmc(Simulation):
input_type = Convert4qmcInput
analyzer_type = Convert4qmcAnalyzer
generic_identifier = 'convert4qmc'
application = 'convert4qmc'
application_properties = set(['serial'])
application_results = set(['orbitals'])
def set_app_name(self,app_name):
self.app_name = app_name
self.input.set_app_name(app_name)
#end def set_app_name
def propagate_identifier(self):
None
#self.input.prefix = self.identifier
#end def propagate_identifier
def check_result(self,result_name,sim):
calculating_result = False
if result_name=='orbitals':
calculating_result = True
else:
calculating_result = False
self.error('ability to check for result '+result_name+' has not been implemented')
#end if
return calculating_result
#end def check_result
def get_result(self,result_name,sim):
result = obj()
input = self.input
if result_name=='orbitals':
wfn_file,ptcl_file = input.output_files()
result.location = os.path.join(self.locdir,wfn_file)
else:
self.error('ability to get result '+result_name+' has not been implemented')
#end if
return result
#end def get_result
def incorporate_result(self,result_name,result,sim):
if result_name=='orbitals':
self.input.gamess_ascii = os.path.relpath(result.location,self.locdir)
self.job.app_command = self.input.app_command()
else:
self.error('ability to incorporate result '+result_name+' has not been implemented')
#end if
#end def incorporate_result
def check_sim_status(self):
output = open(os.path.join(self.locdir,self.outfile),'r').read()
#errors = open(os.path.join(self.locdir,self.errfile),'r').read()
success = 'QMCGaussianParserBase::dump' in output
for filename in self.input.output_files():
success &= os.path.exists(os.path.join(self.locdir,filename))
#end for
self.failed = not success
self.finished = self.job.finished
#end def check_sim_status
def get_output_files(self):
output_files = []
return output_files
#end def get_output_files
def app_command(self):
return self.input.app_command()
#end def app_command
#end class Convert4qmc
def generate_convert4qmc(**kwargs):
sim_args,inp_args = Simulation.separate_inputs(kwargs)
if not 'input' in sim_args:
sim_args.input = generate_convert4qmc_input(**inp_args)
#end if
convert4qmc = Convert4qmc(**sim_args)
return convert4qmc
#end def generate_convert4qmc
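# A hedged usage sketch (not part of the original module): how
# Convert4qmcInput.app_command() assembles the convert4qmc command line.
# The prefix and GAMESS output filename below are made up.
if __name__ == '__main__':
    example_input = generate_convert4qmc_input(
        prefix       = 'h2o',
        gamess_ascii = 'h2o.out',
        add_3body_J  = True
        )
    print(example_input.app_command())
    # -> convert4qmc -prefix h2o -gamessAscii h2o.out -add3BodyJ
#end if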
|
{
"content_hash": "57258d89705a8c94c7a24a8f5534156c",
"timestamp": "",
"source": "github",
"line_count": 192,
"max_line_length": 96,
"avg_line_length": 30.130208333333332,
"alnum_prop": 0.5636992221261884,
"repo_name": "habanero-rice/hcpp",
"id": "9fa70383793698fc833064baf04a8f97af58cca2",
"size": "5787",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "test/performance-regression/full-apps/qmcpack/nexus/library/convert4qmc.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Assembly",
"bytes": "26226"
},
{
"name": "C",
"bytes": "330261"
},
{
"name": "C++",
"bytes": "255831"
},
{
"name": "Cuda",
"bytes": "10347"
},
{
"name": "Makefile",
"bytes": "7838"
},
{
"name": "Perl",
"bytes": "1748"
},
{
"name": "Shell",
"bytes": "16630"
}
],
"symlink_target": ""
}
|
import errno
import os
import re
import sys
from collections import defaultdict
from pathlib import Path
import click
import yaml
from packaging.version import Version
from setuptools.config import read_configuration
START_MARKER = '# BEGIN GENERATED REQUIREMENTS'
END_MARKER = '# END GENERATED REQUIREMENTS'
def _find_plugins():
subdirs = sorted(Path(x) for x in next(os.walk('.'))[1]
if x[0] != '.' and x != '_meta' and os.path.exists(os.path.join(x, 'setup.cfg')))
for subdir in subdirs:
path = subdir / 'setup.cfg'
metadata = read_configuration(path)['metadata']
name = metadata['name']
version = metadata['version']
if name is None or version is None:
click.secho(f'Could not extract name/version from {path}', fg='red', bold=True)
continue
minver = str(Version(version))
yield name, minver
def _get_config():
rv = {'extras': {}, 'skip': []}
try:
f = open('_meta/meta.yaml')
except OSError as exc:
if exc.errno != errno.ENOENT:
raise
else:
with f:
rv.update(yaml.safe_load(f))
return rv
def _update_meta(data):
path = Path('_meta/setup.cfg')
content = path.read_text()
new_content = re.sub(r'(?<={}\n).*(?=\n{})'.format(re.escape(START_MARKER), re.escape(END_MARKER)), data, content,
flags=re.DOTALL)
if content == new_content:
return False
path.write_text(new_content)
return True
@click.command()
@click.argument('nextver')
def cli(nextver):
if not os.path.isdir('_meta/'):
click.secho('Could not find meta package (_meta subdir)', fg='red', bold=True)
sys.exit(1)
nextver = Version(nextver)
if nextver.dev is None:
nextver = Version(str(nextver) + '-dev')
config = _get_config()
plugins_require = []
extras_require = defaultdict(list)
for name, minver in sorted(_find_plugins()):
if name in config['skip']:
continue
pkgspec = f'{name}>={minver},<{nextver}'
if name in config['extras']:
extras_require[config['extras'][name]].append(pkgspec)
else:
plugins_require.append(pkgspec)
output = []
for entry in plugins_require:
output.append(f' {entry}')
if extras_require:
if output:
output.append('')
output.append('[options.extras_require]')
for extra, pkgspecs in sorted(extras_require.items()):
output.append(f'{extra} =')
for pkg in sorted(pkgspecs):
output.append(f' {pkg}')
if _update_meta('\n'.join(output)):
click.secho('Updated meta package', fg='green')
else:
click.secho('Meta package already up to date', fg='yellow')
if __name__ == '__main__':
cli()
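# A hedged illustration (not part of the original script): how the marker-based
# substitution in _update_meta() rewrites the generated block. The plugin names
# and versions below are made up.
def _demo_marker_substitution():
    content = (
        'install_requires =\n'
        f'{START_MARKER}\n'
        '    old-plugin>=0.1,<1.0\n'
        f'{END_MARKER}\n'
    )
    new_block = '    example-plugin>=1.0,<2.0.dev0'
    # Same pattern as _update_meta(): replace everything between the markers.
    return re.sub(r'(?<={}\n).*(?=\n{})'.format(re.escape(START_MARKER), re.escape(END_MARKER)),
                  new_block, content, flags=re.DOTALL)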
|
{
"content_hash": "cfef611d508389e0516ca14da992f2d3",
"timestamp": "",
"source": "github",
"line_count": 99,
"max_line_length": 118,
"avg_line_length": 28.95959595959596,
"alnum_prop": 0.5866759679107081,
"repo_name": "ThiefMaster/indico-plugins",
"id": "c669e74e39b1f8d30a96e4b6d60cc8eb393671cd",
"size": "3110",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "update-meta.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "4662"
},
{
"name": "HTML",
"bytes": "48203"
},
{
"name": "JavaScript",
"bytes": "15189"
},
{
"name": "Python",
"bytes": "307878"
},
{
"name": "Shell",
"bytes": "2172"
}
],
"symlink_target": ""
}
|
from pyspark.ml.util import keyword_only
from pyspark.ml.wrapper import JavaEstimator, JavaModel
from pyspark.ml.param.shared import *
from pyspark.mllib.common import inherit_doc
__all__ = ['KMeans', 'KMeansModel']
class KMeansModel(JavaModel):
"""
Model fitted by KMeans.
"""
def clusterCenters(self):
"""Get the cluster centers, represented as a list of NumPy arrays."""
return [c.toArray() for c in self._call_java("clusterCenters")]
@inherit_doc
class KMeans(JavaEstimator, HasFeaturesCol, HasPredictionCol, HasMaxIter, HasTol, HasSeed):
"""
K-means clustering with support for multiple parallel runs and a k-means++ like initialization
mode (the k-means|| algorithm by Bahmani et al). When multiple concurrent runs are requested,
they are executed together with joint passes over the data for efficiency.
>>> from pyspark.mllib.linalg import Vectors
>>> data = [(Vectors.dense([0.0, 0.0]),), (Vectors.dense([1.0, 1.0]),),
... (Vectors.dense([9.0, 8.0]),), (Vectors.dense([8.0, 9.0]),)]
>>> df = sqlContext.createDataFrame(data, ["features"])
>>> kmeans = KMeans(k=2, seed=1)
>>> model = kmeans.fit(df)
>>> centers = model.clusterCenters()
>>> len(centers)
2
>>> transformed = model.transform(df).select("features", "prediction")
>>> rows = transformed.collect()
>>> rows[0].prediction == rows[1].prediction
True
>>> rows[2].prediction == rows[3].prediction
True
"""
# a placeholder to make it appear in the generated doc
k = Param(Params._dummy(), "k", "number of clusters to create")
initMode = Param(Params._dummy(), "initMode",
"the initialization algorithm. This can be either \"random\" to " +
"choose random points as initial cluster centers, or \"k-means||\" " +
"to use a parallel variant of k-means++")
initSteps = Param(Params._dummy(), "initSteps", "steps for k-means initialization mode")
@keyword_only
def __init__(self, featuresCol="features", predictionCol="prediction", k=2,
initMode="k-means||", initSteps=5, tol=1e-4, maxIter=20, seed=None):
"""
__init__(self, featuresCol="features", predictionCol="prediction", k=2, \
initMode="k-means||", initSteps=5, tol=1e-4, maxIter=20, seed=None)
"""
super(KMeans, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.clustering.KMeans", self.uid)
self.k = Param(self, "k", "number of clusters to create")
self.initMode = Param(self, "initMode",
"the initialization algorithm. This can be either \"random\" to " +
"choose random points as initial cluster centers, or \"k-means||\" " +
"to use a parallel variant of k-means++")
self.initSteps = Param(self, "initSteps", "steps for k-means initialization mode")
self._setDefault(k=2, initMode="k-means||", initSteps=5, tol=1e-4, maxIter=20)
kwargs = self.__init__._input_kwargs
self.setParams(**kwargs)
def _create_model(self, java_model):
return KMeansModel(java_model)
@keyword_only
def setParams(self, featuresCol="features", predictionCol="prediction", k=2,
initMode="k-means||", initSteps=5, tol=1e-4, maxIter=20, seed=None):
"""
setParams(self, featuresCol="features", predictionCol="prediction", k=2, \
initMode="k-means||", initSteps=5, tol=1e-4, maxIter=20, seed=None)
Sets params for KMeans.
"""
kwargs = self.setParams._input_kwargs
return self._set(**kwargs)
def setK(self, value):
"""
Sets the value of :py:attr:`k`.
>>> algo = KMeans().setK(10)
>>> algo.getK()
10
"""
self._paramMap[self.k] = value
return self
def getK(self):
"""
Gets the value of `k`
"""
return self.getOrDefault(self.k)
def setInitMode(self, value):
"""
Sets the value of :py:attr:`initMode`.
>>> algo = KMeans()
>>> algo.getInitMode()
'k-means||'
>>> algo = algo.setInitMode("random")
>>> algo.getInitMode()
'random'
"""
self._paramMap[self.initMode] = value
return self
def getInitMode(self):
"""
Gets the value of `initMode`
"""
return self.getOrDefault(self.initMode)
def setInitSteps(self, value):
"""
Sets the value of :py:attr:`initSteps`.
>>> algo = KMeans().setInitSteps(10)
>>> algo.getInitSteps()
10
"""
self._paramMap[self.initSteps] = value
return self
def getInitSteps(self):
"""
Gets the value of `initSteps`
"""
return self.getOrDefault(self.initSteps)
if __name__ == "__main__":
import doctest
from pyspark.context import SparkContext
from pyspark.sql import SQLContext
globs = globals().copy()
# The small batch size here ensures that we see multiple batches,
# even in these small test examples:
sc = SparkContext("local[2]", "ml.clustering tests")
sqlContext = SQLContext(sc)
globs['sc'] = sc
globs['sqlContext'] = sqlContext
(failure_count, test_count) = doctest.testmod(globs=globs, optionflags=doctest.ELLIPSIS)
sc.stop()
if failure_count:
exit(-1)
|
{
"content_hash": "1a78863ee874db002d66ce1465bf9fd6",
"timestamp": "",
"source": "github",
"line_count": 154,
"max_line_length": 100,
"avg_line_length": 35.922077922077925,
"alnum_prop": 0.5905639913232104,
"repo_name": "tophua/spark1.52",
"id": "cb4c16e25a7a3100a170de3fa50ba48404c201c4",
"size": "6317",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "python/pyspark/ml/clustering.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "26914"
},
{
"name": "C",
"bytes": "1493"
},
{
"name": "CSS",
"bytes": "15314"
},
{
"name": "Dockerfile",
"bytes": "4597"
},
{
"name": "HiveQL",
"bytes": "2018996"
},
{
"name": "Java",
"bytes": "1763581"
},
{
"name": "JavaScript",
"bytes": "68648"
},
{
"name": "Makefile",
"bytes": "7771"
},
{
"name": "Python",
"bytes": "1552537"
},
{
"name": "R",
"bytes": "452786"
},
{
"name": "Roff",
"bytes": "23131"
},
{
"name": "SQLPL",
"bytes": "3603"
},
{
"name": "Scala",
"bytes": "16031983"
},
{
"name": "Shell",
"bytes": "147300"
},
{
"name": "Thrift",
"bytes": "2016"
},
{
"name": "q",
"bytes": "154646"
}
],
"symlink_target": ""
}
|
def tk_processor(request):
PORTAL_URL = request.scheme + '://' + request.get_host()
return {'PORTAL_URL': PORTAL_URL}
|
{
"content_hash": "5e02626570e46b4c5c523f32bec7353e",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 60,
"avg_line_length": 42,
"alnum_prop": 0.6428571428571429,
"repo_name": "samitnuk/talks_keeper",
"id": "5432bccea882c5dbcabaab62bb5d5a19d09f0836",
"size": "126",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "project/context_processor.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "34083"
},
{
"name": "HTML",
"bytes": "11872"
},
{
"name": "JavaScript",
"bytes": "76768"
},
{
"name": "Python",
"bytes": "24510"
}
],
"symlink_target": ""
}
|
from pytest import mark
from translate.storage.placeables import base, general, parse, xliff, StringElem
class TestStringElem:
ORIGSTR = u'Ģët <a href="http://www.example.com" alt="Ģët &brand;!">&brandLong;</a>'
def setup_method(self, method):
self.elem = parse(self.ORIGSTR, general.parsers)
def test_parse(self):
assert unicode(self.elem) == self.ORIGSTR
def test_tree(self):
assert len(self.elem.sub) == 4
assert unicode(self.elem.sub[0]) == u'Ģët '
assert unicode(self.elem.sub[1]) == u'<a href="http://www.example.com" alt="Ģët &brand;!">'
assert unicode(self.elem.sub[2]) == u'&brandLong;'
assert unicode(self.elem.sub[3]) == u'</a>'
assert len(self.elem.sub[0].sub) == 1 and self.elem.sub[0].sub[0] == u'Ģët '
assert len(self.elem.sub[1].sub) == 1 and self.elem.sub[1].sub[0] == u'<a href="http://www.example.com" alt="Ģët &brand;!">'
assert len(self.elem.sub[2].sub) == 1 and self.elem.sub[2].sub[0] == u'&brandLong;'
assert len(self.elem.sub[3].sub) == 1 and self.elem.sub[3].sub[0] == u'</a>'
def test_add(self):
assert self.elem + ' ' == self.ORIGSTR + ' '
# ... and __radd__() ... doesn't work
#assert ' ' + self.elem == ' ' + self.ORIGSTR
def test_contains(self):
assert 'href' in self.elem
assert u'hrȩf' not in self.elem
def test_getitem(self):
assert self.elem[0] == u'Ģ'
assert self.elem[2] == 't'
def test_getslice(self):
assert self.elem[0:3] == u'Ģët'
def test_iter(self):
for chunk in self.elem:
assert issubclass(chunk.__class__, StringElem)
def test_len(self):
assert len(self.elem) == len(self.ORIGSTR)
def test_mul(self):
assert self.elem * 2 == self.ORIGSTR * 2
# ... and __rmul__()
assert 2 * self.elem == 2 * self.ORIGSTR
def test_elem_offset(self):
assert self.elem.elem_offset(self.elem.sub[0]) == 0
assert self.elem.elem_offset(self.elem.sub[1]) == 4
def test_elem_at_offset(self):
assert self.elem.elem_at_offset(0) is self.elem.sub[0]
assert self.elem.elem_at_offset(self.elem.find('!')) is self.elem.sub[1]
def test_find(self):
assert self.elem.find('example') == 24
assert self.elem.find(u'example') == 24
searchelem = parse(u'&brand;', general.parsers)
assert self.elem.find(searchelem) == 46
def test_find_elems_with(self):
assert self.elem.find_elems_with(u'Ģët') == [self.elem.sub[0], self.elem.sub[1]]
assert len(self.elem.find_elems_with('a')) == 3
def test_flatten(self):
assert u''.join([unicode(i) for i in self.elem.flatten()]) == self.ORIGSTR
def test_delete_range_case1(self):
# Case 1: Entire string #
elem = self.elem.copy()
deleted, parent, offset = elem.delete_range(0, len(elem))
assert deleted == self.elem
assert parent is None and offset is None
def test_delete_range_case2(self):
# Case 2: An entire element #
elem = self.elem.copy()
offset = elem.elem_offset(elem.sub[2])
deleted, parent, offset = elem.delete_range(offset, offset + len(elem.sub[2]))
assert deleted == self.elem.sub[2]
assert parent is elem
assert offset == len(elem.sub[0]) + len(elem.sub[1])
def test_delete_range_case3(self):
# Case 3: Within a single element #
elem = self.elem.copy()
deleted, parent, offset = elem.delete_range(1, 2)
assert deleted == StringElem(u'ë')
assert parent is elem.sub[0]
assert offset == 1
def test_delete_range_case4(self):
# Case 4: Across multiple elements #
elem = self.elem.copy()
# Delete the last two elements
deleted, parent, offset = elem.delete_range(elem.elem_offset(elem.sub[2]), len(elem))
assert deleted == self.elem
assert parent is None
assert offset is None
assert len(elem.sub) == 2
assert unicode(elem) == u'Ģët <a href="http://www.example.com" alt="Ģët &brand;!">'
        # A separate test case where the delete range includes elements between
        # the start and end elements.
origelem = parse(u'foo %s bar', general.parsers)
elem = origelem.copy()
assert len(elem.sub) == 3
deleted, parent, offset = elem.delete_range(3, 7)
assert deleted == origelem
assert parent is None
assert offset is None
assert unicode(elem) == 'foobar'
def test_insert(self):
# Test inserting at the beginning
elem = self.elem.copy()
elem.insert(0, u'xxx')
assert unicode(elem.sub[0]) == u'xxx' + unicode(self.elem.sub[0])
# Test inserting at the end
elem = self.elem.copy()
elem.insert(len(elem) + 1, u'xxx')
assert elem.flatten()[-1] == StringElem(u'xxx')
# Test inserting in the middle of an existing string
elem = self.elem.copy()
elem.insert(2, u'xxx')
assert unicode(elem.sub[0]) == u'Ģëxxxt '
# Test inserting between elements
elem = self.elem.copy()
elem.insert(56, u'xxx')
assert unicode(elem)[56:59] == u'xxx'
def test_isleaf(self):
for child in self.elem.sub:
assert child.isleaf()
def test_prune(self):
elem = StringElem(u'foo')
child = StringElem(u'bar')
elem.sub.append(child)
elem.prune()
assert elem == StringElem(u'foobar')
class TestConverters:
def setup_method(self, method):
self.elem = parse(TestStringElem.ORIGSTR, general.parsers)
def test_to_base_placeables(self):
basetree = base.to_base_placeables(self.elem)
        # The following asserts say that, even though self.elem and basetree represent the same
        # string (their unicode() results are equal), they are composed of different classes
        # (and so their repr()s are different).
assert unicode(self.elem) == unicode(basetree)
assert repr(self.elem) != repr(basetree)
@mark.xfail(reason="Test needs fixing, disabled for now")
def test_to_general_placeables(self):
basetree = base.to_base_placeables(self.elem)
gentree = general.to_general_placeables(basetree)
assert gentree == self.elem
@mark.xfail(reason="Test needs fixing, disabled for now")
def test_to_xliff_placeables(self):
basetree = base.to_base_placeables(self.elem)
xliff_from_base = xliff.to_xliff_placeables(basetree)
assert unicode(xliff_from_base) != unicode(self.elem)
assert repr(xliff_from_base) != repr(self.elem)
xliff_from_gen = xliff.to_xliff_placeables(self.elem)
assert unicode(xliff_from_gen) != unicode(self.elem)
assert repr(xliff_from_gen) != repr(self.elem)
assert unicode(xliff_from_base) == unicode(xliff_from_gen)
assert repr(xliff_from_base) == repr(xliff_from_gen)
if __name__ == '__main__':
for test in [TestStringElem(), TestConverters()]:
for method in dir(test):
if method.startswith('test_') and callable(getattr(test, method)):
getattr(test, method)()
test.elem.print_tree()
|
{
"content_hash": "a2ebfda22fbb114ba5457cb8493c26c8",
"timestamp": "",
"source": "github",
"line_count": 195,
"max_line_length": 132,
"avg_line_length": 37.42564102564103,
"alnum_prop": 0.6042751438750342,
"repo_name": "rlr/fjord",
"id": "ab7f9cb2c997e0e9ae8c584c18381d9c64558ac8",
"size": "8097",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "vendor/packages/translate-toolkit/translate/storage/placeables/test_base.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "159723"
},
{
"name": "HTML",
"bytes": "133287"
},
{
"name": "JavaScript",
"bytes": "304461"
},
{
"name": "Python",
"bytes": "823931"
},
{
"name": "Shell",
"bytes": "11741"
},
{
"name": "Smarty",
"bytes": "780"
}
],
"symlink_target": ""
}
|
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class ListApplications(Choreography):
def __init__(self, temboo_session):
"""
Create a new instance of the ListApplications Choreo. A TembooSession object, containing a valid
set of Temboo credentials, must be supplied.
"""
super(ListApplications, self).__init__(temboo_session, '/Library/Twilio/Applications/ListApplications')
def new_input_set(self):
return ListApplicationsInputSet()
def _make_result_set(self, result, path):
return ListApplicationsResultSet(result, path)
def _make_execution(self, session, exec_id, path):
return ListApplicationsChoreographyExecution(session, exec_id, path)
class ListApplicationsInputSet(InputSet):
"""
An InputSet with methods appropriate for specifying the inputs to the ListApplications
Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
"""
def set_AccountSID(self, value):
"""
Set the value of the AccountSID input for this Choreo. ((required, string) The AccountSID provided when you signed up for a Twilio account.)
"""
super(ListApplicationsInputSet, self)._set_input('AccountSID', value)
def set_AuthToken(self, value):
"""
Set the value of the AuthToken input for this Choreo. ((required, string) The authorization token provided when you signed up for a Twilio account.)
"""
super(ListApplicationsInputSet, self)._set_input('AuthToken', value)
def set_FriendlyName(self, value):
"""
Set the value of the FriendlyName input for this Choreo. ((optional, string) Only return applications with friendly names that exactly match this name.)
"""
super(ListApplicationsInputSet, self)._set_input('FriendlyName', value)
def set_PageSize(self, value):
"""
Set the value of the PageSize input for this Choreo. ((optional, integer) The number of results per page.)
"""
super(ListApplicationsInputSet, self)._set_input('PageSize', value)
def set_Page(self, value):
"""
Set the value of the Page input for this Choreo. ((optional, integer) The page of results to retrieve. Defaults to 0.)
"""
super(ListApplicationsInputSet, self)._set_input('Page', value)
def set_ResponseFormat(self, value):
"""
Set the value of the ResponseFormat input for this Choreo. ((optional, string) The format that the response should be in. Valid values are: json (the default) and xml.)
"""
super(ListApplicationsInputSet, self)._set_input('ResponseFormat', value)
class ListApplicationsResultSet(ResultSet):
"""
A ResultSet with methods tailored to the values returned by the ListApplications Choreo.
The ResultSet object is used to retrieve the results of a Choreo execution.
"""
def getJSONFromString(self, str):
return json.loads(str)
def get_Response(self):
"""
Retrieve the value for the "Response" output from this Choreo execution. (The response from Twilio.)
"""
return self._output.get('Response', None)
class ListApplicationsChoreographyExecution(ChoreographyExecution):
def _make_result_set(self, response, path):
return ListApplicationsResultSet(response, path)
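# A hedged usage sketch (not part of the generated SDK module), assuming the
# standard Temboo execution pattern; the session import path, helper names, and
# credentials below are assumptions, not taken from this file:
#
#   from temboo.core.session import TembooSession
#   session = TembooSession('ACCOUNT_NAME', 'APP_KEY_NAME', 'APP_KEY_VALUE')
#   choreo = ListApplications(session)
#   inputs = choreo.new_input_set()
#   inputs.set_AccountSID('AC...')
#   inputs.set_AuthToken('...')
#   results = choreo.execute_with_results(inputs)
#   print(results.get_Response())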
|
{
"content_hash": "9bb4dbbb420c5e051e1a6ac693711e0c",
"timestamp": "",
"source": "github",
"line_count": 81,
"max_line_length": 176,
"avg_line_length": 43.80246913580247,
"alnum_prop": 0.6964487034949267,
"repo_name": "jordanemedlock/psychtruths",
"id": "5a63e46d8172e5f9743d34230dee09e4f7c86518",
"size": "4437",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "temboo/Library/Twilio/Applications/ListApplications.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "18544"
},
{
"name": "HTML",
"bytes": "34650"
},
{
"name": "JavaScript",
"bytes": "423"
},
{
"name": "PHP",
"bytes": "1097"
},
{
"name": "Python",
"bytes": "23444578"
}
],
"symlink_target": ""
}
|
import sys
from oslo.config import cfg
from neutron.agent import securitygroups_rpc as sg_rpc
from neutron.api.rpc.agentnotifiers import dhcp_rpc_agent_api
from neutron.api.rpc.agentnotifiers import l3_rpc_agent_api
from neutron.api.v2 import attributes
from neutron.common import constants as q_const
from neutron.common import exceptions as q_exc
from neutron.common import rpc as q_rpc
from neutron.common import topics
from neutron.common import utils
from neutron.db import agents_db
from neutron.db import agentschedulers_db
from neutron.db import allowedaddresspairs_db as addr_pair_db
from neutron.db import db_base_plugin_v2
from neutron.db import dhcp_rpc_base
from neutron.db import external_net_db
from neutron.db import extradhcpopt_db
from neutron.db import extraroute_db
from neutron.db import l3_agentschedulers_db
from neutron.db import l3_gwmode_db
from neutron.db import l3_rpc_base
from neutron.db import portbindings_db
from neutron.db import quota_db # noqa
from neutron.db import securitygroups_rpc_base as sg_db_rpc
from neutron.extensions import allowedaddresspairs as addr_pair
from neutron.extensions import extra_dhcp_opt as edo_ext
from neutron.extensions import portbindings
from neutron.extensions import providernet as provider
from neutron import manager
from neutron.openstack.common import importutils
from neutron.openstack.common import log as logging
from neutron.openstack.common import rpc
from neutron.openstack.common.rpc import proxy
from neutron.plugins.common import constants as svc_constants
from neutron.plugins.common import utils as plugin_utils
from neutron.plugins.openvswitch.common import config # noqa
from neutron.plugins.openvswitch.common import constants
from neutron.plugins.openvswitch import ovs_db_v2
LOG = logging.getLogger(__name__)
class OVSRpcCallbacks(dhcp_rpc_base.DhcpRpcCallbackMixin,
l3_rpc_base.L3RpcCallbackMixin,
sg_db_rpc.SecurityGroupServerRpcCallbackMixin):
# history
# 1.0 Initial version
# 1.1 Support Security Group RPC
RPC_API_VERSION = '1.1'
def __init__(self, notifier, tunnel_type):
self.notifier = notifier
self.tunnel_type = tunnel_type
def create_rpc_dispatcher(self):
'''Get the rpc dispatcher for this manager.
If a manager would like to set an rpc API version, or support more than
one class as the target of rpc messages, override this method.
'''
return q_rpc.PluginRpcDispatcher([self,
agents_db.AgentExtRpcCallback()])
@classmethod
def get_port_from_device(cls, device):
port = ovs_db_v2.get_port_from_device(device)
if port:
port['device'] = device
return port
def get_device_details(self, rpc_context, **kwargs):
"""Agent requests device details."""
agent_id = kwargs.get('agent_id')
device = kwargs.get('device')
LOG.debug(_("Device %(device)s details requested from %(agent_id)s"),
{'device': device, 'agent_id': agent_id})
port = ovs_db_v2.get_port(device)
if port:
binding = ovs_db_v2.get_network_binding(None, port['network_id'])
entry = {'device': device,
'network_id': port['network_id'],
'port_id': port['id'],
'admin_state_up': port['admin_state_up'],
'network_type': binding.network_type,
'segmentation_id': binding.segmentation_id,
'physical_network': binding.physical_network}
new_status = (q_const.PORT_STATUS_ACTIVE if port['admin_state_up']
else q_const.PORT_STATUS_DOWN)
if port['status'] != new_status:
ovs_db_v2.set_port_status(port['id'], new_status)
else:
entry = {'device': device}
LOG.debug(_("%s can not be found in database"), device)
return entry
def update_device_down(self, rpc_context, **kwargs):
"""Device no longer exists on agent."""
agent_id = kwargs.get('agent_id')
device = kwargs.get('device')
host = kwargs.get('host')
port = ovs_db_v2.get_port(device)
LOG.debug(_("Device %(device)s no longer exists on %(agent_id)s"),
{'device': device, 'agent_id': agent_id})
if port:
entry = {'device': device,
'exists': True}
plugin = manager.NeutronManager.get_plugin()
if (host and
not plugin.get_port_host(rpc_context, port['id']) == host):
LOG.debug(_("Device %(device)s not bound to the"
" agent host %(host)s"),
{'device': device, 'host': host})
elif port['status'] != q_const.PORT_STATUS_DOWN:
# Set port status to DOWN
ovs_db_v2.set_port_status(port['id'],
q_const.PORT_STATUS_DOWN)
else:
entry = {'device': device,
'exists': False}
LOG.debug(_("%s can not be found in database"), device)
return entry
def update_device_up(self, rpc_context, **kwargs):
"""Device is up on agent."""
agent_id = kwargs.get('agent_id')
device = kwargs.get('device')
host = kwargs.get('host')
port = ovs_db_v2.get_port(device)
LOG.debug(_("Device %(device)s up on %(agent_id)s"),
{'device': device, 'agent_id': agent_id})
plugin = manager.NeutronManager.get_plugin()
if port:
if (host and
not plugin.get_port_host(rpc_context, port['id']) == host):
LOG.debug(_("Device %(device)s not bound to the"
" agent host %(host)s"),
{'device': device, 'host': host})
return
elif port['status'] != q_const.PORT_STATUS_ACTIVE:
ovs_db_v2.set_port_status(port['id'],
q_const.PORT_STATUS_ACTIVE)
else:
LOG.debug(_("%s can not be found in database"), device)
def tunnel_sync(self, rpc_context, **kwargs):
"""Update new tunnel.
        Updates the database with the tunnel IP. All listening agents will also
be notified about the new tunnel IP.
"""
tunnel_ip = kwargs.get('tunnel_ip')
# Update the database with the IP
tunnel = ovs_db_v2.add_tunnel_endpoint(tunnel_ip)
tunnels = ovs_db_v2.get_tunnel_endpoints()
entry = dict()
entry['tunnels'] = tunnels
# Notify all other listening agents
self.notifier.tunnel_update(rpc_context, tunnel.ip_address,
tunnel.id, self.tunnel_type)
# Return the list of tunnels IP's to the agent
return entry
class AgentNotifierApi(proxy.RpcProxy,
sg_rpc.SecurityGroupAgentRpcApiMixin):
'''Agent side of the openvswitch rpc API.
API version history:
1.0 - Initial version.
'''
BASE_RPC_API_VERSION = '1.0'
def __init__(self, topic):
super(AgentNotifierApi, self).__init__(
topic=topic, default_version=self.BASE_RPC_API_VERSION)
self.topic_network_delete = topics.get_topic_name(topic,
topics.NETWORK,
topics.DELETE)
self.topic_port_update = topics.get_topic_name(topic,
topics.PORT,
topics.UPDATE)
self.topic_tunnel_update = topics.get_topic_name(topic,
constants.TUNNEL,
topics.UPDATE)
def network_delete(self, context, network_id):
self.fanout_cast(context,
self.make_msg('network_delete',
network_id=network_id),
topic=self.topic_network_delete)
def port_update(self, context, port, network_type, segmentation_id,
physical_network):
self.fanout_cast(context,
self.make_msg('port_update',
port=port,
network_type=network_type,
segmentation_id=segmentation_id,
physical_network=physical_network),
topic=self.topic_port_update)
def tunnel_update(self, context, tunnel_ip, tunnel_id, tunnel_type):
self.fanout_cast(context,
self.make_msg('tunnel_update',
tunnel_ip=tunnel_ip,
tunnel_id=tunnel_id,
tunnel_type=tunnel_type),
topic=self.topic_tunnel_update)
class OVSNeutronPluginV2(db_base_plugin_v2.NeutronDbPluginV2,
external_net_db.External_net_db_mixin,
extraroute_db.ExtraRoute_db_mixin,
l3_gwmode_db.L3_NAT_db_mixin,
sg_db_rpc.SecurityGroupServerRpcMixin,
l3_agentschedulers_db.L3AgentSchedulerDbMixin,
agentschedulers_db.DhcpAgentSchedulerDbMixin,
portbindings_db.PortBindingMixin,
extradhcpopt_db.ExtraDhcpOptMixin,
addr_pair_db.AllowedAddressPairsMixin):
"""Implement the Neutron abstractions using Open vSwitch.
Depending on whether tunneling is enabled, either a GRE, VXLAN tunnel or
a new VLAN is created for each network. An agent is relied upon to
perform the actual OVS configuration on each host.
The provider extension is also supported. As discussed in
https://bugs.launchpad.net/neutron/+bug/1023156, this class could
be simplified, and filtering on extended attributes could be
handled, by adding support for extended attributes to the
NeutronDbPluginV2 base class. When that occurs, this class should
be updated to take advantage of it.
    The port binding extension enables an external application to relay
    information to and from the plugin.
"""
# This attribute specifies whether the plugin supports or not
# bulk/pagination/sorting operations. Name mangling is used in
# order to ensure it is qualified by class
__native_bulk_support = True
__native_pagination_support = True
__native_sorting_support = True
_supported_extension_aliases = ["provider", "external-net", "router",
"ext-gw-mode", "binding", "quotas",
"security-group", "agent", "extraroute",
"l3_agent_scheduler",
"dhcp_agent_scheduler",
"extra_dhcp_opt",
"allowed-address-pairs"]
@property
def supported_extension_aliases(self):
if not hasattr(self, '_aliases'):
aliases = self._supported_extension_aliases[:]
sg_rpc.disable_security_group_extension_if_noop_driver(aliases)
self._aliases = aliases
return self._aliases
db_base_plugin_v2.NeutronDbPluginV2.register_dict_extend_funcs(
attributes.NETWORKS, ['_extend_network_dict_provider_ovs'])
def __init__(self, configfile=None):
self.base_binding_dict = {
portbindings.VIF_TYPE: portbindings.VIF_TYPE_OVS,
portbindings.CAPABILITIES: {
portbindings.CAP_PORT_FILTER:
'security-group' in self.supported_extension_aliases}}
ovs_db_v2.initialize()
self._parse_network_vlan_ranges()
ovs_db_v2.sync_vlan_allocations(self.network_vlan_ranges)
self.tenant_network_type = cfg.CONF.OVS.tenant_network_type
if self.tenant_network_type not in [svc_constants.TYPE_LOCAL,
svc_constants.TYPE_VLAN,
svc_constants.TYPE_GRE,
svc_constants.TYPE_VXLAN,
svc_constants.TYPE_NONE]:
LOG.error(_("Invalid tenant_network_type: %s. "
"Server terminated!"),
self.tenant_network_type)
sys.exit(1)
self.enable_tunneling = cfg.CONF.OVS.enable_tunneling
self.tunnel_type = None
if self.enable_tunneling:
self.tunnel_type = (cfg.CONF.OVS.tunnel_type or
svc_constants.TYPE_GRE)
elif cfg.CONF.OVS.tunnel_type:
self.tunnel_type = cfg.CONF.OVS.tunnel_type
self.enable_tunneling = True
self.tunnel_id_ranges = []
if self.enable_tunneling:
self._parse_tunnel_id_ranges()
ovs_db_v2.sync_tunnel_allocations(self.tunnel_id_ranges)
elif self.tenant_network_type in constants.TUNNEL_NETWORK_TYPES:
LOG.error(_("Tunneling disabled but tenant_network_type is '%s'. "
"Server terminated!"), self.tenant_network_type)
sys.exit(1)
self.setup_rpc()
self.network_scheduler = importutils.import_object(
cfg.CONF.network_scheduler_driver
)
self.router_scheduler = importutils.import_object(
cfg.CONF.router_scheduler_driver
)
def setup_rpc(self):
# RPC support
self.service_topics = {svc_constants.CORE: topics.PLUGIN,
svc_constants.L3_ROUTER_NAT: topics.L3PLUGIN}
self.conn = rpc.create_connection(new=True)
self.notifier = AgentNotifierApi(topics.AGENT)
self.agent_notifiers[q_const.AGENT_TYPE_DHCP] = (
dhcp_rpc_agent_api.DhcpAgentNotifyAPI()
)
self.agent_notifiers[q_const.AGENT_TYPE_L3] = (
l3_rpc_agent_api.L3AgentNotify
)
self.callbacks = OVSRpcCallbacks(self.notifier, self.tunnel_type)
self.dispatcher = self.callbacks.create_rpc_dispatcher()
for svc_topic in self.service_topics.values():
self.conn.create_consumer(svc_topic, self.dispatcher, fanout=False)
# Consume from all consumers in a thread
self.conn.consume_in_thread()
def _parse_network_vlan_ranges(self):
try:
self.network_vlan_ranges = plugin_utils.parse_network_vlan_ranges(
cfg.CONF.OVS.network_vlan_ranges)
except Exception as ex:
LOG.error(_("%s. Server terminated!"), ex)
sys.exit(1)
LOG.info(_("Network VLAN ranges: %s"), self.network_vlan_ranges)
def _parse_tunnel_id_ranges(self):
for entry in cfg.CONF.OVS.tunnel_id_ranges:
entry = entry.strip()
try:
tun_min, tun_max = entry.split(':')
self.tunnel_id_ranges.append((int(tun_min), int(tun_max)))
except ValueError as ex:
LOG.error(_("Invalid tunnel ID range: "
"'%(range)s' - %(e)s. Server terminated!"),
{'range': entry, 'e': ex})
sys.exit(1)
LOG.info(_("Tunnel ID ranges: %s"), self.tunnel_id_ranges)
def _extend_network_dict_provider_ovs(self, network, net_db,
net_binding=None):
        # this method is used in two cases: when the binding is provided explicitly
        # and when it is part of the db model object
binding = net_db.binding if net_db else net_binding
network[provider.NETWORK_TYPE] = binding.network_type
if binding.network_type in constants.TUNNEL_NETWORK_TYPES:
network[provider.PHYSICAL_NETWORK] = None
network[provider.SEGMENTATION_ID] = binding.segmentation_id
elif binding.network_type == svc_constants.TYPE_FLAT:
network[provider.PHYSICAL_NETWORK] = binding.physical_network
network[provider.SEGMENTATION_ID] = None
elif binding.network_type == svc_constants.TYPE_VLAN:
network[provider.PHYSICAL_NETWORK] = binding.physical_network
network[provider.SEGMENTATION_ID] = binding.segmentation_id
elif binding.network_type == svc_constants.TYPE_LOCAL:
network[provider.PHYSICAL_NETWORK] = None
network[provider.SEGMENTATION_ID] = None
def _process_provider_create(self, context, attrs):
network_type = attrs.get(provider.NETWORK_TYPE)
physical_network = attrs.get(provider.PHYSICAL_NETWORK)
segmentation_id = attrs.get(provider.SEGMENTATION_ID)
network_type_set = attributes.is_attr_set(network_type)
physical_network_set = attributes.is_attr_set(physical_network)
segmentation_id_set = attributes.is_attr_set(segmentation_id)
if not (network_type_set or physical_network_set or
segmentation_id_set):
return (None, None, None)
if not network_type_set:
msg = _("provider:network_type required")
raise q_exc.InvalidInput(error_message=msg)
elif network_type == svc_constants.TYPE_FLAT:
if segmentation_id_set:
msg = _("provider:segmentation_id specified for flat network")
raise q_exc.InvalidInput(error_message=msg)
else:
segmentation_id = constants.FLAT_VLAN_ID
elif network_type == svc_constants.TYPE_VLAN:
if not segmentation_id_set:
msg = _("provider:segmentation_id required")
raise q_exc.InvalidInput(error_message=msg)
if not utils.is_valid_vlan_tag(segmentation_id):
msg = (_("provider:segmentation_id out of range "
"(%(min_id)s through %(max_id)s)") %
{'min_id': q_const.MIN_VLAN_TAG,
'max_id': q_const.MAX_VLAN_TAG})
raise q_exc.InvalidInput(error_message=msg)
elif network_type in constants.TUNNEL_NETWORK_TYPES:
if not self.enable_tunneling:
msg = _("%s networks are not enabled") % network_type
raise q_exc.InvalidInput(error_message=msg)
if physical_network_set:
msg = _("provider:physical_network specified for %s "
"network") % network_type
raise q_exc.InvalidInput(error_message=msg)
else:
physical_network = None
if not segmentation_id_set:
msg = _("provider:segmentation_id required")
raise q_exc.InvalidInput(error_message=msg)
elif network_type == svc_constants.TYPE_LOCAL:
if physical_network_set:
msg = _("provider:physical_network specified for local "
"network")
raise q_exc.InvalidInput(error_message=msg)
else:
physical_network = None
if segmentation_id_set:
msg = _("provider:segmentation_id specified for local "
"network")
raise q_exc.InvalidInput(error_message=msg)
else:
segmentation_id = None
else:
msg = _("provider:network_type %s not supported") % network_type
raise q_exc.InvalidInput(error_message=msg)
if network_type in [svc_constants.TYPE_VLAN, svc_constants.TYPE_FLAT]:
if physical_network_set:
if physical_network not in self.network_vlan_ranges:
msg = _("Unknown provider:physical_network "
"%s") % physical_network
raise q_exc.InvalidInput(error_message=msg)
elif 'default' in self.network_vlan_ranges:
physical_network = 'default'
else:
msg = _("provider:physical_network required")
raise q_exc.InvalidInput(error_message=msg)
return (network_type, physical_network, segmentation_id)
def create_network(self, context, network):
(network_type, physical_network,
segmentation_id) = self._process_provider_create(context,
network['network'])
session = context.session
#set up default security groups
tenant_id = self._get_tenant_id_for_create(
context, network['network'])
self._ensure_default_security_group(context, tenant_id)
with session.begin(subtransactions=True):
if not network_type:
# tenant network
network_type = self.tenant_network_type
if network_type == svc_constants.TYPE_NONE:
raise q_exc.TenantNetworksDisabled()
elif network_type == svc_constants.TYPE_VLAN:
(physical_network,
segmentation_id) = ovs_db_v2.reserve_vlan(session)
elif network_type in constants.TUNNEL_NETWORK_TYPES:
segmentation_id = ovs_db_v2.reserve_tunnel(session)
# no reservation needed for TYPE_LOCAL
else:
# provider network
if network_type in [svc_constants.TYPE_VLAN,
svc_constants.TYPE_FLAT]:
ovs_db_v2.reserve_specific_vlan(session, physical_network,
segmentation_id)
elif network_type in constants.TUNNEL_NETWORK_TYPES:
ovs_db_v2.reserve_specific_tunnel(session, segmentation_id)
# no reservation needed for TYPE_LOCAL
net = super(OVSNeutronPluginV2, self).create_network(context,
network)
binding = ovs_db_v2.add_network_binding(session, net['id'],
network_type,
physical_network,
segmentation_id)
self._process_l3_create(context, net, network['network'])
# passing None as db model to use binding object
self._extend_network_dict_provider_ovs(net, None, binding)
# note - exception will rollback entire transaction
LOG.debug(_("Created network: %s"), net['id'])
return net
def update_network(self, context, id, network):
provider._raise_if_updates_provider_attributes(network['network'])
session = context.session
with session.begin(subtransactions=True):
net = super(OVSNeutronPluginV2, self).update_network(context, id,
network)
self._process_l3_update(context, net, network['network'])
return net
def delete_network(self, context, id):
session = context.session
with session.begin(subtransactions=True):
binding = ovs_db_v2.get_network_binding(session, id)
super(OVSNeutronPluginV2, self).delete_network(context, id)
if binding.network_type in constants.TUNNEL_NETWORK_TYPES:
ovs_db_v2.release_tunnel(session, binding.segmentation_id,
self.tunnel_id_ranges)
elif binding.network_type in [svc_constants.TYPE_VLAN,
svc_constants.TYPE_FLAT]:
ovs_db_v2.release_vlan(session, binding.physical_network,
binding.segmentation_id,
self.network_vlan_ranges)
# the network_binding record is deleted via cascade from
# the network record, so explicit removal is not necessary
self.notifier.network_delete(context, id)
def get_network(self, context, id, fields=None):
session = context.session
with session.begin(subtransactions=True):
net = super(OVSNeutronPluginV2, self).get_network(context,
id, None)
return self._fields(net, fields)
def get_networks(self, context, filters=None, fields=None,
sorts=None,
limit=None, marker=None, page_reverse=False):
session = context.session
with session.begin(subtransactions=True):
nets = super(OVSNeutronPluginV2,
self).get_networks(context, filters, None, sorts,
limit, marker, page_reverse)
return [self._fields(net, fields) for net in nets]
def create_port(self, context, port):
# Set port status as 'DOWN'. This will be updated by agent
port['port']['status'] = q_const.PORT_STATUS_DOWN
port_data = port['port']
session = context.session
with session.begin(subtransactions=True):
self._ensure_default_security_group_on_port(context, port)
sgids = self._get_security_groups_on_port(context, port)
dhcp_opts = port['port'].get(edo_ext.EXTRADHCPOPTS, [])
port = super(OVSNeutronPluginV2, self).create_port(context, port)
self._process_portbindings_create_and_update(context,
port_data, port)
self._process_port_create_security_group(context, port, sgids)
self._process_port_create_extra_dhcp_opts(context, port,
dhcp_opts)
port[addr_pair.ADDRESS_PAIRS] = (
self._process_create_allowed_address_pairs(
context, port,
port_data.get(addr_pair.ADDRESS_PAIRS)))
self.notify_security_groups_member_updated(context, port)
return port
def update_port(self, context, id, port):
session = context.session
need_port_update_notify = False
changed_fixed_ips = 'fixed_ips' in port['port']
with session.begin(subtransactions=True):
original_port = super(OVSNeutronPluginV2, self).get_port(
context, id)
updated_port = super(OVSNeutronPluginV2, self).update_port(
context, id, port)
if self.is_address_pairs_attribute_updated(original_port, port):
self._delete_allowed_address_pairs(context, id)
self._process_create_allowed_address_pairs(
context, updated_port,
port['port'][addr_pair.ADDRESS_PAIRS])
need_port_update_notify = True
elif changed_fixed_ips:
self._check_fixed_ips_and_address_pairs_no_overlap(
context, updated_port)
need_port_update_notify |= self.update_security_group_on_port(
context, id, port, original_port, updated_port)
self._process_portbindings_create_and_update(context,
port['port'],
updated_port)
need_port_update_notify |= self._update_extra_dhcp_opts_on_port(
context, id, port, updated_port)
need_port_update_notify |= self.is_security_group_member_updated(
context, original_port, updated_port)
if original_port['admin_state_up'] != updated_port['admin_state_up']:
need_port_update_notify = True
if need_port_update_notify:
binding = ovs_db_v2.get_network_binding(None,
updated_port['network_id'])
self.notifier.port_update(context, updated_port,
binding.network_type,
binding.segmentation_id,
binding.physical_network)
return updated_port
def delete_port(self, context, id, l3_port_check=True):
# if needed, check to see if this is a port owned by
        # an l3-router. If so, we should prevent deletion.
if l3_port_check:
self.prevent_l3_port_deletion(context, id)
session = context.session
with session.begin(subtransactions=True):
self.disassociate_floatingips(context, id)
port = self.get_port(context, id)
self._delete_port_security_group_bindings(context, id)
super(OVSNeutronPluginV2, self).delete_port(context, id)
self.notify_security_groups_member_updated(context, port)
|
{
"content_hash": "bafaa9e6a1b51479562a545ba2ebb4ba",
"timestamp": "",
"source": "github",
"line_count": 619,
"max_line_length": 79,
"avg_line_length": 47.135702746365105,
"alnum_prop": 0.5653425643486307,
"repo_name": "oeeagle/quantum",
"id": "450ce3dd241230bd2806a7b65c74fc4a754447ea",
"size": "30191",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "neutron/plugins/openvswitch/ovs_neutron_plugin.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "1497"
},
{
"name": "Python",
"bytes": "7346644"
},
{
"name": "Shell",
"bytes": "8983"
}
],
"symlink_target": ""
}
|
ENTRY_ID_PREFIX = 'sss_'
# The below asset type specific strings are appended to the standard
# ENTRY_ID_PREFIX when generating Sisense Entry IDs.
ENTRY_ID_PART_DASHBOARD = 'db_'
ENTRY_ID_PART_FOLDER = 'fd_'
ENTRY_ID_PART_WIDGET = 'wg_'
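# A hedged illustration (not part of the original module): the asset-type
# strings above are appended to ENTRY_ID_PREFIX when a Data Catalog Entry ID
# is generated, e.g. (with a made-up Sisense asset id):
#
#   ENTRY_ID_PREFIX + ENTRY_ID_PART_DASHBOARD + 'abc123'  # -> 'sss_db_abc123'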
# Name of the column used to store fields metadata in Data Catalog entries.
ENTRY_COLUMN_FIELDS = 'fields'
# Name of the column used to store metadata for nested JAQL Query filters in
# Data Catalog entries.
ENTRY_COLUMN_FILTER_BY = 'filterBy'
# Name of the column used to store filters metadata in Data Catalog entries.
ENTRY_COLUMN_FILTERS = 'filters'
# Name of the column used to store formula metadata in Data Catalog entries.
ENTRY_COLUMN_FORMULA = 'formula'
# The Sisense type for Dashboard assets.
SISENSE_ASSET_TYPE_DASHBOARD = 'dashboard'
# The Sisense type for Folder assets.
SISENSE_ASSET_TYPE_FOLDER = 'folder'
# The ID of the Tag Template created to store additional metadata for
# Dashboard-related Entries.
TAG_TEMPLATE_ID_DASHBOARD = 'sisense_dashboard_metadata'
# The ID of the Tag Template created to store additional metadata for
# Folder-related Entries.
TAG_TEMPLATE_ID_FOLDER = 'sisense_folder_metadata'
# The ID of the Tag Template created to store lineage metadata for
# JAQL-dependent entities.
TAG_TEMPLATE_ID_JAQL = 'sisense_jaql_metadata'
# The ID of the Tag Template created to store additional metadata for
# Widget-related Entries.
TAG_TEMPLATE_ID_WIDGET = 'sisense_widget_metadata'
# The user specified type of Dashboard-related Entries.
USER_SPECIFIED_TYPE_DASHBOARD = 'Dashboard'
# The user specified type of Folder-related Entries.
USER_SPECIFIED_TYPE_FOLDER = 'Folder'
# The user specified type of Widget-related Entries.
USER_SPECIFIED_TYPE_WIDGET = 'Widget'
# Name of the field used by Sisense Dashboards to store their filters.
DASHBOARD_FILTERS_FIELD_NAME = 'filters'
# Name of the field used by Sisense JAQL Queries to store their formula
# contexts.
JAQL_CONTEXT_FIELD_NAME = 'context'
# Name of the field used by Sisense JAQL Queries to store their filters.
JAQL_FILTER_FIELD_NAME = 'filter'
# Name of the field used by Sisense JAQL Query filters to store their own JAQL
# Queries.
JAQL_FILTER_BY_FIELD_NAME = 'by'
# Name of the field used by Sisense JAQL Queries to store their formulas.
JAQL_FORMULA_FIELD_NAME = 'formula'
# Name of the panel used by Sisense Widgets to store their filters.
WIDGET_FILTERS_PANEL_NAME = 'filters'
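# Illustrative sketch (an assumption about usage, not taken from the connector
# code itself): Entry IDs are expected to be assembled by concatenating the
# common prefix, the asset-type part, and the asset's own identifier, e.g.:
#
#     entry_id = ENTRY_ID_PREFIX + ENTRY_ID_PART_DASHBOARD + 'abc123'
#     # -> 'sss_db_abc123'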
|
{
"content_hash": "7b8ad12acf05f06b72de48e9b5586586",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 78,
"avg_line_length": 43.142857142857146,
"alnum_prop": 0.7731788079470199,
"repo_name": "GoogleCloudPlatform/datacatalog-connectors-bi",
"id": "01a9639de487d3c715e3c1d29a7dfeeaba28959f",
"size": "3065",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "google-datacatalog-sisense-connector/src/google/datacatalog_connectors/sisense/prepare/constants.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "3191"
},
{
"name": "Python",
"bytes": "980579"
},
{
"name": "Shell",
"bytes": "9469"
}
],
"symlink_target": ""
}
|
"""
.. module:: Leader
Leader
*************
:Description: Leader Algorithm Clustering
:Authors: bejar
:Version:
:Created on: 07/07/2014 8:29
"""
__author__ = 'bejar'
import numpy as np
from sklearn.base import BaseEstimator, ClusterMixin, TransformerMixin
from sklearn.metrics.pairwise import euclidean_distances
class Leader(BaseEstimator, ClusterMixin, TransformerMixin):
"""Leader Algorithm Clustering
Partition of a dataset using the leader algorithm
Parameters:
radius: float
        Clustering radius for assigning examples to a cluster
"""
def __init__(self, radius):
self.radius = radius
self.cluster_centers_ = None
self.labels_ = None
self.cluster_sizes_ = None
def num_clusters(self):
return self.cluster_centers_.shape[0]
def fit(self, X):
"""
Clusters the examples
:param X:
:return:
"""
self.cluster_centers_, self.labels_, self.cluster_sizes_ = self._fit_process(X)
return self
def predict(self, X):
"""
Returns the nearest cluster for a data matrix
@param X:
@return:
"""
clasif = []
for i in range(X.shape[0]):
ncl, mdist = self._find_nearest_cluster(X[i].reshape(1, -1), self.cluster_centers_)
if mdist <= self.radius:
clasif.append(ncl)
else:
clasif.append(-1)
return clasif
def _fit_process(self, X):
"""
        Incrementally clusters the examples
:param X:
:return:
"""
assignments = []
scenters = np.zeros((1, X.shape[1]))
centers = np.zeros((1, X.shape[1]))
# Initialize with the first example
scenters[0] = X[0]
centers[0] = X[0]
assignments.append([0])
csizes = np.array([1])
# Cluster the rest of examples
for i in range(1, X.shape[0]):
ncl, mdist = self._find_nearest_cluster(X[i].reshape(1, -1), centers)
# if distance is less than radius, introduce example in nearest class
if mdist <= self.radius:
scenters[ncl] += X[i]
csizes[ncl] += 1
centers[ncl] = scenters[ncl] / csizes[ncl]
assignments[ncl].append(i)
else: # Create a new cluster
scenters = np.append(scenters, np.array([X[i]]), 0)
centers = np.append(centers, np.array([X[i]]), 0)
csizes = np.append(csizes, [1], 0)
assignments.append([i])
labels = np.zeros(X.shape[0])
for l, ej in enumerate(assignments):
for e in ej:
labels[e] = l
return centers, labels, csizes
@staticmethod
def _find_nearest_cluster(examp, centers):
"""
Finds the nearest cluster for an example
:param examp:
:param centers:
:return:
"""
dist = euclidean_distances(centers, examp)
pmin = np.argmin(dist)
vmin = np.min(dist)
return pmin, vmin
if __name__ == '__main__':
from sklearn.datasets import make_blobs, load_iris, make_circles
X, y_data = make_circles(n_samples=1000, noise=0.5, random_state=4, factor=0.5)
ld = Leader(radius=.01)
ld.fit(X)
print(ld.predict(np.array([[0, 0]])))
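    # Minimal additional sketch (illustrative values): on well-separated blobs a
    # larger radius tends to produce roughly one cluster per blob; the exact
    # counts depend on the data and on the radius chosen.
    Xb, _ = make_blobs(n_samples=300, centers=3, cluster_std=0.5, random_state=0)
    ldb = Leader(radius=2.0)
    ldb.fit(Xb)
    print(ldb.num_clusters(), ldb.cluster_sizes_)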
|
{
"content_hash": "4698739399a008a9e5cf1f1f15a814a2",
"timestamp": "",
"source": "github",
"line_count": 134,
"max_line_length": 95,
"avg_line_length": 25.417910447761194,
"alnum_prop": 0.5504991192014093,
"repo_name": "bejar/kemlglearn",
"id": "149d69e54a8da7297b52cd9950cd9fd875b88a98",
"size": "3406",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "kemlglearn/cluster/Leader.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "126378"
}
],
"symlink_target": ""
}
|
"""Descriptors essentially contain exactly the information found in a .proto
file, in types that make this information accessible in Python.
"""
__author__ = 'robinson@google.com (Will Robinson)'
import threading
import warnings
import six
from google.protobuf.internal import api_implementation
_USE_C_DESCRIPTORS = False
if api_implementation.Type() == 'cpp':
# Used by MakeDescriptor in cpp mode
import binascii
import os
from google.protobuf.pyext import _message
_USE_C_DESCRIPTORS = True
class Error(Exception):
"""Base error for this module."""
class TypeTransformationError(Error):
"""Error transforming between python proto type and corresponding C++ type."""
if _USE_C_DESCRIPTORS:
  # This metaclass allows overriding the behavior of code like
# isinstance(my_descriptor, FieldDescriptor)
# and make it return True when the descriptor is an instance of the extension
# type written in C++.
class DescriptorMetaclass(type):
def __instancecheck__(cls, obj):
if super(DescriptorMetaclass, cls).__instancecheck__(obj):
return True
if isinstance(obj, cls._C_DESCRIPTOR_CLASS):
return True
return False
else:
# The standard metaclass; nothing changes.
DescriptorMetaclass = type
class _Lock(object):
"""Wrapper class of threading.Lock(), which is allowed by 'with'."""
def __new__(cls):
self = object.__new__(cls)
self._lock = threading.Lock() # pylint: disable=protected-access
return self
def __enter__(self):
self._lock.acquire()
def __exit__(self, exc_type, exc_value, exc_tb):
self._lock.release()
_lock = threading.Lock()
def _Deprecated(name):
if _Deprecated.count > 0:
_Deprecated.count -= 1
warnings.warn(
        'Call to deprecated create function %s(). Note: Creating unlinked '
'descriptors is going to go away. Please use get/find descriptors from '
'generated code or query the descriptor_pool.'
% name,
category=DeprecationWarning, stacklevel=3)
# Deprecation warnings are printed at most 100 times, which should be enough
# for users to notice without causing a timeout.
_Deprecated.count = 100
_internal_create_key = object()
class DescriptorBase(six.with_metaclass(DescriptorMetaclass)):
"""Descriptors base class.
This class is the base of all descriptor classes. It provides common options
related functionality.
Attributes:
has_options: True if the descriptor has non-default options. Usually it
is not necessary to read this -- just call GetOptions() which will
happily return the default instance. However, it's sometimes useful
for efficiency, and also useful inside the protobuf implementation to
avoid some bootstrapping issues.
"""
if _USE_C_DESCRIPTORS:
# The class, or tuple of classes, that are considered as "virtual
# subclasses" of this descriptor class.
_C_DESCRIPTOR_CLASS = ()
def __init__(self, options, serialized_options, options_class_name):
"""Initialize the descriptor given its options message and the name of the
class of the options message. The name of the class is required in case
the options message is None and has to be created.
"""
self._options = options
self._options_class_name = options_class_name
self._serialized_options = serialized_options
# Does this descriptor have non-default options?
self.has_options = (options is not None) or (serialized_options is not None)
def _SetOptions(self, options, options_class_name):
"""Sets the descriptor's options
This function is used in generated proto2 files to update descriptor
options. It must not be used outside proto2.
"""
self._options = options
self._options_class_name = options_class_name
# Does this descriptor have non-default options?
self.has_options = options is not None
def GetOptions(self):
"""Retrieves descriptor options.
This method returns the options set or creates the default options for the
descriptor.
"""
if self._options:
return self._options
from google.protobuf import descriptor_pb2
try:
options_class = getattr(descriptor_pb2,
self._options_class_name)
except AttributeError:
raise RuntimeError('Unknown options class name %s!' %
(self._options_class_name))
with _lock:
if self._serialized_options is None:
self._options = options_class()
else:
self._options = _ParseOptions(options_class(),
self._serialized_options)
return self._options
class _NestedDescriptorBase(DescriptorBase):
"""Common class for descriptors that can be nested."""
def __init__(self, options, options_class_name, name, full_name,
file, containing_type, serialized_start=None,
serialized_end=None, serialized_options=None):
"""Constructor.
Args:
options: Protocol message options or None
to use default message options.
options_class_name (str): The class name of the above options.
name (str): Name of this protocol message type.
full_name (str): Fully-qualified name of this protocol message type,
which will include protocol "package" name and the name of any
enclosing types.
file (FileDescriptor): Reference to file info.
containing_type: if provided, this is a nested descriptor, with this
descriptor as parent, otherwise None.
serialized_start: The start index (inclusive) in block in the
file.serialized_pb that describes this descriptor.
serialized_end: The end index (exclusive) in block in the
file.serialized_pb that describes this descriptor.
serialized_options: Protocol message serialized options or None.
"""
super(_NestedDescriptorBase, self).__init__(
options, serialized_options, options_class_name)
self.name = name
# TODO(falk): Add function to calculate full_name instead of having it in
# memory?
self.full_name = full_name
self.file = file
self.containing_type = containing_type
self._serialized_start = serialized_start
self._serialized_end = serialized_end
def CopyToProto(self, proto):
"""Copies this to the matching proto in descriptor_pb2.
Args:
proto: An empty proto instance from descriptor_pb2.
Raises:
      Error: If self couldn't be serialized, due to too few constructor
arguments.
"""
if (self.file is not None and
self._serialized_start is not None and
self._serialized_end is not None):
proto.ParseFromString(self.file.serialized_pb[
self._serialized_start:self._serialized_end])
else:
raise Error('Descriptor does not contain serialization.')
class Descriptor(_NestedDescriptorBase):
"""Descriptor for a protocol message type.
Attributes:
name (str): Name of this protocol message type.
full_name (str): Fully-qualified name of this protocol message type,
which will include protocol "package" name and the name of any
enclosing types.
containing_type (Descriptor): Reference to the descriptor of the type
containing us, or None if this is top-level.
fields (list[FieldDescriptor]): Field descriptors for all fields in
this type.
fields_by_number (dict(int, FieldDescriptor)): Same
:class:`FieldDescriptor` objects as in :attr:`fields`, but indexed
by "number" attribute in each FieldDescriptor.
fields_by_name (dict(str, FieldDescriptor)): Same
:class:`FieldDescriptor` objects as in :attr:`fields`, but indexed by
"name" attribute in each :class:`FieldDescriptor`.
nested_types (list[Descriptor]): Descriptor references
for all protocol message types nested within this one.
nested_types_by_name (dict(str, Descriptor)): Same Descriptor
objects as in :attr:`nested_types`, but indexed by "name" attribute
in each Descriptor.
enum_types (list[EnumDescriptor]): :class:`EnumDescriptor` references
for all enums contained within this type.
enum_types_by_name (dict(str, EnumDescriptor)): Same
:class:`EnumDescriptor` objects as in :attr:`enum_types`, but
indexed by "name" attribute in each EnumDescriptor.
enum_values_by_name (dict(str, EnumValueDescriptor)): Dict mapping
from enum value name to :class:`EnumValueDescriptor` for that value.
extensions (list[FieldDescriptor]): All extensions defined directly
within this message type (NOT within a nested type).
extensions_by_name (dict(str, FieldDescriptor)): Same FieldDescriptor
objects as :attr:`extensions`, but indexed by "name" attribute of each
FieldDescriptor.
is_extendable (bool): Does this type define any extension ranges?
oneofs (list[OneofDescriptor]): The list of descriptors for oneof fields
in this message.
oneofs_by_name (dict(str, OneofDescriptor)): Same objects as in
:attr:`oneofs`, but indexed by "name" attribute.
file (FileDescriptor): Reference to file descriptor.
"""
if _USE_C_DESCRIPTORS:
_C_DESCRIPTOR_CLASS = _message.Descriptor
def __new__(cls, name, full_name, filename, containing_type, fields,
nested_types, enum_types, extensions, options=None,
serialized_options=None,
is_extendable=True, extension_ranges=None, oneofs=None,
file=None, serialized_start=None, serialized_end=None, # pylint: disable=redefined-builtin
syntax=None, create_key=None):
_message.Message._CheckCalledFromGeneratedFile()
return _message.default_pool.FindMessageTypeByName(full_name)
# NOTE(tmarek): The file argument redefining a builtin is nothing we can
# fix right now since we don't know how many clients already rely on the
# name of the argument.
def __init__(self, name, full_name, filename, containing_type, fields,
nested_types, enum_types, extensions, options=None,
serialized_options=None,
is_extendable=True, extension_ranges=None, oneofs=None,
file=None, serialized_start=None, serialized_end=None, # pylint: disable=redefined-builtin
syntax=None, create_key=None):
"""Arguments to __init__() are as described in the description
of Descriptor fields above.
Note that filename is an obsolete argument, that is not used anymore.
Please use file.name to access this as an attribute.
"""
if create_key is not _internal_create_key:
_Deprecated('Descriptor')
super(Descriptor, self).__init__(
options, 'MessageOptions', name, full_name, file,
containing_type, serialized_start=serialized_start,
serialized_end=serialized_end, serialized_options=serialized_options)
# We have fields in addition to fields_by_name and fields_by_number,
# so that:
# 1. Clients can index fields by "order in which they're listed."
# 2. Clients can easily iterate over all fields with the terse
# syntax: for f in descriptor.fields: ...
self.fields = fields
for field in self.fields:
field.containing_type = self
self.fields_by_number = dict((f.number, f) for f in fields)
self.fields_by_name = dict((f.name, f) for f in fields)
self._fields_by_camelcase_name = None
self.nested_types = nested_types
for nested_type in nested_types:
nested_type.containing_type = self
self.nested_types_by_name = dict((t.name, t) for t in nested_types)
self.enum_types = enum_types
for enum_type in self.enum_types:
enum_type.containing_type = self
self.enum_types_by_name = dict((t.name, t) for t in enum_types)
self.enum_values_by_name = dict(
(v.name, v) for t in enum_types for v in t.values)
self.extensions = extensions
for extension in self.extensions:
extension.extension_scope = self
self.extensions_by_name = dict((f.name, f) for f in extensions)
self.is_extendable = is_extendable
self.extension_ranges = extension_ranges
self.oneofs = oneofs if oneofs is not None else []
self.oneofs_by_name = dict((o.name, o) for o in self.oneofs)
for oneof in self.oneofs:
oneof.containing_type = self
self.syntax = syntax or "proto2"
@property
def fields_by_camelcase_name(self):
"""Same FieldDescriptor objects as in :attr:`fields`, but indexed by
:attr:`FieldDescriptor.camelcase_name`.
"""
if self._fields_by_camelcase_name is None:
self._fields_by_camelcase_name = dict(
(f.camelcase_name, f) for f in self.fields)
return self._fields_by_camelcase_name
def EnumValueName(self, enum, value):
"""Returns the string name of an enum value.
This is just a small helper method to simplify a common operation.
Args:
enum: string name of the Enum.
value: int, value of the enum.
Returns:
string name of the enum value.
Raises:
KeyError if either the Enum doesn't exist or the value is not a valid
value for the enum.
"""
return self.enum_types_by_name[enum].values_by_number[value].name
def CopyToProto(self, proto):
"""Copies this to a descriptor_pb2.DescriptorProto.
Args:
proto: An empty descriptor_pb2.DescriptorProto.
"""
# This function is overridden to give a better doc comment.
super(Descriptor, self).CopyToProto(proto)
# TODO(robinson): We should have aggressive checking here,
# for example:
# * If you specify a repeated field, you should not be allowed
# to specify a default value.
# * [Other examples here as needed].
#
# TODO(robinson): for this and other *Descriptor classes, we
# might also want to lock things down aggressively (e.g.,
# prevent clients from setting the attributes). Having
# stronger invariants here in general will reduce the number
# of runtime checks we must do in reflection.py...
class FieldDescriptor(DescriptorBase):
"""Descriptor for a single field in a .proto file.
Attributes:
name (str): Name of this field, exactly as it appears in .proto.
full_name (str): Name of this field, including containing scope. This is
particularly relevant for extensions.
index (int): Dense, 0-indexed index giving the order that this
field textually appears within its message in the .proto file.
number (int): Tag number declared for this field in the .proto file.
type (int): (One of the TYPE_* constants below) Declared type.
cpp_type (int): (One of the CPPTYPE_* constants below) C++ type used to
represent this field.
label (int): (One of the LABEL_* constants below) Tells whether this
field is optional, required, or repeated.
has_default_value (bool): True if this field has a default value defined,
otherwise false.
default_value (Varies): Default value of this field. Only
meaningful for non-repeated scalar fields. Repeated fields
should always set this to [], and non-repeated composite
fields should always set this to None.
containing_type (Descriptor): Descriptor of the protocol message
type that contains this field. Set by the Descriptor constructor
if we're passed into one.
Somewhat confusingly, for extension fields, this is the
descriptor of the EXTENDED message, not the descriptor
of the message containing this field. (See is_extension and
extension_scope below).
message_type (Descriptor): If a composite field, a descriptor
of the message type contained in this field. Otherwise, this is None.
enum_type (EnumDescriptor): If this field contains an enum, a
descriptor of that enum. Otherwise, this is None.
is_extension: True iff this describes an extension field.
extension_scope (Descriptor): Only meaningful if is_extension is True.
Gives the message that immediately contains this extension field.
Will be None iff we're a top-level (file-level) extension field.
options (descriptor_pb2.FieldOptions): Protocol message field options or
None to use default field options.
containing_oneof (OneofDescriptor): If the field is a member of a oneof
union, contains its descriptor. Otherwise, None.
file (FileDescriptor): Reference to file descriptor.
"""
# Must be consistent with C++ FieldDescriptor::Type enum in
# descriptor.h.
#
# TODO(robinson): Find a way to eliminate this repetition.
TYPE_DOUBLE = 1
TYPE_FLOAT = 2
TYPE_INT64 = 3
TYPE_UINT64 = 4
TYPE_INT32 = 5
TYPE_FIXED64 = 6
TYPE_FIXED32 = 7
TYPE_BOOL = 8
TYPE_STRING = 9
TYPE_GROUP = 10
TYPE_MESSAGE = 11
TYPE_BYTES = 12
TYPE_UINT32 = 13
TYPE_ENUM = 14
TYPE_SFIXED32 = 15
TYPE_SFIXED64 = 16
TYPE_SINT32 = 17
TYPE_SINT64 = 18
MAX_TYPE = 18
# Must be consistent with C++ FieldDescriptor::CppType enum in
# descriptor.h.
#
# TODO(robinson): Find a way to eliminate this repetition.
CPPTYPE_INT32 = 1
CPPTYPE_INT64 = 2
CPPTYPE_UINT32 = 3
CPPTYPE_UINT64 = 4
CPPTYPE_DOUBLE = 5
CPPTYPE_FLOAT = 6
CPPTYPE_BOOL = 7
CPPTYPE_ENUM = 8
CPPTYPE_STRING = 9
CPPTYPE_MESSAGE = 10
MAX_CPPTYPE = 10
_PYTHON_TO_CPP_PROTO_TYPE_MAP = {
TYPE_DOUBLE: CPPTYPE_DOUBLE,
TYPE_FLOAT: CPPTYPE_FLOAT,
TYPE_ENUM: CPPTYPE_ENUM,
TYPE_INT64: CPPTYPE_INT64,
TYPE_SINT64: CPPTYPE_INT64,
TYPE_SFIXED64: CPPTYPE_INT64,
TYPE_UINT64: CPPTYPE_UINT64,
TYPE_FIXED64: CPPTYPE_UINT64,
TYPE_INT32: CPPTYPE_INT32,
TYPE_SFIXED32: CPPTYPE_INT32,
TYPE_SINT32: CPPTYPE_INT32,
TYPE_UINT32: CPPTYPE_UINT32,
TYPE_FIXED32: CPPTYPE_UINT32,
TYPE_BYTES: CPPTYPE_STRING,
TYPE_STRING: CPPTYPE_STRING,
TYPE_BOOL: CPPTYPE_BOOL,
TYPE_MESSAGE: CPPTYPE_MESSAGE,
TYPE_GROUP: CPPTYPE_MESSAGE
}
# Must be consistent with C++ FieldDescriptor::Label enum in
# descriptor.h.
#
# TODO(robinson): Find a way to eliminate this repetition.
LABEL_OPTIONAL = 1
LABEL_REQUIRED = 2
LABEL_REPEATED = 3
MAX_LABEL = 3
# Must be consistent with C++ constants kMaxNumber, kFirstReservedNumber,
# and kLastReservedNumber in descriptor.h
MAX_FIELD_NUMBER = (1 << 29) - 1
FIRST_RESERVED_FIELD_NUMBER = 19000
LAST_RESERVED_FIELD_NUMBER = 19999
if _USE_C_DESCRIPTORS:
_C_DESCRIPTOR_CLASS = _message.FieldDescriptor
def __new__(cls, name, full_name, index, number, type, cpp_type, label,
default_value, message_type, enum_type, containing_type,
is_extension, extension_scope, options=None,
serialized_options=None,
has_default_value=True, containing_oneof=None, json_name=None,
file=None, create_key=None): # pylint: disable=redefined-builtin
_message.Message._CheckCalledFromGeneratedFile()
if is_extension:
return _message.default_pool.FindExtensionByName(full_name)
else:
return _message.default_pool.FindFieldByName(full_name)
def __init__(self, name, full_name, index, number, type, cpp_type, label,
default_value, message_type, enum_type, containing_type,
is_extension, extension_scope, options=None,
serialized_options=None,
has_default_value=True, containing_oneof=None, json_name=None,
file=None, create_key=None): # pylint: disable=redefined-builtin
"""The arguments are as described in the description of FieldDescriptor
attributes above.
Note that containing_type may be None, and may be set later if necessary
(to deal with circular references between message types, for example).
Likewise for extension_scope.
"""
if create_key is not _internal_create_key:
_Deprecated('FieldDescriptor')
super(FieldDescriptor, self).__init__(
options, serialized_options, 'FieldOptions')
self.name = name
self.full_name = full_name
self.file = file
self._camelcase_name = None
if json_name is None:
self.json_name = _ToJsonName(name)
else:
self.json_name = json_name
self.index = index
self.number = number
self.type = type
self.cpp_type = cpp_type
self.label = label
self.has_default_value = has_default_value
self.default_value = default_value
self.containing_type = containing_type
self.message_type = message_type
self.enum_type = enum_type
self.is_extension = is_extension
self.extension_scope = extension_scope
self.containing_oneof = containing_oneof
if api_implementation.Type() == 'cpp':
if is_extension:
self._cdescriptor = _message.default_pool.FindExtensionByName(full_name)
else:
self._cdescriptor = _message.default_pool.FindFieldByName(full_name)
else:
self._cdescriptor = None
@property
def camelcase_name(self):
"""Camelcase name of this field.
Returns:
str: the name in CamelCase.
"""
if self._camelcase_name is None:
self._camelcase_name = _ToCamelCase(self.name)
return self._camelcase_name
@staticmethod
def ProtoTypeToCppProtoType(proto_type):
"""Converts from a Python proto type to a C++ Proto Type.
The Python ProtocolBuffer classes specify both the 'Python' datatype and the
'C++' datatype - and they're not the same. This helper method should
translate from one to another.
Args:
proto_type: the Python proto type (descriptor.FieldDescriptor.TYPE_*)
Returns:
int: descriptor.FieldDescriptor.CPPTYPE_*, the C++ type.
Raises:
TypeTransformationError: when the Python proto type isn't known.
"""
try:
return FieldDescriptor._PYTHON_TO_CPP_PROTO_TYPE_MAP[proto_type]
except KeyError:
raise TypeTransformationError('Unknown proto_type: %s' % proto_type)
class EnumDescriptor(_NestedDescriptorBase):
"""Descriptor for an enum defined in a .proto file.
Attributes:
name (str): Name of the enum type.
full_name (str): Full name of the type, including package name
and any enclosing type(s).
values (list[EnumValueDescriptors]): List of the values
in this enum.
values_by_name (dict(str, EnumValueDescriptor)): Same as :attr:`values`,
but indexed by the "name" field of each EnumValueDescriptor.
values_by_number (dict(int, EnumValueDescriptor)): Same as :attr:`values`,
but indexed by the "number" field of each EnumValueDescriptor.
containing_type (Descriptor): Descriptor of the immediate containing
type of this enum, or None if this is an enum defined at the
top level in a .proto file. Set by Descriptor's constructor
if we're passed into one.
file (FileDescriptor): Reference to file descriptor.
options (descriptor_pb2.EnumOptions): Enum options message or
None to use default enum options.
"""
if _USE_C_DESCRIPTORS:
_C_DESCRIPTOR_CLASS = _message.EnumDescriptor
def __new__(cls, name, full_name, filename, values,
containing_type=None, options=None,
serialized_options=None, file=None, # pylint: disable=redefined-builtin
serialized_start=None, serialized_end=None, create_key=None):
_message.Message._CheckCalledFromGeneratedFile()
return _message.default_pool.FindEnumTypeByName(full_name)
def __init__(self, name, full_name, filename, values,
containing_type=None, options=None,
serialized_options=None, file=None, # pylint: disable=redefined-builtin
serialized_start=None, serialized_end=None, create_key=None):
"""Arguments are as described in the attribute description above.
Note that filename is an obsolete argument, that is not used anymore.
Please use file.name to access this as an attribute.
"""
if create_key is not _internal_create_key:
_Deprecated('EnumDescriptor')
super(EnumDescriptor, self).__init__(
options, 'EnumOptions', name, full_name, file,
containing_type, serialized_start=serialized_start,
serialized_end=serialized_end, serialized_options=serialized_options)
self.values = values
for value in self.values:
value.type = self
self.values_by_name = dict((v.name, v) for v in values)
# Values are reversed to ensure that the first alias is retained.
self.values_by_number = dict((v.number, v) for v in reversed(values))
def CopyToProto(self, proto):
"""Copies this to a descriptor_pb2.EnumDescriptorProto.
Args:
proto (descriptor_pb2.EnumDescriptorProto): An empty descriptor proto.
"""
# This function is overridden to give a better doc comment.
super(EnumDescriptor, self).CopyToProto(proto)
class EnumValueDescriptor(DescriptorBase):
"""Descriptor for a single value within an enum.
Attributes:
name (str): Name of this value.
index (int): Dense, 0-indexed index giving the order that this
value appears textually within its enum in the .proto file.
number (int): Actual number assigned to this enum value.
type (EnumDescriptor): :class:`EnumDescriptor` to which this value
belongs. Set by :class:`EnumDescriptor`'s constructor if we're
passed into one.
options (descriptor_pb2.EnumValueOptions): Enum value options message or
      None to use default enum value options.
"""
if _USE_C_DESCRIPTORS:
_C_DESCRIPTOR_CLASS = _message.EnumValueDescriptor
def __new__(cls, name, index, number,
type=None, # pylint: disable=redefined-builtin
options=None, serialized_options=None, create_key=None):
_message.Message._CheckCalledFromGeneratedFile()
# There is no way we can build a complete EnumValueDescriptor with the
# given parameters (the name of the Enum is not known, for example).
# Fortunately generated files just pass it to the EnumDescriptor()
# constructor, which will ignore it, so returning None is good enough.
return None
def __init__(self, name, index, number,
type=None, # pylint: disable=redefined-builtin
options=None, serialized_options=None, create_key=None):
"""Arguments are as described in the attribute description above."""
if create_key is not _internal_create_key:
_Deprecated('EnumValueDescriptor')
super(EnumValueDescriptor, self).__init__(
options, serialized_options, 'EnumValueOptions')
self.name = name
self.index = index
self.number = number
self.type = type
class OneofDescriptor(DescriptorBase):
"""Descriptor for a oneof field.
Attributes:
name (str): Name of the oneof field.
full_name (str): Full name of the oneof field, including package name.
index (int): 0-based index giving the order of the oneof field inside
its containing type.
containing_type (Descriptor): :class:`Descriptor` of the protocol message
type that contains this field. Set by the :class:`Descriptor` constructor
if we're passed into one.
fields (list[FieldDescriptor]): The list of field descriptors this
oneof can contain.
"""
if _USE_C_DESCRIPTORS:
_C_DESCRIPTOR_CLASS = _message.OneofDescriptor
def __new__(
cls, name, full_name, index, containing_type, fields, options=None,
serialized_options=None, create_key=None):
_message.Message._CheckCalledFromGeneratedFile()
return _message.default_pool.FindOneofByName(full_name)
def __init__(
self, name, full_name, index, containing_type, fields, options=None,
serialized_options=None, create_key=None):
"""Arguments are as described in the attribute description above."""
if create_key is not _internal_create_key:
_Deprecated('OneofDescriptor')
super(OneofDescriptor, self).__init__(
options, serialized_options, 'OneofOptions')
self.name = name
self.full_name = full_name
self.index = index
self.containing_type = containing_type
self.fields = fields
class ServiceDescriptor(_NestedDescriptorBase):
"""Descriptor for a service.
Attributes:
name (str): Name of the service.
full_name (str): Full name of the service, including package name.
index (int): 0-indexed index giving the order that this services
definition appears within the .proto file.
methods (list[MethodDescriptor]): List of methods provided by this
service.
methods_by_name (dict(str, MethodDescriptor)): Same
:class:`MethodDescriptor` objects as in :attr:`methods_by_name`, but
indexed by "name" attribute in each :class:`MethodDescriptor`.
options (descriptor_pb2.ServiceOptions): Service options message or
None to use default service options.
file (FileDescriptor): Reference to file info.
"""
if _USE_C_DESCRIPTORS:
_C_DESCRIPTOR_CLASS = _message.ServiceDescriptor
def __new__(cls, name, full_name, index, methods, options=None,
serialized_options=None, file=None, # pylint: disable=redefined-builtin
serialized_start=None, serialized_end=None, create_key=None):
_message.Message._CheckCalledFromGeneratedFile() # pylint: disable=protected-access
return _message.default_pool.FindServiceByName(full_name)
def __init__(self, name, full_name, index, methods, options=None,
serialized_options=None, file=None, # pylint: disable=redefined-builtin
serialized_start=None, serialized_end=None, create_key=None):
if create_key is not _internal_create_key:
_Deprecated('ServiceDescriptor')
super(ServiceDescriptor, self).__init__(
options, 'ServiceOptions', name, full_name, file,
None, serialized_start=serialized_start,
serialized_end=serialized_end, serialized_options=serialized_options)
self.index = index
self.methods = methods
self.methods_by_name = dict((m.name, m) for m in methods)
# Set the containing service for each method in this service.
for method in self.methods:
method.containing_service = self
def FindMethodByName(self, name):
"""Searches for the specified method, and returns its descriptor.
Args:
name (str): Name of the method.
Returns:
MethodDescriptor or None: the descriptor for the requested method, if
found.
"""
return self.methods_by_name.get(name, None)
def CopyToProto(self, proto):
"""Copies this to a descriptor_pb2.ServiceDescriptorProto.
Args:
proto (descriptor_pb2.ServiceDescriptorProto): An empty descriptor proto.
"""
# This function is overridden to give a better doc comment.
super(ServiceDescriptor, self).CopyToProto(proto)
class MethodDescriptor(DescriptorBase):
"""Descriptor for a method in a service.
Attributes:
name (str): Name of the method within the service.
full_name (str): Full name of method.
index (int): 0-indexed index of the method inside the service.
containing_service (ServiceDescriptor): The service that contains this
method.
input_type (Descriptor): The descriptor of the message that this method
accepts.
output_type (Descriptor): The descriptor of the message that this method
returns.
options (descriptor_pb2.MethodOptions or None): Method options message, or
None to use default method options.
"""
if _USE_C_DESCRIPTORS:
_C_DESCRIPTOR_CLASS = _message.MethodDescriptor
def __new__(cls, name, full_name, index, containing_service,
input_type, output_type, options=None, serialized_options=None,
create_key=None):
_message.Message._CheckCalledFromGeneratedFile() # pylint: disable=protected-access
return _message.default_pool.FindMethodByName(full_name)
def __init__(self, name, full_name, index, containing_service,
input_type, output_type, options=None, serialized_options=None,
create_key=None):
"""The arguments are as described in the description of MethodDescriptor
attributes above.
Note that containing_service may be None, and may be set later if necessary.
"""
if create_key is not _internal_create_key:
_Deprecated('MethodDescriptor')
super(MethodDescriptor, self).__init__(
options, serialized_options, 'MethodOptions')
self.name = name
self.full_name = full_name
self.index = index
self.containing_service = containing_service
self.input_type = input_type
self.output_type = output_type
class FileDescriptor(DescriptorBase):
"""Descriptor for a file. Mimics the descriptor_pb2.FileDescriptorProto.
Note that :attr:`enum_types_by_name`, :attr:`extensions_by_name`, and
:attr:`dependencies` fields are only set by the
:py:mod:`google.protobuf.message_factory` module, and not by the generated
proto code.
Attributes:
name (str): Name of file, relative to root of source tree.
package (str): Name of the package
syntax (str): string indicating syntax of the file (can be "proto2" or
"proto3")
serialized_pb (bytes): Byte string of serialized
:class:`descriptor_pb2.FileDescriptorProto`.
dependencies (list[FileDescriptor]): List of other :class:`FileDescriptor`
objects this :class:`FileDescriptor` depends on.
public_dependencies (list[FileDescriptor]): A subset of
:attr:`dependencies`, which were declared as "public".
message_types_by_name (dict(str, Descriptor)): Mapping from message names
      to their :class:`Descriptor`.
enum_types_by_name (dict(str, EnumDescriptor)): Mapping from enum names to
their :class:`EnumDescriptor`.
extensions_by_name (dict(str, FieldDescriptor)): Mapping from extension
names declared at file scope to their :class:`FieldDescriptor`.
services_by_name (dict(str, ServiceDescriptor)): Mapping from services'
names to their :class:`ServiceDescriptor`.
pool (DescriptorPool): The pool this descriptor belongs to. When not
passed to the constructor, the global default pool is used.
"""
if _USE_C_DESCRIPTORS:
_C_DESCRIPTOR_CLASS = _message.FileDescriptor
def __new__(cls, name, package, options=None,
serialized_options=None, serialized_pb=None,
dependencies=None, public_dependencies=None,
syntax=None, pool=None, create_key=None):
# FileDescriptor() is called from various places, not only from generated
# files, to register dynamic proto files and messages.
# pylint: disable=g-explicit-bool-comparison
if serialized_pb == b'':
# Cpp generated code must be linked in if serialized_pb is ''
try:
return _message.default_pool.FindFileByName(name)
except KeyError:
raise RuntimeError('Please link in cpp generated lib for %s' % (name))
elif serialized_pb:
return _message.default_pool.AddSerializedFile(serialized_pb)
else:
return super(FileDescriptor, cls).__new__(cls)
def __init__(self, name, package, options=None,
serialized_options=None, serialized_pb=None,
dependencies=None, public_dependencies=None,
syntax=None, pool=None, create_key=None):
"""Constructor."""
if create_key is not _internal_create_key:
_Deprecated('FileDescriptor')
super(FileDescriptor, self).__init__(
options, serialized_options, 'FileOptions')
if pool is None:
from google.protobuf import descriptor_pool
pool = descriptor_pool.Default()
self.pool = pool
self.message_types_by_name = {}
self.name = name
self.package = package
self.syntax = syntax or "proto2"
self.serialized_pb = serialized_pb
self.enum_types_by_name = {}
self.extensions_by_name = {}
self.services_by_name = {}
self.dependencies = (dependencies or [])
self.public_dependencies = (public_dependencies or [])
def CopyToProto(self, proto):
"""Copies this to a descriptor_pb2.FileDescriptorProto.
Args:
proto: An empty descriptor_pb2.FileDescriptorProto.
"""
proto.ParseFromString(self.serialized_pb)
def _ParseOptions(message, string):
"""Parses serialized options.
This helper function is used to parse serialized options in generated
proto2 files. It must not be used outside proto2.
"""
message.ParseFromString(string)
return message
def _ToCamelCase(name):
"""Converts name to camel-case and returns it."""
capitalize_next = False
result = []
for c in name:
if c == '_':
if result:
capitalize_next = True
elif capitalize_next:
result.append(c.upper())
capitalize_next = False
else:
result += c
# Lower-case the first letter.
if result and result[0].isupper():
result[0] = result[0].lower()
return ''.join(result)
def _OptionsOrNone(descriptor_proto):
"""Returns the value of the field `options`, or None if it is not set."""
if descriptor_proto.HasField('options'):
return descriptor_proto.options
else:
return None
def _ToJsonName(name):
"""Converts name to Json name and returns it."""
capitalize_next = False
result = []
for c in name:
if c == '_':
capitalize_next = True
elif capitalize_next:
result.append(c.upper())
capitalize_next = False
else:
result += c
return ''.join(result)
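# For illustration (behaviour derived from the two helpers above):
#   _ToCamelCase('foo_bar')  -> 'fooBar'
#   _ToJsonName('foo_bar')   -> 'fooBar'
#   _ToCamelCase('_status')  -> 'status'  (a leading underscore is dropped)
#   _ToJsonName('_status')   -> 'Status'  (a leading underscore capitalizes)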
def MakeDescriptor(desc_proto, package='', build_file_if_cpp=True,
syntax=None):
"""Make a protobuf Descriptor given a DescriptorProto protobuf.
Handles nested descriptors. Note that this is limited to the scope of defining
a message inside of another message. Composite fields can currently only be
resolved if the message is defined in the same scope as the field.
Args:
desc_proto: The descriptor_pb2.DescriptorProto protobuf message.
package: Optional package name for the new message Descriptor (string).
build_file_if_cpp: Update the C++ descriptor pool if api matches.
Set to False on recursion, so no duplicates are created.
syntax: The syntax/semantics that should be used. Set to "proto3" to get
proto3 field presence semantics.
Returns:
A Descriptor for protobuf messages.
"""
if api_implementation.Type() == 'cpp' and build_file_if_cpp:
# The C++ implementation requires all descriptors to be backed by the same
# definition in the C++ descriptor pool. To do this, we build a
# FileDescriptorProto with the same definition as this descriptor and build
# it into the pool.
from google.protobuf import descriptor_pb2
file_descriptor_proto = descriptor_pb2.FileDescriptorProto()
file_descriptor_proto.message_type.add().MergeFrom(desc_proto)
# Generate a random name for this proto file to prevent conflicts with any
# imported ones. We need to specify a file name so the descriptor pool
# accepts our FileDescriptorProto, but it is not important what that file
# name is actually set to.
proto_name = binascii.hexlify(os.urandom(16)).decode('ascii')
if package:
file_descriptor_proto.name = os.path.join(package.replace('.', '/'),
proto_name + '.proto')
file_descriptor_proto.package = package
else:
file_descriptor_proto.name = proto_name + '.proto'
_message.default_pool.Add(file_descriptor_proto)
result = _message.default_pool.FindFileByName(file_descriptor_proto.name)
if _USE_C_DESCRIPTORS:
return result.message_types_by_name[desc_proto.name]
full_message_name = [desc_proto.name]
if package: full_message_name.insert(0, package)
# Create Descriptors for enum types
enum_types = {}
for enum_proto in desc_proto.enum_type:
full_name = '.'.join(full_message_name + [enum_proto.name])
enum_desc = EnumDescriptor(
enum_proto.name, full_name, None, [
EnumValueDescriptor(enum_val.name, ii, enum_val.number,
create_key=_internal_create_key)
for ii, enum_val in enumerate(enum_proto.value)],
create_key=_internal_create_key)
enum_types[full_name] = enum_desc
# Create Descriptors for nested types
nested_types = {}
for nested_proto in desc_proto.nested_type:
full_name = '.'.join(full_message_name + [nested_proto.name])
# Nested types are just those defined inside of the message, not all types
# used by fields in the message, so no loops are possible here.
nested_desc = MakeDescriptor(nested_proto,
package='.'.join(full_message_name),
build_file_if_cpp=False,
syntax=syntax)
nested_types[full_name] = nested_desc
fields = []
for field_proto in desc_proto.field:
full_name = '.'.join(full_message_name + [field_proto.name])
enum_desc = None
nested_desc = None
if field_proto.json_name:
json_name = field_proto.json_name
else:
json_name = None
if field_proto.HasField('type_name'):
type_name = field_proto.type_name
full_type_name = '.'.join(full_message_name +
[type_name[type_name.rfind('.')+1:]])
if full_type_name in nested_types:
nested_desc = nested_types[full_type_name]
elif full_type_name in enum_types:
enum_desc = enum_types[full_type_name]
# Else type_name references a non-local type, which isn't implemented
field = FieldDescriptor(
field_proto.name, full_name, field_proto.number - 1,
field_proto.number, field_proto.type,
FieldDescriptor.ProtoTypeToCppProtoType(field_proto.type),
field_proto.label, None, nested_desc, enum_desc, None, False, None,
options=_OptionsOrNone(field_proto), has_default_value=False,
json_name=json_name, create_key=_internal_create_key)
fields.append(field)
desc_name = '.'.join(full_message_name)
return Descriptor(desc_proto.name, desc_name, None, None, fields,
list(nested_types.values()), list(enum_types.values()), [],
options=_OptionsOrNone(desc_proto),
create_key=_internal_create_key)
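if __name__ == '__main__':
  # Minimal usage sketch (illustrative only, with hand-picked names and field
  # numbers): build a DescriptorProto by hand and turn it into a Descriptor
  # with MakeDescriptor, then look the field back up by name.
  from google.protobuf import descriptor_pb2
  _example_proto = descriptor_pb2.DescriptorProto()
  _example_proto.name = 'Example'
  _example_field = _example_proto.field.add()
  _example_field.name = 'id'
  _example_field.number = 1
  _example_field.type = descriptor_pb2.FieldDescriptorProto.TYPE_INT64
  _example_field.label = descriptor_pb2.FieldDescriptorProto.LABEL_OPTIONAL
  _example_desc = MakeDescriptor(_example_proto)
  print('%s %d' % (_example_desc.full_name,
                   _example_desc.fields_by_name['id'].number))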
|
{
"content_hash": "2b0503aaae1409377a04d92596aac769",
"timestamp": "",
"source": "github",
"line_count": 1112,
"max_line_length": 107,
"avg_line_length": 38.40467625899281,
"alnum_prop": 0.6758066782185173,
"repo_name": "scheib/chromium",
"id": "7583ea39f7a2ee5cd2f6e2f763a9d8c7c610b1db",
"size": "44337",
"binary": false,
"copies": "3",
"ref": "refs/heads/main",
"path": "third_party/protobuf/python/google/protobuf/descriptor.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
from django.conf.urls import url
from django.contrib import admin
from .views import (
post_list,
post_create,
post_detail,
post_update,
post_delete,
)
urlpatterns = [
url(r'^$', post_list, name='list'),
url(r'^create/$', post_create, name='create'),
url(r'^(?P<slug>[\w-]+)/$', post_detail, name='detail'),
url(r'^(?P<slug>[\w-]+)/edit/$', post_update, name='edit'),
url(r'^(?P<slug>[\w-]+)/delete/$', post_delete, name='delete'),
#url(r'^posts/$', "<appname>.views.<function_name>"),
]
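# Minimal sketch of how this URLconf might be mounted from a project's root
# urls.py (the 'posts' prefix and namespace below are assumptions, not taken
# from this repository):
#
#     from django.conf.urls import include, url
#     urlpatterns = [
#         url(r'^posts/', include('posts.urls', namespace='posts')),
#     ]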
|
{
"content_hash": "362d5911ca2a66c772993c0a879f39cd",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 67,
"avg_line_length": 27.157894736842106,
"alnum_prop": 0.6007751937984496,
"repo_name": "DJMedhaug/BizSprint",
"id": "9c2474bd6f3f265224bea00427aa147828f5c87c",
"size": "516",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "posts/urls.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "1242519"
},
{
"name": "HTML",
"bytes": "250009"
},
{
"name": "JavaScript",
"bytes": "965426"
},
{
"name": "PHP",
"bytes": "390755"
},
{
"name": "Python",
"bytes": "129777"
},
{
"name": "Shell",
"bytes": "3675"
}
],
"symlink_target": ""
}
|
import sys
import pytest
from pathlib import Path
from unittest import mock
from growler.middleware.renderer import StringRenderer
from growler.http.response import HTTPResponse
@pytest.fixture
def res():
return mock.create_autospec(HTTPResponse)
@pytest.fixture
def ren(res):
return StringRenderer()
@pytest.fixture
def viewdir(tmpdir):
return Path(str(tmpdir))
@pytest.fixture
def sr(viewdir):
return StringRenderer(viewdir)
def test_string_renderer_fixture(sr):
assert isinstance(sr, StringRenderer)
def test_render_file(sr, viewdir):
txt = """Hello World"""
view = viewdir.joinpath("hello.html")
view.touch()
if sys.version_info < (3, 5): # python3.4 compat
with open(str(view), 'w') as file:
file.write(txt)
else:
view.write_text(txt)
res = sr.render_source("hello.html")
assert res == txt
|
{
"content_hash": "90a96945d0bfe7ead25dafc62c786739",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 54,
"avg_line_length": 20.558139534883722,
"alnum_prop": 0.6934389140271493,
"repo_name": "pyGrowler/Growler",
"id": "a9f7fba18169b4c3d0b8225e4705123b86c46519",
"size": "1053",
"binary": false,
"copies": "2",
"ref": "refs/heads/dev",
"path": "tests/test_mw_string_renderer.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "249705"
}
],
"symlink_target": ""
}
|
import os
from bruker2nifti._utils import bruker_read_files
from bruker2nifti._getters import get_list_scans, get_subject_name
from bruker2nifti._cores import scan2struct, write_struct
class Bruker2Nifti(object):
"""
    Facade to collect user preferences on the conversion and to access the core methods for the conversion
    (scan2struct, write_struct).
Nomenclature:
    study: a series of acquisitions related to the same subject, acquired in the same scanning session and usually
    containing multiple scans. It is provided as a folder structure containing the scans produced with the ParaVision (PV)
    software. Patient/subject information is embedded in the study (the opposite hierarchy to DICOM files).
    scan or experiment, sub-scans and sub-volumes: an individual folder of images acquired with various protocols. A scan
    can contain more than one processed image, or reconstruction. Each processed image can be a single volume or can
    contain more than one sub-volume embedded in the same processed image.
    header: header of the nifti format.
    img_data: data of the nifti format, stored in a 2d or 3d matrix.
    struct: intermediate structure (python dictionary) proposed in this code, aimed at collecting the information from
    the raw Bruker data and progressively building the nifti images.
"""
def __init__(self, pfo_study_bruker_input, pfo_study_nifti_output, study_name=None):
"""
Initialise the Facade class to the converter.
:param pfo_study_bruker_input: path to folder (pfo) to the Bruker input data folder.
:param pfo_study_nifti_output: path to folder (pfo) where the converted study will be stored.
:param study_name: optional name of the study. If None, the name parsed from the Bruker study will be used.
"""
self.pfo_study_bruker_input = pfo_study_bruker_input
self.pfo_study_nifti_output = pfo_study_nifti_output
self.study_name = study_name
# converter settings for the nifti values
self.nifti_version = 1
self.qform_code = 1
self.sform_code = 2
self.save_human_readable = True
self.save_b0_if_dwi = (
True
        )  # if DWI, it saves the first layer as a single nifti image.
self.correct_slope = True
self.correct_offset = True
# advanced sample positioning
self.sample_upside_down = False
self.frame_body_as_frame_head = False
# chose to convert extra files:
self.get_acqp = False
self.get_method = False
self.get_reco = False
# advanced selections:
self.scans_list = (
None
) # you can select a subset of scans in the study to be converted.
self.list_new_name_each_scan = (
None
) # you can select specific names for the subset self.scans_list.
self.verbose = 1
# automatic filling of advanced selections class attributes
self.explore_study()
def explore_study(self):
"""
Automatic filling of the advanced selections class attributes.
It also checks if the given attributes are meaningful.
:return:
"""
if not os.path.isdir(self.pfo_study_bruker_input):
raise IOError("Input folder does not exist.")
if not os.path.isdir(self.pfo_study_nifti_output):
raise IOError("Output folder does not exist.")
if self.scans_list is None:
self.scans_list = get_list_scans(
self.pfo_study_bruker_input, print_structure=False
)
assert isinstance(self.scans_list, list)
msg = (
"No scans found, are you sure the input folder contains a Bruker study?"
)
if not len(self.scans_list) > 0:
raise IOError(msg)
        if self.study_name is None or self.study_name == "":
_study_name = get_subject_name(self.pfo_study_bruker_input).replace(
" ", "_"
)
self.study_name = "".join(e for e in _study_name if e.isalnum())
if self.list_new_name_each_scan is None:
list_new_name_each_scan = [
self.study_name + "_" + ls for ls in self.scans_list
]
self.list_new_name_each_scan = list_new_name_each_scan
assert isinstance(self.list_new_name_each_scan, list)
# if self.list_new_nifti_file_names is None:
# self.list_new_nifti_file_names = self.list_new_name_each_scan
else:
if not len(self.scans_list) == len(self.list_new_name_each_scan):
msg = (
"list_name_each_scan {0} does not have the same amount of scans in the "
"study: {1}".format(self.list_new_name_each_scan, self.scans_list)
)
raise IOError(msg)
def show_study_structure(self):
"""
Print to console the structure of the selected study.
:return: [None] only print to console information.
"""
if not os.path.isdir(self.pfo_study_bruker_input):
raise IOError("Input folder does not exist.")
print("Study folder structure: ")
scans_list = get_list_scans(self.pfo_study_bruker_input)
print("\n")
print("List of scans: {}".format(scans_list))
pfi_first_scan = os.path.join(self.pfo_study_bruker_input, scans_list[0])
acqp = bruker_read_files("acqp", pfi_first_scan)
print("Version: {}".format(acqp["ACQ_sw_version"][0]))
def convert_scan(
self,
pfo_input_scan,
pfo_output_converted,
nifti_file_name=None,
create_output_folder_if_not_exists=True,
):
"""
:param pfo_input_scan: path to folder (pfo) containing a scan from Bruker, see documentation for the difference
between Bruker 'scan' and Bruker 'study'.
:param pfo_output_converted: path to the folder where the converted scan will be stored.
:param create_output_folder_if_not_exists: [True] if the output folder does not exist will be created.
:param nifti_file_name: [None] filename of the nifti image that will be saved into the pfo_output folder.
If None, the filename will be obtained from the parameter file of the study.
:return: [None] save the data parsed from the raw Bruker scan into a folder, including the nifti image.
"""
if not os.path.isdir(pfo_input_scan):
raise IOError("Input folder does not exist.")
if create_output_folder_if_not_exists:
os.makedirs(pfo_output_converted)
struct_scan = scan2struct(
pfo_input_scan,
correct_slope=self.correct_slope,
correct_offset=self.correct_offset,
sample_upside_down=self.sample_upside_down,
nifti_version=self.nifti_version,
qform_code=self.qform_code,
sform_code=self.sform_code,
get_acqp=self.get_acqp,
get_method=self.get_method,
get_reco=self.get_reco,
frame_body_as_frame_head=self.frame_body_as_frame_head,
)
if struct_scan is not None:
write_struct(
struct_scan,
pfo_output_converted,
fin_scan=nifti_file_name,
save_human_readable=self.save_human_readable,
save_b0_if_dwi=self.save_b0_if_dwi,
verbose=self.verbose,
)
def convert(self):
"""
To call the converter, once all the settings of the converter are selected and modified by the user.
:return: Convert the Bruker study, whose path is stored in the class variable self.pfo_study_bruker_input
in the specified folder stored in self.pfo_study_nifti_output, and according to the other class attributes.
Example:
>> bru = Bruker2Nifti('/path/to/my/study', '/path/output', study_name='study1')
>> bru.show_study_structure
>> bru.verbose = 2
>> bru.correct_slope = True
>> bru.get_acqp = False
>> bru.get_method = True # I want to see the method parameter file converted as well.
>> bru.get_reco = False
>> # Convert the study:
>> bru.convert()
"""
pfo_nifti_study = os.path.join(self.pfo_study_nifti_output, self.study_name)
os.makedirs(pfo_nifti_study)
print("\nStudy conversion \n{}\nstarted:\n".format(self.pfo_study_bruker_input))
for bruker_scan_name, scan_name in zip(
self.scans_list, self.list_new_name_each_scan
):
pfo_scan_bruker = os.path.join(
self.pfo_study_bruker_input, bruker_scan_name
)
pfo_scan_nifti = os.path.join(pfo_nifti_study, scan_name)
print("\nConverting experiment {}:\n".format(bruker_scan_name))
self.convert_scan(
pfo_scan_bruker,
pfo_scan_nifti,
create_output_folder_if_not_exists=True,
nifti_file_name=scan_name,
)
print("\nStudy converted and saved in \n{}".format(self.pfo_study_nifti_output))
|
{
"content_hash": "07a01c5016e4d07f6ad0d5ad629280ea",
"timestamp": "",
"source": "github",
"line_count": 218,
"max_line_length": 119,
"avg_line_length": 42.51376146788991,
"alnum_prop": 0.6159905049633146,
"repo_name": "SebastianoF/bruker2nifti",
"id": "d529ac216fa3f64c8b62b54f70eafe3c8cc2e729",
"size": "9268",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bruker2nifti/converter.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "133156"
},
{
"name": "Shell",
"bytes": "5173"
},
{
"name": "TeX",
"bytes": "891"
}
],
"symlink_target": ""
}
|
from setuptools import setup
import twitter_utils
setup(name='twitter_utils',
version=twitter_utils.version,
description='Series of utility functions for twitter',
url='http://github.com/geosoco/twitter_utils',
author='John Robinson',
author_email='pubsoco@geosoco.com',
license='BSD',
packages=['twitter_utils'],
platforms='any',
classifiers=[
'Development Status :: 3 - Alpha',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python',
])
|
{
"content_hash": "870b6b65582c72d54edb44c18c5f2208",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 60,
"avg_line_length": 31.941176470588236,
"alnum_prop": 0.6353591160220995,
"repo_name": "emCOMP/twitter_utils",
"id": "1df255b70fdbc9d1b78546d9fe85cfb9aa3ca04c",
"size": "591",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "5080"
}
],
"symlink_target": ""
}
|
import web
from anvillib.avatar import avatar, logo
import common
from hashlib import sha256
import user
class ProjectError(Exception):
def __init__(self, value):
self.parameter = value
def __str__(self):
return repr(self.parameter)
def count_proj():
counts = common.db.query("SELECT COUNT(id) AS count FROM project")
return counts[0].count
def list_proj():
return common.db.query('SELECT * FROM project ORDER BY created DESC')
def get_user_projects(user):
results = common.db.query("SELECT project FROM commiters WHERE user='%d'" % user.id)
projs = []
for row in results:
projs.append(Project(id=row.project))
return projs
class Project:
id = None
name = ""
owner = ""
homepage = ""
description = ""
created = ""
def __init__(self, name=None, id=None):
if name != None or id != None:
where = ""
if id != None:
where="id=" + web.db.sqlquote(id)
elif name != None:
where="name=" + web.db.sqlquote(name)
projects = common.db.select('project', where=where)
if len(projects) == 0:
raise ProjectError("Unknown project")
else:
proj = projects[0]
self.id = proj.id
self.name = proj.name
self.homepage = proj.homepage
self.description = proj.description
self.created = proj.created
self.owner = user.User(id=proj.owner)
#end __init__
def save(self):
if self.id != None:
common.db.update('project', where="id=" + web.db.sqlquote(self.id),
name=self.name, description=self.description, owner=self.owner.id,
homepage=self.homepage)
else:
self.id = common.db.insert('project', name=self.name, seqname="id",
description=self.description, owner=self.owner.id,
homepage=self.homepage)
#end save
def logo(self):
return logo(self.homepage)
#end logo
def isadmin(self, username):
commiters = self.get_commiters()
for c in commiters:
if c.name == username:
return True
return False
def get_commiters(self):
"""Returns the list of members of the project."""
commiters_a = common.db.select('commiters', where=("project=%d" % self.id))
commiters = []
for c in commiters_a:
commiters.append(user.User(id=c.user))
return commiters
def add_commiter(self, user_id):
"""Adds a commiter to the project."""
common.db.insert('commiters', project=self.id, user=user_id)
def rem_commiter(self, user_id):
"""Removes a commiter from the project."""
common.db.delete('commiters', where=("project=%d AND user=%d" % (self.id, user_id)))
#end Project
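# Minimal usage sketch (assumes common.db is configured and that a user row
# with the given id exists; the values below are purely illustrative):
#
#     proj = Project()
#     proj.name = "anvil"
#     proj.owner = user.User(id=1)
#     proj.description = "Example project"
#     proj.save()                       # INSERT on first save, UPDATE afterwards
#     proj.add_commiter(proj.owner.id)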
|
{
"content_hash": "ed06f87e91a3b3fff49e282810455f49",
"timestamp": "",
"source": "github",
"line_count": 95,
"max_line_length": 92,
"avg_line_length": 31.557894736842105,
"alnum_prop": 0.5583722481654436,
"repo_name": "Etenil/anvil",
"id": "36d84c8b18e7152f2b61ae7c46302786c06ad1dc",
"size": "2998",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "model/project.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "41953"
},
{
"name": "Python",
"bytes": "118009"
}
],
"symlink_target": ""
}
|
"""
Grammar and AST for query expressions.
"""
from __future__ import unicode_literals
import re
import pypeg2
from pypeg2 import Keyword, Symbol, attr, blank
import sqlalchemy
# Regex for value strings in clauses.
value_regex = re.compile(r'[^()\s]+')
# Check symbols for not being a keyword.
Symbol.check_keywords = True
class Node(object):
"""
Base class for all AST nodes.
"""
pass
class Leaf(Node):
"""
Base class for AST leaf nodes.
"""
def accept(self, visitor):
return visitor.visit(self)
class Tautology(Leaf):
"""
AST node for a tautology.
"""
grammar = '*'
def accept(self, visitor):
return visitor.visit(self)
class Clause(Leaf):
"""
AST node for a clause specifying some field to have some value.
Attributes:
`field`
A symbol as left operand in the clause.
`value`
A symbol as right operand in the clause.
"""
def __init__(self, field=None, value=None):
self.field = field
self.value = value
grammar = attr('field', Symbol), ':', attr('value', value_regex)
class ExpressionNode(Node):
"""
Base class for AST expression nodes.
Attributes:
`expression`
Expression contained in this node.
"""
def __init__(self, expression=None):
self.expression = expression
def accept(self, visitor):
return visitor.visit(self, self.expression.accept(visitor))
class Expression(ExpressionNode):
"""
Top-level AST node for an expression.
"""
pass
class Term(ExpressionNode):
"""
AST node for an expression term.
"""
pass
class Negation(ExpressionNode):
"""
AST node for a negated expression.
"""
grammar = Keyword('not'), blank, attr('expression', Term)
class Grouping(ExpressionNode):
"""
AST node for a grouped expression.
"""
grammar = ('(', attr('expression', Expression), ')'),
class BinaryNode(Node):
"""
    Base class for binary AST expression nodes.
Attributes:
`left`
An expression as left operand.
`right`
An expression as right operand.
"""
def __init__(self, left=None, right=None):
self.left = left
self.right = right
def accept(self, visitor):
return visitor.visit(self,
self.left.accept(visitor),
self.right.accept(visitor))
class Conjunction(BinaryNode):
"""
AST node for a conjunction.
"""
grammar = (attr('left', Term), blank, Keyword('and'), blank,
attr('right', Expression))
class Disjunction(BinaryNode):
"""
AST node for a disjunction.
"""
grammar = (attr('left', Term), blank, Keyword('or'), blank,
attr('right', Expression))
Term.grammar = attr('expression', [Grouping,
Negation,
Clause,
Tautology])
Expression.grammar = attr('expression', [Conjunction,
Disjunction,
Term])
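# Illustrative note (not in the original module): with the grammars wired up as
# above, pypeg2.parse('a:1 and not b:2', Expression) yields, roughly,
#     Expression(Conjunction(left=Term(Clause('a', '1')),
#                            right=Expression(Term(Negation(Term(Clause('b', '2')))))))
# where 'a' and 'b' are made-up field names.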
class Visitor(object):
"""
Decorator for visitor methods on AST nodes.
>>> class PrettyPrinter(object):
... visitor = Visitor()
...
... @visitor(Tautology)
... def visit(self, node):
    ...         return 'true'
...
... @visitor(Conjunction)
... def visit(self, node, left, right):
... return '%s and %s' % (left, right)
"""
def __init__(self, base=None):
"""
Create a new visitor method decorator.
:arg base: Base visitor to use as fallback in method resolution.
:type base: Visitor
"""
self._methods = {}
self.base = base
def register_method(self, node_type, method):
"""
Register `method` as visitor method for nodes of type `node_type`.
"""
self._methods[node_type] = method
def resolve(self, node_type):
"""
Find visitor method for nodes of type `node_type`.
.. note:: First the entire subtype chain for `node_type` (following
method resolution order) is tried. After that, the same thing is
done in the base visitor.
"""
for cls in node_type.__mro__:
try:
return self._methods[cls]
except KeyError:
pass
if self.base:
return self.base.resolve(node_type)
raise KeyError(node_type)
def __call__(self, node_type):
"""
Create a visitor method.
"""
def visit(self_, node, *args, **kwargs):
method = self.resolve(type(node))
return method(self_, node, *args, **kwargs)
def decorator(method):
self.register_method(node_type, method)
return visit
return decorator
class Identity(object):
"""
Identity function on query expression ASTs (changes nothing).
Creates a deep copy of the visited AST. Use this as a base for visitors
that change specific parts of the AST. For example, this rewrites
conjunctions to disjunctions::
>>> class Switcher(object):
    ...     visitor = Visitor(Identity.visitor)
...
... @visitor(Conjunction)
... def visit(self, node, left, right):
... new_node = Disjunction()
... new_node.left = left
... new_node.right = right
... return new_node
"""
visitor = Visitor()
@visitor(Tautology)
def visit(self, node):
return Tautology()
@visitor(Clause)
def visit(self, node):
return Clause(Symbol(node.field), node.value)
@visitor(Expression)
def visit(self, node, expression):
return Expression(expression)
@visitor(Term)
def visit(self, node, expression):
return Term(expression)
@visitor(Negation)
def visit(self, node, expression):
return Negation(expression)
@visitor(Grouping)
def visit(self, node, expression):
return Grouping(expression)
@visitor(Conjunction)
def visit(self, node, left, right):
return Conjunction(left, right)
@visitor(Disjunction)
def visit(self, node, left, right):
return Disjunction(left, right)
class PrettyPrinter(object):
"""
Pretty printer for query expression ASTs.
Should yield the same as `pypeg2.compose`.
"""
visitor = Visitor()
@visitor(Tautology)
def visit(self, node):
return '*'
@visitor(Clause)
def visit(self, node):
return '%s:%s' % (node.field, node.value)
@visitor(ExpressionNode)
def visit(self, node, expression):
return expression
@visitor(Negation)
def visit(self, node, expression):
return 'not %s' % expression
@visitor(Grouping)
def visit(self, node, expression):
return '(%s)' % expression
@visitor(Conjunction)
def visit(self, node, left, right):
return '%s and %s' % (left, right)
@visitor(Disjunction)
def visit(self, node, left, right):
return '%s or %s' % (left, right)
class QueryCriterionBuilder(object):
"""
Create an SQLAlchemy filter criterion from a query expression AST.
"""
visitor = Visitor()
def __init__(self, build_clause):
"""
The `build_clause` argument should be a function that, given a clause
field name and value, returns a corresponding SQLAlchemy filter
criterion for the clause.
"""
self.build_clause = build_clause
@visitor(Tautology)
def visit(self, node):
return sqlalchemy.true()
@visitor(Clause)
def visit(self, node):
return self.build_clause(node.field, node.value)
@visitor(ExpressionNode)
def visit(self, node, expression):
return expression
@visitor(Negation)
def visit(self, node, expression):
return ~expression
@visitor(Conjunction)
def visit(self, node, left, right):
return sqlalchemy.and_(left, right)
@visitor(Disjunction)
def visit(self, node, left, right):
return sqlalchemy.or_(left, right)
class ClauseValueUpdater(object):
"""
Update values in all clauses in a query expression AST.
"""
visitor = Visitor(Identity.visitor)
def __init__(self, update_value):
"""
The `update_value` argument should be a function that, given a clause
        field name and value, returns a new value for the clause.
"""
self.update_value = update_value
@visitor(Clause)
def visit(self, node):
return Clause(Symbol(node.field),
self.update_value(node.field, node.value))
class TautologyTester(object):
"""
Test if a query expression AST matches exactly '*' (syntactically).
"""
visitor = Visitor()
@visitor(Node)
def visit(self, node, *args):
return False
@visitor(Term)
def visit(self, node, expression):
return expression
@visitor(Expression)
def visit(self, node, expression):
return expression
@visitor(Tautology)
def visit(self, node):
return True
class SingletonTester(object):
"""
Test if a query expression AST matches exactly one sample (syntactically).
"""
visitor = Visitor()
@visitor(Node)
def visit(self, node, *args):
return False
@visitor(Term)
def visit(self, node, expression):
return expression
@visitor(Expression)
def visit(self, node, expression):
return expression
@visitor(Clause)
def visit(self, node):
return node.field == 'sample'
class ClauseTester(object):
"""
Test if a predicate holds for all clauses in a query expression AST.
"""
visitor = Visitor()
def __init__(self, predicate):
self.predicate = predicate
@visitor(Tautology)
def visit(self, node):
return True
@visitor(Clause)
def visit(self, node):
return self.predicate(node.field, node.value)
@visitor(ExpressionNode)
def visit(self, node, expression):
return expression
@visitor(Conjunction)
def visit(self, node, left, right):
return left and right
@visitor(Disjunction)
def visit(self, node, left, right):
return left and right
# Some convenience interfaces follow below.
def parse(expression_string):
"""
Parse given query expression string and return its abstract syntax tree.
"""
return pypeg2.parse(expression_string, Expression)
def deep_copy(expression):
"""
Return an identical deep copy of the given query expression AST.
"""
return expression.accept(Identity())
def pretty_print(expression):
"""
Pretty-print given query expression AST to a string.
"""
return expression.accept(PrettyPrinter())
def build_query_criterion(expression, build_clause):
"""
Create an SQLAlchemy filter criterion from a query expression AST.
:arg build_clause: Given a field name and value of a clause `field:value`,
this function should return a corresponding SQLAlchemy filter criterion.
:type build_clause: function
>>> def match_by_name(field, value):
... return UserTable.name == '"%s"' % value
>>> expression = parse('not user:lance')
>>> criterion = build_query_criterion(expression, match_by_name)
>>> criterion.compile(compile_kwargs={'literal_binds': True}).string
u'user_table.name != "lance"'
"""
return expression.accept(QueryCriterionBuilder(build_clause))
def update_clause_values(expression, update_value):
"""
Update values in all clauses in a query expression AST.
    :arg update_value: Given a field name and value of a clause `field:value`,
      this function should return a new value for the clause.
    :type update_value: function
>>> def add_last_name(field, value):
... return value + ' armstrong'
>>> expression = parse('not user:lance')
>>> pretty_print(update_clause_values(expression, add_last_name))
u'not user:lance armstrong'
"""
return expression.accept(ClauseValueUpdater(update_value))
def test_clauses(expression, predicate):
"""
Test if a predicate holds for all clauses in a query expression AST.
:arg predicate: Given a field name and value of a clause `field:value`,
this function should return `True` or `False`.
:type predicate: function
>>> def is_digit(field, value):
... return value.isdigit()
>>> expression = parse('not (x:5 and y:zero) or z:77')
>>> test_clauses(expression, is_digit)
False
"""
return expression.accept(ClauseTester(predicate))
def is_tautology(expression):
"""
Test if a query expression AST is a tautology (syntactically).
"""
return expression.accept(TautologyTester())
def is_singleton(expression):
"""
Test if a query expression AST matches exactly one sample (syntactically).
"""
return expression.accept(SingletonTester())
def make_conjunction(left, right):
"""
Given two query expression ASTs, return their conjunction.
"""
return Expression(Conjunction(Term(Grouping(left)), right))
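# Minimal usage sketch (not part of the original module); the query strings and
# field names ('sample', 'chromosome') below are illustrative only.
if __name__ == '__main__':
    left = parse('sample:NA12878')
    right = parse('not chromosome:X')
    combined = make_conjunction(left, right)

    print(pretty_print(combined))    # (sample:NA12878) and not chromosome:X
    print(is_tautology(parse('*')))  # True
    print(is_singleton(left))        # True: exactly one clause, on the 'sample' field
    print(test_clauses(combined, lambda field, value: bool(value)))  # True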
|
{
"content_hash": "b7f66f2f7684b357fc6ae449c1e5b78b",
"timestamp": "",
"source": "github",
"line_count": 546,
"max_line_length": 78,
"avg_line_length": 24.675824175824175,
"alnum_prop": 0.6020188525198545,
"repo_name": "varda/varda",
"id": "d90aa6077c71f229ac98d0fc19f12b949d39648e",
"size": "13473",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "varda/expressions.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "353449"
}
],
"symlink_target": ""
}
|