Dataset schema (each record below repeats these fields in this order, separated by `|` lines; empty fields collapse into adjacent separators):

| field | stats |
|---|---|
| commit | stringlengths 40–40 |
| old_file | stringlengths 4–118 |
| new_file | stringlengths 4–118 |
| old_contents | stringlengths 0–2.94k |
| new_contents | stringlengths 1–4.43k |
| subject | stringlengths 15–444 |
| message | stringlengths 16–3.45k |
| lang | stringclasses, 1 value |
| license | stringclasses, 13 values |
| repos | stringlengths 5–43.2k |
| prompt | stringlengths 17–4.58k |
| response | stringlengths 1–4.43k |
| prompt_tagged | stringlengths 58–4.62k |
| response_tagged | stringlengths 1–4.43k |
| text | stringlengths 132–7.29k |
| text_tagged | stringlengths 173–7.33k |
164215cfde181626e458903b10b1d2ff961f4101
|
d1_mn_generic/src/gmn/app/migrations/0004_auto_20170523_0137.py
|
d1_mn_generic/src/gmn/app/migrations/0004_auto_20170523_0137.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2017-05-23 01:37
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('app', '0003_mediatype_mediatypeproperty'),
]
operations = [
migrations.CreateModel(
name='SeriesIdToHeadPersistentId',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('pid', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='seriesidtoheadpersistentid_pid', to='app.IdNamespace')),
('sid', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='seriesidtoheadpersistentid_sid', to='app.IdNamespace')),
],
),
migrations.CreateModel(
name='SeriesIdToPersistentId',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('pid', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='seriesidtopersistentid_pid', to='app.IdNamespace')),
('sid', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='seriesidtopersistentid_sid', to='app.IdNamespace')),
],
),
migrations.RemoveField(
model_name='seriesidtoscienceobject',
name='sciobj',
),
migrations.RemoveField(
model_name='seriesidtoscienceobject',
name='sid',
),
migrations.DeleteModel(
name='SeriesIdToScienceObject',
),
]
|
Add Django db migration for new SID implementation
|
Add Django db migration for new SID implementation
|
Python
|
apache-2.0
|
DataONEorg/d1_python,DataONEorg/d1_python,DataONEorg/d1_python,DataONEorg/d1_python
|
Add Django db migration for new SID implementation
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2017-05-23 01:37
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('app', '0003_mediatype_mediatypeproperty'),
]
operations = [
migrations.CreateModel(
name='SeriesIdToHeadPersistentId',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('pid', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='seriesidtoheadpersistentid_pid', to='app.IdNamespace')),
('sid', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='seriesidtoheadpersistentid_sid', to='app.IdNamespace')),
],
),
migrations.CreateModel(
name='SeriesIdToPersistentId',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('pid', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='seriesidtopersistentid_pid', to='app.IdNamespace')),
('sid', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='seriesidtopersistentid_sid', to='app.IdNamespace')),
],
),
migrations.RemoveField(
model_name='seriesidtoscienceobject',
name='sciobj',
),
migrations.RemoveField(
model_name='seriesidtoscienceobject',
name='sid',
),
migrations.DeleteModel(
name='SeriesIdToScienceObject',
),
]
|
<commit_before><commit_msg>Add Django db migration for new SID implementation<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2017-05-23 01:37
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('app', '0003_mediatype_mediatypeproperty'),
]
operations = [
migrations.CreateModel(
name='SeriesIdToHeadPersistentId',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('pid', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='seriesidtoheadpersistentid_pid', to='app.IdNamespace')),
('sid', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='seriesidtoheadpersistentid_sid', to='app.IdNamespace')),
],
),
migrations.CreateModel(
name='SeriesIdToPersistentId',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('pid', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='seriesidtopersistentid_pid', to='app.IdNamespace')),
('sid', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='seriesidtopersistentid_sid', to='app.IdNamespace')),
],
),
migrations.RemoveField(
model_name='seriesidtoscienceobject',
name='sciobj',
),
migrations.RemoveField(
model_name='seriesidtoscienceobject',
name='sid',
),
migrations.DeleteModel(
name='SeriesIdToScienceObject',
),
]
|
Add Django db migration for new SID implementation# -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2017-05-23 01:37
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('app', '0003_mediatype_mediatypeproperty'),
]
operations = [
migrations.CreateModel(
name='SeriesIdToHeadPersistentId',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('pid', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='seriesidtoheadpersistentid_pid', to='app.IdNamespace')),
('sid', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='seriesidtoheadpersistentid_sid', to='app.IdNamespace')),
],
),
migrations.CreateModel(
name='SeriesIdToPersistentId',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('pid', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='seriesidtopersistentid_pid', to='app.IdNamespace')),
('sid', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='seriesidtopersistentid_sid', to='app.IdNamespace')),
],
),
migrations.RemoveField(
model_name='seriesidtoscienceobject',
name='sciobj',
),
migrations.RemoveField(
model_name='seriesidtoscienceobject',
name='sid',
),
migrations.DeleteModel(
name='SeriesIdToScienceObject',
),
]
|
<commit_before><commit_msg>Add Django db migration for new SID implementation<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2017-05-23 01:37
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('app', '0003_mediatype_mediatypeproperty'),
]
operations = [
migrations.CreateModel(
name='SeriesIdToHeadPersistentId',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('pid', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='seriesidtoheadpersistentid_pid', to='app.IdNamespace')),
('sid', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='seriesidtoheadpersistentid_sid', to='app.IdNamespace')),
],
),
migrations.CreateModel(
name='SeriesIdToPersistentId',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('pid', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='seriesidtopersistentid_pid', to='app.IdNamespace')),
('sid', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='seriesidtopersistentid_sid', to='app.IdNamespace')),
],
),
migrations.RemoveField(
model_name='seriesidtoscienceobject',
name='sciobj',
),
migrations.RemoveField(
model_name='seriesidtoscienceobject',
name='sid',
),
migrations.DeleteModel(
name='SeriesIdToScienceObject',
),
]
|
|
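The migration in this record creates two SID-to-PID mapping tables. As a reading aid, here is a minimal sketch of the Django models those `CreateModel` operations would correspond to; the actual models.py is not part of this record, so the module layout is assumed.

```python
# Hypothetical models implied by the CreateModel operations above; the real
# GMN models.py is not included in this record.
from django.db import models


class SeriesIdToHeadPersistentId(models.Model):
    # One-to-one on both sides: each SID resolves to exactly one head PID.
    pid = models.OneToOneField(
        'app.IdNamespace', on_delete=models.CASCADE,
        related_name='seriesidtoheadpersistentid_pid')
    sid = models.OneToOneField(
        'app.IdNamespace', on_delete=models.CASCADE,
        related_name='seriesidtoheadpersistentid_sid')


class SeriesIdToPersistentId(models.Model):
    # ForeignKey on sid: a SID may map to many PIDs across a revision chain,
    # while each PID belongs to at most one SID.
    pid = models.OneToOneField(
        'app.IdNamespace', on_delete=models.CASCADE,
        related_name='seriesidtopersistentid_pid')
    sid = models.ForeignKey(
        'app.IdNamespace', on_delete=models.CASCADE,
        related_name='seriesidtopersistentid_sid')
```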
f0d044b4e46c6aa25530057c772167389e485f04
|
BanHammer/blacklist/management/commands/update_third_party_rules.py
|
BanHammer/blacklist/management/commands/update_third_party_rules.py
|
from django.core.management.base import BaseCommand, CommandError
import urllib2
import re
STORE_FOLDER = '/usr/local/third_party_ips'
ET_COMPROMISED_IPS_URL = 'https://rules.emergingthreatspro.com/blockrules/compromised-ips.txt'
ET_COMPROMISED_IPS_REV = 'https://rules.emergingthreatspro.com/blockrules/COMPrev'
ET_COMPROMISED_IPS_CONTENT_FILE = 'et_compromised_ips'
ET_COMPROMISED_IPS_REV_FILE = 'et_compromised_ips.rev'
DSHIELD_BLOCK_URL = 'https://isc.sans.edu/block.txt'
DSHIELD_BLOCK_CONTENT_FILE = 'dshield_block'
DSHIELD_BLOCK_REV_FILE = 'dshield_block.rev'
class Command(BaseCommand):
help = 'Update 3rd party rules (et and dshield)'
def handle(self, *args, **options):
self._et_compromised_ips()
self._dshield_block()
def _et_compromised_ips(self):
url = urllib2.urlopen(ET_COMPROMISED_IPS_URL)
f = open('%s/%s' % (STORE_FOLDER, ET_COMPROMISED_IPS_CONTENT_FILE), 'w')
f.write(url.read())
f.close()
url = urllib2.urlopen(ET_COMPROMISED_IPS_REV)
f = open('%s/%s' % (STORE_FOLDER, ET_COMPROMISED_IPS_REV_FILE), 'w')
f.write(url.read())
f.close()
def _dshield_block(self):
url = urllib2.urlopen(DSHIELD_BLOCK_URL)
f = open('%s/%s' % (STORE_FOLDER, DSHIELD_BLOCK_CONTENT_FILE), 'w')
content = url.read()
networks = ''
for l in content.splitlines():
res = re.search('^(\d+\.\d+.\d+\.\d+)\t(\d+\.\d+.\d+\.\d+)\t(\d+)\t', l)
if res:
networks += '%s/%s\n' % (res.group(1), res.group(3))
f.write(networks)
f.close()
f = open('%s/%s' % (STORE_FOLDER, DSHIELD_BLOCK_REV_FILE), 'w')
rev = re.search('updated: (.*)\n', content, re.IGNORECASE).group(1)
f.write(rev)
f.close()
|
Update third party rules script
|
Update third party rules script
|
Python
|
bsd-3-clause
|
mozilla/BanHammer,mozilla/BanHammer,mozilla/BanHammer,mozilla/BanHammer
|
Update third party rules script
|
from django.core.management.base import BaseCommand, CommandError
import urllib2
import re
STORE_FOLDER = '/usr/local/third_party_ips'
ET_COMPROMISED_IPS_URL = 'https://rules.emergingthreatspro.com/blockrules/compromised-ips.txt'
ET_COMPROMISED_IPS_REV = 'https://rules.emergingthreatspro.com/blockrules/COMPrev'
ET_COMPROMISED_IPS_CONTENT_FILE = 'et_compromised_ips'
ET_COMPROMISED_IPS_REV_FILE = 'et_compromised_ips.rev'
DSHIELD_BLOCK_URL = 'https://isc.sans.edu/block.txt'
DSHIELD_BLOCK_CONTENT_FILE = 'dshield_block'
DSHIELD_BLOCK_REV_FILE = 'dshield_block.rev'
class Command(BaseCommand):
help = 'Update 3rd party rules (et and dshield)'
def handle(self, *args, **options):
self._et_compromised_ips()
self._dshield_block()
def _et_compromised_ips(self):
url = urllib2.urlopen(ET_COMPROMISED_IPS_URL)
f = open('%s/%s' % (STORE_FOLDER, ET_COMPROMISED_IPS_CONTENT_FILE), 'w')
f.write(url.read())
f.close()
url = urllib2.urlopen(ET_COMPROMISED_IPS_REV)
f = open('%s/%s' % (STORE_FOLDER, ET_COMPROMISED_IPS_REV_FILE), 'w')
f.write(url.read())
f.close()
def _dshield_block(self):
url = urllib2.urlopen(DSHIELD_BLOCK_URL)
f = open('%s/%s' % (STORE_FOLDER, DSHIELD_BLOCK_CONTENT_FILE), 'w')
content = url.read()
networks = ''
for l in content.splitlines():
res = re.search('^(\d+\.\d+.\d+\.\d+)\t(\d+\.\d+.\d+\.\d+)\t(\d+)\t', l)
if res:
networks += '%s/%s\n' % (res.group(1), res.group(3))
f.write(networks)
f.close()
f = open('%s/%s' % (STORE_FOLDER, DSHIELD_BLOCK_REV_FILE), 'w')
rev = re.search('updated: (.*)\n', content, re.IGNORECASE).group(1)
f.write(rev)
f.close()
|
<commit_before><commit_msg>Update third party rules script<commit_after>
|
from django.core.management.base import BaseCommand, CommandError
import urllib2
import re
STORE_FOLDER = '/usr/local/third_party_ips'
ET_COMPROMISED_IPS_URL = 'https://rules.emergingthreatspro.com/blockrules/compromised-ips.txt'
ET_COMPROMISED_IPS_REV = 'https://rules.emergingthreatspro.com/blockrules/COMPrev'
ET_COMPROMISED_IPS_CONTENT_FILE = 'et_compromised_ips'
ET_COMPROMISED_IPS_REV_FILE = 'et_compromised_ips.rev'
DSHIELD_BLOCK_URL = 'https://isc.sans.edu/block.txt'
DSHIELD_BLOCK_CONTENT_FILE = 'dshield_block'
DSHIELD_BLOCK_REV_FILE = 'dshield_block.rev'
class Command(BaseCommand):
help = 'Update 3rd party rules (et and dshield)'
def handle(self, *args, **options):
self._et_compromised_ips()
self._dshield_block()
def _et_compromised_ips(self):
url = urllib2.urlopen(ET_COMPROMISED_IPS_URL)
f = open('%s/%s' % (STORE_FOLDER, ET_COMPROMISED_IPS_CONTENT_FILE), 'w')
f.write(url.read())
f.close()
url = urllib2.urlopen(ET_COMPROMISED_IPS_REV)
f = open('%s/%s' % (STORE_FOLDER, ET_COMPROMISED_IPS_REV_FILE), 'w')
f.write(url.read())
f.close()
def _dshield_block(self):
url = urllib2.urlopen(DSHIELD_BLOCK_URL)
f = open('%s/%s' % (STORE_FOLDER, DSHIELD_BLOCK_CONTENT_FILE), 'w')
content = url.read()
networks = ''
for l in content.splitlines():
res = re.search('^(\d+\.\d+.\d+\.\d+)\t(\d+\.\d+.\d+\.\d+)\t(\d+)\t', l)
if res:
networks += '%s/%s\n' % (res.group(1), res.group(3))
f.write(networks)
f.close()
f = open('%s/%s' % (STORE_FOLDER, DSHIELD_BLOCK_REV_FILE), 'w')
rev = re.search('updated: (.*)\n', content, re.IGNORECASE).group(1)
f.write(rev)
f.close()
|
Update third party rules scriptfrom django.core.management.base import BaseCommand, CommandError
import urllib2
import re
STORE_FOLDER = '/usr/local/third_party_ips'
ET_COMPROMISED_IPS_URL = 'https://rules.emergingthreatspro.com/blockrules/compromised-ips.txt'
ET_COMPROMISED_IPS_REV = 'https://rules.emergingthreatspro.com/blockrules/COMPrev'
ET_COMPROMISED_IPS_CONTENT_FILE = 'et_compromised_ips'
ET_COMPROMISED_IPS_REV_FILE = 'et_compromised_ips.rev'
DSHIELD_BLOCK_URL = 'https://isc.sans.edu/block.txt'
DSHIELD_BLOCK_CONTENT_FILE = 'dshield_block'
DSHIELD_BLOCK_REV_FILE = 'dshield_block.rev'
class Command(BaseCommand):
help = 'Update 3rd party rules (et and dshield)'
def handle(self, *args, **options):
self._et_compromised_ips()
self._dshield_block()
def _et_compromised_ips(self):
url = urllib2.urlopen(ET_COMPROMISED_IPS_URL)
f = open('%s/%s' % (STORE_FOLDER, ET_COMPROMISED_IPS_CONTENT_FILE), 'w')
f.write(url.read())
f.close()
url = urllib2.urlopen(ET_COMPROMISED_IPS_REV)
f = open('%s/%s' % (STORE_FOLDER, ET_COMPROMISED_IPS_REV_FILE), 'w')
f.write(url.read())
f.close()
def _dshield_block(self):
url = urllib2.urlopen(DSHIELD_BLOCK_URL)
f = open('%s/%s' % (STORE_FOLDER, DSHIELD_BLOCK_CONTENT_FILE), 'w')
content = url.read()
networks = ''
for l in content.splitlines():
res = re.search('^(\d+\.\d+.\d+\.\d+)\t(\d+\.\d+.\d+\.\d+)\t(\d+)\t', l)
if res:
networks += '%s/%s\n' % (res.group(1), res.group(3))
f.write(networks)
f.close()
f = open('%s/%s' % (STORE_FOLDER, DSHIELD_BLOCK_REV_FILE), 'w')
rev = re.search('updated: (.*)\n', content, re.IGNORECASE).group(1)
f.write(rev)
f.close()
|
<commit_before><commit_msg>Update third party rules script<commit_after>from django.core.management.base import BaseCommand, CommandError
import urllib2
import re
STORE_FOLDER = '/usr/local/third_party_ips'
ET_COMPROMISED_IPS_URL = 'https://rules.emergingthreatspro.com/blockrules/compromised-ips.txt'
ET_COMPROMISED_IPS_REV = 'https://rules.emergingthreatspro.com/blockrules/COMPrev'
ET_COMPROMISED_IPS_CONTENT_FILE = 'et_compromised_ips'
ET_COMPROMISED_IPS_REV_FILE = 'et_compromised_ips.rev'
DSHIELD_BLOCK_URL = 'https://isc.sans.edu/block.txt'
DSHIELD_BLOCK_CONTENT_FILE = 'dshield_block'
DSHIELD_BLOCK_REV_FILE = 'dshield_block.rev'
class Command(BaseCommand):
help = 'Update 3rd party rules (et and dshield)'
def handle(self, *args, **options):
self._et_compromised_ips()
self._dshield_block()
def _et_compromised_ips(self):
url = urllib2.urlopen(ET_COMPROMISED_IPS_URL)
f = open('%s/%s' % (STORE_FOLDER, ET_COMPROMISED_IPS_CONTENT_FILE), 'w')
f.write(url.read())
f.close()
url = urllib2.urlopen(ET_COMPROMISED_IPS_REV)
f = open('%s/%s' % (STORE_FOLDER, ET_COMPROMISED_IPS_REV_FILE), 'w')
f.write(url.read())
f.close()
def _dshield_block(self):
url = urllib2.urlopen(DSHIELD_BLOCK_URL)
f = open('%s/%s' % (STORE_FOLDER, DSHIELD_BLOCK_CONTENT_FILE), 'w')
content = url.read()
networks = ''
for l in content.splitlines():
res = re.search('^(\d+\.\d+.\d+\.\d+)\t(\d+\.\d+.\d+\.\d+)\t(\d+)\t', l)
if res:
networks += '%s/%s\n' % (res.group(1), res.group(3))
f.write(networks)
f.close()
f = open('%s/%s' % (STORE_FOLDER, DSHIELD_BLOCK_REV_FILE), 'w')
rev = re.search('updated: (.*)\n', content, re.IGNORECASE).group(1)
f.write(rev)
f.close()
|
|
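The script in this record is Python 2 (`urllib2`, bare `open()` calls), and its dshield pattern `^(\d+\.\d+.\d+\.\d+)\t…` leaves one dot per address unescaped. For readers on Python 3, here is a hedged sketch of just the dshield parsing step with the pattern tightened and context-managed I/O; it assumes block.txt keeps the tab-separated start-IP/end-IP/prefix layout the original code expects, and it is not the project's actual code.

```python
# Python 3 sketch of the dshield step only; assumes the tab-separated
# "start-IP<TAB>end-IP<TAB>prefix" layout of block.txt parsed above.
import re
import urllib.request

DSHIELD_BLOCK_URL = 'https://isc.sans.edu/block.txt'

# Every dot escaped; the original pattern left one per address unescaped.
LINE_RE = re.compile(r'^(\d+\.\d+\.\d+\.\d+)\t(\d+\.\d+\.\d+\.\d+)\t(\d+)\t')


def fetch_dshield_networks(url=DSHIELD_BLOCK_URL):
    with urllib.request.urlopen(url) as resp:
        content = resp.read().decode('utf-8', errors='replace')
    networks = []
    for line in content.splitlines():
        match = LINE_RE.match(line)
        if match:
            # CIDR built from the start address and the prefix length.
            networks.append('%s/%s' % (match.group(1), match.group(3)))
    return networks
```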
eceb4ff9f9c8cee19ebade50f7c88766a1fd7e14
|
windberg_results/cms_app.py
|
windberg_results/cms_app.py
|
from cms.app_base import CMSApp
from django.utils.translation import ugettext_lazy as _
from cms.apphook_pool import apphook_pool
class WindbergResultApp(CMSApp):
name = _('Result App')
urls = ['windberg_results.urls']
apphook_pool.register(WindbergResultApp)
|
Add the cms app for results
|
Add the cms app for results
|
Python
|
bsd-3-clause
|
janLo/Windberg-web,janLo/Windberg-web
|
Add the cms app for results
|
from cms.app_base import CMSApp
from django.utils.translation import ugettext_lazy as _
from cms.apphook_pool import apphook_pool
class WindbergResultApp(CMSApp):
name = _('Result App')
urls = ['windberg_results.urls']
apphook_pool.register(WindbergResultApp)
|
<commit_before><commit_msg>Add the cms app for results<commit_after>
|
from cms.app_base import CMSApp
from django.utils.translation import ugettext_lazy as _
from cms.apphook_pool import apphook_pool
class WindbergResultApp(CMSApp):
name = _('Result App')
urls = ['windberg_results.urls']
apphook_pool.register(WindbergResultApp)
|
Add the cms app for resultsfrom cms.app_base import CMSApp
from django.utils.translation import ugettext_lazy as _
from cms.apphook_pool import apphook_pool
class WindbergResultApp(CMSApp):
name = _('Result App')
urls = ['windberg_results.urls']
apphook_pool.register(WindbergResultApp)
|
<commit_before><commit_msg>Add the cms app for results<commit_after>from cms.app_base import CMSApp
from django.utils.translation import ugettext_lazy as _
from cms.apphook_pool import apphook_pool
class WindbergResultApp(CMSApp):
name = _('Result App')
urls = ['windberg_results.urls']
apphook_pool.register(WindbergResultApp)
|
|
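The apphook only registers a urlconf module. That module is not part of this record, so the sketch below of what `windberg_results.urls` might contain is purely illustrative: only the module path comes from the apphook, and the view name is invented.

```python
# Hypothetical windberg_results/urls.py; uses the pre-Django-2 url() API that
# matches the ugettext_lazy-era code in this record. The view is an assumption.
from django.conf.urls import url

from windberg_results import views  # assumed to exist

urlpatterns = [
    url(r'^$', views.result_list, name='windberg_result_list'),
]
```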
4208c6e963ae59380999fae45930656c287a627d
|
nettests/core/dnsspoof.py
|
nettests/core/dnsspoof.py
|
from twisted.internet import defer
from twisted.python import usage
from scapy.all import IP, UDP, DNS, DNSQR
from ooni.templates import scapyt
from ooni.utils import log
class UsageOptions(usage.Options):
optParameters = [['resolver', 'r', None,
'Specify the resolver that should be used for DNS queries (ip:port)'],
['hostname', 'h', None,
'Specify the hostname of a censored site'],
['backend', 'b', '8.8.8.8:53',
'Specify the IP address of a good DNS resolver (ip:port)']
]
class DNSSpoof(scapyt.ScapyTest):
name = "DNS Spoof"
timeout = 2
usageOptions = UsageOptions
requiredOptions = ['hostname', 'resolver']
def setUp(self):
self.resolverAddr, self.resolverPort = self.localOptions['resolver'].split(':')
self.resolverPort = int(self.resolverPort)
self.controlResolverAddr, self.controlResolverPort = self.localOptions['backend'].split(':')
self.controlResolverPort = int(self.controlResolverPort)
self.hostname = self.localOptions['hostname']
def postProcessor(self, report):
"""
This is not tested, but the concept is that if the two responses
match up then spoofing is occurring.
"""
test_answer = report['test_a_lookup']['answered_packets'][0][1]
control_answer = report['test_control_a_lookup']['answered_packets'][0][1]
if test_answer[UDP] == control_answer[UDP]:
self.report['spoofing'] = True
else:
self.report['spoofing'] = False
return
@defer.inlineCallbacks
def test_a_lookup(self):
question = IP(dst=self.resolverAddr)/UDP()/DNS(rd=1,
qd=DNSQR(qtype="A", qclass="IN", qname=self.hostname))
log.msg("Performing query to %s with %s:%s" % (self.hostname, self.resolverAddr, self.resolverPort))
answered, unanswered = yield self.sr1(question)
@defer.inlineCallbacks
def test_control_a_lookup(self):
question = IP(dst=self.controlResolverAddr)/UDP()/DNS(rd=1,
qd=DNSQR(qtype="A", qclass="IN", qname=self.hostname))
log.msg("Performing query to %s with %s:%s" % (self.hostname,
self.controlResolverAddr, self.controlResolverPort))
answered, unanswered = yield self.sr1(question)
|
Implement test that detects DNS spoofing * To be run with a known good resolver and the default resolver of the country being tested, from inside the country with a hostname of a site that is known to be censored.
|
Implement test that detects DNS spoofing
* To be run with a known good resolver and the default resolver of the country
being tested, from inside the country with a hostname of a site that is known
to be censored.
|
Python
|
bsd-2-clause
|
0xPoly/ooni-probe,0xPoly/ooni-probe,Karthikeyan-kkk/ooni-probe,Karthikeyan-kkk/ooni-probe,kdmurray91/ooni-probe,juga0/ooni-probe,juga0/ooni-probe,Karthikeyan-kkk/ooni-probe,lordappsec/ooni-probe,lordappsec/ooni-probe,0xPoly/ooni-probe,kdmurray91/ooni-probe,Karthikeyan-kkk/ooni-probe,lordappsec/ooni-probe,juga0/ooni-probe,kdmurray91/ooni-probe,lordappsec/ooni-probe,0xPoly/ooni-probe,juga0/ooni-probe,kdmurray91/ooni-probe
|
Implement test that detects DNS spoofing
* To be run with a known good resolver and the default resolver of the country
being tested, from inside the country with a hostname of a site that is known
to be censored.
|
from twisted.internet import defer
from twisted.python import usage
from scapy.all import IP, UDP, DNS, DNSQR
from ooni.templates import scapyt
from ooni.utils import log
class UsageOptions(usage.Options):
optParameters = [['resolver', 'r', None,
'Specify the resolver that should be used for DNS queries (ip:port)'],
['hostname', 'h', None,
'Specify the hostname of a censored site'],
['backend', 'b', '8.8.8.8:53',
'Specify the IP address of a good DNS resolver (ip:port)']
]
class DNSSpoof(scapyt.ScapyTest):
name = "DNS Spoof"
timeout = 2
usageOptions = UsageOptions
requiredOptions = ['hostname', 'resolver']
def setUp(self):
self.resolverAddr, self.resolverPort = self.localOptions['resolver'].split(':')
self.resolverPort = int(self.resolverPort)
self.controlResolverAddr, self.controlResolverPort = self.localOptions['backend'].split(':')
self.controlResolverPort = int(self.controlResolverPort)
self.hostname = self.localOptions['hostname']
def postProcessor(self, report):
"""
This is not tested, but the concept is that if the two responses
match up then spoofing is occurring.
"""
test_answer = report['test_a_lookup']['answered_packets'][0][1]
control_answer = report['test_control_a_lookup']['answered_packets'][0][1]
if test_answer[UDP] == control_answer[UDP]:
self.report['spoofing'] = True
else:
self.report['spoofing'] = False
return
@defer.inlineCallbacks
def test_a_lookup(self):
question = IP(dst=self.resolverAddr)/UDP()/DNS(rd=1,
qd=DNSQR(qtype="A", qclass="IN", qname=self.hostname))
log.msg("Performing query to %s with %s:%s" % (self.hostname, self.resolverAddr, self.resolverPort))
answered, unanswered = yield self.sr1(question)
@defer.inlineCallbacks
def test_control_a_lookup(self):
question = IP(dst=self.controlResolverAddr)/UDP()/DNS(rd=1,
qd=DNSQR(qtype="A", qclass="IN", qname=self.hostname))
log.msg("Performing query to %s with %s:%s" % (self.hostname,
self.controlResolverAddr, self.controlResolverPort))
answered, unanswered = yield self.sr1(question)
|
<commit_before><commit_msg>Implement test that detects DNS spoofing
* To be run with a known good resolver and the default resolver of the country
being tested, from inside the country with a hostname of a site that is known
to be censored.<commit_after>
|
from twisted.internet import defer
from twisted.python import usage
from scapy.all import IP, UDP, DNS, DNSQR
from ooni.templates import scapyt
from ooni.utils import log
class UsageOptions(usage.Options):
optParameters = [['resolver', 'r', None,
'Specify the resolver that should be used for DNS queries (ip:port)'],
['hostname', 'h', None,
'Specify the hostname of a censored site'],
['backend', 'b', '8.8.8.8:53',
'Specify the IP address of a good DNS resolver (ip:port)']
]
class DNSSpoof(scapyt.ScapyTest):
name = "DNS Spoof"
timeout = 2
usageOptions = UsageOptions
requiredOptions = ['hostname', 'resolver']
def setUp(self):
self.resolverAddr, self.resolverPort = self.localOptions['resolver'].split(':')
self.resolverPort = int(self.resolverPort)
self.controlResolverAddr, self.controlResolverPort = self.localOptions['backend'].split(':')
self.controlResolverPort = int(self.controlResolverPort)
self.hostname = self.localOptions['hostname']
def postProcessor(self, report):
"""
This is not tested, but the concept is that if the two responses
match up then spoofing is occurring.
"""
test_answer = report['test_a_lookup']['answered_packets'][0][1]
control_answer = report['test_control_a_lookup']['answered_packets'][0][1]
if test_answer[UDP] == control_answer[UDP]:
self.report['spoofing'] = True
else:
self.report['spoofing'] = False
return
@defer.inlineCallbacks
def test_a_lookup(self):
question = IP(dst=self.resolverAddr)/UDP()/DNS(rd=1,
qd=DNSQR(qtype="A", qclass="IN", qname=self.hostname))
log.msg("Performing query to %s with %s:%s" % (self.hostname, self.resolverAddr, self.resolverPort))
answered, unanswered = yield self.sr1(question)
@defer.inlineCallbacks
def test_control_a_lookup(self):
question = IP(dst=self.controlResolverAddr)/UDP()/DNS(rd=1,
qd=DNSQR(qtype="A", qclass="IN", qname=self.hostname))
log.msg("Performing query to %s with %s:%s" % (self.hostname,
self.controlResolverAddr, self.controlResolverPort))
answered, unanswered = yield self.sr1(question)
|
Implement test that detects DNS spoofing
* To be run with a known good resolver and the default resolver of the country
being tested, from inside the country with a hostname of a site that is known
to be censored.from twisted.internet import defer
from twisted.python import usage
from scapy.all import IP, UDP, DNS, DNSQR
from ooni.templates import scapyt
from ooni.utils import log
class UsageOptions(usage.Options):
optParameters = [['resolver', 'r', None,
'Specify the resolver that should be used for DNS queries (ip:port)'],
['hostname', 'h', None,
'Specify the hostname of a censored site'],
['backend', 'b', '8.8.8.8:53',
'Specify the IP address of a good DNS resolver (ip:port)']
]
class DNSSpoof(scapyt.ScapyTest):
name = "DNS Spoof"
timeout = 2
usageOptions = UsageOptions
requiredOptions = ['hostname', 'resolver']
def setUp(self):
self.resolverAddr, self.resolverPort = self.localOptions['resolver'].split(':')
self.resolverPort = int(self.resolverPort)
self.controlResolverAddr, self.controlResolverPort = self.localOptions['backend'].split(':')
self.controlResolverPort = int(self.controlResolverPort)
self.hostname = self.localOptions['hostname']
def postProcessor(self, report):
"""
This is not tested, but the concept is that if the two responses
match up then spoofing is occurring.
"""
test_answer = report['test_a_lookup']['answered_packets'][0][1]
control_answer = report['test_control_a_lookup']['answered_packets'][0][1]
if test_answer[UDP] == control_answer[UDP]:
self.report['spoofing'] = True
else:
self.report['spoofing'] = False
return
@defer.inlineCallbacks
def test_a_lookup(self):
question = IP(dst=self.resolverAddr)/UDP()/DNS(rd=1,
qd=DNSQR(qtype="A", qclass="IN", qname=self.hostname))
log.msg("Performing query to %s with %s:%s" % (self.hostname, self.resolverAddr, self.resolverPort))
answered, unanswered = yield self.sr1(question)
@defer.inlineCallbacks
def test_control_a_lookup(self):
question = IP(dst=self.controlResolverAddr)/UDP()/DNS(rd=1,
qd=DNSQR(qtype="A", qclass="IN", qname=self.hostname))
log.msg("Performing query to %s with %s:%s" % (self.hostname,
self.controlResolverAddr, self.controlResolverPort))
answered, unanswered = yield self.sr1(question)
|
<commit_before><commit_msg>Implement test that detects DNS spoofing
* To be run with a known good resolver and the default resolver of the country
being tested, from inside the country with a hostname of a site that is known
to be censored.<commit_after>from twisted.internet import defer
from twisted.python import usage
from scapy.all import IP, UDP, DNS, DNSQR
from ooni.templates import scapyt
from ooni.utils import log
class UsageOptions(usage.Options):
optParameters = [['resolver', 'r', None,
'Specify the resolver that should be used for DNS queries (ip:port)'],
['hostname', 'h', None,
'Specify the hostname of a censored site'],
['backend', 'b', '8.8.8.8:53',
'Specify the IP address of a good DNS resolver (ip:port)']
]
class DNSSpoof(scapyt.ScapyTest):
name = "DNS Spoof"
timeout = 2
usageOptions = UsageOptions
requiredOptions = ['hostname', 'resolver']
def setUp(self):
self.resolverAddr, self.resolverPort = self.localOptions['resolver'].split(':')
self.resolverPort = int(self.resolverPort)
self.controlResolverAddr, self.controlResolverPort = self.localOptions['backend'].split(':')
self.controlResolverPort = int(self.controlResolverPort)
self.hostname = self.localOptions['hostname']
def postProcessor(self, report):
"""
This is not tested, but the concept is that if the two responses
match up then spoofing is occurring.
"""
test_answer = report['test_a_lookup']['answered_packets'][0][1]
control_answer = report['test_control_a_lookup']['answered_packets'][0][1]
if test_answer[UDP] == control_answer[UDP]:
self.report['spoofing'] = True
else:
self.report['spoofing'] = False
return
@defer.inlineCallbacks
def test_a_lookup(self):
question = IP(dst=self.resolverAddr)/UDP()/DNS(rd=1,
qd=DNSQR(qtype="A", qclass="IN", qname=self.hostname))
log.msg("Performing query to %s with %s:%s" % (self.hostname, self.resolverAddr, self.resolverPort))
answered, unanswered = yield self.sr1(question)
@defer.inlineCallbacks
def test_control_a_lookup(self):
question = IP(dst=self.controlResolverAddr)/UDP()/DNS(rd=1,
qd=DNSQR(qtype="A", qclass="IN", qname=self.hostname))
log.msg("Performing query to %s with %s:%s" % (self.hostname,
self.controlResolverAddr, self.controlResolverPort))
answered, unanswered = yield self.sr1(question)
|
|
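Stripped of the ooni scaffolding, the comparison this test performs can be sketched as a standalone scapy snippet. It mirrors the test's own (self-described untested) heuristic of reporting spoofing when the two UDP payloads match; timeouts and option parsing are simplified.

```python
# Standalone sketch of the comparison performed above; requires scapy and
# typically root privileges to send raw packets.
from scapy.all import IP, UDP, DNS, DNSQR, sr1


def check_spoofing(hostname, test_resolver, control_resolver='8.8.8.8'):
    query = DNS(rd=1, qd=DNSQR(qtype='A', qclass='IN', qname=hostname))
    test_ans = sr1(IP(dst=test_resolver) / UDP() / query, timeout=2, verbose=0)
    ctrl_ans = sr1(IP(dst=control_resolver) / UDP() / query, timeout=2, verbose=0)
    if test_ans is None or ctrl_ans is None:
        return None  # one of the queries went unanswered; inconclusive
    # Same heuristic as the postProcessor above: matching UDP payloads are
    # reported as spoofing.
    return test_ans[UDP] == ctrl_ans[UDP]
```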
2e245b0a28856b86aa8c73e8b2571dd9ffd7f035
|
benchmarks/bench_numpy_vjps.py
|
benchmarks/bench_numpy_vjps.py
|
from autograd import make_vjp
import autograd.numpy as np
import autograd.numpy.random as npr
dot_0 = lambda A, B, g: make_vjp(np.dot)(A, B)[0](g)
dot_1 = lambda A, B, g: make_vjp(np.dot, argnum=1)(A, B)[0](g)
dot_0_0 = lambda A, B, g: make_vjp(dot_0)(A, B, g)[0](A)
dot_0_1 = lambda A, B, g: make_vjp(dot_0)(A, B, g)[0](A)
dot_0_2 = lambda A, B, g: make_vjp(dot_0)(A, B, g)[0](A)
dot_1_0 = lambda A, B, g: make_vjp(dot_1)(A, B, g)[0](B)
dot_1_1 = lambda A, B, g: make_vjp(dot_1)(A, B, g)[0](B)
dot_1_2 = lambda A, B, g: make_vjp(dot_1)(A, B, g)[0](B)
A = npr.randn(2, 3, 4, 5)
B = npr.randn(2, 3, 5, 4)
g = npr.randn(2, 3, 4, 2, 3, 4)
def time_dot_0():
dot_0(A, B, g)
def time_dot_1():
dot_1(A, B, g)
def time_dot_0_0():
dot_0_0(A, B, g)
def time_dot_0_1():
dot_0_1(A, B, g)
def time_dot_0_2():
dot_0_2(A, B, g)
def time_dot_1_0():
dot_1_0(A, B, g)
def time_dot_1_1():
dot_1_1(A, B, g)
def time_dot_1_2():
dot_1_2(A, B, g)
|
Add benchmarks demonstrating dot grads performance
|
Add benchmarks demonstrating dot grads performance
|
Python
|
mit
|
HIPS/autograd,hips/autograd,hips/autograd,HIPS/autograd
|
Add benchmarks demonstrating dot grads performance
|
from autograd import make_vjp
import autograd.numpy as np
import autograd.numpy.random as npr
dot_0 = lambda A, B, g: make_vjp(np.dot)(A, B)[0](g)
dot_1 = lambda A, B, g: make_vjp(np.dot, argnum=1)(A, B)[0](g)
dot_0_0 = lambda A, B, g: make_vjp(dot_0)(A, B, g)[0](A)
dot_0_1 = lambda A, B, g: make_vjp(dot_0)(A, B, g)[0](A)
dot_0_2 = lambda A, B, g: make_vjp(dot_0)(A, B, g)[0](A)
dot_1_0 = lambda A, B, g: make_vjp(dot_1)(A, B, g)[0](B)
dot_1_1 = lambda A, B, g: make_vjp(dot_1)(A, B, g)[0](B)
dot_1_2 = lambda A, B, g: make_vjp(dot_1)(A, B, g)[0](B)
A = npr.randn(2, 3, 4, 5)
B = npr.randn(2, 3, 5, 4)
g = npr.randn(2, 3, 4, 2, 3, 4)
def time_dot_0():
dot_0(A, B, g)
def time_dot_1():
dot_1(A, B, g)
def time_dot_0_0():
dot_0_0(A, B, g)
def time_dot_0_1():
dot_0_1(A, B, g)
def time_dot_0_2():
dot_0_2(A, B, g)
def time_dot_1_0():
dot_1_0(A, B, g)
def time_dot_1_1():
dot_1_1(A, B, g)
def time_dot_1_2():
dot_1_2(A, B, g)
|
<commit_before><commit_msg>Add benchmarks demonstrating dot grads performance<commit_after>
|
from autograd import make_vjp
import autograd.numpy as np
import autograd.numpy.random as npr
dot_0 = lambda A, B, g: make_vjp(np.dot)(A, B)[0](g)
dot_1 = lambda A, B, g: make_vjp(np.dot, argnum=1)(A, B)[0](g)
dot_0_0 = lambda A, B, g: make_vjp(dot_0)(A, B, g)[0](A)
dot_0_1 = lambda A, B, g: make_vjp(dot_0)(A, B, g)[0](A)
dot_0_2 = lambda A, B, g: make_vjp(dot_0)(A, B, g)[0](A)
dot_1_0 = lambda A, B, g: make_vjp(dot_1)(A, B, g)[0](B)
dot_1_1 = lambda A, B, g: make_vjp(dot_1)(A, B, g)[0](B)
dot_1_2 = lambda A, B, g: make_vjp(dot_1)(A, B, g)[0](B)
A = npr.randn(2, 3, 4, 5)
B = npr.randn(2, 3, 5, 4)
g = npr.randn(2, 3, 4, 2, 3, 4)
def time_dot_0():
dot_0(A, B, g)
def time_dot_1():
dot_1(A, B, g)
def time_dot_0_0():
dot_0_0(A, B, g)
def time_dot_0_1():
dot_0_1(A, B, g)
def time_dot_0_2():
dot_0_2(A, B, g)
def time_dot_1_0():
dot_1_0(A, B, g)
def time_dot_1_1():
dot_1_1(A, B, g)
def time_dot_1_2():
dot_1_2(A, B, g)
|
Add benchmarks demonstrating dot grads performancefrom autograd import make_vjp
import autograd.numpy as np
import autograd.numpy.random as npr
dot_0 = lambda A, B, g: make_vjp(np.dot)(A, B)[0](g)
dot_1 = lambda A, B, g: make_vjp(np.dot, argnum=1)(A, B)[0](g)
dot_0_0 = lambda A, B, g: make_vjp(dot_0)(A, B, g)[0](A)
dot_0_1 = lambda A, B, g: make_vjp(dot_0)(A, B, g)[0](A)
dot_0_2 = lambda A, B, g: make_vjp(dot_0)(A, B, g)[0](A)
dot_1_0 = lambda A, B, g: make_vjp(dot_1)(A, B, g)[0](B)
dot_1_1 = lambda A, B, g: make_vjp(dot_1)(A, B, g)[0](B)
dot_1_2 = lambda A, B, g: make_vjp(dot_1)(A, B, g)[0](B)
A = npr.randn(2, 3, 4, 5)
B = npr.randn(2, 3, 5, 4)
g = npr.randn(2, 3, 4, 2, 3, 4)
def time_dot_0():
dot_0(A, B, g)
def time_dot_1():
dot_1(A, B, g)
def time_dot_0_0():
dot_0_0(A, B, g)
def time_dot_0_1():
dot_0_1(A, B, g)
def time_dot_0_2():
dot_0_2(A, B, g)
def time_dot_1_0():
dot_1_0(A, B, g)
def time_dot_1_1():
dot_1_1(A, B, g)
def time_dot_1_2():
dot_1_2(A, B, g)
|
<commit_before><commit_msg>Add benchmarks demonstrating dot grads performance<commit_after>from autograd import make_vjp
import autograd.numpy as np
import autograd.numpy.random as npr
dot_0 = lambda A, B, g: make_vjp(np.dot)(A, B)[0](g)
dot_1 = lambda A, B, g: make_vjp(np.dot, argnum=1)(A, B)[0](g)
dot_0_0 = lambda A, B, g: make_vjp(dot_0)(A, B, g)[0](A)
dot_0_1 = lambda A, B, g: make_vjp(dot_0)(A, B, g)[0](A)
dot_0_2 = lambda A, B, g: make_vjp(dot_0)(A, B, g)[0](A)
dot_1_0 = lambda A, B, g: make_vjp(dot_1)(A, B, g)[0](B)
dot_1_1 = lambda A, B, g: make_vjp(dot_1)(A, B, g)[0](B)
dot_1_2 = lambda A, B, g: make_vjp(dot_1)(A, B, g)[0](B)
A = npr.randn(2, 3, 4, 5)
B = npr.randn(2, 3, 5, 4)
g = npr.randn(2, 3, 4, 2, 3, 4)
def time_dot_0():
dot_0(A, B, g)
def time_dot_1():
dot_1(A, B, g)
def time_dot_0_0():
dot_0_0(A, B, g)
def time_dot_0_1():
dot_0_1(A, B, g)
def time_dot_0_2():
dot_0_2(A, B, g)
def time_dot_1_0():
dot_1_0(A, B, g)
def time_dot_1_1():
dot_1_1(A, B, g)
def time_dot_1_2():
dot_1_2(A, B, g)
|
|
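One thing to note when reading this record: the three `dot_0_*` lambdas are identical (as are the three `dot_1_*` ones); only an `argnum` on the outer `make_vjp` call could distinguish them. If the intent was to benchmark second-order VJPs with respect to each argument, a sketch like the following would do that. This is an assumption about intent, not the repository's code; the cotangent stays `A` in every case because `dot_0` always returns an `A`-shaped array.

```python
# Sketch that varies argnum in the outer make_vjp; assumed intent, not the
# original benchmark code.
from autograd import make_vjp
import autograd.numpy as np

dot_0 = lambda A, B, g: make_vjp(np.dot)(A, B)[0](g)

# dot_0 returns an A-shaped array, so A is a valid cotangent in all three.
dot_0_0 = lambda A, B, g: make_vjp(dot_0, argnum=0)(A, B, g)[0](A)  # w.r.t. A
dot_0_1 = lambda A, B, g: make_vjp(dot_0, argnum=1)(A, B, g)[0](A)  # w.r.t. B
dot_0_2 = lambda A, B, g: make_vjp(dot_0, argnum=2)(A, B, g)[0](A)  # w.r.t. g
```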
9e7a42d91b6056494edf14dd5d5089b8b3a123ba
|
tools/apikeys.py
|
tools/apikeys.py
|
import argparse, shelve
if __name__ == "__main__":
# Parse the command line arguments
parser = argparse.ArgumentParser(description="API key tool for PyHeufyBot.")
parser.add_argument("-s", "--storage", help="The storage file to use", type=str, default="../heufybot.db")
parser.add_argument("-k", "--key", help="The API key type to modify", type=str, required=True)
parser.add_argument("-v", "--value", help="The value of the API key", type=str, required=True)
options = parser.parse_args()
storage = shelve.open(options.storage)
if "api-keys" not in storage:
storage["api-keys"] = {}
keys = storage["api-keys"]
keys[options.key] = options.value
storage["api-keys"] = keys
storage.close()
print "Key '{}' with value '{}' has been added to the API keys.".format(options.key, options.value)
|
Add a tool to manage API keys
|
Add a tool to manage API keys
|
Python
|
mit
|
Heufneutje/PyHeufyBot,Heufneutje/PyHeufyBot
|
Add a tool to manage API keys
|
import argparse, shelve
if __name__ == "__main__":
# Parse the command line arguments
parser = argparse.ArgumentParser(description="API key tool for PyHeufyBot.")
parser.add_argument("-s", "--storage", help="The storage file to use", type=str, default="../heufybot.db")
parser.add_argument("-k", "--key", help="The API key type to modify", type=str, required=True)
parser.add_argument("-v", "--value", help="The value of the API key", type=str, required=True)
options = parser.parse_args()
storage = shelve.open(options.storage)
if "api-keys" not in storage:
storage["api-keys"] = {}
keys = storage["api-keys"]
keys[options.key] = options.value
storage["api-keys"] = keys
storage.close()
print "Key '{}' with value '{}' has been added to the API keys.".format(options.key, options.value)
|
<commit_before><commit_msg>Add a tool to manage API keys<commit_after>
|
import argparse, shelve
if __name__ == "__main__":
# Parse the command line arguments
parser = argparse.ArgumentParser(description="API key tool for PyHeufyBot.")
parser.add_argument("-s", "--storage", help="The storage file to use", type=str, default="../heufybot.db")
parser.add_argument("-k", "--key", help="The API key type to modify", type=str, required=True)
parser.add_argument("-v", "--value", help="The value of the API key", type=str, required=True)
options = parser.parse_args()
storage = shelve.open(options.storage)
if "api-keys" not in storage:
storage["api-keys"] = {}
keys = storage["api-keys"]
keys[options.key] = options.value
storage["api-keys"] = keys
storage.close()
print "Key '{}' with value '{}' has been added to the API keys.".format(options.key, options.value)
|
Add a tool to manage API keysimport argparse, shelve
if __name__ == "__main__":
# Parse the command line arguments
parser = argparse.ArgumentParser(description="API key tool for PyHeufyBot.")
parser.add_argument("-s", "--storage", help="The storage file to use", type=str, default="../heufybot.db")
parser.add_argument("-k", "--key", help="The API key type to modify", type=str, required=True)
parser.add_argument("-v", "--value", help="The value of the API key", type=str, required=True)
options = parser.parse_args()
storage = shelve.open(options.storage)
if "api-keys" not in storage:
storage["api-keys"] = {}
keys = storage["api-keys"]
keys[options.key] = options.value
storage["api-keys"] = keys
storage.close()
print "Key '{}' with value '{}' has been added to the API keys.".format(options.key, options.value)
|
<commit_before><commit_msg>Add a tool to manage API keys<commit_after>import argparse, shelve
if __name__ == "__main__":
# Parse the command line arguments
parser = argparse.ArgumentParser(description="API key tool for PyHeufyBot.")
parser.add_argument("-s", "--storage", help="The storage file to use", type=str, default="../heufybot.db")
parser.add_argument("-k", "--key", help="The API key type to modify", type=str, required=True)
parser.add_argument("-v", "--value", help="The value of the API key", type=str, required=True)
options = parser.parse_args()
storage = shelve.open(options.storage)
if "api-keys" not in storage:
storage["api-keys"] = {}
keys = storage["api-keys"]
keys[options.key] = options.value
storage["api-keys"] = keys
storage.close()
print "Key '{}' with value '{}' has been added to the API keys.".format(options.key, options.value)
|
|
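The tool is Python 2 (note the `print` statement). A Python 3 rendering of the same logic, sketched below, only changes the print call and uses `shelve`'s context-manager support; behaviour is otherwise identical.

```python
# Python 3 sketch of the same API key tool; logic unchanged from the record.
import argparse
import shelve

if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="API key tool for PyHeufyBot.")
    parser.add_argument("-s", "--storage", help="The storage file to use",
                        type=str, default="../heufybot.db")
    parser.add_argument("-k", "--key", help="The API key type to modify",
                        type=str, required=True)
    parser.add_argument("-v", "--value", help="The value of the API key",
                        type=str, required=True)
    options = parser.parse_args()
    with shelve.open(options.storage) as storage:
        keys = storage.get("api-keys", {})
        keys[options.key] = options.value
        storage["api-keys"] = keys  # reassign so shelve writes the change back
    print("Key '{}' with value '{}' has been added to the API keys.".format(
        options.key, options.value))
```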
999f45ddb80899805293696dd2ff4627a63f73dd
|
functest/tests/unit/features/test_doctor.py
|
functest/tests/unit/features/test_doctor.py
|
#!/usr/bin/env python
# Copyright (c) 2017 Orange and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
# pylint: disable=missing-docstring
import logging
import unittest
from functest.opnfv_tests.features import doctor
from functest.utils import constants
class DoctorTesting(unittest.TestCase):
logging.disable(logging.CRITICAL)
def setUp(self):
self.doctor = doctor.Doctor()
def test_init(self):
self.assertEqual(self.doctor.project_name, "doctor")
self.assertEqual(self.doctor.case_name, "doctor-notification")
self.assertEqual(
self.doctor.repo,
constants.CONST.__getattribute__("dir_repo_doctor"))
self.assertEqual(
self.doctor.cmd,
'cd {}/tests && ./run.sh'.format(self.doctor.repo))
if __name__ == "__main__":
unittest.main(verbosity=2)
|
Add unit tests for doctor
|
Add unit tests for doctor
Change-Id: Iba3c63592623623507e009b0b9f514792c82d25e
Signed-off-by: Cédric Ollivier <d48310251a4a484d041bc5d09a9ac4d86d20f793@orange.com>
|
Python
|
apache-2.0
|
mywulin/functest,opnfv/functest,mywulin/functest,opnfv/functest
|
Add unit tests for doctor
Change-Id: Iba3c63592623623507e009b0b9f514792c82d25e
Signed-off-by: Cédric Ollivier <d48310251a4a484d041bc5d09a9ac4d86d20f793@orange.com>
|
#!/usr/bin/env python
# Copyright (c) 2017 Orange and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
# pylint: disable=missing-docstring
import logging
import unittest
from functest.opnfv_tests.features import doctor
from functest.utils import constants
class DoctorTesting(unittest.TestCase):
logging.disable(logging.CRITICAL)
def setUp(self):
self.doctor = doctor.Doctor()
def test_init(self):
self.assertEqual(self.doctor.project_name, "doctor")
self.assertEqual(self.doctor.case_name, "doctor-notification")
self.assertEqual(
self.doctor.repo,
constants.CONST.__getattribute__("dir_repo_doctor"))
self.assertEqual(
self.doctor.cmd,
'cd {}/tests && ./run.sh'.format(self.doctor.repo))
if __name__ == "__main__":
unittest.main(verbosity=2)
|
<commit_before><commit_msg>Add unit tests for doctor
Change-Id: Iba3c63592623623507e009b0b9f514792c82d25e
Signed-off-by: Cédric Ollivier <d48310251a4a484d041bc5d09a9ac4d86d20f793@orange.com><commit_after>
|
#!/usr/bin/env python
# Copyright (c) 2017 Orange and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
# pylint: disable=missing-docstring
import logging
import unittest
from functest.opnfv_tests.features import doctor
from functest.utils import constants
class DoctorTesting(unittest.TestCase):
logging.disable(logging.CRITICAL)
def setUp(self):
self.doctor = doctor.Doctor()
def test_init(self):
self.assertEqual(self.doctor.project_name, "doctor")
self.assertEqual(self.doctor.case_name, "doctor-notification")
self.assertEqual(
self.doctor.repo,
constants.CONST.__getattribute__("dir_repo_doctor"))
self.assertEqual(
self.doctor.cmd,
'cd {}/tests && ./run.sh'.format(self.doctor.repo))
if __name__ == "__main__":
unittest.main(verbosity=2)
|
Add unit tests for doctor
Change-Id: Iba3c63592623623507e009b0b9f514792c82d25e
Signed-off-by: Cédric Ollivier <d48310251a4a484d041bc5d09a9ac4d86d20f793@orange.com>#!/usr/bin/env python
# Copyright (c) 2017 Orange and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
# pylint: disable=missing-docstring
import logging
import unittest
from functest.opnfv_tests.features import doctor
from functest.utils import constants
class DoctorTesting(unittest.TestCase):
logging.disable(logging.CRITICAL)
def setUp(self):
self.doctor = doctor.Doctor()
def test_init(self):
self.assertEqual(self.doctor.project_name, "doctor")
self.assertEqual(self.doctor.case_name, "doctor-notification")
self.assertEqual(
self.doctor.repo,
constants.CONST.__getattribute__("dir_repo_doctor"))
self.assertEqual(
self.doctor.cmd,
'cd {}/tests && ./run.sh'.format(self.doctor.repo))
if __name__ == "__main__":
unittest.main(verbosity=2)
|
<commit_before><commit_msg>Add unit tests for doctor
Change-Id: Iba3c63592623623507e009b0b9f514792c82d25e
Signed-off-by: Cédric Ollivier <d48310251a4a484d041bc5d09a9ac4d86d20f793@orange.com><commit_after>#!/usr/bin/env python
# Copyright (c) 2017 Orange and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
# pylint: disable=missing-docstring
import logging
import unittest
from functest.opnfv_tests.features import doctor
from functest.utils import constants
class DoctorTesting(unittest.TestCase):
logging.disable(logging.CRITICAL)
def setUp(self):
self.doctor = doctor.Doctor()
def test_init(self):
self.assertEqual(self.doctor.project_name, "doctor")
self.assertEqual(self.doctor.case_name, "doctor-notification")
self.assertEqual(
self.doctor.repo,
constants.CONST.__getattribute__("dir_repo_doctor"))
self.assertEqual(
self.doctor.cmd,
'cd {}/tests && ./run.sh'.format(self.doctor.repo))
if __name__ == "__main__":
unittest.main(verbosity=2)
|
|
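The test asserts four attributes set in `Doctor.__init__`. The real class lives in `functest.opnfv_tests.features.doctor` and is not included in this record; a minimal implementation consistent with the assertions would look like this.

```python
# Hypothetical minimal Doctor class that would satisfy the assertions above;
# the real implementation is not part of this record.
from functest.utils import constants


class Doctor(object):
    def __init__(self):
        self.project_name = "doctor"
        self.case_name = "doctor-notification"
        self.repo = constants.CONST.__getattribute__("dir_repo_doctor")
        self.cmd = 'cd {}/tests && ./run.sh'.format(self.repo)
```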
1389c97b0e78e72496b62dc17a97baba7c12115a
|
knocky_code/knocky_config.py
|
knocky_code/knocky_config.py
|
#encoding: utf-8
#
# (C) 2016 José Millán Soto <jmillan@kde-espana.org>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pickle
def load_config(fName):
f = open(fName)
data = pickle.load(f)
f.close()
return data
def save_config(fName, data):
f = open(fName, 'w')
pickle.dump(data, f)
f.close()
|
Add module for handling configuration files
|
Add module for handling configuration files
This module will store and load the configuration files (for now just a
dictionary which contains the users as keys and the password as values), using
pickle module.
|
Python
|
apache-2.0
|
gpul-labs/knocky
|
Add module for handling configuration files
This module will store and load the configuration files (for now just a
dictionary which contains the users as keys and the password as values), using
pickle module.
|
#encoding: utf-8
#
# (C) 2016 José Millán Soto <jmillan@kde-espana.org>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pickle
def load_config(fName):
f = open(fName)
data = pickle.load(f)
f.close()
return data
def save_config(fName, data):
f = open(fName, 'w')
pickle.dump(data, f)
f.close()
|
<commit_before><commit_msg>Add module for handling configuration files
This module will store and load the configuration files (for now just a
dictionary which contains the users as keys and the password as values), using
pickle module.<commit_after>
|
#encoding: utf-8
#
# (C) 2016 José Millán Soto <jmillan@kde-espana.org>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pickle
def load_config(fName):
f = open(fName)
data = pickle.load(f)
f.close()
return data
def save_config(fName, data):
f = open(fName, 'w')
pickle.dump(data, f)
f.close()
|
Add module for handling configuration files
This module will store and load the configuration files (for now just a
dictionary which contains the users as keys and the password as values), using
pickle module.#encoding: utf-8
#
# (C) 2016 José Millán Soto <jmillan@kde-espana.org>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pickle
def load_config(fName):
f = open(fName)
data = pickle.load(f)
f.close()
return data
def save_config(fName, data):
f = open(fName, 'w')
pickle.dump(data, f)
f.close()
|
<commit_before><commit_msg>Add module for handling configuration files
This module will store and load the configuration files (for now just a
dictionary which contains the users as keys and the password as values), using
pickle module.<commit_after>#encoding: utf-8
#
# (C) 2016 José Millán Soto <jmillan@kde-espana.org>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pickle
def load_config(fName):
f = open(fName)
data = pickle.load(f)
f.close()
return data
def save_config(fName, data):
f = open(fName, 'w')
pickle.dump(data, f)
f.close()
|
|
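The helpers open the files in text mode, which only works with `pickle` on Python 2; on Python 3, `pickle.load` and `pickle.dump` require binary file objects. A sketch of the same helpers with binary modes and context managers:

```python
# Same load/save helpers with the binary modes pickle needs on Python 3.
import pickle


def load_config(fname):
    with open(fname, 'rb') as f:
        return pickle.load(f)


def save_config(fname, data):
    with open(fname, 'wb') as f:
        pickle.dump(data, f)
```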
417196332246474b306e81c8d7d2f3a7a5065eb5
|
senic_hub/backend/subprocess_run.py
|
senic_hub/backend/subprocess_run.py
|
"""Provides `subprocess.run()` from Python 3.5+ if available. Otherwise falls back to `subprocess.check_output()`."""
try:
from subprocess import run
except ImportError:
from collections import namedtuple
from subprocess import check_output
def run(args, *, stdin=None, input=None, stdout=None, stderr=None, shell=False, timeout=None, check=False,
encoding=None, errors=None):
stdout_bytes = check_output(args, stdin=stdin, stderr=stderr, shell=shell, timeout=timeout)
Output = namedtuple('Output', ['stdout'])
return Output(stdout=stdout_bytes)
|
"""Provides `subprocess.run()` from Python 3.5+ if available. Otherwise falls back to `subprocess.check_output()`."""
try:
from subprocess import run
except ImportError:
from collections import namedtuple
from subprocess import check_output, CalledProcessError
def run(args, *, stdin=None, input=None, stdout=None, stderr=None, shell=False, timeout=None, check=False,
encoding=None, errors=None):
try:
stdout_bytes = check_output(args, stdin=stdin, stderr=stderr, shell=shell, timeout=timeout)
except CalledProcessError as e:
if check:
raise
else:
stdout_bytes = e.output
Output = namedtuple('Output', ['stdout'])
return Output(stdout=stdout_bytes)
|
Fix throwing error although check arg is false
|
Fix throwing error although check arg is false
|
Python
|
mit
|
grunskis/senic-hub,grunskis/nuimo-hub-backend,grunskis/senic-hub,grunskis/senic-hub,grunskis/senic-hub,grunskis/senic-hub,grunskis/nuimo-hub-backend,getsenic/senic-hub,grunskis/nuimo-hub-backend,grunskis/nuimo-hub-backend,grunskis/senic-hub,getsenic/senic-hub,grunskis/nuimo-hub-backend
|
"""Provides `subprocess.run()` from Python 3.5+ if available. Otherwise falls back to `subprocess.check_output()`."""
try:
from subprocess import run
except ImportError:
from collections import namedtuple
from subprocess import check_output
def run(args, *, stdin=None, input=None, stdout=None, stderr=None, shell=False, timeout=None, check=False,
encoding=None, errors=None):
stdout_bytes = check_output(args, stdin=stdin, stderr=stderr, shell=shell, timeout=timeout)
Output = namedtuple('Output', ['stdout'])
return Output(stdout=stdout_bytes)
Fix throwing error although check arg is false
|
"""Provides `subprocess.run()` from Python 3.5+ if available. Otherwise falls back to `subprocess.check_output()`."""
try:
from subprocess import run
except ImportError:
from collections import namedtuple
from subprocess import check_output, CalledProcessError
def run(args, *, stdin=None, input=None, stdout=None, stderr=None, shell=False, timeout=None, check=False,
encoding=None, errors=None):
try:
stdout_bytes = check_output(args, stdin=stdin, stderr=stderr, shell=shell, timeout=timeout)
except CalledProcessError as e:
if check:
raise
else:
stdout_bytes = e.output
Output = namedtuple('Output', ['stdout'])
return Output(stdout=stdout_bytes)
|
<commit_before>"""Provides `subprocess.run()` from Python 3.5+ if available. Otherwise falls back to `subprocess.check_output()`."""
try:
from subprocess import run
except ImportError:
from collections import namedtuple
from subprocess import check_output
def run(args, *, stdin=None, input=None, stdout=None, stderr=None, shell=False, timeout=None, check=False,
encoding=None, errors=None):
stdout_bytes = check_output(args, stdin=stdin, stderr=stderr, shell=shell, timeout=timeout)
Output = namedtuple('Output', ['stdout'])
return Output(stdout=stdout_bytes)
<commit_msg>Fix throwing error although check arg is false<commit_after>
|
"""Provides `subprocess.run()` from Python 3.5+ if available. Otherwise falls back to `subprocess.check_output()`."""
try:
from subprocess import run
except ImportError:
from collections import namedtuple
from subprocess import check_output, CalledProcessError
def run(args, *, stdin=None, input=None, stdout=None, stderr=None, shell=False, timeout=None, check=False,
encoding=None, errors=None):
try:
stdout_bytes = check_output(args, stdin=stdin, stderr=stderr, shell=shell, timeout=timeout)
except CalledProcessError as e:
if check:
raise
else:
stdout_bytes = e.output
Output = namedtuple('Output', ['stdout'])
return Output(stdout=stdout_bytes)
|
"""Provides `subprocess.run()` from Python 3.5+ if available. Otherwise falls back to `subprocess.check_output()`."""
try:
from subprocess import run
except ImportError:
from collections import namedtuple
from subprocess import check_output
def run(args, *, stdin=None, input=None, stdout=None, stderr=None, shell=False, timeout=None, check=False,
encoding=None, errors=None):
stdout_bytes = check_output(args, stdin=stdin, stderr=stderr, shell=shell, timeout=timeout)
Output = namedtuple('Output', ['stdout'])
return Output(stdout=stdout_bytes)
Fix throwing error although check arg is false"""Provides `subprocess.run()` from Python 3.5+ if available. Otherwise falls back to `subprocess.check_output()`."""
try:
from subprocess import run
except ImportError:
from collections import namedtuple
from subprocess import check_output, CalledProcessError
def run(args, *, stdin=None, input=None, stdout=None, stderr=None, shell=False, timeout=None, check=False,
encoding=None, errors=None):
try:
stdout_bytes = check_output(args, stdin=stdin, stderr=stderr, shell=shell, timeout=timeout)
except CalledProcessError as e:
if check:
raise
else:
stdout_bytes = e.output
Output = namedtuple('Output', ['stdout'])
return Output(stdout=stdout_bytes)
|
<commit_before>"""Provides `subprocess.run()` from Python 3.5+ if available. Otherwise falls back to `subprocess.check_output()`."""
try:
from subprocess import run
except ImportError:
from collections import namedtuple
from subprocess import check_output
def run(args, *, stdin=None, input=None, stdout=None, stderr=None, shell=False, timeout=None, check=False,
encoding=None, errors=None):
stdout_bytes = check_output(args, stdin=stdin, stderr=stderr, shell=shell, timeout=timeout)
Output = namedtuple('Output', ['stdout'])
return Output(stdout=stdout_bytes)
<commit_msg>Fix error being raised even when check arg is false<commit_after>"""Provides `subprocess.run()` from Python 3.5+ if available. Otherwise falls back to `subprocess.check_output()`."""
try:
from subprocess import run
except ImportError:
from collections import namedtuple
from subprocess import check_output, CalledProcessError
def run(args, *, stdin=None, input=None, stdout=None, stderr=None, shell=False, timeout=None, check=False,
encoding=None, errors=None):
try:
stdout_bytes = check_output(args, stdin=stdin, stderr=stderr, shell=shell, timeout=timeout)
except CalledProcessError as e:
if check:
raise
else:
stdout_bytes = e.output
Output = namedtuple('Output', ['stdout'])
return Output(stdout=stdout_bytes)
|
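A minimal usage sketch of the patched fallback above, exercising the pre-3.5 code path specifically (on Python 3.5+ the real `subprocess.run` is imported instead and returns a `CompletedProcess`). It assumes the snippet above has already been executed and that a POSIX `false` command is available.
from subprocess import CalledProcessError
# check=False: the failing command no longer raises; whatever output was
# captured before the failure (b'' here) is returned on the namedtuple.
result = run(['false'], check=False)
print(result.stdout)  # b''
# check=True: the CalledProcessError still propagates, as before the fix.
try:
    run(['false'], check=True)
except CalledProcessError as exc:
    print('command failed with exit code', exc.returncode)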
6d6b942056eccf3e0c69e810e9a2d66cd70d240c
|
books/services.py
|
books/services.py
|
from datetime import date
from datetime import timedelta
from django.utils import timezone
from books.models import Transaction
def get_months_transactions():
today = timezone.now()
first_day_of_a_month = date(today.year, today.month, 1)
qs = Transaction.objects.filter(created__gte=first_day_of_a_month)
return qs
def get_last_months_transactions():
first_day_of_a_month = timezone.now().replace(day=1)
last_month = first_day_of_a_month - timedelta(days=1)
first_day_of_last_month = date(last_month.year, last_month.month, 1)
qs = Transaction.objects.filter(created__gte=first_day_of_last_month)
return qs
def get_this_years_transactions():
today = timezone.now()
first_day_of_this_year = date(today.year, 1, 1)
qs = Transaction.objects.filter(created__gte=first_day_of_this_year)
return qs
|
Add helper functions for domain logic
|
Add helper functions for domain logic
|
Python
|
mit
|
trimailov/finance,trimailov/finance,trimailov/finance
|
Add helper functions for domain logic
|
from datetime import date
from datetime import timedelta
from django.utils import timezone
from books.models import Transaction
def get_months_transactions():
today = timezone.now()
first_day_of_a_month = date(today.year, today.month, 1)
qs = Transaction.objects.filter(created__gte=first_day_of_a_month)
return qs
def get_last_months_transactions():
first_day_of_a_month = timezone.now().replace(day=1)
last_month = first_day_of_a_month - timedelta(days=1)
first_day_of_last_month = date(last_month.year, last_month.month, 1)
qs = Transaction.objects.filter(created__gte=first_day_of_last_month)
return qs
def get_this_years_transactions():
today = timezone.now()
first_day_of_this_year = date(today.year, 1, 1)
qs = Transaction.objects.filter(created__gte=first_day_of_this_year)
return qs
|
<commit_before><commit_msg>Add helper functions for domain logic<commit_after>
|
from datetime import date
from datetime import timedelta
from django.utils import timezone
from books.models import Transaction
def get_months_transactions():
today = timezone.now()
first_day_of_a_month = date(today.year, today.month, 1)
qs = Transaction.objects.filter(created__gte=first_day_of_a_month)
return qs
def get_last_months_transactions():
first_day_of_a_month = timezone.now().replace(day=1)
last_month = first_day_of_a_month - timedelta(days=1)
first_day_of_last_month = date(last_month.year, last_month.month, 1)
qs = Transaction.objects.filter(created__gte=first_day_of_last_month)
return qs
def get_this_years_transactions():
today = timezone.now()
first_day_of_this_year = date(today.year, 1, 1)
qs = Transaction.objects.filter(created__gte=first_day_of_this_year)
return qs
|
Add helper functions for domain logicfrom datetime import date
from datetime import timedelta
from django.utils import timezone
from books.models import Transaction
def get_months_transactions():
today = timezone.now()
first_day_of_a_month = date(today.year, today.month, 1)
qs = Transaction.objects.filter(created__gte=first_day_of_a_month)
return qs
def get_last_months_transactions():
first_day_of_a_month = timezone.now().replace(day=1)
last_month = first_day_of_a_month - timedelta(days=1)
first_day_of_last_month = date(last_month.year, last_month.month, 1)
qs = Transaction.objects.filter(created__gte=first_day_of_last_month)
return qs
def get_this_years_transactions():
today = timezone.now()
first_day_of_this_year = date(today.year, 1, 1)
qs = Transaction.objects.filter(created__gte=first_day_of_this_year)
return qs
|
<commit_before><commit_msg>Add helper functions for domain logic<commit_after>from datetime import date
from datetime import timedelta
from django.utils import timezone
from books.models import Transaction
def get_months_transactions():
today = timezone.now()
first_day_of_a_month = date(today.year, today.month, 1)
qs = Transaction.objects.filter(created__gte=first_day_of_a_month)
return qs
def get_last_months_transactions():
first_day_of_a_month = timezone.now().replace(day=1)
last_month = first_day_of_a_month - timedelta(days=1)
first_day_of_last_month = date(last_month.year, last_month.month, 1)
qs = Transaction.objects.filter(created__gte=first_day_of_last_month)
return qs
def get_this_years_transactions():
today = timezone.now()
first_day_of_this_year = date(today.year, 1, 1)
qs = Transaction.objects.filter(created__gte=first_day_of_this_year)
return qs
|
|
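A hypothetical way the helpers above might be consumed in a report or view. `Sum` is real Django, but the `amount` field is an assumption about the `Transaction` model that the record does not show.
from django.db.models import Sum
# Assumes Transaction has a numeric `amount` field (not shown in the record).
monthly = get_months_transactions().aggregate(total=Sum('amount'))['total']
yearly = get_this_years_transactions().aggregate(total=Sum('amount'))['total']
print('this month: {} / this year: {}'.format(monthly, yearly))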
c00e8e416429487fb225ba4e5506b16ea2aa9927
|
dakota-hydrotrend-uq-study.py
|
dakota-hydrotrend-uq-study.py
|
"""A Dakotathon uncertainty quantification experiment with Hydrotrend.
This experiment requires a WMT executor with PyMT installed. It also
requires Dakotathon and Hydrotrend installed as CSDMS components.
"""
import os
import numpy as np
from pymt.components import PolynomialChaos, Hydrotrend
from dakotathon.utils import configure_parameters
model, dakota = Hydrotrend(), PolynomialChaos()
experiment = {
'component': type(model).__name__,
'run_duration': 365, # days
'auxiliary_files': 'HYDRO0.HYPS', # the default Waipaoa hypsometry
'quadrature_order': 4,
'samples': 10000,
'seed': 17,
'probability_levels': [0.05, 0.10, 0.33, 0.50, 0.67, 0.90, 0.95],
'variance_based_decomp': True,
'descriptors': ['starting_mean_annual_temperature',
'total_annual_precipitation'],
'variable_type': 'uniform_uncertain',
'lower_bounds': [12.8, 1.4],
'upper_bounds': [15.8, 1.8],
'response_descriptors': 'channel_exit_water_sediment~suspended__mass_flow_rate',
'response_statistics': 'median',
}
dakota_parameters, model_parameters = configure_parameters(experiment)
dakota_parameters['run_directory'] = model.setup(os.getcwd(), **model_parameters)
cfg_file = 'HYDRO.IN' # get from pymt eventually
dakota_tmpl_file = cfg_file + '.dtmpl'
os.rename(cfg_file, dakota_tmpl_file)
dakota_parameters['template_file'] = dakota_tmpl_file
dakota.setup(dakota_parameters['run_directory'], **dakota_parameters)
dakota.initialize('dakota.yaml')
dakota.update()
dakota.finalize()
|
Add script for UQ experiment
|
Add script for UQ experiment
|
Python
|
mit
|
mdpiper/AGU-2016
|
Add script for UQ experiment
|
"""A Dakotathon uncertainty quantification experiment with Hydrotrend.
This experiment requires a WMT executor with PyMT installed. It also
requires Dakotathon and Hydrotrend installed as CSDMS components.
"""
import os
import numpy as np
from pymt.components import PolynomialChaos, Hydrotrend
from dakotathon.utils import configure_parameters
model, dakota = Hydrotrend(), PolynomialChaos()
experiment = {
'component': type(model).__name__,
'run_duration': 365, # days
'auxiliary_files': 'HYDRO0.HYPS', # the default Waipaoa hypsometry
'quadrature_order': 4,
'samples': 10000,
'seed': 17,
'probability_levels': [0.05, 0.10, 0.33, 0.50, 0.67, 0.90, 0.95],
'variance_based_decomp': True,
'descriptors': ['starting_mean_annual_temperature',
'total_annual_precipitation'],
'variable_type': 'uniform_uncertain',
'lower_bounds': [12.8, 1.4],
'upper_bounds': [15.8, 1.8],
'response_descriptors': 'channel_exit_water_sediment~suspended__mass_flow_rate',
'response_statistics': 'median',
}
dakota_parameters, model_parameters = configure_parameters(experiment)
dakota_parameters['run_directory'] = model.setup(os.getcwd(), **model_parameters)
cfg_file = 'HYDRO.IN' # get from pymt eventually
dakota_tmpl_file = cfg_file + '.dtmpl'
os.rename(cfg_file, dakota_tmpl_file)
dakota_parameters['template_file'] = dakota_tmpl_file
dakota.setup(dakota_parameters['run_directory'], **dakota_parameters)
dakota.initialize('dakota.yaml')
dakota.update()
dakota.finalize()
|
<commit_before><commit_msg>Add script for UQ experiment<commit_after>
|
"""A Dakotathon uncertainty quantification experiment with Hydrotrend.
This experiment requires a WMT executor with PyMT installed. It also
requires Dakotathon and Hydrotrend installed as CSDMS components.
"""
import os
import numpy as np
from pymt.components import PolynomialChaos, Hydrotrend
from dakotathon.utils import configure_parameters
model, dakota = Hydrotrend(), PolynomialChaos()
experiment = {
'component': type(model).__name__,
'run_duration': 365, # days
'auxiliary_files': 'HYDRO0.HYPS', # the default Waipaoa hypsometry
'quadrature_order': 4,
'samples': 10000,
'seed': 17,
'probability_levels': [0.05, 0.10, 0.33, 0.50, 0.67, 0.90, 0.95],
'variance_based_decomp': True,
'descriptors': ['starting_mean_annual_temperature',
'total_annual_precipitation'],
'variable_type': 'uniform_uncertain',
'lower_bounds': [12.8, 1.4],
'upper_bounds': [15.8, 1.8],
'response_descriptors': 'channel_exit_water_sediment~suspended__mass_flow_rate',
'response_statistics': 'median',
}
dakota_parameters, model_parameters = configure_parameters(experiment)
dakota_parameters['run_directory'] = model.setup(os.getcwd(), **model_parameters)
cfg_file = 'HYDRO.IN' # get from pymt eventually
dakota_tmpl_file = cfg_file + '.dtmpl'
os.rename(cfg_file, dakota_tmpl_file)
dakota_parameters['template_file'] = dakota_tmpl_file
dakota.setup(dakota_parameters['run_directory'], **dakota_parameters)
dakota.initialize('dakota.yaml')
dakota.update()
dakota.finalize()
|
Add script for UQ experiment"""A Dakotathon uncertainty quantification experiment with Hydrotrend.
This experiment requires a WMT executor with PyMT installed. It also
requires Dakotathon and Hydrotrend installed as CSDMS components.
"""
import os
import numpy as np
from pymt.components import PolynomialChaos, Hydrotrend
from dakotathon.utils import configure_parameters
model, dakota = Hydrotrend(), PolynomialChaos()
experiment = {
'component': type(model).__name__,
'run_duration': 365, # days
'auxiliary_files': 'HYDRO0.HYPS', # the default Waipaoa hypsometry
'quadrature_order': 4,
'samples': 10000,
'seed': 17,
'probability_levels': [0.05, 0.10, 0.33, 0.50, 0.67, 0.90, 0.95],
'variance_based_decomp': True,
'descriptors': ['starting_mean_annual_temperature',
'total_annual_precipitation'],
'variable_type': 'uniform_uncertain',
'lower_bounds': [12.8, 1.4],
'upper_bounds': [15.8, 1.8],
'response_descriptors': 'channel_exit_water_sediment~suspended__mass_flow_rate',
'response_statistics': 'median',
}
dakota_parameters, model_parameters = configure_parameters(experiment)
dakota_parameters['run_directory'] = model.setup(os.getcwd(), **model_parameters)
cfg_file = 'HYDRO.IN' # get from pymt eventually
dakota_tmpl_file = cfg_file + '.dtmpl'
os.rename(cfg_file, dakota_tmpl_file)
dakota_parameters['template_file'] = dakota_tmpl_file
dakota.setup(dakota_parameters['run_directory'], **dakota_parameters)
dakota.initialize('dakota.yaml')
dakota.update()
dakota.finalize()
|
<commit_before><commit_msg>Add script for UQ experiment<commit_after>"""A Dakotathon uncertainty quantification experiment with Hydrotrend.
This experiment requires a WMT executor with PyMT installed. It also
requires Dakotathon and Hydrotrend installed as CSDMS components.
"""
import os
import numpy as np
from pymt.components import PolynomialChaos, Hydrotrend
from dakotathon.utils import configure_parameters
model, dakota = Hydrotrend(), PolynomialChaos()
experiment = {
'component': type(model).__name__,
'run_duration': 365, # days
'auxiliary_files': 'HYDRO0.HYPS', # the default Waipaoa hypsometry
'quadrature_order': 4,
'samples': 10000,
'seed': 17,
'probability_levels': [0.05, 0.10, 0.33, 0.50, 0.67, 0.90, 0.95],
'variance_based_decomp': True,
'descriptors': ['starting_mean_annual_temperature',
'total_annual_precipitation'],
'variable_type': 'uniform_uncertain',
'lower_bounds': [12.8, 1.4],
'upper_bounds': [15.8, 1.8],
'response_descriptors': 'channel_exit_water_sediment~suspended__mass_flow_rate',
'response_statistics': 'median',
}
dakota_parameters, model_parameters = configure_parameters(experiment)
dakota_parameters['run_directory'] = model.setup(os.getcwd(), **model_parameters)
cfg_file = 'HYDRO.IN' # get from pymt eventually
dakota_tmpl_file = cfg_file + '.dtmpl'
os.rename(cfg_file, dakota_tmpl_file)
dakota_parameters['template_file'] = dakota_tmpl_file
dakota.setup(dakota_parameters['run_directory'], **dakota_parameters)
dakota.initialize('dakota.yaml')
dakota.update()
dakota.finalize()
|
|
0499189fb3906d3f9e5892ade07a136b0cc3a97e
|
comics/crawlers/deepfried.py
|
comics/crawlers/deepfried.py
|
from comics.crawler.base import BaseComicCrawler
from comics.crawler.meta import BaseComicMeta
class ComicMeta(BaseComicMeta):
name = 'Deep Fried'
language = 'en'
url = 'http://www.whatisdeepfried.com/'
start_date = '2001-09-16'
history_capable_days = 14
schedule = 'Mo,Tu,We,Th,Fr,Sa'
time_zone = -5
rights = 'Jason Yungbluth'
class ComicCrawler(BaseComicCrawler):
def _get_url(self):
self.parse_feed('http://www.whatisdeepfried.com/feed/')
for entry in self.feed.entries:
if self.timestamp_to_date(entry.updated_parsed) == self.pub_date:
self.title = entry.title
pieces = entry.summary.split('"')
for i, piece in enumerate(pieces):
if piece.count('src='):
self.url = pieces[i + 1]
return
|
Add crawler for 'Deep Fried'
|
Add crawler for 'Deep Fried'
|
Python
|
agpl-3.0
|
jodal/comics,datagutten/comics,datagutten/comics,datagutten/comics,jodal/comics,jodal/comics,klette/comics,klette/comics,datagutten/comics,jodal/comics,klette/comics
|
Add crawler for 'Deep Fried'
|
from comics.crawler.base import BaseComicCrawler
from comics.crawler.meta import BaseComicMeta
class ComicMeta(BaseComicMeta):
name = 'Deep Fried'
language = 'en'
url = 'http://www.whatisdeepfried.com/'
start_date = '2001-09-16'
history_capable_days = 14
schedule = 'Mo,Tu,We,Th,Fr,Sa'
time_zone = -5
rights = 'Jason Yungbluth'
class ComicCrawler(BaseComicCrawler):
def _get_url(self):
self.parse_feed('http://www.whatisdeepfried.com/feed/')
for entry in self.feed.entries:
if self.timestamp_to_date(entry.updated_parsed) == self.pub_date:
self.title = entry.title
pieces = entry.summary.split('"')
for i, piece in enumerate(pieces):
if piece.count('src='):
self.url = pieces[i + 1]
return
|
<commit_before><commit_msg>Add crawler for 'Deep Fried'<commit_after>
|
from comics.crawler.base import BaseComicCrawler
from comics.crawler.meta import BaseComicMeta
class ComicMeta(BaseComicMeta):
name = 'Deep Fried'
language = 'en'
url = 'http://www.whatisdeepfried.com/'
start_date = '2001-09-16'
history_capable_days = 14
schedule = 'Mo,Tu,We,Th,Fr,Sa'
time_zone = -5
rights = 'Jason Yungbluth'
class ComicCrawler(BaseComicCrawler):
def _get_url(self):
self.parse_feed('http://www.whatisdeepfried.com/feed/')
for entry in self.feed.entries:
if self.timestamp_to_date(entry.updated_parsed) == self.pub_date:
self.title = entry.title
pieces = entry.summary.split('"')
for i, piece in enumerate(pieces):
if piece.count('src='):
self.url = pieces[i + 1]
return
|
Add crawler for 'Deep Fried'from comics.crawler.base import BaseComicCrawler
from comics.crawler.meta import BaseComicMeta
class ComicMeta(BaseComicMeta):
name = 'Deep Fried'
language = 'en'
url = 'http://www.whatisdeepfried.com/'
start_date = '2001-09-16'
history_capable_days = 14
schedule = 'Mo,Tu,We,Th,Fr,Sa'
time_zone = -5
rights = 'Jason Yungbluth'
class ComicCrawler(BaseComicCrawler):
def _get_url(self):
self.parse_feed('http://www.whatisdeepfried.com/feed/')
for entry in self.feed.entries:
if self.timestamp_to_date(entry.updated_parsed) == self.pub_date:
self.title = entry.title
pieces = entry.summary.split('"')
for i, piece in enumerate(pieces):
if piece.count('src='):
self.url = pieces[i + 1]
return
|
<commit_before><commit_msg>Add crawler for 'Deep Fried'<commit_after>from comics.crawler.base import BaseComicCrawler
from comics.crawler.meta import BaseComicMeta
class ComicMeta(BaseComicMeta):
name = 'Deep Fried'
language = 'en'
url = 'http://www.whatisdeepfried.com/'
start_date = '2001-09-16'
history_capable_days = 14
schedule = 'Mo,Tu,We,Th,Fr,Sa'
time_zone = -5
rights = 'Jason Yungbluth'
class ComicCrawler(BaseComicCrawler):
def _get_url(self):
self.parse_feed('http://www.whatisdeepfried.com/feed/')
for entry in self.feed.entries:
if self.timestamp_to_date(entry.updated_parsed) == self.pub_date:
self.title = entry.title
pieces = entry.summary.split('"')
for i, piece in enumerate(pieces):
if piece.count('src='):
self.url = pieces[i + 1]
return
|
|
4a8d8e8e13d0ca685608532159868fc453df12ca
|
login/migrations/0017_auto_20181119_1631.py
|
login/migrations/0017_auto_20181119_1631.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-11-19 15:31
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('login', '0016_auto_20181018_1401'),
]
operations = [
migrations.AlterField(
model_name='myuser',
name='is_native',
field=models.BooleanField(default=True),
),
]
|
Migrate default for is_native to database
|
Migrate default for is_native to database
|
Python
|
agpl-3.0
|
openego/oeplatform,openego/oeplatform,openego/oeplatform,openego/oeplatform
|
Migrate default for is_native to database
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-11-19 15:31
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('login', '0016_auto_20181018_1401'),
]
operations = [
migrations.AlterField(
model_name='myuser',
name='is_native',
field=models.BooleanField(default=True),
),
]
|
<commit_before><commit_msg>Migrate default for is_native to database<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-11-19 15:31
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('login', '0016_auto_20181018_1401'),
]
operations = [
migrations.AlterField(
model_name='myuser',
name='is_native',
field=models.BooleanField(default=True),
),
]
|
Migrate default for is_native to database# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-11-19 15:31
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('login', '0016_auto_20181018_1401'),
]
operations = [
migrations.AlterField(
model_name='myuser',
name='is_native',
field=models.BooleanField(default=True),
),
]
|
<commit_before><commit_msg>Migrate default for is_native to database<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-11-19 15:31
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('login', '0016_auto_20181018_1401'),
]
operations = [
migrations.AlterField(
model_name='myuser',
name='is_native',
field=models.BooleanField(default=True),
),
]
|
|
ecf09e199a1dc03eaa7ad58f77865fd4d2de5ba3
|
tests/test_io.py
|
tests/test_io.py
|
"""
Tests for the BSE IO functions
"""
# Most functionality is covered under other tests.
# This tests the remainder
import bse
import os
import pytest
_data_dir = bse.default_data_dir
@pytest.mark.parametrize('file_path', [
'CC-PVDZ.0.table.json',
'CRENBL.0.table.json',
'dunning/CC-PVDZ.0.element.json',
'crenb/CRENBL.0.element.json',
'dunning/CC-PVDZ_dunning1989a.0.json',
'crenb/CRENBL_ross1994a.0.json',
'crenb/CRENBL-ECP_ross1994a.0.json'
])
def test_read_write_basis(file_path):
# needed to be tested to make sure something isn't left
# out of the sort lists, etc
full_path = os.path.join(_data_dir, file_path)
full_path_new = full_path + '.new'
data = bse.io.read_json_basis(full_path)
bse.io.write_json_basis(full_path_new, data)
os.remove(full_path_new)
@pytest.mark.parametrize('file_path', [
'REFERENCES.json'
])
def test_read_write_references(file_path):
# needed to be tested to make sure something isn't left
# out of the sort lists, etc
full_path = os.path.join(_data_dir, file_path)
full_path_new = full_path + '.new'
data = bse.io.read_references(full_path)
bse.io.write_references(full_path_new, data)
os.remove(full_path_new)
|
Test writing of json files
|
Test writing of json files
|
Python
|
bsd-3-clause
|
MOLSSI-BSE/basis_set_exchange
|
Test writing of json files
|
"""
Tests for the BSE IO functions
"""
# Most functionality is covered under other tests.
# This tests the remainder
import bse
import os
import pytest
_data_dir = bse.default_data_dir
@pytest.mark.parametrize('file_path', [
'CC-PVDZ.0.table.json',
'CRENBL.0.table.json',
'dunning/CC-PVDZ.0.element.json',
'crenb/CRENBL.0.element.json',
'dunning/CC-PVDZ_dunning1989a.0.json',
'crenb/CRENBL_ross1994a.0.json',
'crenb/CRENBL-ECP_ross1994a.0.json'
])
def test_read_write_basis(file_path):
# needed to be tested to make sure something isn't left
# out of the sort lists, etc
full_path = os.path.join(_data_dir, file_path)
full_path_new = full_path + '.new'
data = bse.io.read_json_basis(full_path)
bse.io.write_json_basis(full_path_new, data)
os.remove(full_path_new)
@pytest.mark.parametrize('file_path', [
'REFERENCES.json'
])
def test_read_write_references(file_path):
# needed to be tested to make sure something isn't left
# out of the sort lists, etc
full_path = os.path.join(_data_dir, file_path)
full_path_new = full_path + '.new'
data = bse.io.read_references(full_path)
bse.io.write_references(full_path_new, data)
os.remove(full_path_new)
|
<commit_before><commit_msg>Test writing of json files<commit_after>
|
"""
Tests for the BSE IO functions
"""
# Most functionality is covered under other tests.
# This tests the remainder
import bse
import os
import pytest
_data_dir = bse.default_data_dir
@pytest.mark.parametrize('file_path', [
'CC-PVDZ.0.table.json',
'CRENBL.0.table.json',
'dunning/CC-PVDZ.0.element.json',
'crenb/CRENBL.0.element.json',
'dunning/CC-PVDZ_dunning1989a.0.json',
'crenb/CRENBL_ross1994a.0.json',
'crenb/CRENBL-ECP_ross1994a.0.json'
])
def test_read_write_basis(file_path):
# needed to be tested to make sure something isn't left
# out of the sort lists, etc
full_path = os.path.join(_data_dir, file_path)
full_path_new = full_path + '.new'
data = bse.io.read_json_basis(full_path)
bse.io.write_json_basis(full_path_new, data)
os.remove(full_path_new)
@pytest.mark.parametrize('file_path', [
'REFERENCES.json'
])
def test_read_write_references(file_path):
# needed to be tested to make sure something isn't left
# out of the sort lists, etc
full_path = os.path.join(_data_dir, file_path)
full_path_new = full_path + '.new'
data = bse.io.read_references(full_path)
bse.io.write_references(full_path_new, data)
os.remove(full_path_new)
|
Test writing of json files"""
Tests for the BSE IO functions
"""
# Most functionality is covered under other tests.
# This tests the remainder
import bse
import os
import pytest
_data_dir = bse.default_data_dir
@pytest.mark.parametrize('file_path', [
'CC-PVDZ.0.table.json',
'CRENBL.0.table.json',
'dunning/CC-PVDZ.0.element.json',
'crenb/CRENBL.0.element.json',
'dunning/CC-PVDZ_dunning1989a.0.json',
'crenb/CRENBL_ross1994a.0.json',
'crenb/CRENBL-ECP_ross1994a.0.json'
])
def test_read_write_basis(file_path):
# needed to be tested to make sure something isn't left
# out of the sort lists, etc
full_path = os.path.join(_data_dir, file_path)
full_path_new = full_path + '.new'
data = bse.io.read_json_basis(full_path)
bse.io.write_json_basis(full_path_new, data)
os.remove(full_path_new)
@pytest.mark.parametrize('file_path', [
'REFERENCES.json'
])
def test_read_write_references(file_path):
# needed to be tested to make sure something isn't left
# out of the sort lists, etc
full_path = os.path.join(_data_dir, file_path)
full_path_new = full_path + '.new'
data = bse.io.read_references(full_path)
bse.io.write_references(full_path_new, data)
os.remove(full_path_new)
|
<commit_before><commit_msg>Test writing of json files<commit_after>"""
Tests for the BSE IO functions
"""
# Most functionality is covered under other tests.
# This tests the remainder
import bse
import os
import pytest
_data_dir = bse.default_data_dir
@pytest.mark.parametrize('file_path', [
'CC-PVDZ.0.table.json',
'CRENBL.0.table.json',
'dunning/CC-PVDZ.0.element.json',
'crenb/CRENBL.0.element.json',
'dunning/CC-PVDZ_dunning1989a.0.json',
'crenb/CRENBL_ross1994a.0.json',
'crenb/CRENBL-ECP_ross1994a.0.json'
])
def test_read_write_basis(file_path):
# needed to be tested to make sure something isn't left
# out of the sort lists, etc
full_path = os.path.join(_data_dir, file_path)
full_path_new = full_path + '.new'
data = bse.io.read_json_basis(full_path)
bse.io.write_json_basis(full_path_new, data)
os.remove(full_path_new)
@pytest.mark.parametrize('file_path', [
'REFERENCES.json'
])
def test_read_write_references(file_path):
# needed to be tested to make sure something isn't left
# out of the sort lists, etc
full_path = os.path.join(_data_dir, file_path)
full_path_new = full_path + '.new'
data = bse.io.read_references(full_path)
bse.io.write_references(full_path_new, data)
os.remove(full_path_new)
|
|
200a9008512ba644e2ae773745d3c8896f390eee
|
dataactcore/migrations/versions/a7249e2d8a1a_wipe_tas_lookups.py
|
dataactcore/migrations/versions/a7249e2d8a1a_wipe_tas_lookups.py
|
"""Wipe TAS lookups: one-off data migration to delete existing TASLookups
Revision ID: a7249e2d8a1a
Revises: 0c857b50962a
Create Date: 2016-10-18 19:58:19.837713
"""
from alembic import op
# revision identifiers, used by Alembic.
revision = 'a7249e2d8a1a'
down_revision = '0c857b50962a'
branch_labels = None
depends_on = None
def upgrade(engine_name):
globals()["upgrade_%s" % engine_name]()
def downgrade(engine_name):
globals()["downgrade_%s" % engine_name]()
def upgrade_data_broker():
op.execute('DELETE FROM tas_lookup')
def downgrade_data_broker():
pass
|
Add data migration for removing TAS entries
|
Add data migration for removing TAS entries
We won't be clearing the whole TAS table when importing in a later commit, so
drop all the data one last time.
|
Python
|
cc0-1.0
|
chambers-brian/SIG_Digital-Strategy_SI_ODP_Backend,fedspendingtransparency/data-act-broker-backend,fedspendingtransparency/data-act-broker-backend,chambers-brian/SIG_Digital-Strategy_SI_ODP_Backend
|
Add data migration for removing TAS entries
We won't be clearing the whole TAS table when importing in a later commit, so
drop all the data one last time.
|
"""Wipe TAS lookups: one-off data migration to delete existing TASLookups
Revision ID: a7249e2d8a1a
Revises: 0c857b50962a
Create Date: 2016-10-18 19:58:19.837713
"""
from alembic import op
# revision identifiers, used by Alembic.
revision = 'a7249e2d8a1a'
down_revision = '0c857b50962a'
branch_labels = None
depends_on = None
def upgrade(engine_name):
globals()["upgrade_%s" % engine_name]()
def downgrade(engine_name):
globals()["downgrade_%s" % engine_name]()
def upgrade_data_broker():
op.execute('DELETE FROM tas_lookup')
def downgrade_data_broker():
pass
|
<commit_before><commit_msg>Add data migration for removing TAS entries
We won't be clearing the whole TAS table when importing in a later commit, so
drop all the data one last time.<commit_after>
|
"""Wipe TAS lookups: one-off data migration to delete existing TASLookups
Revision ID: a7249e2d8a1a
Revises: 0c857b50962a
Create Date: 2016-10-18 19:58:19.837713
"""
from alembic import op
# revision identifiers, used by Alembic.
revision = 'a7249e2d8a1a'
down_revision = '0c857b50962a'
branch_labels = None
depends_on = None
def upgrade(engine_name):
globals()["upgrade_%s" % engine_name]()
def downgrade(engine_name):
globals()["downgrade_%s" % engine_name]()
def upgrade_data_broker():
op.execute('DELETE FROM tas_lookup')
def downgrade_data_broker():
pass
|
Add data migration for removing TAS entries
We won't be clearing the whole TAS table when importing in a later commit, so
drop all the data one last time."""Wipe TAS lookups: one-off data migration to delete existing TASLookups
Revision ID: a7249e2d8a1a
Revises: 0c857b50962a
Create Date: 2016-10-18 19:58:19.837713
"""
from alembic import op
# revision identifiers, used by Alembic.
revision = 'a7249e2d8a1a'
down_revision = '0c857b50962a'
branch_labels = None
depends_on = None
def upgrade(engine_name):
globals()["upgrade_%s" % engine_name]()
def downgrade(engine_name):
globals()["downgrade_%s" % engine_name]()
def upgrade_data_broker():
op.execute('DELETE FROM tas_lookup')
def downgrade_data_broker():
pass
|
<commit_before><commit_msg>Add data migration for removing TAS entries
We won't be clearing the whole TAS table when importing in a later commit, so
drop all the data one last time.<commit_after>"""Wipe TAS lookups: one-off data migration to delete existing TASLookups
Revision ID: a7249e2d8a1a
Revises: 0c857b50962a
Create Date: 2016-10-18 19:58:19.837713
"""
from alembic import op
# revision identifiers, used by Alembic.
revision = 'a7249e2d8a1a'
down_revision = '0c857b50962a'
branch_labels = None
depends_on = None
def upgrade(engine_name):
globals()["upgrade_%s" % engine_name]()
def downgrade(engine_name):
globals()["downgrade_%s" % engine_name]()
def upgrade_data_broker():
op.execute('DELETE FROM tas_lookup')
def downgrade_data_broker():
pass
|
|
b7d3d0289c605c6174a7605fcc700945a48cab23
|
lino_extjs6/extjs6/models.py
|
lino_extjs6/extjs6/models.py
|
# -*- coding: UTF-8 -*-
# Copyright 2015-2016 Luc Saffre
# License: BSD (see file COPYING for details)
"""Database models for :mod:`extjs6`.
"""
from __future__ import unicode_literals
import logging
logger = logging.getLogger(__name__)
from django.utils.translation import ugettext_lazy as _
from lino.api import dd
from lino.modlib.users.models import Users, UserDetail
from django.db import models
EXTJS6_THEMES_CHOICES = (
("theme-classic", "Theme classic"),
("theme-aria", "Theme Aria"),
("theme-classic", "Theme Classic"),
("theme-classic-sandbox", "Theme Classic Sandbox"),
("theme-crisp", "Theme Crisp"),
("theme-crisp-touch", "Theme crisp touch"),
("theme-gray", "Theme gray"),
("theme-neptune", "Theme neptune"),
("theme-neptune-touch", "Theme neptune touch"),
("theme-triton", "Theme triton"),
)
dd.inject_field(
'users.User', 'prefered_theme',
models.CharField(_("Prefered theme"), choices=EXTJS6_THEMES_CHOICES, default="", blank=True, max_length=25))
class ThemedUserDetail(UserDetail):
box1 = """
username profile:20 partner
first_name last_name initials
email language timezone prefered_theme
id created modified
"""
Users.set_detail_layout(ThemedUserDetail())
|
Add the prefered_theme field for User model
|
Add the prefered_theme field for User model
|
Python
|
agpl-3.0
|
lsaffre/lino_extjs6,lsaffre/lino_extjs6,lsaffre/lino_extjs6,lsaffre/lino_extjs6,lino-framework/extjs6,lsaffre/lino_extjs6,lino-framework/extjs6,lino-framework/extjs6
|
Add the prefered_theme field for User model
|
# -*- coding: UTF-8 -*-
# Copyright 2015-2016 Luc Saffre
# License: BSD (see file COPYING for details)
"""Database models for :mod:`extjs6`.
"""
from __future__ import unicode_literals
import logging
logger = logging.getLogger(__name__)
from django.utils.translation import ugettext_lazy as _
from lino.api import dd
from lino.modlib.users.models import Users, UserDetail
from django.db import models
EXTJS6_THEMES_CHOICES = (
("theme-classic", "Theme classic"),
("theme-aria", "Theme Aria"),
("theme-classic", "Theme Classic"),
("theme-classic-sandbox", "Theme Classic Sandbox"),
("theme-crisp", "Theme Crisp"),
("theme-crisp-touch", "Theme crisp touch"),
("theme-gray", "Theme gray"),
("theme-neptune", "Theme neptune"),
("theme-neptune-touch", "Theme neptune touch"),
("theme-triton", "Theme triton"),
)
dd.inject_field(
'users.User', 'prefered_theme',
models.CharField(_("Prefered theme"), choices=EXTJS6_THEMES_CHOICES, default="", blank=True, max_length=25))
class ThemedUserDetail(UserDetail):
box1 = """
username profile:20 partner
first_name last_name initials
email language timezone prefered_theme
id created modified
"""
Users.set_detail_layout(ThemedUserDetail())
|
<commit_before><commit_msg>Add the prefered_theme field for User model<commit_after>
|
# -*- coding: UTF-8 -*-
# Copyright 2015-2016 Luc Saffre
# License: BSD (see file COPYING for details)
"""Database models for :mod:`extjs6`.
"""
from __future__ import unicode_literals
import logging
logger = logging.getLogger(__name__)
from django.utils.translation import ugettext_lazy as _
from lino.api import dd
from lino.modlib.users.models import Users, UserDetail
from django.db import models
EXTJS6_THEMES_CHOICES = (
("theme-classic", "Theme classic"),
("theme-aria", "Theme Aria"),
("theme-classic", "Theme Classic"),
("theme-classic-sandbox", "Theme Classic Sandbox"),
("theme-crisp", "Theme Crisp"),
("theme-crisp-touch", "Theme crisp touch"),
("theme-gray", "Theme gray"),
("theme-neptune", "Theme neptune"),
("theme-neptune-touch", "Theme neptune touch"),
("theme-triton", "Theme triton"),
)
dd.inject_field(
'users.User', 'prefered_theme',
models.CharField(_("Prefered theme"), choices=EXTJS6_THEMES_CHOICES, default="", blank=True, max_length=25))
class ThemedUserDetail(UserDetail):
box1 = """
username profile:20 partner
first_name last_name initials
email language timezone prefered_theme
id created modified
"""
Users.set_detail_layout(ThemedUserDetail())
|
Add the prefered_theme field for User model# -*- coding: UTF-8 -*-
# Copyright 2015-2016 Luc Saffre
# License: BSD (see file COPYING for details)
"""Database models for :mod:`extjs6`.
"""
from __future__ import unicode_literals
import logging
logger = logging.getLogger(__name__)
from django.utils.translation import ugettext_lazy as _
from lino.api import dd
from lino.modlib.users.models import Users, UserDetail
from django.db import models
EXTJS6_THEMES_CHOICES = (
("theme-classic", "Theme classic"),
("theme-aria", "Theme Aria"),
("theme-classic", "Theme Classic"),
("theme-classic-sandbox", "Theme Classic Sandbox"),
("theme-crisp", "Theme Crisp"),
("theme-crisp-touch", "Theme crisp touch"),
("theme-gray", "Theme gray"),
("theme-neptune", "Theme neptune"),
("theme-neptune-touch", "Theme neptune touch"),
("theme-triton", "Theme triton"),
)
dd.inject_field(
'users.User', 'prefered_theme',
models.CharField(_("Prefered theme"), choices=EXTJS6_THEMES_CHOICES, default="", blank=True, max_length=25))
class ThemedUserDetail(UserDetail):
box1 = """
username profile:20 partner
first_name last_name initials
email language timezone prefered_theme
id created modified
"""
Users.set_detail_layout(ThemedUserDetail())
|
<commit_before><commit_msg>Add the prefered_theme field for User model<commit_after># -*- coding: UTF-8 -*-
# Copyright 2015-2016 Luc Saffre
# License: BSD (see file COPYING for details)
"""Database models for :mod:`extjs6`.
"""
from __future__ import unicode_literals
import logging
logger = logging.getLogger(__name__)
from django.utils.translation import ugettext_lazy as _
from lino.api import dd
from lino.modlib.users.models import Users, UserDetail
from django.db import models
EXTJS6_THEMES_CHOICES = (
("theme-classic", "Theme classic"),
("theme-aria", "Theme Aria"),
("theme-classic", "Theme Classic"),
("theme-classic-sandbox", "Theme Classic Sandbox"),
("theme-crisp", "Theme Crisp"),
("theme-crisp-touch", "Theme crisp touch"),
("theme-gray", "Theme gray"),
("theme-neptune", "Theme neptune"),
("theme-neptune-touch", "Theme neptune touch"),
("theme-triton", "Theme triton"),
)
dd.inject_field(
'users.User', 'prefered_theme',
models.CharField(_("Prefered theme"), choices=EXTJS6_THEMES_CHOICES, default="", blank=True, max_length=25))
class ThemedUserDetail(UserDetail):
box1 = """
username profile:20 partner
first_name last_name initials
email language timezone prefered_theme
id created modified
"""
Users.set_detail_layout(ThemedUserDetail())
|
|
bb6efe4170a9ddb144d09212ab5bf06b0d043f3d
|
judge/views/user.py
|
judge/views/user.py
|
from django.contrib.auth.decorators import login_required
from django.core.exceptions import ObjectDoesNotExist
from django.http import Http404, HttpResponseRedirect
from django.shortcuts import render_to_response
from django.template import RequestContext
from judge.forms import ProfileForm
from judge.models import Profile
def user(request, user):
try:
user = Profile.objects.get(user__username=user)
return render_to_response('user.html', {'user': user, 'title': 'User %s' % user.display_name()},
context_instance=RequestContext(request))
except ObjectDoesNotExist:
raise Http404()
@login_required
def edit_profile(request):
profile = Profile.objects.get(user=request.user)
if request.method == 'POST':
form = ProfileForm(request.POST, instance=profile)
if form.is_valid():
form.save()
return HttpResponseRedirect(request.path)
else:
form = ProfileForm(instance=profile)
return render_to_response('edit_profile.html', {'form': form, 'title': 'Edit profile'},
context_instance=RequestContext(request))
def users(request):
return render_to_response('users.html', {'users': Profile.objects.all(), 'title': 'Users'},
context_instance=RequestContext(request))
|
from django.contrib.auth.decorators import login_required
from django.core.exceptions import ObjectDoesNotExist
from django.http import Http404, HttpResponseRedirect
from django.shortcuts import render_to_response
from django.template import RequestContext
from judge.forms import ProfileForm
from judge.models import Profile
def user(request, user):
try:
user = Profile.objects.get(user__username=user)
return render_to_response('user.html', {'user': user, 'title': 'User %s' % user.long_display_name()},
context_instance=RequestContext(request))
except ObjectDoesNotExist:
raise Http404()
@login_required
def edit_profile(request):
profile = Profile.objects.get(user=request.user)
if request.method == 'POST':
form = ProfileForm(request.POST, instance=profile)
if form.is_valid():
form.save()
return HttpResponseRedirect(request.path)
else:
form = ProfileForm(instance=profile)
return render_to_response('edit_profile.html', {'form': form, 'title': 'Edit profile'},
context_instance=RequestContext(request))
def users(request):
return render_to_response('users.html', {'users': Profile.objects.all(), 'title': 'Users'},
context_instance=RequestContext(request))
|
Use long display name on profile
|
Use long display name on profile
|
Python
|
agpl-3.0
|
DMOJ/site,Minkov/site,Phoenix1369/site,Minkov/site,monouno/site,DMOJ/site,monouno/site,DMOJ/site,DMOJ/site,Minkov/site,monouno/site,Phoenix1369/site,Minkov/site,Phoenix1369/site,Phoenix1369/site,monouno/site,monouno/site
|
from django.contrib.auth.decorators import login_required
from django.core.exceptions import ObjectDoesNotExist
from django.http import Http404, HttpResponseRedirect
from django.shortcuts import render_to_response
from django.template import RequestContext
from judge.forms import ProfileForm
from judge.models import Profile
def user(request, user):
try:
user = Profile.objects.get(user__username=user)
return render_to_response('user.html', {'user': user, 'title': 'User %s' % user.display_name()},
context_instance=RequestContext(request))
except ObjectDoesNotExist:
raise Http404()
@login_required
def edit_profile(request):
profile = Profile.objects.get(user=request.user)
if request.method == 'POST':
form = ProfileForm(request.POST, instance=profile)
if form.is_valid():
form.save()
return HttpResponseRedirect(request.path)
else:
form = ProfileForm(instance=profile)
return render_to_response('edit_profile.html', {'form': form, 'title': 'Edit profile'},
context_instance=RequestContext(request))
def users(request):
return render_to_response('users.html', {'users': Profile.objects.all(), 'title': 'Users'},
context_instance=RequestContext(request))
Use long display name on profile
|
from django.contrib.auth.decorators import login_required
from django.core.exceptions import ObjectDoesNotExist
from django.http import Http404, HttpResponseRedirect
from django.shortcuts import render_to_response
from django.template import RequestContext
from judge.forms import ProfileForm
from judge.models import Profile
def user(request, user):
try:
user = Profile.objects.get(user__username=user)
return render_to_response('user.html', {'user': user, 'title': 'User %s' % user.long_display_name()},
context_instance=RequestContext(request))
except ObjectDoesNotExist:
raise Http404()
@login_required
def edit_profile(request):
profile = Profile.objects.get(user=request.user)
if request.method == 'POST':
form = ProfileForm(request.POST, instance=profile)
if form.is_valid():
form.save()
return HttpResponseRedirect(request.path)
else:
form = ProfileForm(instance=profile)
return render_to_response('edit_profile.html', {'form': form, 'title': 'Edit profile'},
context_instance=RequestContext(request))
def users(request):
return render_to_response('users.html', {'users': Profile.objects.all(), 'title': 'Users'},
context_instance=RequestContext(request))
|
<commit_before>from django.contrib.auth.decorators import login_required
from django.core.exceptions import ObjectDoesNotExist
from django.http import Http404, HttpResponseRedirect
from django.shortcuts import render_to_response
from django.template import RequestContext
from judge.forms import ProfileForm
from judge.models import Profile
def user(request, user):
try:
user = Profile.objects.get(user__username=user)
return render_to_response('user.html', {'user': user, 'title': 'User %s' % user.display_name()},
context_instance=RequestContext(request))
except ObjectDoesNotExist:
raise Http404()
@login_required
def edit_profile(request):
profile = Profile.objects.get(user=request.user)
if request.method == 'POST':
form = ProfileForm(request.POST, instance=profile)
if form.is_valid():
form.save()
return HttpResponseRedirect(request.path)
else:
form = ProfileForm(instance=profile)
return render_to_response('edit_profile.html', {'form': form, 'title': 'Edit profile'},
context_instance=RequestContext(request))
def users(request):
return render_to_response('users.html', {'users': Profile.objects.all(), 'title': 'Users'},
context_instance=RequestContext(request))
<commit_msg>Use long display name on profile<commit_after>
|
from django.contrib.auth.decorators import login_required
from django.core.exceptions import ObjectDoesNotExist
from django.http import Http404, HttpResponseRedirect
from django.shortcuts import render_to_response
from django.template import RequestContext
from judge.forms import ProfileForm
from judge.models import Profile
def user(request, user):
try:
user = Profile.objects.get(user__username=user)
return render_to_response('user.html', {'user': user, 'title': 'User %s' % user.long_display_name()},
context_instance=RequestContext(request))
except ObjectDoesNotExist:
raise Http404()
@login_required
def edit_profile(request):
profile = Profile.objects.get(user=request.user)
if request.method == 'POST':
form = ProfileForm(request.POST, instance=profile)
if form.is_valid():
form.save()
return HttpResponseRedirect(request.path)
else:
form = ProfileForm(instance=profile)
return render_to_response('edit_profile.html', {'form': form, 'title': 'Edit profile'},
context_instance=RequestContext(request))
def users(request):
return render_to_response('users.html', {'users': Profile.objects.all(), 'title': 'Users'},
context_instance=RequestContext(request))
|
from django.contrib.auth.decorators import login_required
from django.core.exceptions import ObjectDoesNotExist
from django.http import Http404, HttpResponseRedirect
from django.shortcuts import render_to_response
from django.template import RequestContext
from judge.forms import ProfileForm
from judge.models import Profile
def user(request, user):
try:
user = Profile.objects.get(user__username=user)
return render_to_response('user.html', {'user': user, 'title': 'User %s' % user.display_name()},
context_instance=RequestContext(request))
except ObjectDoesNotExist:
raise Http404()
@login_required
def edit_profile(request):
profile = Profile.objects.get(user=request.user)
if request.method == 'POST':
form = ProfileForm(request.POST, instance=profile)
if form.is_valid():
form.save()
return HttpResponseRedirect(request.path)
else:
form = ProfileForm(instance=profile)
return render_to_response('edit_profile.html', {'form': form, 'title': 'Edit profile'},
context_instance=RequestContext(request))
def users(request):
return render_to_response('users.html', {'users': Profile.objects.all(), 'title': 'Users'},
context_instance=RequestContext(request))
Use long display name on profilefrom django.contrib.auth.decorators import login_required
from django.core.exceptions import ObjectDoesNotExist
from django.http import Http404, HttpResponseRedirect
from django.shortcuts import render_to_response
from django.template import RequestContext
from judge.forms import ProfileForm
from judge.models import Profile
def user(request, user):
try:
user = Profile.objects.get(user__username=user)
return render_to_response('user.html', {'user': user, 'title': 'User %s' % user.long_display_name()},
context_instance=RequestContext(request))
except ObjectDoesNotExist:
raise Http404()
@login_required
def edit_profile(request):
profile = Profile.objects.get(user=request.user)
if request.method == 'POST':
form = ProfileForm(request.POST, instance=profile)
if form.is_valid():
form.save()
return HttpResponseRedirect(request.path)
else:
form = ProfileForm(instance=profile)
return render_to_response('edit_profile.html', {'form': form, 'title': 'Edit profile'},
context_instance=RequestContext(request))
def users(request):
return render_to_response('users.html', {'users': Profile.objects.all(), 'title': 'Users'},
context_instance=RequestContext(request))
|
<commit_before>from django.contrib.auth.decorators import login_required
from django.core.exceptions import ObjectDoesNotExist
from django.http import Http404, HttpResponseRedirect
from django.shortcuts import render_to_response
from django.template import RequestContext
from judge.forms import ProfileForm
from judge.models import Profile
def user(request, user):
try:
user = Profile.objects.get(user__username=user)
return render_to_response('user.html', {'user': user, 'title': 'User %s' % user.display_name()},
context_instance=RequestContext(request))
except ObjectDoesNotExist:
raise Http404()
@login_required
def edit_profile(request):
profile = Profile.objects.get(user=request.user)
if request.method == 'POST':
form = ProfileForm(request.POST, instance=profile)
if form.is_valid():
form.save()
return HttpResponseRedirect(request.path)
else:
form = ProfileForm(instance=profile)
return render_to_response('edit_profile.html', {'form': form, 'title': 'Edit profile'},
context_instance=RequestContext(request))
def users(request):
return render_to_response('users.html', {'users': Profile.objects.all(), 'title': 'Users'},
context_instance=RequestContext(request))
<commit_msg>Use long display name on profile<commit_after>from django.contrib.auth.decorators import login_required
from django.core.exceptions import ObjectDoesNotExist
from django.http import Http404, HttpResponseRedirect
from django.shortcuts import render_to_response
from django.template import RequestContext
from judge.forms import ProfileForm
from judge.models import Profile
def user(request, user):
try:
user = Profile.objects.get(user__username=user)
return render_to_response('user.html', {'user': user, 'title': 'User %s' % user.long_display_name()},
context_instance=RequestContext(request))
except ObjectDoesNotExist:
raise Http404()
@login_required
def edit_profile(request):
profile = Profile.objects.get(user=request.user)
if request.method == 'POST':
form = ProfileForm(request.POST, instance=profile)
if form.is_valid():
form.save()
return HttpResponseRedirect(request.path)
else:
form = ProfileForm(instance=profile)
return render_to_response('edit_profile.html', {'form': form, 'title': 'Edit profile'},
context_instance=RequestContext(request))
def users(request):
return render_to_response('users.html', {'users': Profile.objects.all(), 'title': 'Users'},
context_instance=RequestContext(request))
|
23962420a7bbdcdeaec500f94b4c5f19a16f63dc
|
patch_analyzer/patch_utils.py
|
patch_analyzer/patch_utils.py
|
from subprocess import Popen, PIPE
def is_class(file):
return file.endswith(".class") and not file.startswith("META-INF")
def modified_paths(patch):
p = Popen(["jar", "-tf", patch], stdout=PIPE)
output, _ = p.communicate()
return filter(is_class, [file.decode() for file in output.split(b"\n")])
def path_to_class(path):
if "$" in path:
return path.split("$")[0]
else:
return path.replace(".class", "")
def modified_classes(patch):
classes = map(path_to_class, modified_paths(patch))
return list(set(classes))
|
Patch utils to get content from patch jars
|
Patch utils to get content from patch jars
|
Python
|
mit
|
alepulver/changesets
|
Patch utils to get content from patch jars
|
from subprocess import Popen, PIPE
def is_class(file):
return file.endswith(".class") and not file.startswith("META-INF")
def modified_paths(patch):
p = Popen(["jar", "-tf", patch], stdout=PIPE)
output, _ = p.communicate()
return filter(is_class, [file.decode() for file in output.split(b"\n")])
def path_to_class(path):
if "$" in path:
return path.split("$")[0]
else:
return path.replace(".class", "")
def modified_classes(patch):
classes = map(path_to_class, modified_paths(patch))
return list(set(classes))
|
<commit_before><commit_msg>Patch utils to get content from patch jars<commit_after>
|
from subprocess import Popen, PIPE
def is_class(file):
return file.endswith(".class") and not file.startswith("META-INF")
def modified_paths(patch):
p = Popen(["jar", "-tf", patch], stdout=PIPE)
output, _ = p.communicate()
return filter(is_class, [file.decode() for file in output.split(b"\n")])
def path_to_class(path):
if "$" in path:
return path.split("$")[0]
else:
return path.replace(".class", "")
def modified_classes(patch):
classes = map(path_to_class, modified_paths(patch))
return list(set(classes))
|
Patch utils to get content from patch jarsfrom subprocess import Popen, PIPE
def is_class(file):
return file.endswith(".class") and not file.startswith("META-INF")
def modified_paths(patch):
p = Popen(["jar", "-tf", patch], stdout=PIPE)
output, _ = p.communicate()
return filter(is_class, [file.decode() for file in output.split(b"\n")])
def path_to_class(path):
if "$" in path:
return path.split("$")[0]
else:
return path.replace(".class", "")
def modified_classes(patch):
classes = map(path_to_class, modified_paths(patch))
return list(set(classes))
|
<commit_before><commit_msg>Patch utils to get content from patch jars<commit_after>from subprocess import Popen, PIPE
def is_class(file):
return file.endswith(".class") and not file.startswith("META-INF")
def modified_paths(patch):
p = Popen(["jar", "-tf", patch], stdout=PIPE)
output, _ = p.communicate()
return filter(is_class, [file.decode() for file in output.split(b"\n")])
def path_to_class(path):
if "$" in path:
return path.split("$")[0]
else:
return path.replace(".class", "")
def modified_classes(patch):
classes = map(path_to_class, modified_paths(patch))
return list(set(classes))
|
|
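A hypothetical invocation of the utilities above. The path 'hotfix.jar' is invented; the functions shell out to the JDK `jar` tool, so it must be on PATH.
# List the distinct top-level classes touched by a patch jar.
# Inner classes such as com/example/Foo$Inner.class collapse to com/example/Foo.
for cls in sorted(modified_classes('hotfix.jar')):
    print(cls)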
596692a033b7b9e76d7de176564abc640b647d95
|
confluent/auth.py
|
confluent/auth.py
|
# authentication and authorization routines for confluent
import confluent.config as config
def authorize(name, element):
#TODO: actually use the element to ascertain if this user is good enough
try:
if '/' in name:
tenant, user = name.split('/', 1)
tenant = config.get_tenant_id(tenant)
user = config.get_user(user, tenant)
elif name in config.get_tenant_names():
tenant = config.get_tenant_id(name)
user = config.get_user(name, tenant)
else:
user = config.get_user(name, 0)
tenant = 0
return (tenant, user)
except:
print "uh oh"
return None
|
Put missing file under git tracking
|
Put missing file under git tracking
|
Python
|
apache-2.0
|
jufm/confluent,jjohnson42/confluent,chenglch/confluent,xcat2/confluent,chenglch/confluent,michaelfardu/thinkconfluent,michaelfardu/thinkconfluent,whowutwut/confluent,michaelfardu/thinkconfluent,jjohnson42/confluent,xcat2/confluent,whowutwut/confluent,jufm/confluent,xcat2/confluent,michaelfardu/thinkconfluent,chenglch/confluent,jufm/confluent,jjohnson42/confluent,jjohnson42/confluent,xcat2/confluent,whowutwut/confluent,whowutwut/confluent,xcat2/confluent,chenglch/confluent,chenglch/confluent,jjohnson42/confluent,michaelfardu/thinkconfluent,jufm/confluent,jufm/confluent
|
Put missing file under git tracking
|
# authentication and authorization routines for confluent
import confluent.config as config
def authorize(name, element):
#TODO: actually use the element to ascertain if this user is good enough
try:
if '/' in name:
tenant, user = name.split('/', 1)
tenant = config.get_tenant_id(tenant)
user = config.get_user(user, tenant)
elif name in config.get_tenant_names():
tenant = config.get_tenant_id(name)
user = config.get_user(name, tenant)
else:
user = config.get_user(name, 0)
tenant = 0
return (tenant, user)
except:
print "uh oh"
return None
|
<commit_before><commit_msg>Put missing file under git tracking<commit_after>
|
# authentication and authorization routines for confluent
import confluent.config as config
def authorize(name, element):
#TODO: actually use the element to ascertain if this user is good enough
try:
if '/' in name:
tenant, user = name.split('/', 1)
tenant = config.get_tenant_id(tenant)
user = config.get_user(user, tenant)
elif name in config.get_tenant_names():
tenant = config.get_tenant_id(name)
user = config.get_user(name, tenant)
else:
user = config.get_user(name, 0)
tenant = 0
return (tenant, user)
except:
print "uh oh"
return None
|
Put missing file under git tracking# authentication and authorization routines for confluent
import confluent.config as config
def authorize(name, element):
#TODO: actually use the element to ascertain if this user is good enough
try:
if '/' in name:
tenant, user = name.split('/', 1)
tenant = config.get_tenant_id(tenant)
user = config.get_user(user, tenant)
elif name in config.get_tenant_names():
tenant = config.get_tenant_id(name)
user = config.get_user(name, tenant)
else:
user = config.get_user(name, 0)
tenant = 0
return (tenant, user)
except:
print "uh oh"
return None
|
<commit_before><commit_msg>Put missing file under git tracking<commit_after># authentication and authorization routines for confluent
import confluent.config as config
def authorize(name, element):
#TODO: actually use the element to ascertain if this user is good enough
try:
if '/' in name:
tenant, user = name.split('/', 1)
tenant = config.get_tenant_id(tenant)
user = config.get_user(user, tenant)
elif name in config.get_tenant_names():
tenant = config.get_tenant_id(name)
user = config.get_user(name, tenant)
else:
user = config.get_user(name, 0)
tenant = 0
return (tenant, user)
except:
print "uh oh"
return None
|
|
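A hypothetical call against the routine above; the tenant and user names are invented. Names qualified with `/` resolve the tenant explicitly, bare tenant names map to that tenant's own user, and anything else falls back to the default tenant 0.
# Returns a (tenant, user) tuple on success, None on any lookup failure.
ident = authorize('acme/alice', element='/nodes/')
if ident is None:
    print('access denied')
else:
    tenant, user = ident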
edb8caff755853443cfd94cba6ef2e949ae47900
|
knights/django.py
|
knights/django.py
|
from django.template import TemplateDoesNotExist, TemplateSyntaxError
from django.template.backends.base import BaseEngine
from django.template.backends.utils import csrf_input_lazy, csrf_token_lazy
from . import compiler
from . import loader
class KnightsTemplater(BaseEngine):
def __init__(self, params):
params = params.copy()
options = params.pop('OPTIONS').copy()
super(KnightsTemplater, self).__init__(params)
for path in params.get('DIRS', []):
loader.add_path(path)
def from_string(self, template_code):
tmpl = compiler.kompile(template_code)
return Template(tmpl)
def get_template(self, template_name):
tmpl = loader.load_template(template_name)
if tmpl is None:
raise TemplateDoesNotExist(template_name)
return Template(tmpl)
class Template(object):
def __init__(self, template):
self.template = template
def render(self, context=None, request=None):
if context is None:
context = {}
if request is not None:
context['request'] = request
context['csrf_input'] = csrf_input_lazy(request)
context['csrf_token'] = csrf_token_lazy(request)
return self.template()(context)
|
Add Django 1.8 engine wrapper
|
Add Django 1.8 engine wrapper
|
Python
|
mit
|
funkybob/knights-templater,funkybob/knights-templater
|
Add Django 1.8 engine wrapper
|
from django.template import TemplateDoesNotExist, TemplateSyntaxError
from django.template.backends.base import BaseEngine
from django.template.backends.utils import csrf_input_lazy, csrf_token_lazy
from . import compiler
from . import loader
class KnightsTemplater(BaseEngine):
def __init__(self, params):
params = params.copy()
options = params.pop('OPTIONS').copy()
super(KnightsTemplater, self).__init__(params)
for path in params.get('DIRS', []):
loader.add_path(path)
def from_string(self, template_code):
tmpl = compiler.kompile(template_code)
return Template(tmpl)
    def get_template(self, template_name):
tmpl = loader.load_template(template_name)
if tmpl is None:
raise TemplateDoesNotExist(template_name)
        return Template(tmpl)
class Template(object):
def __init__(self, template):
self.template = template
def render(self, context=None, request=None):
if context is None:
context = {}
if request is not None:
context['request'] = request
context['csrf_input'] = csrf_input_lazy(request)
context['csrf_token'] = csrf_token_lazy(request)
return self.template()(context)
|
<commit_before><commit_msg>Add Django 1.8 engine wrapper<commit_after>
|
from django.template import TemplateDoesNotExist, TemplateSyntaxError
from django.template.backends.base import BaseEngine
from django.template.backends.utils import csrf_input_lazy, csrf_token_lazy
from . import compiler
from . import loader
class KnightsTemplater(BaseEngine):
def __init__(self, params):
params = params.copy()
options = params.pop('OPTIONS').copy()
super(KnightsTemplater, self).__init__(params)
for path in params.get('DIRS', []):
loader.add_path(path)
def from_string(self, template_code):
tmpl = compiler.kompile(template_code)
return Template(tmpl)
    def get_template(self, template_name):
tmpl = loader.load_template(template_name)
if tmpl is None:
raise TemplateDoesNotExist(template_name)
        return Template(tmpl)
class Template(object):
def __init__(self, template):
self.template = template
def render(self, context=None, request=None):
if context is None:
context = {}
if request is not None:
context['request'] = request
context['csrf_input'] = csrf_input_lazy(request)
context['csrf_token'] = csrf_token_lazy(request)
return self.template()(context)
|
Add Django 1.8 engine wrapperfrom django.template import TemplateDoesNotExist, TemplateSyntaxError
from django.template.backends.base import BaseEngine
from django.template.backends.utils import csrf_input_lazy, csrf_token_lazy
from . import compiler
from . import loader
class KnightsTemplater(BaseEngine):
def __init__(self, params):
params = params.copy()
options = params.pop('OPTIONS').copy()
super(KnightsTemplater, self).__init__(params)
for path in params.get('DIRS', []):
loader.add_path(path)
def from_string(self, template_code):
tmpl = compiler.kompile(template_code)
return Template(tmpl)
    def get_template(self, template_name):
tmpl = loader.load_template(template_name)
if tmpl is None:
raise TemplateDoesNotExist(template_name)
        return Template(tmpl)
class Template(object):
def __init__(self, template):
self.template = template
def render(self, context=None, request=None):
if context is None:
context = {}
if request is not None:
context['request'] = request
context['csrf_input'] = csrf_input_lazy(request)
context['csrf_token'] = csrf_token_lazy(request)
return self.template()(context)
|
<commit_before><commit_msg>Add Django 1.8 engine wrapper<commit_after>from django.template import TemplateDoesNotExist, TemplateSyntaxError
from django.template.backends.base import BaseEngine
from django.template.backends.utils import csrf_input_lazy, csrf_token_lazy
from . import compiler
from . import loader
class KnightsTemplater(BaseEngine):
def __init__(self, params):
params = params.copy()
options = params.pop('OPTIONS').copy()
super(KnightsTemplater, self).__init__(params)
for path in params.get('DIRS', []):
loader.add_path(path)
def from_string(self, template_code):
tmpl = compiler.kompile(template_code)
return Template(tmpl)
    def get_template(self, template_name):
tmpl = loader.load_template(template_name)
if tmpl is None:
raise TemplateDoesNotExist(template_name)
        return Template(tmpl)
class Template(object):
def __init__(self, template):
self.template = template
def render(self, context=None, request=None):
if context is None:
context = {}
if request is not None:
context['request'] = request
context['csrf_input'] = csrf_input_lazy(request)
context['csrf_token'] = csrf_token_lazy(request)
return self.template()(context)
|
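For context, a Django 1.8 TEMPLATES entry that would select this backend might look like the sketch below. The dotted path assumes the class is importable as knights.django.KnightsTemplater; the DIRS value is a placeholder:

TEMPLATES = [
    {
        'BACKEND': 'knights.django.KnightsTemplater',
        'DIRS': ['/path/to/templates'],  # hypothetical template directory
        'OPTIONS': {},
    },
]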
|
88fc0bca84ea1a66e941b094c7a21ece8b97c616
|
python/gegl/pygobject_introspection/test.py
|
python/gegl/pygobject_introspection/test.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2015 Jérémie DECOCK (http://www.jdhp.org)
"""
A simple GEGL snippet with the GObject automatic introspection method.
This inverts the colors of a PNG file.
BUG: GEGL has been built without introspection on Debian 8.
See https://github.com/jsbueno/python-gegl/issues/2
See: http://linuxfr.org/news/gegl-0-3-0-et-babl-0-1-12-sont-de-sortie
http://gegl.org/operations.html#Gegl
http://gegl.org/operations.html#GEGL%20operations
https://github.com/jsbueno/python-gegl/blob/master/snippets.py
https://github.com/jsbueno/python-gegl
Debian dependencies: libgegl-dev (?)
"""
import argparse
from gi.repository import Gegl as gegl
def main():
# Parse options
parser = argparse.ArgumentParser(description='An argparse snippet.')
parser.add_argument("--infile", "-i", help="the input file", required=True, metavar="STRING")
parser.add_argument("--outfile", "-o", help="the output file", required=True, metavar="STRING")
args = parser.parse_args()
infile = args.infile
outfile = args.outfile
# GEGL ######################################
gegl.init([])
#print(gegl.list_operations())
# Make nodes
node1 = gegl.Node()
node2 = gegl.Node() # png-load
node3 = gegl.Node() # invert
node4 = gegl.Node() # png-save
# Set properties
node2.set_property("operation", "gegl:png-load")
node2.set_property("path", infile)
node3.set_property("operation", "gegl:invert")
node4.set_property("operation", "gegl:png-save")
node4.set_property("path", outfile)
# Make the graph
node1.add_child(node2)
node1.add_child(node3)
node1.add_child(node4)
node2.connect_to("output", node3, "input")
node3.connect_to("output", node4, "input")
# Process
node4.process()
if __name__ == '__main__':
main()
|
Add a snippet (Python GEGL).
|
Add a snippet (Python GEGL).
|
Python
|
mit
|
jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets
|
Add a snippet (Python GEGL).
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2015 Jérémie DECOCK (http://www.jdhp.org)
"""
A simple GEGL snippet with the GObject automatic introspection method.
This inverts the colors of a PNG file.
BUG: GEGL has been built without introspection on Debian 8.
See https://github.com/jsbueno/python-gegl/issues/2
See: http://linuxfr.org/news/gegl-0-3-0-et-babl-0-1-12-sont-de-sortie
http://gegl.org/operations.html#Gegl
http://gegl.org/operations.html#GEGL%20operations
https://github.com/jsbueno/python-gegl/blob/master/snippets.py
https://github.com/jsbueno/python-gegl
Debian dependencies: libgegl-dev (?)
"""
import argparse
from gi.repository import Gegl as gegl
def main():
# Parse options
parser = argparse.ArgumentParser(description='An argparse snippet.')
parser.add_argument("--infile", "-i", help="the input file", required=True, metavar="STRING")
parser.add_argument("--outfile", "-o", help="the output file", required=True, metavar="STRING")
args = parser.parse_args()
infile = args.infile
outfile = args.outfile
# GEGL ######################################
gegl.init([])
#print(gegl.list_operations())
# Make nodes
node1 = gegl.Node()
node2 = gegl.Node() # png-load
node3 = gegl.Node() # invert
node4 = gegl.Node() # png-save
# Set properties
node2.set_property("operation", "gegl:png-load")
node2.set_property("path", infile)
node3.set_property("operation", "gegl:invert")
node4.set_property("operation", "gegl:png-save")
node4.set_property("path", outfile)
# Make the graph
node1.add_child(node2)
node1.add_child(node3)
node1.add_child(node4)
node2.connect_to("output", node3, "input")
node3.connect_to("output", node4, "input")
# Process
node4.process()
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add a snippet (Python GEGL).<commit_after>
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2015 Jérémie DECOCK (http://www.jdhp.org)
"""
A simple GEGL snippet with the GObject automatic introspection method.
This inverts the colors of a PNG file.
BUG: GEGL has been built without introspection on Debian 8.
See https://github.com/jsbueno/python-gegl/issues/2
See: http://linuxfr.org/news/gegl-0-3-0-et-babl-0-1-12-sont-de-sortie
http://gegl.org/operations.html#Gegl
http://gegl.org/operations.html#GEGL%20operations
https://github.com/jsbueno/python-gegl/blob/master/snippets.py
https://github.com/jsbueno/python-gegl
Debian dependencies: libgegl-dev (?)
"""
import argparse
from gi.repository import Gegl as gegl
def main():
# Parse options
parser = argparse.ArgumentParser(description='An argparse snippet.')
parser.add_argument("--infile", "-i", help="the input file", required=True, metavar="STRING")
parser.add_argument("--outfile", "-o", help="the output file", required=True, metavar="STRING")
args = parser.parse_args()
infile = args.infile
outfile = args.outfile
# GEGL ######################################
gegl.init([])
#print(gegl.list_operations())
# Make nodes
node1 = gegl.Node()
node2 = gegl.Node() # png-load
node3 = gegl.Node() # invert
node4 = gegl.Node() # png-save
# Set properties
node2.set_property("operation", "gegl:png-load")
node2.set_property("path", infile)
node3.set_property("operation", "gegl:invert")
node4.set_property("operation", "gegl:png-save")
node4.set_property("path", outfile)
# Make the graph
node1.add_child(node2)
node1.add_child(node3)
node1.add_child(node4)
node2.connect_to("output", node3, "input")
node3.connect_to("output", node4, "input")
# Process
node4.process()
if __name__ == '__main__':
main()
|
Add a snippet (Python GEGL).#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2015 Jérémie DECOCK (http://www.jdhp.org)
"""
A simple GEGL snippet with the GObject automatic introspection method.
This inverts the colors of a PNG file.
BUG: GEGL has been built without introspection on Debian 8.
See https://github.com/jsbueno/python-gegl/issues/2
See: http://linuxfr.org/news/gegl-0-3-0-et-babl-0-1-12-sont-de-sortie
http://gegl.org/operations.html#Gegl
http://gegl.org/operations.html#GEGL%20operations
https://github.com/jsbueno/python-gegl/blob/master/snippets.py
https://github.com/jsbueno/python-gegl
Debian dependencies: libgegl-dev (?)
"""
import argparse
from gi.repository import Gegl as gegl
def main():
# Parse options
parser = argparse.ArgumentParser(description='An argparse snippet.')
parser.add_argument("--infile", "-i", help="the input file", required=True, metavar="STRING")
parser.add_argument("--outfile", "-o", help="the output file", required=True, metavar="STRING")
args = parser.parse_args()
infile = args.infile
outfile = args.outfile
# GEGL ######################################
gegl.init([])
#print(gegl.list_operations())
# Make nodes
node1 = gegl.Node()
node2 = gegl.Node() # png-load
node3 = gegl.Node() # invert
node4 = gegl.Node() # png-save
# Set properties
node2.set_property("operation", "gegl:png-load")
node2.set_property("path", infile)
node3.set_property("operation", "gegl:invert")
node4.set_property("operation", "gegl:png-save")
node4.set_property("path", outfile)
# Make the graph
node1.add_child(node2)
node1.add_child(node3)
node1.add_child(node4)
node2.connect_to("output", node3, "input")
node3.connect_to("output", node4, "input")
# Process
node4.process()
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add a snippet (Python GEGL).<commit_after>#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2015 Jérémie DECOCK (http://www.jdhp.org)
"""
A simple GEGL snippet with the GObject automatic introspection method.
This inverts the colors of a PNG file.
BUG: GEGL has been built without introspection on Debian 8.
See https://github.com/jsbueno/python-gegl/issues/2
See: http://linuxfr.org/news/gegl-0-3-0-et-babl-0-1-12-sont-de-sortie
http://gegl.org/operations.html#Gegl
http://gegl.org/operations.html#GEGL%20operations
https://github.com/jsbueno/python-gegl/blob/master/snippets.py
https://github.com/jsbueno/python-gegl
Debian dependencies: libgegl-dev (?)
"""
import argparse
from gi.repository import Gegl as gegl
def main():
# Parse options
parser = argparse.ArgumentParser(description='An argparse snippet.')
parser.add_argument("--infile", "-i", help="the input file", required=True, metavar="STRING")
parser.add_argument("--outfile", "-o", help="the output file", required=True, metavar="STRING")
args = parser.parse_args()
infile = args.infile
outfile = args.outfile
# GEGL ######################################
gegl.init([])
#print(gegl.list_operations())
# Make nodes
node1 = gegl.Node()
node2 = gegl.Node() # png-load
node3 = gegl.Node() # invert
node4 = gegl.Node() # png-save
# Set properties
node2.set_property("operation", "gegl:png-load")
node2.set_property("path", infile)
node3.set_property("operation", "gegl:invert")
node4.set_property("operation", "gegl:png-save")
node4.set_property("path", outfile)
# Make the graph
node1.add_child(node2)
node1.add_child(node3)
node1.add_child(node4)
node2.connect_to("output", node3, "input")
node3.connect_to("output", node4, "input")
# Process
node4.process()
if __name__ == '__main__':
main()
|
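A quick sanity check, sketched from the commented-out gegl.list_operations() call in the snippet itself, confirms the three required operations are registered before building the graph; this is useful given the missing-introspection bug noted in the docstring:

from gi.repository import Gegl as gegl

gegl.init([])
ops = gegl.list_operations()  # same call as the commented-out line in the snippet
for needed in ("gegl:png-load", "gegl:invert", "gegl:png-save"):
    if needed not in ops:
        print("missing operation: " + needed)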
|
c8b47062c76ebe77b8cf441107f97f3c2ec3f0ac
|
django/hello/world/models.py
|
django/hello/world/models.py
|
from django.db import models
# Create your models here.
class World(models.Model):
randomnumber = models.IntegerField()
class Meta:
db_table = 'world'
class Fortune(models.Model):
message = models.CharField(max_length=65535)
class Meta:
db_table = 'fortune'
|
from django.db import models
# Create your models here.
class World(models.Model):
randomnumber = models.IntegerField()
class Meta:
db_table = 'World'
class Fortune(models.Model):
message = models.CharField(max_length=65535)
class Meta:
db_table = 'Fortune'
|
Fix table name for MySQL
|
Fix table name for MySQL
|
Python
|
bsd-3-clause
|
greg-hellings/FrameworkBenchmarks,Dith3r/FrameworkBenchmarks,sxend/FrameworkBenchmarks,methane/FrameworkBenchmarks,zane-techempower/FrameworkBenchmarks,valyala/FrameworkBenchmarks,actframework/FrameworkBenchmarks,leafo/FrameworkBenchmarks,kellabyte/FrameworkBenchmarks,s-ludwig/FrameworkBenchmarks,raziel057/FrameworkBenchmarks,saturday06/FrameworkBenchmarks,sanjoydesk/FrameworkBenchmarks,jetty-project/FrameworkBenchmarks,steveklabnik/FrameworkBenchmarks,psfblair/FrameworkBenchmarks,k-r-g/FrameworkBenchmarks,jaguililla/FrameworkBenchmarks,youprofit/FrameworkBenchmarks,Ocramius/FrameworkBenchmarks,circlespainter/FrameworkBenchmarks,zhuochenKIDD/FrameworkBenchmarks,ashawnbandy-te-tfb/FrameworkBenchmarks,testn/FrameworkBenchmarks,zdanek/FrameworkBenchmarks,zloster/FrameworkBenchmarks,victorbriz/FrameworkBenchmarks,donovanmuller/FrameworkBenchmarks,jebbstewart/FrameworkBenchmarks,yunspace/FrameworkBenchmarks,fabianmurariu/FrameworkBenchmarks,mfirry/FrameworkBenchmarks,jeevatkm/FrameworkBenchmarks,Rydgel/FrameworkBenchmarks,grob/FrameworkBenchmarks,MTDdk/FrameworkBenchmarks,denkab/FrameworkBenchmarks,herloct/FrameworkBenchmarks,ratpack/FrameworkBenchmarks,doom369/FrameworkBenchmarks,nathana1/FrameworkBenchmarks,seem-sky/FrameworkBenchmarks,hamiltont/FrameworkBenchmarks,Eyepea/FrameworkBenchmarks,Rayne/FrameworkBenchmarks,xitrum-framework/FrameworkBenchmarks,lcp0578/FrameworkBenchmarks,Jesterovskiy/FrameworkBenchmarks,knewmanTE/FrameworkBenchmarks,khellang/FrameworkBenchmarks,sagenschneider/FrameworkBenchmarks,Synchro/FrameworkBenchmarks,martin-g/FrameworkBenchmarks,RockinRoel/FrameworkBenchmarks,waiteb3/FrameworkBenchmarks,nkasvosve/FrameworkBenchmarks,Verber/FrameworkBenchmarks,marko-asplund/FrameworkBenchmarks,thousandsofthem/FrameworkBenchmarks,joshk/FrameworkBenchmarks,torhve/FrameworkBenchmarks,sgml/FrameworkBenchmarks,kbrock/FrameworkBenchmarks,PermeAgility/FrameworkBenchmarks,stefanocasazza/FrameworkBenchmarks,hperadin/FrameworkBenchmarks,greenlaw110/FrameworkBenchmarks,kostya-sh/FrameworkBenchmarks,F3Community/FrameworkBenchmarks,julienschmidt/FrameworkBenchmarks,jamming/FrameworkBenchmarks,diablonhn/FrameworkBenchmarks,zapov/FrameworkBenchmarks,nbrady-techempower/FrameworkBenchmarks,markkolich/FrameworkBenchmarks,dmacd/FB-try1,alubbe/FrameworkBenchmarks
|
from django.db import models
# Create your models here.
class World(models.Model):
randomnumber = models.IntegerField()
class Meta:
db_table = 'world'
class Fortune(models.Model):
message = models.CharField(max_length=65535)
class Meta:
db_table = 'fortune'
Fix table name for MySQL
|
from django.db import models
# Create your models here.
class World(models.Model):
randomnumber = models.IntegerField()
class Meta:
db_table = 'World'
class Fortune(models.Model):
message = models.CharField(max_length=65535)
class Meta:
db_table = 'Fortune'
|
<commit_before>from django.db import models
# Create your models here.
class World(models.Model):
randomnumber = models.IntegerField()
class Meta:
db_table = 'world'
class Fortune(models.Model):
message = models.CharField(max_length=65535)
class Meta:
db_table = 'fortune'
<commit_msg>Fix table name for MySQL<commit_after>
|
from django.db import models
# Create your models here.
class World(models.Model):
randomnumber = models.IntegerField()
class Meta:
db_table = 'World'
class Fortune(models.Model):
message = models.CharField(max_length=65535)
class Meta:
db_table = 'Fortune'
|
from django.db import models
# Create your models here.
class World(models.Model):
randomnumber = models.IntegerField()
class Meta:
db_table = 'world'
class Fortune(models.Model):
message = models.CharField(max_length=65535)
class Meta:
db_table = 'fortune'
Fix table name for MySQLfrom django.db import models
# Create your models here.
class World(models.Model):
randomnumber = models.IntegerField()
class Meta:
db_table = 'World'
class Fortune(models.Model):
message = models.CharField(max_length=65535)
class Meta:
db_table = 'Fortune'
|
<commit_before>from django.db import models
# Create your models here.
class World(models.Model):
randomnumber = models.IntegerField()
class Meta:
db_table = 'world'
class Fortune(models.Model):
message = models.CharField(max_length=65535)
class Meta:
db_table = 'fortune'
<commit_msg>Fix table name for MySQL<commit_after>from django.db import models
# Create your models here.
class World(models.Model):
randomnumber = models.IntegerField()
class Meta:
db_table = 'World'
class Fortune(models.Model):
message = models.CharField(max_length=65535)
class Meta:
db_table = 'Fortune'
|
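The capitalization matters because MySQL table names are case-sensitive on Linux by default, so the ORM must use exactly the name the schema defines. A quick way to confirm what Django will now query, in a shell with this project's settings loaded (a sketch, not part of the commit; it assumes the app is importable as world):

from world.models import World, Fortune

print(World._meta.db_table)    # -> 'World'
print(Fortune._meta.db_table)  # -> 'Fortune'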
0802859bd579fd93c3d29e33ea84e3796f685163
|
set_building.py
|
set_building.py
|
import sys
import json
import time
from sklearn.cluster import KMeans
num_words = int(sys.argv[1])
num_clusters = int(sys.argv[2])
f = open("google-10000-vectors.json","r").read().split("\n")
word_list = []
vector_list = []
for i in range(0,num_words):
j = json.loads(f[i])
word_list.append(j[0])
vector_list.append(j[1])
old_time = time.time()
kmeans_clust = KMeans(n_clusters = num_clusters)
idx = kmeans_clust.fit_predict(vector_list)
new_time = time.time()
print("Time taken for clustering", new_time - old_time)
k=[[] for x in range(num_clusters)]
word_centroid_map = dict(zip(word_list,idx))
for word in word_centroid_map.keys():
k[word_centroid_map[word]].append(word)
open("Kmeans-"+str(num_words)+"words-"+str(num_clusters)+"clusters.json","w").write(json.dumps(k))
|
Set Building Implemented * word2vec vectors generated from vocabulary file * KMeans clustering employed for grouping semantically similar vectors * storing as separate files with group of similar sets
|
Set Building Implemented
* word2vec vectors generated from vocabulary file
* KMeans clustering employed for grouping semantically similar vectors
* storing as separate files with group of similar sets
|
Python
|
mit
|
iisc-sa-open/trsl,Jaiswal-ruhil/trsl,iisc-sa-open/trsl
|
Set Building Implemented
* word2vec vectors generated from vocabulary file
* KMeans clustering employed for grouping semantically similar vectors
* storing as separate files with group of similar sets
|
import sys
import json
import time
from sklearn.cluster import KMeans
num_words = int(sys.argv[1])
num_clusters = int(sys.argv[2])
f = open("google-10000-vectors.json","r").read().split("\n")
word_list = []
vector_list = []
for i in range(0,num_words):
j = json.loads(f[i])
word_list.append(j[0])
vector_list.append(j[1])
old_time = time.time()
kmeans_clust = KMeans(n_clusters = num_clusters)
idx = kmeans_clust.fit_predict(vector_list)
new_time = time.time()
print("Time taken for clustering", new_time - old_time)
k=[[] for x in range(num_clusters)]
word_centroid_map = dict(zip(word_list,idx))
for word in word_centroid_map.keys():
k[word_centroid_map[word]].append(word)
open("Kmeans-"+str(num_words)+"words-"+str(num_clusters)+"clusters.json","w").write(json.dumps(k))
|
<commit_before><commit_msg>Set Building Implemented
* word2vec vectors generated from vocabulary file
* KMeans clustering employed for grouping semantically similar vectors
* storing as separate files with group of similar sets<commit_after>
|
import sys
import json
import time
from sklearn.cluster import KMeans
num_words = int(sys.argv[1])
num_clusters = int(sys.argv[2])
f = open("google-10000-vectors.json","r").read().split("\n")
word_list = []
vector_list = []
for i in range(0,num_words):
j = json.loads(f[i])
word_list.append(j[0])
vector_list.append(j[1])
old_time = time.time()
kmeans_clust = KMeans(n_clusters = num_clusters)
idx = kmeans_clust.fit_predict(vector_list)
new_time = time.time()
print("Time taken for clustering", new_time - old_time)
k=[[] for x in range(num_clusters)]
word_centroid_map = dict(zip(word_list,idx))
for word in word_centroid_map.keys():
k[word_centroid_map[word]].append(word)
open("Kmeans-"+str(num_words)+"words-"+str(num_clusters)+"clusters.json","w").write(json.dumps(k))
|
Set Building Implemented
* word2vec vectors generated from vocabulary file
* KMeans clustering employed for grouping semantically similar vectors
* storing as separate files with group of similar setsimport sys
import json
import time
from sklearn.cluster import KMeans
num_words = int(sys.argv[1])
num_clusters = int(sys.argv[2])
f = open("google-10000-vectors.json","r").read().split("\n")
word_list = []
vector_list = []
for i in range(0,num_words):
j = json.loads(f[i])
word_list.append(j[0])
vector_list.append(j[1])
old_time = time.time()
kmeans_clust = KMeans(n_clusters = num_clusters)
idx = kmeans_clust.fit_predict(vector_list)
new_time = time.time()
print("Time taken for clustering", new_time - old_time)
k=[[] for x in range(num_clusters)]
word_centroid_map = dict(zip(word_list,idx))
for word in word_centroid_map.keys():
k[word_centroid_map[word]].append(word)
open("Kmeans-"+str(num_words)+"words-"+str(num_clusters)+"clusters.json","w").write(json.dumps(k))
|
<commit_before><commit_msg>Set Building Implemented
* word2vec vectors generated from vocabulary file
* KMeans clustering employed for grouping semantically similar vectors
* storing as separate files with group of similar sets<commit_after>import sys
import json
import time
from sklearn.cluster import KMeans
num_words = int(sys.argv[1])
num_clusters = int(sys.argv[2])
f = open("google-10000-vectors.json","r").read().split("\n")
word_list = []
vector_list = []
for i in range(0,num_words):
j = json.loads(f[i])
word_list.append(j[0])
vector_list.append(j[1])
old_time = time.time()
kmeans_clust = KMeans(n_clusters = num_clusters)
idx = kmeans_clust.fit_predict(vector_list)
new_time = time.time()
print("Time taken for clustering", new_time - old_time)
k=[[] for x in range(num_clusters)]
word_centroid_map = dict(zip(word_list,idx))
for word in word_centroid_map.keys():
k[word_centroid_map[word]].append(word)
open("Kmeans-"+str(num_words)+"words-"+str(num_clusters)+"clusters.json","w").write(json.dumps(k))
|
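The script reads the word count and cluster count positionally from sys.argv, so a run such as `python set_building.py 10000 500` writes Kmeans-10000words-500clusters.json. A sketch of reading that output back; the file name matches that hypothetical run:

import json

with open("Kmeans-10000words-500clusters.json") as f:
    clusters = json.load(f)
print(len(clusters))     # number of clusters (500 in this run)
print(clusters[0][:10])  # a few words grouped into the first set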
|
87f4c67f937ce23c01e41427f038152f38db82dc
|
python/utils/serial_debug.py
|
python/utils/serial_debug.py
|
#!/usr/bin/env python
# coding=utf8
# http://stackoverflow.com/a/15095449/706819
import os
import pty
def print_serial():
master, slave = pty.openpty()
tty_name = os.ttyname(slave)
print("Write serial data to {}\nCtrl-C to exit\n\n".format(tty_name))
while True:
print(os.read(master, 1024))
if __name__ == '__main__':
print_serial()
|
Add debug util to listen on a tty and print input
|
Add debug util to listen on a tty and print input
Makes it much easier to debug the arduino_serial stuff.
|
Python
|
bsd-3-clause
|
aranchelk/headmouse
|
Add debug util to listen on a tty and print input
Makes it much easier to debug the arduino_serial stuff.
|
#!/usr/bin/env python
# coding=utf8
# http://stackoverflow.com/a/15095449/706819
import os
import pty
def print_serial():
master, slave = pty.openpty()
tty_name = os.ttyname(slave)
print("Write serial data to {}\nCtrl-C to exit\n\n".format(tty_name))
while True:
print(os.read(master, 1024))
if __name__ == '__main__':
print_serial()
|
<commit_before><commit_msg>Add debug util to listen on a tty and print input
Makes it much easier to debug the arduino_serial stuff.<commit_after>
|
#!/usr/bin/env python
# coding=utf8
# http://stackoverflow.com/a/15095449/706819
import os
import pty
def print_serial():
master, slave = pty.openpty()
tty_name = os.ttyname(slave)
print("Write serial data to {}\nCtrl-C to exit\n\n".format(tty_name))
while True:
print(os.read(master, 1024))
if __name__ == '__main__':
print_serial()
|
Add debug util to listen on a tty and print input
Makes it much easier to debug the arduino_serial stuff.
#!/usr/bin/env python
# coding=utf8
# http://stackoverflow.com/a/15095449/706819
import os
import pty
def print_serial():
master, slave = pty.openpty()
tty_name = os.ttyname(slave)
print("Write serial data to {}\nCtrl-C to exit\n\n".format(tty_name))
while True:
print(os.read(master, 1024))
if __name__ == '__main__':
print_serial()
|
<commit_before><commit_msg>Add debug util to listen on a tty and print input
Makes it much easier to debug the arduino_serial stuff.<commit_after>#!/usr/bin/env python
# coding=utf8
# http://stackoverflow.com/a/15095449/706819
import os
import pty
def print_serial():
master, slave = pty.openpty()
tty_name = os.ttyname(slave)
print("Write serial data to {}\nCtrl-C to exit\n\n".format(tty_name))
while True:
print(os.read(master, 1024))
if __name__ == '__main__':
print_serial()
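To exercise the listener, write bytes to the tty path it prints from a second process; a minimal sketch (the /dev/pts/5 path is an assumption — substitute whatever print_serial() reports):
import os
fd = os.open("/dev/pts/5", os.O_WRONLY)  # assumed path; copy it from the script's output
os.write(fd, b"hello\n")  # appears as b'hello\n' in print_serial()'s loop
os.close(fd)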
|
|
f4425bc8d63c725329ea04898c86275928f0f4be
|
scripts/learning_curve.py
|
scripts/learning_curve.py
|
#!/usr/bin/env python
import argparse
import pandas as pd
import matplotlib.pyplot as plt
def learning_curve(csv_file):
df = pd.read_csv(csv_file)
df_train = df.query("type == 'train'")
df_val = df.query("type == 'val'")
plt.figure()
# train loss
plt.subplot(221)
plt.semilogy(df_train.i_iter, df_train.loss, 'o', markersize=1, color='r',
alpha=.5, label='train loss')
plt.title('train loss')
plt.xlabel('iteration')
plt.ylabel('loss')
# val loss
plt.subplot(222)
plt.semilogy(df_val.i_iter, df_val.loss, 'o-', color='r',
alpha=.5, label='val loss')
plt.title('val loss')
plt.xlabel('iteration')
plt.ylabel('loss')
# train accuracy
plt.subplot(223)
plt.plot(df_train.i_iter, df_train.accuracy, 'o', markersize=1, color='g',
alpha=.5, label='train accuracy')
plt.title('train accuracy')
plt.xlabel('iteration')
plt.ylabel('accuracy')
# val accuracy
plt.subplot(224)
plt.plot(df_val.i_iter, df_val.accuracy, 'o-', color='g',
alpha=.5, label='val accuracy')
plt.title('val accuracy')
plt.xlabel('iteration')
plt.ylabel('accuracy')
plt.show()
def main():
parser = argparse.ArgumentParser()
parser.add_argument('csv_file')
args = parser.parse_args()
learning_curve(args.csv_file)
if __name__ == '__main__':
main()
|
Add script to draw learning curve
|
Add script to draw learning curve
|
Python
|
mit
|
wkentaro/fcn
|
Add script to draw learning curve
|
#!/usr/bin/env python
import argparse
import pandas as pd
import matplotlib.pyplot as plt
def learning_curve(csv_file):
df = pd.read_csv(csv_file)
df_train = df.query("type == 'train'")
df_val = df.query("type == 'val'")
plt.figure()
# train loss
plt.subplot(221)
plt.semilogy(df_train.i_iter, df_train.loss, 'o', markersize=1, color='r',
alpha=.5, label='train loss')
plt.title('train loss')
plt.xlabel('iteration')
plt.ylabel('loss')
# val loss
plt.subplot(222)
plt.semilogy(df_val.i_iter, df_val.loss, 'o-', color='r',
alpha=.5, label='val loss')
plt.title('val loss')
plt.xlabel('iteration')
plt.ylabel('loss')
# train accuracy
plt.subplot(223)
plt.plot(df_train.i_iter, df_train.accuracy, 'o', markersize=1, color='g',
alpha=.5, label='train accuracy')
plt.title('train accuracy')
plt.xlabel('iteration')
plt.ylabel('accuracy')
# val accuracy
plt.subplot(224)
plt.plot(df_val.i_iter, df_val.accuracy, 'o-', color='g',
alpha=.5, label='val accuracy')
plt.title('val accuracy')
plt.xlabel('iteration')
plt.ylabel('accuracy')
plt.show()
def main():
parser = argparse.ArgumentParser()
parser.add_argument('csv_file')
args = parser.parse_args()
learning_curve(args.csv_file)
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add script to draw learning curve<commit_after>
|
#!/usr/bin/env python
import argparse
import pandas as pd
import matplotlib.pyplot as plt
def learning_curve(csv_file):
df = pd.read_csv(csv_file)
df_train = df.query("type == 'train'")
df_val = df.query("type == 'val'")
plt.figure()
# train loss
plt.subplot(221)
plt.semilogy(df_train.i_iter, df_train.loss, 'o', markersize=1, color='r',
alpha=.5, label='train loss')
plt.title('train loss')
plt.xlabel('iteration')
plt.ylabel('loss')
# val loss
plt.subplot(222)
plt.semilogy(df_val.i_iter, df_val.loss, 'o-', color='r',
alpha=.5, label='val loss')
plt.title('val loss')
plt.xlabel('iteration')
plt.ylabel('loss')
# train accuracy
plt.subplot(223)
plt.plot(df_train.i_iter, df_train.accuracy, 'o', markersize=1, color='g',
alpha=.5, label='train accuracy')
plt.title('train accuracy')
plt.xlabel('iteration')
plt.ylabel('accuracy')
# val accuracy
plt.subplot(224)
plt.plot(df_val.i_iter, df_val.accuracy, 'o-', color='g',
alpha=.5, label='val accuracy')
plt.title('val accuracy')
plt.xlabel('iteration')
plt.ylabel('accuracy')
plt.show()
def main():
parser = argparse.ArgumentParser()
parser.add_argument('csv_file')
args = parser.parse_args()
learning_curve(args.csv_file)
if __name__ == '__main__':
main()
|
Add script to draw learning curve
#!/usr/bin/env python
import argparse
import pandas as pd
import matplotlib.pyplot as plt
def learning_curve(csv_file):
df = pd.read_csv(csv_file)
df_train = df.query("type == 'train'")
df_val = df.query("type == 'val'")
plt.figure()
# train loss
plt.subplot(221)
plt.semilogy(df_train.i_iter, df_train.loss, 'o', markersize=1, color='r',
alpha=.5, label='train loss')
plt.title('train loss')
plt.xlabel('iteration')
plt.ylabel('loss')
# val loss
plt.subplot(222)
plt.semilogy(df_val.i_iter, df_val.loss, 'o-', color='r',
alpha=.5, label='val loss')
plt.title('val loss')
plt.xlabel('iteration')
plt.ylabel('loss')
# train accuracy
plt.subplot(223)
plt.plot(df_train.i_iter, df_train.accuracy, 'o', markersize=1, color='g',
alpha=.5, label='train accuracy')
plt.title('train accuracy')
plt.xlabel('iteration')
plt.ylabel('accuracy')
# val accuracy
plt.subplot(224)
plt.plot(df_val.i_iter, df_val.accuracy, 'o-', color='g',
alpha=.5, label='val accuracy')
plt.title('val accuracy')
plt.xlabel('iteration')
plt.ylabel('accuracy')
plt.show()
def main():
parser = argparse.ArgumentParser()
parser.add_argument('csv_file')
args = parser.parse_args()
learning_curve(args.csv_file)
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add script to draw learning curve<commit_after>#!/usr/bin/env python
import argparse
import pandas as pd
import matplotlib.pyplot as plt
def learning_curve(csv_file):
df = pd.read_csv(csv_file)
df_train = df.query("type == 'train'")
df_val = df.query("type == 'val'")
plt.figure()
# train loss
plt.subplot(221)
plt.semilogy(df_train.i_iter, df_train.loss, 'o', markersize=1, color='r',
alpha=.5, label='train loss')
plt.title('train loss')
plt.xlabel('iteration')
plt.ylabel('loss')
# val loss
plt.subplot(222)
plt.semilogy(df_val.i_iter, df_val.loss, 'o-', color='r',
alpha=.5, label='val loss')
plt.title('val loss')
plt.xlabel('iteration')
plt.ylabel('loss')
# train accuracy
plt.subplot(223)
plt.plot(df_train.i_iter, df_train.accuracy, 'o', markersize=1, color='g',
alpha=.5, label='train accuracy')
plt.title('train accuracy')
plt.xlabel('iteration')
plt.ylabel('accuracy')
# val accuracy
plt.subplot(224)
plt.plot(df_val.i_iter, df_val.accuracy, 'o-', color='g',
alpha=.5, label='val accuracy')
plt.title('val accuracy')
plt.xlabel('iteration')
plt.ylabel('accuracy')
plt.show()
def main():
parser = argparse.ArgumentParser()
parser.add_argument('csv_file')
args = parser.parse_args()
learning_curve(args.csv_file)
if __name__ == '__main__':
main()
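The plotting script implies a CSV schema with columns type ('train'/'val'), i_iter, loss and accuracy; a sketch of a compatible logger (the numbers are dummy values, and the schema is inferred from the queries above):
import pandas as pd
rows = [{'type': 'train', 'i_iter': i, 'loss': 1.0 / (i + 1), 'accuracy': i / 100.0}
        for i in range(100)]
rows += [{'type': 'val', 'i_iter': i, 'loss': 1.5 / (i + 1), 'accuracy': i / 120.0}
         for i in range(0, 100, 10)]
pd.DataFrame(rows).to_csv('log.csv', index=False)  # then: learning_curve.py log.csv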
|
|
b5a108873ed9537b205be75eee58d7f6984a8a18
|
myuw_api/quarter.py
|
myuw_api/quarter.py
|
from django.conf import settings
from django.core.exceptions import ObjectDoesNotExist
from datetime import date, datetime
from models import Term
from restclients.sws_client import SWSClient
import logging
import urllib
import re
import json
# quarter class definition
class Quarter:
__logger = logging.getLogger('quartermanager')
__sws_client = SWSClient()
def __init__(self):
pass
def get_cur_quarter(self):
# see if we have local data
try:
self.local = Term.objects.get(
first_day_quarter__lte=datetime.today(),
last_final_exam_date__gte=datetime.today()
)
except ObjectDoesNotExist:
            self.__mock()
        return self.local
    def get_next_quarter(self):
        pass
    def get_cur_quarter_from_sws(self):
        url = '...'  # mock SWS URL base
        self.sws_result = self.__sws_client.get_current_term(url)
    def get_next_quarter_from_sws(self):
        url = '...'
        self.sws_result = self.__sws_client.get_next_term(url)
    def get_prev_quarter_from_sws(self):
        url = '...'
        self.sws_result = self.__sws_client.get_previous_term(url)
    def refresh_sws(self):
        self.get_cur_quarter_from_sws()
self.local = Term (
year = self.sws_result.year,
quarter = self.sws_result.quarter,
first_day_quarter = self.sws_result.first_day,
last_day_instruction = self.sws_result.last_day_of_classes,
aterm_last_date = self.sws_result.last_add_day_a_term,
bterm_first_date = self.sws_result.b_term_first_day,
last_final_exam_date = self.sws_result.last_final_exam_day,
last_verified = datetime.now()
)
        self.__save()
    def __mock(self):
        # mock data
        self.local = Term (
            year = '2012',
            quarter = '3',
            first_day_quarter = date(2012, 6, 18),
            last_day_instruction = date(2012, 8, 10),
            aterm_last_date = date(2012, 7, 18),
            bterm_first_date = date(2012, 6, 19),
            last_final_exam_date = date(2012, 8, 17),
            last_verified = datetime.now()
        )
        self.__save()
def __save(self):
self.local.save()
|
Create the class that maintains the term cache.
|
Create the class that maintains the term cache.
|
Python
|
apache-2.0
|
uw-it-aca/myuw,uw-it-aca/myuw,fanglinfang/myuw,fanglinfang/myuw,uw-it-aca/myuw,uw-it-aca/myuw,fanglinfang/myuw
|
Create the class that maintains the term cache.
|
from django.conf import settings
from django.core.exceptions import ObjectDoesNotExist
from datetime import date, datetime
from models import Term
from restclients.sws_client import SWSClient
import logging
import urllib
import re
import json
# quarter class definition
class Quarter:
__logger = logging.getLogger('quartermanager')
__sws_client = SWSClient()
def __init__(self):
pass
def get_cur_quarter(self):
# see if we have local data
try:
self.local = Term.objects.get(
first_day_quarter__lte=datetime.today(),
last_final_exam_date__gte=datetime.today()
)
except ObjectDoesNotExist:
            self.__mock()
        return self.local
    def get_next_quarter(self):
        pass
    def get_cur_quarter_from_sws(self):
        url = '...'  # mock SWS URL base
        self.sws_result = self.__sws_client.get_current_term(url)
    def get_next_quarter_from_sws(self):
        url = '...'
        self.sws_result = self.__sws_client.get_next_term(url)
    def get_prev_quarter_from_sws(self):
        url = '...'
        self.sws_result = self.__sws_client.get_previous_term(url)
    def refresh_sws(self):
        self.get_cur_quarter_from_sws()
self.local = Term (
year = self.sws_result.year,
quarter = self.sws_result.quarter,
first_day_quarter = self.sws_result.first_day,
last_day_instruction = self.sws_result.last_day_of_classes,
aterm_last_date = self.sws_result.last_add_day_a_term,
bterm_first_date = self.sws_result.b_term_first_day,
last_final_exam_date = self.sws_result.last_final_exam_day,
last_verified = datetime.now()
)
        self.__save()
    def __mock(self):
        # mock data
        self.local = Term (
            year = '2012',
            quarter = '3',
            first_day_quarter = date(2012, 6, 18),
            last_day_instruction = date(2012, 8, 10),
            aterm_last_date = date(2012, 7, 18),
            bterm_first_date = date(2012, 6, 19),
            last_final_exam_date = date(2012, 8, 17),
            last_verified = datetime.now()
        )
        self.__save()
def __save(self):
self.local.save()
|
<commit_before><commit_msg>Create the class that maintains the term cache.<commit_after>
|
from django.conf import settings
from django.core.exceptions import ObjectDoesNotExist
from datetime import date, datetime
from models import Term
from restclients.sws_client import SWSClient
import logging
import urllib
import re
import json
# quarter class definition
class Quarter:
__logger = logging.getLogger('quartermanager')
__sws_client = SWSClient()
def __init__(self):
pass
def get_cur_quarter(self):
# see if we have local data
try:
self.local = Term.objects.get(
first_day_quarter__lte=datetime.today(),
last_final_exam_date__gte=datetime.today()
)
except ObjectDoesNotExist:
            self.__mock()
        return self.local
    def get_next_quarter(self):
        pass
    def get_cur_quarter_from_sws(self):
        url = '...'  # mock SWS URL base
        self.sws_result = self.__sws_client.get_current_term(url)
    def get_next_quarter_from_sws(self):
        url = '...'
        self.sws_result = self.__sws_client.get_next_term(url)
    def get_prev_quarter_from_sws(self):
        url = '...'
        self.sws_result = self.__sws_client.get_previous_term(url)
    def refresh_sws(self):
        self.get_cur_quarter_from_sws()
self.local = Term (
year = self.sws_result.year,
quarter = self.sws_result.quarter,
first_day_quarter = self.sws_result.first_day,
last_day_instruction = self.sws_result.last_day_of_classes,
aterm_last_date = self.sws_result.last_add_day_a_term,
bterm_first_date = self.sws_result.b_term_first_day,
last_final_exam_date = self.sws_result.last_final_exam_day,
last_verified = datetime.now()
)
        self.__save()
    def __mock(self):
        # mock data
        self.local = Term (
            year = '2012',
            quarter = '3',
            first_day_quarter = date(2012, 6, 18),
            last_day_instruction = date(2012, 8, 10),
            aterm_last_date = date(2012, 7, 18),
            bterm_first_date = date(2012, 6, 19),
            last_final_exam_date = date(2012, 8, 17),
            last_verified = datetime.now()
        )
        self.__save()
def __save(self):
self.local.save()
|
Create the class that maintains the term cache.
from django.conf import settings
from django.core.exceptions import ObjectDoesNotExist
from datetime import date, datetime
from models import Term
from restclients.sws_client import SWSClient
import logging
import urllib
import re
import json
# quarter class definition
class Quarter:
__logger = logging.getLogger('quartermanager')
__sws_client = SWSClient()
def __init__(self):
pass
def get_cur_quarter(self):
# see if we have local data
try:
self.local = Term.objects.get(
first_day_quarter__lte=datetime.today(),
last_final_exam_date__gte=datetime.today()
)
except ObjectDoesNotExist:
            self.__mock()
        return self.local
    def get_next_quarter(self):
        pass
    def get_cur_quarter_from_sws(self):
        url = '...'  # mock SWS URL base
        self.sws_result = self.__sws_client.get_current_term(url)
    def get_next_quarter_from_sws(self):
        url = '...'
        self.sws_result = self.__sws_client.get_next_term(url)
    def get_prev_quarter_from_sws(self):
        url = '...'
        self.sws_result = self.__sws_client.get_previous_term(url)
    def refresh_sws(self):
        self.get_cur_quarter_from_sws()
self.local = Term (
year = self.sws_result.year,
quarter = self.sws_result.quarter,
first_day_quarter = self.sws_result.first_day,
last_day_instruction = self.sws_result.last_day_of_classes,
aterm_last_date = self.sws_result.last_add_day_a_term,
bterm_first_date = self.sws_result.b_term_first_day,
last_final_exam_date = self.sws_result.last_final_exam_day,
last_verified = datetime.now()
)
        self.__save()
    def __mock(self):
        # mock data
        self.local = Term (
            year = '2012',
            quarter = '3',
            first_day_quarter = date(2012, 6, 18),
            last_day_instruction = date(2012, 8, 10),
            aterm_last_date = date(2012, 7, 18),
            bterm_first_date = date(2012, 6, 19),
            last_final_exam_date = date(2012, 8, 17),
            last_verified = datetime.now()
        )
        self.__save()
def __save(self):
self.local.save()
|
<commit_before><commit_msg>Create the class that maintains the term cache.<commit_after>from django.conf import settings
from django.core.exceptions import ObjectDoesNotExist
from datetime import date, datetime
from models import Term
from restclients.sws_client import SWSClient
import logging
import urllib
import re
import json
# quarter class definition
class Quarter:
__logger = logging.getLogger('quartermanager')
__sws_client = SWSClient()
def __init__(self):
pass
def get_cur_quarter(self):
# see if we have local data
try:
self.local = Term.objects.get(
first_day_quarter__lte=datetime.today(),
last_final_exam_date__gte=datetime.today()
)
except ObjectDoesNotExist:
            self.__mock()
        return self.local
    def get_next_quarter(self):
        pass
    def get_cur_quarter_from_sws(self):
        url = '...'  # mock SWS URL base
        self.sws_result = self.__sws_client.get_current_term(url)
    def get_next_quarter_from_sws(self):
        url = '...'
        self.sws_result = self.__sws_client.get_next_term(url)
    def get_prev_quarter_from_sws(self):
        url = '...'
        self.sws_result = self.__sws_client.get_previous_term(url)
    def refresh_sws(self):
        self.get_cur_quarter_from_sws()
self.local = Term (
year = self.sws_result.year,
quarter = self.sws_result.quarter,
first_day_quarter = self.sws_result.first_day,
last_day_instruction = self.sws_result.last_day_of_classes,
aterm_last_date = self.sws_result.last_add_day_a_term,
bterm_first_date = self.sws_result.b_term_first_day,
last_final_exam_date = self.sws_result.last_final_exam_day,
last_verified = datetime.now()
)
        self.__save()
    def __mock(self):
        # mock data
        self.local = Term (
            year = '2012',
            quarter = '3',
            first_day_quarter = date(2012, 6, 18),
            last_day_instruction = date(2012, 8, 10),
            aterm_last_date = date(2012, 7, 18),
            bterm_first_date = date(2012, 6, 19),
            last_final_exam_date = date(2012, 8, 17),
            last_verified = datetime.now()
        )
        self.__save()
def __save(self):
self.local.save()
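The double-underscore attributes rely on Python name mangling, which is why they must be reached through self — a bare __mock() or __save() raises NameError at call time. A minimal demonstration:
class Demo:
    def __hidden(self):
        return "ok"
    def call(self):
        return self.__hidden()  # mangled to self._Demo__hidden, so this works
print(Demo().call())  # -> ok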
|
|
713b05e49b814a2a924f657294352bc5e7061638
|
tools/chrome_proxy/integration_tests/chrome_proxy_pagesets/reenable_after_bypass.py
|
tools/chrome_proxy/integration_tests/chrome_proxy_pagesets/reenable_after_bypass.py
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry.page import page as page_module
from telemetry import story
class ReenableAfterBypassPage(page_module.Page):
"""A test page for the re-enable after bypass tests.
Attributes:
bypass_seconds_min: The minimum number of seconds that the bypass
triggered by loading this page should last.
bypass_seconds_max: The maximum number of seconds that the bypass
triggered by loading this page should last.
"""
def __init__(self,
url,
page_set,
bypass_seconds_min,
bypass_seconds_max):
super(ReenableAfterBypassPage, self).__init__(url=url, page_set=page_set)
self.bypass_seconds_min = bypass_seconds_min
self.bypass_seconds_max = bypass_seconds_max
class ReenableAfterBypassStorySet(story.StorySet):
""" Chrome proxy test sites """
def __init__(self):
super(ReenableAfterBypassStorySet, self).__init__()
# Test page for "Chrome-Proxy: block=0". Loading this page should cause all
# data reduction proxies to be bypassed for one to five minutes.
self.AddStory(ReenableAfterBypassPage(
url="http://check.googlezip.net/block",
page_set=self,
bypass_seconds_min=60,
bypass_seconds_max=300))
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry.page import page as page_module
from telemetry import story
class ReenableAfterBypassPage(page_module.Page):
"""A test page for the re-enable after bypass tests.
Attributes:
bypass_seconds_min: The minimum number of seconds that the bypass
triggered by loading this page should last.
bypass_seconds_max: The maximum number of seconds that the bypass
triggered by loading this page should last.
"""
def __init__(self,
url,
page_set,
bypass_seconds_min,
bypass_seconds_max):
super(ReenableAfterBypassPage, self).__init__(url=url, page_set=page_set)
self.bypass_seconds_min = bypass_seconds_min
self.bypass_seconds_max = bypass_seconds_max
class ReenableAfterBypassStorySet(story.StorySet):
""" Chrome proxy test sites """
def __init__(self):
super(ReenableAfterBypassStorySet, self).__init__()
# Test page for "Chrome-Proxy: block=0". Loading this page should cause all
# data reduction proxies to be bypassed for one to five minutes.
self.AddStory(ReenableAfterBypassPage(
url="http://check.googlezip.net/block/",
page_set=self,
bypass_seconds_min=60,
bypass_seconds_max=300))
|
Add trailing slash to chrome_proxy telemetry test page URL.
|
Add trailing slash to chrome_proxy telemetry test page URL.
BUG=507797
Review URL: https://codereview.chromium.org/1229563002
Cr-Commit-Position: 972c6d2dc6dd5efdad1377c0d224e03eb8f276f7@{#337895}
|
Python
|
bsd-3-clause
|
hgl888/chromium-crosswalk,TheTypoMaster/chromium-crosswalk,Chilledheart/chromium,chuan9/chromium-crosswalk,TheTypoMaster/chromium-crosswalk,Just-D/chromium-1,Chilledheart/chromium,Chilledheart/chromium,TheTypoMaster/chromium-crosswalk,axinging/chromium-crosswalk,Just-D/chromium-1,TheTypoMaster/chromium-crosswalk,axinging/chromium-crosswalk,TheTypoMaster/chromium-crosswalk,axinging/chromium-crosswalk,Just-D/chromium-1,Just-D/chromium-1,Chilledheart/chromium,Pluto-tv/chromium-crosswalk,chuan9/chromium-crosswalk,Chilledheart/chromium,axinging/chromium-crosswalk,chuan9/chromium-crosswalk,hgl888/chromium-crosswalk,chuan9/chromium-crosswalk,axinging/chromium-crosswalk,Pluto-tv/chromium-crosswalk,hgl888/chromium-crosswalk,Chilledheart/chromium,chuan9/chromium-crosswalk,TheTypoMaster/chromium-crosswalk,Just-D/chromium-1,Pluto-tv/chromium-crosswalk,chuan9/chromium-crosswalk,TheTypoMaster/chromium-crosswalk,hgl888/chromium-crosswalk,Just-D/chromium-1,Chilledheart/chromium,TheTypoMaster/chromium-crosswalk,Pluto-tv/chromium-crosswalk,hgl888/chromium-crosswalk,chuan9/chromium-crosswalk,axinging/chromium-crosswalk,chuan9/chromium-crosswalk,Pluto-tv/chromium-crosswalk,axinging/chromium-crosswalk,axinging/chromium-crosswalk,hgl888/chromium-crosswalk,TheTypoMaster/chromium-crosswalk,Just-D/chromium-1,chuan9/chromium-crosswalk,Just-D/chromium-1,Pluto-tv/chromium-crosswalk,hgl888/chromium-crosswalk,Pluto-tv/chromium-crosswalk,Pluto-tv/chromium-crosswalk,Chilledheart/chromium,Chilledheart/chromium,Pluto-tv/chromium-crosswalk,hgl888/chromium-crosswalk,axinging/chromium-crosswalk,Just-D/chromium-1,axinging/chromium-crosswalk,axinging/chromium-crosswalk,hgl888/chromium-crosswalk
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry.page import page as page_module
from telemetry import story
class ReenableAfterBypassPage(page_module.Page):
"""A test page for the re-enable after bypass tests.
Attributes:
bypass_seconds_min: The minimum number of seconds that the bypass
triggered by loading this page should last.
bypass_seconds_max: The maximum number of seconds that the bypass
triggered by loading this page should last.
"""
def __init__(self,
url,
page_set,
bypass_seconds_min,
bypass_seconds_max):
super(ReenableAfterBypassPage, self).__init__(url=url, page_set=page_set)
self.bypass_seconds_min = bypass_seconds_min
self.bypass_seconds_max = bypass_seconds_max
class ReenableAfterBypassStorySet(story.StorySet):
""" Chrome proxy test sites """
def __init__(self):
super(ReenableAfterBypassStorySet, self).__init__()
# Test page for "Chrome-Proxy: block=0". Loading this page should cause all
# data reduction proxies to be bypassed for one to five minutes.
self.AddStory(ReenableAfterBypassPage(
url="http://check.googlezip.net/block",
page_set=self,
bypass_seconds_min=60,
bypass_seconds_max=300))
Add trailing slash to chrome_proxy telemetry test page URL.
BUG=507797
Review URL: https://codereview.chromium.org/1229563002
Cr-Commit-Position: 972c6d2dc6dd5efdad1377c0d224e03eb8f276f7@{#337895}
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry.page import page as page_module
from telemetry import story
class ReenableAfterBypassPage(page_module.Page):
"""A test page for the re-enable after bypass tests.
Attributes:
bypass_seconds_min: The minimum number of seconds that the bypass
triggered by loading this page should last.
bypass_seconds_max: The maximum number of seconds that the bypass
triggered by loading this page should last.
"""
def __init__(self,
url,
page_set,
bypass_seconds_min,
bypass_seconds_max):
super(ReenableAfterBypassPage, self).__init__(url=url, page_set=page_set)
self.bypass_seconds_min = bypass_seconds_min
self.bypass_seconds_max = bypass_seconds_max
class ReenableAfterBypassStorySet(story.StorySet):
""" Chrome proxy test sites """
def __init__(self):
super(ReenableAfterBypassStorySet, self).__init__()
# Test page for "Chrome-Proxy: block=0". Loading this page should cause all
# data reduction proxies to be bypassed for one to five minutes.
self.AddStory(ReenableAfterBypassPage(
url="http://check.googlezip.net/block/",
page_set=self,
bypass_seconds_min=60,
bypass_seconds_max=300))
|
<commit_before># Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry.page import page as page_module
from telemetry import story
class ReenableAfterBypassPage(page_module.Page):
"""A test page for the re-enable after bypass tests.
Attributes:
bypass_seconds_min: The minimum number of seconds that the bypass
triggered by loading this page should last.
bypass_seconds_max: The maximum number of seconds that the bypass
triggered by loading this page should last.
"""
def __init__(self,
url,
page_set,
bypass_seconds_min,
bypass_seconds_max):
super(ReenableAfterBypassPage, self).__init__(url=url, page_set=page_set)
self.bypass_seconds_min = bypass_seconds_min
self.bypass_seconds_max = bypass_seconds_max
class ReenableAfterBypassStorySet(story.StorySet):
""" Chrome proxy test sites """
def __init__(self):
super(ReenableAfterBypassStorySet, self).__init__()
# Test page for "Chrome-Proxy: block=0". Loading this page should cause all
# data reduction proxies to be bypassed for one to five minutes.
self.AddStory(ReenableAfterBypassPage(
url="http://check.googlezip.net/block",
page_set=self,
bypass_seconds_min=60,
bypass_seconds_max=300))
<commit_msg>Add trailing slash to chrome_proxy telemetry test page URL.
BUG=507797
Review URL: https://codereview.chromium.org/1229563002
Cr-Commit-Position: 972c6d2dc6dd5efdad1377c0d224e03eb8f276f7@{#337895}<commit_after>
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry.page import page as page_module
from telemetry import story
class ReenableAfterBypassPage(page_module.Page):
"""A test page for the re-enable after bypass tests.
Attributes:
bypass_seconds_min: The minimum number of seconds that the bypass
triggered by loading this page should last.
bypass_seconds_max: The maximum number of seconds that the bypass
triggered by loading this page should last.
"""
def __init__(self,
url,
page_set,
bypass_seconds_min,
bypass_seconds_max):
super(ReenableAfterBypassPage, self).__init__(url=url, page_set=page_set)
self.bypass_seconds_min = bypass_seconds_min
self.bypass_seconds_max = bypass_seconds_max
class ReenableAfterBypassStorySet(story.StorySet):
""" Chrome proxy test sites """
def __init__(self):
super(ReenableAfterBypassStorySet, self).__init__()
# Test page for "Chrome-Proxy: block=0". Loading this page should cause all
# data reduction proxies to be bypassed for one to five minutes.
self.AddStory(ReenableAfterBypassPage(
url="http://check.googlezip.net/block/",
page_set=self,
bypass_seconds_min=60,
bypass_seconds_max=300))
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry.page import page as page_module
from telemetry import story
class ReenableAfterBypassPage(page_module.Page):
"""A test page for the re-enable after bypass tests.
Attributes:
bypass_seconds_min: The minimum number of seconds that the bypass
triggered by loading this page should last.
bypass_seconds_max: The maximum number of seconds that the bypass
triggered by loading this page should last.
"""
def __init__(self,
url,
page_set,
bypass_seconds_min,
bypass_seconds_max):
super(ReenableAfterBypassPage, self).__init__(url=url, page_set=page_set)
self.bypass_seconds_min = bypass_seconds_min
self.bypass_seconds_max = bypass_seconds_max
class ReenableAfterBypassStorySet(story.StorySet):
""" Chrome proxy test sites """
def __init__(self):
super(ReenableAfterBypassStorySet, self).__init__()
# Test page for "Chrome-Proxy: block=0". Loading this page should cause all
# data reduction proxies to be bypassed for one to five minutes.
self.AddStory(ReenableAfterBypassPage(
url="http://check.googlezip.net/block",
page_set=self,
bypass_seconds_min=60,
bypass_seconds_max=300))
Add trailing slash to chrome_proxy telemetry test page URL.
BUG=507797
Review URL: https://codereview.chromium.org/1229563002
Cr-Commit-Position: 972c6d2dc6dd5efdad1377c0d224e03eb8f276f7@{#337895}
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry.page import page as page_module
from telemetry import story
class ReenableAfterBypassPage(page_module.Page):
"""A test page for the re-enable after bypass tests.
Attributes:
bypass_seconds_min: The minimum number of seconds that the bypass
triggered by loading this page should last.
bypass_seconds_max: The maximum number of seconds that the bypass
triggered by loading this page should last.
"""
def __init__(self,
url,
page_set,
bypass_seconds_min,
bypass_seconds_max):
super(ReenableAfterBypassPage, self).__init__(url=url, page_set=page_set)
self.bypass_seconds_min = bypass_seconds_min
self.bypass_seconds_max = bypass_seconds_max
class ReenableAfterBypassStorySet(story.StorySet):
""" Chrome proxy test sites """
def __init__(self):
super(ReenableAfterBypassStorySet, self).__init__()
# Test page for "Chrome-Proxy: block=0". Loading this page should cause all
# data reduction proxies to be bypassed for one to five minutes.
self.AddStory(ReenableAfterBypassPage(
url="http://check.googlezip.net/block/",
page_set=self,
bypass_seconds_min=60,
bypass_seconds_max=300))
|
<commit_before># Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry.page import page as page_module
from telemetry import story
class ReenableAfterBypassPage(page_module.Page):
"""A test page for the re-enable after bypass tests.
Attributes:
bypass_seconds_min: The minimum number of seconds that the bypass
triggered by loading this page should last.
bypass_seconds_max: The maximum number of seconds that the bypass
triggered by loading this page should last.
"""
def __init__(self,
url,
page_set,
bypass_seconds_min,
bypass_seconds_max):
super(ReenableAfterBypassPage, self).__init__(url=url, page_set=page_set)
self.bypass_seconds_min = bypass_seconds_min
self.bypass_seconds_max = bypass_seconds_max
class ReenableAfterBypassStorySet(story.StorySet):
""" Chrome proxy test sites """
def __init__(self):
super(ReenableAfterBypassStorySet, self).__init__()
# Test page for "Chrome-Proxy: block=0". Loading this page should cause all
# data reduction proxies to be bypassed for one to five minutes.
self.AddStory(ReenableAfterBypassPage(
url="http://check.googlezip.net/block",
page_set=self,
bypass_seconds_min=60,
bypass_seconds_max=300))
<commit_msg>Add trailing slash to chrome_proxy telemetry test page URL.
BUG=507797
Review URL: https://codereview.chromium.org/1229563002
Cr-Commit-Position: 972c6d2dc6dd5efdad1377c0d224e03eb8f276f7@{#337895}<commit_after># Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry.page import page as page_module
from telemetry import story
class ReenableAfterBypassPage(page_module.Page):
"""A test page for the re-enable after bypass tests.
Attributes:
bypass_seconds_min: The minimum number of seconds that the bypass
triggered by loading this page should last.
bypass_seconds_max: The maximum number of seconds that the bypass
triggered by loading this page should last.
"""
def __init__(self,
url,
page_set,
bypass_seconds_min,
bypass_seconds_max):
super(ReenableAfterBypassPage, self).__init__(url=url, page_set=page_set)
self.bypass_seconds_min = bypass_seconds_min
self.bypass_seconds_max = bypass_seconds_max
class ReenableAfterBypassStorySet(story.StorySet):
""" Chrome proxy test sites """
def __init__(self):
super(ReenableAfterBypassStorySet, self).__init__()
# Test page for "Chrome-Proxy: block=0". Loading this page should cause all
# data reduction proxies to be bypassed for one to five minutes.
self.AddStory(ReenableAfterBypassPage(
url="http://check.googlezip.net/block/",
page_set=self,
bypass_seconds_min=60,
bypass_seconds_max=300))
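One plausible motivation for the trailing slash — an assumption; the linked bug holds the authoritative rationale — is avoiding a server redirect from /block to /block/ interfering with the bypass check. A quick probe sketch (relies on network access and the host's current behavior):
import urllib.request
for url in ("http://check.googlezip.net/block", "http://check.googlezip.net/block/"):
    try:
        resp = urllib.request.urlopen(url, timeout=5)
        print(url, "->", resp.geturl(), resp.status)
    except Exception as exc:
        print(url, "failed:", exc)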
|
8381e50b641c3c1e94dec18bcd14e5a1eff490cf
|
scripts/ensure_tilesize.py
|
scripts/ensure_tilesize.py
|
#!/usr/bin/python
#
# This is a helper script to ensure an image has the correct tile size.
# It uses pgmagick[1] to read and (if needed) correct the image. To use
# it on a number of files one could use e.g. the find command:
#
# find <data-folder> -name *.jpg -exec scripts/ensure_tilesize.py {} 256 \;
#
# [1] http://pypi.python.org/pypi/pgmagick/
import sys
import os
from pgmagick import Image, Geometry, Color, CompositeOperator as co
# Make sure we got the arguments we expect
if len(sys.argv) != 3:
print >> sys.stderr, "Usage: ensure_tilesize.py <FILENAME> <TILESIZE>"
sys.exit(1)
image_path = sys.argv[1]
tile_size = int(sys.argv[2])
# Make sure the file actually exists
if not os.path.exists(image_path):
print >> sys.stderr, "Could not find file!"
sys.exit(1)
# Get properties of image
image = Image(image_path)
image_width = image.size().width()
image_height = image.size().height()
image_name = image.fileName()
# If the image has the correct size, just exit
if image_width == tile_size and image_height == tile_size:
sys.exit(0)
# A new image with the correct size is needed, create it
geometry = Geometry(tile_size, tile_size)
color = Color("black")
new_image = Image(geometry, color)
# Copy original image to position 0,0 of new image
new_image.composite(image, 0, 0, co.OverCompositeOp)
# Override original image
new_image.write(image_name)
print >> sys.stdout, "Corrected " + image_name + " from " + str(image_width) + "x" + str(image_height) + " to " + str(tile_size) + "x" + str(tile_size)
|
Add script to ensure the correct tile size of a file
|
Add script to ensure the correct tile size of a file
By @tomka
|
Python
|
agpl-3.0
|
htem/CATMAID,htem/CATMAID,htem/CATMAID,fzadow/CATMAID,fzadow/CATMAID,htem/CATMAID,fzadow/CATMAID,fzadow/CATMAID
|
Add script to ensure the correct tile size of a file
By @tomka
|
#!/usr/bin/python
#
# This is a helper script to ensure an image has the correct tile size.
# It uses pgmagick[1] to read and (if needed) correct the image. To use
# it on a number of files one could use e.g. the find command:
#
# find <data-folder> -name *.jpg -exec scripts/ensure_tilesize.py {} 256 \;
#
# [1] http://pypi.python.org/pypi/pgmagick/
import sys
import os
from pgmagick import Image, Geometry, Color, CompositeOperator as co
# Make sure we got the arguments we expect
if len(sys.argv) != 3:
print >> sys.stderr, "Usage: ensure_tilesize.py <FILENAME> <TILESIZE>"
sys.exit(1)
image_path = sys.argv[1]
tile_size = int(sys.argv[2])
# Make sure the file actually exists
if not os.path.exists(image_path):
print >> sys.stderr, "Could not find file!"
sys.exit(1)
# Get properties of image
image = Image(image_path)
image_width = image.size().width()
image_height = image.size().height()
image_name = image.fileName()
# If the image has the correct size, just exit
if image_width == tile_size and image_height == tile_size:
sys.exit(0)
# A new image with the correct size is needed, create it
geometry = Geometry(tile_size, tile_size)
color = Color("black")
new_image = Image(geometry, color)
# Copy original image to position 0,0 of new image
new_image.composite(image, 0, 0, co.OverCompositeOp)
# Override original image
new_image.write(image_name)
print >> sys.stdout, "Corrected " + image_name + " from " + str(image_width) + "x" + str(image_height) + " to " + str(tile_size) + "x" + str(tile_size)
|
<commit_before><commit_msg>Add script to ensure the correct tile size of a file
By @tomka<commit_after>
|
#!/usr/bin/python
#
# This is a helper script to ensure an image has the correct tile size.
# It uses pgmagick[1] to read and (if needed) correct the image. To use
# it on a number of files one could use e.g. the find command:
#
# find <data-folder> -name *.jpg -exec scripts/ensure_tilesize.py {} 256 \;
#
# [1] http://pypi.python.org/pypi/pgmagick/
import sys
import os
from pgmagick import Image, Geometry, Color, CompositeOperator as co
# Make sure we got the arguments we expect
if len(sys.argv) != 3:
print >> sys.stderr, "Usage: ensure_tilesize.py <FILENAME> <TILESIZE>"
sys.exit(1)
image_path = sys.argv[1]
tile_size = int(sys.argv[2])
# Make sure the file actually exists
if not os.path.exists(image_path):
print >> sys.stderr, "Could not find file!"
sys.exit(1)
# Get properties of image
image = Image(image_path)
image_width = image.size().width()
image_height = image.size().height()
image_name = image.fileName()
# If the image has the correct size, just exit
if image_width == tile_size and image_height == tile_size:
sys.exit(0)
# A new image with the correct size is needed, create it
geometry = Geometry(tile_size, tile_size)
color = Color("black")
new_image = Image(geometry, color)
# Copy original image to position 0,0 of new image
new_image.composite(image, 0, 0, co.OverCompositeOp)
# Override original image
new_image.write(image_name)
print >> sys.stdout, "Corrected " + image_name + " from " + str(image_width) + "x" + str(image_height) + " to " + str(tile_size) + "x" + str(tile_size)
|
Add script to ensure the correct tile size of a file
By @tomka
#!/usr/bin/python
#
# This is a helper script to ensure an image has the correct tile size.
# It uses pgmagick[1] to read and (if needed) correct the image. To use
# it on a number of files one could use e.g. the find command:
#
# find <data-folder> -name *.jpg -exec scripts/ensure_tilesize.py {} 256 \;
#
# [1] http://pypi.python.org/pypi/pgmagick/
import sys
import os
from pgmagick import Image, Geometry, Color, CompositeOperator as co
# Make sure we got the arguments we expect
if len(sys.argv) != 3:
print >> sys.stderr, "Usage: ensure_tilesize.py <FILENAME> <TILESIZE>"
sys.exit(1)
image_path = sys.argv[1]
tile_size = int(sys.argv[2])
# Make sure the file actually exists
if not os.path.exists(image_path):
print >> sys.stderr, "Could not find file!"
sys.exit(1)
# Get properties of image
image = Image(image_path)
image_width = image.size().width()
image_height = image.size().height()
image_name = image.fileName()
# If the image has the correct size, just exit
if image_width == tile_size and image_height == tile_size:
sys.exit(0)
# A new image with the correct size is needed, create it
geometry = Geometry(tile_size, tile_size)
color = Color("black")
new_image = Image(geometry, color)
# Copy original image to position 0,0 of new image
new_image.composite(image, 0, 0, co.OverCompositeOp)
# Override original image
new_image.write(image_name)
print >> sys.stdout, "Corrected " + image_name + " from " + str(image_width) + "x" + str(image_height) + " to " + str(tile_size) + "x" + str(tile_size)
|
<commit_before><commit_msg>Add script to ensure the correct tile size of a file
By @tomka<commit_after>#!/usr/bin/python
#
# This is a helper script to ensure an image has the correct tile size.
# It uses pgmagick[1] to read and (if needed) correct the image. To use
# it on a number of files one could use e.g. the find command:
#
# find <data-folder> -name *.jpg -exec scripts/ensure_tilesize.py {} 256 \;
#
# [1] http://pypi.python.org/pypi/pgmagick/
import sys
import os
from pgmagick import Image, Geometry, Color, CompositeOperator as co
# Make sure we got the arguments we expect
if len(sys.argv) != 3:
print >> sys.stderr, "Usage: ensure_tilesize.py <FILENAME> <TILESIZE>"
sys.exit(1)
image_path = sys.argv[1]
tile_size = int(sys.argv[2])
# Make sure the file actually exists
if not os.path.exists(image_path):
print >> sys.stderr, "Could not find file!"
sys.exit(1)
# Get properties of image
image = Image(image_path)
image_width = image.size().width()
image_height = image.size().height()
image_name = image.fileName()
# If the image has the correct size, just exit
if image_width == tile_size and image_height == tile_size:
sys.exit(0)
# A new image with the correct size is needed, create it
geometry = Geometry(tile_size, tile_size)
color = Color("black")
new_image = Image(geometry, color)
# Copy original image to position 0,0 of new image
new_image.composite(image, 0, 0, co.OverCompositeOp)
# Override original image
new_image.write(image_name)
print >> sys.stdout, "Corrected " + image_name + " from " + str(image_width) + "x" + str(image_height) + " to " + str(tile_size) + "x" + str(tile_size)
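The script is Python 2 (print >> chevron syntax). For Python 3 environments, a rough equivalent using Pillow instead of pgmagick — an assumption, not a drop-in replacement for the original — could look like:
from PIL import Image
def ensure_tile_size(path, tile_size):
    im = Image.open(path)
    if im.size == (tile_size, tile_size):
        return
    canvas = Image.new(im.mode, (tile_size, tile_size), "black")
    canvas.paste(im, (0, 0))  # original content at top-left, padding stays black
    canvas.save(path)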
|
|
bfe960a8aa35cd8b0c306882b008390990fce23f
|
pycroft/lib/user_deletion.py
|
pycroft/lib/user_deletion.py
|
from datetime import timedelta
from sqlalchemy import func, nulls_last
from sqlalchemy.engine import Row
from sqlalchemy.future import select
from sqlalchemy.sql.elements import and_, not_
from sqlalchemy.sql.functions import current_timestamp
from pycroft import config, Config
from pycroft.model.property import CurrentProperty
from pycroft.model.session import session
from pycroft.model.user import User, Membership
def get_archivable_members() -> list[Row]:
"""Return all the users that qualify for being archived right now.
Selected are those users
- whose last membership in the member_group ended two weeks in the past,
- excluding users who currently have the `noarchive` property.
"""
# see FunctionElement.over
window_args = {'partition_by': User.id, 'order_by': nulls_last(Membership.ends_at),
'rows': (None, None)}
last_mem = (
select(
User.id.label('user_id'),
func.last_value(Membership.id).over(**window_args).label('mem_id'),
func.last_value(Membership.ends_at).over(**window_args).label('mem_end'),
)
.select_from(User)
.distinct()
.join(Membership)
.join(Config, Config.member_group_id == Membership.group_id)
).cte('last_mem')
stmt = (
select(
User,
last_mem.c.mem_id,
last_mem.c.mem_end,
CurrentProperty.property_name.is_not(None).label('noarchive')
)
.select_from(last_mem)
.join(CurrentProperty,
and_(last_mem.c.user_id == CurrentProperty.user_id,
CurrentProperty.property_name == 'noarchive',
not_(CurrentProperty.denied)),
isouter=True)
.join(User, User.id == last_mem.c.user_id)
.filter(last_mem.c.mem_end < current_timestamp() - timedelta(days=14))
.order_by(last_mem.c.mem_end)
)
return session.execute(stmt).all()
def get_invalidated_archive_memberships() -> list[Membership]:
"""Get all memberships in `to_be_archived` of users who have an active `do-not-archive` property.
This can happen if archivability is detected, and later the user becomes a member again,
or if for some reason the user shall not be archived.
"""
pass
|
Add first working `get_archivable_members` function
|
Add first working `get_archivable_members` function
|
Python
|
apache-2.0
|
agdsn/pycroft,agdsn/pycroft,agdsn/pycroft,agdsn/pycroft,agdsn/pycroft
|
Add first working `get_archivable_members` function
|
from datetime import timedelta
from sqlalchemy import func, nulls_last
from sqlalchemy.engine import Row
from sqlalchemy.future import select
from sqlalchemy.sql.elements import and_, not_
from sqlalchemy.sql.functions import current_timestamp
from pycroft import config, Config
from pycroft.model.property import CurrentProperty
from pycroft.model.session import session
from pycroft.model.user import User, Membership
def get_archivable_members() -> list[Row]:
"""Return all the users that qualify for being archived right now.
Selected are those users
- whose last membership in the member_group ended two weeks in the past,
- excluding users who currently have the `noarchive` property.
"""
# see FunctionElement.over
window_args = {'partition_by': User.id, 'order_by': nulls_last(Membership.ends_at),
'rows': (None, None)}
last_mem = (
select(
User.id.label('user_id'),
func.last_value(Membership.id).over(**window_args).label('mem_id'),
func.last_value(Membership.ends_at).over(**window_args).label('mem_end'),
)
.select_from(User)
.distinct()
.join(Membership)
.join(Config, Config.member_group_id == Membership.group_id)
).cte('last_mem')
stmt = (
select(
User,
last_mem.c.mem_id,
last_mem.c.mem_end,
CurrentProperty.property_name.is_not(None).label('noarchive')
)
.select_from(last_mem)
.join(CurrentProperty,
and_(last_mem.c.user_id == CurrentProperty.user_id,
CurrentProperty.property_name == 'noarchive',
not_(CurrentProperty.denied)),
isouter=True)
.join(User, User.id == last_mem.c.user_id)
.filter(last_mem.c.mem_end < current_timestamp() - timedelta(days=14))
.order_by(last_mem.c.mem_end)
)
return session.execute(stmt).all()
def get_invalidated_archive_memberships() -> list[Membership]:
"""Get all memberships in `to_be_archived` of users who have an active `do-not-archive` property.
This can happen if archivability is detected, and later the user becomes a member again,
or if for some reason the user shall not be archived.
"""
pass
|
<commit_before><commit_msg>Add first working `get_archivable_members` function<commit_after>
|
from datetime import timedelta
from sqlalchemy import func, nulls_last
from sqlalchemy.engine import Row
from sqlalchemy.future import select
from sqlalchemy.sql.elements import and_, not_
from sqlalchemy.sql.functions import current_timestamp
from pycroft import config, Config
from pycroft.model.property import CurrentProperty
from pycroft.model.session import session
from pycroft.model.user import User, Membership
def get_archivable_members() -> list[Row]:
"""Return all the users that qualify for being archived right now.
Selected are those users
- whose last membership in the member_group ended two weeks in the past,
- excluding users who currently have the `noarchive` property.
"""
# see FunctionElement.over
window_args = {'partition_by': User.id, 'order_by': nulls_last(Membership.ends_at),
'rows': (None, None)}
last_mem = (
select(
User.id.label('user_id'),
func.last_value(Membership.id).over(**window_args).label('mem_id'),
func.last_value(Membership.ends_at).over(**window_args).label('mem_end'),
)
.select_from(User)
.distinct()
.join(Membership)
.join(Config, Config.member_group_id == Membership.group_id)
).cte('last_mem')
stmt = (
select(
User,
last_mem.c.mem_id,
last_mem.c.mem_end,
CurrentProperty.property_name.is_not(None).label('noarchive')
)
.select_from(last_mem)
.join(CurrentProperty,
and_(last_mem.c.user_id == CurrentProperty.user_id,
CurrentProperty.property_name == 'noarchive',
not_(CurrentProperty.denied)),
isouter=True)
.join(User, User.id == last_mem.c.user_id)
.filter(last_mem.c.mem_end < current_timestamp() - timedelta(days=14))
.order_by(last_mem.c.mem_end)
)
return session.execute(stmt).all()
def get_invalidated_archive_memberships() -> list[Membership]:
"""Get all memberships in `to_be_archived` of users who have an active `do-not-archive` property.
This can happen if archivability is detected, and later the user becomes a member again,
or if for some reason the user shall not be archived.
"""
pass
|
Add first working `get_archivable_members` function
from datetime import timedelta
from sqlalchemy import func, nulls_last
from sqlalchemy.engine import Row
from sqlalchemy.future import select
from sqlalchemy.sql.elements import and_, not_
from sqlalchemy.sql.functions import current_timestamp
from pycroft import config, Config
from pycroft.model.property import CurrentProperty
from pycroft.model.session import session
from pycroft.model.user import User, Membership
def get_archivable_members() -> list[Row]:
"""Return all the users that qualify for being archived right now.
Selected are those users
- whose last membership in the member_group ended two weeks in the past,
- excluding users who currently have the `noarchive` property.
"""
# see FunctionElement.over
window_args = {'partition_by': User.id, 'order_by': nulls_last(Membership.ends_at),
'rows': (None, None)}
last_mem = (
select(
User.id.label('user_id'),
func.last_value(Membership.id).over(**window_args).label('mem_id'),
func.last_value(Membership.ends_at).over(**window_args).label('mem_end'),
)
.select_from(User)
.distinct()
.join(Membership)
.join(Config, Config.member_group_id == Membership.group_id)
).cte('last_mem')
stmt = (
select(
User,
last_mem.c.mem_id,
last_mem.c.mem_end,
CurrentProperty.property_name.is_not(None).label('noarchive')
)
.select_from(last_mem)
.join(CurrentProperty,
and_(last_mem.c.user_id == CurrentProperty.user_id,
CurrentProperty.property_name == 'noarchive',
not_(CurrentProperty.denied)),
isouter=True)
.join(User, User.id == last_mem.c.user_id)
.filter(last_mem.c.mem_end < current_timestamp() - timedelta(days=14))
.order_by(last_mem.c.mem_end)
)
return session.execute(stmt).all()
def get_invalidated_archive_memberships() -> list[Membership]:
"""Get all memberships in `to_be_archived` of users who have an active `do-not-archive` property.
This can happen if archivability is detected, and later the user becomes a member again,
or if for some reason the user shall not be archived.
"""
pass
|
<commit_before><commit_msg>Add first working `get_archivable_members` function<commit_after>from datetime import timedelta
from sqlalchemy import func, nulls_last
from sqlalchemy.engine import Row
from sqlalchemy.future import select
from sqlalchemy.sql.elements import and_, not_
from sqlalchemy.sql.functions import current_timestamp
from pycroft import config, Config
from pycroft.model.property import CurrentProperty
from pycroft.model.session import session
from pycroft.model.user import User, Membership
def get_archivable_members() -> list[Row]:
"""Return all the users that qualify for being archived right now.
Selected are those users
- whose last membership in the member_group ended two weeks in the past,
- excluding users who currently have the `noarchive` property.
"""
# see FunctionElement.over
window_args = {'partition_by': User.id, 'order_by': nulls_last(Membership.ends_at),
'rows': (None, None)}
last_mem = (
select(
User.id.label('user_id'),
func.last_value(Membership.id).over(**window_args).label('mem_id'),
func.last_value(Membership.ends_at).over(**window_args).label('mem_end'),
)
.select_from(User)
.distinct()
.join(Membership)
.join(Config, Config.member_group_id == Membership.group_id)
).cte('last_mem')
stmt = (
select(
User,
last_mem.c.mem_id,
last_mem.c.mem_end,
CurrentProperty.property_name.is_not(None).label('noarchive')
)
.select_from(last_mem)
.join(CurrentProperty,
and_(last_mem.c.user_id == CurrentProperty.user_id,
CurrentProperty.property_name == 'noarchive',
not_(CurrentProperty.denied)),
isouter=True)
.join(User, User.id == last_mem.c.user_id)
.filter(last_mem.c.mem_end < current_timestamp() - timedelta(days=14))
.order_by(last_mem.c.mem_end)
)
return session.execute(stmt).all()
def get_invalidated_archive_memberships() -> list[Membership]:
"""Get all memberships in `to_be_archived` of users who have an active `do-not-archive` property.
This can happen if archivability is detected, and later the user becomes a member again,
or if for some reason the user shall not be archived.
"""
pass
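Each returned Row unpacks positionally as (User, mem_id, mem_end, noarchive); note the query flags noarchive users rather than filtering them out. A hypothetical caller sketch:
for user, mem_id, mem_end, noarchive in get_archivable_members():
    if not noarchive:  # skip users carrying the noarchive property
        print(user.id, "last membership ended", mem_end)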
|
|
446dd62862127839f2803612274c93484b5ed384
|
greenland_climate_change/linear_or_polynomial_fit_seasonal_change.py
|
greenland_climate_change/linear_or_polynomial_fit_seasonal_change.py
|
import numpy as np
import pandas as pd
from scipy import interpolate, poly1d
import matplotlib.pyplot as plt
def main():
df = pd.read_csv('greenland-mass-change.csv')
all_xs = df['year']
all_ys = df['mass change']
train = df[df['year'] < 2012]
test = df[df['year'] > 2012]
train_xs = train['year']
train_ys = train['mass change']
test_xs = test['year']
spline = interpolate.UnivariateSpline(all_xs, all_ys)
# Found 3 experimentally. See climate_change_analysis_polynomial_fit.py
coefficients = np.polyfit(train_xs, train_ys, 3)
trend_fn = poly1d(coefficients)
earliest = min(train['year'])
latest = max(train['year'])
seasonal_fluctuations = []
for n, t in enumerate(test_xs):
start = (t - int(t)) + int(earliest)
if start < earliest:
start = start + 1
end = (t - int(t)) + int(latest)
if end > latest:
end = end - 1
steps = [(s + start) for s in range(int(end - start))]
spline_samples = [spline(s) for s in steps]
relative_trend = trend_fn(steps)
samples = [s - r for s, r in zip(spline_samples, relative_trend)]
seasonal_fluctuations.append((samples, steps))
for n, (f, s) in enumerate(seasonal_fluctuations):
plt.plot(s, f)
plt.title('Greenland mass change')
plt.xlabel('Time')
plt.ylabel('Mass')
plt.grid(True)
    plt.savefig('greenland-mass-change.png')
    plt.show()
if __name__ == '__main__':
main()
|
Determine whether linear or polynomial fit is best for seasonal change
|
Determine whether linear or polynomial fit is best for seasonal change
|
Python
|
mit
|
noelevans/sandpit,noelevans/sandpit,noelevans/sandpit,noelevans/sandpit,noelevans/sandpit,noelevans/sandpit
|
Determine whether linear or polynomial fit is best for seasonal change
|
import numpy as np
import pandas as pd
from scipy import interpolate, poly1d
import matplotlib.pyplot as plt
def main():
df = pd.read_csv('greenland-mass-change.csv')
all_xs = df['year']
all_ys = df['mass change']
train = df[df['year'] < 2012]
test = df[df['year'] > 2012]
train_xs = train['year']
train_ys = train['mass change']
test_xs = test['year']
spline = interpolate.UnivariateSpline(all_xs, all_ys)
# Found 3 experimentally. See climate_change_analysis_polynomial_fit.py
coefficients = np.polyfit(train_xs, train_ys, 3)
trend_fn = poly1d(coefficients)
earliest = min(train['year'])
latest = max(train['year'])
seasonal_fluctuations = []
for n, t in enumerate(test_xs):
start = (t - int(t)) + int(earliest)
if start < earliest:
start = start + 1
end = (t - int(t)) + int(latest)
if end > latest:
end = end - 1
steps = [(s + start) for s in range(int(end - start))]
spline_samples = [spline(s) for s in steps]
relative_trend = trend_fn(steps)
samples = [s - r for s, r in zip(spline_samples, relative_trend)]
seasonal_fluctuations.append((samples, steps))
for n, (f, s) in enumerate(seasonal_fluctuations):
plt.plot(s, f)
plt.title('Greenland mass change')
plt.xlabel('Time')
plt.ylabel('Mass')
plt.grid(True)
    plt.savefig('greenland-mass-change.png')
    plt.show()
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Determine whether linear or polynomial fit is best for seasonal change<commit_after>
|
import numpy as np
import pandas as pd
from scipy import interpolate, poly1d
import matplotlib.pyplot as plt
def main():
df = pd.read_csv('greenland-mass-change.csv')
all_xs = df['year']
all_ys = df['mass change']
train = df[df['year'] < 2012]
test = df[df['year'] > 2012]
train_xs = train['year']
train_ys = train['mass change']
test_xs = test['year']
spline = interpolate.UnivariateSpline(all_xs, all_ys)
# Found 3 experimentally. See climate_change_analysis_polynomial_fit.py
coefficients = np.polyfit(train_xs, train_ys, 3)
trend_fn = poly1d(coefficients)
earliest = min(train['year'])
latest = max(train['year'])
seasonal_fluctuations = []
for n, t in enumerate(test_xs):
start = (t - int(t)) + int(earliest)
if start < earliest:
start = start + 1
end = (t - int(t)) + int(latest)
if end > latest:
end = end - 1
steps = [(s + start) for s in range(int(end - start))]
spline_samples = [spline(s) for s in steps]
relative_trend = trend_fn(steps)
samples = [s - r for s, r in zip(spline_samples, relative_trend)]
seasonal_fluctuations.append((samples, steps))
for n, (f, s) in enumerate(seasonal_fluctuations):
plt.plot(s, f)
plt.title('Greenland mass change')
plt.xlabel('Time')
plt.ylabel('Mass')
plt.grid(True)
    plt.savefig('greenland-mass-change.png')
    plt.show()
if __name__ == '__main__':
main()
|
Determine whether linear or polynomial fit is best for seasonal change
import numpy as np
import pandas as pd
from scipy import interpolate, poly1d
import matplotlib.pyplot as plt
def main():
df = pd.read_csv('greenland-mass-change.csv')
all_xs = df['year']
all_ys = df['mass change']
train = df[df['year'] < 2012]
test = df[df['year'] > 2012]
train_xs = train['year']
train_ys = train['mass change']
test_xs = test['year']
spline = interpolate.UnivariateSpline(all_xs, all_ys)
# Found 3 experimentally. See climate_change_analysis_polynomial_fit.py
coefficients = np.polyfit(train_xs, train_ys, 3)
trend_fn = poly1d(coefficients)
earliest = min(train['year'])
latest = max(train['year'])
seasonal_fluctuations = []
for n, t in enumerate(test_xs):
start = (t - int(t)) + int(earliest)
if start < earliest:
start = start + 1
end = (t - int(t)) + int(latest)
if end > latest:
end = end - 1
steps = [(s + start) for s in range(int(end - start))]
spline_samples = [spline(s) for s in steps]
relative_trend = trend_fn(steps)
samples = [s - r for s, r in zip(spline_samples, relative_trend)]
seasonal_fluctuations.append((samples, steps))
for n, (f, s) in enumerate(seasonal_fluctuations):
plt.plot(s, f)
plt.title('Greenland mass change')
plt.xlabel('Time')
plt.ylabel('Mass')
plt.grid(True)
plt.savefig('greenland-mass-change.png')
plt.show()
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Determine whether linear or polynomial fit is best for seasonal change<commit_after>import numpy as np
import pandas as pd
from scipy import interpolate
from numpy import poly1d
import matplotlib.pyplot as plt
def main():
df = pd.read_csv('greenland-mass-change.csv')
all_xs = df['year']
all_ys = df['mass change']
train = df[df['year'] < 2012]
test = df[df['year'] > 2012]
train_xs = train['year']
train_ys = train['mass change']
test_xs = test['year']
spline = interpolate.UnivariateSpline(all_xs, all_ys)
# Found 3 experimentally. See climate_change_analysis_polynomial_fit.py
coefficients = np.polyfit(train_xs, train_ys, 3)
trend_fn = poly1d(coefficients)
earliest = min(train['year'])
latest = max(train['year'])
seasonal_fluctuations = []
for n, t in enumerate(test_xs):
start = (t - int(t)) + int(earliest)
if start < earliest:
start = start + 1
end = (t - int(t)) + int(latest)
if end > latest:
end = end - 1
steps = [(s + start) for s in range(int(end - start))]
spline_samples = [spline(s) for s in steps]
relative_trend = trend_fn(steps)
samples = [s - r for s, r in zip(spline_samples, relative_trend)]
seasonal_fluctuations.append((samples, steps))
for n, (f, s) in enumerate(seasonal_fluctuations):
plt.plot(s, f)
plt.title('Greenland mass change')
plt.xlabel('Time')
plt.ylabel('Mass')
plt.grid(True)
plt.savefig('greenland-mass-change.png')
plt.show()
if __name__ == '__main__':
main()
|
|
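The commit above isolates the seasonal signal by subtracting a fitted cubic trend from a spline of the full series. A minimal, self-contained sketch of that detrending step on synthetic data (all names and numbers below are illustrative, not from the original repo; note that poly1d lives in numpy in current releases):

import numpy as np
from scipy import interpolate

# Synthetic monthly series: a cubic downward trend plus an annual cycle.
t = np.arange(2002, 2012, 1.0 / 12)
mass = -10 * (t - 2002) ** 3 + 50 * np.sin(2 * np.pi * t)

spline = interpolate.UnivariateSpline(t, mass)  # smooth fit to the raw series
trend_fn = np.poly1d(np.polyfit(t, mass, 3))    # cubic long-term trend

seasonal = spline(t) - trend_fn(t)              # residual = seasonal fluctuation
print(seasonal[:6])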
8664d781bbbed5753979ff299174efc1906eaaf0
|
taiga/users/migrations/0018_remove_vote_issues_in_roles_permissions_field.py
|
taiga/users/migrations/0018_remove_vote_issues_in_roles_permissions_field.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2016-04-04 09:32
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('users', '0017_auto_20160208_1751'),
]
operations = [
migrations.RunSQL(
"UPDATE users_role SET permissions = ARRAY_REMOVE(permissions, 'vote_issues')"
),
]
|
Clean user role permissions: remove vote_issues in Role.permissions
|
Clean user role permissions: remove vote_issues in Role.permissions
|
Python
|
agpl-3.0
|
taigaio/taiga-back,dayatz/taiga-back,taigaio/taiga-back,xdevelsistemas/taiga-back-community,dayatz/taiga-back,dayatz/taiga-back,xdevelsistemas/taiga-back-community,taigaio/taiga-back,xdevelsistemas/taiga-back-community
|
Clean user role permissions: remove vote_issues in Role.permissions
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2016-04-04 09:32
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('users', '0017_auto_20160208_1751'),
]
operations = [
migrations.RunSQL(
"UPDATE users_role SET permissions = ARRAY_REMOVE(permissions, 'vote_issues')"
),
]
|
<commit_before><commit_msg>Clean user role permissions: remove vote_issues in Role.permissions<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2016-04-04 09:32
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('users', '0017_auto_20160208_1751'),
]
operations = [
migrations.RunSQL(
"UPDATE users_role SET permissions = ARRAY_REMOVE(permissions, 'vote_issues')"
),
]
|
Clean user role permissions: remove vote_issues in Role.permissions
# -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2016-04-04 09:32
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('users', '0017_auto_20160208_1751'),
]
operations = [
migrations.RunSQL(
"UPDATE users_role SET permissions = ARRAY_REMOVE(permissions, 'vote_issues')"
),
]
|
<commit_before><commit_msg>Clean user role permissions: remove vote_issues in Role.permissions<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2016-04-04 09:32
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('users', '0017_auto_20160208_1751'),
]
operations = [
migrations.RunSQL(
"UPDATE users_role SET permissions = ARRAY_REMOVE(permissions, 'vote_issues')"
),
]
|
|
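RunSQL operations like the one above are irreversible by default, so the migration cannot be rolled back with a plain `migrate users 0017`. A hedged sketch of the same pattern with an explicit no-op reverse (the app label, dependency, and permission name here are hypothetical):

from django.db import migrations


class Migration(migrations.Migration):
    dependencies = [('users', '0018_some_previous_migration')]  # hypothetical

    operations = [
        migrations.RunSQL(
            # Strip the obsolete permission from every role's array column.
            sql="UPDATE users_role SET permissions = "
                "ARRAY_REMOVE(permissions, 'old_permission')",
            # The removed values cannot be restored, so keep the migration
            # formally reversible with an explicit no-op.
            reverse_sql=migrations.RunSQL.noop,
        ),
    ]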
2291a18db52f8a6443805736a5477e2f81158ab5
|
web/form/base_form.py
|
web/form/base_form.py
|
from collections import OrderedDict
from flask_wtf import Form
class BaseForm(Form):
def __iter__(self):
field_order = getattr(self, '_order', [])
if field_order:
ordered_fields = OrderedDict()
for name in field_order:
ordered_fields[name] = self._fields.pop(name)
ordered_fields.update(self._fields)
self._fields = ordered_fields
return super(BaseForm, self).__iter__()
|
Introduce a `BaseForm` to allow customization of field order
|
Introduce a `BaseForm` to allow customization of field order
|
Python
|
apache-2.0
|
lukasjuhrich/pycroft,lukasjuhrich/pycroft,agdsn/pycroft,agdsn/pycroft,lukasjuhrich/pycroft,agdsn/pycroft,lukasjuhrich/pycroft,agdsn/pycroft,agdsn/pycroft
|
Introduce a `BaseForm` to allow customization of field order
|
from collections import OrderedDict
from flask_wtf import Form
class BaseForm(Form):
def __iter__(self):
field_order = getattr(self, '_order', [])
if field_order:
ordered_fields = OrderedDict()
for name in field_order:
ordered_fields[name] = self._fields.pop(name)
ordered_fields.update(self._fields)
self._fields = ordered_fields
return super(BaseForm, self).__iter__()
|
<commit_before><commit_msg>Introduce a `BaseForm` to allow customization of field order<commit_after>
|
from collections import OrderedDict
from flask_wtf import Form
class BaseForm(Form):
def __iter__(self):
field_order = getattr(self, '_order', [])
if field_order:
ordered_fields = OrderedDict()
for name in field_order:
ordered_fields[name] = self._fields.pop(name)
ordered_fields.update(self._fields)
self._fields = ordered_fields
return super(BaseForm, self).__iter__()
|
Introduce a `BaseForm` to allow customization of field order
from collections import OrderedDict
from flask_wtf import Form
class BaseForm(Form):
def __iter__(self):
field_order = getattr(self, '_order', [])
if field_order:
ordered_fields = OrderedDict()
for name in field_order:
ordered_fields[name] = self._fields.pop(name)
ordered_fields.update(self._fields)
self._fields = ordered_fields
return super(BaseForm, self).__iter__()
|
<commit_before><commit_msg>Introduce a `BaseForm` to allow customization of field order<commit_after>from collections import OrderedDict
from flask_wtf import Form
class BaseForm(Form):
def __iter__(self):
field_order = getattr(self, '_order', [])
if field_order:
ordered_fields = OrderedDict()
for name in field_order:
ordered_fields[name] = self._fields.pop(name)
ordered_fields.update(self._fields)
self._fields = ordered_fields
return super(BaseForm, self).__iter__()
|
|
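The `_order` hook above reorders `self._fields` lazily on first iteration. A sketch of how a subclass would use it; plain `wtforms.Form` is used here so the example runs without a Flask request context (field names are made up):

from collections import OrderedDict

from wtforms import Form, StringField


class OrderedForm(Form):
    """Iterate fields in the order given by an optional `_order` attribute."""

    def __iter__(self):
        field_order = getattr(self, '_order', [])
        if field_order:
            ordered = OrderedDict(
                (name, self._fields.pop(name)) for name in field_order)
            ordered.update(self._fields)  # unlisted fields keep their place at the end
            self._fields = ordered
        return super().__iter__()


class LoginForm(OrderedForm):
    password = StringField('Password')
    login = StringField('Login')
    _order = ['login', 'password']  # force login to render first


print([f.name for f in LoginForm()])  # ['login', 'password']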
0e013b6608f36dc1ba44904d3c94faee5150f5af
|
st2common/tests/unit/test_resource_registrar.py
|
st2common/tests/unit/test_resource_registrar.py
|
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import mock
from st2common.content import utils as content_utils
from st2common.bootstrap.base import ResourceRegistrar
from st2common.persistence.pack import Pack
from st2common.persistence.pack import ConfigSchema
from st2tests import DbTestCase
from st2tests import fixturesloader
__all__ = [
'ResourceRegistrarTestCase'
]
PACK_PATH = os.path.join(fixturesloader.get_fixtures_base_path(), 'dummy_pack_1')
class ResourceRegistrarTestCase(DbTestCase):
def test_register_packs(self):
# Verify DB is empty
pack_dbs = Pack.get_all()
config_schema_dbs = ConfigSchema.get_all()
self.assertEqual(len(pack_dbs), 0)
self.assertEqual(len(config_schema_dbs), 0)
registrar = ResourceRegistrar()
registrar._pack_loader.get_packs = mock.Mock()
registrar._pack_loader.get_packs.return_value = {'dummy_pack_1': PACK_PATH}
packs_base_paths = content_utils.get_packs_base_paths()
registrar.register_packs(base_dirs=packs_base_paths)
# Verify pack and schema have been registered
pack_dbs = Pack.get_all()
config_schema_dbs = ConfigSchema.get_all()
self.assertEqual(len(pack_dbs), 1)
self.assertEqual(len(config_schema_dbs), 1)
self.assertEqual(pack_dbs[0].name, 'dummy_pack_1')
self.assertTrue('api_key' in config_schema_dbs[0].attributes)
self.assertTrue('api_secret' in config_schema_dbs[0].attributes)
|
Add tests for pack and config schema registrar.
|
Add tests for pack and config schema registrar.
|
Python
|
apache-2.0
|
emedvedev/st2,pixelrebel/st2,Plexxi/st2,StackStorm/st2,lakshmi-kannan/st2,nzlosh/st2,Plexxi/st2,punalpatel/st2,emedvedev/st2,peak6/st2,StackStorm/st2,StackStorm/st2,tonybaloney/st2,tonybaloney/st2,Plexxi/st2,lakshmi-kannan/st2,nzlosh/st2,pixelrebel/st2,nzlosh/st2,nzlosh/st2,Plexxi/st2,punalpatel/st2,peak6/st2,pixelrebel/st2,StackStorm/st2,lakshmi-kannan/st2,emedvedev/st2,punalpatel/st2,peak6/st2,tonybaloney/st2
|
Add tests for pack and config schema registrar.
|
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import mock
from st2common.content import utils as content_utils
from st2common.bootstrap.base import ResourceRegistrar
from st2common.persistence.pack import Pack
from st2common.persistence.pack import ConfigSchema
from st2tests import DbTestCase
from st2tests import fixturesloader
__all__ = [
'ResourceRegistrarTestCase'
]
PACK_PATH = os.path.join(fixturesloader.get_fixtures_base_path(), 'dummy_pack_1')
class ResourceRegistrarTestCase(DbTestCase):
def test_register_packs(self):
# Verify DB is empty
pack_dbs = Pack.get_all()
config_schema_dbs = ConfigSchema.get_all()
self.assertEqual(len(pack_dbs), 0)
self.assertEqual(len(config_schema_dbs), 0)
registrar = ResourceRegistrar()
registrar._pack_loader.get_packs = mock.Mock()
registrar._pack_loader.get_packs.return_value = {'dummy_pack_1': PACK_PATH}
packs_base_paths = content_utils.get_packs_base_paths()
registrar.register_packs(base_dirs=packs_base_paths)
# Verify pack and schema have been registered
pack_dbs = Pack.get_all()
config_schema_dbs = ConfigSchema.get_all()
self.assertEqual(len(pack_dbs), 1)
self.assertEqual(len(config_schema_dbs), 1)
self.assertEqual(pack_dbs[0].name, 'dummy_pack_1')
self.assertTrue('api_key' in config_schema_dbs[0].attributes)
self.assertTrue('api_secret' in config_schema_dbs[0].attributes)
|
<commit_before><commit_msg>Add tests for pack and config schema registrar.<commit_after>
|
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import mock
from st2common.content import utils as content_utils
from st2common.bootstrap.base import ResourceRegistrar
from st2common.persistence.pack import Pack
from st2common.persistence.pack import ConfigSchema
from st2tests import DbTestCase
from st2tests import fixturesloader
__all__ = [
'ResourceRegistrarTestCase'
]
PACK_PATH = os.path.join(fixturesloader.get_fixtures_base_path(), 'dummy_pack_1')
class ResourceRegistrarTestCase(DbTestCase):
def test_register_packs(self):
# Verify DB is empty
pack_dbs = Pack.get_all()
config_schema_dbs = ConfigSchema.get_all()
self.assertEqual(len(pack_dbs), 0)
self.assertEqual(len(config_schema_dbs), 0)
registrar = ResourceRegistrar()
registrar._pack_loader.get_packs = mock.Mock()
registrar._pack_loader.get_packs.return_value = {'dummy_pack_1': PACK_PATH}
packs_base_paths = content_utils.get_packs_base_paths()
registrar.register_packs(base_dirs=packs_base_paths)
# Verify pack and schema have been registered
pack_dbs = Pack.get_all()
config_schema_dbs = ConfigSchema.get_all()
self.assertEqual(len(pack_dbs), 1)
self.assertEqual(len(config_schema_dbs), 1)
self.assertEqual(pack_dbs[0].name, 'dummy_pack_1')
self.assertTrue('api_key' in config_schema_dbs[0].attributes)
self.assertTrue('api_secret' in config_schema_dbs[0].attributes)
|
Add tests for pack and config schema registrar.
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import mock
from st2common.content import utils as content_utils
from st2common.bootstrap.base import ResourceRegistrar
from st2common.persistence.pack import Pack
from st2common.persistence.pack import ConfigSchema
from st2tests import DbTestCase
from st2tests import fixturesloader
__all__ = [
'ResourceRegistrarTestCase'
]
PACK_PATH = os.path.join(fixturesloader.get_fixtures_base_path(), 'dummy_pack_1')
class ResourceRegistrarTestCase(DbTestCase):
def test_register_packs(self):
# Verify DB is empty
pack_dbs = Pack.get_all()
config_schema_dbs = ConfigSchema.get_all()
self.assertEqual(len(pack_dbs), 0)
self.assertEqual(len(config_schema_dbs), 0)
registrar = ResourceRegistrar()
registrar._pack_loader.get_packs = mock.Mock()
registrar._pack_loader.get_packs.return_value = {'dummy_pack_1': PACK_PATH}
packs_base_paths = content_utils.get_packs_base_paths()
registrar.register_packs(base_dirs=packs_base_paths)
# Verify pack and schema have been registered
pack_dbs = Pack.get_all()
config_schema_dbs = ConfigSchema.get_all()
self.assertEqual(len(pack_dbs), 1)
self.assertEqual(len(config_schema_dbs), 1)
self.assertEqual(pack_dbs[0].name, 'dummy_pack_1')
self.assertTrue('api_key' in config_schema_dbs[0].attributes)
self.assertTrue('api_secret' in config_schema_dbs[0].attributes)
|
<commit_before><commit_msg>Add tests for pack and config schema registrar.<commit_after># Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import mock
from st2common.content import utils as content_utils
from st2common.bootstrap.base import ResourceRegistrar
from st2common.persistence.pack import Pack
from st2common.persistence.pack import ConfigSchema
from st2tests import DbTestCase
from st2tests import fixturesloader
__all__ = [
'ResourceRegistrarTestCase'
]
PACK_PATH = os.path.join(fixturesloader.get_fixtures_base_path(), 'dummy_pack_1')
class ResourceRegistrarTestCase(DbTestCase):
def test_register_packs(self):
# Verify DB is empty
pack_dbs = Pack.get_all()
config_schema_dbs = ConfigSchema.get_all()
self.assertEqual(len(pack_dbs), 0)
self.assertEqual(len(config_schema_dbs), 0)
registrar = ResourceRegistrar()
registrar._pack_loader.get_packs = mock.Mock()
registrar._pack_loader.get_packs.return_value = {'dummy_pack_1': PACK_PATH}
packs_base_paths = content_utils.get_packs_base_paths()
registrar.register_packs(base_dirs=packs_base_paths)
# Verify pack and schema have been registered
pack_dbs = Pack.get_all()
config_schema_dbs = ConfigSchema.get_all()
self.assertEqual(len(pack_dbs), 1)
self.assertEqual(len(config_schema_dbs), 1)
self.assertEqual(pack_dbs[0].name, 'dummy_pack_1')
self.assertTrue('api_key' in config_schema_dbs[0].attributes)
self.assertTrue('api_secret' in config_schema_dbs[0].attributes)
|
|
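The test above stubs the pack loader on the instance rather than patching the class, which keeps the stub scoped to a single object. The same pattern distilled with stand-in classes (everything below is hypothetical, not StackStorm code):

from unittest import mock


class PackLoader:
    def get_packs(self):
        raise RuntimeError('would scan the filesystem')


class Registrar:
    def __init__(self):
        self._pack_loader = PackLoader()

    def register_packs(self):
        return sorted(self._pack_loader.get_packs())


registrar = Registrar()
# Stub the collaborator on this one instance; the class itself stays untouched.
registrar._pack_loader.get_packs = mock.Mock(
    return_value={'dummy_pack_1': '/tmp/dummy_pack_1'})

assert registrar.register_packs() == ['dummy_pack_1']
registrar._pack_loader.get_packs.assert_called_once_with()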
7350422a1364f996b7ac362e8457e2a5e04afc7c
|
sympy/interactive/tests/test_ipythonprinting.py
|
sympy/interactive/tests/test_ipythonprinting.py
|
"""Tests that the IPython printing module is properly loaded. """
from sympy.interactive.session import init_ipython_session
from sympy.external import import_module
ipython = import_module("IPython", min_module_version="0.11")
# disable tests if ipython is not present
if not ipython:
disabled = True
def test_ipythonprinting():
# Initialize and setup IPython session
app = init_ipython_session()
app.run_cell("from IPython.core.interactiveshell import InteractiveShell")
app.run_cell("inst = InteractiveShell.instance()")
app.run_cell("format = inst.display_formatter.format")
app.run_cell("from sympy import Symbol")
# Printing without printing extension
app.run_cell("a = format(Symbol('pi'))")
assert app.user_ns['a']['text/plain'] == "pi"
# Load printing extension
app.run_cell("%load_ext sympy.interactive.ipythonprinting")
# Printing with printing extension
app.run_cell("a = format(Symbol('pi'))")
assert app.user_ns['a']['text/plain'] == u'\u03c0'
|
"""Tests that the IPython printing module is properly loaded. """
from sympy.interactive.session import init_ipython_session
from sympy.external import import_module
ipython = import_module("IPython", min_module_version="0.11")
# disable tests if ipython is not present
if not ipython:
disabled = True
def test_ipythonprinting():
# Initialize and setup IPython session
app = init_ipython_session()
app.run_cell("ip = get_ipython()")
app.run_cell("inst = ip.instance()")
app.run_cell("format = inst.display_formatter.format")
app.run_cell("from sympy import Symbol")
# Printing without printing extension
app.run_cell("a = format(Symbol('pi'))")
assert app.user_ns['a']['text/plain'] == "pi"
# Load printing extension
app.run_cell("%load_ext sympy.interactive.ipythonprinting")
# Printing with printing extension
app.run_cell("a = format(Symbol('pi'))")
assert app.user_ns['a']['text/plain'] == u'\u03c0'
|
Make ipythonprinting test more robust
|
Make ipythonprinting test more robust
|
Python
|
bsd-3-clause
|
Vishluck/sympy,Mitchkoens/sympy,Davidjohnwilson/sympy,pandeyadarsh/sympy,hrashk/sympy,Davidjohnwilson/sympy,sahmed95/sympy,yukoba/sympy,Sumith1896/sympy,jamesblunt/sympy,moble/sympy,chaffra/sympy,Mitchkoens/sympy,Shaswat27/sympy,saurabhjn76/sympy,ahhda/sympy,sunny94/temp,wanglongqi/sympy,meghana1995/sympy,jaimahajan1997/sympy,jaimahajan1997/sympy,lindsayad/sympy,Sumith1896/sympy,atsao72/sympy,sampadsaha5/sympy,atreyv/sympy,kaichogami/sympy,Curious72/sympy,kumarkrishna/sympy,Designist/sympy,abhiii5459/sympy,mafiya69/sympy,lidavidm/sympy,meghana1995/sympy,jerli/sympy,ga7g08/sympy,ga7g08/sympy,liangjiaxing/sympy,skidzo/sympy,cswiercz/sympy,beni55/sympy,liangjiaxing/sympy,Curious72/sympy,kumarkrishna/sympy,Designist/sympy,cswiercz/sympy,skidzo/sympy,cswiercz/sympy,kumarkrishna/sympy,Gadal/sympy,cccfran/sympy,Designist/sympy,MechCoder/sympy,mafiya69/sympy,emon10005/sympy,ahhda/sympy,drufat/sympy,rahuldan/sympy,toolforger/sympy,Davidjohnwilson/sympy,rahuldan/sympy,wanglongqi/sympy,souravsingh/sympy,AkademieOlympia/sympy,MechCoder/sympy,kaushik94/sympy,kevalds51/sympy,AunShiLord/sympy,sampadsaha5/sympy,lindsayad/sympy,lidavidm/sympy,madan96/sympy,ahhda/sympy,AunShiLord/sympy,bukzor/sympy,saurabhjn76/sympy,MridulS/sympy,vipulroxx/sympy,shipci/sympy,Vishluck/sympy,Vishluck/sympy,ChristinaZografou/sympy,jamesblunt/sympy,hrashk/sympy,jerli/sympy,Gadal/sympy,cswiercz/sympy,emon10005/sympy,ahhda/sympy,AunShiLord/sympy,cccfran/sympy,oliverlee/sympy,kevalds51/sympy,AkademieOlympia/sympy,MechCoder/sympy,kaushik94/sympy,tonybaloney/sympy,tonybaloney/sympy,hrashk/sympy,lakshmi-kannan/sympy,maniteja123/sympy,sahmed95/sympy,kmacinnis/sympy,jbbskinny/sympy,Titan-C/sympy,cccfran/sympy,oliverlee/sympy,kevalds51/sympy,iamutkarshtiwari/sympy,shipci/sympy,souravsingh/sympy,farhaanbukhsh/sympy,vipulroxx/sympy,Gadal/sympy,hrashk/sympy,maniteja123/sympy,sahmed95/sympy,kmacinnis/sympy,jbbskinny/sympy,Titan-C/sympy,cswiercz/sympy,skirpichev/omg,sahilshekhawat/sympy,beni55/sympy,mafiya69/sympy,MridulS/sympy,Shaswat27/sympy,Arafatk/sympy,souravsingh/sympy,dqnykamp/sympy,jaimahajan1997/sympy,oliverlee/sympy,shipci/sympy,lindsayad/sympy,kaushik94/sympy,abloomston/sympy,atsao72/sympy,wanglongqi/sympy,kaushik94/sympy,toolforger/sympy,shikil/sympy,postvakje/sympy,Arafatk/sympy,VaibhavAgarwalVA/sympy,yukoba/sympy,maniteja123/sympy,beni55/sympy,yashsharan/sympy,mcdaniel67/sympy,pbrady/sympy,yashsharan/sympy,VaibhavAgarwalVA/sympy,farhaanbukhsh/sympy,wyom/sympy,jbbskinny/sympy,vipulroxx/sympy,lidavidm/sympy,iamutkarshtiwari/sympy,atreyv/sympy,garvitr/sympy,bukzor/sympy,kaichogami/sympy,AkademieOlympia/sympy,bukzor/sympy,chaffra/sympy,asm666/sympy,garvitr/sympy,grevutiu-gabriel/sympy,Titan-C/sympy,kmacinnis/sympy,aktech/sympy,madan96/sympy,abhiii5459/sympy,chaffra/sympy,drufat/sympy,saurabhjn76/sympy,pbrady/sympy,sampadsaha5/sympy,wyom/sympy,shikil/sympy,ChristinaZografou/sympy,pandeyadarsh/sympy,debugger22/sympy,kaichogami/sympy,sahmed95/sympy,kevalds51/sympy,sahilshekhawat/sympy,mafiya69/sympy
|
"""Tests that the IPython printing module is properly loaded. """
from sympy.interactive.session import init_ipython_session
from sympy.external import import_module
ipython = import_module("IPython", min_module_version="0.11")
# disable tests if ipython is not present
if not ipython:
disabled = True
def test_ipythonprinting():
# Initialize and setup IPython session
app = init_ipython_session()
app.run_cell("from IPython.core.interactiveshell import InteractiveShell")
app.run_cell("inst = InteractiveShell.instance()")
app.run_cell("format = inst.display_formatter.format")
app.run_cell("from sympy import Symbol")
# Printing without printing extension
app.run_cell("a = format(Symbol('pi'))")
assert app.user_ns['a']['text/plain'] == "pi"
# Load printing extension
app.run_cell("%load_ext sympy.interactive.ipythonprinting")
# Printing with printing extension
app.run_cell("a = format(Symbol('pi'))")
assert app.user_ns['a']['text/plain'] == u'\u03c0'
Make ipythonprinting test more robust
|
"""Tests that the IPython printing module is properly loaded. """
from sympy.interactive.session import init_ipython_session
from sympy.external import import_module
ipython = import_module("IPython", min_module_version="0.11")
# disable tests if ipython is not present
if not ipython:
disabled = True
def test_ipythonprinting():
# Initialize and setup IPython session
app = init_ipython_session()
app.run_cell("ip = get_ipython()")
app.run_cell("inst = ip.instance()")
app.run_cell("format = inst.display_formatter.format")
app.run_cell("from sympy import Symbol")
# Printing without printing extension
app.run_cell("a = format(Symbol('pi'))")
assert app.user_ns['a']['text/plain'] == "pi"
# Load printing extension
app.run_cell("%load_ext sympy.interactive.ipythonprinting")
# Printing with printing extension
app.run_cell("a = format(Symbol('pi'))")
assert app.user_ns['a']['text/plain'] == u'\u03c0'
|
<commit_before>"""Tests that the IPython printing module is properly loaded. """
from sympy.interactive.session import init_ipython_session
from sympy.external import import_module
ipython = import_module("IPython", min_module_version="0.11")
# disable tests if ipython is not present
if not ipython:
disabled = True
def test_ipythonprinting():
# Initialize and setup IPython session
app = init_ipython_session()
app.run_cell("from IPython.core.interactiveshell import InteractiveShell")
app.run_cell("inst = InteractiveShell.instance()")
app.run_cell("format = inst.display_formatter.format")
app.run_cell("from sympy import Symbol")
# Printing without printing extension
app.run_cell("a = format(Symbol('pi'))")
assert app.user_ns['a']['text/plain'] == "pi"
# Load printing extension
app.run_cell("%load_ext sympy.interactive.ipythonprinting")
# Printing with printing extension
app.run_cell("a = format(Symbol('pi'))")
assert app.user_ns['a']['text/plain'] == u'\u03c0'
<commit_msg>Make ipythonprinting test more robust<commit_after>
|
"""Tests that the IPython printing module is properly loaded. """
from sympy.interactive.session import init_ipython_session
from sympy.external import import_module
ipython = import_module("IPython", min_module_version="0.11")
# disable tests if ipython is not present
if not ipython:
disabled = True
def test_ipythonprinting():
# Initialize and setup IPython session
app = init_ipython_session()
app.run_cell("ip = get_ipython()")
app.run_cell("inst = ip.instance()")
app.run_cell("format = inst.display_formatter.format")
app.run_cell("from sympy import Symbol")
# Printing without printing extension
app.run_cell("a = format(Symbol('pi'))")
assert app.user_ns['a']['text/plain'] == "pi"
# Load printing extension
app.run_cell("%load_ext sympy.interactive.ipythonprinting")
# Printing with printing extension
app.run_cell("a = format(Symbol('pi'))")
assert app.user_ns['a']['text/plain'] == u'\u03c0'
|
"""Tests that the IPython printing module is properly loaded. """
from sympy.interactive.session import init_ipython_session
from sympy.external import import_module
ipython = import_module("IPython", min_module_version="0.11")
# disable tests if ipython is not present
if not ipython:
disabled = True
def test_ipythonprinting():
# Initialize and setup IPython session
app = init_ipython_session()
app.run_cell("from IPython.core.interactiveshell import InteractiveShell")
app.run_cell("inst = InteractiveShell.instance()")
app.run_cell("format = inst.display_formatter.format")
app.run_cell("from sympy import Symbol")
# Printing without printing extension
app.run_cell("a = format(Symbol('pi'))")
assert app.user_ns['a']['text/plain'] == "pi"
# Load printing extension
app.run_cell("%load_ext sympy.interactive.ipythonprinting")
# Printing with printing extension
app.run_cell("a = format(Symbol('pi'))")
assert app.user_ns['a']['text/plain'] == u'\u03c0'
Make ipythonprinting test more robust
"""Tests that the IPython printing module is properly loaded. """
from sympy.interactive.session import init_ipython_session
from sympy.external import import_module
ipython = import_module("IPython", min_module_version="0.11")
# disable tests if ipython is not present
if not ipython:
disabled = True
def test_ipythonprinting():
# Initialize and setup IPython session
app = init_ipython_session()
app.run_cell("ip = get_ipython()")
app.run_cell("inst = ip.instance()")
app.run_cell("format = inst.display_formatter.format")
app.run_cell("from sympy import Symbol")
# Printing without printing extension
app.run_cell("a = format(Symbol('pi'))")
assert app.user_ns['a']['text/plain'] == "pi"
# Load printing extension
app.run_cell("%load_ext sympy.interactive.ipythonprinting")
# Printing with printing extension
app.run_cell("a = format(Symbol('pi'))")
assert app.user_ns['a']['text/plain'] == u'\u03c0'
|
<commit_before>"""Tests that the IPython printing module is properly loaded. """
from sympy.interactive.session import init_ipython_session
from sympy.external import import_module
ipython = import_module("IPython", min_module_version="0.11")
# disable tests if ipython is not present
if not ipython:
disabled = True
def test_ipythonprinting():
# Initialize and setup IPython session
app = init_ipython_session()
app.run_cell("from IPython.core.interactiveshell import InteractiveShell")
app.run_cell("inst = InteractiveShell.instance()")
app.run_cell("format = inst.display_formatter.format")
app.run_cell("from sympy import Symbol")
# Printing without printing extension
app.run_cell("a = format(Symbol('pi'))")
assert app.user_ns['a']['text/plain'] == "pi"
# Load printing extension
app.run_cell("%load_ext sympy.interactive.ipythonprinting")
# Printing with printing extension
app.run_cell("a = format(Symbol('pi'))")
assert app.user_ns['a']['text/plain'] == u'\u03c0'
<commit_msg>Make ipythonprinting test more robust<commit_after>"""Tests that the IPython printing module is properly loaded. """
from sympy.interactive.session import init_ipython_session
from sympy.external import import_module
ipython = import_module("IPython", min_module_version="0.11")
# disable tests if ipython is not present
if not ipython:
disabled = True
def test_ipythonprinting():
# Initialize and setup IPython session
app = init_ipython_session()
app.run_cell("ip = get_ipython()")
app.run_cell("inst = ip.instance()")
app.run_cell("format = inst.display_formatter.format")
app.run_cell("from sympy import Symbol")
# Printing without printing extension
app.run_cell("a = format(Symbol('pi'))")
assert app.user_ns['a']['text/plain'] == "pi"
# Load printing extension
app.run_cell("%load_ext sympy.interactive.ipythonprinting")
# Printing with printing extension
app.run_cell("a = format(Symbol('pi'))")
assert app.user_ns['a']['text/plain'] == u'\u03c0'
|
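The fix above swaps a hard import of `InteractiveShell` for the `get_ipython()` helper, which resolves whatever shell subclass is actually running (terminal, kernel, or embedded). A guarded sketch of reaching the display formatter the same way (assumes IPython is installed; outside a running session `get_ipython()` returns None):

from IPython import get_ipython  # the import itself is safe outside a session


def plain_repr(obj):
    """Format obj through the running shell's display formatter, if any."""
    ip = get_ipython()
    if ip is None:  # plain Python interpreter: fall back to repr()
        return repr(obj)
    data, _metadata = ip.display_formatter.format(obj)
    return data['text/plain']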
50c4dd5b6b911b783a29ff7e8764c113ab474d9c
|
tools/coverage_parser.py
|
tools/coverage_parser.py
|
import os
os.chdir('tests/cmdline')
lines = file('coverage.out', 'r').readlines()
files = {}
for line in lines:
filename, lineno = line.split("----------")
if filename not in files:
files[filename] = set()
files[filename].add(int(lineno))
filenames = files.keys()
filenames.sort()
for filename in filenames:
linenos = files[filename]
file_contents = file(filename.strip(), 'r').readlines()
coverage_name = filename.strip() + '-coverage.html'
print "Creating", coverage_name
coverage_file = file(coverage_name, 'w')
coverage_file.write('<html><head><style type="text/css">div {white-space: pre; font-family: monospace} .covered {color: green} .uncovered{color: red}</style></head><body>\n')
for i, line in enumerate(file_contents):
if i + 1 in linenos:
covered = 'covered'
else:
covered = 'uncovered'
coverage_file.write('<div class="%s">%s</div>\n' % (covered, line))
coverage_file.write('</body></html>')
|
Add makefile build rule for building HTML files which show which lines are covered and which are not.
|
Add makefile build rule for building HTML files which show which lines are covered and which are not.
|
Python
|
bsd-2-clause
|
Ms2ger/dom.js,andreasgal/dom.js,modulexcite/dom.js,modulexcite/dom.js,modulexcite/dom.js
|
Add makefile build rule for building HTML files which show which lines are covered and which are not.
|
import os
os.chdir('tests/cmdline')
lines = file('coverage.out', 'r').readlines()
files = {}
for line in lines:
filename, lineno = line.split("----------")
if filename not in files:
files[filename] = set()
files[filename].add(int(lineno))
filenames = files.keys()
filenames.sort()
for filename in filenames:
linenos = files[filename]
file_contents = file(filename.strip(), 'r').readlines()
coverage_name = filename.strip() + '-coverage.html'
print "Creating", coverage_name
coverage_file = file(coverage_name, 'w')
coverage_file.write('<html><head><style type="text/css">div {white-space: pre; font-family: monospace} .covered {color: green} .uncovered{color: red}</style></head><body>\n')
for i, line in enumerate(file_contents):
if i + 1 in linenos:
covered = 'covered'
else:
covered = 'uncovered'
coverage_file.write('<div class="%s">%s</div>\n' % (covered, line))
coverage_file.write('</body></html>')
|
<commit_before><commit_msg>Add makefile build rule for building HTML files which show which lines are covered and which are not.<commit_after>
|
import os
os.chdir('tests/cmdline')
lines = file('coverage.out', 'r').readlines()
files = {}
for line in lines:
filename, lineno = line.split("----------")
if filename not in files:
files[filename] = set()
files[filename].add(int(lineno))
filenames = files.keys()
filenames.sort()
for filename in filenames:
linenos = files[filename]
file_contents = file(filename.strip(), 'r').readlines()
coverage_name = filename.strip() + '-coverage.html'
print "Creating", coverage_name
coverage_file = file(coverage_name, 'w')
coverage_file.write('<html><head><style type="text/css">div {white-space: pre; font-family: monospace} .covered {color: green} .uncovered{color: red}</style></head><body>\n')
for i, line in enumerate(file_contents):
if i + 1 in linenos:
covered = 'covered'
else:
covered = 'uncovered'
coverage_file.write('<div class="%s">%s</div>\n' % (covered, line))
coverage_file.write('</body></html>')
|
Add makefile build rule for building HTML files which show which lines are covered and which are not.
import os
os.chdir('tests/cmdline')
lines = file('coverage.out', 'r').readlines()
files = {}
for line in lines:
filename, lineno = line.split("----------")
if filename not in files:
files[filename] = set()
files[filename].add(int(lineno))
filenames = files.keys()
filenames.sort()
for filename in filenames:
linenos = files[filename]
file_contents = file(filename.strip(), 'r').readlines()
coverage_name = filename.strip() + '-coverage.html'
print "Creating", coverage_name
coverage_file = file(coverage_name, 'w')
coverage_file.write('<html><head><style type="text/css">div {white-space: pre; font-family: monospace} .covered {color: green} .uncovered{color: red}</style></head><body>\n')
for i, line in enumerate(file_contents):
if i + 1 in linenos:
covered = 'covered'
else:
covered = 'uncovered'
coverage_file.write('<div class="%s">%s</div>\n' % (covered, line))
coverage_file.write('</body></html>')
|
<commit_before><commit_msg>Add makefile build rule for building HTML files which show which lines are covered and which are not.<commit_after>
import os
os.chdir('tests/cmdline')
lines = file('coverage.out', 'r').readlines()
files = {}
for line in lines:
filename, lineno = line.split("----------")
if filename not in files:
files[filename] = set()
files[filename].add(int(lineno))
filenames = files.keys()
filenames.sort()
for filename in filenames:
linenos = files[filename]
file_contents = file(filename.strip(), 'r').readlines()
coverage_name = filename.strip() + '-coverage.html'
print "Creating", coverage_name
coverage_file = file(coverage_name, 'w')
coverage_file.write('<html><head><style type="text/css">div {white-space: pre; font-family: monospace} .covered {color: green} .uncovered{color: red}</style></head><body>\n')
for i, line in enumerate(file_contents):
if i + 1 in linenos:
covered = 'covered'
else:
covered = 'uncovered'
coverage_file.write('<div class="%s">%s</div>\n' % (covered, line))
coverage_file.write('</body></html>')
|
|
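The script above is Python 2 (`file()`, `print` statements) and writes source lines into the HTML unescaped, so any line containing `<` would break the markup. A hedged Python 3 rendition of the core loop with `html.escape` added:

import html


def write_coverage_html(src_path, covered_lines):
    """Render src_path with covered lines in green and the rest in red."""
    with open(src_path) as src, open(src_path + '-coverage.html', 'w') as out:
        out.write('<html><head><style type="text/css">div {white-space: pre; '
                  'font-family: monospace} .covered {color: green} '
                  '.uncovered {color: red}</style></head><body>\n')
        for lineno, line in enumerate(src, start=1):
            cls = 'covered' if lineno in covered_lines else 'uncovered'
            out.write('<div class="%s">%s</div>\n'
                      % (cls, html.escape(line.rstrip('\n'))))
        out.write('</body></html>')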
97f915187ba84f6104a4cf9fd7b54a7198a02ee7
|
indra/ontology/bio/__main__.py
|
indra/ontology/bio/__main__.py
|
import os
import sys
import glob
import shutil
import logging
from .ontology import BioOntology, CACHE_DIR
logger = logging.getLogger('indra.ontology.bio')
if __name__ == '__main__':
if len(sys.argv) < 2:
logger.info('Operation missing. Supported operations: '
'build, clean, clean-old, clean-all.')
sys.exit(1)
operation = sys.argv[1]
if operation == 'build':
BioOntology().initialize()
elif operation.startswith('clean'):
parent_dir = os.path.normpath(os.path.join(CACHE_DIR, os.pardir))
version_paths = glob.glob(os.path.join(parent_dir, '*', ''))
if operation == 'clean-all':
to_remove = [parent_dir]
else:
to_remove = []
for version_path in version_paths:
version = os.path.basename(os.path.normpath(version_path))
if operation == 'clean-old' and version != BioOntology.version:
to_remove.append(version_path)
elif operation == 'clean' and version == BioOntology.version:
to_remove.append(version_path)
for rem in to_remove:
logger.info('Removing %s' % rem)
shutil.rmtree(rem)
|
Add CLI to regenerate ontology
|
Add CLI to regenerate ontology
|
Python
|
bsd-2-clause
|
bgyori/indra,johnbachman/belpy,johnbachman/belpy,johnbachman/belpy,johnbachman/indra,sorgerlab/indra,sorgerlab/belpy,bgyori/indra,sorgerlab/belpy,sorgerlab/indra,bgyori/indra,johnbachman/indra,sorgerlab/belpy,sorgerlab/indra,johnbachman/indra
|
Add CLI to regenerate ontology
|
import os
import sys
import glob
import shutil
import logging
from .ontology import BioOntology, CACHE_DIR
logger = logging.getLogger('indra.ontology.bio')
if __name__ == '__main__':
if len(sys.argv) < 2:
logger.info('Operation missing. Supported operations: '
'build, clean, clean-old, clean-all.')
sys.exit(1)
operation = sys.argv[1]
if operation == 'build':
BioOntology().initialize()
elif operation.startswith('clean'):
parent_dir = os.path.normpath(os.path.join(CACHE_DIR, os.pardir))
version_paths = glob.glob(os.path.join(parent_dir, '*', ''))
if operation == 'clean-all':
to_remove = [parent_dir]
else:
to_remove = []
for version_path in version_paths:
version = os.path.basename(os.path.normpath(version_path))
if operation == 'clean-old' and version != BioOntology.version:
to_remove.append(version_path)
elif operation == 'clean' and version == BioOntology.version:
to_remove.append(version_path)
for rem in to_remove:
logger.info('Removing %s' % rem)
shutil.rmtree(rem)
|
<commit_before><commit_msg>Add CLI to regenerate ontology<commit_after>
|
import os
import sys
import glob
import shutil
import logging
from .ontology import BioOntology, CACHE_DIR
logger = logging.getLogger('indra.ontology.bio')
if __name__ == '__main__':
if len(sys.argv) < 2:
logger.info('Operation missing. Supported operations: '
'build, clean, clean-old, clean-all.')
sys.exit(1)
operation = sys.argv[1]
if operation == 'build':
BioOntology().initialize()
elif operation.startswith('clean'):
parent_dir = os.path.normpath(os.path.join(CACHE_DIR, os.pardir))
version_paths = glob.glob(os.path.join(parent_dir, '*', ''))
if operation == 'clean-all':
to_remove = [parent_dir]
else:
to_remove = []
for version_path in version_paths:
version = os.path.basename(os.path.normpath(version_path))
if operation == 'clean-old' and version != BioOntology.version:
to_remove.append(version_path)
elif operation == 'clean' and version == BioOntology.version:
to_remove.append(version_path)
for rem in to_remove:
logger.info('Removing %s' % rem)
shutil.rmtree(rem)
|
Add CLI to regenerate ontology
import os
import sys
import glob
import shutil
import logging
from .ontology import BioOntology, CACHE_DIR
logger = logging.getLogger('indra.ontology.bio')
if __name__ == '__main__':
if len(sys.argv) < 2:
logger.info('Operation missing. Supported operations: '
'build, clean, clean-old, clean-all.')
sys.exit(1)
operation = sys.argv[1]
if operation == 'build':
BioOntology().initialize()
elif operation.startswith('clean'):
parent_dir = os.path.normpath(os.path.join(CACHE_DIR, os.pardir))
version_paths = glob.glob(os.path.join(parent_dir, '*', ''))
if operation == 'clean-all':
to_remove = [parent_dir]
else:
to_remove = []
for version_path in version_paths:
version = os.path.basename(os.path.normpath(version_path))
if operation == 'clean-old' and version != BioOntology.version:
to_remove.append(version_path)
elif operation == 'clean' and version == BioOntology.version:
to_remove.append(version_path)
for rem in to_remove:
logger.info('Removing %s' % rem)
shutil.rmtree(rem)
|
<commit_before><commit_msg>Add CLI to regenerate ontology<commit_after>import os
import sys
import glob
import shutil
import logging
from .ontology import BioOntology, CACHE_DIR
logger = logging.getLogger('indra.ontology.bio')
if __name__ == '__main__':
if len(sys.argv) < 2:
logger.info('Operation missing. Supported operations: '
'build, clean, clean-old, clean-all.')
sys.exit(1)
operation = sys.argv[1]
if operation == 'build':
BioOntology().initialize()
elif operation.startswith('clean'):
parent_dir = os.path.normpath(os.path.join(CACHE_DIR, os.pardir))
version_paths = glob.glob(os.path.join(parent_dir, '*', ''))
if operation == 'clean-all':
to_remove = [parent_dir]
else:
to_remove = []
for version_path in version_paths:
version = os.path.basename(os.path.normpath(version_path))
if operation == 'clean-old' and version != BioOntology.version:
to_remove.append(version_path)
elif operation == 'clean' and version == BioOntology.version:
to_remove.append(version_path)
for rem in to_remove:
logger.info('Removing %s' % rem)
shutil.rmtree(rem)
|
|
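The clean/clean-old branches above reduce to "remove versioned cache directories, keeping or dropping the current one". A standalone sketch of that selection logic with pathlib (cache location and version string are hypothetical):

import shutil
from pathlib import Path

CACHE_DIR = Path.home() / '.cache' / 'my_ontology'  # hypothetical layout
CURRENT_VERSION = '1.4'


def clean(operation):
    """operation is one of 'clean', 'clean-old', 'clean-all'."""
    if operation == 'clean-all':
        targets = [CACHE_DIR]
    else:
        keep_current = operation == 'clean-old'
        # Remove the current version dir for 'clean', every other one for 'clean-old'.
        targets = [p for p in CACHE_DIR.glob('*') if p.is_dir()
                   and (p.name == CURRENT_VERSION) != keep_current]
    for path in targets:
        print('Removing %s' % path)
        shutil.rmtree(path, ignore_errors=True)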
a30fb24754fad371178207180bb064fcc5d5ca9d
|
patterns/factory.py
|
patterns/factory.py
|
"""
Factory Pattern
Definition:
pass
Also Known As:
pass
Problem:
pass
Wrong Solution:
pass
Correct Solution:
pass
Sources:
Title: Head First Design Patterns
Author(s): Eric Freeman & Elisabeth Freeman
Pages: 109-168
Title: Design Patterns
Author(s): Erich Gamma, Richard Helm, Ralph Johnson, John Vlissides
Pages: 107-116
Example Info:
pass
"""
|
"""
Factory Pattern
Definition:
This pattern defines an interface for creating an object, but lets
subclasses decide which class to instantiate. Factory Method lets a class
defer instantiation to subclasses.
Also Known As:
Virtual Constructor
Problem:
pass
Wrong Solution:
pass
Correct Solution:
pass
Sources:
Title: Head First Design Patterns
Author(s): Eric Freeman & Elisabeth Freeman
Pages: 109-168
Title: Design Patterns
Author(s): Erich Gamma, Richard Helm, Ralph Johnson, John Vlissides
Pages: 107-116
Example Info:
pass
"""
|
Add some info to Factory pattern
|
Add some info to Factory pattern
|
Python
|
mit
|
jdavis/rust-design-patterns,ianlet/rust-design-patterns,beni55/rust-design-patterns,ianlet/rust-design-patterns,jdavis/rust-design-patterns,beni55/rust-design-patterns,ianlet/rust-design-patterns,beni55/rust-design-patterns,jdavis/rust-design-patterns,jdavis/rust-design-patterns
|
"""
Factory Pattern
Definition:
pass
Also Known As:
pass
Problem:
pass
Wrong Solution:
pass
Correct Solution:
pass
Sources:
Title: Head First Design Patterns
Author(s): Eric Freeman & Elisabeth Freeman
Pages: 109-168
Title: Design Patterns
Author(s): Erich Gamma, Richard Helm, Ralph Johnson, John Vlissides
Pages: 107-116
Example Info:
pass
"""
Add some info to Factory pattern
|
"""
Factory Pattern
Definition:
This pattern defines an interface for creating an object, but lets
subclasses decide which class to instantiate. Factory Method lets a class
defer instantiation to subclasses.
Also Known As:
Virtual Constructor
Problem:
pass
Wrong Solution:
pass
Correct Solution:
pass
Sources:
Title: Head First Design Patterns
Author(s): Eric Freeman & Elisabeth Freeman
Pages: 109-168
Title: Design Patterns
Author(s): Erich Gamma, Richard Helm, Ralph Johnson, John Vlissides
Pages: 107-116
Example Info:
pass
"""
|
<commit_before>"""
Factory Pattern
Definition:
pass
Also Known As:
pass
Problem:
pass
Wrong Solution:
pass
Correct Solution:
pass
Sources:
Title: Head First Design Patterns
Author(s): Eric Freeman & Elisabeth Freeman
Pages: 109-168
Title: Design Patterns
Author(s): Erich Gamma, Richard Helm, Ralph Johnson, John Vlissides
Pages: 107-116
Example Info:
pass
"""
<commit_msg>Add some info to Factory pattern<commit_after>
|
"""
Factory Pattern
Definition:
This pattern defines an interface for creating an object, but lets
subclasses decide which class to instantiate. Factory Method lets a class
defer instantiation to subclasses.
Also Known As:
Virtual Constructor
Problem:
pass
Wrong Solution:
pass
Correct Solution:
pass
Sources:
Title: Head First Design Patterns
Author(s): Eric Freeman & Elisabeth Freeman
Pages: 109-168
Title: Design Patterns
Author(s): Erich Gamma, Richard Helm, Ralph Johnson, John Vlissides
Pages: 107-116
Example Info:
pass
"""
|
"""
Factory Pattern
Definition:
pass
Also Known As:
pass
Problem:
pass
Wrong Solution:
pass
Correct Solution:
pass
Sources:
Title: Head First Design Patterns
Author(s): Eric Freeman & Elisabeth Freeman
Pages: 109-168
Title: Design Patterns
Author(s): Erich Gamma, Richard Helm, Ralph Johnson, John Vlissides
Pages: 107-116
Example Info:
pass
"""
Add some info to Factory pattern
"""
Factory Pattern
Definition:
This pattern defines an interface for creating an object, but lets
subclasses decide which class to instantiate. Factory Method lets a class
defer instantiation to subclasses.
Also Known As:
Virtual Constructor
Problem:
pass
Wrong Solution:
pass
Correct Solution:
pass
Sources:
Title: Head First Design Patterns
Author(s): Eric Freeman & Elisabeth Freeman
Pages: 109-168
Title: Design Patterns
Author(s): Erich Gamma, Richard Helm, Ralph Johnson, John Vlissides
Pages: 107-116
Example Info:
pass
"""
|
<commit_before>"""
Factory Pattern
Definition:
pass
Also Known As:
pass
Problem:
pass
Wrong Solution:
pass
Correct Solution:
pass
Sources:
Title: Head First Design Patterns
Author(s): Eric Freeman & Elisabeth Freeman
Pages: 109-168
Title: Design Patterns
Author(s): Erich Gamma, Richard Helm, Ralph Johnson, John Vlissides
Pages: 107-116
Example Info:
pass
"""
<commit_msg>Add some info to Factory pattern<commit_after>"""
Factory Pattern
Definition:
This pattern defines an interface for creating an object, but lets
subclasses decide which class to instantiate. Factory Method lets a class
defer instantiation to subclasses.
Also Known As:
Virtual Constructor
Problem:
pass
Wrong Solution:
pass
Correct Solution:
pass
Sources:
Title: Head First Design Patterns
Author(s): Eric Freeman & Elisabeth Freeman
Pages: 109-168
Title: Design Patterns
Author(s): Erich Gamma, Richard Helm, Ralph Johnson, John Vlissides
Pages: 107-116
Example Info:
pass
"""
|
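Since the docstring above leaves its example sections as `pass`, here is a conventional Factory Method illustration in Python: the creator codes against the product interface and subclasses decide which concrete class to instantiate (class names are the textbook ones, not from this repo):

class Document:
    def render(self):
        raise NotImplementedError


class PdfDocument(Document):
    def render(self):
        return 'pdf bytes'


class Application:
    """Creator: programs against Document, never a concrete class."""

    def create_document(self):  # the factory method
        raise NotImplementedError

    def open(self):
        return self.create_document().render()


class PdfApplication(Application):
    def create_document(self):  # the subclass decides what to instantiate
        return PdfDocument()


print(PdfApplication().open())  # 'pdf bytes'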
d4ccc7e5bff643eae3aa3a2211169014068053a6
|
models/bmi_demo/bmi_models.py
|
models/bmi_demo/bmi_models.py
|
import os
import shutil
from indra.sources import eidos
from indra.assemblers.bmi_wrapper import BMIModel
from indra.assemblers import PysbAssembler
def text_to_stmts(text):
fname = text.replace(' ', '_') + '.jsonld'
if os.path.exists(fname):
ep = eidos.process_json_ld_file(fname)
else:
ep = eidos.process_text(text)
shutil.move('eidos_output.json', fname)
return ep.statements
def make_component_repo(bmi_models):
for m in bmi_models:
m.export_into_python()
comps = [m.make_repository_component() for m in bmi_models]
rep_str = '<repository>%s</repository>' % '\n'.join(comps)
with open('component_repository.xml', 'w') as fh:
fh.write(rep_str)
with open('component_providers.txt', 'w') as fh:
fh.write('model0 model0\nmodel1 model1')
if __name__ == '__main__':
model_txts = ['rainfall causes floods', 'floods cause displacement']
stmts = [text_to_stmts(t) for t in model_txts]
bmi_models = []
for idx, model_stmts in enumerate(stmts):
pa = PysbAssembler()
pa.add_statements(model_stmts)
model = pa.make_model()
model.name = 'model%d' % idx
bm = BMIModel(model)
bmi_models.append(bm)
make_component_repo(bmi_models)
|
Add demo example for BMI
|
Add demo example for BMI
|
Python
|
bsd-2-clause
|
sorgerlab/belpy,johnbachman/indra,pvtodorov/indra,sorgerlab/indra,johnbachman/indra,bgyori/indra,johnbachman/belpy,bgyori/indra,bgyori/indra,johnbachman/indra,pvtodorov/indra,sorgerlab/indra,johnbachman/belpy,sorgerlab/indra,sorgerlab/belpy,sorgerlab/belpy,johnbachman/belpy,pvtodorov/indra,pvtodorov/indra
|
Add demo example for BMI
|
import os
import shutil
from indra.sources import eidos
from indra.assemblers.bmi_wrapper import BMIModel
from indra.assemblers import PysbAssembler
def text_to_stmts(text):
fname = text.replace(' ', '_') + '.jsonld'
if os.path.exists(fname):
ep = eidos.process_json_ld_file(fname)
else:
ep = eidos.process_text(text)
shutil.move('eidos_output.json', fname)
return ep.statements
def make_component_repo(bmi_models):
for m in bmi_models:
m.export_into_python()
comps = [m.make_repository_component() for m in bmi_models]
rep_str = '<repository>%s</repository>' % '\n'.join(comps)
with open('component_repository.xml', 'w') as fh:
fh.write(rep_str)
with open('component_providers.txt', 'w') as fh:
fh.write('model0 model0\nmodel1 model1')
if __name__ == '__main__':
model_txts = ['rainfall causes floods', 'floods cause displacement']
stmts = [text_to_stmts(t) for t in model_txts]
bmi_models = []
for idx, model_stmts in enumerate(stmts):
pa = PysbAssembler()
pa.add_statements(model_stmts)
model = pa.make_model()
model.name = 'model%d' % idx
bm = BMIModel(model)
bmi_models.append(bm)
make_component_repo(bmi_models)
|
<commit_before><commit_msg>Add demo example for BMI<commit_after>
|
import os
import shutil
from indra.sources import eidos
from indra.assemblers.bmi_wrapper import BMIModel
from indra.assemblers import PysbAssembler
def text_to_stmts(text):
fname = text.replace(' ', '_') + '.jsonld'
if os.path.exists(fname):
ep = eidos.process_json_ld_file(fname)
else:
ep = eidos.process_text(text)
shutil.move('eidos_output.json', fname)
return ep.statements
def make_component_repo(bmi_models):
for m in bmi_models:
m.export_into_python()
comps = [m.make_repository_component() for m in bmi_models]
rep_str = '<repository>%s</repository>' % '\n'.join(comps)
with open('component_repository.xml', 'w') as fh:
fh.write(rep_str)
with open('component_providers.txt', 'w') as fh:
fh.write('model0 model0\nmodel1 model1')
if __name__ == '__main__':
model_txts = ['rainfall causes floods', 'floods cause displacement']
stmts = [text_to_stmts(t) for t in model_txts]
bmi_models = []
for idx, model_stmts in enumerate(stmts):
pa = PysbAssembler()
pa.add_statements(model_stmts)
model = pa.make_model()
model.name = 'model%d' % idx
bm = BMIModel(model)
bmi_models.append(bm)
make_component_repo(bmi_models)
|
Add demo example for BMI
import os
import shutil
from indra.sources import eidos
from indra.assemblers.bmi_wrapper import BMIModel
from indra.assemblers import PysbAssembler
def text_to_stmts(text):
fname = text.replace(' ', '_') + '.jsonld'
if os.path.exists(fname):
ep = eidos.process_json_ld_file(fname)
else:
ep = eidos.process_text(text)
shutil.move('eidos_output.json', fname)
return ep.statements
def make_component_repo(bmi_models):
for m in bmi_models:
m.export_into_python()
comps = [m.make_repository_component() for m in bmi_models]
rep_str = '<repository>%s</repository>' % '\n'.join(comps)
with open('component_repository.xml', 'w') as fh:
fh.write(rep_str)
with open('component_providers.txt', 'w') as fh:
fh.write('model0 model0\nmodel1 model1')
if __name__ == '__main__':
model_txts = ['rainfall causes floods', 'floods cause displacement']
stmts = [text_to_stmts(t) for t in model_txts]
bmi_models = []
for idx, model_stmts in enumerate(stmts):
pa = PysbAssembler()
pa.add_statements(model_stmts)
model = pa.make_model()
model.name = 'model%d' % idx
bm = BMIModel(model)
bmi_models.append(bm)
make_component_repo(bmi_models)
|
<commit_before><commit_msg>Add demo example for BMI<commit_after>import os
import shutil
from indra.sources import eidos
from indra.assemblers.bmi_wrapper import BMIModel
from indra.assemblers import PysbAssembler
def text_to_stmts(text):
fname = text.replace(' ', '_') + '.jsonld'
if os.path.exists(fname):
ep = eidos.process_json_ld_file(fname)
else:
ep = eidos.process_text(text)
shutil.move('eidos_output.json', fname)
return ep.statements
def make_component_repo(bmi_models):
for m in bmi_models:
m.export_into_python()
comps = [m.make_repository_component() for m in bmi_models]
rep_str = '<repository>%s</repository>' % '\n'.join(comps)
with open('component_repository.xml', 'w') as fh:
fh.write(rep_str)
with open('component_providers.txt', 'w') as fh:
fh.write('model0 model0\nmodel1 model1')
if __name__ == '__main__':
model_txts = ['rainfall causes floods', 'floods cause displacement']
stmts = [text_to_stmts(t) for t in model_txts]
bmi_models = []
for idx, model_stmts in enumerate(stmts):
pa = PysbAssembler()
pa.add_statements(model_stmts)
model = pa.make_model()
model.name = 'model%d' % idx
bm = BMIModel(model)
bmi_models.append(bm)
make_component_repo(bmi_models)
|
|
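`text_to_stmts` above is a read-through file cache around an expensive reader call: reuse the on-disk result if present, otherwise compute and persist it. The same shape in isolation, with a placeholder standing in for the Eidos reader:

import json
import os


def expensive_parse(text):
    # Stand-in for a slow external reader (e.g. an NLP service call).
    return {'text': text, 'events': []}


def parse_cached(text, cache_dir='.'):
    """Return the parse for text, reusing an on-disk result when present."""
    fname = os.path.join(cache_dir, text.replace(' ', '_') + '.json')
    if os.path.exists(fname):
        with open(fname) as fh:
            return json.load(fh)
    result = expensive_parse(text)
    with open(fname, 'w') as fh:
        json.dump(result, fh)
    return result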
aec27cf5799299c0267da8819ed836a0327ad47c
|
kyoukai/blueprints/regexp.py
|
kyoukai/blueprints/regexp.py
|
"""
Regular-expression based blueprint for Kyoukai.
This produces regex-based routes when ``wrap_route`` is called, and is the default blueprint handler for the Kyoukai
blueprint tree.
"""
import typing
from kyoukai.blueprints.base import ABCBlueprint
from kyoukai.routing.base import ABCRoute
from kyoukai.routing.regexp import RegexRoute
class RegexBlueprint(ABCBlueprint):
"""
The class for a RegexBlueprint.
RegexBlueprints are very simple compared to other types of blueprints, using a basic regular expression to match
the path of of a request with a route. This means there is no complex type conversion, as there is with
:class:`kyoukai.blueprints.wz.WerkzeugBlueprint`, and the routes used can be incredibly powerful.
"""
def __init__(self, name: str, parent: 'ABCBlueprint' = None,
url_prefix: str = ""):
super().__init__(name, parent, url_prefix)
# Define the routes list.
self.routes = []
def add_route(self, route: 'ABCRoute'):
# Adds the route to self.route
self.routes.append(route)
return route
def wrap_route(self, match_string: str, coroutine: typing.Awaitable, *, methods: list = None,
run_hooks = True) -> ABCRoute:
# Wrap the route in an RegexRoute instance.
rtt = RegexRoute(self, match_string, methods, bound=False, run_hooks=run_hooks)
rtt.create(coroutine)
return rtt
def gather_routes(self) -> list:
# Gathers all routes to use.
routes = []
for child in self.children:
routes += child.gather_routes()
routes += self.routes
return routes
def match(self, route: str, method: str = None) -> typing.Tuple[ABCRoute, typing.Iterable]:
pass
|
Add stub for a regular expression based blueprint.
|
Add stub for a regular expression based blueprint.
|
Python
|
mit
|
SunDwarf/Kyoukai
|
Add stub for a regular expression based blueprint.
|
"""
Regular-expression based blueprint for Kyoukai.
This produces regex-based routes when ``wrap_route`` is called, and is the default blueprint handler for the Kyoukai
blueprint tree.
"""
import typing
from kyoukai.blueprints.base import ABCBlueprint
from kyoukai.routing.base import ABCRoute
from kyoukai.routing.regexp import RegexRoute
class RegexBlueprint(ABCBlueprint):
"""
The class for a RegexBlueprint.
RegexBlueprints are very simple compared to other types of blueprints, using a basic regular expression to match
the path of a request with a route. This means there is no complex type conversion, as there is with
:class:`kyoukai.blueprints.wz.WerkzeugBlueprint`, and the routes used can be incredibly powerful.
"""
def __init__(self, name: str, parent: 'ABCBlueprint' = None,
url_prefix: str = ""):
super().__init__(name, parent, url_prefix)
# Define the routes list.
self.routes = []
def add_route(self, route: 'ABCRoute'):
# Adds the route to self.routes
self.routes.append(route)
return route
def wrap_route(self, match_string: str, coroutine: typing.Awaitable, *, methods: list = None,
run_hooks = True) -> ABCRoute:
# Wrap the route in a RegexRoute instance.
rtt = RegexRoute(self, match_string, methods, bound=False, run_hooks=run_hooks)
rtt.create(coroutine)
return rtt
def gather_routes(self) -> list:
# Gathers all routes to use.
routes = []
for child in self.children:
routes += child.gather_routes()
routes += self.routes
return routes
def match(self, route: str, method: str = None) -> typing.Tuple[ABCRoute, typing.Iterable]:
pass
|
<commit_before><commit_msg>Add stub for a regular expression based blueprint.<commit_after>
|
"""
Regular-expression based blueprint for Kyoukai.
This produces regex-based routes when ``wrap_route`` is called, and is the default blueprint handler for the Kyoukai
blueprint tree.
"""
import typing
from kyoukai.blueprints.base import ABCBlueprint
from kyoukai.routing.base import ABCRoute
from kyoukai.routing.regexp import RegexRoute
class RegexBlueprint(ABCBlueprint):
"""
The class for a RegexBlueprint.
RegexBlueprints are very simple compared to other types of blueprints, using a basic regular expression to match
the path of a request with a route. This means there is no complex type conversion, as there is with
:class:`kyoukai.blueprints.wz.WerkzeugBlueprint`, and the routes used can be incredibly powerful.
"""
def __init__(self, name: str, parent: 'ABCBlueprint' = None,
url_prefix: str = ""):
super().__init__(name, parent, url_prefix)
# Define the routes list.
self.routes = []
def add_route(self, route: 'ABCRoute'):
# Adds the route to self.routes
self.routes.append(route)
return route
def wrap_route(self, match_string: str, coroutine: typing.Awaitable, *, methods: list = None,
run_hooks = True) -> ABCRoute:
# Wrap the route in a RegexRoute instance.
rtt = RegexRoute(self, match_string, methods, bound=False, run_hooks=run_hooks)
rtt.create(coroutine)
return rtt
def gather_routes(self) -> list:
# Gathers all routes to use.
routes = []
for child in self.children:
routes += child.gather_routes()
routes += self.routes
return routes
def match(self, route: str, method: str = None) -> typing.Tuple[ABCRoute, typing.Iterable]:
pass
|
Add stub for a regular expression based blueprint."""
Regular-expression based blueprint for Kyoukai.
This produces regex-based routes when ``wrap_route`` is called, and is the default blueprint handler for the Kyoukai
blueprint tree.
"""
import typing
from kyoukai.blueprints.base import ABCBlueprint
from kyoukai.routing.base import ABCRoute
from kyoukai.routing.regexp import RegexRoute
class RegexBlueprint(ABCBlueprint):
"""
The class for a RegexBlueprint.
RegexBlueprints are very simple compared to other types of blueprints, using a basic regular expression to match
the path of a request with a route. This means there is no complex type conversion, as there is with
:class:`kyoukai.blueprints.wz.WerkzeugBlueprint`, and the routes used can be incredibly powerful.
"""
def __init__(self, name: str, parent: 'ABCBlueprint' = None,
url_prefix: str = ""):
super().__init__(name, parent, url_prefix)
# Define the routes list.
self.routes = []
def add_route(self, route: 'ABCRoute'):
# Adds the route to self.routes
self.routes.append(route)
return route
def wrap_route(self, match_string: str, coroutine: typing.Awaitable, *, methods: list = None,
run_hooks = True) -> ABCRoute:
# Wrap the route in an RegexRoute instance.
rtt = RegexRoute(self, match_string, methods, bound=False, run_hooks=run_hooks)
rtt.create(coroutine)
return rtt
def gather_routes(self) -> list:
# Gathers all routes to use.
routes = []
for child in self.children:
routes += child.gather_routes()
routes += self.routes
return routes
def match(self, route: str, method: str = None) -> typing.Tuple[ABCRoute, typing.Iterable]:
pass
|
<commit_before><commit_msg>Add stub for a regular expression based blueprint.<commit_after>"""
Regular-expression based blueprint for Kyoukai.
This produces regex-based routes when ``wrap_route`` is called, and is the default blueprint handler for the Kyoukai
blueprint tree.
"""
import typing
from kyoukai.blueprints.base import ABCBlueprint
from kyoukai.routing.base import ABCRoute
from kyoukai.routing.regexp import RegexRoute
class RegexBlueprint(ABCBlueprint):
"""
The class for a RegexBlueprint.
RegexBlueprints are very simple compared to other types of blueprints, using a basic regular expression to match
the path of a request with a route. This means there is no complex type conversion, as there is with
:class:`kyoukai.blueprints.wz.WerkzeugBlueprint`, and the routes used can be incredibly powerful.
"""
def __init__(self, name: str, parent: 'ABCBlueprint' = None,
url_prefix: str = ""):
super().__init__(name, parent, url_prefix)
# Define the routes list.
self.routes = []
def add_route(self, route: 'ABCRoute'):
# Adds the route to self.routes
self.routes.append(route)
return route
def wrap_route(self, match_string: str, coroutine: typing.Awaitable, *, methods: list = None,
run_hooks = True) -> ABCRoute:
# Wrap the route in a RegexRoute instance.
rtt = RegexRoute(self, match_string, methods, bound=False, run_hooks=run_hooks)
rtt.create(coroutine)
return rtt
def gather_routes(self) -> list:
# Gathers all routes to use.
routes = []
for child in self.children:
routes += child.gather_routes()
routes += self.routes
return routes
def match(self, route: str, method: str = None) -> typing.Tuple[ABCRoute, typing.Iterable]:
pass
|
|
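The match() method in the commit above is deliberately left as a stub. One plausible shape for it, sketched with only the standard re module plus the gather_routes() helper already defined, is to try each gathered route's pattern in turn; the `matcher` attribute on the route objects is an assumption invented for this example, not part of the committed code:

import re
import typing

def match(self, route: str, method: str = None) -> typing.Tuple['ABCRoute', typing.Iterable]:
    # Hypothetical sketch: assumes each RegexRoute exposes its raw pattern as `matcher`.
    for rt in self.gather_routes():
        matched = re.fullmatch(rt.matcher, route)
        if matched is not None:
            return rt, matched.groups()
    return None, ()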
30d71d6952a46f25d40af524c9583dac50459711
|
util/testCamTrakParam.py
|
util/testCamTrakParam.py
|
from SimpleCV import *
import time
cam = Camera()
try:
while True:
t0 = time.time()
img = cam.getImage()
topCropH = 60
img = img.resize(w = 200)
img = img.crop(0, topCropH, img.width, img.height - topCropH)
## img.show()
## time.sleep(.5)
jaune = Color.hueToBGR(74)
iBin = img.hueDistance(color = jaune, minsaturation = 150)
iBin = iBin.binarize(40)
## iBin.show()
## time.sleep(.5)
blobs = iBin.findBlobs()
## iBin.show()
## blobs.show()
## print blobs
if blobs is not None:
blobs.sortArea()
verre = blobs[0]
x, y = verre.centroid()
print x - img.width / 2
dl = img.dl()
verre.drawRect(layer = dl, color = (0, 255, 0), width = 2)
img.show()
dt = time.time() - t0
if dt < .1:
time.sleep(.1 - dt) ## time.sleep(3)
except KeyboardInterrupt:
print "Fin"
|
Test file to try camera tracking parameters
|
Test file to try camera tracking parameters
|
Python
|
mit
|
DrGFreeman/RasPiBot202,DrGFreeman/RasPiBot202
|
Test file to try camera tracking parameters
|
from SimpleCV import *
import time
cam = Camera()
try:
while True:
t0 = time.time()
img = cam.getImage()
topCropH = 60
img = img.resize(w = 200)
img = img.crop(0, topCropH, img.width, img.height - topCropH)
## img.show()
## time.sleep(.5)
jaune = Color.hueToBGR(74)
iBin = img.hueDistance(color = jaune, minsaturation = 150)
iBin = iBin.binarize(40)
## iBin.show()
## time.sleep(.5)
blobs = iBin.findBlobs()
## iBin.show()
## blobs.show()
## print blobs
if blobs is not None:
blobs.sortArea()
verre = blobs[0]
x, y = verre.centroid()
print x - img.width / 2
dl = img.dl()
verre.drawRect(layer = dl, color = (0, 255, 0), width = 2)
img.show()
dt = time.time() - t0
if dt < .1:
time.sleep(.1 - dt) ## time.sleep(3)
except KeyboardInterrupt:
print "Fin"
|
<commit_before><commit_msg>Test file to try camera tracking parameters<commit_after>
|
from SimpleCV import *
import time
cam = Camera()
try:
while True:
t0 = time.time()
img = cam.getImage()
topCropH = 60
img = img.resize(w = 200)
img = img.crop(0, topCropH, img.width, img.height - topCropH)
## img.show()
## time.sleep(.5)
jaune = Color.hueToBGR(74)
iBin = img.hueDistance(color = jaune, minsaturation = 150)
iBin = iBin.binarize(40)
## iBin.show()
## time.sleep(.5)
blobs = iBin.findBlobs()
## iBin.show()
## blobs.show()
## print blobs
if blobs is not None:
blobs.sortArea()
verre = blobs[0]
x, y = verre.centroid()
print x - img.width / 2
dl = img.dl()
verre.drawRect(layer = dl, color = (0, 255, 0), width = 2)
img.show()
dt = time.time() - t0
if dt < .1:
time.sleep(.1 - dt) ## time.sleep(3)
except KeyboardInterrupt:
print "Fin"
|
Test file to try camera tracking parametersfrom SimpleCV import *
import time
cam = Camera()
try:
while True:
t0 = time.time()
img = cam.getImage()
topCropH = 60
img = img.resize(w = 200)
img = img.crop(0, topCropH, img.width, img.height - topCropH)
## img.show()
## time.sleep(.5)
jaune = Color.hueToBGR(74)
iBin = img.hueDistance(color = jaune, minsaturation = 150)
iBin = iBin.binarize(40)
## iBin.show()
## time.sleep(.5)
blobs = iBin.findBlobs()
## iBin.show()
## blobs.show()
## print blobs
if blobs is not None:
blobs.sortArea()
verre = blobs[0]
x, y = verre.centroid()
print x - img.width / 2
dl = img.dl()
verre.drawRect(layer = dl, color = (0, 255, 0), width = 2)
img.show()
dt = time.time() - t0
if dt < .1:
time.sleep(.1 - dt) ## time.sleep(3)
except KeyboardInterrupt:
print "Fin"
|
<commit_before><commit_msg>Test file to try camera tracking parameters<commit_after>from SimpleCV import *
import time
cam = Camera()
try:
while True:
t0 = time.time()
img = cam.getImage()
topCropH = 60
img = img.resize(w = 200)
img = img.crop(0, topCropH, img.width, img.height - topCropH)
## img.show()
## time.sleep(.5)
jaune = Color.hueToBGR(74)
iBin = img.hueDistance(color = jaune, minsaturation = 150)
iBin = iBin.binarize(40)
## iBin.show()
## time.sleep(.5)
blobs = iBin.findBlobs()
## iBin.show()
## blobs.show()
## print blobs
if blobs is not None:
blobs.sortArea()
verre = blobs[0]
x, y = verre.centroid()
print x - img.width / 2
dl = img.dl()
verre.drawRect(layer = dl, color = (0, 255, 0), width = 2)
img.show()
dt = time.time() - t0
if dt < .1:
time.sleep(.1 - dt) ## time.sleep(3)
except KeyboardInterrupt:
print "Fin"
|
|
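In the tracking script above, x - img.width / 2 is the horizontal error between the detected blob and the image centre, which is the natural input to a proportional steering controller on a robot. A dependency-free sketch of that step (the gain and clamp values are illustrative assumptions, not taken from the commit):

def steering_command(x_centroid, frame_width, k_p=0.01, max_turn=1.0):
    # Map the blob's horizontal offset to a clamped turn rate.
    error = x_centroid - frame_width / 2.0
    return max(-max_turn, min(max_turn, k_p * error))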
87027c6f0e0afd9b37b4c076b42735bf8092e2ca
|
src/ggrc_basic_permissions/migrations/versions/20150805105543_99925466d6e_add_roles_order_column.py
|
src/ggrc_basic_permissions/migrations/versions/20150805105543_99925466d6e_add_roles_order_column.py
|
"""Add roles order column
Revision ID: 99925466d6e
Revises: 401fb7f0184b
Create Date: 2015-08-05 10:55:43.992382
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '99925466d6e'
down_revision = '401fb7f0184b'
def upgrade():
op.add_column("roles", sa.Column("role_order", sa.Integer(), nullable=True))
op.execute("UPDATE roles SET role_order = id")
# creator role should appear before other roles
op.execute("UPDATE roles SET role_order = 4 WHERE name='Creator'")
def downgrade():
op.drop_column("roles", "role_order")
|
Add role_order column to roles table
|
Add role_order column to roles table
|
Python
|
apache-2.0
|
AleksNeStu/ggrc-core,NejcZupec/ggrc-core,kr41/ggrc-core,kr41/ggrc-core,hyperNURb/ggrc-core,kr41/ggrc-core,hasanalom/ggrc-core,plamut/ggrc-core,j0gurt/ggrc-core,kr41/ggrc-core,andrei-karalionak/ggrc-core,hasanalom/ggrc-core,AleksNeStu/ggrc-core,selahssea/ggrc-core,jmakov/ggrc-core,selahssea/ggrc-core,jmakov/ggrc-core,hasanalom/ggrc-core,josthkko/ggrc-core,andrei-karalionak/ggrc-core,edofic/ggrc-core,j0gurt/ggrc-core,plamut/ggrc-core,josthkko/ggrc-core,edofic/ggrc-core,NejcZupec/ggrc-core,VinnieJohns/ggrc-core,VinnieJohns/ggrc-core,hyperNURb/ggrc-core,j0gurt/ggrc-core,josthkko/ggrc-core,VinnieJohns/ggrc-core,prasannav7/ggrc-core,hyperNURb/ggrc-core,hasanalom/ggrc-core,hyperNURb/ggrc-core,jmakov/ggrc-core,andrei-karalionak/ggrc-core,AleksNeStu/ggrc-core,AleksNeStu/ggrc-core,VinnieJohns/ggrc-core,jmakov/ggrc-core,hasanalom/ggrc-core,j0gurt/ggrc-core,andrei-karalionak/ggrc-core,plamut/ggrc-core,prasannav7/ggrc-core,NejcZupec/ggrc-core,prasannav7/ggrc-core,prasannav7/ggrc-core,plamut/ggrc-core,selahssea/ggrc-core,josthkko/ggrc-core,NejcZupec/ggrc-core,edofic/ggrc-core,edofic/ggrc-core,jmakov/ggrc-core,hyperNURb/ggrc-core,selahssea/ggrc-core
|
Add role_order column to roles table
|
"""Add roles order column
Revision ID: 99925466d6e
Revises: 401fb7f0184b
Create Date: 2015-08-05 10:55:43.992382
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '99925466d6e'
down_revision = '401fb7f0184b'
def upgrade():
op.add_column("roles", sa.Column("role_order", sa.Integer(), nullable=True))
op.execute("UPDATE roles SET role_order = id")
# creator role should appear before other roles
op.execute("UPDATE roles SET role_order = 4 WHERE name='Creator'")
def downgrade():
op.drop_column("roles", "role_order")
|
<commit_before><commit_msg>Add role_order column to roles table<commit_after>
|
"""Add roles order column
Revision ID: 99925466d6e
Revises: 401fb7f0184b
Create Date: 2015-08-05 10:55:43.992382
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '99925466d6e'
down_revision = '401fb7f0184b'
def upgrade():
op.add_column("roles", sa.Column("role_order", sa.Integer(), nullable=True))
op.execute("UPDATE roles SET role_order = id")
# creator role should appear before other roles
op.execute("UPDATE roles SET role_order = 4 WHERE name='Creator'")
def downgrade():
op.drop_column("roles", "role_order")
|
Add role_order column to roles table
"""Add roles order column
Revision ID: 99925466d6e
Revises: 401fb7f0184b
Create Date: 2015-08-05 10:55:43.992382
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '99925466d6e'
down_revision = '401fb7f0184b'
def upgrade():
op.add_column("roles", sa.Column("role_order", sa.Integer(), nullable=True))
op.execute("UPDATE roles SET role_order = id")
# creator role should appear before other roles
op.execute("UPDATE roles SET role_order = 4 WHERE name='Creator'")
def downgrade():
op.drop_column("roles", "role_order")
|
<commit_before><commit_msg>Add role_order column to roles table<commit_after>
"""Add roles order column
Revision ID: 99925466d6e
Revises: 401fb7f0184b
Create Date: 2015-08-05 10:55:43.992382
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '99925466d6e'
down_revision = '401fb7f0184b'
def upgrade():
op.add_column("roles", sa.Column("role_order", sa.Integer(), nullable=True))
op.execute("UPDATE roles SET role_order = id")
# creator role should appear before other roles
op.execute("UPDATE roles SET role_order = 4 WHERE name='Creator'")
def downgrade():
op.drop_column("roles", "role_order")
|
|
6086aed8f4c4afa6d1c345649e9dcf33e593b4a9
|
crypto_enigma/tests/test_utils.py
|
crypto_enigma/tests/test_utils.py
|
#!/usr/bin/env python
# encoding: utf8
from __future__ import (absolute_import, print_function, division, unicode_literals)
''' Simple test file for debugging and testing at the shell. To use simply
python test.py
or
./test.py
or run 'test' in PyCharm.
'''
from ..machine import *
# Test utilities and internals
def test_encoding_chars():
assert encode_char("PQR", 'Z') == ' '
assert encode_char("PQR", 'B') == 'Q'
assert encode_char("", ' ') == ' '
def test_marked_mapping():
for s in ["AMC", "ANNCJDDJSJKKWKWK", "", "dkjfsldfjhsljdhflskjdfh"]:
assert EnigmaConfig._marked_mapping(s, 50) == s
def test_locate_letter():
for s in ["AMC", "ANNCJDDJSJKKWKWK", "A", "dkjfslAdfjhsljdhflskjdfh"]:
assert EnigmaConfig._locate_letter(s, 'A', "zzzzz") == -1
for s in ["AMC", "ANNCJDDJSJKKWKWK", "A", "dkjfslAdfjhsljdhflskjdfh"]:
assert EnigmaConfig._locate_letter(s, '5', "zzzzz") == -1
def test_make_message():
assert EnigmaConfig.make_message("AHDuRIWDHUWYRdDUSHSBBqDyXJ") == "AHDURIWDHUWYRDDUSHSBBQDYXJ"
assert EnigmaConfig.make_message("AHDuRI WDHUWYR dDUSHS BBqDyXJ") == "AHDURIWDHUWYRDDUSHSBBQDYXJ"
assert EnigmaConfig.make_message("AγH*D+uRI WDHβUγWYR dDβ*USHS BBγqDyXJ") == "AHDURIWDHUWYRDDUSHSBBQDYXJ"
assert EnigmaConfig.make_message("AγH*D+uRI WDHβUγWYR dDβ*USHS BBγqDyXJ") == "AHDURIWDHUWYRDDUSHSBBQDYXJ"
assert EnigmaConfig.make_message("AγH*D+uRI WDHβUγWYR dDβ*USHS BBγqDy!'") == "AHDURIWDHUWYRDDUSHSBBQDYXJ"
|
Add tests of some utilities and internals
|
Add tests of some utilities and internals
|
Python
|
bsd-3-clause
|
orome/crypto-enigma-py
|
Add tests of some utilities and internals
|
#!/usr/bin/env python
# encoding: utf8
from __future__ import (absolute_import, print_function, division, unicode_literals)
''' Simple test file for debugging and testing at the shell. To use simply
python test.py
or
./test.py
or run 'test' in PyCharm.
'''
from ..machine import *
# Test utilities and internals
def test_encoding_chars():
assert encode_char("PQR", 'Z') == ' '
assert encode_char("PQR", 'B') == 'Q'
assert encode_char("", ' ') == ' '
def test_marked_mapping():
for s in ["AMC", "ANNCJDDJSJKKWKWK", "", "dkjfsldfjhsljdhflskjdfh"]:
assert EnigmaConfig._marked_mapping(s, 50) == s
def test_locate_letter():
for s in ["AMC", "ANNCJDDJSJKKWKWK", "A", "dkjfslAdfjhsljdhflskjdfh"]:
assert EnigmaConfig._locate_letter(s, 'A', "zzzzz") == -1
for s in ["AMC", "ANNCJDDJSJKKWKWK", "A", "dkjfslAdfjhsljdhflskjdfh"]:
assert EnigmaConfig._locate_letter(s, '5', "zzzzz") == -1
def test_make_message():
assert EnigmaConfig.make_message("AHDuRIWDHUWYRdDUSHSBBqDyXJ") == "AHDURIWDHUWYRDDUSHSBBQDYXJ"
assert EnigmaConfig.make_message("AHDuRI WDHUWYR dDUSHS BBqDyXJ") == "AHDURIWDHUWYRDDUSHSBBQDYXJ"
assert EnigmaConfig.make_message("AγH*D+uRI WDHβUγWYR dDβ*USHS BBγqDyXJ") == "AHDURIWDHUWYRDDUSHSBBQDYXJ"
assert EnigmaConfig.make_message("AγH*D+uRI WDHβUγWYR dDβ*USHS BBγqDyXJ") == "AHDURIWDHUWYRDDUSHSBBQDYXJ"
assert EnigmaConfig.make_message("AγH*D+uRI WDHβUγWYR dDβ*USHS BBγqDy!'") == "AHDURIWDHUWYRDDUSHSBBQDYXJ"
|
<commit_before><commit_msg>Add tests of some utilities and internals<commit_after>
|
#!/usr/bin/env python
# encoding: utf8
from __future__ import (absolute_import, print_function, division, unicode_literals)
''' Simple test file for debugging and testing at the shell. To use simply
python test.py
or
./test.py
or run 'test' in PyCharm.
'''
from ..machine import *
# Test utilities and internals
def test_encoding_chars():
assert encode_char("PQR", 'Z') == ' '
assert encode_char("PQR", 'B') == 'Q'
assert encode_char("", ' ') == ' '
def test_marked_mapping():
for s in ["AMC", "ANNCJDDJSJKKWKWK", "", "dkjfsldfjhsljdhflskjdfh"]:
assert EnigmaConfig._marked_mapping(s, 50) == s
def test_locate_letter():
for s in ["AMC", "ANNCJDDJSJKKWKWK", "A", "dkjfslAdfjhsljdhflskjdfh"]:
assert EnigmaConfig._locate_letter(s, 'A', "zzzzz") == -1
for s in ["AMC", "ANNCJDDJSJKKWKWK", "A", "dkjfslAdfjhsljdhflskjdfh"]:
assert EnigmaConfig._locate_letter(s, '5', "zzzzz") == -1
def test_make_message():
assert EnigmaConfig.make_message("AHDuRIWDHUWYRdDUSHSBBqDyXJ") == "AHDURIWDHUWYRDDUSHSBBQDYXJ"
assert EnigmaConfig.make_message("AHDuRI WDHUWYR dDUSHS BBqDyXJ") == "AHDURIWDHUWYRDDUSHSBBQDYXJ"
assert EnigmaConfig.make_message("AγH*D+uRI WDHβUγWYR dDβ*USHS BBγqDyXJ") == "AHDURIWDHUWYRDDUSHSBBQDYXJ"
assert EnigmaConfig.make_message("AγH*D+uRI WDHβUγWYR dDβ*USHS BBγqDyXJ") == "AHDURIWDHUWYRDDUSHSBBQDYXJ"
assert EnigmaConfig.make_message("AγH*D+uRI WDHβUγWYR dDβ*USHS BBγqDy!'") == "AHDURIWDHUWYRDDUSHSBBQDYXJ"
|
Add tests of some utilities and internals#!/usr/bin/env python
# encoding: utf8
from __future__ import (absolute_import, print_function, division, unicode_literals)
''' Simple test file for debugging and testing at the shell. To use simply
python test.py
or
./test.py
or run 'test' in PyCharm.
'''
from ..machine import *
# Test utilities and internals
def test_encoding_chars():
assert encode_char("PQR", 'Z') == ' '
assert encode_char("PQR", 'B') == 'Q'
assert encode_char("", ' ') == ' '
def test_marked_mapping():
for s in ["AMC", "ANNCJDDJSJKKWKWK", "", "dkjfsldfjhsljdhflskjdfh"]:
assert EnigmaConfig._marked_mapping(s, 50) == s
def test_locate_letter():
for s in ["AMC", "ANNCJDDJSJKKWKWK", "A", "dkjfslAdfjhsljdhflskjdfh"]:
assert EnigmaConfig._locate_letter(s, 'A', "zzzzz") == -1
for s in ["AMC", "ANNCJDDJSJKKWKWK", "A", "dkjfslAdfjhsljdhflskjdfh"]:
assert EnigmaConfig._locate_letter(s, '5', "zzzzz") == -1
def test_make_message():
assert EnigmaConfig.make_message("AHDuRIWDHUWYRdDUSHSBBqDyXJ") == "AHDURIWDHUWYRDDUSHSBBQDYXJ"
assert EnigmaConfig.make_message("AHDuRI WDHUWYR dDUSHS BBqDyXJ") == "AHDURIWDHUWYRDDUSHSBBQDYXJ"
assert EnigmaConfig.make_message("AγH*D+uRI WDHβUγWYR dDβ*USHS BBγqDyXJ") == "AHDURIWDHUWYRDDUSHSBBQDYXJ"
assert EnigmaConfig.make_message("AγH*D+uRI WDHβUγWYR dDβ*USHS BBγqDyXJ") == "AHDURIWDHUWYRDDUSHSBBQDYXJ"
assert EnigmaConfig.make_message("AγH*D+uRI WDHβUγWYR dDβ*USHS BBγqDy!'") == "AHDURIWDHUWYRDDUSHSBBQDYXJ"
|
<commit_before><commit_msg>Add tests of some utilities and internals<commit_after>#!/usr/bin/env python
# encoding: utf8
from __future__ import (absolute_import, print_function, division, unicode_literals)
''' Simple test file for debugging and testing at the shell. To use simply
python test.py
or
./test.py
or run 'test' in PyCharm.
'''
from ..machine import *
# Test utilities and internals
def test_encoding_chars():
assert encode_char("PQR", 'Z') == ' '
assert encode_char("PQR", 'B') == 'Q'
assert encode_char("", ' ') == ' '
def test_marked_mapping():
for s in ["AMC", "ANNCJDDJSJKKWKWK", "", "dkjfsldfjhsljdhflskjdfh"]:
assert EnigmaConfig._marked_mapping(s, 50) == s
def test_locate_letter():
for s in ["AMC", "ANNCJDDJSJKKWKWK", "A", "dkjfslAdfjhsljdhflskjdfh"]:
assert EnigmaConfig._locate_letter(s, 'A', "zzzzz") == -1
for s in ["AMC", "ANNCJDDJSJKKWKWK", "A", "dkjfslAdfjhsljdhflskjdfh"]:
assert EnigmaConfig._locate_letter(s, '5', "zzzzz") == -1
def test_make_message():
assert EnigmaConfig.make_message("AHDuRIWDHUWYRdDUSHSBBqDyXJ") == "AHDURIWDHUWYRDDUSHSBBQDYXJ"
assert EnigmaConfig.make_message("AHDuRI WDHUWYR dDUSHS BBqDyXJ") == "AHDURIWDHUWYRDDUSHSBBQDYXJ"
assert EnigmaConfig.make_message("AγH*D+uRI WDHβUγWYR dDβ*USHS BBγqDyXJ") == "AHDURIWDHUWYRDDUSHSBBQDYXJ"
assert EnigmaConfig.make_message("AγH*D+uRI WDHβUγWYR dDβ*USHS BBγqDyXJ") == "AHDURIWDHUWYRDDUSHSBBQDYXJ"
assert EnigmaConfig.make_message("AγH*D+uRI WDHβUγWYR dDβ*USHS BBγqDy!'") == "AHDURIWDHUWYRDDUSHSBBQDYXJ"
|
|
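The loop-based tests in the commit above can also be written with pytest's parametrization, which reports each input as its own test case. A sketch of test_marked_mapping in that style, assuming the same `from ..machine import *` as in the test module:

import pytest

@pytest.mark.parametrize("s", ["AMC", "ANNCJDDJSJKKWKWK", "", "dkjfsldfjhsljdhflskjdfh"])
def test_marked_mapping_param(s):
    assert EnigmaConfig._marked_mapping(s, 50) == s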
323d5e907ce51c01793f52aa054d3f7a57373b22
|
test_billy.py
|
test_billy.py
|
import unittest
import billy
class TestSunlightAPI(unittest.TestCase):
def test_can_get_resp_200(self):
resp = billy.getresponse()
self.assertEqual(resp.status, 200)
|
Add initial test to check response status code
|
Add initial test to check response status code
|
Python
|
mit
|
mosegontar/billybot
|
Add initial test to check response status code
|
import unittest
import billy
class TestSunlightAPI(unittest.TestCase):
def test_can_get_resp_200(self):
resp = billy.getresponse()
self.assertEqual(resp.status, 200)
|
<commit_before><commit_msg>Add initial test to check response status code<commit_after>
|
import unittest
import billy
class TestSunlightAPI(unittest.TestCase):
def test_can_get_resp_200(self):
resp = billy.getresponse()
self.assertEqual(resp.status, 200)
|
Add initial test to check response status codeimport unittest
import billy
class TestSunlightAPI(unittest.TestCase):
def test_can_get_resp_200(self):
resp = billy.getresponse()
self.assertEqual(resp.status, 200)
|
<commit_before><commit_msg>Add initial test to check response status code<commit_after>import unittest
import billy
class TestSunlightAPI(unittest.TestCase):
def test_can_get_resp_200(self):
resp = billy.getresponse()
self.assertEqual(resp.status, 200)
|
|
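The test above imports a billy module whose getresponse() helper is not part of this commit. A minimal stand-in built on the standard library, so the assertion has something concrete to exercise, might look like the following; the host name is an assumption (the Sunlight Foundation congress API of that era), not taken from the commit:

# billy.py -- hypothetical stub for the module under test
import http.client

def getresponse():
    # Open a connection and return the raw HTTPResponse, which carries .status.
    conn = http.client.HTTPSConnection("congress.api.sunlightfoundation.com")
    conn.request("GET", "/")
    return conn.getresponse()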
2ce4386082773652a6da41c5f776882a77ad96ca
|
tests/test_environment.py
|
tests/test_environment.py
|
# -*- coding: utf-8 -*-
import pytest
from cookiecutter.environment import StrictEnvironment
from cookiecutter.exceptions import UnknownExtension
def test_env_should_raise_for_unknown_extension():
context = {
'_extensions': ['foobar']
}
with pytest.raises(UnknownExtension) as err:
StrictEnvironment(context, keep_trailing_newline=True)
assert 'Unable to load extension: No module named foobar' in str(err.value)
|
Implement a basic test for StrictEnvironment
|
Implement a basic test for StrictEnvironment
|
Python
|
bsd-3-clause
|
michaeljoseph/cookiecutter,terryjbates/cookiecutter,luzfcb/cookiecutter,dajose/cookiecutter,willingc/cookiecutter,stevepiercy/cookiecutter,willingc/cookiecutter,audreyr/cookiecutter,audreyr/cookiecutter,Springerle/cookiecutter,hackebrot/cookiecutter,luzfcb/cookiecutter,hackebrot/cookiecutter,pjbull/cookiecutter,Springerle/cookiecutter,michaeljoseph/cookiecutter,dajose/cookiecutter,terryjbates/cookiecutter,stevepiercy/cookiecutter,pjbull/cookiecutter
|
Implement a basic test for StrictEnvironment
|
# -*- coding: utf-8 -*-
import pytest
from cookiecutter.environment import StrictEnvironment
from cookiecutter.exceptions import UnknownExtension
def test_env_should_raise_for_unknown_extension():
context = {
'_extensions': ['foobar']
}
with pytest.raises(UnknownExtension) as err:
StrictEnvironment(context, keep_trailing_newline=True)
assert 'Unable to load extension: No module named foobar' in str(err.value)
|
<commit_before><commit_msg>Implement a basic test for StrictEnvironment<commit_after>
|
# -*- coding: utf-8 -*-
import pytest
from cookiecutter.environment import StrictEnvironment
from cookiecutter.exceptions import UnknownExtension
def test_env_should_raise_for_unknown_extension():
context = {
'_extensions': ['foobar']
}
with pytest.raises(UnknownExtension) as err:
StrictEnvironment(context, keep_trailing_newline=True)
assert 'Unable to load extension: No module named foobar' in str(err.value)
|
Implement a basic test for StrictEnvironment# -*- coding: utf-8 -*-
import pytest
from cookiecutter.environment import StrictEnvironment
from cookiecutter.exceptions import UnknownExtension
def test_env_should_raise_for_unknown_extension():
context = {
'_extensions': ['foobar']
}
with pytest.raises(UnknownExtension) as err:
StrictEnvironment(context, keep_trailing_newline=True)
assert 'Unable to load extension: No module named foobar' in str(err.value)
|
<commit_before><commit_msg>Implement a basic test for StrictEnvironment<commit_after># -*- coding: utf-8 -*-
import pytest
from cookiecutter.environment import StrictEnvironment
from cookiecutter.exceptions import UnknownExtension
def test_env_should_raise_for_unknown_extension():
context = {
'_extensions': ['foobar']
}
with pytest.raises(UnknownExtension) as err:
StrictEnvironment(context, keep_trailing_newline=True)
assert 'Unable to load extension: No module named foobar' in str(err.value)
|
|
33023294b9ef2592397a4bffee5d492caf81e55c
|
content/doc/misc/_wsgi-app.py
|
content/doc/misc/_wsgi-app.py
|
# -*- coding: utf-8 -*-
# content/doc/misc/_wsgi-app.py
# =============================================================================
# WSGI application template.
# Usage:
# $ uwsgi --http :8765 --wsgi-file _wsgi-app.py
# $ curl localhost:8765
# =============================================================================
# See for ref.: https://uwsgi-docs.readthedocs.io/en/latest/WSGIquickstart.html
#
TXT = "\nWSGI application template.\n"
RSC_HTTP_200_OK = "200 OK"
HDR_CONTENT_TYPE_N = "Content-Type"
HDR_CONTENT_TYPE_V = "text/plain"
# The application entry point.
def application(env, resp):
_env = str(env).encode()
_txt = str(TXT).encode()
resp(RSC_HTTP_200_OK,
[(HDR_CONTENT_TYPE_N,
HDR_CONTENT_TYPE_V)])
return [_env, _txt]
# vim:set nu et ts=4 sw=4:
|
Add WSGI app template to play with uWSGI server.
|
Add WSGI app template to play with uWSGI server.
|
Python
|
mit
|
rgolubtsov/rgolubtsov.github.io,rgolubtsov/rgolubtsov.github.io,rgolubtsov/rgolubtsov.github.io,rgolubtsov/rgolubtsov.github.io,rgolubtsov/rgolubtsov.github.io,rgolubtsov/rgolubtsov.github.io,rgolubtsov/rgolubtsov.github.io,rgolubtsov/rgolubtsov.github.io
|
Add WSGI app template to play with uWSGI server.
|
# -*- coding: utf-8 -*-
# content/doc/misc/_wsgi-app.py
# =============================================================================
# WSGI application template.
# Usage:
# $ uwsgi --http :8765 --wsgi-file _wsgi-app.py
# $ curl localhost:8765
# =============================================================================
# See for ref.: https://uwsgi-docs.readthedocs.io/en/latest/WSGIquickstart.html
#
TXT = "\nWSGI application template.\n"
RSC_HTTP_200_OK = "200 OK"
HDR_CONTENT_TYPE_N = "Content-Type"
HDR_CONTENT_TYPE_V = "text/plain"
# The application entry point.
def application(env, resp):
_env = str(env).encode()
_txt = str(TXT).encode()
resp(RSC_HTTP_200_OK,
[(HDR_CONTENT_TYPE_N,
HDR_CONTENT_TYPE_V)])
return [_env, _txt]
# vim:set nu et ts=4 sw=4:
|
<commit_before><commit_msg>Add WSGI app template to play with uWSGI server.<commit_after>
|
# -*- coding: utf-8 -*-
# content/doc/misc/_wsgi-app.py
# =============================================================================
# WSGI application template.
# Usage:
# $ uwsgi --http :8765 --wsgi-file _wsgi-app.py
# $ curl localhost:8765
# =============================================================================
# See for ref.: https://uwsgi-docs.readthedocs.io/en/latest/WSGIquickstart.html
#
TXT = "\nWSGI application template.\n"
RSC_HTTP_200_OK = "200 OK"
HDR_CONTENT_TYPE_N = "Content-Type"
HDR_CONTENT_TYPE_V = "text/plain"
# The application entry point.
def application(env, resp):
_env = str(env).encode()
_txt = str(TXT).encode()
resp(RSC_HTTP_200_OK,
[(HDR_CONTENT_TYPE_N,
HDR_CONTENT_TYPE_V)])
return [_env, _txt]
# vim:set nu et ts=4 sw=4:
|
Add WSGI app template to play with uWSGI server.# -*- coding: utf-8 -*-
# content/doc/misc/_wsgi-app.py
# =============================================================================
# WSGI application template.
# Usage:
# $ uwsgi --http :8765 --wsgi-file _wsgi-app.py
# $ curl localhost:8765
# =============================================================================
# See for ref.: https://uwsgi-docs.readthedocs.io/en/latest/WSGIquickstart.html
#
TXT = "\nWSGI application template.\n"
RSC_HTTP_200_OK = "200 OK"
HDR_CONTENT_TYPE_N = "Content-Type"
HDR_CONTENT_TYPE_V = "text/plain"
# The application entry point.
def application(env, resp):
_env = str(env).encode()
_txt = str(TXT).encode()
resp(RSC_HTTP_200_OK,
[(HDR_CONTENT_TYPE_N,
HDR_CONTENT_TYPE_V)])
return [_env, _txt]
# vim:set nu et ts=4 sw=4:
|
<commit_before><commit_msg>Add WSGI app template to play with uWSGI server.<commit_after># -*- coding: utf-8 -*-
# content/doc/misc/_wsgi-app.py
# =============================================================================
# WSGI application template.
# Usage:
# $ uwsgi --http :8765 --wsgi-file _wsgi-app.py
# $ curl localhost:8765
# =============================================================================
# See for ref.: https://uwsgi-docs.readthedocs.io/en/latest/WSGIquickstart.html
#
TXT = "\nWSGI application template.\n"
RSC_HTTP_200_OK = "200 OK"
HDR_CONTENT_TYPE_N = "Content-Type"
HDR_CONTENT_TYPE_V = "text/plain"
# The application entry point.
def application(env, resp):
_env = str(env).encode()
_txt = str(TXT).encode()
resp(RSC_HTTP_200_OK,
[(HDR_CONTENT_TYPE_N,
HDR_CONTENT_TYPE_V)])
return [_env, _txt]
# vim:set nu et ts=4 sw=4:
|
|
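Besides uwsgi, the application callable in the template above can be served for local testing with the standard library's wsgiref server; note that wsgiref expects the full PEP 3333 status string ("200 OK"). A sketch, assuming the callable has been saved under an importable module name (the `_wsgi-app.py` filename itself cannot be imported directly because of the hyphen):

from wsgiref.simple_server import make_server

# `application` is the WSGI callable defined in the template above.
httpd = make_server('', 8765, application)
httpd.serve_forever()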
007d715ade7706beb56a9e03fbf3b74f596f915e
|
c2cgeoportal/scaffolds/update/+package+/CONST_migration/versions/009_Add_queryLayers_column.py
|
c2cgeoportal/scaffolds/update/+package+/CONST_migration/versions/009_Add_queryLayers_column.py
|
from sqlalchemy import MetaData, Table, Column, types
from c2cgeoportal import schema
def upgrade(migrate_engine):
meta = MetaData(bind=migrate_engine)
layer = Table('layer', meta, schema=schema, autoload=True)
Column('queryLayers', types.Unicode).create(layer)
def downgrade(migrate_engine):
meta = MetaData(bind=migrate_engine)
layer = Table('layer', meta, schema=schema, autoload=True)
layer.c.queryLayers.drop()
|
Add migration script for queryLayers column
|
Add migration script for queryLayers column
|
Python
|
bsd-2-clause
|
tsauerwein/c2cgeoportal,tsauerwein/c2cgeoportal,tsauerwein/c2cgeoportal,tsauerwein/c2cgeoportal
|
Add migration script for queryLayers column
|
from sqlalchemy import MetaData, Table, Column, types
from c2cgeoportal import schema
def upgrade(migrate_engine):
meta = MetaData(bind=migrate_engine)
layer = Table('layer', meta, schema=schema, autoload=True)
Column('queryLayers', types.Unicode).create(layer)
def downgrade(migrate_engine):
meta = MetaData(bind=migrate_engine)
layer = Table('layer', meta, schema=schema, autoload=True)
layer.c.queryLayers.drop()
|
<commit_before><commit_msg>Add migration script for queryLayers column<commit_after>
|
from sqlalchemy import MetaData, Table, Column, types
from c2cgeoportal import schema
def upgrade(migrate_engine):
meta = MetaData(bind=migrate_engine)
layer = Table('layer', meta, schema=schema, autoload=True)
Column('queryLayers', types.Unicode).create(layer)
def downgrade(migrate_engine):
meta = MetaData(bind=migrate_engine)
layer = Table('layer', meta, schema=schema, autoload=True)
layer.c.queryLayers.drop()
|
Add migration script for queryLayers columnfrom sqlalchemy import MetaData, Table, Column, types
from c2cgeoportal import schema
def upgrade(migrate_engine):
meta = MetaData(bind=migrate_engine)
layer = Table('layer', meta, schema=schema, autoload=True)
Column('queryLayers', types.Unicode).create(layer)
def downgrade(migrate_engine):
meta = MetaData(bind=migrate_engine)
layer = Table('layer', meta, schema=schema, autoload=True)
layer.c.queryLayers.drop()
|
<commit_before><commit_msg>Add migration script for queryLayers column<commit_after>from sqlalchemy import MetaData, Table, Column, types
from c2cgeoportal import schema
def upgrade(migrate_engine):
meta = MetaData(bind=migrate_engine)
layer = Table('layer', meta, schema=schema, autoload=True)
Column('queryLayers', types.Unicode).create(layer)
def downgrade(migrate_engine):
meta = MetaData(bind=migrate_engine)
layer = Table('layer', meta, schema=schema, autoload=True)
layer.c.queryLayers.drop()
|
|
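For completeness, the sqlalchemy-migrate pattern above (Column(...).create(table) / table.c.col.drop()) maps one-to-one onto plain SQL; a hedged sketch of the equivalent statements, useful when verifying the migration by hand (the schema name is whatever c2cgeoportal's `schema` resolves to):

-- upgrade
-- ALTER TABLE <schema>.layer ADD COLUMN "queryLayers" VARCHAR;
-- downgrade
-- ALTER TABLE <schema>.layer DROP COLUMN "queryLayers";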
c105d1caddade4b2061f45e3ba4db0be0f7edb9d
|
tests/SaveFile/TestSaveFile.py
|
tests/SaveFile/TestSaveFile.py
|
# Copyright (c) 2015 Ultimaker B.V.
# Uranium is released under the terms of the AGPLv3 or higher.
import unittest
import os.path
import tempfile
from multiprocessing import Pool
from UM.SaveFile import SaveFile
write_count = 0
def write_dual(path):
with SaveFile(path, "w") as f:
f.write("test file")
class TestSaveFile(unittest.TestCase):
def setUp(self):
self._temp_dir = tempfile.TemporaryDirectory()
def tearDown(self):
self._temp_dir.cleanup()
self._temp_dir = None
def test_singleWrite(self):
path = os.path.join(self._temp_dir.name, "single_write")
with SaveFile(path, "w") as f:
f.write("test file")
with open(path) as f:
self.assertEqual(f.readline(), "test file")
def test_multiWrite(self):
path = os.path.join(self._temp_dir.name, "dual_write")
# Start two processes that try to write to the same file
with Pool(processes = 2) as p:
p.apply_async(write_dual, [path])
p.apply(write_dual, [path])
# Once done, there should be just one file
self.assertEqual(len(os.listdir(self._temp_dir.name)), 1)
# And file contents should be correct.
with open(path) as f:
data = f.read()
self.assertEqual(len(data), 9)
self.assertEqual(data, "test file")
if __name__ == "__main__":
unittest.main()
|
Add a test for SaveFile
|
Add a test for SaveFile
Contributes to CURA-511
|
Python
|
agpl-3.0
|
onitake/Uranium,onitake/Uranium
|
Add a test for SaveFile
Contributes to CURA-511
|
# Copyright (c) 2015 Ultimaker B.V.
# Uranium is released under the terms of the AGPLv3 or higher.
import unittest
import os.path
import tempfile
from multiprocessing import Pool
from UM.SaveFile import SaveFile
write_count = 0
def write_dual(path):
with SaveFile(path, "w") as f:
f.write("test file")
class TestSaveFile(unittest.TestCase):
def setUp(self):
self._temp_dir = tempfile.TemporaryDirectory()
def tearDown(self):
self._temp_dir.cleanup()
self._temp_dir = None
def test_singleWrite(self):
path = os.path.join(self._temp_dir.name, "single_write")
with SaveFile(path, "w") as f:
f.write("test file")
with open(path) as f:
self.assertEqual(f.readline(), "test file")
def test_multiWrite(self):
path = os.path.join(self._temp_dir.name, "dual_write")
# Start two processes that try to write to the same file
with Pool(processes = 2) as p:
p.apply_async(write_dual, [path])
p.apply(write_dual, [path])
# Once done, there should be just one file
self.assertEqual(len(os.listdir(self._temp_dir.name)), 1)
# And file contents should be correct.
with open(path) as f:
data = f.read()
self.assertEqual(len(data), 9)
self.assertEqual(data, "test file")
if __name__ == "__main__":
unittest.main()
|
<commit_before><commit_msg>Add a test for SaveFile
Contributes to CURA-511<commit_after>
|
# Copyright (c) 2015 Ultimaker B.V.
# Uranium is released under the terms of the AGPLv3 or higher.
import unittest
import os.path
import tempfile
from multiprocessing import Pool
from UM.SaveFile import SaveFile
write_count = 0
def write_dual(path):
with SaveFile(path, "w") as f:
f.write("test file")
class TestSaveFile(unittest.TestCase):
def setUp(self):
self._temp_dir = tempfile.TemporaryDirectory()
def tearDown(self):
self._temp_dir.cleanup()
self._temp_dir = None
def test_singleWrite(self):
path = os.path.join(self._temp_dir.name, "single_write")
with SaveFile(path, "w") as f:
f.write("test file")
with open(path) as f:
self.assertEqual(f.readline(), "test file")
def test_multiWrite(self):
path = os.path.join(self._temp_dir.name, "dual_write")
# Start two processes that try to write to the same file
with Pool(processes = 2) as p:
p.apply_async(write_dual, [path])
p.apply(write_dual, [path])
# Once done, there should be just one file
self.assertEqual(len(os.listdir(self._temp_dir.name)), 1)
# And file contents should be correct.
with open(path) as f:
data = f.read()
self.assertEqual(len(data), 9)
self.assertEqual(data, "test file")
if __name__ == "__main__":
unittest.main()
|
Add a test for SaveFile
Contributes to CURA-511# Copyright (c) 2015 Ultimaker B.V.
# Uranium is released under the terms of the AGPLv3 or higher.
import unittest
import os.path
import tempfile
from multiprocessing import Pool
from UM.SaveFile import SaveFile
write_count = 0
def write_dual(path):
with SaveFile(path, "w") as f:
f.write("test file")
class TestSaveFile(unittest.TestCase):
def setUp(self):
self._temp_dir = tempfile.TemporaryDirectory()
def tearDown(self):
self._temp_dir.cleanup()
self._temp_dir = None
def test_singleWrite(self):
path = os.path.join(self._temp_dir.name, "single_write")
with SaveFile(path, "w") as f:
f.write("test file")
with open(path) as f:
self.assertEqual(f.readline(), "test file")
def test_multiWrite(self):
path = os.path.join(self._temp_dir.name, "dual_write")
# Start two processes that try to write to the same file
with Pool(processes = 2) as p:
p.apply_async(write_dual, [path])
p.apply(write_dual, [path])
# Once done, there should be just one file
self.assertEqual(len(os.listdir(self._temp_dir.name)), 1)
# And file contents should be correct.
with open(path) as f:
data = f.read()
self.assertEqual(len(data), 9)
self.assertEqual(data, "test file")
if __name__ == "__main__":
unittest.main()
|
<commit_before><commit_msg>Add a test for SaveFile
Contributes to CURA-511<commit_after># Copyright (c) 2015 Ultimaker B.V.
# Uranium is released under the terms of the AGPLv3 or higher.
import unittest
import os.path
import tempfile
from multiprocessing import Pool
from UM.SaveFile import SaveFile
write_count = 0
def write_dual(path):
with SaveFile(path, "w") as f:
f.write("test file")
class TestSaveFile(unittest.TestCase):
def setUp(self):
self._temp_dir = tempfile.TemporaryDirectory()
def tearDown(self):
self._temp_dir.cleanup()
self._temp_dir = None
def test_singleWrite(self):
path = os.path.join(self._temp_dir.name, "single_write")
with SaveFile(path, "w") as f:
f.write("test file")
with open(path) as f:
self.assertEqual(f.readline(), "test file")
def test_multiWrite(self):
path = os.path.join(self._temp_dir.name, "dual_write")
# Start two processes that try to write to the same file
with Pool(processes = 2) as p:
p.apply_async(write_dual, [path])
p.apply(write_dual, [path])
# Once done, there should be just one file
self.assertEqual(len(os.listdir(self._temp_dir.name)), 1)
# And file contents should be correct.
with open(path) as f:
data = f.read()
self.assertEqual(len(data), 9)
self.assertEqual(data, "test file")
if __name__ == "__main__":
unittest.main()
|
|
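UM.SaveFile itself is not shown in this commit, but the behaviour the test pins down -- two concurrent writers leaving exactly one intact file -- is the classic write-to-a-temporary-file-then-rename pattern. A rough stand-alone sketch of that idea (an assumption about the approach, not the actual Uranium implementation):

import os
import tempfile

class AtomicSaveFile(object):
    # Write to a temp file in the target directory, then rename over the target.
    def __init__(self, path, mode):
        self._path = path
        fd, self._tmp = tempfile.mkstemp(dir=os.path.dirname(path) or '.')
        self._file = os.fdopen(fd, mode)

    def __enter__(self):
        return self._file

    def __exit__(self, exc_type, exc_value, traceback):
        self._file.close()
        if exc_type is None:
            os.rename(self._tmp, self._path)  # atomic on POSIX filesystems
        else:
            os.remove(self._tmp)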
63633a877289f8b6641db872f4918d437ecbdc22
|
split_to_files.py
|
split_to_files.py
|
#!/usr/bin/env python3
# -*- coding: UTF-8 -*-
import argparse
argparser = argparse.ArgumentParser(description="Split text file rows to separate files")
argparser.add_argument("filename", help="Filename to split")
args = argparser.parse_args()
with open(args.filename, 'r') as input:
for index, line in enumerate(input):
with open('{prefix}_{id}.txt'.format(id=index, prefix=args.filename.split('.')[0]), 'w') as output:
output.write(line)
|
Add a file to split text files by rows
|
Add a file to split text files by rows
|
Python
|
mit
|
dhh17/categories_norms_genres,dhh17/categories_norms_genres,dhh17/categories_norms_genres
|
Add a file to split text files by rows
|
#!/usr/bin/env python3
# -*- coding: UTF-8 -*-
import argparse
argparser = argparse.ArgumentParser(description="Split text file rows to separate files")
argparser.add_argument("filename", help="Filename to split")
args = argparser.parse_args()
with open(args.filename, 'r') as input:
for index, line in enumerate(input):
with open('{prefix}_{id}.txt'.format(id=index, prefix=args.filename.split('.')[0]), 'w') as output:
output.write(line)
|
<commit_before><commit_msg>Add a file to split text files by rows<commit_after>
|
#!/usr/bin/env python3
# -*- coding: UTF-8 -*-
import argparse
argparser = argparse.ArgumentParser(description="Split text file rows to separate files")
argparser.add_argument("filename", help="Filename to split")
args = argparser.parse_args()
with open(args.filename, 'r') as input:
for index, line in enumerate(input):
with open('{prefix}_{id}.txt'.format(id=index, prefix=args.filename.split('.')[0]), 'w') as output:
output.write(line)
|
Add a file to split text files by rows#!/usr/bin/env python3
# -*- coding: UTF-8 -*-
import argparse
argparser = argparse.ArgumentParser(description="Split text file rows to separate files")
argparser.add_argument("filename", help="Filename to split")
args = argparser.parse_args()
with open(args.filename, 'r') as input:
for index, line in enumerate(input):
with open('{prefix}_{id}.txt'.format(id=index, prefix=args.filename.split('.')[0]), 'w') as output:
output.write(line)
|
<commit_before><commit_msg>Add a file to split text files by rows<commit_after>#!/usr/bin/env python3
# -*- coding: UTF-8 -*-
import argparse
argparser = argparse.ArgumentParser(description="Split text file rows to separate files")
argparser.add_argument("filename", help="Filename to split")
args = argparser.parse_args()
with open(args.filename, 'r') as input:
for index, line in enumerate(input):
with open('{prefix}_{id}.txt'.format(id=index, prefix=args.filename.split('.')[0]), 'w') as output:
output.write(line)
|
|
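Run as `python3 split_to_files.py notes.txt`, the script above writes notes_0.txt, notes_1.txt, and so on -- one file per input line. The same behaviour in a pathlib style, for comparison (the example filename is illustrative):

from pathlib import Path

src = Path("notes.txt")
# keepends=True preserves each line's trailing newline, matching the original.
for index, line in enumerate(src.read_text().splitlines(keepends=True)):
    Path("{}_{}.txt".format(src.stem, index)).write_text(line)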
f232d704a0c3b659b994d519c492651033a79a32
|
examples/gstreamer/video_pipeline.py
|
examples/gstreamer/video_pipeline.py
|
#!/usr/bin/env python
import pygst
pygst.require("0.10")
import gst
import gtk
class Pipeline(object):
def __init__(self):
self.pipeline = gst.Pipeline("pipe")
self.webcam = gst.element_factory_make("v4l2src", "webcam")
self.webcam.set_property("device", "/dev/video0")
self.pipeline.add(self.webcam)
self.caps_filter = gst.element_factory_make("capsfilter", "caps_filter")
caps = gst.Caps("video/x-raw-yuv,width=640,height=480,framerate=30/1")
self.caps_filter.set_property("caps", caps)
self.pipeline.add(self.caps_filter)
self.sink = gst.element_factory_make("xvimagesink", "sink")
self.pipeline.add(self.sink)
self.webcam.link(self.caps_filter)
self.caps_filter.link(self.sink)
self.pipeline.set_state(gst.STATE_PLAYING)
start = Pipeline()
gtk.main()
|
Add a video pipeline example.
|
Add a video pipeline example.
|
Python
|
mit
|
peplin/astral
|
Add a video pipeline example.
|
#!/usr/bin/env python
import pygst
pygst.require("0.10")
import gst
import gtk
class Pipeline(object):
def __init__(self):
self.pipeline = gst.Pipeline("pipe")
self.webcam = gst.element_factory_make("v4l2src", "webcam")
self.webcam.set_property("device", "/dev/video0")
self.pipeline.add(self.webcam)
self.caps_filter = gst.element_factory_make("capsfilter", "caps_filter")
caps = gst.Caps("video/x-raw-yuv,width=640,height=480,framerate=30/1")
self.caps_filter.set_property("caps", caps)
self.pipeline.add(self.caps_filter)
self.sink = gst.element_factory_make("xvimagesink", "sink")
self.pipeline.add(self.sink)
self.webcam.link(self.caps_filter)
self.caps_filter.link(self.sink)
self.pipeline.set_state(gst.STATE_PLAYING)
start = Pipeline()
gtk.main()
|
<commit_before><commit_msg>Add a video pipeline example.<commit_after>
|
#!/usr/bin/env python
import pygst
pygst.require("0.10")
import gst
import gtk
class Pipeline(object):
def __init__(self):
self.pipeline = gst.Pipeline("pipe")
self.webcam = gst.element_factory_make("v4l2src", "webcam")
self.webcam.set_property("device", "/dev/video0")
self.pipeline.add(self.webcam)
self.caps_filter = gst.element_factory_make("capsfilter", "caps_filter")
caps = gst.Caps("video/x-raw-yuv,width=640,height=480,framerate=30/1")
self.caps_filter.set_property("caps", caps)
self.pipeline.add(self.caps_filter)
self.sink = gst.element_factory_make("xvimagesink", "sink")
self.pipeline.add(self.sink)
self.webcam.link(self.caps_filter)
self.caps_filter.link(self.sink)
self.pipeline.set_state(gst.STATE_PLAYING)
start = Pipeline()
gtk.main()
|
Add a video pipeline example.#!/usr/bin/env python
import pygst
pygst.require("0.10")
import gst
import gtk
class Pipeline(object):
def __init__(self):
self.pipeline = gst.Pipeline("pipe")
self.webcam = gst.element_factory_make("v4l2src", "webcam")
self.webcam.set_property("device", "/dev/video0")
self.pipeline.add(self.webcam)
self.caps_filter = gst.element_factory_make("capsfilter", "caps_filter")
caps = gst.Caps("video/x-raw-yuv,width=640,height=480,framerate=30/1")
self.caps_filter.set_property("caps", caps)
self.pipeline.add(self.caps_filter)
self.sink = gst.element_factory_make("xvimagesink", "sink")
self.pipeline.add(self.sink)
self.webcam.link(self.caps_filter)
self.caps_filter.link(self.sink)
self.pipeline.set_state(gst.STATE_PLAYING)
start = Pipeline()
gtk.main()
|
<commit_before><commit_msg>Add a video pipeline example.<commit_after>#!/usr/bin/env python
import pygst
pygst.require("0.10")
import gst
import gtk
class Pipeline(object):
def __init__(self):
self.pipeline = gst.Pipeline("pipe")
self.webcam = gst.element_factory_make("v4l2src", "webcam")
self.webcam.set_property("device", "/dev/video0")
self.pipeline.add(self.webcam)
self.caps_filter = gst.element_factory_make("capsfilter", "caps_filter")
caps = gst.Caps("video/x-raw-yuv,width=640,height=480,framerate=30/1")
self.caps_filter.set_property("caps", caps)
self.pipeline.add(self.caps_filter)
self.sink = gst.element_factory_make("xvimagesink", "sink")
self.pipeline.add(self.sink)
self.webcam.link(self.caps_filter)
self.caps_filter.link(self.sink)
self.pipeline.set_state(gst.STATE_PLAYING)
start = Pipeline()
gtk.main()
|
|
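For quick experiments, the gst-python 0.10 bindings can usually build the same chain from a textual description via gst.parse_launch, which avoids the element-by-element wiring; a sketch equivalent to the pipeline above:

import pygst
pygst.require("0.10")
import gst
import gtk

# Bare caps between `!` act as the capsfilter in launch syntax.
pipeline = gst.parse_launch(
    "v4l2src device=/dev/video0 "
    "! video/x-raw-yuv,width=640,height=480,framerate=30/1 "
    "! xvimagesink")
pipeline.set_state(gst.STATE_PLAYING)
gtk.main()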
157b8a6da669a0b1cbb7c2fc8cb761267be53f19
|
ureport/polls/migrations/0054_add_index_poll_question_ruleset_uuid.py
|
ureport/polls/migrations/0054_add_index_poll_question_ruleset_uuid.py
|
# Generated by Django 2.2.3 on 2019-08-14 07:55
from django.db import migrations
# language=SQL
INDEX_SQL = """
CREATE INDEX polls_pollquestion_ruleset_uuid
ON polls_pollquestion (ruleset_uuid);
"""
class Migration(migrations.Migration):
dependencies = [("polls", "0053_poll_backend")]
operations = [migrations.RunSQL(INDEX_SQL)]
|
Add index on poll question ruleset UUID
|
Add index on poll question ruleset UUID
|
Python
|
agpl-3.0
|
Ilhasoft/ureport,rapidpro/ureport,rapidpro/ureport,Ilhasoft/ureport,rapidpro/ureport,rapidpro/ureport,Ilhasoft/ureport,Ilhasoft/ureport
|
Add index on poll question ruleset UUID
|
# Generated by Django 2.2.3 on 2019-08-14 07:55
from django.db import migrations
# language=SQL
INDEX_SQL = """
CREATE INDEX polls_pollquestion_ruleset_uuid
ON polls_pollquestion (ruleset_uuid);
"""
class Migration(migrations.Migration):
dependencies = [("polls", "0053_poll_backend")]
operations = [migrations.RunSQL(INDEX_SQL)]
|
<commit_before><commit_msg>Add index on poll question ruleset UUID<commit_after>
|
# Generated by Django 2.2.3 on 2019-08-14 07:55
from django.db import migrations
# language=SQL
INDEX_SQL = """
CREATE INDEX polls_pollquestion_ruleset_uuid
ON polls_pollquestion (ruleset_uuid);
"""
class Migration(migrations.Migration):
dependencies = [("polls", "0053_poll_backend")]
operations = [migrations.RunSQL(INDEX_SQL)]
|
Add index on poll question ruleset UUID# Generated by Django 2.2.3 on 2019-08-14 07:55
from django.db import migrations
# language=SQL
INDEX_SQL = """
CREATE INDEX polls_pollquestion_ruleset_uuid
ON polls_pollquestion (ruleset_uuid);
"""
class Migration(migrations.Migration):
dependencies = [("polls", "0053_poll_backend")]
operations = [migrations.RunSQL(INDEX_SQL)]
|
<commit_before><commit_msg>Add index on poll question ruleset UUID<commit_after># Generated by Django 2.2.3 on 2019-08-14 07:55
from django.db import migrations
# language=SQL
INDEX_SQL = """
CREATE INDEX polls_pollquestion_ruleset_uuid
ON polls_pollquestion (ruleset_uuid);
"""
class Migration(migrations.Migration):
dependencies = [("polls", "0053_poll_backend")]
operations = [migrations.RunSQL(INDEX_SQL)]
|
|
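As written, migrations.RunSQL(INDEX_SQL) is irreversible, so migrating backwards past 0054 would raise an error on this step. Django's RunSQL takes a reverse_sql argument that restores reversibility; a sketch of the operations list in that form:

operations = [
    migrations.RunSQL(
        INDEX_SQL,
        reverse_sql="DROP INDEX polls_pollquestion_ruleset_uuid;",
    )
]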
386781f1b125f6983a6aa44795dee87d86b68b56
|
pombola/south_africa/management/commands/south_africa_import_scraped_photos.py
|
pombola/south_africa/management/commands/south_africa_import_scraped_photos.py
|
""" Loop through images in a directory and attempt to match them to a person."""
import re
import os
from django.core.exceptions import ObjectDoesNotExist
from django.core.exceptions import MultipleObjectsReturned
from django.core.management.base import LabelCommand
from django.core.files import File
from django.utils.text import slugify
from pombola.core.models import (
Person, ContentType, Image
)
from haystack.query import SearchQuerySet
# Pretty colours to make it easier to spot things.
BRIGHT = '\033[95m'
ENDC = '\033[0m'
def match_person(name):
""" Match up a person by name with their database entry. """
slug = slugify(name)
# Try match on the name first
try:
person = Person.objects.get(legal_name__iexact=name)
except ObjectDoesNotExist:
try:
person = Person.objects.get(slug=slug)
except ObjectDoesNotExist:
search = SearchQuerySet().models(Person).filter(text=name)
if len(search) == 1 and search[0]:
person = search[0].object
else:
return None
except MultipleObjectsReturned:
print 'Multiple people returned for ' + name + ' (' + slug + '). Cannot continue.'
exit(1)
return person
class Command(LabelCommand):
help = 'Imports scraped photos from the ZA parliament site, and matches them to people.'
def __init__(self, *args, **kwargs):
super(Command, self).__init__(*args, **kwargs)
self.content_type_person = ContentType.objects.get_for_model(Person)
def handle_label(self, path, **options):
matched = 0
unmatched = 0
for filename in os.listdir(path):
# Strip out the .jpg
name = re.sub('\.jpg', '', filename)
# Strip any non-alpha trailing characters
name = re.sub('[^a-zA-Z]*$', '', name)
# Strip any more trailing whitespace that may have snuck in
name = name.strip()
# Make the name unicode so we can actually work with it in the DB
name = unicode(name)
# Slice the name into two
name = name.split('_')
if len(name) == 2:
# Match up the person
person = match_person(name[1] + ' ' + name[0])
if person is None:
print BRIGHT + 'Unable to match "' + filename + '" to a person!' + ENDC
unmatched += 1
else:
print 'Matched ' + person.name.encode('utf-8')
Image.objects.create(
object_id=person.id,
content_type=self.content_type_person,
is_primary=True,
source='http://www.parliament.gov.za',
image=File(open(path + filename, 'r'))
)
matched += 1
else:
# This name doesn't have two bits, complain.
print BRIGHT + '"' + filename + '" does not parse to a first and last name.' + ENDC
unmatched += 1
print 'Done! Matched ' + str(matched) + ', failed to match ' + str(unmatched)
|
Add script to import a folder of named images and attach to people.
|
Add script to import a folder of named images and attach to people.
|
Python
|
agpl-3.0
|
hzj123/56th,geoffkilpin/pombola,hzj123/56th,patricmutwiri/pombola,patricmutwiri/pombola,patricmutwiri/pombola,mysociety/pombola,mysociety/pombola,ken-muturi/pombola,mysociety/pombola,geoffkilpin/pombola,ken-muturi/pombola,geoffkilpin/pombola,mysociety/pombola,ken-muturi/pombola,ken-muturi/pombola,patricmutwiri/pombola,ken-muturi/pombola,geoffkilpin/pombola,geoffkilpin/pombola,geoffkilpin/pombola,hzj123/56th,hzj123/56th,ken-muturi/pombola,hzj123/56th,patricmutwiri/pombola,mysociety/pombola,hzj123/56th,patricmutwiri/pombola,mysociety/pombola
|
Add script to import a folder of named images and attach to people.
|
""" Loop through images in a directory and attempt to match them to a person."""
import re
import os
from django.core.exceptions import ObjectDoesNotExist
from django.core.exceptions import MultipleObjectsReturned
from django.core.management.base import LabelCommand
from django.core.files import File
from django.utils.text import slugify
from pombola.core.models import (
Person, ContentType, Image
)
from haystack.query import SearchQuerySet
# Pretty colours to make it easier to spot things.
BRIGHT = '\033[95m'
ENDC = '\033[0m'
def match_person(name):
""" Match up a person by name with their database entry. """
slug = slugify(name)
# Try to match on the name first
try:
person = Person.objects.get(legal_name__iexact=name)
except ObjectDoesNotExist:
try:
person = Person.objects.get(slug=slug)
except ObjectDoesNotExist:
search = SearchQuerySet().models(Person).filter(text=name)
if len(search) == 1 and search[0]:
person = search[0].object
else:
return None
except MultipleObjectsReturned:
print 'Multiple people returned for ' + name + ' (' + slug + '). Cannot continue.'
exit(1)
return person
class Command(LabelCommand):
help = 'Imports scraped photos from the ZA parliament site, and matches them to people.'
def __init__(self, *args, **kwargs):
super(Command, self).__init__(*args, **kwargs)
self.content_type_person = ContentType.objects.get_for_model(Person)
def handle_label(self, path, **options):
matched = 0
unmatched = 0
for filename in os.listdir(path):
# Strip out the .jpg
name = re.sub('\.jpg', '', filename)
# Strip any non-alpha trailing characters
name = re.sub('[^a-zA-Z]*$', '', name)
# Strip any more trailing whitespace that may have snuck in
name = name.strip()
# Make the name unicode so we can actually work with it in the DB
name = unicode(name)
# Slice the name into two
name = name.split('_')
if len(name) == 2:
# Match up the person
person = match_person(name[1] + ' ' + name[0])
if person is None:
print BRIGHT + 'Unable to match "' + filename + '" to a person!' + ENDC
unmatched += 1
else:
print 'Matched ' + person.name.encode('utf-8')
Image.objects.create(
object_id=person.id,
content_type=self.content_type_person,
is_primary=True,
source='http://www.parliament.gov.za',
image=File(open(path + filename, 'r'))
)
matched += 1
else:
# This name doesn't have two bits, complain.
print BRIGHT + '"' + filename + '" does not parse to a first and last name.' + ENDC
unmatched += 1
print 'Done! Matched ' + str(matched) + ', failed to match ' + str(unmatched)
|
<commit_before><commit_msg>Add script to import a folder of named images and attach to people.<commit_after>
|
""" Loop through images in a directory and attempt to match them to a person."""
import re
import os
from django.core.exceptions import ObjectDoesNotExist
from django.core.exceptions import MultipleObjectsReturned
from django.core.management.base import LabelCommand
from django.core.files import File
from django.utils.text import slugify
from pombola.core.models import (
Person, ContentType, Image
)
from haystack.query import SearchQuerySet
# Pretty colours to make it easier to spot things.
BRIGHT = '\033[95m'
ENDC = '\033[0m'
def match_person(name):
""" Match up a person by name with their database entry. """
slug = slugify(name)
# Try match on the name first
try:
person = Person.objects.get(legal_name__iexact=name)
except ObjectDoesNotExist:
try:
person = Person.objects.get(slug=slug)
except ObjectDoesNotExist:
search = SearchQuerySet().models(Person).filter(text=name)
if len(search) == 1 and search[0]:
person = search[0].object
else:
return None
except MultipleObjectsReturned:
print 'Multiple people returned for ' + name + ' (' + slug + '). Cannot continue.'
exit(1)
return person
class Command(LabelCommand):
help = 'Imports scraped photos from the ZA parliament site, and matches them to people.'
def __init__(self, *args, **kwargs):
super(Command, self).__init__(*args, **kwargs)
self.content_type_person = ContentType.objects.get_for_model(Person)
def handle_label(self, path, **options):
matched = 0
unmatched = 0
for filename in os.listdir(path):
# Strip out the .jpg
name = re.sub('\.jpg', '', filename)
# Strip any non-alpha trailing characters
name = re.sub('[^a-zA-Z]*$', '', name)
# Strip any more trailing whitespace that may have snuck in
name = name.strip()
# Make the name unicode so we can actually work with it in the DB
name = unicode(name)
# Slice the name into two
name = name.split('_')
if len(name) == 2:
# Match up the person
person = match_person(name[1] + ' ' + name[0])
if person is None:
print BRIGHT + 'Unable to match "' + filename + '" to a person!'+ ENDC
unmatched += 1
else:
print 'Matched ' + person.name.encode('utf-8')
Image.objects.create(
object_id=person.id,
content_type=self.content_type_person,
is_primary=True,
source='http://www.parliament.gov.za',
image=File(open(path + filename, 'r'))
)
matched += 1
else:
# This name doesn't have two bits, complain.
print BRIGHT + '"' + filename + '" does not parse to a first and last name.'+ ENDC
unmatched += 1
print 'Done! Matched ' + str(matched) + ', failed to match ' + str(unmatched)
|
Add script to import a folder of named images and attach to people.""" Loop through images in a directory and attempt to match them to a person."""
import re
import os
from django.core.exceptions import ObjectDoesNotExist
from django.core.exceptions import MultipleObjectsReturned
from django.core.management.base import LabelCommand
from django.core.files import File
from django.utils.text import slugify
from pombola.core.models import (
Person, ContentType, Image
)
from haystack.query import SearchQuerySet
# Pretty colours to make it easier to spot things.
BRIGHT = '\033[95m'
ENDC = '\033[0m'
def match_person(name):
""" Match up a person by name with their database entry. """
slug = slugify(name)
# Try match on the name first
try:
person = Person.objects.get(legal_name__iexact=name)
except ObjectDoesNotExist:
try:
person = Person.objects.get(slug=slug)
except ObjectDoesNotExist:
search = SearchQuerySet().models(Person).filter(text=name)
if len(search) == 1 and search[0]:
person = search[0].object
else:
return None
except MultipleObjectsReturned:
print 'Multiple people returned for ' + name + ' (' + slug + '). Cannot continue.'
exit(1)
return person
class Command(LabelCommand):
help = 'Imports scraped photos from the ZA parliament site, and matches them to people.'
def __init__(self, *args, **kwargs):
super(Command, self).__init__(*args, **kwargs)
self.content_type_person = ContentType.objects.get_for_model(Person)
def handle_label(self, path, **options):
matched = 0
unmatched = 0
for filename in os.listdir(path):
# Strip out the .jpg
name = re.sub('\.jpg', '', filename)
# Strip any non-alpha trailing characters
name = re.sub('[^a-zA-Z]*$', '', name)
# Strip any more trailing whitespace that may have snuck in
name = name.strip()
# Make the name unicode so we can actually work with it in the DB
name = unicode(name)
# Slice the name into two
name = name.split('_')
if len(name) == 2:
# Match up the person
person = match_person(name[1] + ' ' + name[0])
if person is None:
print BRIGHT + 'Unable to match "' + filename + '" to a person!'+ ENDC
unmatched += 1
else:
print 'Matched ' + person.name.encode('utf-8')
Image.objects.create(
object_id=person.id,
content_type=self.content_type_person,
is_primary=True,
source='http://www.parliament.gov.za',
image=File(open(path + filename, 'r'))
)
matched += 1
else:
# This name doesn't have two bits, complain.
print BRIGHT + '"' + filename + '" does not parse to a first and last name.'+ ENDC
unmatched += 1
print 'Done! Matched ' + str(matched) + ', failed to match ' + str(unmatched)
|
<commit_before><commit_msg>Add script to import a folder of named images and attach to people.<commit_after>""" Loop through images in a directory and attempt to match them to a person."""
import re
import os
from django.core.exceptions import ObjectDoesNotExist
from django.core.exceptions import MultipleObjectsReturned
from django.core.management.base import LabelCommand
from django.core.files import File
from django.utils.text import slugify
from pombola.core.models import (
Person, ContentType, Image
)
from haystack.query import SearchQuerySet
# Pretty colours to make it easier to spot things.
BRIGHT = '\033[95m'
ENDC = '\033[0m'
def match_person(name):
""" Match up a person by name with their database entry. """
slug = slugify(name)
# Try match on the name first
try:
person = Person.objects.get(legal_name__iexact=name)
except ObjectDoesNotExist:
try:
person = Person.objects.get(slug=slug)
except ObjectDoesNotExist:
search = SearchQuerySet().models(Person).filter(text=name)
if len(search) == 1 and search[0]:
person = search[0].object
else:
return None
except MultipleObjectsReturned:
print 'Multiple people returned for ' + name + ' (' + slug + '). Cannot continue.'
exit(1)
return person
class Command(LabelCommand):
help = 'Imports scraped photos from the ZA parliament site, and matches them to people.'
def __init__(self, *args, **kwargs):
super(Command, self).__init__(*args, **kwargs)
self.content_type_person = ContentType.objects.get_for_model(Person)
def handle_label(self, path, **options):
matched = 0
unmatched = 0
for filename in os.listdir(path):
# Strip out the .jpg
name = re.sub('\.jpg', '', filename)
# Strip any non-alpha trailing characters
name = re.sub('[^a-zA-Z]*$', '', name)
# Strip any more trailing whitespace that may have snuck in
name = name.strip()
# Make the name unicode so we can actually work with it in the DB
name = unicode(name)
# Slice the name into two
name = name.split('_')
if len(name) == 2:
# Match up the person
person = match_person(name[1] + ' ' + name[0])
if person is None:
print BRIGHT + 'Unable to match "' + filename + '" to a person!'+ ENDC
unmatched += 1
else:
print 'Matched ' + person.name.encode('utf-8')
Image.objects.create(
object_id=person.id,
content_type=self.content_type_person,
is_primary=True,
source='http://www.parliament.gov.za',
image=File(open(path + filename, 'r'))
)
matched += 1
else:
# This name doesn't have two bits, complain.
print BRIGHT + '"' + filename + '" does not parse to a first and last name.'+ ENDC
unmatched += 1
print 'Done! Matched ' + str(matched) + ', failed to match ' + str(unmatched)
|
|
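A side note on the file handling in the script above: path + filename only resolves correctly when the command is invoked with a trailing slash, the image is opened in text mode ('r'), and the file handle is never closed. A minimal sketch of a safer attach step, reusing the same Django File and Image imports shown above (the helper name attach_image is made up for illustration):

import os
from django.core.files import File
from pombola.core.models import Image

def attach_image(person, directory, filename, content_type_person):
    # os.path.join works with or without a trailing slash; 'rb' keeps
    # binary image data intact on every platform.
    with open(os.path.join(directory, filename), 'rb') as f:
        Image.objects.create(
            object_id=person.id,
            content_type=content_type_person,
            is_primary=True,
            source='http://www.parliament.gov.za',
            image=File(f),
        )

The with block also guarantees the descriptor is released, which the inline open() above does not.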
6990a05e360dc66d26997acc3e4095b0f3910d42
|
tests/ttw/test_sign_in.py
|
tests/ttw/test_sign_in.py
|
from __future__ import absolute_import, division, print_function, unicode_literals
from gratipay.testing import BrowserHarness
class Tests(BrowserHarness):
def test_sign_in_modal_is_hidden_by_default(self):
self.visit('/')
assert not self.css('#sign-in-modal').visible
def test_clicking_sign_in_button_opens_up_modal(self):
self.visit('/')
self.css('.sign-in').click()
assert self.css('#sign-in-modal').visible
def test_clicking_close_closes_modal(self):
self.visit('/')
self.css('.sign-in').click()
self.css('#sign-in-modal .close-modal').click()
assert not self.css('#sign-in-modal').visible
def test_401_page_opens_modal_automatically(self):
self.visit('/about/me/emails.json')
assert self.css('#sign-in-modal').visible
assert self.css('#sign-in-modal p')[0].text == 'Please sign in to continue'
|
Add ttw tests for sign in modal
|
Add ttw tests for sign in modal
|
Python
|
mit
|
gratipay/gratipay.com,gratipay/gratipay.com,gratipay/gratipay.com,gratipay/gratipay.com
|
Add ttw tests for sign in modal
|
from __future__ import absolute_import, division, print_function, unicode_literals
from gratipay.testing import BrowserHarness
class Tests(BrowserHarness):
def test_sign_in_modal_is_hidden_by_default(self):
self.visit('/')
assert not self.css('#sign-in-modal').visible
def test_clicking_sign_in_button_opens_up_modal(self):
self.visit('/')
self.css('.sign-in').click()
assert self.css('#sign-in-modal').visible
def test_clicking_close_closes_modal(self):
self.visit('/')
self.css('.sign-in').click()
self.css('#sign-in-modal .close-modal').click()
assert not self.css('#sign-in-modal').visible
def test_401_page_opens_modal_automatically(self):
self.visit('/about/me/emails.json')
assert self.css('#sign-in-modal').visible
assert self.css('#sign-in-modal p')[0].text == 'Please sign in to continue'
|
<commit_before><commit_msg>Add ttw tests for sign in modal<commit_after>
|
from __future__ import absolute_import, division, print_function, unicode_literals
from gratipay.testing import BrowserHarness
class Tests(BrowserHarness):
def test_sign_in_modal_is_hidden_by_default(self):
self.visit('/')
assert not self.css('#sign-in-modal').visible
def test_clicking_sign_in_button_opens_up_modal(self):
self.visit('/')
self.css('.sign-in').click()
assert self.css('#sign-in-modal').visible
def test_clicking_close_closes_modal(self):
self.visit('/')
self.css('.sign-in').click()
self.css('#sign-in-modal .close-modal').click()
assert not self.css('#sign-in-modal').visible
def test_401_page_opens_modal_automatically(self):
self.visit('/about/me/emails.json')
assert self.css('#sign-in-modal').visible
assert self.css('#sign-in-modal p')[0].text == 'Please sign in to continue'
|
Add ttw tests for sign in modalfrom __future__ import absolute_import, division, print_function, unicode_literals
from gratipay.testing import BrowserHarness
class Tests(BrowserHarness):
def test_sign_in_modal_is_hidden_by_default(self):
self.visit('/')
assert not self.css('#sign-in-modal').visible
def test_clicking_sign_in_button_opens_up_modal(self):
self.visit('/')
self.css('.sign-in').click()
assert self.css('#sign-in-modal').visible
def test_clicking_close_closes_modal(self):
self.visit('/')
self.css('.sign-in').click()
self.css('#sign-in-modal .close-modal').click()
assert not self.css('#sign-in-modal').visible
def test_401_page_opens_modal_automatically(self):
self.visit('/about/me/emails.json')
assert self.css('#sign-in-modal').visible
assert self.css('#sign-in-modal p')[0].text == 'Please sign in to continue'
|
<commit_before><commit_msg>Add ttw tests for sign in modal<commit_after>from __future__ import absolute_import, division, print_function, unicode_literals
from gratipay.testing import BrowserHarness
class Tests(BrowserHarness):
def test_sign_in_modal_is_hidden_by_default(self):
self.visit('/')
assert not self.css('#sign-in-modal').visible
def test_clicking_sign_in_button_opens_up_modal(self):
self.visit('/')
self.css('.sign-in').click()
assert self.css('#sign-in-modal').visible
def test_clicking_close_closes_modal(self):
self.visit('/')
self.css('.sign-in').click()
self.css('#sign-in-modal .close-modal').click()
assert not self.css('#sign-in-modal').visible
def test_401_page_opens_modal_automatically(self):
self.visit('/about/me/emails.json')
assert self.css('#sign-in-modal').visible
assert self.css('#sign-in-modal p')[0].text == 'Please sign in to continue'
|
|
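The three modal tests above repeat the same visit-and-click steps; a small helper mixin keeps each test to a couple of lines. This is only a sketch against the visit()/css() helpers of the BrowserHarness used above, not part of the actual change:

class ModalHelpers(object):
    # Assumes the BrowserHarness API shown in the tests above.
    def open_modal(self):
        self.visit('/')
        self.css('.sign-in').click()

    def modal_visible(self):
        return self.css('#sign-in-modal').visible

A test then reads: self.open_modal(); assert self.modal_visible().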
420a5bf5783621ab30fefdfb989c8e10ef35d7c0
|
usecases/places/basics.py
|
usecases/places/basics.py
|
from predicthq import Client
# Please copy paste your access token here
# or read our Quickstart documentation if you don't have a token yet
# https://developer.predicthq.com/guides/quickstart/
ACCESS_TOKEN = 'abc123'
phq = Client(access_token=ACCESS_TOKEN)
# You can search places in a very similar way as for events.
# The full list of parameters is available at
# https://developer.predicthq.com/resources/places/#search-places
# and the fields available can be found at
# https://developer.predicthq.com/resources/places/#fields
for place in phq.places.search(q='New York', country='US'):
print(place.id, place.name, place.type, place.location)
# You can inspect a single Place object by using the ?id query parameter
ny_state = phq.places.search(id='5128638').results[0]
print(ny_state.id, ny_state.name, ny_state.type, ny_state.location)
|
Add a couple of places examples
|
Add a couple of places examples
|
Python
|
mit
|
predicthq/sdk-py
|
Add a couple of places examples
|
from predicthq import Client
# Please copy paste your access token here
# or read our Quickstart documentation if you don't have a token yet
# https://developer.predicthq.com/guides/quickstart/
ACCESS_TOKEN = 'abc123'
phq = Client(access_token=ACCESS_TOKEN)
# You can search places in a very similar way as for events.
# The full list of parameters is available at
# https://developer.predicthq.com/resources/places/#search-places
# and the fields available can be found at
# https://developer.predicthq.com/resources/places/#fields
for place in phq.places.search(q='New York', country='US'):
print(place.id, place.name, place.type, place.location)
# You can inspect a single Place object by using the ?id query parameter
ny_state = phq.places.search(id='5128638').results[0]
print(ny_state.id, ny_state.name, ny_state.type, ny_state.location)
|
<commit_before><commit_msg>Add a couple of places examples<commit_after>
|
from predicthq import Client
# Please copy paste your access token here
# or read our Quickstart documentation if you don't have a token yet
# https://developer.predicthq.com/guides/quickstart/
ACCESS_TOKEN = 'abc123'
phq = Client(access_token=ACCESS_TOKEN)
# You can search places in a very similar way as for events.
# The full list of parameters is available at
# https://developer.predicthq.com/resources/places/#search-places
# and the fields available can be found at
# https://developer.predicthq.com/resources/places/#fields
for place in phq.places.search(q='New York', country='US'):
print(place.id, place.name, place.type, place.location)
# You can inspect a single Place object by using the ?id query parameter
ny_state = phq.places.search(id='5128638').results[0]
print(ny_state.id, ny_state.name, ny_state.type, ny_state.location)
|
Add a couple of places examplesfrom predicthq import Client
# Please copy paste your access token here
# or read our Quickstart documentation if you don't have a token yet
# https://developer.predicthq.com/guides/quickstart/
ACCESS_TOKEN = 'abc123'
phq = Client(access_token=ACCESS_TOKEN)
# You can search places in a very similar way as for events.
# The full list of parameters is available at
# https://developer.predicthq.com/resources/places/#search-places
# and the fields available can be found at
# https://developer.predicthq.com/resources/places/#fields
for place in phq.places.search(q='New York', country='US'):
print(place.id, place.name, place.type, place.location)
# You can inspect a single Place object by using the ?id query parameter
ny_state = phq.places.search(id='5128638').results[0]
print(ny_state.id, ny_state.name, ny_state.type, ny_state.location)
|
<commit_before><commit_msg>Add a couple of places examples<commit_after>from predicthq import Client
# Please copy paste your access token here
# or read our Quickstart documentation if you don't have a token yet
# https://developer.predicthq.com/guides/quickstart/
ACCESS_TOKEN = 'abc123'
phq = Client(access_token=ACCESS_TOKEN)
# You can search places in a very similar way as for events.
# The full list of parameters is available at
# https://developer.predicthq.com/resources/places/#search-places
# and the fields available can be found at
# https://developer.predicthq.com/resources/places/#fields
for place in phq.places.search(q='New York', country='US'):
print(place.id, place.name, place.type, place.location)
# You can inspect a single Place object by using the ?id query parameter
ny_state = phq.places.search(id='5128638').results[0]
print(ny_state.id, ny_state.name, ny_state.type, ny_state.location)
|
|
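The for-loop in the example above walks every result the iterator yields. If search() pages through the full result set transparently (an assumption here, not confirmed by the snippet), itertools.islice is a cheap way to cap the walk at a sample, reusing the phq client constructed above:

from itertools import islice

# Stop after the first 20 places rather than exhausting every page.
for place in islice(phq.places.search(q='New York', country='US'), 20):
    print(place.id, place.name)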
e90e04cb3ed57145a89d9a8a94b329a3d3fc8432
|
lib/gen-blocks.py
|
lib/gen-blocks.py
|
#!/usr/bin/python3
# Input: https://www.unicode.org/Public/UNIDATA/Blocks.txt
import io
import re
class Builder(object):
def __init__(self):
pass
def read(self, infile):
names = []
for line in infile:
if line.startswith('#'):
continue
line = line.strip()
if len(line) == 0:
continue
range, name = line.split(';', 2)
start, end = range.split('..')
name = name.lstrip()
names.append((start, end, name))
return names
def write(self, data):
print('''\
struct Block
{
gunichar start;
gunichar end;
const char *name;
};''')
print('static const struct Block all_blocks[] =\n {')
s = ''
offset = 0
for start, end, name in data:
print(' {{ 0x{0}, 0x{1}, "{2}" }},'.format(start, end, name))
print(' };')
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(description='build')
parser.add_argument('infile', type=argparse.FileType('r'),
help='input file')
args = parser.parse_args()
builder = Builder()
# FIXME: argparse.FileType(encoding=...) is available since Python 3.4
data = builder.read(io.open(args.infile.name, encoding='utf_8_sig'))
builder.write(data)
|
Add a script for generating blocks
|
lib: Add a script for generating blocks
We'll need this too.
|
Python
|
bsd-3-clause
|
GNOME/gnome-characters,GNOME/gnome-characters,GNOME/gnome-characters,GNOME/gnome-characters,GNOME/gnome-characters
|
lib: Add a script for generating blocks
We'll need this too.
|
#!/usr/bin/python3
# Input: https://www.unicode.org/Public/UNIDATA/Blocks.txt
import io
import re
class Builder(object):
def __init__(self):
pass
def read(self, infile):
names = []
for line in infile:
if line.startswith('#'):
continue
line = line.strip()
if len(line) == 0:
continue
range, name = line.split(';', 2)
start, end = range.split('..')
name = name.lstrip()
names.append((start, end, name))
return names
def write(self, data):
print('''\
struct Block
{
gunichar start;
gunichar end;
const char *name;
};''')
print('static const struct Block all_blocks[] =\n {')
s = ''
offset = 0
for start, end, name in data:
print(' {{ 0x{0}, 0x{1}, "{2}" }},'.format(start, end, name))
print(' };')
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(description='build')
parser.add_argument('infile', type=argparse.FileType('r'),
help='input file')
args = parser.parse_args()
builder = Builder()
# FIXME: argparse.FileType(encoding=...) is available since Python 3.4
data = builder.read(io.open(args.infile.name, encoding='utf_8_sig'))
builder.write(data)
|
<commit_before><commit_msg>lib: Add a script for generating blocks
We'll need this too.<commit_after>
|
#!/usr/bin/python3
# Input: https://www.unicode.org/Public/UNIDATA/Blocks.txt
import io
import re
class Builder(object):
def __init__(self):
pass
def read(self, infile):
names = []
for line in infile:
if line.startswith('#'):
continue
line = line.strip()
if len(line) == 0:
continue
range, name = line.split(';', 2)
start, end = range.split('..')
name = name.lstrip()
names.append((start, end, name))
return names
def write(self, data):
print('''\
struct Block
{
gunichar start;
gunichar end;
const char *name;
};''')
print('static const struct Block all_blocks[] =\n {')
s = ''
offset = 0
for start, end, name in data:
print(' {{ 0x{0}, 0x{1}, "{2}" }},'.format(start, end, name))
print(' };')
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(description='build')
parser.add_argument('infile', type=argparse.FileType('r'),
help='input file')
args = parser.parse_args()
builder = Builder()
# FIXME: argparse.FileType(encoding=...) is available since Python 3.4
data = builder.read(io.open(args.infile.name, encoding='utf_8_sig'))
builder.write(data)
|
lib: Add a script for generating blocks
We'll need this too.#!/usr/bin/python3
# Input: https://www.unicode.org/Public/UNIDATA/Blocks.txt
import io
import re
class Builder(object):
def __init__(self):
pass
def read(self, infile):
names = []
for line in infile:
if line.startswith('#'):
continue
line = line.strip()
if len(line) == 0:
continue
range, name = line.split(';', 2)
start, end = range.split('..')
name = name.lstrip()
names.append((start, end, name))
return names
def write(self, data):
print('''\
struct Block
{
gunichar start;
gunichar end;
const char *name;
};''')
print('static const struct Block all_blocks[] =\n {')
s = ''
offset = 0
for start, end, name in data:
print(' {{ 0x{0}, 0x{1}, "{2}" }},'.format(start, end, name))
print(' };')
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(description='build')
parser.add_argument('infile', type=argparse.FileType('r'),
help='input file')
args = parser.parse_args()
builder = Builder()
# FIXME: argparse.FileType(encoding=...) is available since Python 3.4
data = builder.read(io.open(args.infile.name, encoding='utf_8_sig'))
builder.write(data)
|
<commit_before><commit_msg>lib: Add a script for generating blocks
We'll need this too.<commit_after>#!/usr/bin/python3
# Input: https://www.unicode.org/Public/UNIDATA/Blocks.txt
import io
import re
class Builder(object):
def __init__(self):
pass
def read(self, infile):
names = []
for line in infile:
if line.startswith('#'):
continue
line = line.strip()
if len(line) == 0:
continue
range, name = line.split(';', 2)
start, end = range.split('..')
name = name.lstrip()
names.append((start, end, name))
return names
def write(self, data):
print('''\
struct Block
{
gunichar start;
gunichar end;
const char *name;
};''')
print('static const struct Block all_blocks[] =\n {')
s = ''
offset = 0
for start, end, name in data:
print(' {{ 0x{0}, 0x{1}, "{2}" }},'.format(start, end, name))
print(' };')
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(description='build')
parser.add_argument('infile', type=argparse.FileType('r'),
help='input file')
args = parser.parse_args()
builder = Builder()
# FIXME: argparse.FileType(encoding=...) is available since Python 3.4
data = builder.read(io.open(args.infile.name, encoding='utf_8_sig'))
builder.write(data)
|
|
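Two details of the generator script above are worth flagging: import re and the locals s = '' and offset = 0 are dead code, and read() assumes each data line holds exactly one ';' (a second semicolon would make the two-name unpack raise ValueError). A quick self-check of the parser, meant as a sketch to sit next to the Builder class above; the sample lines mimic the Blocks.txt format:

import io

SAMPLE = '''\
# Blocks-10.0.0.txt
0000..007F; Basic Latin
0080..00FF; Latin-1 Supplement
'''

builder = Builder()
assert builder.read(io.StringIO(SAMPLE)) == [
    ('0000', '007F', 'Basic Latin'),
    ('0080', '00FF', 'Latin-1 Supplement'),
]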
ab077c0d2daf8af4e453384e73c830b469e56d3a
|
tests/test_rover_instance.py
|
tests/test_rover_instance.py
|
from unittest import TestCase
from rover import Rover
class TestRover(TestCase):
def setUp(self):
self.rover = Rover()
def test_rover_compass(self):
assert self.rover.compass == ['N', 'E', 'S', 'W']
|
Add failing test for rover compass
|
Add failing test for rover compass
|
Python
|
mit
|
authentik8/rover
|
Add failing test for rover compass
|
from unittest import TestCase
from rover import Rover
class TestRover(TestCase):
def setUp(self):
self.rover = Rover()
def test_rover_compass(self):
assert self.rover.compass == ['N', 'E', 'S', 'W']
|
<commit_before><commit_msg>Add failing test for rover compass<commit_after>
|
from unittest import TestCase
from rover import Rover
class TestRover(TestCase):
def setUp(self):
self.rover = Rover()
def test_rover_compass(self):
assert self.rover.compass == ['N', 'E', 'S', 'W']
|
Add failing test for rover compass
from unittest import TestCase
from rover import Rover
class TestRover(TestCase):
def setUp(self):
self.rover = Rover()
def test_rover_compass(self):
assert self.rover.compass == ['N', 'E', 'S', 'W']
|
<commit_before><commit_msg>Add failing test for rover compass<commit_after>
from unittest import TestCase
from rover import Rover
class TestRover(TestCase):
def setUp(self):
self.rover = Rover()
def test_rover_compass(self):
assert self.rover.compass == ['N', 'E', 'S', 'W']
|
|
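For completeness, the smallest Rover that turns this failing test green is just a constructor attribute; the real implementation in the repository presumably grows from here:

class Rover(object):
    def __init__(self):
        # Clockwise compass order expected by the test above.
        self.compass = ['N', 'E', 'S', 'W']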
a056f27c15d388be15740ea861007a9121c27c2c
|
mysite/senseknocker/tests.py
|
mysite/senseknocker/tests.py
|
# {{{ Imports
from mysite.base.tests import make_twill_url, TwillTests
# }}}
class Form(TwillTests):
fixtures = ['person-paulproteus.json', 'user-paulproteus.json']
def test_form_post_handler(self):
client = self.login_with_client()
json = client.post('/senseknocker/handle_form', {
'before': 'I was singing "Ave Maria" to a potful of dal.',
'expected_behavior': 'I expected the dal to be stirred.',
'actual_behavior': 'Instead, burnination.'})
# Once we have a bug tracker,
# check that the POST handler actually added a bug.
self.assertEqual(json.content, '[{"success": 1}]')
|
# {{{ Imports
from mysite.base.tests import make_twill_url, TwillTests
# }}}
class Form(TwillTests):
fixtures = ['person-paulproteus.json', 'user-paulproteus.json']
def test_form_post_handler(self):
client = self.login_with_client()
bug_data = {'before': 'I was singing "Ave Maria" to a potful of dal.',
'expected_behavior': 'I expected the dal to be stirred.',
'actual_behavior': 'Instead, burnination.'}
json = client.post('/senseknocker/handle_form', bug_data)
# Check there exists at least one bug with the given characteristics
# in the DB. (There can be more than one, hypothetically.)
self.assert_(list(senseknocker.Bug.objects.filter(
before=bug_data['before'],
expected_behavior=bug_data['expected_behavior'],
actual_behavior=bug_data['actual_behavior']
)))
self.assertEqual(json.content, '[{"success": 1}]')
|
Test that we actually create a senseknocker bug.
|
Test that we actually create a senseknocker bug.
|
Python
|
agpl-3.0
|
waseem18/oh-mainline,sudheesh001/oh-mainline,waseem18/oh-mainline,heeraj123/oh-mainline,nirmeshk/oh-mainline,Changaco/oh-mainline,vipul-sharma20/oh-mainline,vipul-sharma20/oh-mainline,Changaco/oh-mainline,waseem18/oh-mainline,ehashman/oh-mainline,ojengwa/oh-mainline,campbe13/openhatch,sudheesh001/oh-mainline,willingc/oh-mainline,Changaco/oh-mainline,jledbetter/openhatch,moijes12/oh-mainline,nirmeshk/oh-mainline,heeraj123/oh-mainline,onceuponatimeforever/oh-mainline,onceuponatimeforever/oh-mainline,eeshangarg/oh-mainline,eeshangarg/oh-mainline,campbe13/openhatch,ehashman/oh-mainline,jledbetter/openhatch,onceuponatimeforever/oh-mainline,willingc/oh-mainline,nirmeshk/oh-mainline,SnappleCap/oh-mainline,ojengwa/oh-mainline,jledbetter/openhatch,ehashman/oh-mainline,mzdaniel/oh-mainline,waseem18/oh-mainline,eeshangarg/oh-mainline,ojengwa/oh-mainline,eeshangarg/oh-mainline,ehashman/oh-mainline,jledbetter/openhatch,willingc/oh-mainline,campbe13/openhatch,Changaco/oh-mainline,ehashman/oh-mainline,vipul-sharma20/oh-mainline,vipul-sharma20/oh-mainline,heeraj123/oh-mainline,openhatch/oh-mainline,openhatch/oh-mainline,eeshangarg/oh-mainline,SnappleCap/oh-mainline,campbe13/openhatch,jledbetter/openhatch,moijes12/oh-mainline,SnappleCap/oh-mainline,onceuponatimeforever/oh-mainline,mzdaniel/oh-mainline,mzdaniel/oh-mainline,waseem18/oh-mainline,moijes12/oh-mainline,heeraj123/oh-mainline,willingc/oh-mainline,onceuponatimeforever/oh-mainline,sudheesh001/oh-mainline,ojengwa/oh-mainline,mzdaniel/oh-mainline,openhatch/oh-mainline,ojengwa/oh-mainline,moijes12/oh-mainline,openhatch/oh-mainline,moijes12/oh-mainline,mzdaniel/oh-mainline,nirmeshk/oh-mainline,campbe13/openhatch,openhatch/oh-mainline,mzdaniel/oh-mainline,nirmeshk/oh-mainline,mzdaniel/oh-mainline,vipul-sharma20/oh-mainline,sudheesh001/oh-mainline,willingc/oh-mainline,SnappleCap/oh-mainline,Changaco/oh-mainline,heeraj123/oh-mainline,SnappleCap/oh-mainline,sudheesh001/oh-mainline
|
# {{{ Imports
from mysite.base.tests import make_twill_url, TwillTests
# }}}
class Form(TwillTests):
fixtures = ['person-paulproteus.json', 'user-paulproteus.json']
def test_form_post_handler(self):
client = self.login_with_client()
json = client.post('/senseknocker/handle_form', {
'before': 'I was singing "Ave Maria" to a potful of dal.',
'expected_behavior': 'I expected the dal to be stirred.',
'actual_behavior': 'Instead, burnination.'})
# Once we have a bug tracker,
# check that the POST handler actually added a bug.
self.assertEqual(json.content, '[{"success": 1}]')
Test that we actually create a senseknocker bug.
|
# {{{ Imports
from mysite.base.tests import make_twill_url, TwillTests
# }}}
class Form(TwillTests):
fixtures = ['person-paulproteus.json', 'user-paulproteus.json']
def test_form_post_handler(self):
client = self.login_with_client()
bug_data = {'before': 'I was singing "Ave Maria" to a potful of dal.',
'expected_behavior': 'I expected the dal to be stirred.',
'actual_behavior': 'Instead, burnination.'}
json = client.post('/senseknocker/handle_form', bug_data)
# Check there exists at least one bug with the given characteristics
# in the DB. (There can be more than one, hypothetically.)
self.assert_(list(senseknocker.Bug.objects.filter(
before=bug_data['before'],
expected_behavior=bug_data['expected_behavior'],
actual_behavior=bug_data['actual_behavior']
)))
self.assertEqual(json.content, '[{"success": 1}]')
|
<commit_before># {{{ Imports
from mysite.base.tests import make_twill_url, TwillTests
# }}}
class Form(TwillTests):
fixtures = ['person-paulproteus.json', 'user-paulproteus.json']
def test_form_post_handler(self):
client = self.login_with_client()
json = client.post('/senseknocker/handle_form', {
'before': 'I was singing "Ave Maria" to a potful of dal.',
'expected_behavior': 'I expected the dal to be stirred.',
'actual_behavior': 'Instead, burnination.'})
# Once we have a bug tracker,
# check that the POST handler actually added a bug.
self.assertEqual(json.content, '[{"success": 1}]')
<commit_msg>Test that we actually create a senseknocker bug.<commit_after>
|
# {{{ Imports
from mysite.base.tests import make_twill_url, TwillTests
# }}}
class Form(TwillTests):
fixtures = ['person-paulproteus.json', 'user-paulproteus.json']
def test_form_post_handler(self):
client = self.login_with_client()
bug_data = {'before': 'I was singing "Ave Maria" to a potful of dal.',
'expected_behavior': 'I expected the dal to be stirred.',
'actual_behavior': 'Instead, burnination.'}
json = client.post('/senseknocker/handle_form', bug_data)
# Check there exists at least one bug with the given characteristics
# in the DB. (There can be more than one, hypothetically.)
self.assert_(list(senseknocker.Bug.objects.filter(
before=bug_data['before'],
expected_behavior=bug_data['expected_behavior'],
actual_behavior=bug_data['actual_behavior']
)))
self.assertEqual(json.content, '[{"success": 1}]')
|
# {{{ Imports
from mysite.base.tests import make_twill_url, TwillTests
# }}}
class Form(TwillTests):
fixtures = ['person-paulproteus.json', 'user-paulproteus.json']
def test_form_post_handler(self):
client = self.login_with_client()
json = client.post('/senseknocker/handle_form', {
'before': 'I was singing "Ave Maria" to a potful of dal.',
'expected_behavior': 'I expected the dal to be stirred.',
'actual_behavior': 'Instead, burnination.'})
# Once we have a bug tracker,
# check that the POST handler actually added a bug.
self.assertEqual(json.content, '[{"success": 1}]')
Test that we actually create a senseknocker bug.# {{{ Imports
from mysite.base.tests import make_twill_url, TwillTests
# }}}
class Form(TwillTests):
fixtures = ['person-paulproteus.json', 'user-paulproteus.json']
def test_form_post_handler(self):
client = self.login_with_client()
bug_data = {'before': 'I was singing "Ave Maria" to a potful of dal.',
'expected_behavior': 'I expected the dal to be stirred.',
'actual_behavior': 'Instead, burnination.'}
json = client.post('/senseknocker/handle_form', bug_data)
# Check there exists at least one bug with the given characteristics
# in the DB. (There can be more than one, hypothetically.)
self.assert_(list(senseknocker.Bug.objects.filter(
before=bug_data['before'],
expected_behavior=bug_data['expected_behavior'],
actual_behavior=bug_data['actual_behavior']
)))
self.assertEqual(json.content, '[{"success": 1}]')
|
<commit_before># {{{ Imports
from mysite.base.tests import make_twill_url, TwillTests
# }}}
class Form(TwillTests):
fixtures = ['person-paulproteus.json', 'user-paulproteus.json']
def test_form_post_handler(self):
client = self.login_with_client()
json = client.post('/senseknocker/handle_form', {
'before': 'I was singing "Ave Maria" to a potful of dal.',
'expected_behavior': 'I expected the dal to be stirred.',
'actual_behavior': 'Instead, burnination.'})
# Once we have a bug tracker,
# check that the POST handler actually added a bug.
self.assertEqual(json.content, '[{"success": 1}]')
<commit_msg>Test that we actually create a senseknocker bug.<commit_after># {{{ Imports
from mysite.base.tests import make_twill_url, TwillTests
# }}}
class Form(TwillTests):
fixtures = ['person-paulproteus.json', 'user-paulproteus.json']
def test_form_post_handler(self):
client = self.login_with_client()
bug_data = {'before': 'I was singing "Ave Maria" to a potful of dal.',
'expected_behavior': 'I expected the dal to be stirred.',
'actual_behavior': 'Instead, burnination.'}
json = client.post('/senseknocker/handle_form', bug_data)
# Check there exists at least one bug with the given characteristics
# in the DB. (There can be more than one, hypothetically.)
self.assert_(list(senseknocker.Bug.objects.filter(
before=bug_data['before'],
expected_behavior=bug_data['expected_behavior'],
actual_behavior=bug_data['actual_behavior']
)))
self.assertEqual(json.content, '[{"success": 1}]')
|
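One catch in the updated test: senseknocker.Bug is referenced, but no senseknocker name is ever imported, so the assertion raises NameError as written. The fix is a one-line import; the exact module path below is an assumption about the project layout (a mysite/senseknocker/models.py defining Bug), not something the diff confirms:

# {{{ Imports
from mysite.base.tests import make_twill_url, TwillTests
from mysite.senseknocker import models as senseknocker  # provides senseknocker.Bug
# }}}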
56f2fc4c1244c0d0bbe91158988be67d9f596542
|
vm.py
|
vm.py
|
#!/usr/bin/env python
"""
Synacor challenge for OSCON 2012.
Architecture description in file arch-spec.
"""
import struct
import sys
# Memory with 15-bit address space storing 16-bit numbers.
# Stored as a list indexed by address.
MEM = []
# 8 (16-bit) Registers
REGS = [i for i in xrange(8)]
# Unbounded stack
STACK = []
def run():
"""Run application from memory."""
offset = 0
while True:
op = MEM[offset]
def main():
"""Run this as ./vm.py <inputfile>."""
try:
infile = sys.argv[1]
except IndexError:
sys.exit(main.__doc__)
# Read input file into memory.
with open(infile, 'rb') as f:
# Read 16 bits at a time.
chunk = f.read(2)
while chunk != '':
MEM.append(struct.unpack('<H', chunk)[0])
chunk = f.read(2)
# Run it
if __name__ == '__main__':
main()
|
Read input file into "memory"
|
Read input file into "memory"
|
Python
|
mit
|
fwenzel/synacor-challenge,fwenzel/synacor-challenge
|
Read input file into "memory"
|
#!/usr/bin/env python
"""
Synacor challenge for OSCON 2012.
Architecture description in file arch-spec.
"""
import struct
import sys
# Memory with 15-bit address space storing 16-bit numbers.
# Stored as a list indexed by address.
MEM = []
# 8 (16-bit) Registers
REGS = [i for i in xrange(8)]
# Unbounded stack
STACK = []
def run():
"""Run application from memory."""
offset = 0
while True:
op = MEM[offset]
def main():
"""Run this as ./vm.py <inputfile>."""
try:
infile = sys.argv[1]
except IndexError:
sys.exit(main.__doc__)
# Read input file into memory.
with open(infile, 'rb') as f:
# Read 16 bits at a time.
chunk = f.read(2)
while chunk != '':
MEM.append(struct.unpack('<H', chunk)[0])
chunk = f.read(2)
# Run it
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Read input file into "memory"<commit_after>
|
#!/usr/bin/env python
"""
Synacor challenge for OSCON 2012.
Architecture description in file arch-spec.
"""
import struct
import sys
# Memory with 15-bit address space storing 16-bit numbers.
# Stored as a list indexed by address.
MEM = []
# 8 (16-bit) Registers
REGS = [i for i in xrange(8)]
# Unbounded stack
STACK = []
def run():
"""Run application from memory."""
offset = 0
while True:
op = MEM[offset]
def main():
"""Run this as ./vm.py <inputfile>."""
try:
infile = sys.argv[1]
except IndexError:
sys.exit(main.__doc__)
# Read input file into memory.
with open(infile, 'rb') as f:
# Read 16 bits at a time.
chunk = f.read(2)
while chunk != '':
MEM.append(struct.unpack('<H', chunk)[0])
chunk = f.read(2)
# Run it
if __name__ == '__main__':
main()
|
Read input file into "memory"#!/usr/bin/env python
"""
Synacor challenge for OSCON 2012.
Architecture description in file arch-spec.
"""
import struct
import sys
# Memory with 15-bit address space storing 16-bit numbers.
# Stored as a list indexed by address.
MEM = []
# 8 (16-bit) Registers
REGS = [i for i in xrange(8)]
# Unbounded stack
STACK = []
def run():
"""Run application from memory."""
offset = 0
while True:
op = MEM[offset]
def main():
"""Run this as ./vm.py <inputfile>."""
try:
infile = sys.argv[1]
except IndexError:
sys.exit(main.__doc__)
# Read input file into memory.
with open(infile, 'rb') as f:
# Read 16 bits at a time.
chunk = f.read(2)
while chunk != '':
MEM.append(struct.unpack('<H', chunk)[0])
chunk = f.read(2)
# Run it
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Read input file into "memory"<commit_after>#!/usr/bin/env python
"""
Synacor challenge for OSCON 2012.
Architecture description in file arch-spec.
"""
import struct
import sys
# Memory with 15-bit address space storing 16-bit numbers.
# Stored as a list indexed by address.
MEM = []
# 8 (16-bit) Registers
REGS = [i for i in xrange(8)]
# Unbounded stack
STACK = []
def run():
"""Run application from memory."""
offset = 0
while True:
op = MEM[offset]
def main():
"""Run this as ./vm.py <inputfile>."""
try:
infile = sys.argv[1]
except IndexError:
sys.exit(main.__doc__)
# Read input file into memory.
with open(infile, 'rb') as f:
# Read 16 bits at a time.
chunk = f.read(2)
while chunk != '':
MEM.append(struct.unpack('<H', chunk)[0])
chunk = f.read(2)
# Run it
if __name__ == '__main__':
main()
|
|
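The run() loop above fetches an opcode and then stalls: offset never advances, nothing is dispatched, and main() never calls run(). A minimal dispatch for three opcodes shows the shape; the numbers follow the challenge's published arch-spec (0 = halt, 19 = out, 21 = noop), which is an assumption the snippet itself does not state. Note also that REGS = [i for i in xrange(8)] seeds the registers with 0..7, where [0] * 8 is the more likely intent.

def run():
    # Fetch-decode loop for halt, out and noop only (a sketch).
    offset = 0
    while True:
        op = MEM[offset]
        if op == 0:            # halt: stop execution
            return
        elif op == 19:         # out a: write character a to stdout
            sys.stdout.write(chr(MEM[offset + 1]))
            offset += 2
        elif op == 21:         # noop
            offset += 1
        else:
            raise NotImplementedError('opcode %d' % op)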
b8d488fe854794ee1a6354536dee677b175dea9d
|
003.py
|
003.py
|
"""
Project Euler Problem 3
=======================
The prime factors of 13195 are 5, 7, 13 and 29.
What is the largest prime factor of the number 600851475143?
"""
def highest_prime_factor(number):
"""
Takes a number and returns its highest prime factor.
"""
prime_factor = 1
for i in range(2, int(number ** 0.5) + 1):
if number % i == 0 and is_prime(i):
prime_factor = i
return prime_factor
def is_prime(number):
"""
Takes a number and returns True if it's a prime number, otherwise returns False.
"""
if number == 2 or number == 3:
return True
if number <= 1 or number % 2 == 0 or number % 3 == 0:
return False
for i in range(2, int(number ** 0.5) + 1):
if number % i == 0:
return False
return True
def test_highest_prime_factor():
assert highest_prime_factor(13195) == 29
def test_is_prime_returns_true_for_primes():
for prime in [2, 3, 5, 7, 11, 13, 29]:
assert is_prime(prime)
def test_is_prime_returns_false_for_not_prime():
for not_prime in [4, 9, 15]:
assert not is_prime(not_prime)
def test_is_prime_returns_false_for_negative():
assert not is_prime(-5)
def test_is_prime_returns_false_for_zero():
assert not is_prime(0)
print(highest_prime_factor(600851475143))
|
Add solution and unit tests for problem 3
|
Add solution and unit tests for problem 3
|
Python
|
mit
|
BeataBak/project-euler-problems
|
Add solution and unit tests for problem 3
|
"""
Project Euler Problem 3
=======================
The prime factors of 13195 are 5, 7, 13 and 29.
What is the largest prime factor of the number 600851475143?
"""
def highest_prime_factor(number):
"""
Takes a number and returns its highest prime factor.
"""
prime_factor = 1
for i in range(2, int(number ** 0.5) + 1):
if number % i == 0 and is_prime(i):
prime_factor = i
return prime_factor
def is_prime(number):
"""
Takes a number and returns True if it's a prime number, otherwise returns False.
"""
if number == 2 or number == 3:
return True
if number <= 1 or number % 2 == 0 or number % 3 == 0:
return False
for i in range(2, int(number ** 0.5) + 1):
if number % i == 0:
return False
return True
def test_highest_prime_factor():
assert highest_prime_factor(13195) == 29
def test_is_prime_returns_true_for_primes():
for prime in [2, 3, 5, 7, 11, 13, 29]:
assert is_prime(prime)
def test_is_prime_returns_false_for_not_prime():
for not_prime in [4, 9, 15]:
assert not is_prime(not_prime)
def test_is_prime_returns_false_for_negative():
assert not is_prime(-5)
def test_is_prime_returns_false_for_zero():
assert not is_prime(0)
print(highest_prime_factor(600851475143))
|
<commit_before><commit_msg>Add solution and unit tests for problem 3<commit_after>
|
"""
Project Euler Problem 3
=======================
The prime factors of 13195 are 5, 7, 13 and 29.
What is the largest prime factor of the number 600851475143?
"""
def highest_prime_factor(number):
"""
Takes a number and returns its highest prime factor.
"""
prime_factor = 1
for i in range(2, int(number ** 0.5) + 1):
if number % i == 0 and is_prime(i):
prime_factor = i
return prime_factor
def is_prime(number):
"""
Takes a number and returns True if it's a prime number, otherwise returns False.
"""
if number == 2 or number == 3:
return True
if number <= 1 or number % 2 == 0 or number % 3 == 0:
return False
for i in range(2, int(number ** 0.5) + 1):
if number % i == 0:
return False
return True
def test_highest_prime_factor():
assert highest_prime_factor(13195) == 29
def test_is_prime_returns_true_for_primes():
for prime in [2, 3, 5, 7, 11, 13, 29]:
assert is_prime(prime)
def test_is_prime_returns_false_for_not_prime():
for not_prime in [4, 9, 15]:
assert not is_prime(not_prime)
def test_is_prime_returns_false_for_negative():
assert not is_prime(-5)
def test_is_prime_returns_false_for_zero():
assert not is_prime(0)
print(highest_prime_factor(600851475143))
|
Add solution and unit tests for problem 3"""
Project Euler Problem 3
=======================
The prime factors of 13195 are 5, 7, 13 and 29.
What is the largest prime factor of the number 600851475143?
"""
def highest_prime_factor(number):
"""
Takes a number and returns its highest prime factor.
"""
prime_factor = 1
for i in range(2, int(number ** 0.5) + 1):
if number % i == 0 and is_prime(i):
prime_factor = i
return prime_factor
def is_prime(number):
"""
Takes a number and returns True if it's a prime number, otherwise returns False.
"""
if number == 2 or number == 3:
return True
if number <= 1 or number % 2 == 0 or number % 3 == 0:
return False
for i in range(2, int(number ** 0.5) + 1):
if number % i == 0:
return False
return True
def test_highest_prime_factor():
assert highest_prime_factor(13195) == 29
def test_is_prime_returns_true_for_primes():
for prime in [2, 3, 5, 7, 11, 13, 29]:
assert is_prime(prime)
def test_is_prime_returns_false_for_not_prime():
for not_prime in [4, 9, 15]:
assert not is_prime(not_prime)
def test_is_prime_returns_false_for_negative():
assert not is_prime(-5)
def test_is_prime_returns_false_for_zero():
assert not is_prime(0)
print(highest_prime_factor(600851475143))
|
<commit_before><commit_msg>Add solution and unit tests for problem 3<commit_after>"""
Project Euler Problem 3
=======================
The prime factors of 13195 are 5, 7, 13 and 29.
What is the largest prime factor of the number 600851475143?
"""
def highest_prime_factor(number):
"""
Takes a number and returns its highest prime factor.
"""
prime_factor = 1
for i in range(2, int(number ** 0.5) + 1):
if number % i == 0 and is_prime(i):
prime_factor = i
return prime_factor
def is_prime(number):
"""
Takes a number and returns True if it's a prime number, otherwise returns False.
"""
if number == 2 or number == 3:
return True
if number <= 1 or number % 2 == 0 or number % 3 == 0:
return False
for i in range(2, int(number ** 0.5) + 1):
if number % i == 0:
return False
return True
def test_highest_prime_factor():
assert highest_prime_factor(13195) == 29
def test_is_prime_returns_true_for_primes():
for prime in [2, 3, 5, 7, 11, 13, 29]:
assert is_prime(prime)
def test_is_prime_returns_false_for_not_prime():
for not_prime in [4, 9, 15]:
assert not is_prime(not_prime)
def test_is_prime_returns_false_for_negative():
assert not is_prime(-5)
def test_is_prime_returns_false_for_zero():
assert not is_prime(0)
print(highest_prime_factor(600851475143))
|
|
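One caveat about the solution above: the loop only tries divisors up to sqrt(number), so a prime factor larger than the square root is never seen; highest_prime_factor(10) returns 2 instead of 5, and the posted answer is only right because every prime factor of 600851475143 happens to fall below its square root. A factor-stripping variant avoids the gap:

def largest_prime_factor(n):
    # Divide out each factor as it is found; any remainder above 1 is a
    # prime larger than the square root of the reduced n, i.e. the answer.
    d, largest = 2, 1
    while d * d <= n:
        while n % d == 0:
            largest = d
            n //= d
        d += 1
    return n if n > 1 else largest

assert largest_prime_factor(10) == 5
assert largest_prime_factor(13195) == 29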
4a1d72300df95c666971c14d94bf77b693581d15
|
problem_42.py
|
problem_42.py
|
from time import time
from itertools import permutations
DIGITS = '1234567890'
DIVS = [2, 3, 5, 7, 11, 13, 17]
def check_divs(pandigital):
for i in range(1, 8):
if int(pandigital[i:i+3]) % DIVS[i-1]:
return False
return True
def main():
print sum(
[int(''.join(p)) for p in permutations(DIGITS) if check_divs(''.join(p))]
)
if __name__ == '__main__':
t = time()
main()
print 'Time:', time() - t
|
Add problem 42, pandigital substrings divisible by first primes
|
Add problem 42, pandigital substrings divisible by first primes
|
Python
|
mit
|
dimkarakostas/project-euler
|
Add problem 42, pandigital substrings divisible by first primes
|
from time import time
from itertools import permutations
DIGITS = '1234567890'
DIVS = [2, 3, 5, 7, 11, 13, 17]
def check_divs(pandigital):
for i in range(1, 8):
if int(pandigital[i:i+3]) % DIVS[i-1]:
return False
return True
def main():
print sum(
[int(''.join(p)) for p in permutations(DIGITS) if check_divs(''.join(p))]
)
if __name__ == '__main__':
t = time()
main()
print 'Time:', time() - t
|
<commit_before><commit_msg>Add problem 42, pandigital substrings divisible by first primes<commit_after>
|
from time import time
from itertools import permutations
DIGITS = '1234567890'
DIVS = [2, 3, 5, 7, 11, 13, 17]
def check_divs(pandigital):
for i in range(1, 8):
if int(pandigital[i:i+3]) % DIVS[i-1]:
return False
return True
def main():
print sum(
[int(''.join(p)) for p in permutations(DIGITS) if check_divs(''.join(p))]
)
if __name__ == '__main__':
t = time()
main()
print 'Time:', time() - t
|
Add problem 42, pandigital substrings divisible by first primesfrom time import time
from itertools import permutations
DIGITS = '1234567890'
DIVS = [2, 3, 5, 7, 11, 13, 17]
def check_divs(pandigital):
for i in range(1, 8):
if int(pandigital[i:i+3]) % DIVS[i-1]:
return False
return True
def main():
print sum(
[int(''.join(p)) for p in permutations(DIGITS) if check_divs(''.join(p))]
)
if __name__ == '__main__':
t = time()
main()
print 'Time:', time() - t
|
<commit_before><commit_msg>Add problem 42, pandigital substrings divisible by first primes<commit_after>from time import time
from itertools import permutations
DIGITS = '1234567890'
DIVS = [2, 3, 5, 7, 11, 13, 17]
def check_divs(pandigital):
for i in range(1, 8):
if int(pandigital[i:i+3]) % DIVS[i-1]:
return False
return True
def main():
print sum(
[int(''.join(p)) for p in permutations(DIGITS) if check_divs(''.join(p))]
)
if __name__ == '__main__':
t = time()
main()
print 'Time:', time() - t
|
|
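Two small notes on the solution above: the puzzle described (pandigital substring divisibility) is the one Project Euler numbers 43, and the list comprehension joins every permutation twice. Joining once per permutation roughly halves the string work; a sketch in the same Python 2 style, dropped in next to the definitions above:

def main():
    total = 0
    for p in permutations(DIGITS):
        s = ''.join(p)  # build the string once, reuse for check and sum
        if check_divs(s):
            total += int(s)
    print total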
41b65cc8b8cf718373209ab66710dc66ecc8b66c
|
options/script/jiesuan_parameter.py
|
options/script/jiesuan_parameter.py
|
#!/usr/bin/python
# coding: utf-8
import sys
import urllib2
import json
import time
from datetime import datetime
CURSOR='o_cursor'
RD='report_date'
UD='update_date'
CU='COMMODITYDELIVFEEUNIT'
HL='HEDGLONGMARGINRATIO'
HS='HEDGSHORTMARGINRATIO'
IID='INSTRUMENTID'
SP='SETTLEMENTPRICE'
SL='SPECLONGMARGINRATIO'
SS='SPECSHORTMARGINRATIO'
TR='TRADEFEERATIO'
TU='TRADEFEEUNIT'
TODAY=datetime.today().strftime('%Y%m%d')
def _getOrElse(d,k,e):
res=[]
if not isinstance(k,list):
if d.has_key(k) and len(('%s'%d[k]).strip())>0:
return ('%s'%d[k]).strip().encode('utf8')
else:
return e
else:
for key in k:
res.append(_getOrElse(d,key,e))
return res
def getDataOnDate(ts):
alldata=[]
t=0
while True:
try:
time.sleep(3)
url=urllib2.urlopen('http://www.shfe.com.cn/data/dailydata/js/js%s.dat'%ts)
data=url.read()
js=json.loads(data,parse_float=None,parse_int=None)
for ele in js[CURSOR]:
tmplist=_getOrElse(ele,[IID,SP,TR,TU,CU,HL,HS,SL,SS],'None')
tmplist.append(ts)
tmplist.append(js[RD])
tmplist.append(js[UD])
tmplist.append(TODAY)
alldata.append(tmplist)
break
except urllib2.HTTPError as e:
print '%s->No data on date:%s'%(e.code,ts)
break
except Exception as e:
print 'Error when get data on date:%s\n%s'%(ts,e)
t+=60
time.sleep(t)
return alldata
if __name__=='__main__':
with file(sys.argv[2],'a') as output:
alldata=getDataOnDate(sys.argv[1])
for prod in alldata:
output.write('\001'.join(prod))
output.write('\n')
|
Add script for jiesuan parameters
|
Add script for jiesuan parameters
|
Python
|
apache-2.0
|
happy6666/stockStrategies,happy6666/stockStrategies
|
Add script for jiesuan parameters
|
#!/usr/bin/python
# coding: utf-8
import sys
import urllib2
import json
import time
from datetime import datetime
CURSOR='o_cursor'
RD='report_date'
UD='update_date'
CU='COMMODITYDELIVFEEUNIT'
HL='HEDGLONGMARGINRATIO'
HS='HEDGSHORTMARGINRATIO'
IID='INSTRUMENTID'
SP='SETTLEMENTPRICE'
SL='SPECLONGMARGINRATIO'
SS='SPECSHORTMARGINRATIO'
TR='TRADEFEERATIO'
TU='TRADEFEEUNIT'
TODAY=datetime.today().strftime('%Y%m%d')
def _getOrElse(d,k,e):
res=[]
if not isinstance(k,list):
if d.has_key(k) and len(('%s'%d[k]).strip())>0:
return ('%s'%d[k]).strip().encode('utf8')
else:
return e
else:
for key in k:
res.append(_getOrElse(d,key,e))
return res
def getDataOnDate(ts):
alldata=[]
t=0
while True:
try:
time.sleep(3)
url=urllib2.urlopen('http://www.shfe.com.cn/data/dailydata/js/js%s.dat'%ts)
data=url.read()
js=json.loads(data,parse_float=None,parse_int=None)
for ele in js[CURSOR]:
tmplist=_getOrElse(ele,[IID,SP,TR,TU,CU,HL,HS,SL,SS],'None')
tmplist.append(ts)
tmplist.append(js[RD])
tmplist.append(js[UD])
tmplist.append(TODAY)
alldata.append(tmplist)
break
except urllib2.HTTPError as e:
print '%s->No data on date:%s'%(e.code,ts)
break
except Exception as e:
print 'Error when get data on date:%s\n%s'%(ts,e)
t+=60
time.sleep(t)
return alldata
if __name__=='__main__':
with file(sys.argv[2],'a') as output:
alldata=getDataOnDate(sys.argv[1])
for prod in alldata:
output.write('\001'.join(prod))
output.write('\n')
|
<commit_before><commit_msg>Add script for jiesuan parameters<commit_after>
|
#!/usr/bin/python
# coding: utf-8
import sys
import urllib2
import json
import time
from datetime import datetime
CURSOR='o_cursor'
RD='report_date'
UD='update_date'
CU='COMMODITYDELIVFEEUNIT'
HL='HEDGLONGMARGINRATIO'
HS='HEDGSHORTMARGINRATIO'
IID='INSTRUMENTID'
SP='SETTLEMENTPRICE'
SL='SPECLONGMARGINRATIO'
SS='SPECSHORTMARGINRATIO'
TR='TRADEFEERATIO'
TU='TRADEFEEUNIT'
TODAY=datetime.today().strftime('%Y%m%d')
def _getOrElse(d,k,e):
res=[]
if not isinstance(k,list):
if d.has_key(k) and len(('%s'%d[k]).strip())>0:
return ('%s'%d[k]).strip().encode('utf8')
else:
return e
else:
for key in k:
res.append(_getOrElse(d,key,e))
return res
def getDataOnDate(ts):
alldata=[]
t=0
while True:
try:
time.sleep(3)
url=urllib2.urlopen('http://www.shfe.com.cn/data/dailydata/js/js%s.dat'%ts)
data=url.read()
js=json.loads(data,parse_float=None,parse_int=None)
for ele in js[CURSOR]:
tmplist=_getOrElse(ele,[IID,SP,TR,TU,CU,HL,HS,SL,SS],'None')
tmplist.append(ts)
tmplist.append(js[RD])
tmplist.append(js[UD])
tmplist.append(TODAY)
alldata.append(tmplist)
break
except urllib2.HTTPError as e:
print '%s->No data on date:%s'%(e.code,ts)
break
except Exception as e:
print 'Error when get data on date:%s\n%s'%(ts,e)
t+=60
time.sleep(t)
return alldata
if __name__=='__main__':
with file(sys.argv[2],'a') as output:
alldata=getDataOnDate(sys.argv[1])
for prod in alldata:
output.write('\001'.join(prod))
output.write('\n')
|
Add script for jiesuan parameters#!/usr/bin/python
# coding: utf-8
import sys
import urllib2
import json
import time
from datetime import datetime
CURSOR='o_cursor'
RD='report_date'
UD='update_date'
CU='COMMODITYDELIVFEEUNIT'
HL='HEDGLONGMARGINRATIO'
HS='HEDGSHORTMARGINRATIO'
IID='INSTRUMENTID'
SP='SETTLEMENTPRICE'
SL='SPECLONGMARGINRATIO'
SS='SPECSHORTMARGINRATIO'
TR='TRADEFEERATIO'
TU='TRADEFEEUNIT'
TODAY=datetime.today().strftime('%Y%m%d')
def _getOrElse(d,k,e):
res=[]
if not isinstance(k,list):
if d.has_key(k) and len(('%s'%d[k]).strip())>0:
return ('%s'%d[k]).strip().encode('utf8')
else:
return e
else:
for key in k:
res.append(_getOrElse(d,key,e))
return res
def getDataOnDate(ts):
alldata=[]
t=0
while True:
try:
time.sleep(3)
url=urllib2.urlopen('http://www.shfe.com.cn/data/dailydata/js/js%s.dat'%ts)
data=url.read()
js=json.loads(data,parse_float=None,parse_int=None)
for ele in js[CURSOR]:
tmplist=_getOrElse(ele,[IID,SP,TR,TU,CU,HL,HS,SL,SS],'None')
tmplist.append(ts)
tmplist.append(js[RD])
tmplist.append(js[UD])
tmplist.append(TODAY)
alldata.append(tmplist)
break
except urllib2.HTTPError as e:
print '%s->No data on date:%s'%(e.code,ts)
break
except Exception as e:
print 'Error when get data on date:%s\n%s'%(ts,e)
t+=60
time.sleep(t)
return alldata
if __name__=='__main__':
with file(sys.argv[2],'a') as output:
alldata=getDataOnDate(sys.argv[1])
for prod in alldata:
output.write('\001'.join(prod))
output.write('\n')
|
<commit_before><commit_msg>Add script for jiesuan parameters<commit_after>#!/usr/bin/python
# coding: utf-8
import sys
import urllib2
import json
import time
from datetime import datetime
CURSOR='o_cursor'
RD='report_date'
UD='update_date'
CU='COMMODITYDELIVFEEUNIT'
HL='HEDGLONGMARGINRATIO'
HS='HEDGSHORTMARGINRATIO'
IID='INSTRUMENTID'
SP='SETTLEMENTPRICE'
SL='SPECLONGMARGINRATIO'
SS='SPECSHORTMARGINRATIO'
TR='TRADEFEERATIO'
TU='TRADEFEEUNIT'
TODAY=datetime.today().strftime('%Y%m%d')
def _getOrElse(d,k,e):
res=[]
if not isinstance(k,list):
if d.has_key(k) and len(('%s'%d[k]).strip())>0:
return ('%s'%d[k]).strip().encode('utf8')
else:
return e
else:
for key in k:
res.append(_getOrElse(d,key,e))
return res
def getDataOnDate(ts):
alldata=[]
t=0
while True:
try:
time.sleep(3)
url=urllib2.urlopen('http://www.shfe.com.cn/data/dailydata/js/js%s.dat'%ts)
data=url.read()
js=json.loads(data,parse_float=None,parse_int=None)
for ele in js[CURSOR]:
tmplist=_getOrElse(ele,[IID,SP,TR,TU,CU,HL,HS,SL,SS],'None')
tmplist.append(ts)
tmplist.append(js[RD])
tmplist.append(js[UD])
tmplist.append(TODAY)
alldata.append(tmplist)
break
except urllib2.HTTPError as e:
print '%s->No data on date:%s'%(e.code,ts)
break
except Exception as e:
print 'Error when get data on date:%s\n%s'%(ts,e)
t+=60
time.sleep(t)
return alldata
if __name__=='__main__':
with file(sys.argv[2],'a') as output:
alldata=getDataOnDate(sys.argv[1])
for prod in alldata:
output.write('\001'.join(prod))
output.write('\n')
|
|
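The retry loop in getDataOnDate backs off 60 seconds longer after every non-HTTP failure and never gives up. A bounded variant is usually kinder to both ends; this is a sketch reusing the urllib2/time imports above, and MAX_RETRIES is a made-up knob:

MAX_RETRIES = 5

def fetch_dat(ts):
    for attempt in range(MAX_RETRIES):
        try:
            time.sleep(3)
            url = urllib2.urlopen(
                'http://www.shfe.com.cn/data/dailydata/js/js%s.dat' % ts)
            return url.read()
        except urllib2.HTTPError:
            return None                      # exchange published no data for ts
        except Exception:
            time.sleep(60 * (attempt + 1))   # linear backoff, then retry
    return None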
00bf4796086fce1991578dc0db870cb7ce0e2011
|
corehq/apps/reports/management/commands/find_saved_reports_with_slug.py
|
corehq/apps/reports/management/commands/find_saved_reports_with_slug.py
|
from __future__ import absolute_import, print_function
from django.conf import settings
from dimagi.utils.couch.cache import cache_core
from corehq.apps.reports.models import ReportConfig
from django.core.management import BaseCommand
class Command(BaseCommand):
def add_arguments(self, parser):
parser.add_argument('report_slug')
def handle(self, report_slug, *args, **options):
kwargs = {'stale': settings.COUCH_STALE_QUERY}
key = ["name slug"]
result = cache_core.cached_view(
ReportConfig.get_db(),
"reportconfig/configs_by_domain",
reduce=False, include_docs=False,
startkey=key, endkey=key + [{}],
**kwargs)
for report_config in result:
domain, owner_id, slug = report_config['key'][1:4]
if slug == report_slug:
print("%s, %s, %s" % (
domain, owner_id, slug
))
|
Add command to find saved reports with slug
|
Add command to find saved reports with slug[skip ci]
|
Python
|
bsd-3-clause
|
dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq
|
Add command to find saved reports with slug[skip ci]
|
from __future__ import absolute_import, print_function
from django.conf import settings
from dimagi.utils.couch.cache import cache_core
from corehq.apps.reports.models import ReportConfig
from django.core.management import BaseCommand
class Command(BaseCommand):
def add_arguments(self, parser):
parser.add_argument('report_slug')
def handle(self, report_slug, *args, **options):
kwargs = {'stale': settings.COUCH_STALE_QUERY}
key = ["name slug"]
result = cache_core.cached_view(
ReportConfig.get_db(),
"reportconfig/configs_by_domain",
reduce=False, include_docs=False,
startkey=key, endkey=key + [{}],
**kwargs)
for report_config in result:
domain, owner_id, slug = report_config['key'][1:4]
if slug == report_slug:
print("%s, %s, %s" % (
domain, owner_id, slug
))
|
<commit_before><commit_msg>Add command to find saved reports with slug[skip ci]<commit_after>
|
from __future__ import absolute_import, print_function
from django.conf import settings
from dimagi.utils.couch.cache import cache_core
from corehq.apps.reports.models import ReportConfig
from django.core.management import BaseCommand
class Command(BaseCommand):
def add_arguments(self, parser):
parser.add_argument('report_slug')
def handle(self, report_slug, *args, **options):
kwargs = {'stale': settings.COUCH_STALE_QUERY}
key = ["name slug"]
result = cache_core.cached_view(
ReportConfig.get_db(),
"reportconfig/configs_by_domain",
reduce=False, include_docs=False,
startkey=key, endkey=key + [{}],
**kwargs)
for report_config in result:
domain, owner_id, slug = report_config['key'][1:4]
if slug == report_slug:
print("%s, %s, %s" % (
domain, owner_id, slug
))
|
Add command to find saved reports with slug[skip ci]from __future__ import absolute_import, print_function
from django.conf import settings
from dimagi.utils.couch.cache import cache_core
from corehq.apps.reports.models import ReportConfig
from django.core.management import BaseCommand
class Command(BaseCommand):
def add_arguments(self, parser):
parser.add_argument('report_slug')
def handle(self, report_slug, *args, **options):
kwargs = {'stale': settings.COUCH_STALE_QUERY}
key = ["name slug"]
result = cache_core.cached_view(
ReportConfig.get_db(),
"reportconfig/configs_by_domain",
reduce=False, include_docs=False,
startkey=key, endkey=key + [{}],
**kwargs)
for report_config in result:
domain, owner_id, slug = report_config['key'][1:4]
if slug == report_slug:
print("%s, %s, %s" % (
domain, owner_id, slug
))
|
<commit_before><commit_msg>Add command to find saved reports with slug[skip ci]<commit_after>from __future__ import absolute_import, print_function
from django.conf import settings
from dimagi.utils.couch.cache import cache_core
from corehq.apps.reports.models import ReportConfig
from django.core.management import BaseCommand
class Command(BaseCommand):
def add_arguments(self, parser):
parser.add_argument('report_slug')
def handle(self, report_slug, *args, **options):
kwargs = {'stale': settings.COUCH_STALE_QUERY}
key = ["name slug"]
result = cache_core.cached_view(
ReportConfig.get_db(),
"reportconfig/configs_by_domain",
reduce=False, include_docs=False,
startkey=key, endkey=key + [{}],
**kwargs)
for report_config in result:
domain, owner_id, slug = report_config['key'][1:4]
if slug == report_slug:
print("%s, %s, %s" % (
domain, owner_id, slug
))
|
|
575f4678b2528bfcfb5d48fdacebd59a2abd9581
|
tests/slaves_expectations.py
|
tests/slaves_expectations.py
|
#!/usr/bin/env python
# Copyright (c) 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Dumps a list of known slaves, along with their OS and master."""
import argparse
import collections
import json
import logging
import os
import subprocess
import sys
# This file is located inside tests. Update this path if that changes.
BUILD = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
SCRIPTS = os.path.join(BUILD, 'scripts')
LIST_SLAVES = os.path.join(SCRIPTS, 'tools', 'list_slaves.py')
sys.path.append(SCRIPTS)
from common import chromium_utils
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
'-g',
'--gen',
'--generate',
action='store_true',
dest='generate',
help='Generate slaves.expected for all masters.',
)
args = parser.parse_args()
masters = chromium_utils.ListMastersWithSlaves()
master_map = {}
for master_path in masters:
# Convert ~/<somewhere>/master.<whatever> to just whatever.
master = os.path.basename(master_path).split('.', 1)[-1]
botmap = json.loads(subprocess.check_output([
LIST_SLAVES, '--json', '--master', master]))
slave_map = collections.defaultdict(set)
for entry in botmap:
assert entry['mastername'] == 'master.%s' % master
for builder in entry['builder']:
slave_map[builder].add(entry['hostname'])
master_map[master_path] = {}
for buildername in sorted(slave_map.keys()):
master_map[master_path][buildername] = sorted(slave_map[buildername])
retcode = 0
for master_path, slaves_expectation in master_map.iteritems():
if os.path.exists(master_path):
slaves_expectation_file = os.path.join(master_path, 'slaves.expected')
if args.generate:
with open(slaves_expectation_file, 'w') as fp:
json.dump(slaves_expectation, fp, indent=2, sort_keys=True)
print 'Wrote expectation: %s.' % slaves_expectation_file
else:
if os.path.exists(slaves_expectation_file):
with open(slaves_expectation_file) as fp:
if json.load(fp) != slaves_expectation:
logging.error(
'Mismatched expectation: %s.', slaves_expectation_file)
retcode = 1
else:
logging.error('File not found: %s.', slaves_expectation_file)
retcode = 1
return retcode
if __name__ == '__main__':
sys.exit(main())
|
Add script for generating slave expectations
|
Add script for generating slave expectations
BUG=489880
R=friedman@chromium.org
Review URL: https://codereview.chromium.org/1178383002.
git-svn-id: 239fca9b83025a0b6f823aeeca02ba5be3d9fd76@295683 0039d316-1c4b-4281-b951-d872f2087c98
|
Python
|
bsd-3-clause
|
eunchong/build,eunchong/build,eunchong/build,eunchong/build
|
Add script for generating slave expectations
BUG=489880
R=friedman@chromium.org
Review URL: https://codereview.chromium.org/1178383002.
git-svn-id: 239fca9b83025a0b6f823aeeca02ba5be3d9fd76@295683 0039d316-1c4b-4281-b951-d872f2087c98
|
#!/usr/bin/env python
# Copyright (c) 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Dumps a list of known slaves, along with their OS and master."""
import argparse
import collections
import json
import logging
import os
import subprocess
import sys
# This file is located inside tests. Update this path if that changes.
BUILD = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
SCRIPTS = os.path.join(BUILD, 'scripts')
LIST_SLAVES = os.path.join(SCRIPTS, 'tools', 'list_slaves.py')
sys.path.append(SCRIPTS)
from common import chromium_utils
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
'-g',
'--gen',
'--generate',
action='store_true',
dest='generate',
help='Generate slaves.expected for all masters.',
)
args = parser.parse_args()
masters = chromium_utils.ListMastersWithSlaves()
master_map = {}
for master_path in masters:
# Convert ~/<somewhere>/master.<whatever> to just whatever.
master = os.path.basename(master_path).split('.', 1)[-1]
botmap = json.loads(subprocess.check_output([
LIST_SLAVES, '--json', '--master', master]))
slave_map = collections.defaultdict(set)
for entry in botmap:
assert entry['mastername'] == 'master.%s' % master
for builder in entry['builder']:
slave_map[builder].add(entry['hostname'])
master_map[master_path] = {}
for buildername in sorted(slave_map.keys()):
master_map[master_path][buildername] = sorted(slave_map[buildername])
retcode = 0
for master_path, slaves_expectation in master_map.iteritems():
if os.path.exists(master_path):
slaves_expectation_file = os.path.join(master_path, 'slaves.expected')
if args.generate:
with open(slaves_expectation_file, 'w') as fp:
json.dump(slaves_expectation, fp, indent=2, sort_keys=True)
print 'Wrote expectation: %s.' % slaves_expectation_file
else:
if os.path.exists(slaves_expectation_file):
with open(slaves_expectation_file) as fp:
if json.load(fp) != slaves_expectation:
logging.error(
'Mismatched expectation: %s.', slaves_expectation_file)
retcode = 1
else:
logging.error('File not found: %s.', slaves_expectation_file)
retcode = 1
return retcode
if __name__ == '__main__':
sys.exit(main())
|
<commit_before><commit_msg>Add script for generating slave expectations
BUG=489880
R=friedman@chromium.org
Review URL: https://codereview.chromium.org/1178383002.
git-svn-id: 239fca9b83025a0b6f823aeeca02ba5be3d9fd76@295683 0039d316-1c4b-4281-b951-d872f2087c98<commit_after>
|
#!/usr/bin/env python
# Copyright (c) 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Dumps a list of known slaves, along with their OS and master."""
import argparse
import collections
import json
import logging
import os
import subprocess
import sys
# This file is located inside tests. Update this path if that changes.
BUILD = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
SCRIPTS = os.path.join(BUILD, 'scripts')
LIST_SLAVES = os.path.join(SCRIPTS, 'tools', 'list_slaves.py')
sys.path.append(SCRIPTS)
from common import chromium_utils
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
'-g',
'--gen',
'--generate',
action='store_true',
dest='generate',
help='Generate slaves.expected for all masters.',
)
args = parser.parse_args()
masters = chromium_utils.ListMastersWithSlaves()
master_map = {}
for master_path in masters:
# Convert ~/<somewhere>/master.<whatever> to just whatever.
master = os.path.basename(master_path).split('.', 1)[-1]
botmap = json.loads(subprocess.check_output([
LIST_SLAVES, '--json', '--master', master]))
slave_map = collections.defaultdict(set)
for entry in botmap:
assert entry['mastername'] == 'master.%s' % master
for builder in entry['builder']:
slave_map[builder].add(entry['hostname'])
master_map[master_path] = {}
for buildername in sorted(slave_map.keys()):
master_map[master_path][buildername] = sorted(slave_map[buildername])
retcode = 0
for master_path, slaves_expectation in master_map.iteritems():
if os.path.exists(master_path):
slaves_expectation_file = os.path.join(master_path, 'slaves.expected')
if args.generate:
with open(slaves_expectation_file, 'w') as fp:
json.dump(slaves_expectation, fp, indent=2, sort_keys=True)
print 'Wrote expectation: %s.' % slaves_expectation_file
else:
if os.path.exists(slaves_expectation_file):
with open(slaves_expectation_file) as fp:
if json.load(fp) != slaves_expectation:
logging.error(
'Mismatched expectation: %s.', slaves_expectation_file)
retcode = 1
else:
logging.error('File not found: %s.', slaves_expectation_file)
retcode = 1
return retcode
if __name__ == '__main__':
sys.exit(main())
|
Add script for generating slave expectations
BUG=489880
R=friedman@chromium.org
Review URL: https://codereview.chromium.org/1178383002.
git-svn-id: 239fca9b83025a0b6f823aeeca02ba5be3d9fd76@295683 0039d316-1c4b-4281-b951-d872f2087c98#!/usr/bin/env python
# Copyright (c) 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Dumps a list of known slaves, along with their OS and master."""
import argparse
import collections
import json
import logging
import os
import subprocess
import sys
# This file is located inside tests. Update this path if that changes.
BUILD = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
SCRIPTS = os.path.join(BUILD, 'scripts')
LIST_SLAVES = os.path.join(SCRIPTS, 'tools', 'list_slaves.py')
sys.path.append(SCRIPTS)
from common import chromium_utils
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
'-g',
'--gen',
'--generate',
action='store_true',
dest='generate',
help='Generate slaves.expected for all masters.',
)
args = parser.parse_args()
masters = chromium_utils.ListMastersWithSlaves()
master_map = {}
for master_path in masters:
# Convert ~/<somewhere>/master.<whatever> to just whatever.
master = os.path.basename(master_path).split('.', 1)[-1]
botmap = json.loads(subprocess.check_output([
LIST_SLAVES, '--json', '--master', master]))
slave_map = collections.defaultdict(set)
for entry in botmap:
assert entry['mastername'] == 'master.%s' % master
for builder in entry['builder']:
slave_map[builder].add(entry['hostname'])
master_map[master_path] = {}
for buildername in sorted(slave_map.keys()):
master_map[master_path][buildername] = sorted(slave_map[buildername])
retcode = 0
for master_path, slaves_expectation in master_map.iteritems():
if os.path.exists(master_path):
slaves_expectation_file = os.path.join(master_path, 'slaves.expected')
if args.generate:
with open(slaves_expectation_file, 'w') as fp:
json.dump(slaves_expectation, fp, indent=2, sort_keys=True)
print 'Wrote expectation: %s.' % slaves_expectation_file
else:
if os.path.exists(slaves_expectation_file):
with open(slaves_expectation_file) as fp:
if json.load(fp) != slaves_expectation:
logging.error(
'Mismatched expectation: %s.', slaves_expectation_file)
retcode = 1
else:
logging.error('File not found: %s.', slaves_expectation_file)
retcode = 1
return retcode
if __name__ == '__main__':
sys.exit(main())
|
<commit_before><commit_msg>Add script for generating slave expectations
BUG=489880
R=friedman@chromium.org
Review URL: https://codereview.chromium.org/1178383002.
git-svn-id: 239fca9b83025a0b6f823aeeca02ba5be3d9fd76@295683 0039d316-1c4b-4281-b951-d872f2087c98<commit_after>#!/usr/bin/env python
# Copyright (c) 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Dumps a list of known slaves, along with their OS and master."""
import argparse
import collections
import json
import logging
import os
import subprocess
import sys
# This file is located inside tests. Update this path if that changes.
BUILD = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
SCRIPTS = os.path.join(BUILD, 'scripts')
LIST_SLAVES = os.path.join(SCRIPTS, 'tools', 'list_slaves.py')
sys.path.append(SCRIPTS)
from common import chromium_utils
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
'-g',
'--gen',
'--generate',
action='store_true',
dest='generate',
help='Generate slaves.expected for all masters.',
)
args = parser.parse_args()
masters = chromium_utils.ListMastersWithSlaves()
master_map = {}
for master_path in masters:
# Convert ~/<somewhere>/master.<whatever> to just whatever.
master = os.path.basename(master_path).split('.', 1)[-1]
botmap = json.loads(subprocess.check_output([
LIST_SLAVES, '--json', '--master', master]))
slave_map = collections.defaultdict(set)
for entry in botmap:
assert entry['mastername'] == 'master.%s' % master
for builder in entry['builder']:
slave_map[builder].add(entry['hostname'])
master_map[master_path] = {}
for buildername in sorted(slave_map.keys()):
master_map[master_path][buildername] = sorted(slave_map[buildername])
retcode = 0
for master_path, slaves_expectation in master_map.iteritems():
if os.path.exists(master_path):
slaves_expectation_file = os.path.join(master_path, 'slaves.expected')
if args.generate:
with open(slaves_expectation_file, 'w') as fp:
json.dump(slaves_expectation, fp, indent=2, sort_keys=True)
print 'Wrote expectation: %s.' % slaves_expectation_file
else:
if os.path.exists(slaves_expectation_file):
with open(slaves_expectation_file) as fp:
if json.load(fp) != slaves_expectation:
logging.error(
'Mismatched expectation: %s.', slaves_expectation_file)
retcode = 1
else:
logging.error('File not found: %s.', slaves_expectation_file)
retcode = 1
return retcode
if __name__ == '__main__':
sys.exit(main())
|
|
8854737fdec0a3ba6195ffb8bb02f62b8aabfb9e
|
aiozk/test/test_recipe.py
|
aiozk/test/test_recipe.py
|
import pytest
from aiozk.recipes.sequential import SequentialRecipe
from aiozk.exc import TimeoutError, NodeExists
@pytest.mark.asyncio
async def test_wait_on_not_exist_sibling(zk, path):
seq_recipe = SequentialRecipe(path)
seq_recipe.set_client(zk)
# NO WAIT
await seq_recipe.wait_on_sibling('not-exist-znode', timeout=0.5)
@pytest.mark.asyncio
async def test_wait_on_exist_sibling(zk, path):
seq_recipe = SequentialRecipe(path)
seq_recipe.set_client(zk)
LABEL = 'test'
await seq_recipe.create_unique_znode(LABEL)
try:
owned_positions, siblings = await seq_recipe.analyze_siblings()
with pytest.raises(TimeoutError):
# SHOULD WAIT
await seq_recipe.wait_on_sibling(siblings[0], timeout=0.5)
finally:
await seq_recipe.delete_unique_znode(LABEL)
await zk.delete(path)
@pytest.mark.asyncio
async def test_delete_not_exist_unique_znode(zk, path):
seq_recipe = SequentialRecipe(path)
seq_recipe.set_client(zk)
with pytest.raises(KeyError):
# RAISE EXCEPTION
await seq_recipe.delete_unique_znode('test')
await seq_recipe.create_unique_znode('test')
await zk.delete(seq_recipe.owned_paths['test'])
try:
# OK
await seq_recipe.delete_unique_znode('test')
finally:
await zk.delete(path)
@pytest.mark.asyncio
async def test_create_unique_znode_twice(zk, path):
seq_recipe = SequentialRecipe(path)
seq_recipe.set_client(zk)
await seq_recipe.create_unique_znode('test')
try:
with pytest.raises(NodeExists):
await seq_recipe.create_unique_znode('test')
siblings = await seq_recipe.get_siblings()
assert len(siblings) == 1
finally:
await seq_recipe.delete_unique_znode('test')
await zk.delete(path)
|
Add test code for SequentialRecipe
|
sequential: Add test code for SequentialRecipe
|
Python
|
mit
|
tipsi/aiozk,tipsi/aiozk
|
sequential: Add test code for SequentialRecipe
|
import pytest
from aiozk.recipes.sequential import SequentialRecipe
from aiozk.exc import TimeoutError, NodeExists
@pytest.mark.asyncio
async def test_wait_on_not_exist_sibling(zk, path):
seq_recipe = SequentialRecipe(path)
seq_recipe.set_client(zk)
# NO WAIT
await seq_recipe.wait_on_sibling('not-exist-znode', timeout=0.5)
@pytest.mark.asyncio
async def test_wait_on_exist_sibling(zk, path):
seq_recipe = SequentialRecipe(path)
seq_recipe.set_client(zk)
LABEL = 'test'
await seq_recipe.create_unique_znode(LABEL)
try:
owned_positions, siblings = await seq_recipe.analyze_siblings()
with pytest.raises(TimeoutError):
# SHOULD WAIT
await seq_recipe.wait_on_sibling(siblings[0], timeout=0.5)
finally:
await seq_recipe.delete_unique_znode(LABEL)
await zk.delete(path)
@pytest.mark.asyncio
async def test_delete_not_exist_unique_znode(zk, path):
seq_recipe = SequentialRecipe(path)
seq_recipe.set_client(zk)
with pytest.raises(KeyError):
# RAISE EXCEPTION
await seq_recipe.delete_unique_znode('test')
await seq_recipe.create_unique_znode('test')
await zk.delete(seq_recipe.owned_paths['test'])
try:
# OK
await seq_recipe.delete_unique_znode('test')
finally:
await zk.delete(path)
@pytest.mark.asyncio
async def test_create_unique_znode_twice(zk, path):
seq_recipe = SequentialRecipe(path)
seq_recipe.set_client(zk)
await seq_recipe.create_unique_znode('test')
try:
with pytest.raises(NodeExists):
await seq_recipe.create_unique_znode('test')
siblings = await seq_recipe.get_siblings()
assert len(siblings) == 1
finally:
await seq_recipe.delete_unique_znode('test')
await zk.delete(path)
|
<commit_before><commit_msg>sequential: Add test code for SequentialRecipe<commit_after>
|
import pytest
from aiozk.recipes.sequential import SequentialRecipe
from aiozk.exc import TimeoutError, NodeExists
@pytest.mark.asyncio
async def test_wait_on_not_exist_sibling(zk, path):
seq_recipe = SequentialRecipe(path)
seq_recipe.set_client(zk)
# NO WAIT
await seq_recipe.wait_on_sibling('not-exist-znode', timeout=0.5)
@pytest.mark.asyncio
async def test_wait_on_exist_sibling(zk, path):
seq_recipe = SequentialRecipe(path)
seq_recipe.set_client(zk)
LABEL = 'test'
await seq_recipe.create_unique_znode(LABEL)
try:
owned_positions, siblings = await seq_recipe.analyze_siblings()
with pytest.raises(TimeoutError):
# SHOULD WAIT
await seq_recipe.wait_on_sibling(siblings[0], timeout=0.5)
finally:
await seq_recipe.delete_unique_znode(LABEL)
await zk.delete(path)
@pytest.mark.asyncio
async def test_delete_not_exist_unique_znode(zk, path):
seq_recipe = SequentialRecipe(path)
seq_recipe.set_client(zk)
with pytest.raises(KeyError):
# RAISE EXCEPTION
await seq_recipe.delete_unique_znode('test')
await seq_recipe.create_unique_znode('test')
await zk.delete(seq_recipe.owned_paths['test'])
try:
# OK
await seq_recipe.delete_unique_znode('test')
finally:
await zk.delete(path)
@pytest.mark.asyncio
async def test_create_unique_znode_twice(zk, path):
seq_recipe = SequentialRecipe(path)
seq_recipe.set_client(zk)
await seq_recipe.create_unique_znode('test')
try:
with pytest.raises(NodeExists):
await seq_recipe.create_unique_znode('test')
siblings = await seq_recipe.get_siblings()
assert len(siblings) == 1
finally:
await seq_recipe.delete_unique_znode('test')
await zk.delete(path)
|
sequential: Add test code for SequentialRecipeimport pytest
from aiozk.recipes.sequential import SequentialRecipe
from aiozk.exc import TimeoutError, NodeExists
@pytest.mark.asyncio
async def test_wait_on_not_exist_sibling(zk, path):
seq_recipe = SequentialRecipe(path)
seq_recipe.set_client(zk)
# NO WAIT
await seq_recipe.wait_on_sibling('not-exist-znode', timeout=0.5)
@pytest.mark.asyncio
async def test_wait_on_exist_sibling(zk, path):
seq_recipe = SequentialRecipe(path)
seq_recipe.set_client(zk)
LABEL = 'test'
await seq_recipe.create_unique_znode(LABEL)
try:
owned_positions, siblings = await seq_recipe.analyze_siblings()
with pytest.raises(TimeoutError):
# SHOULD WAIT
await seq_recipe.wait_on_sibling(siblings[0], timeout=0.5)
finally:
await seq_recipe.delete_unique_znode(LABEL)
await zk.delete(path)
@pytest.mark.asyncio
async def test_delete_not_exist_unique_znode(zk, path):
seq_recipe = SequentialRecipe(path)
seq_recipe.set_client(zk)
with pytest.raises(KeyError):
# RAISE EXCEPTION
await seq_recipe.delete_unique_znode('test')
await seq_recipe.create_unique_znode('test')
await zk.delete(seq_recipe.owned_paths['test'])
try:
# OK
await seq_recipe.delete_unique_znode('test')
finally:
await zk.delete(path)
@pytest.mark.asyncio
async def test_create_unique_znode_twice(zk, path):
seq_recipe = SequentialRecipe(path)
seq_recipe.set_client(zk)
await seq_recipe.create_unique_znode('test')
try:
with pytest.raises(NodeExists):
await seq_recipe.create_unique_znode('test')
siblings = await seq_recipe.get_siblings()
assert len(siblings) == 1
finally:
await seq_recipe.delete_unique_znode('test')
await zk.delete(path)
|
<commit_before><commit_msg>sequential: Add test code for SequentialRecipe<commit_after>import pytest
from aiozk.recipes.sequential import SequentialRecipe
from aiozk.exc import TimeoutError, NodeExists
@pytest.mark.asyncio
async def test_wait_on_not_exist_sibling(zk, path):
seq_recipe = SequentialRecipe(path)
seq_recipe.set_client(zk)
# NO WAIT
await seq_recipe.wait_on_sibling('not-exist-znode', timeout=0.5)
@pytest.mark.asyncio
async def test_wait_on_exist_sibling(zk, path):
seq_recipe = SequentialRecipe(path)
seq_recipe.set_client(zk)
LABEL = 'test'
await seq_recipe.create_unique_znode(LABEL)
try:
owned_positions, siblings = await seq_recipe.analyze_siblings()
with pytest.raises(TimeoutError):
# SHOULD WAIT
await seq_recipe.wait_on_sibling(siblings[0], timeout=0.5)
finally:
await seq_recipe.delete_unique_znode(LABEL)
await zk.delete(path)
@pytest.mark.asyncio
async def test_delete_not_exist_unique_znode(zk, path):
seq_recipe = SequentialRecipe(path)
seq_recipe.set_client(zk)
with pytest.raises(KeyError):
# RAISE EXCEPTION
await seq_recipe.delete_unique_znode('test')
await seq_recipe.create_unique_znode('test')
await zk.delete(seq_recipe.owned_paths['test'])
try:
# OK
await seq_recipe.delete_unique_znode('test')
finally:
await zk.delete(path)
@pytest.mark.asyncio
async def test_create_unique_znode_twice(zk, path):
seq_recipe = SequentialRecipe(path)
seq_recipe.set_client(zk)
await seq_recipe.create_unique_znode('test')
try:
with pytest.raises(NodeExists):
await seq_recipe.create_unique_znode('test')
siblings = await seq_recipe.get_siblings()
assert len(siblings) == 1
finally:
await seq_recipe.delete_unique_znode('test')
await zk.delete(path)
|
|
7889edb12598fc0e9b004bc8df34bdc59817270c
|
scripts/get_shodan_banners.py
|
scripts/get_shodan_banners.py
|
import re
import sys
import os
import pprint
import click
try:
import shodan
except ImportError:
print('Install shodan: pip install shodan')
sys.exit(0)
SHODAN_API_KEY = os.environ['SHODAN_API_KEY']
def get_headers(text):
header_string = re.findall(
'^(.*?)(?:[\r\n]{3,4})', text, flags=re.DOTALL | re.I
)
if not header_string:
return None
data = {}
for line in header_string[0].splitlines():
match = re.findall('^(.*?):(.*)', line)
if match:
key, value = map(lambda v: v.strip(), match[0])
data[key] = value
return data
@click.command()
@click.option(
'--filter',
default=None,
type=str,
help='Filter by header'
)
@click.option(
'--stats',
default=False,
is_flag=True,
help='Include stats'
)
@click.argument('query')
def main(filter, stats, query):
counter = 0
filtered_header = []
api = shodan.Shodan(SHODAN_API_KEY)
try:
result = api.search(query)
except shodan.exception.APIError:
print('[-] API connection error.')
sys.exit(0)
for match in result['matches']:
hd = get_headers(match['data'])
if not hd:
continue
if filter:
value = hd.get(filter)
if value:
filtered_header.append(value)
else:
pprint.pprint(hd, width=160)
counter += 1
if filtered_header:
pprint.pprint(filtered_header, width=160)
if stats:
print()
print('[+] n_matches: {}'.format(len(result['matches'])))
print('[+] n_printed: {}'.format(counter or len(filtered_header)))
if __name__ == '__main__':
main()
|
Add shodan script to get page headers to test plugins
|
Add shodan script to get page headers to test plugins
|
Python
|
mit
|
spectresearch/detectem
|
Add shodan script to get page headers to test plugins
|
import re
import sys
import os
import pprint
import click
try:
import shodan
except ImportError:
print('Install shodan: pip install shodan')
sys.exit(0)
SHODAN_API_KEY = os.environ['SHODAN_API_KEY']
def get_headers(text):
header_string = re.findall(
'^(.*?)(?:[\r\n]{3,4})', text, flags=re.DOTALL | re.I
)
if not header_string:
return None
data = {}
for line in header_string[0].splitlines():
match = re.findall('^(.*?):(.*)', line)
if match:
key, value = map(lambda v: v.strip(), match[0])
data[key] = value
return data
@click.command()
@click.option(
'--filter',
default=None,
type=str,
help='Filter by header'
)
@click.option(
'--stats',
default=False,
is_flag=True,
help='Include stats'
)
@click.argument('query')
def main(filter, stats, query):
counter = 0
filtered_header = []
api = shodan.Shodan(SHODAN_API_KEY)
try:
result = api.search(query)
except shodan.exception.APIError:
print('[-] API connection error.')
sys.exit(0)
for match in result['matches']:
hd = get_headers(match['data'])
if not hd:
continue
if filter:
value = hd.get(filter)
if value:
filtered_header.append(value)
else:
pprint.pprint(hd, width=160)
counter += 1
if filtered_header:
pprint.pprint(filtered_header, width=160)
if stats:
print()
print('[+] n_matches: {}'.format(len(result['matches'])))
print('[+] n_printed: {}'.format(counter or len(filtered_header)))
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add shodan script to get page headers to test plugins<commit_after>
|
import re
import sys
import os
import pprint
import click
try:
import shodan
except ImportError:
print('Install shodan: pip install shodan')
sys.exit(0)
SHODAN_API_KEY = os.environ['SHODAN_API_KEY']
def get_headers(text):
header_string = re.findall(
'^(.*?)(?:[\r\n]{3,4})', text, flags=re.DOTALL | re.I
)
if not header_string:
return None
data = {}
for line in header_string[0].splitlines():
match = re.findall('^(.*?):(.*)', line)
if match:
key, value = map(lambda v: v.strip(), match[0])
data[key] = value
return data
@click.command()
@click.option(
'--filter',
default=None,
type=str,
help='Filter by header'
)
@click.option(
'--stats',
default=False,
is_flag=True,
help='Include stats'
)
@click.argument('query')
def main(filter, stats, query):
counter = 0
filtered_header = []
api = shodan.Shodan(SHODAN_API_KEY)
try:
result = api.search(query)
except shodan.exception.APIError:
print('[-] API connection error.')
sys.exit(0)
for match in result['matches']:
hd = get_headers(match['data'])
if not hd:
continue
if filter:
value = hd.get(filter)
if value:
filtered_header.append(value)
else:
pprint.pprint(hd, width=160)
counter += 1
if filtered_header:
pprint.pprint(filtered_header, width=160)
if stats:
print()
print('[+] n_matches: {}'.format(len(result['matches'])))
print('[+] n_printed: {}'.format(counter or len(filtered_header)))
if __name__ == '__main__':
main()
|
Add shodan script to get page headers to test pluginsimport re
import sys
import os
import pprint
import click
try:
import shodan
except ImportError:
print('Install shodan: pip install shodan')
sys.exit(0)
SHODAN_API_KEY = os.environ['SHODAN_API_KEY']
def get_headers(text):
header_string = re.findall(
'^(.*?)(?:[\r\n]{3,4})', text, flags=re.DOTALL | re.I
)
if not header_string:
return None
data = {}
for line in header_string[0].splitlines():
match = re.findall('^(.*?):(.*)', line)
if match:
key, value = map(lambda v: v.strip(), match[0])
data[key] = value
return data
@click.command()
@click.option(
'--filter',
default=None,
type=str,
help='Filter by header'
)
@click.option(
'--stats',
default=False,
is_flag=True,
help='Include stats'
)
@click.argument('query')
def main(filter, stats, query):
counter = 0
filtered_header = []
api = shodan.Shodan(SHODAN_API_KEY)
try:
result = api.search(query)
except shodan.exception.APIError:
print('[-] API connection error.')
sys.exit(0)
for match in result['matches']:
hd = get_headers(match['data'])
if not hd:
continue
if filter:
value = hd.get(filter)
if value:
filtered_header.append(value)
else:
pprint.pprint(hd, width=160)
counter += 1
if filtered_header:
pprint.pprint(filtered_header, width=160)
if stats:
print()
print('[+] n_matches: {}'.format(len(result['matches'])))
print('[+] n_printed: {}'.format(counter or len(filtered_header)))
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add shodan script to get page headers to test plugins<commit_after>import re
import sys
import os
import pprint
import click
try:
import shodan
except ImportError:
print('Install shodan: pip install shodan')
sys.exit(0)
SHODAN_API_KEY = os.environ['SHODAN_API_KEY']
def get_headers(text):
header_string = re.findall(
'^(.*?)(?:[\r\n]{3,4})', text, flags=re.DOTALL | re.I
)
if not header_string:
return None
data = {}
for line in header_string[0].splitlines():
match = re.findall('^(.*?):(.*)', line)
if match:
key, value = map(lambda v: v.strip(), match[0])
data[key] = value
return data
@click.command()
@click.option(
'--filter',
default=None,
type=str,
help='Filter by header'
)
@click.option(
'--stats',
default=False,
is_flag=True,
help='Include stats'
)
@click.argument('query')
def main(filter, stats, query):
counter = 0
filtered_header = []
api = shodan.Shodan(SHODAN_API_KEY)
try:
result = api.search(query)
except shodan.exception.APIError:
print('[-] API connection error.')
sys.exit(0)
for match in result['matches']:
hd = get_headers(match['data'])
if not hd:
continue
if filter:
value = hd.get(filter)
if value:
filtered_header.append(value)
else:
pprint.pprint(hd, width=160)
counter += 1
if filtered_header:
pprint.pprint(filtered_header, width=160)
if stats:
print()
print('[+] n_matches: {}'.format(len(result['matches'])))
print('[+] n_printed: {}'.format(counter or len(filtered_header)))
if __name__ == '__main__':
main()
|
|
8bd86656ca589c3da1b4da078b5cd9633c018c3c
|
py/serialize-and-deserialize-bst.py
|
py/serialize-and-deserialize-bst.py
|
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Codec:
def serialize(self, root):
"""Encodes a tree to a single string.
:type root: TreeNode
:rtype: str
"""
ans = []
if root:
ans.append(str(root.val))
ans.append(self.serialize(root.left))
ans.append(self.serialize(root.right))
return ' '.join(ans)
def deserialize(self, data):
"""Decodes your encoded data to tree.
:type data: str
:rtype: TreeNode
"""
return self.do_deserialize(data)[0]
def do_deserialize(self, data):
ans = None
if data:
val, data = data.split(' ', 1)
if val:
ans = TreeNode(int(val))
ans.left, data = self.do_deserialize(data)
ans.right, data = self.do_deserialize(data)
return ans, data
# Your Codec object will be instantiated and called as such:
# codec = Codec()
# codec.deserialize(codec.serialize(root))
|
Add py solution for 449. Serialize and Deserialize BST
|
Add py solution for 449. Serialize and Deserialize BST
449. Serialize and Deserialize BST: https://leetcode.com/problems/serialize-and-deserialize-bst/
|
Python
|
apache-2.0
|
ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode
|
Add py solution for 449. Serialize and Deserialize BST
449. Serialize and Deserialize BST: https://leetcode.com/problems/serialize-and-deserialize-bst/
|
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Codec:
def serialize(self, root):
"""Encodes a tree to a single string.
:type root: TreeNode
:rtype: str
"""
ans = []
if root:
ans.append(str(root.val))
ans.append(self.serialize(root.left))
ans.append(self.serialize(root.right))
return ' '.join(ans)
def deserialize(self, data):
"""Decodes your encoded data to tree.
:type data: str
:rtype: TreeNode
"""
return self.do_deserialize(data)[0]
def do_deserialize(self, data):
ans = None
if data:
val, data = data.split(' ', 1)
if val:
ans = TreeNode(int(val))
ans.left, data = self.do_deserialize(data)
ans.right, data = self.do_deserialize(data)
return ans, data
# Your Codec object will be instantiated and called as such:
# codec = Codec()
# codec.deserialize(codec.serialize(root))
|
<commit_before><commit_msg>Add py solution for 449. Serialize and Deserialize BST
449. Serialize and Deserialize BST: https://leetcode.com/problems/serialize-and-deserialize-bst/<commit_after>
|
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Codec:
def serialize(self, root):
"""Encodes a tree to a single string.
:type root: TreeNode
:rtype: str
"""
ans = []
if root:
ans.append(str(root.val))
ans.append(self.serialize(root.left))
ans.append(self.serialize(root.right))
return ' '.join(ans)
def deserialize(self, data):
"""Decodes your encoded data to tree.
:type data: str
:rtype: TreeNode
"""
return self.do_deserialize(data)[0]
def do_deserialize(self, data):
ans = None
if data:
val, data = data.split(' ', 1)
if val:
ans = TreeNode(int(val))
ans.left, data = self.do_deserialize(data)
ans.right, data = self.do_deserialize(data)
return ans, data
# Your Codec object will be instantiated and called as such:
# codec = Codec()
# codec.deserialize(codec.serialize(root))
|
Add py solution for 449. Serialize and Deserialize BST
449. Serialize and Deserialize BST: https://leetcode.com/problems/serialize-and-deserialize-bst/# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Codec:
def serialize(self, root):
"""Encodes a tree to a single string.
:type root: TreeNode
:rtype: str
"""
ans = []
if root:
ans.append(str(root.val))
ans.append(self.serialize(root.left))
ans.append(self.serialize(root.right))
return ' '.join(ans)
def deserialize(self, data):
"""Decodes your encoded data to tree.
:type data: str
:rtype: TreeNode
"""
return self.do_deserialize(data)[0]
def do_deserialize(self, data):
ans = None
if data:
val, data = data.split(' ', 1)
if val:
ans = TreeNode(int(val))
ans.left, data = self.do_deserialize(data)
ans.right, data = self.do_deserialize(data)
return ans, data
# Your Codec object will be instantiated and called as such:
# codec = Codec()
# codec.deserialize(codec.serialize(root))
|
<commit_before><commit_msg>Add py solution for 449. Serialize and Deserialize BST
449. Serialize and Deserialize BST: https://leetcode.com/problems/serialize-and-deserialize-bst/<commit_after># Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Codec:
def serialize(self, root):
"""Encodes a tree to a single string.
:type root: TreeNode
:rtype: str
"""
ans = []
if root:
ans.append(str(root.val))
ans.append(self.serialize(root.left))
ans.append(self.serialize(root.right))
return ' '.join(ans)
def deserialize(self, data):
"""Decodes your encoded data to tree.
:type data: str
:rtype: TreeNode
"""
return self.do_deserialize(data)[0]
def do_deserialize(self, data):
ans = None
if data:
val, data = data.split(' ', 1)
if val:
ans = TreeNode(int(val))
ans.left, data = self.do_deserialize(data)
ans.right, data = self.do_deserialize(data)
return ans, data
# Your Codec object will be instantiated and called as such:
# codec = Codec()
# codec.deserialize(codec.serialize(root))
|
|
f146cc855c76af7a7a0ba6176e2cae09b3b26446
|
proj2/ethDist.py
|
proj2/ethDist.py
|
#!/usr/bin/python2
# Processes Ethernet data
f = open("data/eth_data.txt", "r")
# skip header lines
[f.readline() for i in range(3) ]
data = []
for line in f:
data.append(line.split()[2])
eth_types = {'Xerox PUP':0, 'Sprite':0, 'IPv4':0, 'ARP':0, 'Reverse ARP':0,
'AppleTalk ARP':0, 'IEEE 802.1Q VLAN tagging':0, 'IPX':0, 'IPv6':0,
'Loopback':0, 'Unknown':0}
for t in data:
eth_types[t] += 1
print eth_types
|
Add ethernet type distribution script
|
Add ethernet type distribution script
|
Python
|
bsd-3-clause
|
sjbarag/ECE-C433,sjbarag/ECE-C433,sjbarag/ECE-C433
|
Add ethernet type distribution script
|
#!/usr/bin/python2
# Processes Ethernet data
f = open("data/eth_data.txt", "r")
# skip header lines
[f.readline() for i in range(3) ]
data = []
for line in f:
data.append(line.split()[2])
eth_types = {'Xerox PUP':0, 'Sprite':0, 'IPv4':0, 'ARP':0, 'Reverse ARP':0,
'AppleTalk ARP':0, 'IEEE 802.1Q VLAN tagging':0, 'IPX':0, 'IPv6':0,
'Loopback':0, 'Unknown':0}
for t in data:
eth_types[t] += 1
print eth_types
|
<commit_before><commit_msg>Add ethernet type distribution script<commit_after>
|
#!/usr/bin/python2
# Processes Ethernet data
f = open("data/eth_data.txt", "r")
# skip header lines
[f.readline() for i in range(3) ]
data = []
for line in f:
data.append(line.split()[2])
eth_types = {'Xerox PUP':0, 'Sprite':0, 'IPv4':0, 'ARP':0, 'Reverse ARP':0,
'AppleTalk ARP':0, 'IEEE 802.1Q VLAN tagging':0, 'IPX':0, 'IPv6':0,
'Loopback':0, 'Unknown':0}
for t in data:
eth_types[t] += 1
print eth_types
|
Add ethernet type distribution script#!/usr/bin/python2
# Processes Ethernet data
f = open("data/eth_data.txt", "r")
# skip header lines
[f.readline() for i in range(3) ]
data = []
for line in f:
data.append(line.split()[2])
eth_types = {'Xerox PUP':0, 'Sprite':0, 'IPv4':0, 'ARP':0, 'Reverse ARP':0,
'AppleTalk ARP':0, 'IEEE 802.1Q VLAN tagging':0, 'IPX':0, 'IPv6':0,
'Loopback':0, 'Unknown':0}
for t in data:
eth_types[t] += 1
print eth_types
|
<commit_before><commit_msg>Add ethernet type distribution script<commit_after>#!/usr/bin/python2
# Processes Ethernet data
f = open("data/eth_data.txt", "r")
# skip header lines
[f.readline() for i in range(3) ]
data = []
for line in f:
data.append(line.split()[2])
eth_types = {'Xerox PUP':0, 'Sprite':0, 'IPv4':0, 'ARP':0, 'Reverse ARP':0,
'AppleTalk ARP':0, 'IEEE 802.1Q VLAN tagging':0, 'IPX':0, 'IPv6':0,
'Loopback':0, 'Unknown':0}
for t in data:
eth_types[t] += 1
print eth_types
|
|
418b1e1176f4b4d286983f69cf4e5c1deacd4afb
|
sympy/core/tests/test_cache.py
|
sympy/core/tests/test_cache.py
|
from sympy.core.cache import cacheit
def test_cacheit_doc():
@cacheit
def testfn():
"test docstring"
pass
assert testfn.__doc__ == "test docstring"
assert testfn.__name__ == "testfn"
|
Add a test for the @cachit decorator.
|
Add a test for the @cachit decorator.
Make sure that the caching decorator correctly
copies over the function docstring and function name.
This fixes issue #744 from the issue tracker.
Signed-off-by: Jochen Voss <1dcd5c846f3eb4984f0655fb5407be7c9e0c9079@seehuhn.de>
Signed-off-by: Ondrej Certik <b816faa87b7d35274d2e545c5be11ed4376f3ccf@certik.cz>
|
Python
|
bsd-3-clause
|
madan96/sympy,hargup/sympy,Gadal/sympy,cswiercz/sympy,sampadsaha5/sympy,Shaswat27/sympy,fperez/sympy,atreyv/sympy,lidavidm/sympy,Davidjohnwilson/sympy,AkademieOlympia/sympy,beni55/sympy,Mitchkoens/sympy,beni55/sympy,jbbskinny/sympy,pbrady/sympy,shikil/sympy,kmacinnis/sympy,shikil/sympy,Curious72/sympy,toolforger/sympy,yashsharan/sympy,jaimahajan1997/sympy,asm666/sympy,Titan-C/sympy,asm666/sympy,sahmed95/sympy,hrashk/sympy,yukoba/sympy,wyom/sympy,meghana1995/sympy,yukoba/sympy,oliverlee/sympy,madan96/sympy,saurabhjn76/sympy,cccfran/sympy,grevutiu-gabriel/sympy,bukzor/sympy,garvitr/sympy,kumarkrishna/sympy,AunShiLord/sympy,kmacinnis/sympy,emon10005/sympy,ahhda/sympy,jamesblunt/sympy,ahhda/sympy,saurabhjn76/sympy,sunny94/temp,atsao72/sympy,maniteja123/sympy,drufat/sympy,mattpap/sympy-polys,hazelnusse/sympy-old,MridulS/sympy,Shaswat27/sympy,iamutkarshtiwari/sympy,jerli/sympy,bukzor/sympy,MechCoder/sympy,grevutiu-gabriel/sympy,srjoglekar246/sympy,diofant/diofant,AkademieOlympia/sympy,skirpichev/omg,vipulroxx/sympy,iamutkarshtiwari/sympy,atreyv/sympy,pbrady/sympy,debugger22/sympy,hrashk/sympy,KevinGoodsell/sympy,sahilshekhawat/sympy,mafiya69/sympy,minrk/sympy,pandeyadarsh/sympy,vipulroxx/sympy,Sumith1896/sympy,oliverlee/sympy,cccfran/sympy,Designist/sympy,chaffra/sympy,cccfran/sympy,sahmed95/sympy,debugger22/sympy,Vishluck/sympy,iamutkarshtiwari/sympy,saurabhjn76/sympy,cswiercz/sympy,kaichogami/sympy,Vishluck/sympy,yukoba/sympy,lindsayad/sympy,sampadsaha5/sympy,chaffra/sympy,moble/sympy,MridulS/sympy,maniteja123/sympy,garvitr/sympy,kaushik94/sympy,Curious72/sympy,shipci/sympy,dqnykamp/sympy,kumarkrishna/sympy,AkademieOlympia/sympy,skidzo/sympy,vipulroxx/sympy,Sumith1896/sympy,amitjamadagni/sympy,toolforger/sympy,Davidjohnwilson/sympy,chaffra/sympy,kaushik94/sympy,abhiii5459/sympy,VaibhavAgarwalVA/sympy,souravsingh/sympy,sunny94/temp,Mitchkoens/sympy,rahuldan/sympy,Mitchkoens/sympy,ahhda/sympy,lidavidm/sympy,sunny94/temp,Arafatk/sympy,abloomston/sympy,maniteja123/sympy,garvitr/sympy,kaushik94/sympy,Curious72/sympy,abloomston/sympy,jerli/sympy,jaimahajan1997/sympy,minrk/sympy,ChristinaZografou/sympy,MechCoder/sympy,jbbskinny/sympy,aktech/sympy,farhaanbukhsh/sympy,postvakje/sympy,mafiya69/sympy,oliverlee/sympy
|
Add a test for the @cachit decorator.
Make sure that the caching decorator correctly
copies over the function docstring and function name.
This fixes issue #744 from the issue tracker.
Signed-off-by: Jochen Voss <1dcd5c846f3eb4984f0655fb5407be7c9e0c9079@seehuhn.de>
Signed-off-by: Ondrej Certik <b816faa87b7d35274d2e545c5be11ed4376f3ccf@certik.cz>
|
from sympy.core.cache import cacheit
def test_cacheit_doc():
@cacheit
def testfn():
"test docstring"
pass
assert testfn.__doc__ == "test docstring"
assert testfn.__name__ == "testfn"
|
<commit_before><commit_msg>Add a test for the @cachit decorator.
Make sure that the caching decorator correctly
copies over the function docstring and function name.
This fixes issue #744 from the issue tracker.
Signed-off-by: Jochen Voss <1dcd5c846f3eb4984f0655fb5407be7c9e0c9079@seehuhn.de>
Signed-off-by: Ondrej Certik <b816faa87b7d35274d2e545c5be11ed4376f3ccf@certik.cz><commit_after>
|
from sympy.core.cache import cacheit
def test_cacheit_doc():
@cacheit
def testfn():
"test docstring"
pass
assert testfn.__doc__ == "test docstring"
assert testfn.__name__ == "testfn"
|
Add a test for the @cachit decorator.
Make sure that the caching decorator correctly
copies over the function docstring and function name.
This fixes issue #744 from the issue tracker.
Signed-off-by: Jochen Voss <1dcd5c846f3eb4984f0655fb5407be7c9e0c9079@seehuhn.de>
Signed-off-by: Ondrej Certik <b816faa87b7d35274d2e545c5be11ed4376f3ccf@certik.cz>from sympy.core.cache import cacheit
def test_cacheit_doc():
@cacheit
def testfn():
"test docstring"
pass
assert testfn.__doc__ == "test docstring"
assert testfn.__name__ == "testfn"
|
<commit_before><commit_msg>Add a test for the @cachit decorator.
Make sure that the caching decorator correctly
copies over the function docstring and function name.
This fixes issue #744 from the issue tracker.
Signed-off-by: Jochen Voss <1dcd5c846f3eb4984f0655fb5407be7c9e0c9079@seehuhn.de>
Signed-off-by: Ondrej Certik <b816faa87b7d35274d2e545c5be11ed4376f3ccf@certik.cz><commit_after>from sympy.core.cache import cacheit
def test_cacheit_doc():
@cacheit
def testfn():
"test docstring"
pass
assert testfn.__doc__ == "test docstring"
assert testfn.__name__ == "testfn"
|
|
138d183d0164d31fae51f3bc00dd1ea554703b7c
|
tests/accounts/test_backend.py
|
tests/accounts/test_backend.py
|
import pytest
from django.contrib.auth import get_user
from django.http import HttpRequest
from django.core import mail
from components.accounts.factories import EditorFactory
pytestmark = pytest.mark.django_db
def test_authentication_backend(client):
editor = EditorFactory(base_id=1, is_active=True, username='bryan')
assert client.login(username=editor.username)
request = HttpRequest()
request.session = client.session
user = get_user(request)
assert user is not None
assert not user.is_anonymous()
|
Create a test for our authentication backend.
|
Create a test for our authentication backend.
|
Python
|
apache-2.0
|
hello-base/web,hello-base/web,hello-base/web,hello-base/web
|
Create a test for our authentication backend.
|
import pytest
from django.contrib.auth import get_user
from django.http import HttpRequest
from django.core import mail
from components.accounts.factories import EditorFactory
pytestmark = pytest.mark.django_db
def test_authentication_backend(client):
editor = EditorFactory(base_id=1, is_active=True, username='bryan')
assert client.login(username=editor.username)
request = HttpRequest()
request.session = client.session
user = get_user(request)
assert user is not None
assert not user.is_anonymous()
|
<commit_before><commit_msg>Create a test for our authentication backend.<commit_after>
|
import pytest
from django.contrib.auth import get_user
from django.http import HttpRequest
from django.core import mail
from components.accounts.factories import EditorFactory
pytestmark = pytest.mark.django_db
def test_authentication_backend(client):
editor = EditorFactory(base_id=1, is_active=True, username='bryan')
assert client.login(username=editor.username)
request = HttpRequest()
request.session = client.session
user = get_user(request)
assert user is not None
assert not user.is_anonymous()
|
Create a test for our authentication backend.import pytest
from django.contrib.auth import get_user
from django.http import HttpRequest
from django.core import mail
from components.accounts.factories import EditorFactory
pytestmark = pytest.mark.django_db
def test_authentication_backend(client):
editor = EditorFactory(base_id=1, is_active=True, username='bryan')
assert client.login(username=editor.username)
request = HttpRequest()
request.session = client.session
user = get_user(request)
assert user is not None
assert not user.is_anonymous()
|
<commit_before><commit_msg>Create a test for our authentication backend.<commit_after>import pytest
from django.contrib.auth import get_user
from django.http import HttpRequest
from django.core import mail
from components.accounts.factories import EditorFactory
pytestmark = pytest.mark.django_db
def test_authentication_backend(client):
editor = EditorFactory(base_id=1, is_active=True, username='bryan')
assert client.login(username=editor.username)
request = HttpRequest()
request.session = client.session
user = get_user(request)
assert user is not None
assert not user.is_anonymous()
|
|
e6d28d55309cdf7c25062d469646e0671e877607
|
nose2/tests/functional/support/scenario/tests_in_package/pkg1/test/test_things.py
|
nose2/tests/functional/support/scenario/tests_in_package/pkg1/test/test_things.py
|
import unittest
class SomeTests(unittest.TestCase):
def test_ok(self):
pass
def test_typeerr(self):
raise TypeError("oops")
def test_failed(self):
print("Hello stdout")
assert False, "I failed"
def test_skippy(self):
raise unittest.SkipTest("I wanted to skip")
def test_func():
assert 1 == 1
def test_gen():
def check(a, b):
assert a == b
for i in range(0, 5):
yield check, (i, i,)
test_gen.testGenerator = True
def test_gen_nose_style():
def check(a, b):
assert a == b
for i in range(0, 5):
yield check, i, i
did_setup = False
def setup():
global did_setup
did_setup = True
def test_fixt():
assert did_setup
test_fixt.setup = setup
|
import unittest
class SomeTests(unittest.TestCase):
def test_ok(self):
pass
def test_typeerr(self):
raise TypeError("oops")
def test_failed(self):
print("Hello stdout")
assert False, "I failed"
def test_skippy(self):
raise unittest.SkipTest("I wanted to skip")
def test_gen_method(self):
def check(x):
assert x == 1
yield check, 1
yield check, 2
def test_params_method(self, a):
self.assertEqual(a, 1)
test_params_method.paramList = (1, 2)
def test_func():
assert 1 == 1
def test_gen():
def check(a, b):
assert a == b
for i in range(0, 5):
yield check, (i, i,)
test_gen.testGenerator = True
def test_gen_nose_style():
def check(a, b):
assert a == b
for i in range(0, 5):
yield check, i, i
did_setup = False
def setup():
global did_setup
did_setup = True
def test_fixt():
assert did_setup
test_fixt.setup = setup
def test_params_func(a):
assert a == 1
test_params_func.paramList = (1, 2)
def test_params_func_multi_arg(a, b):
assert a == b
test_params_func_multi_arg.paramList = ((1, 1), (1, 2), (2, 2))
|
Add param test cases to func test target project
|
Add param test cases to func test target project
|
Python
|
bsd-2-clause
|
ojengwa/nose2,ezigman/nose2,ezigman/nose2,leth/nose2,leth/nose2,little-dude/nose2,ptthiem/nose2,ptthiem/nose2,little-dude/nose2,ojengwa/nose2
|
import unittest
class SomeTests(unittest.TestCase):
def test_ok(self):
pass
def test_typeerr(self):
raise TypeError("oops")
def test_failed(self):
print("Hello stdout")
assert False, "I failed"
def test_skippy(self):
raise unittest.SkipTest("I wanted to skip")
def test_func():
assert 1 == 1
def test_gen():
def check(a, b):
assert a == b
for i in range(0, 5):
yield check, (i, i,)
test_gen.testGenerator = True
def test_gen_nose_style():
def check(a, b):
assert a == b
for i in range(0, 5):
yield check, i, i
did_setup = False
def setup():
global did_setup
did_setup = True
def test_fixt():
assert did_setup
test_fixt.setup = setup
Add param test cases to func test target project
|
import unittest
class SomeTests(unittest.TestCase):
def test_ok(self):
pass
def test_typeerr(self):
raise TypeError("oops")
def test_failed(self):
print("Hello stdout")
assert False, "I failed"
def test_skippy(self):
raise unittest.SkipTest("I wanted to skip")
def test_gen_method(self):
def check(x):
assert x == 1
yield check, 1
yield check, 2
def test_params_method(self, a):
self.assertEqual(a, 1)
test_params_method.paramList = (1, 2)
def test_func():
assert 1 == 1
def test_gen():
def check(a, b):
assert a == b
for i in range(0, 5):
yield check, (i, i,)
test_gen.testGenerator = True
def test_gen_nose_style():
def check(a, b):
assert a == b
for i in range(0, 5):
yield check, i, i
did_setup = False
def setup():
global did_setup
did_setup = True
def test_fixt():
assert did_setup
test_fixt.setup = setup
def test_params_func(a):
assert a == 1
test_params_func.paramList = (1, 2)
def test_params_func_multi_arg(a, b):
assert a == b
test_params_func_multi_arg.paramList = ((1, 1), (1, 2), (2, 2))
|
<commit_before>import unittest
class SomeTests(unittest.TestCase):
def test_ok(self):
pass
def test_typeerr(self):
raise TypeError("oops")
def test_failed(self):
print("Hello stdout")
assert False, "I failed"
def test_skippy(self):
raise unittest.SkipTest("I wanted to skip")
def test_func():
assert 1 == 1
def test_gen():
def check(a, b):
assert a == b
for i in range(0, 5):
yield check, (i, i,)
test_gen.testGenerator = True
def test_gen_nose_style():
def check(a, b):
assert a == b
for i in range(0, 5):
yield check, i, i
did_setup = False
def setup():
global did_setup
did_setup = True
def test_fixt():
assert did_setup
test_fixt.setup = setup
<commit_msg>Add param test cases to func test target project<commit_after>
|
import unittest
class SomeTests(unittest.TestCase):
def test_ok(self):
pass
def test_typeerr(self):
raise TypeError("oops")
def test_failed(self):
print("Hello stdout")
assert False, "I failed"
def test_skippy(self):
raise unittest.SkipTest("I wanted to skip")
def test_gen_method(self):
def check(x):
assert x == 1
yield check, 1
yield check, 2
def test_params_method(self, a):
self.assertEqual(a, 1)
test_params_method.paramList = (1, 2)
def test_func():
assert 1 == 1
def test_gen():
def check(a, b):
assert a == b
for i in range(0, 5):
yield check, (i, i,)
test_gen.testGenerator = True
def test_gen_nose_style():
def check(a, b):
assert a == b
for i in range(0, 5):
yield check, i, i
did_setup = False
def setup():
global did_setup
did_setup = True
def test_fixt():
assert did_setup
test_fixt.setup = setup
def test_params_func(a):
assert a == 1
test_params_func.paramList = (1, 2)
def test_params_func_multi_arg(a, b):
assert a == b
test_params_func_multi_arg.paramList = ((1, 1), (1, 2), (2, 2))
|
import unittest
class SomeTests(unittest.TestCase):
def test_ok(self):
pass
def test_typeerr(self):
raise TypeError("oops")
def test_failed(self):
print("Hello stdout")
assert False, "I failed"
def test_skippy(self):
raise unittest.SkipTest("I wanted to skip")
def test_func():
assert 1 == 1
def test_gen():
def check(a, b):
assert a == b
for i in range(0, 5):
yield check, (i, i,)
test_gen.testGenerator = True
def test_gen_nose_style():
def check(a, b):
assert a == b
for i in range(0, 5):
yield check, i, i
did_setup = False
def setup():
global did_setup
did_setup = True
def test_fixt():
assert did_setup
test_fixt.setup = setup
Add param test cases to func test target projectimport unittest
class SomeTests(unittest.TestCase):
def test_ok(self):
pass
def test_typeerr(self):
raise TypeError("oops")
def test_failed(self):
print("Hello stdout")
assert False, "I failed"
def test_skippy(self):
raise unittest.SkipTest("I wanted to skip")
def test_gen_method(self):
def check(x):
assert x == 1
yield check, 1
yield check, 2
def test_params_method(self, a):
self.assertEqual(a, 1)
test_params_method.paramList = (1, 2)
def test_func():
assert 1 == 1
def test_gen():
def check(a, b):
assert a == b
for i in range(0, 5):
yield check, (i, i,)
test_gen.testGenerator = True
def test_gen_nose_style():
def check(a, b):
assert a == b
for i in range(0, 5):
yield check, i, i
did_setup = False
def setup():
global did_setup
did_setup = True
def test_fixt():
assert did_setup
test_fixt.setup = setup
def test_params_func(a):
assert a == 1
test_params_func.paramList = (1, 2)
def test_params_func_multi_arg(a, b):
assert a == b
test_params_func_multi_arg.paramList = ((1, 1), (1, 2), (2, 2))
|
<commit_before>import unittest
class SomeTests(unittest.TestCase):
def test_ok(self):
pass
def test_typeerr(self):
raise TypeError("oops")
def test_failed(self):
print("Hello stdout")
assert False, "I failed"
def test_skippy(self):
raise unittest.SkipTest("I wanted to skip")
def test_func():
assert 1 == 1
def test_gen():
def check(a, b):
assert a == b
for i in range(0, 5):
yield check, (i, i,)
test_gen.testGenerator = True
def test_gen_nose_style():
def check(a, b):
assert a == b
for i in range(0, 5):
yield check, i, i
did_setup = False
def setup():
global did_setup
did_setup = True
def test_fixt():
assert did_setup
test_fixt.setup = setup
<commit_msg>Add param test cases to func test target project<commit_after>import unittest
class SomeTests(unittest.TestCase):
def test_ok(self):
pass
def test_typeerr(self):
raise TypeError("oops")
def test_failed(self):
print("Hello stdout")
assert False, "I failed"
def test_skippy(self):
raise unittest.SkipTest("I wanted to skip")
def test_gen_method(self):
def check(x):
assert x == 1
yield check, 1
yield check, 2
def test_params_method(self, a):
self.assertEqual(a, 1)
test_params_method.paramList = (1, 2)
def test_func():
assert 1 == 1
def test_gen():
def check(a, b):
assert a == b
for i in range(0, 5):
yield check, (i, i,)
test_gen.testGenerator = True
def test_gen_nose_style():
def check(a, b):
assert a == b
for i in range(0, 5):
yield check, i, i
did_setup = False
def setup():
global did_setup
did_setup = True
def test_fixt():
assert did_setup
test_fixt.setup = setup
def test_params_func(a):
assert a == 1
test_params_func.paramList = (1, 2)
def test_params_func_multi_arg(a, b):
assert a == b
test_params_func_multi_arg.paramList = ((1, 1), (1, 2), (2, 2))
|
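Aside: a hypothetical sketch, not part of any record here, of how a runner honoring the paramList convention above might expand one tagged function into separate cases; the loop is illustrative only, not nose2's actual machinery.

def test_params_func(a):
    assert a == 1
test_params_func.paramList = (1, 2)

# Call the function once per parameter and report each case on its own,
# which is conceptually what a paramList-aware runner does.
for param in test_params_func.paramList:
    try:
        test_params_func(param)
        print('param %r: ok' % (param,))
    except AssertionError:
        print('param %r: failed' % (param,))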
a9d1c124813b3248188bdcd92799555ee6704c16
|
tests/unit/utils/which_test.py
|
tests/unit/utils/which_test.py
|
# Import python libs
import os
# Import Salt Testing libs
from salttesting import TestCase
from salttesting.helpers import ensure_in_syspath
ensure_in_syspath('../../')
# Import salt libs
import integration
import salt.utils
class TestWhich(TestCase):
def test_salt_utils_which(self):
'''
Tests salt.utils.which function to ensure that it returns True as
expected.
'''
self.assertTrue(salt.utils.which('sh'))
if __name__ == '__main__':
from integration import run_tests
run_tests(TestWhich, needs_daemon=False)
|
Add unit test for salt.utils.which
|
Add unit test for salt.utils.which
This is a Linux-only test, it will need some mock love to make it
suitable to test for windows paths.
|
Python
|
apache-2.0
|
saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt
|
Add unit test for salt.utils.which
This is a Linux-only test, it will need some mock love to make it
suitable to test for windows paths.
|
# Import python libs
import os
# Import Salt Testing libs
from salttesting import TestCase
from salttesting.helpers import ensure_in_syspath
ensure_in_syspath('../../')
# Import salt libs
import integration
import salt.utils
class TestWhich(TestCase):
def test_salt_utils_which(self):
'''
Tests salt.utils.which function to ensure that it returns True as
expected.
'''
self.assertTrue(salt.utils.which('sh'))
if __name__ == '__main__':
from integration import run_tests
run_tests(TestWhich, needs_daemon=False)
|
<commit_before><commit_msg>Add unit test for salt.utils.which
This is a Linux-only test, it will need some mock love to make it
suitable to test for windows paths.<commit_after>
|
# Import python libs
import os
# Import Salt Testing libs
from salttesting import TestCase
from salttesting.helpers import ensure_in_syspath
ensure_in_syspath('../../')
# Import salt libs
import integration
import salt.utils
class TestWhich(TestCase):
def test_salt_utils_which(self):
'''
Tests salt.utils.which function to ensure that it returns True as
expected.
'''
self.assertTrue(salt.utils.which('sh'))
if __name__ == '__main__':
from integration import run_tests
run_tests(TestWhich, needs_daemon=False)
|
Add unit test for salt.utils.which
This is a Linux-only test, it will need some mock love to make it
suitable to test for windows paths.
# Import python libs
import os
# Import Salt Testing libs
from salttesting import TestCase
from salttesting.helpers import ensure_in_syspath
ensure_in_syspath('../../')
# Import salt libs
import integration
import salt.utils
class TestWhich(TestCase):
def test_salt_utils_which(self):
'''
Tests salt.utils.which function to ensure that it returns True as
expected.
'''
self.assertTrue(salt.utils.which('sh'))
if __name__ == '__main__':
from integration import run_tests
run_tests(TestWhich, needs_daemon=False)
|
<commit_before><commit_msg>Add unit test for salt.utils.which
This is a Linux-only test, it will need some mock love to make it
suitable to test for windows paths.<commit_after># Import python libs
import os
# Import Salt Testing libs
from salttesting import TestCase
from salttesting.helpers import ensure_in_syspath
ensure_in_syspath('../../')
# Import salt libs
import integration
import salt.utils
class TestWhich(TestCase):
def test_salt_utils_which(self):
'''
Tests salt.utils.which function to ensure that it returns True as
expected.
'''
self.assertTrue(salt.utils.which('sh'))
if __name__ == '__main__':
from integration import run_tests
run_tests(TestWhich, needs_daemon=False)
|
|
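Aside: a hypothetical follow-up for the Windows gap flagged in the commit message above. It assumes salt.utils.which() consults salt.utils.is_windows(), os.access() and the PATH/PATHEXT environment; those internals are an educated guess, not a confirmed API.

import salt.utils
from salttesting import TestCase
from salttesting.mock import patch

class TestWhichWindows(TestCase):
    def test_salt_utils_which_windows(self):
        # Pretend to run on Windows and treat every candidate as executable;
        # the patched targets are assumptions about which()'s internals.
        with patch('salt.utils.is_windows', return_value=True), \
                patch('os.access', return_value=True), \
                patch.dict('os.environ', {'PATH': r'C:\Windows\System32',
                                          'PATHEXT': '.EXE'}):
            self.assertTrue(salt.utils.which('cmd'))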
58fe858cc61f15dda2f9a1ca0b3937e5968fafa1
|
every_election/apps/elections/migrations/0058_set-gla-a-to-ballot.py
|
every_election/apps/elections/migrations/0058_set-gla-a-to-ballot.py
|
# Generated by Django 2.2.10 on 2020-02-18 08:36
from django.db import migrations
def remove_gla_a_subtype(apps, schema_editor):
Election = apps.get_model("elections", "Election")
qs = Election.private_objects.filter(election_id__startswith="gla.a.")
qs.update(group_type=None)
class Migration(migrations.Migration):
dependencies = [("elections", "0057_cleanup_mayor_subtype_titles")]
operations = [migrations.RunPython(remove_gla_a_subtype, migrations.RunPython.noop)]
|
Change past gla.a elections to remove subtype group
|
Change past gla.a elections to remove subtype group
|
Python
|
bsd-3-clause
|
DemocracyClub/EveryElection,DemocracyClub/EveryElection,DemocracyClub/EveryElection
|
Change past gla.a elections to remove subtype group
|
# Generated by Django 2.2.10 on 2020-02-18 08:36
from django.db import migrations
def remove_gla_a_subtype(apps, schema_editor):
Election = apps.get_model("elections", "Election")
qs = Election.private_objects.filter(election_id__startswith="gla.a.")
qs.update(group_type=None)
class Migration(migrations.Migration):
dependencies = [("elections", "0057_cleanup_mayor_subtype_titles")]
operations = [migrations.RunPython(remove_gla_a_subtype, migrations.RunPython.noop)]
|
<commit_before><commit_msg>Change past gla.a elections to remove subtype group<commit_after>
|
# Generated by Django 2.2.10 on 2020-02-18 08:36
from django.db import migrations
def remove_gla_a_subtype(apps, schema_editor):
Election = apps.get_model("elections", "Election")
qs = Election.private_objects.filter(election_id__startswith="gla.a.")
qs.update(group_type=None)
class Migration(migrations.Migration):
dependencies = [("elections", "0057_cleanup_mayor_subtype_titles")]
operations = [migrations.RunPython(remove_gla_a_subtype, migrations.RunPython.noop)]
|
Change past gla.a elections to remove subtype group
# Generated by Django 2.2.10 on 2020-02-18 08:36
from django.db import migrations
def remove_gla_a_subtype(apps, schema_editor):
Election = apps.get_model("elections", "Election")
qs = Election.private_objects.filter(election_id__startswith="gla.a.")
qs.update(group_type=None)
class Migration(migrations.Migration):
dependencies = [("elections", "0057_cleanup_mayor_subtype_titles")]
operations = [migrations.RunPython(remove_gla_a_subtype, migrations.RunPython.noop)]
|
<commit_before><commit_msg>Change past gla.a elections to remove subtype group<commit_after># Generated by Django 2.2.10 on 2020-02-18 08:36
from django.db import migrations
def remove_gla_a_subtype(apps, schema_editor):
Election = apps.get_model("elections", "Election")
qs = Election.private_objects.filter(election_id__startswith="gla.a.")
qs.update(group_type=None)
class Migration(migrations.Migration):
dependencies = [("elections", "0057_cleanup_mayor_subtype_titles")]
operations = [migrations.RunPython(remove_gla_a_subtype, migrations.RunPython.noop)]
|
|
b5187863bb951f53ef3c61ce5097550abfd2d6e9
|
sara_flexbe_states/src/sara_flexbe_states/Wonderland_Add_Room.py
|
sara_flexbe_states/src/sara_flexbe_states/Wonderland_Add_Room.py
|
#!/usr/bin/env python
# encoding=utf8
import json
import requests
from flexbe_core import EventState, Logger
class Wonderland_Add_Room(EventState):
'''
Add an object to Wonderland.
For the room, enter only ID or Name, not both.
Return the ID of the added room.
#> name string name of the room
#> x1 int position of the room
#> x2 int position of the room
#> x3 int position of the room
#> x4 int position of the room
#> y1 int position of the room
#> y2 int position of the room
#> y3 int position of the room
#> y4 int position of the room
#> id int ID on the BDD of the room
<= done data sent correctly
<= error error while reading data
<= already_registered
'''
def __init__(self):
super(Wonderland_Add_Room, self).__init__(outcomes=['done', 'already_registered', 'error'],
output_keys=['id'],
input_keys=['name', 'x1', 'x2', 'x3', 'x4', 'y1', 'y2', 'y3', 'y4'])
# generate post key for authentication
self._header = {'api-key': 'asdf'}
def execute(self, userdata):
# Generate URL to contact
dataPost = {'name': userdata.name, 'x1': userdata.x1, 'x2': userdata.x2, 'x3': userdata.x3,'x4': userdata.x4,
'y1': userdata.y1, 'y2': userdata.y2, 'y3': userdata.y3, 'y4': userdata.y4}
# try the request
try:
response = requests.post("http://192.168.0.46:8000/api/rooms/", headers=self._header, data=dataPost)
except requests.exceptions.RequestException as e:
print e
return 'error'
# read response
data_response = json.loads(response.content)
# have a response
if not data_response:
return 'error'
# neither a name nor an ID to read
if 'id' not in data_response and 'name' not in data_response:
# continue to Error
return 'error'
# no ID to read: already registered
elif 'id' not in data_response:
return 'already_registered'
# return the ID
userdata.id = data_response['id']
return 'done'
|
Add a state for add rooms
|
Add a state for add rooms
|
Python
|
bsd-3-clause
|
WalkingMachine/sara_behaviors,WalkingMachine/sara_behaviors
|
Add a state for add rooms
|
#!/usr/bin/env python
# encoding=utf8
import json
import requests
from flexbe_core import EventState, Logger
class Wonderland_Add_Room(EventState):
'''
Add an object to Wonderland.
For the room, enter only ID or Name, not both.
Return the ID of the added room.
#> name string name of the room
#> x1 int position of the room
#> x2 int position of the room
#> x3 int position of the room
#> x4 int position of the room
#> y1 int position of the room
#> y2 int position of the room
#> y3 int position of the room
#> y4 int position of the room
#> id int ID on the BDD of the room
<= done data sent correctly
<= error error while reading data
<= already_registered
'''
def __init__(self):
super(Wonderland_Add_Room, self).__init__(outcomes=['done', 'already_registered', 'error'],
output_keys=['id'],
input_keys=['name', 'x1', 'x2', 'x3', 'x4', 'y1', 'y2', 'y3', 'y4'])
# generate post key for authentication
self._header = {'api-key': 'asdf'}
def execute(self, userdata):
# Generate URL to contact
dataPost = {'name': userdata.name, 'x1': userdata.x1, 'x2': userdata.x2, 'x3': userdata.x3,'x4': userdata.x4,
'y1': userdata.y1, 'y2': userdata.y2, 'y3': userdata.y3, 'y4': userdata.y4}
# try the request
try:
response = requests.post("http://192.168.0.46:8000/api/rooms/", headers=self._header, data=dataPost)
except requests.exceptions.RequestException as e:
print e
return 'error'
# read response
data_response = json.loads(response.content)
# have a response
if not data_response:
return 'error'
# neither a name nor an ID to read
if 'id' not in data_response and 'name' not in data_response:
# continue to Error
return 'error'
# no ID to read: already registered
elif 'id' not in data_response:
return 'already_registered'
# return the ID
userdata.id = data_response['id']
return 'done'
|
<commit_before><commit_msg>Add a state for add rooms<commit_after>
|
#!/usr/bin/env python
# encoding=utf8
import json
import requests
from flexbe_core import EventState, Logger
class Wonderland_Add_Room(EventState):
'''
Add an object to Wonderland.
For the room, enter only ID or Name, not both.
Return the ID of the added room.
#> name string name of the room
#> x1 int position of the room
#> x2 int position of the room
#> x3 int position of the room
#> x4 int position of the room
#> y1 int position of the room
#> y2 int position of the room
#> y3 int position of the room
#> y4 int position of the room
#> id int ID on the BDD of the room
<= done data sent correctly
<= error error while reading data
<= already_registered
'''
def __init__(self):
super(Wonderland_Add_Room, self).__init__(outcomes=['done', 'already_registered', 'error'],
output_keys=['id'],
input_keys=['name', 'x1', 'x2', 'x3', 'x4', 'y1', 'y2', 'y3', 'y4'])
# generate post key for authentication
self._header = {'api-key': 'asdf'}
def execute(self, userdata):
# Generate URL to contact
dataPost = {'name': userdata.name, 'x1': userdata.x1, 'x2': userdata.x2, 'x3': userdata.x3,'x4': userdata.x4,
'y1': userdata.y1, 'y2': userdata.y2, 'y3': userdata.y3, 'y4': userdata.y4}
# try the request
try:
response = requests.post("http://192.168.0.46:8000/api/rooms/", headers=self._header, data=dataPost)
except requests.exceptions.RequestException as e:
print e
return 'error'
# read response
data_response = json.loads(response.content)
# have a response
if not data_response:
return 'error'
# neither a name nor an ID to read
if 'id' not in data_response and 'name' not in data_response:
# continue to Error
return 'error'
# no ID to read: already registered
elif 'id' not in data_response:
return 'already_registered'
# return the ID
userdata.id = data_response['id']
return 'done'
|
Add a state for add rooms
#!/usr/bin/env python
# encoding=utf8
import json
import requests
from flexbe_core import EventState, Logger
class Wonderland_Add_Room(EventState):
'''
Add an object to Wonderland.
For the room, enter only ID or Name, not both.
Return the ID of the added room.
#> name string name of the room
#> x1 int position of the room
#> x2 int position of the room
#> x3 int position of the room
#> x4 int position of the room
#> y1 int position of the room
#> y2 int position of the room
#> y3 int position of the room
#> y4 int position of the room
#> id int ID on the BDD of the room
<= done data sent correctly
<= error error while reading data
<= already_registered
'''
def __init__(self):
super(Wonderland_Add_Room, self).__init__(outcomes=['done', 'already_registered', 'error'],
output_keys=['id'],
input_keys=['name', 'x1', 'x2', 'x3', 'x4', 'y1', 'y2', 'y3', 'y4'])
# generate post key for authentication
self._header = {'api-key': 'asdf'}
def execute(self, userdata):
# Generate URL to contact
dataPost = {'name': userdata.name, 'x1': userdata.x1, 'x2': userdata.x2, 'x3': userdata.x3,'x4': userdata.x4,
'y1': userdata.y1, 'y2': userdata.y2, 'y3': userdata.y3, 'y4': userdata.y4}
# try the request
try:
response = requests.post("http://192.168.0.46:8000/api/rooms/", headers=self._header, data=dataPost)
except requests.exceptions.RequestException as e:
print e
return 'error'
# read response
data_response = json.loads(response.content)
# have a response
if not data_response:
return 'error'
# neither a name nor an ID to read
if 'id' not in data_response and 'name' not in data_response:
# continue to Error
return 'error'
# no ID to read: already registered
elif 'id' not in data_response:
return 'already_registered'
# return the ID
userdata.id = data_response['id']
return 'done'
|
<commit_before><commit_msg>Add a state for add rooms<commit_after>#!/usr/bin/env python
# encoding=utf8
import json
import requests
from flexbe_core import EventState, Logger
class Wonderland_Add_Room(EventState):
'''
Add an object to Wonderland.
For the room, enter only ID or Name, not both.
Return the ID of the added room.
#> name string name of the room
#> x1 int position of the room
#> x2 int position of the room
#> x3 int position of the room
#> x4 int position of the room
#> y1 int position of the room
#> y2 int position of the room
#> y3 int position of the room
#> y4 int position of the room
#> id int ID on the BDD of the room
<= done data sent correctly
<= error error while reading data
<= already_registered
'''
def __init__(self):
super(Wonderland_Add_Room, self).__init__(outcomes=['done', 'already_registered', 'error'],
output_keys=['id'],
input_keys=['name', 'x1', 'x2', 'x3', 'x4', 'y1', 'y2', 'y3', 'y4'])
# generate post key for authentication
self._header = {'api-key': 'asdf'}
def execute(self, userdata):
# Generate URL to contact
dataPost = {'name': userdata.name, 'x1': userdata.x1, 'x2': userdata.x2, 'x3': userdata.x3,'x4': userdata.x4,
'y1': userdata.y1, 'y2': userdata.y2, 'y3': userdata.y3, 'y4': userdata.y4}
# try the request
try:
response = requests.post("http://192.168.0.46:8000/api/rooms/", headers=self._header, data=dataPost)
except requests.exceptions.RequestException as e:
print e
return 'error'
# read response
data_response = json.loads(response.content)
# have a response
if not data_response:
return 'error'
# neither a name nor an ID to read
if 'id' not in data_response and 'name' not in data_response:
# continue to Error
return 'error'
# no ID to read: already registered
elif 'id' not in data_response:
return 'already_registered'
# return the ID
userdata.id = data_response['id']
return 'done'
|
|
3ea8852e504c8503da0ca921ae26e7d333d877f8
|
src/ggrc/migrations/versions/20170224125102_4c5be77c5da3_add_slug_to_revisions.py
|
src/ggrc/migrations/versions/20170224125102_4c5be77c5da3_add_slug_to_revisions.py
|
# Copyright (C) 2017 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""
Add slug to revisions
Create Date: 2017-02-24 12:51:02.131671
"""
# disable Invalid constant name pylint warning for mandatory Alembic variables.
# pylint: disable=invalid-name
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = "4c5be77c5da3"
down_revision = "341f8a645b2f"
def upgrade():
"""Add resource_slug to revisions table."""
op.add_column(
"revisions",
sa.Column("resource_slug", sa.String(length=250), nullable=True)
)
op.create_index(
"ix_revisions_resource_slug",
"revisions",
["resource_slug"],
unique=False,
)
def downgrade():
"""Remove resource_slug from revisions table."""
op.drop_column("revisions", "resource_slug")
|
Add resource_slug to revisions table
|
Add resource_slug to revisions table
|
Python
|
apache-2.0
|
AleksNeStu/ggrc-core,plamut/ggrc-core,AleksNeStu/ggrc-core,VinnieJohns/ggrc-core,VinnieJohns/ggrc-core,VinnieJohns/ggrc-core,plamut/ggrc-core,plamut/ggrc-core,plamut/ggrc-core,AleksNeStu/ggrc-core,AleksNeStu/ggrc-core,VinnieJohns/ggrc-core
|
Add resource_slug to revisions table
|
# Copyright (C) 2017 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""
Add slug to revisions
Create Date: 2017-02-24 12:51:02.131671
"""
# disable Invalid constant name pylint warning for mandatory Alembic variables.
# pylint: disable=invalid-name
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = "4c5be77c5da3"
down_revision = "341f8a645b2f"
def upgrade():
"""Add resource_slug to revisions table."""
op.add_column(
"revisions",
sa.Column("resource_slug", sa.String(length=250), nullable=True)
)
op.create_index(
"ix_revisions_resource_slug",
"revisions",
["resource_slug"],
unique=False,
)
def downgrade():
"""Remove resource_slug from revisions table."""
op.drop_column("revisions", "resource_slug")
|
<commit_before><commit_msg>Add resource_slug to revisions table<commit_after>
|
# Copyright (C) 2017 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""
Add slug to revisions
Create Date: 2017-02-24 12:51:02.131671
"""
# disable Invalid constant name pylint warning for mandatory Alembic variables.
# pylint: disable=invalid-name
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = "4c5be77c5da3"
down_revision = "341f8a645b2f"
def upgrade():
"""Add resource_slug to revisions table."""
op.add_column(
"revisions",
sa.Column("resource_slug", sa.String(length=250), nullable=True)
)
op.create_index(
"ix_revisions_resource_slug",
"revisions",
["resource_slug"],
unique=False,
)
def downgrade():
"""Remove resource_slug from revisions table."""
op.drop_column("revisions", "resource_slug")
|
Add resource_slug to revisions table
# Copyright (C) 2017 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""
Add slug to revisions
Create Date: 2017-02-24 12:51:02.131671
"""
# disable Invalid constant name pylint warning for mandatory Alembic variables.
# pylint: disable=invalid-name
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = "4c5be77c5da3"
down_revision = "341f8a645b2f"
def upgrade():
"""Add resource_slug to revisions table."""
op.add_column(
"revisions",
sa.Column("resource_slug", sa.String(length=250), nullable=True)
)
op.create_index(
"ix_revisions_resource_slug",
"revisions",
["resource_slug"],
unique=False,
)
def downgrade():
"""Remove resource_slug from revisions table."""
op.drop_column("revisions", "resource_slug")
|
<commit_before><commit_msg>Add resource_slug to revisions table<commit_after># Copyright (C) 2017 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""
Add slug to revisions
Create Date: 2017-02-24 12:51:02.131671
"""
# disable Invalid constant name pylint warning for mandatory Alembic variables.
# pylint: disable=invalid-name
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = "4c5be77c5da3"
down_revision = "341f8a645b2f"
def upgrade():
"""Add resource_slug to revisions table."""
op.add_column(
"revisions",
sa.Column("resource_slug", sa.String(length=250), nullable=True)
)
op.create_index(
"ix_revisions_resource_slug",
"revisions",
["resource_slug"],
unique=False,
)
def downgrade():
"""Remove resource_slug from revisions table."""
op.drop_column("revisions", "resource_slug")
|
|
51f044f217da30d62320444c16259f51b66da4d1
|
exp/alto/tools/format_tree.py
|
exp/alto/tools/format_tree.py
|
#!/usr/bin/env python3
import sys
import re
def format_tree(): #converts from Penn Treebank to Stanford output
regex = re.compile(r"\(([A-Za-z_$]+)")
with open(sys.argv[1]) as np_lines:
for line in np_lines:
print(regex.sub(r"\1(", line), end="")
format_tree()
|
Add script for converting Penn Treebank to Stanford format
|
Add script for converting Penn Treebank to Stanford format
|
Python
|
mit
|
kornai/4lang,kornai/4lang,kornai/4lang,kornai/4lang
|
Add script for converting Penn Treebank to Stanford format
|
#!/usr/bin/env python3
import sys
import re
def format_tree(): #converts from Penn Treebank to Stanford output
regex = re.compile(r"\(([A-Za-z_$]+)")
with open(sys.argv[1]) as np_lines:
for line in np_lines:
print(regex.sub(r"\1(", line), end="")
format_tree()
|
<commit_before><commit_msg>Add script for converting Penn Treebank to Stanford format<commit_after>
|
#!/usr/bin/env python3
import sys
import re
def format_tree(): #converts from Penn Treebank to Stanford output
regex = re.compile(r"\(([A-Za-z_$]+)")
with open(sys.argv[1]) as np_lines:
for line in np_lines:
print(regex.sub(r"\1(", line), end="")
format_tree()
|
Add script for converting Penn Treebank to Stanford format
#!/usr/bin/env python3
import sys
import re
def format_tree(): #converts from Penn Treebank to Stanford output
regex = re.compile(r"\(([A-Za-z_$]+)")
with open(sys.argv[1]) as np_lines:
for line in np_lines:
print(regex.sub(r"\1(", line), end="")
format_tree()
|
<commit_before><commit_msg>Add script for converting Penn Treebank to Stanford format<commit_after>#!/usr/bin/env python3
import sys
import re
def format_tree(): #converts from Penn Treebank to Stanford output
regex = re.compile(r"\(([A-Za-z_$]+)")
with open(sys.argv[1]) as np_lines:
for line in np_lines:
print(regex.sub(r"\1(", line), end="")
format_tree()
|
|
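For reference, a standalone demonstration of what the substitution above does to a single Penn Treebank line; the sample tree is invented.

import re

regex = re.compile(r"\(([A-Za-z_$]+)")
# Each opening "(LABEL" becomes "LABEL(", giving Stanford-style output.
print(regex.sub(r"\1(", "(S (NP (DT The) (NN cat)))"))
# prints: S( NP( DT( The) NN( cat)))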
836d02874eb8c69a51bf6f40877433d106b5057f
|
opentreemap/otm1_migrator/management/commands/post_migrate_validation.py
|
opentreemap/otm1_migrator/management/commands/post_migrate_validation.py
|
# -*- coding: utf-8 -*-
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from django.conf import settings
from django.contrib.contenttypes.models import ContentType
from treemap.models import Species, Instance, Tree, Plot, Audit, TreePhoto
from treemap.management.util import InstanceDataCommand
from otm1_migrator.models import OTM1ModelRelic
class Command(InstanceDataCommand):
def pseudo_assert_equals(self, item1, item2, description):
isEqual = 'SUCCESS' if item1 == item2 else 'WARNING'
self.stdout.write('testing assertion: %s ... %s - %s / %s'
% (description, isEqual, item1, item2))
def test_class(self, clz, instance_id=None):
name = clz.__name__.lower()
objects = clz.objects.all()
if instance_id:
objects = objects.filter(instance_id=instance_id)
object_count = objects.count()
object_relics = OTM1ModelRelic.objects.filter(
otm2_model_name__iexact=name)
if instance_id:
object_relics = object_relics.filter(instance_id=instance_id)
object_relic_count = object_relics.count()
self.pseudo_assert_equals(object_count, object_relic_count,
'there are an equal number of '
'%s and %s relics.'
% (name, name))
def handle(self, *args, **options):
if settings.DEBUG:
self.stdout.write('In order to run this command you must manually '
'set DEBUG=False in your settings file. '
'Unfortunately, django runs out of memory when '
'this command is run in DEBUG mode.')
return 1
try:
instance_id = options['instance']
Instance.objects.get(pk=instance_id)
except Instance.DoesNotExist:
self.stdout.write('Invalid instance provided.')
return 1
for clz in (Tree, Plot, Audit, TreePhoto, Species):
self.test_class(clz, instance_id=instance_id)
self.test_class(ContentType)
|
Add mgmt command for basic otm1 migration checks
|
Add mgmt command for basic otm1 migration checks
|
Python
|
agpl-3.0
|
recklessromeo/otm-core,RickMohr/otm-core,RickMohr/otm-core,maurizi/otm-core,RickMohr/otm-core,clever-crow-consulting/otm-core,recklessromeo/otm-core,maurizi/otm-core,clever-crow-consulting/otm-core,recklessromeo/otm-core,recklessromeo/otm-core,RickMohr/otm-core,maurizi/otm-core,maurizi/otm-core,clever-crow-consulting/otm-core,clever-crow-consulting/otm-core
|
Add mgmt command for basic otm1 migration checks
|
# -*- coding: utf-8 -*-
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from django.conf import settings
from django.contrib.contenttypes.models import ContentType
from treemap.models import Species, Instance, Tree, Plot, Audit, TreePhoto
from treemap.management.util import InstanceDataCommand
from otm1_migrator.models import OTM1ModelRelic
class Command(InstanceDataCommand):
def pseudo_assert_equals(self, item1, item2, description):
isEqual = 'SUCCESS' if item1 == item2 else 'WARNING'
self.stdout.write('testing assertion: %s ... %s - %s / %s'
% (description, isEqual, item1, item2))
def test_class(self, clz, instance_id=None):
name = clz.__name__.lower()
objects = clz.objects.all()
if instance_id:
objects = objects.filter(instance_id=instance_id)
object_count = objects.count()
object_relics = OTM1ModelRelic.objects.filter(
otm2_model_name__iexact=name)
if instance_id:
object_relics = object_relics.filter(instance_id=instance_id)
object_relic_count = object_relics.count()
self.pseudo_assert_equals(object_count, object_relic_count,
'there are an equal number of '
'%s and %s relics.'
% (name, name))
def handle(self, *args, **options):
if settings.DEBUG:
self.stdout.write('In order to run this command you must manually '
'set DEBUG=False in your settings file. '
'Unfortunately, django runs out of memory when '
'this command is run in DEBUG mode.')
return 1
try:
instance_id = options['instance']
Instance.objects.get(pk=instance_id)
except Instance.DoesNotExist:
self.stdout.write('Invalid instance provided.')
return 1
for clz in (Tree, Plot, Audit, TreePhoto, Species):
self.test_class(clz, instance_id=instance_id)
self.test_class(ContentType)
|
<commit_before><commit_msg>Add mgmt command for basic otm1 migration checks<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from django.conf import settings
from django.contrib.contenttypes.models import ContentType
from treemap.models import Species, Instance, Tree, Plot, Audit, TreePhoto
from treemap.management.util import InstanceDataCommand
from otm1_migrator.models import OTM1ModelRelic
class Command(InstanceDataCommand):
def pseudo_assert_equals(self, item1, item2, description):
isEqual = 'SUCCESS' if item1 == item2 else 'WARNING'
self.stdout.write('testing assertion: %s ... %s - %s / %s'
% (description, isEqual, item1, item2))
def test_class(self, clz, instance_id=None):
name = clz.__name__.lower()
objects = clz.objects.all()
if instance_id:
objects = objects.filter(instance_id=instance_id)
object_count = objects.count()
object_relics = OTM1ModelRelic.objects.filter(
otm2_model_name__iexact=name)
if instance_id:
object_relics = object_relics.filter(instance_id=instance_id)
object_relic_count = object_relics.count()
self.pseudo_assert_equals(object_count, object_relic_count,
'there are an equal number of '
'%s and %s relics.'
% (name, name))
def handle(self, *args, **options):
if settings.DEBUG:
self.stdout.write('In order to run this command you must manually '
'set DEBUG=False in your settings file. '
'Unfortunately, django runs out of memory when '
'this command is run in DEBUG mode.')
return 1
try:
instance_id = options['instance']
Instance.objects.get(pk=instance_id)
except Instance.DoesNotExist:
self.stdout.write('Invalid instance provided.')
return 1
for clz in (Tree, Plot, Audit, TreePhoto, Species):
self.test_class(clz, instance_id=instance_id)
self.test_class(ContentType)
|
Add mgmt command for basic otm1 migration checks
# -*- coding: utf-8 -*-
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from django.conf import settings
from django.contrib.contenttypes.models import ContentType
from treemap.models import Species, Instance, Tree, Plot, Audit, TreePhoto
from treemap.management.util import InstanceDataCommand
from otm1_migrator.models import OTM1ModelRelic
class Command(InstanceDataCommand):
def pseudo_assert_equals(self, item1, item2, description):
isEqual = 'SUCCESS' if item1 == item2 else 'WARNING'
self.stdout.write('testing assertion: %s ... %s - %s / %s'
% (description, isEqual, item1, item2))
def test_class(self, clz, instance_id=None):
name = clz.__name__.lower()
objects = clz.objects.all()
if instance_id:
objects = objects.filter(instance_id=instance_id)
object_count = objects.count()
object_relics = OTM1ModelRelic.objects.filter(
otm2_model_name__iexact=name)
if instance_id:
object_relics = object_relics.filter(instance_id=instance_id)
object_relic_count = object_relics.count()
self.pseudo_assert_equals(object_count, object_relic_count,
'there are an equal number of '
'%s and %s relics.'
% (name, name))
def handle(self, *args, **options):
if settings.DEBUG:
self.stdout.write('In order to run this command you must manually '
'set DEBUG=False in your settings file. '
'Unfortunately, django runs out of memory when '
'this command is run in DEBUG mode.')
return 1
try:
instance_id = options['instance']
Instance.objects.get(pk=instance_id)
except Instance.DoesNotExist:
self.stdout.write('Invalid instance provided.')
return 1
for clz in (Tree, Plot, Audit, TreePhoto, Species):
self.test_class(clz, instance_id=instance_id)
self.test_class(ContentType)
|
<commit_before><commit_msg>Add mgmt command for basic otm1 migration checks<commit_after># -*- coding: utf-8 -*-
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from django.conf import settings
from django.contrib.contenttypes.models import ContentType
from treemap.models import Species, Instance, Tree, Plot, Audit, TreePhoto
from treemap.management.util import InstanceDataCommand
from otm1_migrator.models import OTM1ModelRelic
class Command(InstanceDataCommand):
def pseudo_assert_equals(self, item1, item2, description):
isEqual = 'SUCCESS' if item1 == item2 else 'WARNING'
self.stdout.write('testing assertion: %s ... %s - %s / %s'
% (description, isEqual, item1, item2))
def test_class(self, clz, instance_id=None):
name = clz.__name__.lower()
objects = clz.objects.all()
if instance_id:
objects = objects.filter(instance_id=instance_id)
object_count = objects.count()
object_relics = OTM1ModelRelic.objects.filter(
otm2_model_name__iexact=name)
if instance_id:
object_relics = object_relics.filter(instance_id=instance_id)
object_relic_count = object_relics.count()
self.pseudo_assert_equals(object_count, object_relic_count,
'there are an equal number of '
'%s and %s relics.'
% (name, name))
def handle(self, *args, **options):
if settings.DEBUG:
self.stdout.write('In order to run this command you must manually '
'set DEBUG=False in your settings file. '
'Unfortunately, django runs out of memory when '
'this command is run in DEBUG mode.')
return 1
try:
instance_id = options['instance']
Instance.objects.get(pk=instance_id)
except Instance.DoesNotExist:
self.stdout.write('Invalid instance provided.')
return 1
for clz in (Tree, Plot, Audit, TreePhoto, Species):
self.test_class(clz, instance_id=instance_id)
self.test_class(ContentType)
|
|
af33848acf1a4dd62525d83ea061101f54805223
|
tests/rules_tests/CountTest.py
|
tests/rules_tests/CountTest.py
|
#!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 23.06.2017 16:39
:Licence GNUv3
Part of grammpy
"""
from unittest import main, TestCase
from grammpy import Rule
class CountTest(TestCase):
pass
if __name__ == '__main__':
main()
|
Add file for rule's count method
|
Add file for rule's count method
|
Python
|
mit
|
PatrikValkovic/grammpy
|
Add file for rule's count method
|
#!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 23.06.2017 16:39
:Licence GNUv3
Part of grammpy
"""
from unittest import main, TestCase
from grammpy import Rule
class CountTest(TestCase):
pass
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add file for rule's count method<commit_after>
|
#!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 23.06.2017 16:39
:Licence GNUv3
Part of grammpy
"""
from unittest import main, TestCase
from grammpy import Rule
class CountTest(TestCase):
pass
if __name__ == '__main__':
main()
|
Add file for rule's count method
#!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 23.06.2017 16:39
:Licence GNUv3
Part of grammpy
"""
from unittest import main, TestCase
from grammpy import Rule
class CountTest(TestCase):
pass
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add file for rule's count method<commit_after>#!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 23.06.2017 16:39
:Licence GNUv3
Part of grammpy
"""
from unittest import main, TestCase
from grammpy import Rule
class CountTest(TestCase):
pass
if __name__ == '__main__':
main()
|
|
d4ed3b57689ad7e18f8e81ed58dc7748acc35591
|
wsynphot/io/cache_filters.py
|
wsynphot/io/cache_filters.py
|
import os, re
from wsynphot.io.get_filter_data import (get_filter_index,
get_transmission_data)
from wsynphot.config import get_data_dir
CACHE_DIR = os.path.join(get_data_dir(), 'cached_SVO_FPS')
if not os.path.exists(CACHE_DIR):
os.mkdir(CACHE_DIR)
def cache_as_votable(table, file_path):
"""Caches the passed table on disk as a VOTable.
Parameters
----------
table : astropy.table.Table
Table to be cached
file_path : str
Path where VOTable is to be saved
"""
if not file_path.endswith('.vot'):
file_path += '.vot'
dir_path = os.path.dirname(file_path)
if not os.path.exists(dir_path):
os.makedirs(dir_path)
# Write table as votable (overwrite for cases when file already exists)
table.write(file_path, format='votable', overwrite=True)
def download_filter_data(cache_dir=CACHE_DIR):
"""Downloads the entire filter data (filter index and transmission data
of each filter) locally on disk as cache.
Parameters
----------
cache_dir : str, optional
Path of the directory where downloaded data is to be cached
"""
# Get filter index and cache it
index_table = get_filter_index().to_table()
cache_as_votable(index_table,
os.path.join(cache_dir, 'index'))
# Fetch filter_ids from index & iterate
for filter_id in index_table['filterID']:
filter_id = filter_id.decode("utf-8") # convert byte string to str
# Get transmission data for a filter_id and cache it
try:
print("caching {0} ...".format(filter_id))
facility, instrument, filter_name = re.split('/|\.', filter_id)
filter_table = get_transmission_data(filter_id).to_table()
cache_as_votable(filter_table,
os.path.join(cache_dir, facility, instrument, filter_name))
except Exception as e:
print('Data for Filter ID = {0} could not be downloaded due '
'to:\n{1}'.format(filter_id, e))
|
Add function to download filter data & cache it on disk
|
Add function to download filter data & cache it on disk
|
Python
|
bsd-3-clause
|
wkerzendorf/wsynphot
|
Add function to download filter data & cache it on disk
|
import os, re
from wsynphot.io.get_filter_data import (get_filter_index,
get_transmission_data)
from wsynphot.config import get_data_dir
CACHE_DIR = os.path.join(get_data_dir(), 'cached_SVO_FPS')
if not os.path.exists(CACHE_DIR):
os.mkdir(CACHE_DIR)
def cache_as_votable(table, file_path):
"""Caches the passed table on disk as a VOTable.
Parameters
----------
table : astropy.table.Table
Table to be cached
file_path : str
Path where VOTable is to be saved
"""
if not file_path.endswith('.vot'):
file_path += '.vot'
dir_path = os.path.dirname(file_path)
if not os.path.exists(dir_path):
os.makedirs(dir_path)
# Write table as votable (overwrite for cases when file already exists)
table.write(file_path, format='votable', overwrite=True)
def download_filter_data(cache_dir=CACHE_DIR):
"""Downloads the entire filter data (filter index and transmission data
of each filter) locally on disk as cache.
Parameters
----------
cache_dir : str, optional
Path of the directory where downloaded data is to be cached
"""
# Get filter index and cache it
index_table = get_filter_index().to_table()
cache_as_votable(index_table,
os.path.join(cache_dir, 'index'))
# Fetch filter_ids from index & iterate
for filter_id in index_table['filterID']:
filter_id = filter_id.decode("utf-8") # convert byte string to str
# Get transmission data for a filter_id and cache it
try:
print("caching {0} ...".format(filter_id))
facility, instrument, filter_name = re.split('/|\.', filter_id)
filter_table = get_transmission_data(filter_id).to_table()
cache_as_votable(filter_table,
os.path.join(cache_dir, facility, instrument, filter_name))
except Exception as e:
print('Data for Filter ID = {0} could not be downloaded due '
'to:\n{1}'.format(filter_id, e))
|
<commit_before><commit_msg>Add function to download filter data & cache it on disk<commit_after>
|
import os, re
from wsynphot.io.get_filter_data import (get_filter_index,
get_transmission_data)
from wsynphot.config import get_data_dir
CACHE_DIR = os.path.join(get_data_dir(), 'cached_SVO_FPS')
if not os.path.exists(CACHE_DIR):
os.mkdir(CACHE_DIR)
def cache_as_votable(table, file_path):
"""Caches the passed table on disk as a VOTable.
Parameters
----------
table : astropy.table.Table
Table to be cached
file_path : str
Path where VOTable is to be saved
"""
if not file_path.endswith('.vot'):
file_path += '.vot'
dir_path = os.path.dirname(file_path)
if not os.path.exists(dir_path):
os.makedirs(dir_path)
# Write table as votable (overwrite for cases when file already exists)
table.write(file_path, format='votable', overwrite=True)
def download_filter_data(cache_dir=CACHE_DIR):
"""Downloads the entire filter data (filter index and transmission data
of each filter) locally on disk as cache.
Parameters
----------
cache_dir : str, optional
Path of the directory where downloaded data is to be cached
"""
# Get filter index and cache it
index_table = get_filter_index().to_table()
cache_as_votable(index_table,
os.path.join(cache_dir, 'index'))
# Fetch filter_ids from index & iterate
for filter_id in index_table['filterID']:
filter_id = filter_id.decode("utf-8") # convert byte string to str
# Get transmission data for a filter_id and cache it
try:
print("caching {0} ...".format(filter_id))
facility, instrument, filter_name = re.split('/|\.', filter_id)
filter_table = get_transmission_data(filter_id).to_table()
cache_as_votable(filter_table,
os.path.join(cache_dir, facility, instrument, filter_name))
except Exception as e:
print('Data for Filter ID = {0} could not be downloaded due '
'to:\n{1}'.format(filter_id, e))
|
Add function to download filter data & cache it on disk
import os, re
from wsynphot.io.get_filter_data import (get_filter_index,
get_transmission_data)
from wsynphot.config import get_data_dir
CACHE_DIR = os.path.join(get_data_dir(), 'cached_SVO_FPS')
if not os.path.exists(CACHE_DIR):
os.mkdir(CACHE_DIR)
def cache_as_votable(table, file_path):
"""Caches the passed table on disk as a VOTable.
Parameters
----------
table : astropy.table.Table
Table to be cached
file_path : str
Path where VOTable is to be saved
"""
if not file_path.endswith('.vot'):
file_path += '.vot'
dir_path = os.path.dirname(file_path)
if not os.path.exists(dir_path):
os.makedirs(dir_path)
# Write table as votable (overwrite for cases when file already exists)
table.write(file_path, format='votable', overwrite=True)
def download_filter_data(cache_dir=CACHE_DIR):
"""Downloads the entire filter data (filter index and transmission data
of each filter) locally on disk as cache.
Parameters
----------
cache_dir : str, optional
Path of the directory where downloaded data is to be cached
"""
# Get filter index and cache it
index_table = get_filter_index().to_table()
cache_as_votable(index_table,
os.path.join(cache_dir, 'index'))
# Fetch filter_ids from index & iterate
for filter_id in index_table['filterID']:
filter_id = filter_id.decode("utf-8") # convert byte string to str
# Get transmission data for a filter_id and cache it
try:
print("caching {0} ...".format(filter_id))
facility, instrument, filter_name = re.split('/|\.', filter_id)
filter_table = get_transmission_data(filter_id).to_table()
cache_as_votable(filter_table,
os.path.join(cache_dir, facility, instrument, filter_name))
except Exception as e:
print('Data for Filter ID = {0} could not be downloaded due '
'to:\n{1}'.format(filter_id, e))
|
<commit_before><commit_msg>Add function to download filter data & cache it on disk<commit_after>import os, re
from wsynphot.io.get_filter_data import (get_filter_index,
get_transmission_data)
from wsynphot.config import get_data_dir
CACHE_DIR = os.path.join(get_data_dir(), 'cached_SVO_FPS')
if not os.path.exists(CACHE_DIR):
os.mkdir(CACHE_DIR)
def cache_as_votable(table, file_path):
"""Caches the passed table on disk as a VOTable.
Parameters
----------
table : astropy.table.Table
Table to be cached
file_path : str
Path where VOTable is to be saved
"""
if not file_path.endswith('.vot'):
file_path += '.vot'
dir_path = os.path.dirname(file_path)
if not os.path.exists(dir_path):
os.makedirs(dir_path)
# Write table as votable (overwrite for cases when file already exists)
table.write(file_path, format='votable', overwrite=True)
def download_filter_data(cache_dir=CACHE_DIR):
"""Downloads the entire filter data (filter index and transmission data
of each filter) locally on disk as cache.
Parameters
----------
cache_dir : str, optional
Path of the directory where downloaded data is to be cached
"""
# Get filter index and cache it
index_table = get_filter_index().to_table()
cache_as_votable(index_table,
os.path.join(cache_dir, 'index'))
# Fetch filter_ids from index & iterate
for filter_id in index_table['filterID']:
filter_id = filter_id.decode("utf-8") # convert byte string to str
# Get transmission data for a filter_id and cache it
try:
print("caching {0} ...".format(filter_id))
facility, instrument, filter_name = re.split('/|\.', filter_id)
filter_table = get_transmission_data(filter_id).to_table()
cache_as_votable(filter_table,
os.path.join(cache_dir, facility, instrument, filter_name))
except Exception as e:
print('Data for Filter ID = {0} could not be downloaded due '
'to:\n{1}'.format(filter_id, e))
|
|
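A minimal usage sketch, assuming the wsynphot package above is installed and the SVO Filter Profile Service is reachable over the network.

from wsynphot.io.cache_filters import download_filter_data

# Fetches the filter index plus every filter's transmission table and
# caches them as VOTables under <data_dir>/cached_SVO_FPS/.
download_filter_data()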
6f79191e7aebabf55ca0a37578dcc8b43ee018b6
|
real_estate_agency/new_buildings/migrations/0002_builder_and_rcfeature_filepath.py
|
real_estate_agency/new_buildings/migrations/0002_builder_and_rcfeature_filepath.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-08-31 20:51
from __future__ import unicode_literals
from django.db import migrations, models
import real_estate.models.helper
class Migration(migrations.Migration):
dependencies = [
('new_buildings', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='builder',
name='logo',
field=models.ImageField(blank=True, null=True, upload_to=real_estate.models.helper.get_file_path, verbose_name='логотип компании'),
),
migrations.AlterField(
model_name='residentalcomplexfeature',
name='image',
field=models.ImageField(upload_to=real_estate.models.helper.get_file_path, verbose_name='изображение'),
),
]
|
Add missing migrations for upload_to for new_building app.
|
Add missing migrations for upload_to for new_building app.
Change upload_to for builder and residentalcomplexfeature.
|
Python
|
mit
|
Dybov/real_estate_agency,Dybov/real_estate_agency,Dybov/real_estate_agency
|
Add missing migrations for upload_to for new_building app.
Change upload_to for builder and residentalcomplexfeature.
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-08-31 20:51
from __future__ import unicode_literals
from django.db import migrations, models
import real_estate.models.helper
class Migration(migrations.Migration):
dependencies = [
('new_buildings', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='builder',
name='logo',
field=models.ImageField(blank=True, null=True, upload_to=real_estate.models.helper.get_file_path, verbose_name='логотип компании'),
),
migrations.AlterField(
model_name='residentalcomplexfeature',
name='image',
field=models.ImageField(upload_to=real_estate.models.helper.get_file_path, verbose_name='изображение'),
),
]
|
<commit_before><commit_msg>Add missing migrations for upload_to for new_building app.
Change upload_to for builder and residentalcomplexfeature.<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-08-31 20:51
from __future__ import unicode_literals
from django.db import migrations, models
import real_estate.models.helper
class Migration(migrations.Migration):
dependencies = [
('new_buildings', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='builder',
name='logo',
field=models.ImageField(blank=True, null=True, upload_to=real_estate.models.helper.get_file_path, verbose_name='логотип компании'),
),
migrations.AlterField(
model_name='residentalcomplexfeature',
name='image',
field=models.ImageField(upload_to=real_estate.models.helper.get_file_path, verbose_name='изображение'),
),
]
|
Add missing migrations for upload_to for new_building app.
Change upload_to for builder and residentalcomplexfeature.
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-08-31 20:51
from __future__ import unicode_literals
from django.db import migrations, models
import real_estate.models.helper
class Migration(migrations.Migration):
dependencies = [
('new_buildings', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='builder',
name='logo',
field=models.ImageField(blank=True, null=True, upload_to=real_estate.models.helper.get_file_path, verbose_name='логотип компании'),
),
migrations.AlterField(
model_name='residentalcomplexfeature',
name='image',
field=models.ImageField(upload_to=real_estate.models.helper.get_file_path, verbose_name='изображение'),
),
]
|
<commit_before><commit_msg>Add missing migrations for upload_to for new_building app.
Change upload_to for builder and residentalcomplexfeature.<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-08-31 20:51
from __future__ import unicode_literals
from django.db import migrations, models
import real_estate.models.helper
class Migration(migrations.Migration):
dependencies = [
('new_buildings', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='builder',
name='logo',
field=models.ImageField(blank=True, null=True, upload_to=real_estate.models.helper.get_file_path, verbose_name='логотип компании'),
),
migrations.AlterField(
model_name='residentalcomplexfeature',
name='image',
field=models.ImageField(upload_to=real_estate.models.helper.get_file_path, verbose_name='изображение'),
),
]
|
|
1a433d9ef7a6f89f3c11c9eb5429f5ad46af6d8a
|
Tools/scripts/reindent-rst.py
|
Tools/scripts/reindent-rst.py
|
#!/usr/bin/env python
# Make a reST file compliant to our pre-commit hook.
# Currently just remove trailing whitespace.
from __future__ import with_statement
import sys, re, shutil
ws_re = re.compile(r'\s+(\r?\n)$')
def main(argv=sys.argv):
rv = 0
for filename in argv[1:]:
try:
with open(filename, 'rb') as f:
lines = f.readlines()
new_lines = [ws_re.sub(r'\1', line) for line in lines]
if new_lines != lines:
print 'Fixing %s...' % filename
shutil.copyfile(filename, filename + '.bak')
with open(filename, 'wb') as f:
f.writelines(new_lines)
except Exception, err:
print 'Cannot fix %s: %s' % (filename, err)
rv = 1
return rv
if __name__ == '__main__':
sys.exit(main())
|
Add a script to fixup rst files if the pre-commit hook rejects them.
|
Add a script to fixup rst files if the pre-commit hook rejects them.
|
Python
|
mit
|
sk-/python2.7-type-annotator,sk-/python2.7-type-annotator,sk-/python2.7-type-annotator
|
Add a script to fixup rst files if the pre-commit hook rejects them.
|
#!/usr/bin/env python
# Make a reST file compliant to our pre-commit hook.
# Currently just remove trailing whitespace.
from __future__ import with_statement
import sys, re, shutil
ws_re = re.compile(r'\s+(\r?\n)$')
def main(argv=sys.argv):
rv = 0
for filename in argv[1:]:
try:
with open(filename, 'rb') as f:
lines = f.readlines()
new_lines = [ws_re.sub(r'\1', line) for line in lines]
if new_lines != lines:
print 'Fixing %s...' % filename
shutil.copyfile(filename, filename + '.bak')
with open(filename, 'wb') as f:
f.writelines(new_lines)
except Exception, err:
print 'Cannot fix %s: %s' % (filename, err)
rv = 1
return rv
if __name__ == '__main__':
sys.exit(main())
|
<commit_before><commit_msg>Add a script to fixup rst files if the pre-commit hook rejects them.<commit_after>
|
#!/usr/bin/env python
# Make a reST file compliant to our pre-commit hook.
# Currently just remove trailing whitespace.
from __future__ import with_statement
import sys, re, shutil
ws_re = re.compile(r'\s+(\r?\n)$')
def main(argv=sys.argv):
rv = 0
for filename in argv[1:]:
try:
with open(filename, 'rb') as f:
lines = f.readlines()
new_lines = [ws_re.sub(r'\1', line) for line in lines]
if new_lines != lines:
print 'Fixing %s...' % filename
shutil.copyfile(filename, filename + '.bak')
with open(filename, 'wb') as f:
f.writelines(new_lines)
except Exception, err:
print 'Cannot fix %s: %s' % (filename, err)
rv = 1
return rv
if __name__ == '__main__':
sys.exit(main())
|
Add a script to fixup rst files if the pre-commit hook rejects them.
#!/usr/bin/env python
# Make a reST file compliant to our pre-commit hook.
# Currently just remove trailing whitespace.
from __future__ import with_statement
import sys, re, shutil
ws_re = re.compile(r'\s+(\r?\n)$')
def main(argv=sys.argv):
rv = 0
for filename in argv[1:]:
try:
with open(filename, 'rb') as f:
lines = f.readlines()
new_lines = [ws_re.sub(r'\1', line) for line in lines]
if new_lines != lines:
print 'Fixing %s...' % filename
shutil.copyfile(filename, filename + '.bak')
with open(filename, 'wb') as f:
f.writelines(new_lines)
except Exception, err:
print 'Cannot fix %s: %s' % (filename, err)
rv = 1
return rv
if __name__ == '__main__':
sys.exit(main())
|
<commit_before><commit_msg>Add a script to fixup rst files if the pre-commit hook rejects them.<commit_after>#!/usr/bin/env python
# Make a reST file compliant to our pre-commit hook.
# Currently just remove trailing whitespace.
from __future__ import with_statement
import sys, re, shutil
ws_re = re.compile(r'\s+(\r?\n)$')
def main(argv=sys.argv):
rv = 0
for filename in argv[1:]:
try:
with open(filename, 'rb') as f:
lines = f.readlines()
new_lines = [ws_re.sub(r'\1', line) for line in lines]
if new_lines != lines:
print 'Fixing %s...' % filename
shutil.copyfile(filename, filename + '.bak')
with open(filename, 'wb') as f:
f.writelines(new_lines)
except Exception, err:
print 'Cannot fix %s: %s' % (filename, err)
rv = 1
return rv
if __name__ == '__main__':
sys.exit(main())
|
|
8273e6561f8ba9a6fda1bbddc4e15fdcdc15096f
|
file_templates.py
|
file_templates.py
|
#!/usr/bin/env python
def eprfile(charge, multiplicity, xyzfile):
"""
A default template for an EPR input file.
"""
return """! uks pbe0 def2-tzvpp def2-tzvpp/jk ri rijk pmodel somf(1x) noautostart tightscf grid5
%pal
nprocs 1
end
* xyzfile {0} {1} {2}.xyz *
%eprnmr
tol 1e-10
gtensor 1
ori centerofelcharge
printlevel 5
end
""".format(charge, multiplicity, xyzfile)
def pbsfile(xyzfile):
"""
A default template for a PBS job file.
"""
return """#!/bin/bash
#PBS -N {0}
#PBS -q shared
#PBS -l nodes=1:ppn=1
#PBS -l walltime=144:00:00
#PBS -j oe
#PBS -l qos=low
#PBS -m abe
#PBS -M erb74@pitt.edu
module purge
module load intel/2013.0
module load openmpi/1.6.5-intel12
module load orca/3.0.1
cp $PBS_O_WORKDIR/{0}.inp $LOCAL
cp $PBS_O_WORKDIR/{0}.xyz $LOCAL
cd $LOCAL
run_on_exit() {{
set -v
cp $LOCAL/* $PBS_O_WORKDIR
}}
trap run_on_exit EXIT
$(which orca) {0}.inp >& $PBS_O_WORKDIR/{0}.out
""".format(xyzfile)
|
Add starting file templates for PBS/Torque job submission, ORCA EPR calculation.
|
Add starting file templates for PBS/Torque job submission, ORCA EPR calculation.
|
Python
|
mpl-2.0
|
berquist/mbe
|
Add starting file templates for PBS/Torque job submission, ORCA EPR calculation.
|
#!/usr/bin/env python
def eprfile(charge, multiplicity, xyzfile):
"""
A default template for an EPR input file.
"""
return """! uks pbe0 def2-tzvpp def2-tzvpp/jk ri rijk pmodel somf(1x) noautostart tightscf grid5
%pal
nprocs 1
end
* xyzfile {0} {1} {2}.xyz *
%eprnmr
tol 1e-10
gtensor 1
ori centerofelcharge
printlevel 5
end
""".format(charge, multiplicity, xyzfile)
def pbsfile(xyzfile):
"""
A default template for a PBS job file.
"""
return """#!/bin/bash
#PBS -N {0}
#PBS -q shared
#PBS -l nodes=1:ppn=1
#PBS -l walltime=144:00:00
#PBS -j oe
#PBS -l qos=low
#PBS -m abe
#PBS -M erb74@pitt.edu
module purge
module load intel/2013.0
module load openmpi/1.6.5-intel12
module load orca/3.0.1
cp $PBS_O_WORKDIR/{0}.inp $LOCAL
cp $PBS_O_WORKDIR/{0}.xyz $LOCAL
cd $LOCAL
run_on_exit() {{
set -v
cp $LOCAL/* $PBS_O_WORKDIR
}}
trap run_on_exit EXIT
$(which orca) {0}.inp >& $PBS_O_WORKDIR/{0}.out
""".format(xyzfile)
|
<commit_before><commit_msg>Add starting file templates for PBS/Torque job submission, ORCA EPR calculation.<commit_after>
|
#!/usr/bin/env python
def eprfile(charge, multiplicity, xyzfile):
"""
A default template for an EPR input file.
"""
return """! uks pbe0 def2-tzvpp def2-tzvpp/jk ri rijk pmodel somf(1x) noautostart tightscf grid5
%pal
nprocs 1
end
* xyzfile {0} {1} {2}.xyz *
%eprnmr
tol 1e-10
gtensor 1
ori centerofelcharge
printlevel 5
end
""".format(charge, multiplicity, xyzfile)
def pbsfile(xyzfile):
"""
A default template for a PBS job file.
"""
return """#!/bin/bash
#PBS -N {0}
#PBS -q shared
#PBS -l nodes=1:ppn=1
#PBS -l walltime=144:00:00
#PBS -j oe
#PBS -l qos=low
#PBS -m abe
#PBS -M erb74@pitt.edu
module purge
module load intel/2013.0
module load openmpi/1.6.5-intel12
module load orca/3.0.1
cp $PBS_O_WORKDIR/{0}.inp $LOCAL
cp $PBS_O_WORKDIR/{0}.xyz $LOCAL
cd $LOCAL
run_on_exit() {{
set -v
cp $LOCAL/* $PBS_O_WORKDIR
}}
trap run_on_exit EXIT
$(which orca) {0}.inp >& $PBS_O_WORKDIR/{0}.out
""".format(xyzfile)
|
Add starting file templates for PBS/Torque job submission, ORCA EPR calculation.
#!/usr/bin/env python
def eprfile(charge, multiplicity, xyzfile):
"""
A default template for an EPR input file.
"""
return """! uks pbe0 def2-tzvpp def2-tzvpp/jk ri rijk pmodel somf(1x) noautostart tightscf grid5
%pal
nprocs 1
end
* xyzfile {0} {1} {2}.xyz *
%eprnmr
tol 1e-10
gtensor 1
ori centerofelcharge
printlevel 5
end
""".format(charge, multiplicity, xyzfile)
def pbsfile(xyzfile):
"""
A default template for a PBS job file.
"""
return """#!/bin/bash
#PBS -N {0}
#PBS -q shared
#PBS -l nodes=1:ppn=1
#PBS -l walltime=144:00:00
#PBS -j oe
#PBS -l qos=low
#PBS -m abe
#PBS -M erb74@pitt.edu
module purge
module load intel/2013.0
module load openmpi/1.6.5-intel12
module load orca/3.0.1
cp $PBS_O_WORKDIR/{0}.inp $LOCAL
cp $PBS_O_WORKDIR/{0}.xyz $LOCAL
cd $LOCAL
run_on_exit() {{
set -v
cp $LOCAL/* $PBS_O_WORKDIR
}}
trap run_on_exit EXIT
$(which orca) {0}.inp >& $PBS_O_WORKDIR/{0}.out
""".format(xyzfile)
|
<commit_before><commit_msg>Add starting file templates for PBS/Torque job submission, ORCA EPR calculation.<commit_after>#!/usr/bin/env python
def eprfile(charge, multiplicity, xyzfile):
"""
A default template for an EPR input file.
"""
return """! uks pbe0 def2-tzvpp def2-tzvpp/jk ri rijk pmodel somf(1x) noautostart tightscf grid5
%pal
nprocs 1
end
* xyzfile {0} {1} {2}.xyz *
%eprnmr
tol 1e-10
gtensor 1
ori centerofelcharge
printlevel 5
end
""".format(charge, multiplicity, xyzfile)
def pbsfile(xyzfile):
"""
A default template for a PBS job file.
"""
return """#!/bin/bash
#PBS -N {0}
#PBS -q shared
#PBS -l nodes=1:ppn=1
#PBS -l walltime=144:00:00
#PBS -j oe
#PBS -l qos=low
#PBS -m abe
#PBS -M erb74@pitt.edu
module purge
module load intel/2013.0
module load openmpi/1.6.5-intel12
module load orca/3.0.1
cp $PBS_O_WORKDIR/{0}.inp $LOCAL
cp $PBS_O_WORKDIR/{0}.xyz $LOCAL
cd $LOCAL
run_on_exit() {{
set -v
cp $LOCAL/* $PBS_O_WORKDIR
}}
trap run_on_exit EXIT
$(which orca) {0}.inp >& $PBS_O_WORKDIR/{0}.out
""".format(xyzfile)
|
|
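A minimal usage sketch for the two template functions above; the job basename, charge, and multiplicity here are hypothetical, and eprfile/pbsfile are assumed importable from the commit's module:

# Hypothetical driver (not part of the commit): write matching .inp and
# .pbs files for an assumed radical1.xyz structure file.
name = "radical1"
with open(name + ".inp", "w") as inp:
    inp.write(eprfile(charge=0, multiplicity=2, xyzfile=name))
with open(name + ".pbs", "w") as pbs:
    pbs.write(pbsfile(name))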
dfdce7895396ea0b80068543ed115094d677a6e2
|
tests/test_simulation_plot.py
|
tests/test_simulation_plot.py
|
"""
Tests Simulation class get_plot_data method
"""
import os
import yaml
import numpy as np
from teemof import Simulation
from teemof.parameters import k_parameters
k_ref_file = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'thermal-conductivity.yaml')
time_ref_file = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'time.yaml')
trial_dir = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'ideal-mof-trial')
trial_set_dir = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'ideal-mof-trial-set')
run_info_ref_file = os.path.join(trial_dir, 'Run1', 'run_info.yaml')
thermo_ref_file = os.path.join(trial_dir, 'Run1', 'thermo.yaml')
k_est_iso_ref = [0.8624217134742657, 0.6839092609282974, 0.9263423943319228, 0.8656413422445915, 0.8983945996223535,
0.8802163796582159, 0.6416173216846418, 0.8356379755434158, 0.7098404203275488, 0.8686063495516347]
def test_simulation_get_plot_data_for_run():
"""Test Simulation class get_plot_data method for pulling correct data for different plots of a run"""
k_par = k_parameters.copy()
k_par['read_thermo'] = True
run_dir = os.path.join(trial_dir, 'Run1')
sim = Simulation(read=run_dir, parameters=k_par, setup='run')
with open(k_ref_file, 'r') as kref:
k_ref = yaml.load(kref)
with open(time_ref_file, 'r') as tiref:
time_ref = yaml.load(tiref)
with open(thermo_ref_file, 'r') as tref:
thermo_ref = yaml.load(tref)
assert sim.get_plot_data('thermo') == thermo_ref
k_plot_data = sim.get_plot_data('k')
assert k_plot_data['x'] == time_ref
assert k_plot_data['y'][sim.run['directions'].index('x')] == k_ref
assert k_plot_data['legend'] == sim.run['directions']
|
Add simulation get_plot_data test for a run
|
Add simulation get_plot_data test for a run
|
Python
|
mit
|
kbsezginel/tee_mof,kbsezginel/tee_mof
|
Add simulation get_plot_data test for a run
|
"""
Tests Simulation class get_plot_data method
"""
import os
import yaml
import numpy as np
from teemof import Simulation
from teemof.parameters import k_parameters
k_ref_file = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'thermal-conductivity.yaml')
time_ref_file = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'time.yaml')
trial_dir = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'ideal-mof-trial')
trial_set_dir = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'ideal-mof-trial-set')
run_info_ref_file = os.path.join(trial_dir, 'Run1', 'run_info.yaml')
thermo_ref_file = os.path.join(trial_dir, 'Run1', 'thermo.yaml')
k_est_iso_ref = [0.8624217134742657, 0.6839092609282974, 0.9263423943319228, 0.8656413422445915, 0.8983945996223535,
0.8802163796582159, 0.6416173216846418, 0.8356379755434158, 0.7098404203275488, 0.8686063495516347]
def test_simulation_get_plot_data_for_run():
"""Test Simulation class get_plot_data method for pulling correct data for different plots of a run"""
k_par = k_parameters.copy()
k_par['read_thermo'] = True
run_dir = os.path.join(trial_dir, 'Run1')
sim = Simulation(read=run_dir, parameters=k_par, setup='run')
with open(k_ref_file, 'r') as kref:
k_ref = yaml.load(kref)
with open(time_ref_file, 'r') as tiref:
time_ref = yaml.load(tiref)
with open(thermo_ref_file, 'r') as tref:
thermo_ref = yaml.load(tref)
assert sim.get_plot_data('thermo') == thermo_ref
k_plot_data = sim.get_plot_data('k')
assert k_plot_data['x'] == time_ref
assert k_plot_data['y'][sim.run['directions'].index('x')] == k_ref
assert k_plot_data['legend'] == sim.run['directions']
|
<commit_before><commit_msg>Add simulation get_plot_data test for a run<commit_after>
|
"""
Tests Simulation class get_plot_data method
"""
import os
import yaml
import numpy as np
from teemof import Simulation
from teemof.parameters import k_parameters
k_ref_file = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'thermal-conductivity.yaml')
time_ref_file = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'time.yaml')
trial_dir = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'ideal-mof-trial')
trial_set_dir = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'ideal-mof-trial-set')
run_info_ref_file = os.path.join(trial_dir, 'Run1', 'run_info.yaml')
thermo_ref_file = os.path.join(trial_dir, 'Run1', 'thermo.yaml')
k_est_iso_ref = [0.8624217134742657, 0.6839092609282974, 0.9263423943319228, 0.8656413422445915, 0.8983945996223535,
0.8802163796582159, 0.6416173216846418, 0.8356379755434158, 0.7098404203275488, 0.8686063495516347]
def test_simulation_get_plot_data_for_run():
"""Test Simulation class get_plot_data method for pulling correct data for different plots of a run"""
k_par = k_parameters.copy()
k_par['read_thermo'] = True
run_dir = os.path.join(trial_dir, 'Run1')
sim = Simulation(read=run_dir, parameters=k_par, setup='run')
with open(k_ref_file, 'r') as kref:
k_ref = yaml.load(kref)
with open(time_ref_file, 'r') as tiref:
time_ref = yaml.load(tiref)
with open(thermo_ref_file, 'r') as tref:
thermo_ref = yaml.load(tref)
assert sim.get_plot_data('thermo') == thermo_ref
k_plot_data = sim.get_plot_data('k')
assert k_plot_data['x'] == time_ref
assert k_plot_data['y'][sim.run['directions'].index('x')] == k_ref
assert k_plot_data['legend'] == sim.run['directions']
|
Add simulation get_plot_data test for a run"""
Tests Simulation class get_plot_data method
"""
import os
import yaml
import numpy as np
from teemof import Simulation
from teemof.parameters import k_parameters
k_ref_file = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'thermal-conductivity.yaml')
time_ref_file = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'time.yaml')
trial_dir = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'ideal-mof-trial')
trial_set_dir = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'ideal-mof-trial-set')
run_info_ref_file = os.path.join(trial_dir, 'Run1', 'run_info.yaml')
thermo_ref_file = os.path.join(trial_dir, 'Run1', 'thermo.yaml')
k_est_iso_ref = [0.8624217134742657, 0.6839092609282974, 0.9263423943319228, 0.8656413422445915, 0.8983945996223535,
0.8802163796582159, 0.6416173216846418, 0.8356379755434158, 0.7098404203275488, 0.8686063495516347]
def test_simulation_get_plot_data_for_run():
"""Test Simulation class get_plot_data method for pulling correct data for different plots of a run"""
k_par = k_parameters.copy()
k_par['read_thermo'] = True
run_dir = os.path.join(trial_dir, 'Run1')
sim = Simulation(read=run_dir, parameters=k_par, setup='run')
with open(k_ref_file, 'r') as kref:
k_ref = yaml.load(kref)
with open(time_ref_file, 'r') as tiref:
time_ref = yaml.load(tiref)
with open(thermo_ref_file, 'r') as tref:
thermo_ref = yaml.load(tref)
assert sim.get_plot_data('thermo') == thermo_ref
k_plot_data = sim.get_plot_data('k')
assert k_plot_data['x'] == time_ref
assert k_plot_data['y'][sim.run['directions'].index('x')] == k_ref
assert k_plot_data['legend'] == sim.run['directions']
|
<commit_before><commit_msg>Add simulation get_plot_data test for a run<commit_after>"""
Tests Simulation class get_plot_data method
"""
import os
import yaml
import numpy as np
from teemof import Simulation
from teemof.parameters import k_parameters
k_ref_file = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'thermal-conductivity.yaml')
time_ref_file = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'time.yaml')
trial_dir = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'ideal-mof-trial')
trial_set_dir = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'ideal-mof-trial-set')
run_info_ref_file = os.path.join(trial_dir, 'Run1', 'run_info.yaml')
thermo_ref_file = os.path.join(trial_dir, 'Run1', 'thermo.yaml')
k_est_iso_ref = [0.8624217134742657, 0.6839092609282974, 0.9263423943319228, 0.8656413422445915, 0.8983945996223535,
0.8802163796582159, 0.6416173216846418, 0.8356379755434158, 0.7098404203275488, 0.8686063495516347]
def test_simulation_get_plot_data_for_run():
"""Test Simulation class get_plot_data method for pulling correct data for different plots of a run"""
k_par = k_parameters.copy()
k_par['read_thermo'] = True
run_dir = os.path.join(trial_dir, 'Run1')
sim = Simulation(read=run_dir, parameters=k_par, setup='run')
with open(k_ref_file, 'r') as kref:
k_ref = yaml.load(kref)
with open(time_ref_file, 'r') as tiref:
time_ref = yaml.load(tiref)
with open(thermo_ref_file, 'r') as tref:
thermo_ref = yaml.load(tref)
assert sim.get_plot_data('thermo') == thermo_ref
k_plot_data = sim.get_plot_data('k')
assert k_plot_data['x'] == time_ref
assert k_plot_data['y'][sim.run['directions'].index('x')] == k_ref
assert k_plot_data['legend'] == sim.run['directions']
|
|
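A hedged side note on the yaml.load calls in the test above: since PyYAML 5.1 a bare yaml.load without a Loader argument is deprecated, so on current PyYAML the safer, warning-free spelling is:

import yaml

# Equivalent load for plain-data reference files (assumes PyYAML >= 5.1).
k_ref_file = 'thermal-conductivity.yaml'  # the reference file used above
with open(k_ref_file, 'r') as kref:
    k_ref = yaml.safe_load(kref)  # or yaml.load(kref, Loader=yaml.SafeLoader)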
ff11bdd4affc4feb57b75ddfff3f705c8f57d1ba
|
share.py
|
share.py
|
#!/usr/bin/python
import smtplib
from email.mime.text import MIMEText
mailto_list="123997654@qq.com"
mail_host="smtp.gmail.com"
mail_user="yhhong1943"
mail_pass="hyh159263."
mail_postfix="gmail.com"
def send_mail(to_list,sub,content):
me="hello"+"<"+mail_user+"@"+mail_postfix+">"
msg = MIMEText(content,_subtype='html',_charset='gb2312')
msg['Subject'] = sub
msg['From'] = me
msg['To'] = to_list
try:
s = smtplib.SMTP(mail_host,587)
s.ehlo()
s.starttls()
s.login(mail_user,mail_pass)
s.sendmail(me, to_list, msg.as_string())
s.close()
return True
except Exception, e:
print str(e)
return False
if __name__ == '__main__':
if send_mail(mailto_list,"hello","<a href='http://blog.csdn.net/yinghuihong'>yinghuihong..</a>"):
print "success"
else:
print "fail"
|
Test push to the second repository.
|
Test push to the second repository.
|
Python
|
apache-2.0
|
yhhong/android-json-http
|
Test push to the second repository.
|
#!/usr/bin/python
import smtplib
from email.mime.text import MIMEText
mailto_list="123997654@qq.com"
mail_host="smtp.gmail.com"
mail_user="yhhong1943"
mail_pass="hyh159263."
mail_postfix="gmail.com"
def send_mail(to_list,sub,content):
me="hello"+"<"+mail_user+"@"+mail_postfix+">"
msg = MIMEText(content,_subtype='html',_charset='gb2312')
msg['Subject'] = sub
msg['From'] = me
msg['To'] = to_list
try:
s = smtplib.SMTP(mail_host,587)
s.ehlo()
s.starttls()
s.login(mail_user,mail_pass)
s.sendmail(me, to_list, msg.as_string())
s.close()
return True
except Exception, e:
print str(e)
return False
if __name__ == '__main__':
if send_mail(mailto_list,"hello","<a href='http://blog.csdn.net/yinghuihong'>yinghuihong..</a>"):
print "success"
else:
print "fail"
|
<commit_before><commit_msg>Test push to the second repository.<commit_after>
|
#!/usr/bin/python
import smtplib
from email.mime.text import MIMEText
mailto_list="123997654@qq.com"
mail_host="smtp.gmail.com"
mail_user="yhhong1943"
mail_pass="hyh159263."
mail_postfix="gmail.com"
def send_mail(to_list,sub,content):
me="hello"+"<"+mail_user+"@"+mail_postfix+">"
msg = MIMEText(content,_subtype='html',_charset='gb2312')
msg['Subject'] = sub
msg['From'] = me
msg['To'] = to_list
try:
s = smtplib.SMTP(mail_host,587)
s.ehlo()
s.starttls()
s.login(mail_user,mail_pass)
s.sendmail(me, to_list, msg.as_string())
s.close()
return True
except Exception, e:
print str(e)
return False
if __name__ == '__main__':
if send_mail(mailto_list,"hello","<a href='http://blog.csdn.net/yinghuihong'>yinghuihong..</a>"):
print "success"
else:
print "fail"
|
Test push to the second repository.#!/usr/bin/python
import smtplib
from email.mime.text import MIMEText
mailto_list="123997654@qq.com"
mail_host="smtp.gmail.com"
mail_user="yhhong1943"
mail_pass="hyh159263."
mail_postfix="gmail.com"
def send_mail(to_list,sub,content):
me="hello"+"<"+mail_user+"@"+mail_postfix+">"
msg = MIMEText(content,_subtype='html',_charset='gb2312')
msg['Subject'] = sub
msg['From'] = me
msg['To'] = to_list
try:
s = smtplib.SMTP(mail_host,587)
s.ehlo()
s.starttls()
s.login(mail_user,mail_pass)
s.sendmail(me, to_list, msg.as_string())
s.close()
return True
except Exception, e:
print str(e)
return False
if __name__ == '__main__':
if send_mail(mailto_list,"hello","<a href='http://blog.csdn.net/yinghuihong'>yinghuihong..</a>"):
print "success"
else:
print "fail"
|
<commit_before><commit_msg>Test push to the second repository.<commit_after>#!/usr/bin/python
import smtplib
from email.mime.text import MIMEText
mailto_list="123997654@qq.com"
mail_host="smtp.gmail.com"
mail_user="yhhong1943"
mail_pass="hyh159263."
mail_postfix="gmail.com"
def send_mail(to_list,sub,content):
me="hello"+"<"+mail_user+"@"+mail_postfix+">"
msg = MIMEText(content,_subtype='html',_charset='gb2312')
msg['Subject'] = sub
msg['From'] = me
msg['To'] = to_list
try:
s = smtplib.SMTP(mail_host,587)
s.ehlo()
s.starttls()
s.login(mail_user,mail_pass)
s.sendmail(me, to_list, msg.as_string())
s.close()
return True
except Exception, e:
print str(e)
return False
if __name__ == '__main__':
if send_mail(mailto_list,"hello","<a href='http://blog.csdn.net/yinghuihong'>yinghuihong..</a>"):
print "success"
else:
print "fail"
|
|
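The script above is Python 2 (print statements, `except Exception, e`); a hedged Python 3 sketch of the same send path, with placeholder credentials rather than the real account details embedded above:

import smtplib
from email.mime.text import MIMEText

def send_mail(host, user, password, sender, to_addr, subject, html, port=587):
    # Build the HTML message the same way the script above does.
    msg = MIMEText(html, _subtype='html', _charset='utf-8')
    msg['Subject'] = subject
    msg['From'] = sender
    msg['To'] = to_addr
    try:
        with smtplib.SMTP(host, port) as s:  # context manager closes the socket
            s.ehlo()
            s.starttls()
            s.login(user, password)
            s.sendmail(sender, [to_addr], msg.as_string())
        return True
    except Exception as e:  # Python 3 spelling of the except clause
        print(str(e))
        return False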
9a2963999b2f2e974fd3a286b0e4b74b010c4ba3
|
python/nooploop/nooploop.py
|
python/nooploop/nooploop.py
|
import argparse
import timeit
def noop():
return None
parser = argparse.ArgumentParser()
parser.add_argument("-c", "--count", default=1000000)
args = parser.parse_args()
start = timeit.default_timer()
for x in range(int(args.count)):
noop()
elapsed = timeit.default_timer() - start
ops_per_second = int(args.count) / elapsed
print(f"Called {args.count} functions in {elapsed:.2f} seconds ({ops_per_second:.2f} ops/s)")
|
Add python test for simple no-op loop
|
Add python test for simple no-op loop
|
Python
|
mit
|
selvasingh/azure-sdk-for-java,Azure/azure-sdk-for-java,Azure/azure-sdk-for-java,Azure/azure-sdk-for-java,Azure/azure-sdk-for-java,selvasingh/azure-sdk-for-java,selvasingh/azure-sdk-for-java,Azure/azure-sdk-for-java
|
Add python test for simple no-op loop
|
import argparse
import timeit
def noop():
return None
parser = argparse.ArgumentParser()
parser.add_argument("-c", "--count", default=1000000)
args = parser.parse_args()
start = timeit.default_timer()
for x in range(int(args.count)):
noop()
elapsed = timeit.default_timer() - start
ops_per_second = int(args.count) / elapsed
print(f"Called {args.count} functions in {elapsed:.2f} seconds ({ops_per_second:.2f} ops/s)")
|
<commit_before><commit_msg>Add python test for simple no-op loop<commit_after>
|
import argparse
import timeit
def noop():
return None
parser = argparse.ArgumentParser()
parser.add_argument("-c", "--count", default=1000000)
args = parser.parse_args()
start = timeit.default_timer()
for x in range(int(args.count)):
noop()
elapsed = timeit.default_timer() - start
ops_per_second = int(args.count) / elapsed
print(f"Called {args.count} functions in {elapsed:.2f} seconds ({ops_per_second:.2f} ops/s)")
|
Add python test for simple no-op loopimport argparse
import timeit
def noop():
return None
parser = argparse.ArgumentParser()
parser.add_argument("-c", "--count", default=1000000)
args = parser.parse_args()
start = timeit.default_timer()
for x in range(int(args.count)):
noop()
elapsed = timeit.default_timer() - start
ops_per_second = int(args.count) / elapsed
print(f"Called {args.count} functions in {elapsed:.2f} seconds ({ops_per_second:.2f} ops/s)")
|
<commit_before><commit_msg>Add python test for simple no-op loop<commit_after>import argparse
import timeit
def noop():
return None
parser = argparse.ArgumentParser()
parser.add_argument("-c", "--count", default=1000000)
args = parser.parse_args()
start = timeit.default_timer()
for x in range(int(args.count)):
noop()
elapsed = timeit.default_timer() - start
ops_per_second = int(args.count) / elapsed
print(f"Called {args.count} functions in {elapsed:.2f} seconds ({ops_per_second:.2f} ops/s)")
|
|
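The same measurement can also be phrased with timeit.timeit, which runs the loop internally; a small sketch with a hypothetical fixed count:

import timeit

# timeit compiles and times `number` calls of the statement for us.
count = 1000000
elapsed = timeit.timeit('noop()', setup='def noop(): return None', number=count)
print(f"Called {count} functions in {elapsed:.2f} seconds ({count / elapsed:.2f} ops/s)")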
8493ae8c32f49ed54e685fa847c3d68916545d6e
|
api/test/testDocumentation.py
|
api/test/testDocumentation.py
|
'''
@author: Dallas Fraser
@date: 2019-03-13
@organization: MLSB API
@summary: Tests the static documentation routes
'''
from api.routes import Routes
from api.test.BaseTest import TestSetup, SUCCESSFUL_GET_CODE, INVALID_ID,\
NOT_FOUND_CODE
from datetime import datetime
import uuid
START_OF_PLATFORM = 2016
YEAR_WITH_NO_DATA = 1992
class TestDocumentation(TestSetup):
def testStaticDocumentationFiles(self):
documentation_routes = ['dindex',
'dresponse',
'doplayer',
'dobat',
'dogame',
'dosponsor',
'doteam',
'doteamroster',
'doleague',
'dofun',
'dopagination',
'dbplayer',
'dbbat',
'dbgame',
'dbsponsor',
'dbteam',
'dbteamroster',
'dbleague',
'dbfun',
'dvgame',
'dvplayer',
'dvteam',
'dvfun',
'dvplayerLookup',
'dvplayerteamLookup',
'dvleagueleaders',
'dvschedule',
'dbotsubmitscore',
'dbotcaptain',
'dbotupcominggames',
'dbottransaction',
'dbotcaptaingames',
'dbottransaction']
for route in documentation_routes:
with self.app.get(Routes[route]) as result:
error_message = ("Unable to get documentation for {} at {}"
.format(route, Routes[route]))
self.assertEqual(SUCCESSFUL_GET_CODE,
result.status_code,
error_message)
|
Add a simple test for documentation
|
Add a simple test for documentation
|
Python
|
apache-2.0
|
fras2560/mlsb-platform,fras2560/mlsb-platform,fras2560/mlsb-platform
|
Add a simple test for documentation
|
'''
@author: Dallas Fraser
@date: 2019-03-13
@organization: MLSB API
@summary: Tests the static documentation routes
'''
from api.routes import Routes
from api.test.BaseTest import TestSetup, SUCCESSFUL_GET_CODE, INVALID_ID,\
NOT_FOUND_CODE
from datetime import datetime
import uuid
START_OF_PLATFORM = 2016
YEAR_WITH_NO_DATA = 1992
class TestDocumentation(TestSetup):
def testStaticDocumentationFiles(self):
documentation_routes = ['dindex',
'dresponse',
'doplayer',
'dobat',
'dogame',
'dosponsor',
'doteam',
'doteamroster',
'doleague',
'dofun',
'dopagination',
'dbplayer',
'dbbat',
'dbgame',
'dbsponsor',
'dbteam',
'dbteamroster',
'dbleague',
'dbfun',
'dvgame',
'dvplayer',
'dvteam',
'dvfun',
'dvplayerLookup',
'dvplayerteamLookup',
'dvleagueleaders',
'dvschedule',
'dbotsubmitscore',
'dbotcaptain',
'dbotupcominggames',
'dbottransaction',
'dbotcaptaingames',
'dbottransaction']
for route in documentation_routes:
with self.app.get(Routes[route]) as result:
error_message = ("Unable to get documentation for {} at {}"
.format(route, Routes[route]))
self.assertEqual(SUCCESSFUL_GET_CODE,
result.status_code,
error_message)
|
<commit_before><commit_msg>Add a simple test for documentation<commit_after>
|
'''
@author: Dallas Fraser
@date: 2019-03-13
@organization: MLSB API
@summary: Tests the static documentation routes
'''
from api.routes import Routes
from api.test.BaseTest import TestSetup, SUCCESSFUL_GET_CODE, INVALID_ID,\
NOT_FOUND_CODE
from datetime import datetime
import uuid
START_OF_PLATFORM = 2016
YEAR_WITH_NO_DATA = 1992
class TestDocumentation(TestSetup):
def testStaticDocumentationFiles(self):
documentation_routes = ['dindex',
'dresponse',
'doplayer',
'dobat',
'dogame',
'dosponsor',
'doteam',
'doteamroster',
'doleague',
'dofun',
'dopagination',
'dbplayer',
'dbbat',
'dbgame',
'dbsponsor',
'dbteam',
'dbteamroster',
'dbleague',
'dbfun',
'dvgame',
'dvplayer',
'dvteam',
'dvfun',
'dvplayerLookup',
'dvplayerteamLookup',
'dvleagueleaders',
'dvschedule',
'dbotsubmitscore',
'dbotcaptain',
'dbotupcominggames',
'dbottransaction',
'dbotcaptaingames',
'dbottransaction']
for route in documentation_routes:
with self.app.get(Routes[route]) as result:
error_message = ("Unable to get documentation for {} at {}"
.format(route, Routes[route]))
self.assertEqual(SUCCESSFUL_GET_CODE,
result.status_code,
error_message)
|
Add a simple test for documentation'''
@author: Dallas Fraser
@date: 2019-03-13
@organization: MLSB API
@summary: Tests the static documentation routes
'''
from api.routes import Routes
from api.test.BaseTest import TestSetup, SUCCESSFUL_GET_CODE, INVALID_ID,\
NOT_FOUND_CODE
from datetime import datetime
import uuid
START_OF_PLATFORM = 2016
YEAR_WITH_NO_DATA = 1992
class TestDocumentation(TestSetup):
def testStaticDocumentationFiles(self):
documentation_routes = ['dindex',
'dresponse',
'doplayer',
'dobat',
'dogame',
'dosponsor',
'doteam',
'doteamroster',
'doleague',
'dofun',
'dopagination',
'dbplayer',
'dbbat',
'dbgame',
'dbsponsor',
'dbteam',
'dbteamroster',
'dbleague',
'dbfun',
'dvgame',
'dvplayer',
'dvteam',
'dvfun',
'dvplayerLookup',
'dvplayerteamLookup',
'dvleagueleaders',
'dvschedule',
'dbotsubmitscore',
'dbotcaptain',
'dbotupcominggames',
'dbottransaction',
'dbotcaptaingames',
'dbottransaction']
for route in documentation_routes:
with self.app.get(Routes[route]) as result:
error_message = ("Unable to get documentation for {} at {}"
.format(route, Routes[route]))
self.assertEqual(SUCCESSFUL_GET_CODE,
result.status_code,
error_message)
|
<commit_before><commit_msg>Add a simple test for documentation<commit_after>'''
@author: Dallas Fraser
@date: 2019-03-13
@organization: MLSB API
@summary: Tests the static documentation routes
'''
from api.routes import Routes
from api.test.BaseTest import TestSetup, SUCCESSFUL_GET_CODE, INVALID_ID,\
NOT_FOUND_CODE
from datetime import datetime
import uuid
START_OF_PLATFORM = 2016
YEAR_WITH_NO_DATA = 1992
class TestDocumentation(TestSetup):
def testStaticDocumentationFiles(self):
documentation_routes = ['dindex',
'dresponse',
'doplayer',
'dobat',
'dogame',
'dosponsor',
'doteam',
'doteamroster',
'doleague',
'dofun',
'dopagination',
'dbplayer',
'dbbat',
'dbgame',
'dbsponsor',
'dbteam',
'dbteamroster',
'dbleague',
'dbfun',
'dvgame',
'dvplayer',
'dvteam',
'dvfun',
'dvplayerLookup',
'dvplayerteamLookup',
'dvleagueleaders',
'dvschedule',
'dbotsubmitscore',
'dbotcaptain',
'dbotupcominggames',
'dbottransaction',
'dbotcaptaingames',
'dbottransaction']
for route in documentation_routes:
with self.app.get(Routes[route]) as result:
error_message = ("Unable to get documentation for {} at {}"
.format(route, Routes[route]))
self.assertEqual(SUCCESSFUL_GET_CODE,
result.status_code,
error_message)
|
|
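One hedged observation on the route list in the test above: 'dbottransaction' appears twice, so that documentation page is requested twice; an order-preserving de-duplication sketch (dict keeps insertion order on Python 3.7+):

# Abridged stand-in for the full list in the test above.
documentation_routes = ['dindex', 'dbottransaction', 'dbotcaptaingames', 'dbottransaction']
unique_routes = list(dict.fromkeys(documentation_routes))
assert unique_routes == ['dindex', 'dbottransaction', 'dbotcaptaingames']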
e4401ba44a5faea7efcd262fde1b5bf1085fbe30
|
wagtail/wagtailimages/utils.py
|
wagtail/wagtailimages/utils.py
|
import os
from PIL import Image
from django.core.exceptions import ValidationError
from django.utils.translation import ugettext_lazy as _
from django.conf import settings
def validate_image_format(f):
# Check file extension
extension = os.path.splitext(f.name)[1].lower()[1:]
if extension == 'jpg':
extension = 'jpeg'
if extension not in ['gif', 'jpeg', 'png']:
raise ValidationError(_("Not a valid image. Please use a gif, jpeg or png file with the correct file extension."))
if f.closed:
# Reopen the file
file = open(os.path.join(settings.MEDIA_ROOT, f.name), 'rb')
close = True
else:
# Seek to first byte but save position to be restored later
file_position = f.tell()
f.seek(0)
file = f
close = False
# Open image file
image = Image.open(file)
# Check that the internal format matches the extension
if image.format.upper() != extension.upper():
raise ValidationError(_("Not a valid %s image. Please use a gif, jpeg or png file with the correct file extension.") % (extension.upper()))
# Close/restore file
if close:
file.close()
else:
f.seek(file_position)
|
import os
from PIL import Image
from django.core.exceptions import ValidationError
from django.utils.translation import ugettext_lazy as _
def validate_image_format(f):
# Check file extension
extension = os.path.splitext(f.name)[1].lower()[1:]
if extension == 'jpg':
extension = 'jpeg'
if extension not in ['gif', 'jpeg', 'png']:
raise ValidationError(_("Not a valid image. Please use a gif, jpeg or png file with the correct file extension."))
# Open image file
file_position = f.tell()
f.seek(0)
image = Image.open(f)
f.seek(file_position)
# Check that the internal format matches the extension
if image.format.upper() != extension.upper():
raise ValidationError(_("Not a valid %s image. Please use a gif, jpeg or png file with the correct file extension.") % (extension.upper()))
|
Revert "Reopen images for validation if they are closed"
|
Revert "Reopen images for validation if they are closed"
This reverts commit 7d43b1cf6eda74c86209a4cae0d71557ce9bdbc0.
|
Python
|
bsd-3-clause
|
benemery/wagtail,serzans/wagtail,davecranwell/wagtail,hamsterbacke23/wagtail,takeshineshiro/wagtail,mjec/wagtail,nealtodd/wagtail,kurtrwall/wagtail,gasman/wagtail,stevenewey/wagtail,WQuanfeng/wagtail,100Shapes/wagtail,chimeno/wagtail,nutztherookie/wagtail,wagtail/wagtail,chimeno/wagtail,iansprice/wagtail,rv816/wagtail,jorge-marques/wagtail,janusnic/wagtail,mixxorz/wagtail,KimGlazebrook/wagtail-experiment,rsalmaso/wagtail,Pennebaker/wagtail,Toshakins/wagtail,iansprice/wagtail,taedori81/wagtail,JoshBarr/wagtail,torchbox/wagtail,mixxorz/wagtail,JoshBarr/wagtail,gasman/wagtail,hanpama/wagtail,WQuanfeng/wagtail,nimasmi/wagtail,gogobook/wagtail,KimGlazebrook/wagtail-experiment,Tivix/wagtail,zerolab/wagtail,mephizzle/wagtail,rv816/wagtail,chimeno/wagtail,taedori81/wagtail,FlipperPA/wagtail,marctc/wagtail,dresiu/wagtail,kurtw/wagtail,davecranwell/wagtail,taedori81/wagtail,timorieber/wagtail,nilnvoid/wagtail,jordij/wagtail,kaedroho/wagtail,kurtrwall/wagtail,wagtail/wagtail,torchbox/wagtail,dresiu/wagtail,stevenewey/wagtail,mayapurmedia/wagtail,mikedingjan/wagtail,nilnvoid/wagtail,kaedroho/wagtail,rjsproxy/wagtail,darith27/wagtail,mjec/wagtail,thenewguy/wagtail,lojack/wagtail,jorge-marques/wagtail,Klaudit/wagtail,rjsproxy/wagtail,willcodefortea/wagtail,stevenewey/wagtail,hanpama/wagtail,mikedingjan/wagtail,hanpama/wagtail,nealtodd/wagtail,serzans/wagtail,mjec/wagtail,Toshakins/wagtail,iansprice/wagtail,rjsproxy/wagtail,serzans/wagtail,mayapurmedia/wagtail,willcodefortea/wagtail,mixxorz/wagtail,hamsterbacke23/wagtail,Klaudit/wagtail,m-sanders/wagtail,Toshakins/wagtail,hanpama/wagtail,quru/wagtail,Tivix/wagtail,bjesus/wagtail,timorieber/wagtail,dresiu/wagtail,quru/wagtail,mephizzle/wagtail,kaedroho/wagtail,thenewguy/wagtail,davecranwell/wagtail,rsalmaso/wagtail,gogobook/wagtail,helenwarren/pied-wagtail,kurtw/wagtail,thenewguy/wagtail,gogobook/wagtail,mikedingjan/wagtail,rsalmaso/wagtail,iansprice/wagtail,thenewguy/wagtail,bjesus/wagtail,thenewguy/wagtail,mixxorz/wagtail,nealtodd/wagtail,iho/wagtail,tangentlabs/wagtail,nimasmi/wagtail,FlipperPA/wagtail,rv816/wagtail,Pennebaker/wagtail,iho/wagtail,chrxr/wagtail,Tivix/wagtail,taedori81/wagtail,benemery/wagtail
|
import os
from PIL import Image
from django.core.exceptions import ValidationError
from django.utils.translation import ugettext_lazy as _
from django.conf import settings
def validate_image_format(f):
# Check file extension
extension = os.path.splitext(f.name)[1].lower()[1:]
if extension == 'jpg':
extension = 'jpeg'
if extension not in ['gif', 'jpeg', 'png']:
raise ValidationError(_("Not a valid image. Please use a gif, jpeg or png file with the correct file extension."))
if f.closed:
# Reopen the file
file = open(os.path.join(settings.MEDIA_ROOT, f.name), 'rb')
close = True
else:
# Seek to first byte but save position to be restored later
file_position = f.tell()
f.seek(0)
file = f
close = False
# Open image file
image = Image.open(file)
# Check that the internal format matches the extension
if image.format.upper() != extension.upper():
raise ValidationError(_("Not a valid %s image. Please use a gif, jpeg or png file with the correct file extension.") % (extension.upper()))
# Close/restore file
if close:
file.close()
else:
f.seek(file_position)
Revert "Reopen images for validation if they are closed"
This reverts commit 7d43b1cf6eda74c86209a4cae0d71557ce9bdbc0.
|
import os
from PIL import Image
from django.core.exceptions import ValidationError
from django.utils.translation import ugettext_lazy as _
def validate_image_format(f):
# Check file extension
extension = os.path.splitext(f.name)[1].lower()[1:]
if extension == 'jpg':
extension = 'jpeg'
if extension not in ['gif', 'jpeg', 'png']:
raise ValidationError(_("Not a valid image. Please use a gif, jpeg or png file with the correct file extension."))
# Open image file
file_position = f.tell()
f.seek(0)
image = Image.open(f)
f.seek(file_position)
# Check that the internal format matches the extension
if image.format.upper() != extension.upper():
raise ValidationError(_("Not a valid %s image. Please use a gif, jpeg or png file with the correct file extension.") % (extension.upper()))
|
<commit_before>import os
from PIL import Image
from django.core.exceptions import ValidationError
from django.utils.translation import ugettext_lazy as _
from django.conf import settings
def validate_image_format(f):
# Check file extension
extension = os.path.splitext(f.name)[1].lower()[1:]
if extension == 'jpg':
extension = 'jpeg'
if extension not in ['gif', 'jpeg', 'png']:
raise ValidationError(_("Not a valid image. Please use a gif, jpeg or png file with the correct file extension."))
if f.closed:
# Reopen the file
file = open(os.path.join(settings.MEDIA_ROOT, f.name), 'rb')
close = True
else:
# Seek to first byte but save position to be restored later
file_position = f.tell()
f.seek(0)
file = f
close = False
# Open image file
image = Image.open(file)
# Check that the internal format matches the extension
if image.format.upper() != extension.upper():
raise ValidationError(_("Not a valid %s image. Please use a gif, jpeg or png file with the correct file extension.") % (extension.upper()))
# Close/restore file
if close:
file.close()
else:
f.seek(file_position)
<commit_msg>Revert "Reopen images for validation if they are closed"
This reverts commit 7d43b1cf6eda74c86209a4cae0d71557ce9bdbc0.<commit_after>
|
import os
from PIL import Image
from django.core.exceptions import ValidationError
from django.utils.translation import ugettext_lazy as _
def validate_image_format(f):
# Check file extension
extension = os.path.splitext(f.name)[1].lower()[1:]
if extension == 'jpg':
extension = 'jpeg'
if extension not in ['gif', 'jpeg', 'png']:
raise ValidationError(_("Not a valid image. Please use a gif, jpeg or png file with the correct file extension."))
# Open image file
file_position = f.tell()
f.seek(0)
image = Image.open(f)
f.seek(file_position)
# Check that the internal format matches the extension
if image.format.upper() != extension.upper():
raise ValidationError(_("Not a valid %s image. Please use a gif, jpeg or png file with the correct file extension.") % (extension.upper()))
|
import os
from PIL import Image
from django.core.exceptions import ValidationError
from django.utils.translation import ugettext_lazy as _
from django.conf import settings
def validate_image_format(f):
# Check file extension
extension = os.path.splitext(f.name)[1].lower()[1:]
if extension == 'jpg':
extension = 'jpeg'
if extension not in ['gif', 'jpeg', 'png']:
raise ValidationError(_("Not a valid image. Please use a gif, jpeg or png file with the correct file extension."))
if f.closed:
# Reopen the file
file = open(os.path.join(settings.MEDIA_ROOT, f.name), 'rb')
close = True
else:
# Seek to first byte but save position to be restored later
file_position = f.tell()
f.seek(0)
file = f
close = False
# Open image file
image = Image.open(file)
# Check that the internal format matches the extension
if image.format.upper() != extension.upper():
raise ValidationError(_("Not a valid %s image. Please use a gif, jpeg or png file with the correct file extension.") % (extension.upper()))
# Close/restore file
if close:
file.close()
else:
f.seek(file_position)
Revert "Reopen images for validation if they are closed"
This reverts commit 7d43b1cf6eda74c86209a4cae0d71557ce9bdbc0.import os
from PIL import Image
from django.core.exceptions import ValidationError
from django.utils.translation import ugettext_lazy as _
def validate_image_format(f):
# Check file extension
extension = os.path.splitext(f.name)[1].lower()[1:]
if extension == 'jpg':
extension = 'jpeg'
if extension not in ['gif', 'jpeg', 'png']:
raise ValidationError(_("Not a valid image. Please use a gif, jpeg or png file with the correct file extension."))
# Open image file
file_position = f.tell()
f.seek(0)
image = Image.open(f)
f.seek(file_position)
# Check that the internal format matches the extension
if image.format.upper() != extension.upper():
raise ValidationError(_("Not a valid %s image. Please use a gif, jpeg or png file with the correct file extension.") % (extension.upper()))
|
<commit_before>import os
from PIL import Image
from django.core.exceptions import ValidationError
from django.utils.translation import ugettext_lazy as _
from django.conf import settings
def validate_image_format(f):
# Check file extension
extension = os.path.splitext(f.name)[1].lower()[1:]
if extension == 'jpg':
extension = 'jpeg'
if extension not in ['gif', 'jpeg', 'png']:
raise ValidationError(_("Not a valid image. Please use a gif, jpeg or png file with the correct file extension."))
if f.closed:
# Reopen the file
file = open(os.path.join(settings.MEDIA_ROOT, f.name), 'rb')
close = True
else:
# Seek to first byte but save position to be restored later
file_position = f.tell()
f.seek(0)
file = f
close = False
# Open image file
image = Image.open(file)
# Check that the internal format matches the extension
if image.format.upper() != extension.upper():
raise ValidationError(_("Not a valid %s image. Please use a gif, jpeg or png file with the correct file extension.") % (extension.upper()))
# Close/restore file
if close:
file.close()
else:
f.seek(file_position)
<commit_msg>Revert "Reopen images for validation if they are closed"
This reverts commit 7d43b1cf6eda74c86209a4cae0d71557ce9bdbc0.<commit_after>import os
from PIL import Image
from django.core.exceptions import ValidationError
from django.utils.translation import ugettext_lazy as _
def validate_image_format(f):
# Check file extension
extension = os.path.splitext(f.name)[1].lower()[1:]
if extension == 'jpg':
extension = 'jpeg'
if extension not in ['gif', 'jpeg', 'png']:
raise ValidationError(_("Not a valid image. Please use a gif, jpeg or png file with the correct file extension."))
# Open image file
file_position = f.tell()
f.seek(0)
image = Image.open(f)
f.seek(file_position)
# Check that the internal format matches the extension
if image.format.upper() != extension.upper():
raise ValidationError(_("Not a valid %s image. Please use a gif, jpeg or png file with the correct file extension.") % (extension.upper()))
|
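A hedged usage sketch for the restored validator above, exercising it with a 1x1 PNG built in memory; validate_image_format, Pillow, and a configured Django project are assumed, as in the commit:

import io
from PIL import Image
from django.core.files.uploadedfile import SimpleUploadedFile

buf = io.BytesIO()
Image.new('RGB', (1, 1)).save(buf, format='PNG')  # real PNG bytes

good = SimpleUploadedFile('dot.png', buf.getvalue())
validate_image_format(good)  # passes: extension and internal format agree

bad = SimpleUploadedFile('dot.jpg', buf.getvalue())
# validate_image_format(bad) would raise ValidationError: the name says
# jpeg but the bytes are PNG.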
1a4d3d00fea775e7adaa6ff1d40346fe9e3842d8
|
sandbox/samples/python/studio/plugins/basicenumerator/__init__.py
|
sandbox/samples/python/studio/plugins/basicenumerator/__init__.py
|
#
# This source file is part of appleseed.
# Visit http://appleseedhq.net/ for additional information and resources.
#
# This software is released under the MIT license.
#
# Copyright (c) 2017 Francois Beaune, The appleseedhq Organization
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
from appleseed.studio import *
def register():
# In the future, this plugin will be able to add menus or panels to appleseed.studio.
pass
def list_objects():
"""Print names of all objects in the scene."""
scene = current_project().get_scene()
assemblies = scene.assemblies()
for ass_key in assemblies:
list_objects_in_assembly(assemblies[ass_key])
def list_objects_in_assembly(ass):
"""Print names of objects in a given assembly and all its child assemblies."""
# Print names of objects inside this assembly.
for obj in ass.objects():
print(obj.get_name())
# Recurse into child assemblies.
child_assemblies = ass.assemblies()
for sub_ass_key in child_assemblies:
list_objects_in_assembly(child_assemblies[sub_ass_key])
|
Add basicenumerator appleseed.studio Python plugin sample
|
Add basicenumerator appleseed.studio Python plugin sample
|
Python
|
mit
|
pjessesco/appleseed,Biart95/appleseed,pjessesco/appleseed,est77/appleseed,luisbarrancos/appleseed,Biart95/appleseed,appleseedhq/appleseed,Vertexwahn/appleseed,luisbarrancos/appleseed,pjessesco/appleseed,est77/appleseed,luisbarrancos/appleseed,aytekaman/appleseed,aytekaman/appleseed,Biart95/appleseed,dictoon/appleseed,aytekaman/appleseed,dictoon/appleseed,appleseedhq/appleseed,dictoon/appleseed,Vertexwahn/appleseed,dictoon/appleseed,pjessesco/appleseed,aytekaman/appleseed,appleseedhq/appleseed,est77/appleseed,appleseedhq/appleseed,Vertexwahn/appleseed,Vertexwahn/appleseed,dictoon/appleseed,Biart95/appleseed,aytekaman/appleseed,est77/appleseed,Vertexwahn/appleseed,luisbarrancos/appleseed,pjessesco/appleseed,luisbarrancos/appleseed,appleseedhq/appleseed,Biart95/appleseed,est77/appleseed
|
Add basicenumerator appleseed.studio Python plugin sample
|
#
# This source file is part of appleseed.
# Visit http://appleseedhq.net/ for additional information and resources.
#
# This software is released under the MIT license.
#
# Copyright (c) 2017 Francois Beaune, The appleseedhq Organization
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
from appleseed.studio import *
def register():
# In the future, this plugin will be able to add menus or panels to appleseed.studio.
pass
def list_objects():
"""Print names of all objects in the scene."""
scene = current_project().get_scene()
assemblies = scene.assemblies()
for ass_key in assemblies:
list_objects_in_assembly(assemblies[ass_key])
def list_objects_in_assembly(ass):
"""Print names of objects in a given assembly and all its child assemblies."""
# Print names of objects inside this assembly.
for obj in ass.objects():
print(obj.get_name())
# Recurse into child assemblies.
child_assemblies = ass.assemblies()
for sub_ass_key in child_assemblies:
list_objects_in_assembly(child_assemblies[sub_ass_key])
|
<commit_before><commit_msg>Add basicenumerator appleseed.studio Python plugin sample<commit_after>
|
#
# This source file is part of appleseed.
# Visit http://appleseedhq.net/ for additional information and resources.
#
# This software is released under the MIT license.
#
# Copyright (c) 2017 Francois Beaune, The appleseedhq Organization
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
from appleseed.studio import *
def register():
# In the future, this plugin will be able to add menus or panels to appleseed.studio.
pass
def list_objects():
"""Print names of all objects in the scene."""
scene = current_project().get_scene()
assemblies = scene.assemblies()
for ass_key in assemblies:
list_objects_in_assembly(assemblies[ass_key])
def list_objects_in_assembly(ass):
"""Print names of objects in a given assembly and all its child assemblies."""
# Print names of objects inside this assembly.
for obj in ass.objects():
print(obj.get_name())
# Recurse into child assemblies.
child_assemblies = ass.assemblies()
for sub_ass_key in child_assemblies:
list_objects_in_assembly(child_assemblies[sub_ass_key])
|
Add basicenumerator appleseed.studio Python plugin sample
#
# This source file is part of appleseed.
# Visit http://appleseedhq.net/ for additional information and resources.
#
# This software is released under the MIT license.
#
# Copyright (c) 2017 Francois Beaune, The appleseedhq Organization
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
from appleseed.studio import *
def register():
# In the future, this plugin will be able to add menus or panels to appleseed.studio.
pass
def list_objects():
"""Print names of all objects in the scene."""
scene = current_project().get_scene()
assemblies = scene.assemblies()
for ass_key in assemblies:
list_objects_in_assembly(assemblies[ass_key])
def list_objects_in_assembly(ass):
"""Print names of objects in a given assembly and all its child assemblies."""
# Print names of objects inside this assembly.
for obj in ass.objects():
print(obj.get_name())
# Recurse into child assemblies.
child_assemblies = ass.assemblies()
for sub_ass_key in child_assemblies:
list_objects_in_assembly(child_assemblies[sub_ass_key])
|
<commit_before><commit_msg>Add basicenumerator appleseed.studio Python plugin sample<commit_after>
#
# This source file is part of appleseed.
# Visit http://appleseedhq.net/ for additional information and resources.
#
# This software is released under the MIT license.
#
# Copyright (c) 2017 Francois Beaune, The appleseedhq Organization
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
from appleseed.studio import *
def register():
# In the future, this plugin will be able to add menus or panels to appleseed.studio.
pass
def list_objects():
"""Print names of all objects in the scene."""
scene = current_project().get_scene()
assemblies = scene.assemblies()
for ass_key in assemblies:
list_objects_in_assembly(assemblies[ass_key])
def list_objects_in_assembly(ass):
"""Print names of objects in a given assembly and all its child assemblies."""
# Print names of objects inside this assembly.
for obj in ass.objects():
print(obj.get_name())
# Recurse into child assemblies.
child_assemblies = ass.assemblies()
for sub_ass_key in child_assemblies:
list_objects_in_assembly(child_assemblies[sub_ass_key])
|
|
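A hedged variant of the enumerator above that collects object names instead of printing them; it relies only on the appleseed.studio calls already used in the sample:

def collect_object_names(ass, names=None):
    """Return names of objects in ass and all of its child assemblies."""
    if names is None:
        names = []
    for obj in ass.objects():
        names.append(obj.get_name())
    # Recurse into child assemblies, reusing the same accumulator list.
    child_assemblies = ass.assemblies()
    for key in child_assemblies:
        collect_object_names(child_assemblies[key], names)
    return names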
dbdada2dd1b61b10f402ee8163442d2653c1876b
|
training/level-1-the-zen-of-python/dragon-warrior/palindrome/tmarsha1-palindrome.py
|
training/level-1-the-zen-of-python/dragon-warrior/palindrome/tmarsha1-palindrome.py
|
__author__ = 'tmarsha1'
""" find largest palindrome for the product of 2 three digit numbers
(100-999) or (100^2 - 999^2)
Answer is 913 * 993 = 906609
"""
import re
class Word(object):
def __init__(self, values):
concat = ""
for value in values:
concat = concat + self.format_value(value)
self.concatenated_word = concat
def format_value(self, value):
value = str(value).lower()
value = re.sub("[^A-Za-z0-9]", "", value)
return value
def is_palindrome(self):
return self.concatenated_word == self.concatenated_word[::-1]
def to_str(self):
return self.concatenated_word
def is_palindrome(value):
result = False
value = str(value)
if value == value[::-1]:
result = True
return result
def compare_palindromes(word1, word2):
result = word1
if int(word1.to_str()) < int(word2.to_str()):
result = word2
return result
def brittle_palindrome_solution(lower_bounds, upper_bounds):
max_palindrome = Word("0")
for first_item in range(lower_bounds, upper_bounds):
for second_item in range(lower_bounds, upper_bounds):
product = first_item * second_item
word = Word(str(product))
if word.is_palindrome():
max_palindrome = compare_palindromes(max_palindrome, word)
return max_palindrome
if __name__ == '__main__':
print("palindrome: %s" % brittle_palindrome_solution(100, 1000).to_str())
|
Add homework solution for tmarsha1.
|
Add homework solution for tmarsha1.
|
Python
|
artistic-2.0
|
bigfatpanda-training/pandas-practical-python-primer,bigfatpanda-training/pandas-practical-python-primer
|
Add homework solution for tmarsha1.
|
__author__ = 'tmarsha1'
""" find largest palindrome for the product of 2 three digit numbers
(100-999) or (100^2 - 999^2)
Answer is 913 * 993 = 906609
"""
import re
class Word(object):
def __init__(self, values):
concat = ""
for value in values:
concat = concat + self.format_value(value)
self.concatenated_word = concat
def format_value(self, value):
value = str(value).lower()
value = re.sub("[^A-Za-z0-9]", "", value)
return value
def is_palindrome(self):
return self.concatenated_word == self.concatenated_word[::-1]
def to_str(self):
return self.concatenated_word
def is_palindrome(value):
result = False
value = str(value)
if value == value[::-1]:
result = True
return result
def compare_palindromes(word1, word2):
result = word1
if int(word1.to_str()) < int(word2.to_str()):
result = word2
return result
def brittle_palindrome_solution(lower_bounds, upper_bounds):
max_palindrome = Word("0")
for first_item in range(lower_bounds, upper_bounds):
for second_item in range(lower_bounds, upper_bounds):
product = first_item * second_item
word = Word(str(product))
if word.is_palindrome():
max_palindrome = compare_palindromes(max_palindrome, word)
return max_palindrome
if __name__ == '__main__':
print("palindrome: %s" % brittle_palindrome_solution(100, 1000).to_str())
|
<commit_before><commit_msg>Add homework solution for tmarsha1.<commit_after>
|
__author__ = 'tmarsha1'
""" find largest palindrome for the product of 2 three digit numbers
(100-999) or (100^2 - 999^2)
Answer is 913 * 993 = 906609
"""
import re
class Word(object):
def __init__(self, values):
concat = ""
for value in values:
concat = concat + self.format_value(value)
self.concatenated_word = concat
def format_value(self, value):
value = str(value).lower()
value = re.sub("[^A-Za-z0-9]", "", value)
return value
def is_palindrome(self):
return self.concatenated_word == self.concatenated_word[::-1]
def to_str(self):
return self.concatenated_word
def is_palindrome(value):
result = False
value = str(value)
if value == value[::-1]:
result = True
return result
def compare_palindromes(word1, word2):
result = word1
if int(word1.to_str()) < int(word2.to_str()):
result = word2
return result
def brittle_palindrome_solution(lower_bounds, upper_bounds):
max_palindrome = Word("0")
for first_item in range(lower_bounds, upper_bounds):
for second_item in range(lower_bounds, upper_bounds):
product = first_item * second_item
word = Word(str(product))
if word.is_palindrome():
max_palindrome = compare_palindromes(max_palindrome, word)
return max_palindrome
if __name__ == '__main__':
print("palindrome: %s" % brittle_palindrome_solution(100, 1000).to_str())
|
Add homework solution for tmarsha1.__author__ = 'tmarsha1'
""" find largest palindrome for the product of 2 three digit numbers
(100-999) or (100^2 - 999^2)
Answer is 913 * 993 = 906609
"""
import re
class Word(object):
def __init__(self, values):
concat = ""
for value in values:
concat = concat + self.format_value(value)
self.concatenated_word = concat
def format_value(self, value):
value = str(value).lower()
value = re.sub("[^A-Za-z0-9]", "", value)
return value
def is_palindrome(self):
return self.concatenated_word == self.concatenated_word[::-1]
def to_str(self):
return self.concatenated_word
def is_palindrome(value):
result = False
value = str(value)
if value == value[::-1]:
result = True
return result
def compare_palindromes(word1, word2):
result = word1
if int(word1.to_str()) < int(word2.to_str()):
result = word2
return result
def brittle_palindrome_solution(lower_bounds, upper_bounds):
max_palindrome = Word("0")
for first_item in range(lower_bounds, upper_bounds):
for second_item in range(lower_bounds, upper_bounds):
product = first_item * second_item
word = Word(str(product))
if word.is_palindrome():
max_palindrome = compare_palindromes(max_palindrome, word)
return max_palindrome
if __name__ == '__main__':
print("palindrome: %s" % brittle_palindrome_solution(100, 1000).to_str())
|
<commit_before><commit_msg>Add homework solution for tmarsha1.<commit_after>__author__ = 'tmarsha1'
""" find largest palindrome for the product of 2 three digit numbers
(100-999) or (100^2 - 999^2)
Answer is 913 * 993 = 906609
"""
import re
class Word(object):
def __init__(self, values):
concat = ""
for value in values:
concat = concat + self.format_value(value)
self.concatenated_word = concat
def format_value(self, value):
value = str(value).lower()
value = re.sub("[^A-Za-z0-9]", "", value)
return value
def is_palindrome(self):
return self.concatenated_word == self.concatenated_word[::-1]
def to_str(self):
return self.concatenated_word
def is_palindrome(value):
result = False
value = str(value)
if value == value[::-1]:
result = True
return result
def compare_palindromes(word1, word2):
result = word1
if int(word1.to_str()) < int(word2.to_str()):
result = word2
return result
def brittle_palindrome_solution(lower_bounds, upper_bounds):
max_palindrome = Word("0")
for first_item in range(lower_bounds, upper_bounds):
for second_item in range(lower_bounds, upper_bounds):
product = first_item * second_item
word = Word(str(product))
if word.is_palindrome():
max_palindrome = compare_palindromes(max_palindrome, word)
return max_palindrome
if __name__ == '__main__':
print("palindrome: %s" % brittle_palindrome_solution(100, 1000).to_str())
|
|
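For comparison, a compact brute-force sketch of the same problem; restricting the inner range to b >= a skips mirrored pairs, and the result matches the 906609 noted in the docstring above:

# Largest palindromic product of two 3-digit numbers.
best = max(
    a * b
    for a in range(100, 1000)
    for b in range(a, 1000)
    if str(a * b) == str(a * b)[::-1]
)
print(best)  # 906609 == 913 * 993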
ab44c1615f124b558e30d5d1432876dcc60dbf3d
|
testrepository/repository/samba_buildfarm.py
|
testrepository/repository/samba_buildfarm.py
|
# Copyright (c) 2009, 2010 Testrepository Contributors
#
# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
# license at the users choice. A copy of both licenses are available in the
# project source as Apache-2.0 and BSD. You may not use this file except in
# compliance with one of these two licences.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# license you chose for the specific language governing permissions and
# limitations under that license.
"""Access to the Samba build farm."""
import subunit
import urllib
from testrepository.repository import (
AbstractRepository,
AbstractRepositoryFactory,
AbstractTestRun,
RepositoryNotFound,
)
BUILD_FARM_URL = "http://build.samba.org/"
class RepositoryFactory(AbstractRepositoryFactory):
def initialise(klass, url):
"""Create a repository at url/path."""
raise NotImplementedError(klass.initialise)
def open(self, url):
if not url.startswith(BUILD_FARM_URL):
raise RepositoryNotFound(url)
return Repository(url)
class Repository(AbstractRepository):
"""Access to the subunit results on the Samba build farm.
"""
def __init__(self, base):
"""Create a repository object for the Samba build farm at base.
"""
self.base = base.rstrip("/")+"/"
recent_ids_url = urllib.basejoin(self.base, "+recent-ids")
f = urllib.urlopen(recent_ids_url, "r")
try:
self.recent_ids = [x.rstrip("\n") for x in f.readlines()]
finally:
f.close()
def count(self):
return len(self.recent_ids)
def latest_id(self):
if len(self.recent_ids) == 0:
raise KeyError("No tests in repository")
return len(self.recent_ids) - 1
def get_failing(self):
raise NotImplementedError(self.get_failing)
def get_test_run(self, run_id):
return _HttpRun(self.base, self.recent_ids[run_id])
def _get_inserter(self, partial):
raise NotImplementedError(self._get_inserter)
class _HttpRun(AbstractTestRun):
"""A test run that was inserted into the repository."""
def __init__(self, base_url, run_id):
"""Create a _HttpRun with the content subunit_content."""
self.base_url = base_url
self.run_id = run_id
self.url = urllib.basejoin(self.base_url,
"../../build/%s/+subunit" % self.run_id)
def get_subunit_stream(self):
return urllib.urlopen(self.url)
def get_test(self):
return subunit.ProtocolTestCase(self.get_subunit_stream())
|
Add repository backend for samba buildfarm.
|
Add repository backend for samba buildfarm.
|
Python
|
apache-2.0
|
mtreinish/stestr,masayukig/stestr,mtreinish/stestr,masayukig/stestr
|
Add repository backend for samba buildfarm.
|
# Copyright (c) 2009, 2010 Testrepository Contributors
#
# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
# license at the users choice. A copy of both licenses are available in the
# project source as Apache-2.0 and BSD. You may not use this file except in
# compliance with one of these two licences.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# license you chose for the specific language governing permissions and
# limitations under that license.
"""Access to the Samba build farm."""
import subunit
import urllib
from testrepository.repository import (
AbstractRepository,
AbstractRepositoryFactory,
AbstractTestRun,
RepositoryNotFound,
)
BUILD_FARM_URL = "http://build.samba.org/"
class RepositoryFactory(AbstractRepositoryFactory):
def initialise(klass, url):
"""Create a repository at url/path."""
raise NotImplementedError(klass.initialise)
def open(self, url):
if not url.startswith(BUILD_FARM_URL):
raise RepositoryNotFound(url)
return Repository(url)
class Repository(AbstractRepository):
"""Access to the subunit results on the Samba build farm.
"""
def __init__(self, base):
"""Create a repository object for the Samba build farm at base.
"""
self.base = base.rstrip("/")+"/"
recent_ids_url = urllib.basejoin(self.base, "+recent-ids")
        f = urllib.urlopen(recent_ids_url)  # urlopen's second argument is POST data, not a file mode
try:
self.recent_ids = [x.rstrip("\n") for x in f.readlines()]
finally:
f.close()
def count(self):
return len(self.recent_ids)
def latest_id(self):
if len(self.recent_ids) == 0:
raise KeyError("No tests in repository")
return len(self.recent_ids) - 1
def get_failing(self):
raise NotImplementedError(self.get_failing)
def get_test_run(self, run_id):
return _HttpRun(self.base, self.recent_ids[run_id])
def _get_inserter(self, partial):
raise NotImplementedError(self._get_inserter)
class _HttpRun(AbstractTestRun):
"""A test run that was inserted into the repository."""
def __init__(self, base_url, run_id):
"""Create a _HttpRun with the content subunit_content."""
self.base_url = base_url
self.run_id = run_id
self.url = urllib.basejoin(self.base_url,
"../../build/%s/+subunit" % self.run_id)
def get_subunit_stream(self):
return urllib.urlopen(self.url)
def get_test(self):
return subunit.ProtocolTestCase(self.get_subunit_stream())
|
<commit_before><commit_msg>Add repository backend for samba buildfarm.<commit_after>
|
# Copyright (c) 2009, 2010 Testrepository Contributors
#
# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
# license at the users choice. A copy of both licenses are available in the
# project source as Apache-2.0 and BSD. You may not use this file except in
# compliance with one of these two licences.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# license you chose for the specific language governing permissions and
# limitations under that license.
"""Access to the Samba build farm."""
import subunit
import urllib
from testrepository.repository import (
AbstractRepository,
AbstractRepositoryFactory,
AbstractTestRun,
RepositoryNotFound,
)
BUILD_FARM_URL = "http://build.samba.org/"
class RepositoryFactory(AbstractRepositoryFactory):
def initialise(klass, url):
"""Create a repository at url/path."""
raise NotImplementedError(klass.initialise)
def open(self, url):
if not url.startswith(BUILD_FARM_URL):
raise RepositoryNotFound(url)
return Repository(url)
class Repository(AbstractRepository):
"""Access to the subunit results on the Samba build farm.
"""
def __init__(self, base):
"""Create a repository object for the Samba build farm at base.
"""
self.base = base.rstrip("/")+"/"
recent_ids_url = urllib.basejoin(self.base, "+recent-ids")
        f = urllib.urlopen(recent_ids_url)  # urlopen's second argument is POST data, not a file mode
try:
self.recent_ids = [x.rstrip("\n") for x in f.readlines()]
finally:
f.close()
def count(self):
return len(self.recent_ids)
def latest_id(self):
if len(self.recent_ids) == 0:
raise KeyError("No tests in repository")
return len(self.recent_ids) - 1
def get_failing(self):
raise NotImplementedError(self.get_failing)
def get_test_run(self, run_id):
return _HttpRun(self.base, self.recent_ids[run_id])
def _get_inserter(self, partial):
raise NotImplementedError(self._get_inserter)
class _HttpRun(AbstractTestRun):
"""A test run that was inserted into the repository."""
def __init__(self, base_url, run_id):
"""Create a _HttpRun with the content subunit_content."""
self.base_url = base_url
self.run_id = run_id
self.url = urllib.basejoin(self.base_url,
"../../build/%s/+subunit" % self.run_id)
def get_subunit_stream(self):
return urllib.urlopen(self.url)
def get_test(self):
return subunit.ProtocolTestCase(self.get_subunit_stream())
|
Add repository backend for samba buildfarm.# Copyright (c) 2009, 2010 Testrepository Contributors
#
# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
# license at the users choice. A copy of both licenses are available in the
# project source as Apache-2.0 and BSD. You may not use this file except in
# compliance with one of these two licences.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# license you chose for the specific language governing permissions and
# limitations under that license.
"""Access to the Samba build farm."""
import subunit
import urllib
from testrepository.repository import (
AbstractRepository,
AbstractRepositoryFactory,
AbstractTestRun,
RepositoryNotFound,
)
BUILD_FARM_URL = "http://build.samba.org/"
class RepositoryFactory(AbstractRepositoryFactory):
def initialise(klass, url):
"""Create a repository at url/path."""
raise NotImplementedError(klass.initialise)
def open(self, url):
if not url.startswith(BUILD_FARM_URL):
raise RepositoryNotFound(url)
return Repository(url)
class Repository(AbstractRepository):
"""Access to the subunit results on the Samba build farm.
"""
def __init__(self, base):
"""Create a repository object for the Samba build farm at base.
"""
self.base = base.rstrip("/")+"/"
recent_ids_url = urllib.basejoin(self.base, "+recent-ids")
        f = urllib.urlopen(recent_ids_url)  # urlopen's second argument is POST data, not a file mode
try:
self.recent_ids = [x.rstrip("\n") for x in f.readlines()]
finally:
f.close()
def count(self):
return len(self.recent_ids)
def latest_id(self):
if len(self.recent_ids) == 0:
raise KeyError("No tests in repository")
return len(self.recent_ids) - 1
def get_failing(self):
raise NotImplementedError(self.get_failing)
def get_test_run(self, run_id):
return _HttpRun(self.base, self.recent_ids[run_id])
def _get_inserter(self, partial):
raise NotImplementedError(self._get_inserter)
class _HttpRun(AbstractTestRun):
"""A test run that was inserted into the repository."""
def __init__(self, base_url, run_id):
"""Create a _HttpRun with the content subunit_content."""
self.base_url = base_url
self.run_id = run_id
self.url = urllib.basejoin(self.base_url,
"../../build/%s/+subunit" % self.run_id)
def get_subunit_stream(self):
return urllib.urlopen(self.url)
def get_test(self):
return subunit.ProtocolTestCase(self.get_subunit_stream())
|
<commit_before><commit_msg>Add repository backend for samba buildfarm.<commit_after># Copyright (c) 2009, 2010 Testrepository Contributors
#
# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
# license at the users choice. A copy of both licenses are available in the
# project source as Apache-2.0 and BSD. You may not use this file except in
# compliance with one of these two licences.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# license you chose for the specific language governing permissions and
# limitations under that license.
"""Access to the Samba build farm."""
import subunit
import urllib
from testrepository.repository import (
AbstractRepository,
AbstractRepositoryFactory,
AbstractTestRun,
RepositoryNotFound,
)
BUILD_FARM_URL = "http://build.samba.org/"
class RepositoryFactory(AbstractRepositoryFactory):
def initialise(klass, url):
"""Create a repository at url/path."""
raise NotImplementedError(klass.initialise)
def open(self, url):
if not url.startswith(BUILD_FARM_URL):
raise RepositoryNotFound(url)
return Repository(url)
class Repository(AbstractRepository):
"""Access to the subunit results on the Samba build farm.
"""
def __init__(self, base):
"""Create a repository object for the Samba build farm at base.
"""
self.base = base.rstrip("/")+"/"
recent_ids_url = urllib.basejoin(self.base, "+recent-ids")
        f = urllib.urlopen(recent_ids_url)  # urlopen's second argument is POST data, not a file mode
try:
self.recent_ids = [x.rstrip("\n") for x in f.readlines()]
finally:
f.close()
def count(self):
return len(self.recent_ids)
def latest_id(self):
if len(self.recent_ids) == 0:
raise KeyError("No tests in repository")
return len(self.recent_ids) - 1
def get_failing(self):
raise NotImplementedError(self.get_failing)
def get_test_run(self, run_id):
return _HttpRun(self.base, self.recent_ids[run_id])
def _get_inserter(self, partial):
raise NotImplementedError(self._get_inserter)
class _HttpRun(AbstractTestRun):
"""A test run that was inserted into the repository."""
def __init__(self, base_url, run_id):
"""Create a _HttpRun with the content subunit_content."""
self.base_url = base_url
self.run_id = run_id
self.url = urllib.basejoin(self.base_url,
"../../build/%s/+subunit" % self.run_id)
def get_subunit_stream(self):
return urllib.urlopen(self.url)
def get_test(self):
return subunit.ProtocolTestCase(self.get_subunit_stream())
|
|
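For readers on Python 3, where urllib.urlopen and urllib.basejoin no longer exist, a rough equivalent of the fetch in Repository.__init__ might look like the sketch below (illustrative only, not part of the commit; the helper name is hypothetical):
from urllib.request import urlopen
from urllib.parse import urljoin

def fetch_recent_ids(base):
    # Python 3 counterpart of the fetch in Repository.__init__
    recent_ids_url = urljoin(base.rstrip("/") + "/", "+recent-ids")
    with urlopen(recent_ids_url) as f:
        return [line.decode("utf-8").rstrip("\n") for line in f]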
3b582b60ddcf3e46529bdca52a0bbe7fbe6f117d
|
bin/get_gdrive_credentials.py
|
bin/get_gdrive_credentials.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Usage: python get_gdrive_credentials.py PATH_TO_SECRET_FILE
The purpose of this script is to create google login credentials to be used by google drive
writer.
Expected workflow is:
1.- Get the client secret file. If you don't have one, please follow this tutorial:
https://developers.google.com/drive/web/quickstart/python
2.- Execute this script. It will open a browser tab in which you have to log in with your
Google account. It will create a credentials file (file path will be printed).
3.- You can use the info contained in both files to configure an export using google
drive writer.
"""
from __future__ import print_function
import os
import tempfile
from pydrive.auth import GoogleAuth
def parse_args():
import argparse
parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument('client_secret', help='Client Secret file identifying user')  # underscore so args.client_secret resolves
parser.add_argument('--output', help='Path to Credentials File with authorization code')
args = parser.parse_args()
if not args.output:
args.output = tempfile.mkdtemp()
return args
def run(args):
gauth = GoogleAuth()
gauth.LoadClientConfigFile(args.client_secret)
gauth.LocalWebserverAuth()
    credentials_file = os.path.join(args.output, 'gdrive-credentials.json')  # args has no 'dest' attribute; the directory is args.output
gauth.SaveCredentialsFile(credentials_file)
print('Credentials file saved to {}'.format(credentials_file))
if '__main__' == __name__:
args = parse_args()
run(args)
|
Add script to assist in Gdrive Credentials.
|
Add script to assist in Gdrive Credentials.
|
Python
|
bsd-3-clause
|
scrapinghub/exporters
|
Add script to assist in Gdrive Credentials.
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Usage: python get_gdrive_credentials.py PATH_TO_SECRET_FILE
The purpose of this script is to create google login credentials to be used by google drive
writer.
Expected workflow is:
1.- Get the client secret file. If you don't have one, please follow this tutorial:
https://developers.google.com/drive/web/quickstart/python
2.- Execute this script. It will open a browser tab in which you have to log in with your
Google account. It will create a credentials file (file path will be printed).
3.- You can use the info contained in both files to configure an export using google
drive writer.
"""
from __future__ import print_function
import os
import tempfile
from pydrive.auth import GoogleAuth
def parse_args():
import argparse
parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument('client_secret', help='Client Secret file identifying user')  # underscore so args.client_secret resolves
parser.add_argument('--output', help='Path to Credentials File with authorization code')
args = parser.parse_args()
if not args.output:
args.output = tempfile.mkdtemp()
return args
def run(args):
gauth = GoogleAuth()
gauth.LoadClientConfigFile(args.client_secret)
gauth.LocalWebserverAuth()
    credentials_file = os.path.join(args.output, 'gdrive-credentials.json')  # args has no 'dest' attribute; the directory is args.output
gauth.SaveCredentialsFile(credentials_file)
print('Credentials file saved to {}'.format(credentials_file))
if '__main__' == __name__:
args = parse_args()
run(args)
|
<commit_before><commit_msg>Add script to assist in Gdrive Credentials.<commit_after>
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Usage: python get_gdrive_credentials.py PATH_TO_SECRET_FILE
The purpose of this script is to create google login credentials to be used by google drive
writer.
Expected workflow is:
1.- Get the client secret file. If you don't have one, please follow this tutorial:
https://developers.google.com/drive/web/quickstart/python
2.- Execute this script. It will open a browser tab in which you have to log in with your
Google account. It will create a credentials file (file path will be printed).
3.- You can use the info contained in both files to configure an export using google
drive writer.
"""
from __future__ import print_function
import os
import tempfile
from pydrive.auth import GoogleAuth
def parse_args():
import argparse
parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument('client_secret', help='Client Secret file identifying user')  # underscore so args.client_secret resolves
parser.add_argument('--output', help='Path to Credentials File with authorization code')
args = parser.parse_args()
if not args.output:
args.output = tempfile.mkdtemp()
return args
def run(args):
gauth = GoogleAuth()
gauth.LoadClientConfigFile(args.client_secret)
gauth.LocalWebserverAuth()
    credentials_file = os.path.join(args.output, 'gdrive-credentials.json')  # args has no 'dest' attribute; the directory is args.output
gauth.SaveCredentialsFile(credentials_file)
print('Credentials file saved to {}'.format(credentials_file))
if '__main__' == __name__:
args = parse_args()
run(args)
|
Add script to assist in Gdrive Credentials.#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Usage: python get_gdrive_credentials.py PATH_TO_SECRET_FILE
The purpose of this script is to create google login credentials to be used by google drive
writer.
Expected workflow is:
1.- Get the client secret file. If you don't have one, please follow this tutorial:
https://developers.google.com/drive/web/quickstart/python
2.- Execute this script. It will open a browser tab in which you have to log in with your
Google account. It will create a credentials file (file path will be printed).
3.- You can use the info contained in both files to configure an export using google
drive writer.
"""
from __future__ import print_function
import os
import tempfile
from pydrive.auth import GoogleAuth
def parse_args():
import argparse
parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument('client_secret', help='Client Secret file identifying user')  # underscore so args.client_secret resolves
parser.add_argument('--output', help='Path to Credentials File with authorization code')
args = parser.parse_args()
if not args.output:
args.output = tempfile.mkdtemp()
return args
def run(args):
gauth = GoogleAuth()
gauth.LoadClientConfigFile(args.client_secret)
gauth.LocalWebserverAuth()
    credentials_file = os.path.join(args.output, 'gdrive-credentials.json')  # args has no 'dest' attribute; the directory is args.output
gauth.SaveCredentialsFile(credentials_file)
print('Credentials file saved to {}'.format(credentials_file))
if '__main__' == __name__:
args = parse_args()
run(args)
|
<commit_before><commit_msg>Add script to assist in Gdrive Credentials.<commit_after>#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Usage: python get_gdrive_credentials.py PATH_TO_SECRET_FILE
The purpose of this script is to create google login credentials to be used by google drive
writer.
Expected workflow is:
1.- Get the client secret file. If you don't have one, please follow this tutorial:
https://developers.google.com/drive/web/quickstart/python
2.- Execute this script. It will open a browser tab in which you have to log in with your
Google account. It will create a credentials file (file path will be printed).
3.- You can use the info contained in both files to configure an export using google
drive writer.
"""
from __future__ import print_function
import os
import tempfile
from pydrive.auth import GoogleAuth
def parse_args():
import argparse
parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument('client_secret', help='Client Secret file identifying user')  # underscore so args.client_secret resolves
parser.add_argument('--output', help='Path to Credentials File with authorization code')
args = parser.parse_args()
if not args.output:
args.output = tempfile.mkdtemp()
return args
def run(args):
gauth = GoogleAuth()
gauth.LoadClientConfigFile(args.client_secret)
gauth.LocalWebserverAuth()
    credentials_file = os.path.join(args.output, 'gdrive-credentials.json')  # args has no 'dest' attribute; the directory is args.output
gauth.SaveCredentialsFile(credentials_file)
print('Credentials file saved to {}'.format(credentials_file))
if '__main__' == __name__:
args = parse_args()
run(args)
|
|
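A usage sketch for the script above; the file names are placeholders:
# Example invocation, assuming client_secret.json was downloaded from the
# Google developer console:
#   python get_gdrive_credentials.py client_secret.json --output /tmp/creds
# A browser tab opens for the Google login; gdrive-credentials.json is then
# written under the --output directory (a temp directory if the flag is omitted).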
51101cc1ca180a7a9a494517d111d1e1186bd199
|
basics/numpy_structured_data.py
|
basics/numpy_structured_data.py
|
import numpy as np
# old way to do it, make 3 different arrays of data. The downside of this is that it doesn't necessarily
# tell us that these 3 are related data sets. Though then again it's quite simple if you're not an idiot
name = ["Alice", "Bob", "Cathy", "Doug"]
age = [25, 45, 37, 19]
weight = [55.0, 85.5, 68.0, 61.5]
# Now we can use a compound data type for structured arrays, creating
# a structured array of zeros with a 10-character string, 4-byte integer, and 8-byte float
data = np.zeros(4, dtype={'names':('name', 'age', 'weight'),
'formats':('U10', 'i4', 'f8')})
print(data.dtype)
# now we can fill up this structured array. Hey this is R almost
data['name'] = name
data['age'] = age
data['weight'] = weight
print(data)
# this is cool as it is now arranged as an np object in one contiguous block of memory
# moreover all this stuff is now accessible by name/index
# all names e.g.
data['name']
# This allows us to do cool filtering
# get names where age is under 30
data[data['age'] < 30]['name']
# Pandas basically greatly expands on the functionality and usefulness of these structures
# with proper input/output control and stuff
## Super advanced compound types
# it is possible to define even more advanced compound types. For example you can create a type
# where each element contains an array or matrix of values
# here we will create a data type with a mat component consisting of a 3x3 floating point matrix
# wow
tp = np.dtype([('id', 'i8'), ('mat', 'f8', (3,3))])
X = np.zeros(1,dtype = tp)
print(X[0])
print(X['mat'][0])
|
Work on understanding a bit about structured arrays in NumPy before moving on to pandas dataframes
|
Work on understanding a bit about structured arrays in NumPy before moving on to pandas dataframes
|
Python
|
mit
|
paulmorio/grusData,paulmorio/grusData
|
Work on understanding a bit about structured arrays in NumPy before moving on to pandas dataframes
|
import numpy as np
# old way to do it, make 3 different arrays of data. The downside of this is that it doesn't necessarily
# tell us that these 3 are related data sets. Though then again it's quite simple if you're not an idiot
name = ["Alice", "Bob", "Cathy", "Doug"]
age = [25, 45, 37, 19]
weight = [55.0, 85.5, 68.0, 61.5]
# Now we can use a compound data type for structured arrays, creating
# a structured array of zeros with a 10-character string, 4-byte integer, and 8-byte float
data = np.zeros(4, dtype={'names':('name', 'age', 'weight'),
'formats':('U10', 'i4', 'f8')})
print(data.dtype)
# now we can fill up this structured array. Hey this is R almost
data['name'] = name
data['age'] = age
data['weight'] = weight
print(data)
# this is cool as it is now arranged as an np object in one contiguous block of memory
# moreover all this stuff is now accessible by name/index
# all names e.g.
data['name']
# This allows us to do cool filtering
# get names where age is under 30
data[data['age'] < 30]['name']
# Pandas basically greatly expands on the functionality and usefulness of these structures
# with proper input/output control and stuff
## Super advanced compound types
# it is possible to define even more advanced compound types. For example you can create a type
# where each element contains an array or matrix of values
# here we will create a data type with a mat component consisting of a 3x3 floating point matrix
# wow
tp = np.dtype([('id', 'i8'), ('mat', 'f8', (3,3))])
X = np.zeros(1,dtype = tp)
print(X[0])
print(X['mat'][0])
|
<commit_before><commit_msg>Work on understanding a bit about structured arrays in NUmpy before moving on to pandas dataframes<commit_after>
|
import numpy as np
# old way to do it, make 3 different arrays of data. The downside of this is that it doesn't necessarily
# tell us that these 3 are related data sets. Though then again it's quite simple if you're not an idiot
name = ["Alice", "Bob", "Cathy", "Doug"]
age = [25, 45, 37, 19]
weight = [55.0, 85.5, 68.0, 61.5]
# Now we can use a compound data type for structured arrays, creating
# a structured array of zeros with a 10-character string, 4-byte integer, and 8-byte float
data = np.zeros(4, dtype={'names':('name', 'age', 'weight'),
'formats':('U10', 'i4', 'f8')})
print(data.dtype)
# now we can fill up this structured array. Hey this is R almost
data['name'] = name
data['age'] = age
data['weight'] = weight
print(data)
# this is cool as it is now arranged as an np object in one contiguous block of memory
# moreover all this stuff is now accessible by name/index
# all names e.g.
data['name']
# This allows us to do cool filtering
# get names where age is under 30
data[data['age'] < 30]['name']
# Pandas basically greatly expands on the functionality and usefulness of these structures
# with proper input/output control and stuff
## Super advanced compound types
# it is possible to define even more advanced compound types. For example you can create a type
# where each element contains an array or matrix of values
# here we will create a data type with a mat component consisting of a 3x3 floating point matrix
# wow
tp = np.dtype([('id', 'i8'), ('mat', 'f8', (3,3))])
X = np.zeros(1,dtype = tp)
print(X[0])
print(X['mat'][0])
|
Work on understanding a bit about structured arrays in NumPy before moving on to pandas dataframesimport numpy as np
# old way to do it, make 3 different arrays of data. The downside of this is that it doesn't necessarily
# tell us that these 3 are related data sets. Though then again it's quite simple if you're not an idiot
name = ["Alice", "Bob", "Cathy", "Doug"]
age = [25, 45, 37, 19]
weight = [55.0, 85.5, 68.0, 61.5]
# Now we can use a compound data type for structured arrays, creating
# a structured array of zeros with a 10-character string, 4-byte integer, and 8-byte float
data = np.zeros(4, dtype={'names':('name', 'age', 'weight'),
'formats':('U10', 'i4', 'f8')})
print(data.dtype)
# now we can fill up this structured array. Hey this is R almost
data['name'] = name
data['age'] = age
data['weight'] = weight
print(data)
# this is cool as it is now arranged as an np object in one contiguous block of memory
# moreover all this stuff is now accessible by name/index
# all names e.g.
data['name']
# This allows us to do cool filtering
# get names where age is under 30
data[data['age'] < 30]['name']
# Pandas basically greatly expands on the functionality and usefulness of these structures
# with proper input/output control and stuff
## Super advanced compound types
# it is possible to define even more advanced compound types. For example you can create a type
# where each element contains an array or matrix of values
# here we will create a data type with a mat component consisting of a 3x3 floating point matrix
# wow
tp = np.dtype([('id', 'i8'), ('mat', 'f8', (3,3))])
X = np.zeros(1,dtype = tp)
print(X[0])
print(X['mat'][0])
|
<commit_before><commit_msg>Work on understanding a bit about structured arrays in NumPy before moving on to pandas dataframes<commit_after>import numpy as np
# old way to do it, make 3 different arrays of data. The downside of this is that it doesn't necessarily
# tell us that these 3 are related data sets. Though then again it's quite simple if you're not an idiot
name = ["Alice", "Bob", "Cathy", "Doug"]
age = [25, 45, 37, 19]
weight = [55.0, 85.5, 68.0, 61.5]
# Now we can use a compound data type for structured arrays, creating
# a structured array of zeros with a 10-character string, 4-byte integer, and 8-byte float
data = np.zeros(4, dtype={'names':('name', 'age', 'weight'),
'formats':('U10', 'i4', 'f8')})
print(data.dtype)
# now we can fill up this structured array. Hey this is R almost
data['name'] = name
data['age'] = age
data['weight'] = weight
print(data)
# this is cool as it is now arranged as an np object in one contiguous block of memory
# moreover all this stuff is now accessible by name/index
# all names e.g.
data['name']
# This allows us to do cool filtering
# get names where age is under 30
data[data['age'] < 30]['name']
# Pandas basically greatly expands on the functionality and usefulness of these structures
# with proper input/output control and stuff
## Super advanced compound types
# it is possible to define even more advanced compound types. For example you can create a type
# where each element contains an array or matrix of values
# here we will create a data type with a mat component consisting of a 3x3 floating point matrix
# wow
tp = np.dtype([('id', 'i8'), ('mat', 'f8', (3,3))])
X = np.zeros(1,dtype = tp)
print(X[0])
print(X['mat'][0])
|
|
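A small follow-on sketch to the structured-array record above: viewing the same array as a record array gives attribute-style access, a halfway point toward the pandas DataFrames the comments mention (illustrative only, not part of the commit):
import numpy as np

data = np.zeros(4, dtype={'names': ('name', 'age', 'weight'),
                          'formats': ('U10', 'i4', 'f8')})
data['name'] = ["Alice", "Bob", "Cathy", "Doug"]
data['age'] = [25, 45, 37, 19]

rec = data.view(np.recarray)     # recarray view: fields become attributes
print(rec.name[rec.age < 30])    # -> ['Alice' 'Doug']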
79d1ab43d187d8ba1350965673b930fa0b3879b6
|
rosbridge_suite/rosbridge_library/src/rosbridge_library/internal/pngcompression.py
|
rosbridge_suite/rosbridge_library/src/rosbridge_library/internal/pngcompression.py
|
from pypng.code import png
from base64 import standard_b64encode, standard_b64decode
from StringIO import StringIO
def encode(string):
""" PNG-compress the string, return the b64 encoded bytes """
bytes = list(bytearray(string))
png_image = png.from_array([bytes], 'L')
buff = StringIO()
png_image.save(buff)
encoded = standard_b64encode(buff.getvalue())
return encoded
def decode(string):
""" b64 decode the string, then PNG-decompress """
decoded = standard_b64decode(string)
reader = png.Reader(bytes=decoded)
width, height, rawpixels, metadata = reader.read()
pixels = list(rawpixels)[0]
return str(bytearray(pixels))
|
from pypng.code import png
from PIL import Image
from base64 import standard_b64encode, standard_b64decode
from StringIO import StringIO
def encode(string):
""" PNG-compress the string, return the b64 encoded bytes """
i = Image.fromstring('L', (len(string), 1), string)
buff = StringIO()
i.save(buff, "png")
encoded = standard_b64encode(buff.getvalue())
return encoded
def decode(string):
""" b64 decode the string, then PNG-decompress """
decoded = standard_b64decode(string)
reader = png.Reader(bytes=decoded)
width, height, rawpixels, metadata = reader.read()
pixels = list(rawpixels)[0]
return str(bytearray(pixels))
|
Use python imaging library to encode PNG instead of pypng
|
Use python imaging library to encode PNG instead of pypng
|
Python
|
bsd-3-clause
|
WangRobo/rosbridge_suite,vladrotea/rosbridge_suite,kbendick/rosbridge_suite,vladrotea/rosbridge_suite,RobotWebTools/rosbridge_suite,DLu/rosbridge_suite,SNU-Sigma/rosbridge_suite,DLu/rosbridge_suite,DLu/rosbridge_suite,mayfieldrobotics/rosbridge_suite,mayfieldrobotics/rosbridge_suite,WangRobo/rosbridge_suite,SNU-Sigma/rosbridge_suite,WangRobo/rosbridge_suite,kbendick/rosbridge_suite,SNU-Sigma/rosbridge_suite,kbendick/rosbridge_suite,SNU-Sigma/rosbridge_suite,vladrotea/rosbridge_suite,mayfieldrobotics/rosbridge_suite
|
from pypng.code import png
from base64 import standard_b64encode, standard_b64decode
from StringIO import StringIO
def encode(string):
""" PNG-compress the string, return the b64 encoded bytes """
bytes = list(bytearray(string))
png_image = png.from_array([bytes], 'L')
buff = StringIO()
png_image.save(buff)
encoded = standard_b64encode(buff.getvalue())
return encoded
def decode(string):
""" b64 decode the string, then PNG-decompress """
decoded = standard_b64decode(string)
reader = png.Reader(bytes=decoded)
width, height, rawpixels, metadata = reader.read()
pixels = list(rawpixels)[0]
return str(bytearray(pixels))
Use python imaging library to encode PNG instead of pypng
|
from pypng.code import png
from PIL import Image
from base64 import standard_b64encode, standard_b64decode
from StringIO import StringIO
def encode(string):
""" PNG-compress the string, return the b64 encoded bytes """
i = Image.fromstring('L', (len(string), 1), string)
buff = StringIO()
i.save(buff, "png")
encoded = standard_b64encode(buff.getvalue())
return encoded
def decode(string):
""" b64 decode the string, then PNG-decompress """
decoded = standard_b64decode(string)
reader = png.Reader(bytes=decoded)
width, height, rawpixels, metadata = reader.read()
pixels = list(rawpixels)[0]
return str(bytearray(pixels))
|
<commit_before>from pypng.code import png
from base64 import standard_b64encode, standard_b64decode
from StringIO import StringIO
def encode(string):
""" PNG-compress the string, return the b64 encoded bytes """
bytes = list(bytearray(string))
png_image = png.from_array([bytes], 'L')
buff = StringIO()
png_image.save(buff)
encoded = standard_b64encode(buff.getvalue())
return encoded
def decode(string):
""" b64 decode the string, then PNG-decompress """
decoded = standard_b64decode(string)
reader = png.Reader(bytes=decoded)
width, height, rawpixels, metadata = reader.read()
pixels = list(rawpixels)[0]
return str(bytearray(pixels))
<commit_msg>Use python imaging library to encode PNG instead of pypng<commit_after>
|
from pypng.code import png
from PIL import Image
from base64 import standard_b64encode, standard_b64decode
from StringIO import StringIO
def encode(string):
""" PNG-compress the string, return the b64 encoded bytes """
i = Image.fromstring('L', (len(string), 1), string)
buff = StringIO()
i.save(buff, "png")
encoded = standard_b64encode(buff.getvalue())
return encoded
def decode(string):
""" b64 decode the string, then PNG-decompress """
decoded = standard_b64decode(string)
reader = png.Reader(bytes=decoded)
width, height, rawpixels, metadata = reader.read()
pixels = list(rawpixels)[0]
return str(bytearray(pixels))
|
from pypng.code import png
from base64 import standard_b64encode, standard_b64decode
from StringIO import StringIO
def encode(string):
""" PNG-compress the string, return the b64 encoded bytes """
bytes = list(bytearray(string))
png_image = png.from_array([bytes], 'L')
buff = StringIO()
png_image.save(buff)
encoded = standard_b64encode(buff.getvalue())
return encoded
def decode(string):
""" b64 decode the string, then PNG-decompress """
decoded = standard_b64decode(string)
reader = png.Reader(bytes=decoded)
width, height, rawpixels, metadata = reader.read()
pixels = list(rawpixels)[0]
return str(bytearray(pixels))
Use python imaging library to encode PNG instead of pypngfrom pypng.code import png
from PIL import Image
from base64 import standard_b64encode, standard_b64decode
from StringIO import StringIO
def encode(string):
""" PNG-compress the string, return the b64 encoded bytes """
i = Image.fromstring('L', (len(string), 1), string)
buff = StringIO()
i.save(buff, "png")
encoded = standard_b64encode(buff.getvalue())
return encoded
def decode(string):
""" b64 decode the string, then PNG-decompress """
decoded = standard_b64decode(string)
reader = png.Reader(bytes=decoded)
width, height, rawpixels, metadata = reader.read()
pixels = list(rawpixels)[0]
return str(bytearray(pixels))
|
<commit_before>from pypng.code import png
from base64 import standard_b64encode, standard_b64decode
from StringIO import StringIO
def encode(string):
""" PNG-compress the string, return the b64 encoded bytes """
bytes = list(bytearray(string))
png_image = png.from_array([bytes], 'L')
buff = StringIO()
png_image.save(buff)
encoded = standard_b64encode(buff.getvalue())
return encoded
def decode(string):
""" b64 decode the string, then PNG-decompress """
decoded = standard_b64decode(string)
reader = png.Reader(bytes=decoded)
width, height, rawpixels, metadata = reader.read()
pixels = list(rawpixels)[0]
return str(bytearray(pixels))
<commit_msg>Use python imaging library to encode PNG instead of pypng<commit_after>from pypng.code import png
from PIL import Image
from base64 import standard_b64encode, standard_b64decode
from StringIO import StringIO
def encode(string):
""" PNG-compress the string, return the b64 encoded bytes """
i = Image.fromstring('L', (len(string), 1), string)
buff = StringIO()
i.save(buff, "png")
encoded = standard_b64encode(buff.getvalue())
return encoded
def decode(string):
""" b64 decode the string, then PNG-decompress """
decoded = standard_b64decode(string)
reader = png.Reader(bytes=decoded)
width, height, rawpixels, metadata = reader.read()
pixels = list(rawpixels)[0]
return str(bytearray(pixels))
|
f17ce5bf794040d4193b9b3d276e2784a41dce0a
|
zou/app/models/output_type.py
|
zou/app/models/output_type.py
|
from zou.app import db
from zou.app.models.serializer import SerializerMixin
from zou.app.models.base import BaseMixin
class OutputType(db.Model, BaseMixin, SerializerMixin):
name = db.Column(db.String(40), unique=True, nullable=False)
short_name = db.Column(db.String(20), nullable=False)
|
Add model for output type
|
Add model for output type
|
Python
|
agpl-3.0
|
cgwire/zou
|
Add model for output type
|
from zou.app import db
from zou.app.models.serializer import SerializerMixin
from zou.app.models.base import BaseMixin
class OutputType(db.Model, BaseMixin, SerializerMixin):
name = db.Column(db.String(40), unique=True, nullable=False)
short_name = db.Column(db.String(20), nullable=False)
|
<commit_before><commit_msg>Add model for output type<commit_after>
|
from zou.app import db
from zou.app.models.serializer import SerializerMixin
from zou.app.models.base import BaseMixin
class OutputType(db.Model, BaseMixin, SerializerMixin):
name = db.Column(db.String(40), unique=True, nullable=False)
short_name = db.Column(db.String(20), nullable=False)
|
Add model for output typefrom zou.app import db
from zou.app.models.serializer import SerializerMixin
from zou.app.models.base import BaseMixin
class OutputType(db.Model, BaseMixin, SerializerMixin):
name = db.Column(db.String(40), unique=True, nullable=False)
short_name = db.Column(db.String(20), nullable=False)
|
<commit_before><commit_msg>Add model for output type<commit_after>from zou.app import db
from zou.app.models.serializer import SerializerMixin
from zou.app.models.base import BaseMixin
class OutputType(db.Model, BaseMixin, SerializerMixin):
name = db.Column(db.String(40), unique=True, nullable=False)
short_name = db.Column(db.String(20), nullable=False)
|
|
9ce0806a5e4262b299e3bd98c8a722a309734dc3
|
examples/cplearning.py
|
examples/cplearning.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from semcplogic.dataset import Dataset
from semcplogic.cpmodel import TableResultInterpreter
from semcplogic.cpcompiler import CPCompiler, ProblogExample
from semcplogic.problogresult import GnuplotDrawer
import pprint
#We use "breaks" because break is a reserved keyword in problog
#We add the probabilities here, but they get removed on compiling
cpcode = """0.8 : breaks <-- throws(mary).
0.6 : breaks <-- throws(john).
0.5 : throws(mary) <-- true.
1 : throws(john) <-- true.
""".split("\n")
cc = CPCompiler()
data = []
#Two ideas that don't work
#i = 0
#for i in xrange(i+1,i+1+23):
# data.append(ProblogExample(i,"throws(mary),throws(john),breaks"))
#for i in xrange(i+1,i+1+2):
# data.append(ProblogExample(i,"throws(mary),throws(john)"))
#for i in xrange(i+1,i+1+15):
# data.append(ProblogExample(i,"throws(john),breaks"))
#for i in xrange(i+1,i+1+10):
# data.append(ProblogExample(i,"throws(john)"))
#i = 0
#for i in xrange(i+1,i+1+23):
# data.append(ProblogExample(i,"throws(mary),throws(john),breaks"))
#for i in xrange(i+1,i+1+2):
# data.append(ProblogExample(i,"throws(mary),throws(john),problog_not(breaks)"))
#for i in xrange(i+1,i+1+15):
# data.append(ProblogExample(i,"problog_not(throws_mary),throws(john),breaks"))
#for i in xrange(i+1,i+1+10):
# data.append(ProblogExample(i,"problog_not(throws_mary),throws(john),problog_not(breaks)"))
#To make it work, be sure the proofs only contain positive information
#if necessary, marginalize variables
data.append(ProblogExample(0,"breaks",weight=0.76))
data.append(ProblogExample(1,"throws(mary)",weight=0.5))
data.append(ProblogExample(2,"throws(john)",weight=1))
data.append(ProblogExample(3,"throws(john),throws(mary),breaks",weight=0.46))
#We give the weights, so don't calculate them
cc.weight=False
runmodel = cc.compileCode(cpcode,otherexamples=data)
runmodel.iterations = 10
result = runmodel.run()
g = GnuplotDrawer()
g.draw(result)
import pdb;pdb.set_trace()
|
Add example of CP-logic parameter learning
|
Add example of CP-logic parameter learning
|
Python
|
bsd-2-clause
|
verhoevenv/semcplogic,verhoevenv/semcplogic,verhoevenv/semcplogic
|
Add example of CP-logic parameter learning
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from semcplogic.dataset import Dataset
from semcplogic.cpmodel import TableResultInterpreter
from semcplogic.cpcompiler import CPCompiler, ProblogExample
from semcplogic.problogresult import GnuplotDrawer
import pprint
#We use "breaks" because break is a reserved keyword in problog
#We add the probabilities here, but they get removed on compiling
cpcode = """0.8 : breaks <-- throws(mary).
0.6 : breaks <-- throws(john).
0.5 : throws(mary) <-- true.
1 : throws(john) <-- true.
""".split("\n")
cc = CPCompiler()
data = []
#Two ideas that don't work
#i = 0
#for i in xrange(i+1,i+1+23):
# data.append(ProblogExample(i,"throws(mary),throws(john),breaks"))
#for i in xrange(i+1,i+1+2):
# data.append(ProblogExample(i,"throws(mary),throws(john)"))
#for i in xrange(i+1,i+1+15):
# data.append(ProblogExample(i,"throws(john),breaks"))
#for i in xrange(i+1,i+1+10):
# data.append(ProblogExample(i,"throws(john)"))
#i = 0
#for i in xrange(i+1,i+1+23):
# data.append(ProblogExample(i,"throws(mary),throws(john),breaks"))
#for i in xrange(i+1,i+1+2):
# data.append(ProblogExample(i,"throws(mary),throws(john),problog_not(breaks)"))
#for i in xrange(i+1,i+1+15):
# data.append(ProblogExample(i,"problog_not(throws_mary),throws(john),breaks"))
#for i in xrange(i+1,i+1+10):
# data.append(ProblogExample(i,"problog_not(throws_mary),throws(john),problog_not(breaks)"))
#To make it work, be sure the proofs only contain positive information
#if necessary, marginalize variables
data.append(ProblogExample(0,"breaks",weight=0.76))
data.append(ProblogExample(1,"throws(mary)",weight=0.5))
data.append(ProblogExample(2,"throws(john)",weight=1))
data.append(ProblogExample(3,"throws(john),throws(mary),breaks",weight=0.46))
#We give the weights, so don't calculate them
cc.weight=False
runmodel = cc.compileCode(cpcode,otherexamples=data)
runmodel.iterations = 10
result = runmodel.run()
g = GnuplotDrawer()
g.draw(result)
import pdb;pdb.set_trace()
|
<commit_before><commit_msg>Add example of CP-logic parameter learning<commit_after>
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from semcplogic.dataset import Dataset
from semcplogic.cpmodel import TableResultInterpreter
from semcplogic.cpcompiler import CPCompiler, ProblogExample
from semcplogic.problogresult import GnuplotDrawer
import pprint
#We use "breaks" because break is a reserved keyword in problog
#We add the probabilities here, but they get removed on compiling
cpcode = """0.8 : breaks <-- throws(mary).
0.6 : breaks <-- throws(john).
0.5 : throws(mary) <-- true.
1 : throws(john) <-- true.
""".split("\n")
cc = CPCompiler()
data = []
#Two ideas that don't work
#i = 0
#for i in xrange(i+1,i+1+23):
# data.append(ProblogExample(i,"throws(mary),throws(john),breaks"))
#for i in xrange(i+1,i+1+2):
# data.append(ProblogExample(i,"throws(mary),throws(john)"))
#for i in xrange(i+1,i+1+15):
# data.append(ProblogExample(i,"throws(john),breaks"))
#for i in xrange(i+1,i+1+10):
# data.append(ProblogExample(i,"throws(john)"))
#i = 0
#for i in xrange(i+1,i+1+23):
# data.append(ProblogExample(i,"throws(mary),throws(john),breaks"))
#for i in xrange(i+1,i+1+2):
# data.append(ProblogExample(i,"throws(mary),throws(john),problog_not(breaks)"))
#for i in xrange(i+1,i+1+15):
# data.append(ProblogExample(i,"problog_not(throws_mary),throws(john),breaks"))
#for i in xrange(i+1,i+1+10):
# data.append(ProblogExample(i,"problog_not(throws_mary),throws(john),problog_not(breaks)"))
#To make it work, be sure the proofs only contain positive information
#if necessary, marginalize variables
data.append(ProblogExample(0,"breaks",weight=0.76))
data.append(ProblogExample(1,"throws(mary)",weight=0.5))
data.append(ProblogExample(2,"throws(john)",weight=1))
data.append(ProblogExample(3,"throws(john),throws(mary),breaks",weight=0.46))
#We give the weights, so don't calculate them
cc.weight=False
runmodel = cc.compileCode(cpcode,otherexamples=data)
runmodel.iterations = 10
result = runmodel.run()
g = GnuplotDrawer()
g.draw(result)
import pdb;pdb.set_trace()
|
Add example of CP-logic parameter learning#!/usr/bin/python
# -*- coding: utf-8 -*-
from semcplogic.dataset import Dataset
from semcplogic.cpmodel import TableResultInterpreter
from semcplogic.cpcompiler import CPCompiler, ProblogExample
from semcplogic.problogresult import GnuplotDrawer
import pprint
#We use "breaks" because break is a reserved keyword in problog
#We add the probabilities here, but they get removed on compiling
cpcode = """0.8 : breaks <-- throws(mary).
0.6 : breaks <-- throws(john).
0.5 : throws(mary) <-- true.
1 : throws(john) <-- true.
""".split("\n")
cc = CPCompiler()
data = []
#Two ideas that don't work
#i = 0
#for i in xrange(i+1,i+1+23):
# data.append(ProblogExample(i,"throws(mary),throws(john),breaks"))
#for i in xrange(i+1,i+1+2):
# data.append(ProblogExample(i,"throws(mary),throws(john)"))
#for i in xrange(i+1,i+1+15):
# data.append(ProblogExample(i,"throws(john),breaks"))
#for i in xrange(i+1,i+1+10):
# data.append(ProblogExample(i,"throws(john)"))
#i = 0
#for i in xrange(i+1,i+1+23):
# data.append(ProblogExample(i,"throws(mary),throws(john),breaks"))
#for i in xrange(i+1,i+1+2):
# data.append(ProblogExample(i,"throws(mary),throws(john),problog_not(breaks)"))
#for i in xrange(i+1,i+1+15):
# data.append(ProblogExample(i,"problog_not(throws_mary),throws(john),breaks"))
#for i in xrange(i+1,i+1+10):
# data.append(ProblogExample(i,"problog_not(throws_mary),throws(john),problog_not(breaks)"))
#To make it work, be sure the proofs only contain positive information
#if necessary, marginalize variables
data.append(ProblogExample(0,"breaks",weight=0.76))
data.append(ProblogExample(1,"throws(mary)",weight=0.5))
data.append(ProblogExample(2,"throws(john)",weight=1))
data.append(ProblogExample(3,"throws(john),throws(mary),breaks",weight=0.46))
#We give the weights, so don't calculate them
cc.weight=False
runmodel = cc.compileCode(cpcode,otherexamples=data)
runmodel.iterations = 10
result = runmodel.run()
g = GnuplotDrawer()
g.draw(result)
import pdb;pdb.set_trace()
|
<commit_before><commit_msg>Add example of CP-logic parameter learning<commit_after>#!/usr/bin/python
# -*- coding: utf-8 -*-
from semcplogic.dataset import Dataset
from semcplogic.cpmodel import TableResultInterpreter
from semcplogic.cpcompiler import CPCompiler, ProblogExample
from semcplogic.problogresult import GnuplotDrawer
import pprint
#We use "breaks" because break is a reserved keyword in problog
#We add the probabilities here, but they get removed on compiling
cpcode = """0.8 : breaks <-- throws(mary).
0.6 : breaks <-- throws(john).
0.5 : throws(mary) <-- true.
1 : throws(john) <-- true.
""".split("\n")
cc = CPCompiler()
data = []
#Two ideas that don't work
#i = 0
#for i in xrange(i+1,i+1+23):
# data.append(ProblogExample(i,"throws(mary),throws(john),breaks"))
#for i in xrange(i+1,i+1+2):
# data.append(ProblogExample(i,"throws(mary),throws(john)"))
#for i in xrange(i+1,i+1+15):
# data.append(ProblogExample(i,"throws(john),breaks"))
#for i in xrange(i+1,i+1+10):
# data.append(ProblogExample(i,"throws(john)"))
#i = 0
#for i in xrange(i+1,i+1+23):
# data.append(ProblogExample(i,"throws(mary),throws(john),breaks"))
#for i in xrange(i+1,i+1+2):
# data.append(ProblogExample(i,"throws(mary),throws(john),problog_not(breaks)"))
#for i in xrange(i+1,i+1+15):
# data.append(ProblogExample(i,"problog_not(throws_mary),throws(john),breaks"))
#for i in xrange(i+1,i+1+10):
# data.append(ProblogExample(i,"problog_not(throws_mary),throws(john),problog_not(breaks)"))
#To make it work, be sure the proofs only contain positive information
#if necessary, marginalize variables
data.append(ProblogExample(0,"breaks",weight=0.76))
data.append(ProblogExample(1,"throws(mary)",weight=0.5))
data.append(ProblogExample(2,"throws(john)",weight=1))
data.append(ProblogExample(3,"throws(john),throws(mary),breaks",weight=0.46))
#We give the weights, so don't calculate them
cc.weight=False
runmodel = cc.compileCode(cpcode,otherexamples=data)
runmodel.iterations = 10
result = runmodel.run()
g = GnuplotDrawer()
g.draw(result)
import pdb;pdb.set_trace()
|
|
e164ff0feda4be50e9d63695881b4d28a7702cc6
|
examples/Gauss_example.py
|
examples/Gauss_example.py
|
import sys
import time
import numpy as np
from abcpy.core import *
from abcpy.distributions import *
from distributed import Client
from dask.dot import dot_graph
from functools import partial
import matplotlib
import matplotlib.pyplot as plt
def normal_simu(n, mu, prng=None, latents=None):
if latents is None:
if prng is None:
prng = np.random.RandomState()
latents = prng.randn(n)
u = mu + latents
y = u
return y
def mean(y):
mu = np.mean(y, axis=1, keepdims=True)
return mu
def distance(x, y):
d = np.linalg.norm( np.array(x) - np.array(y), ord=2, axis=0)
return d
def main():
n = 1000
mu = 1.6
# Set up observed data y
latents = np.random.randn(n)
y = normal_simu(n, mu, latents=latents)
# Plot
plt.hist(y)
# Set up the simulator
simulator = partial(normal_simu, n)
# Specify the graphical model
mu = Prior('mu', 'uniform', 0, 4)
Y = Simulator('normal_simu', simulator, mu, observed=y)
S1 = Summary('S1', mean, Y)
d = Discrepancy('d', distance, S1)
# Specify the number of simulations
N = 1000000
# Time and run parallel
s = time.time()
dists = d.generate(N, batch_size=10000).compute()
print("Elapsed time %d sec" % (time.time() - s))
# Take the parameters
mu_sample = mu.generate(N).compute()
# Set threshold and reject to get posteriors
eps = 0.01
accepts = dists < eps
mu_post = mu_sample[accepts]
print("Number of accepted samples %d" % sum(accepts))
if len(mu_post) > 0:
print("Posterior for mu")
plt.hist(mu_post, bins=20)
else:
print("No accepted samples")
if __name__ == "__main__":
main()
|
Add script variant of the Gauss example
|
Add script variant of the Gauss example
|
Python
|
mit
|
akangasr/elfi
|
Add script variant of the Gauss example
|
import sys
import time
import numpy as np
from abcpy.core import *
from abcpy.distributions import *
from distributed import Client
from dask.dot import dot_graph
from functools import partial
import matplotlib
import matplotlib.pyplot as plt
def normal_simu(n, mu, prng=None, latents=None):
if latents is None:
if prng is None:
prng = np.random.RandomState()
latents = prng.randn(n)
u = mu + latents
y = u
return y
def mean(y):
mu = np.mean(y, axis=1, keepdims=True)
return mu
def distance(x, y):
d = np.linalg.norm( np.array(x) - np.array(y), ord=2, axis=0)
return d
def main():
n = 1000
mu = 1.6
# Set up observed data y
latents = np.random.randn(n)
y = normal_simu(n, mu, latents=latents)
# Plot
plt.hist(y)
# Set up the simulator
simulator = partial(normal_simu, n)
# Specify the graphical model
mu = Prior('mu', 'uniform', 0, 4)
Y = Simulator('normal_simu', simulator, mu, observed=y)
S1 = Summary('S1', mean, Y)
d = Discrepancy('d', distance, S1)
# Specify the number of simulations
N = 1000000
# Time and run parallel
s = time.time()
dists = d.generate(N, batch_size=10000).compute()
print("Elapsed time %d sec" % (time.time() - s))
# Take the parameters
mu_sample = mu.generate(N).compute()
# Set threshold and reject to get posteriors
eps = 0.01
accepts = dists < eps
mu_post = mu_sample[accepts]
print("Number of accepted samples %d" % sum(accepts))
if len(mu_post) > 0:
print("Posterior for mu")
plt.hist(mu_post, bins=20)
else:
print("No accepted samples")
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Add script variant of the Gauss example<commit_after>
|
import sys
import time
import numpy as np
from abcpy.core import *
from abcpy.distributions import *
from distributed import Client
from dask.dot import dot_graph
from functools import partial
import matplotlib
import matplotlib.pyplot as plt
def normal_simu(n, mu, prng=None, latents=None):
if latents is None:
if prng is None:
prng = np.random.RandomState()
latents = prng.randn(n)
u = mu + latents
y = u
return y
def mean(y):
mu = np.mean(y, axis=1, keepdims=True)
return mu
def distance(x, y):
d = np.linalg.norm( np.array(x) - np.array(y), ord=2, axis=0)
return d
def main():
n = 1000
mu = 1.6
# Set up observed data y
latents = np.random.randn(n)
y = normal_simu(n, mu, latents=latents)
# Plot
plt.hist(y)
# Set up the simulator
simulator = partial(normal_simu, n)
# Specify the graphical model
mu = Prior('mu', 'uniform', 0, 4)
Y = Simulator('normal_simu', simulator, mu, observed=y)
S1 = Summary('S1', mean, Y)
d = Discrepancy('d', distance, S1)
# Specify the number of simulations
N = 1000000
# Time and run parallel
s = time.time()
dists = d.generate(N, batch_size=10000).compute()
print("Elapsed time %d sec" % (time.time() - s))
# Take the parameters
mu_sample = mu.generate(N).compute()
# Set threshold and reject to get posteriors
eps = 0.01
accepts = dists < eps
mu_post = mu_sample[accepts]
print("Number of accepted samples %d" % sum(accepts))
if len(mu_post) > 0:
print("Posterior for mu")
plt.hist(mu_post, bins=20)
else:
print("No accepted samples")
if __name__ == "__main__":
main()
|
Add script variant of the Gauss exampleimport sys
import time
import numpy as np
from abcpy.core import *
from abcpy.distributions import *
from distributed import Client
from dask.dot import dot_graph
from functools import partial
import matplotlib
import matplotlib.pyplot as plt
def normal_simu(n, mu, prng=None, latents=None):
if latents is None:
if prng is None:
prng = np.random.RandomState()
latents = prng.randn(n)
u = mu + latents
y = u
return y
def mean(y):
mu = np.mean(y, axis=1, keepdims=True)
return mu
def distance(x, y):
d = np.linalg.norm( np.array(x) - np.array(y), ord=2, axis=0)
return d
def main():
n = 1000
mu = 1.6
# Set up observed data y
latents = np.random.randn(n)
y = normal_simu(n, mu, latents=latents)
# Plot
plt.hist(y)
# Set up the simulator
simulator = partial(normal_simu, n)
# Specify the graphical model
mu = Prior('mu', 'uniform', 0, 4)
Y = Simulator('normal_simu', simulator, mu, observed=y)
S1 = Summary('S1', mean, Y)
d = Discrepancy('d', distance, S1)
# Specify the number of simulations
N = 1000000
# Time and run parallel
s = time.time()
dists = d.generate(N, batch_size=10000).compute()
print("Elapsed time %d sec" % (time.time() - s))
# Take the parameters
mu_sample = mu.generate(N).compute()
# Set threshold and reject to get posteriors
eps = 0.01
accepts = dists < eps
mu_post = mu_sample[accepts]
print("Number of accepted samples %d" % sum(accepts))
if len(mu_post) > 0:
print("Posterior for mu")
plt.hist(mu_post, bins=20)
else:
print("No accepted samples")
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Add script variant of the Gauss example<commit_after>import sys
import time
import numpy as np
from abcpy.core import *
from abcpy.distributions import *
from distributed import Client
from dask.dot import dot_graph
from functools import partial
import matplotlib
import matplotlib.pyplot as plt
def normal_simu(n, mu, prng=None, latents=None):
if latents is None:
if prng is None:
prng = np.random.RandomState()
latents = prng.randn(n)
u = mu + latents
y = u
return y
def mean(y):
mu = np.mean(y, axis=1, keepdims=True)
return mu
def distance(x, y):
d = np.linalg.norm( np.array(x) - np.array(y), ord=2, axis=0)
return d
def main():
n = 1000
mu = 1.6
# Set up observed data y
latents = np.random.randn(n)
y = normal_simu(n, mu, latents=latents)
# Plot
plt.hist(y)
# Set up the simulator
simulator = partial(normal_simu, n)
# Specify the graphical model
mu = Prior('mu', 'uniform', 0, 4)
Y = Simulator('normal_simu', simulator, mu, observed=y)
S1 = Summary('S1', mean, Y)
d = Discrepancy('d', distance, S1)
# Specify the number of simulations
N = 1000000
# Time and run parallel
s = time.time()
dists = d.generate(N, batch_size=10000).compute()
print("Elapsed time %d sec" % (time.time() - s))
# Take the parameters
mu_sample = mu.generate(N).compute()
# Set threshold and reject to get posteriors
eps = 0.01
accepts = dists < eps
mu_post = mu_sample[accepts]
print("Number of accepted samples %d" % sum(accepts))
if len(mu_post) > 0:
print("Posterior for mu")
plt.hist(mu_post, bins=20)
else:
print("No accepted samples")
if __name__ == "__main__":
main()
|
|
678c9849e4763d645f571a0b558394afa04477c5
|
bathy/make_okada.py
|
bathy/make_okada.py
|
#!/usr/bin/env python
import sys
import numpy
import matplotlib.pyplot as plt
import clawpack.geoclaw.okada2 as okada
import clawpack.geoclaw.topo as topo
import clawpack.geoclaw.dtopotools as dtopotools
import clawpack.visclaw.colormaps as colormaps
plot_fault = False
if len(sys.argv) > 1:
if sys.argv[1] == "plot":
plot_fault = True
# Base subfault
# M 7.8
# L 90
# W 70
# depth of hypocenter 20 km (it is written in the text)
# coordinates of epicenter: 17.110 -99.100 (taken from: http://usuarios.geofisica.unam.mx/vladimir/sismos/100a%F1os.html; here they give a hypocentral depth of 33, but let's try with 20, as mentioned above)
# strike 296
# dip 25
# rake 90
# top of the fault plane at 12 km.
# They use a value of rigidity of 5 X 10^11 dyna/cm^2 -> 5e11 * 1e2 = 5e13
subfaults = []
test_slips = [165, 200]
for (n, slip) in enumerate(test_slips):
subfaults.append(topo.SubFault(units={"slip":"cm", "dimensions":"km", "depth":"km"}))
subfaults[-1].coordinates = [-99.25, 16.6]
subfaults[-1].coordinate_specification = "top center"
subfaults[-1].slip = slip
subfaults[-1].rake = 90.0
subfaults[-1].strike = 296
subfaults[-1].dip = 25.0
subfaults[-1].depth = 12.0
subfaults[-1].dimensions = (90.0, 70.0)
subfaults[-1].write('./okada_1957_du%s.tt3' % slip)
print "Subfault Characteristics:"
print " Mw = %s" % str(subfaults[-1].Mw(mu=5e11))
print " du = %s" % subfaults[-1].slip
print " Containing Rect = %s" % subfaults[-1].containing_rect()
if plot_fault:
fig = plt.figure()
for (n,subfault) in enumerate(subfaults):
axes = fig.add_subplot(1,2,n+1)
subfault.plot(axes)
subfault.plot_fault_rect(axes, color='k')
subfault.plot_rake(axes)
axes.set_title(r"$M_w = %s$, $\Delta u = %s$ cm" % (subfault.Mw(mu=5e11),
subfault.slip))
plt.show()
|
Add simple script for creating Okada deformations
|
Add simple script for creating Okada deformations
|
Python
|
mit
|
mandli/compsyn-geoclaw
|
Add simple script for creating Okada deformations
|
#!/usr/bin/env python
import sys
import numpy
import matplotlib.pyplot as plt
import clawpack.geoclaw.okada2 as okada
import clawpack.geoclaw.topo as topo
import clawpack.geoclaw.dtopotools as dtopotools
import clawpack.visclaw.colormaps as colormaps
plot_fault = False
if len(sys.argv) > 1:
if sys.argv[1] == "plot":
plot_fault = True
# Base subfault
# M 7.8
# L 90
# W 70
# depth of hypocenter 20 km (it is written in the text)
# coordinates of epicenter: 17.110 -99.100 (taken from: http://usuarios.geofisica.unam.mx/vladimir/sismos/100a%F1os.html; here they give a hypocentral depth of 33, but let's try with 20, as mentioned above)
# strike 296
# dip 25
# rake 90
# top of the fault plane at 12 km.
# They use a value of rigidity of 5 X 10^11 dyne/cm^2 -> 5e11 * 1e2 = 5e13
subfaults = []
test_slips = [165, 200]
for (n, slip) in enumerate(test_slips):
subfaults.append(topo.SubFault(units={"slip":"cm", "dimensions":"km", "depth":"km"}))
subfaults[-1].coordinates = [-99.25, 16.6]
subfaults[-1].coordinate_specification = "top center"
subfaults[-1].slip = slip
subfaults[-1].rake = 90.0
subfaults[-1].strike = 296
subfaults[-1].dip = 25.0
subfaults[-1].depth = 12.0
subfaults[-1].dimensions = (90.0, 70.0)
subfaults[-1].write('./okada_1957_du%s.tt3' % slip)
print "Subfault Characteristics:"
print " Mw = %s" % str(subfaults[-1].Mw(mu=5e11))
print " du = %s" % subfaults[-1].slip
print " Containing Rect = %s" % subfaults[-1].containing_rect()
if plot_fault:
fig = plt.figure()
for (n,subfault) in enumerate(subfaults):
axes = fig.add_subplot(1,2,n+1)
subfault.plot(axes)
subfault.plot_fault_rect(axes, color='k')
subfault.plot_rake(axes)
axes.set_title(r"$M_w = %s$, $\Delta u = %s$ cm" % (subfault.Mw(mu=5e11),
subfault.slip))
plt.show()
|
<commit_before><commit_msg>Add simple script for creating Okada deformations<commit_after>
|
#!/usr/bin/env python
import sys
import numpy
import matplotlib.pyplot as plt
import clawpack.geoclaw.okada2 as okada
import clawpack.geoclaw.topo as topo
import clawpack.geoclaw.dtopotools as dtopotools
import clawpack.visclaw.colormaps as colormaps
plot_fault = False
if len(sys.argv) > 1:
if sys.argv[1] == "plot":
plot_fault = True
# Base subfault
# M 7.8
# L 90
# W 70
# depth of hypocenter 20 km (it is written in the text)
# coordinates of epicenter: 17.110 -99.100 (taken from: http://usuarios.geofisica.unam.mx/vladimir/sismos/100a%F1os.html; here they give a hypocentral depth of 33, but let's try with 20, as mentioned above)
# strike 296
# dip 25
# rake 90
# top of the fault plane at 12 km.
# They use a value of rigidity of 5 X 10^11 dyne/cm^2 -> 5e11 * 1e2 = 5e13
subfaults = []
test_slips = [165, 200]
for (n, slip) in enumerate(test_slips):
subfaults.append(topo.SubFault(units={"slip":"cm", "dimensions":"km", "depth":"km"}))
subfaults[-1].coordinates = [-99.25, 16.6]
subfaults[-1].coordinate_specification = "top center"
subfaults[-1].slip = slip
subfaults[-1].rake = 90.0
subfaults[-1].strike = 296
subfaults[-1].dip = 25.0
subfaults[-1].depth = 12.0
subfaults[-1].dimensions = (90.0, 70.0)
subfaults[-1].write('./okada_1957_du%s.tt3' % slip)
print "Subfault Characteristics:"
print " Mw = %s" % str(subfaults[-1].Mw(mu=5e11))
print " du = %s" % subfaults[-1].slip
print " Containing Rect = %s" % subfaults[-1].containing_rect()
if plot_fault:
fig = plt.figure()
for (n,subfault) in enumerate(subfaults):
axes = fig.add_subplot(1,2,n+1)
subfault.plot(axes)
subfault.plot_fault_rect(axes, color='k')
subfault.plot_rake(axes)
axes.set_title(r"$M_w = %s$, $\Delta u = %s$ cm" % (subfault.Mw(mu=5e11),
subfault.slip))
plt.show()
|
Add simple script for creating Okada deformations#!/usr/bin/env python
import sys
import numpy
import matplotlib.pyplot as plt
import clawpack.geoclaw.okada2 as okada
import clawpack.geoclaw.topo as topo
import clawpack.geoclaw.dtopotools as dtopotools
import clawpack.visclaw.colormaps as colormaps
plot_fault = False
if len(sys.argv) > 1:
if sys.argv[1] == "plot":
plot_fault = True
# Base subfault
# M 7.8
# L 90
# W 70
# depth of hypocenter 20 km (it is written in the text)
# coordinates of epicenter: 17.110 -99.100 (taken from: http://usuarios.geofisica.unam.mx/vladimir/sismos/100a%F1os.html; here they give a hypocentral depth of 33, but let's try with 20, as mentioned above)
# strike 296
# dip 25
# rake 90
# top of the fault plane at 12 km.
# They use a value of rigidity of 5 X 10^11 dyne/cm^2 -> 5e11 * 1e2 = 5e13
subfaults = []
test_slips = [165, 200]
for (n, slip) in enumerate(test_slips):
subfaults.append(topo.SubFault(units={"slip":"cm", "dimensions":"km", "depth":"km"}))
subfaults[-1].coordinates = [-99.25, 16.6]
subfaults[-1].coordinate_specification = "top center"
subfaults[-1].slip = slip
subfaults[-1].rake = 90.0
subfaults[-1].strike = 296
subfaults[-1].dip = 25.0
subfaults[-1].depth = 12.0
subfaults[-1].dimensions = (90.0, 70.0)
subfaults[-1].write('./okada_1957_du%s.tt3' % slip)
print "Subfault Characteristics:"
print " Mw = %s" % str(subfaults[-1].Mw(mu=5e11))
print " du = %s" % subfaults[-1].slip
print " Containing Rect = %s" % subfaults[-1].containing_rect()
if plot_fault:
fig = plt.figure()
for (n,subfault) in enumerate(subfaults):
axes = fig.add_subplot(1,2,n+1)
subfault.plot(axes)
subfault.plot_fault_rect(axes, color='k')
subfault.plot_rake(axes)
axes.set_title(r"$M_w = %s$, $\Delta u = %s$ cm" % (subfault.Mw(mu=5e11),
subfault.slip))
plt.show()
|
<commit_before><commit_msg>Add simple script for creating Okada deformations<commit_after>#!/usr/bin/env python
import sys
import numpy
import matplotlib.pyplot as plt
import clawpack.geoclaw.okada2 as okada
import clawpack.geoclaw.topo as topo
import clawpack.geoclaw.dtopotools as dtopotools
import clawpack.visclaw.colormaps as colormaps
plot_fault = False
if len(sys.argv) > 1:
if sys.argv[1] == "plot":
plot_fault = True
# Base subfault
# M 7.8
# L 90
# W 70
# depth of hypocenter 20 km (it is written in the text)
# coordinates of epicenter: 17.110 -99.100 (taken from: http://usuarios.geofisica.unam.mx/vladimir/sismos/100a%F1os.html; here they give a hypocentral depth of 33, but let's try with 20, as mentioned aove)
# strike 296
# dip 25
# rake 90
# top of the fault plane at 12 km.
# They use a value of rigidity of 5 X 10^11 dyna/cm^2 -> 5e11 * 1e2 = 5e13
subfaults = []
test_slips = [165, 200]
for (n, slip) in enumerate(test_slips):
subfaults.append(topo.SubFault(units={"slip":"cm", "dimensions":"km", "depth":"km"}))
subfaults[-1].coordinates = [-99.25, 16.6]
subfaults[-1].coordinate_specification = "top center"
subfaults[-1].slip = slip
subfaults[-1].rake = 90.0
subfaults[-1].strike = 296
subfaults[-1].dip = 25.0
subfaults[-1].depth = 12.0
subfaults[-1].dimensions = (90.0, 70.0)
subfaults[-1].write('./okada_1957_du%s.tt3' % slip)
print "Subfault Characteristics:"
print " Mw = %s" % str(subfaults[-1].Mw(mu=5e11))
print " du = %s" % subfaults[-1].slip
print " Containing Rect = %s" % subfaults[-1].containing_rect()
if plot_fault:
fig = plt.figure()
for (n,subfault) in enumerate(subfaults):
axes = fig.add_subplot(1,2,n+1)
subfault.plot(axes)
subfault.plot_fault_rect(axes, color='k')
subfault.plot_rake(axes)
axes.set_title(r"$M_w = %s$, $\Delta u = %s$ cm" % (subfault.Mw(mu=5e11),
subfault.slip))
plt.show()
|
|
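The M 7.8 target in the Okada record's comments can be sanity-checked independently of clawpack. A small sketch using the standard Hanks-Kanamori relation in CGS units, with the rigidity and fault geometry copied from the record (only the helper function itself is new):
import math
def moment_magnitude(mu_dyn_cm2, length_cm, width_cm, slip_cm):
    m0 = mu_dyn_cm2 * length_cm * width_cm * slip_cm  # seismic moment in dyn*cm
    return (2.0 / 3.0) * (math.log10(m0) - 16.1)
# 90 km x 70 km fault, 165 cm slip, rigidity 5e11 dyn/cm^2
print(round(moment_magnitude(5e11, 90e5, 70e5, 165.0), 2))  # ~7.74, near the stated M 7.8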
55bf12c80fe14b317fdc27d74c27642a144d4530
|
tests/test_generate_files.py
|
tests/test_generate_files.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_generate_files
-------------------
Test formerly known from a unittest residing in test_generate.py named
TestGenerateFiles.test_generate_files_nontemplated_exception
"""
def test_generate_files_nontemplated_exception():
pass
|
Create new test module to convert the first test
|
Create new test module to convert the first test
|
Python
|
bsd-3-clause
|
ramiroluz/cookiecutter,dajose/cookiecutter,lucius-feng/cookiecutter,terryjbates/cookiecutter,tylerdave/cookiecutter,sp1rs/cookiecutter,takeflight/cookiecutter,nhomar/cookiecutter,christabor/cookiecutter,willingc/cookiecutter,hackebrot/cookiecutter,luzfcb/cookiecutter,jhermann/cookiecutter,ramiroluz/cookiecutter,pjbull/cookiecutter,lgp171188/cookiecutter,venumech/cookiecutter,vincentbernat/cookiecutter,stevepiercy/cookiecutter,0k/cookiecutter,michaeljoseph/cookiecutter,dajose/cookiecutter,lgp171188/cookiecutter,christabor/cookiecutter,benthomasson/cookiecutter,takeflight/cookiecutter,luzfcb/cookiecutter,audreyr/cookiecutter,0k/cookiecutter,michaeljoseph/cookiecutter,atlassian/cookiecutter,benthomasson/cookiecutter,Vauxoo/cookiecutter,kkujawinski/cookiecutter,tylerdave/cookiecutter,moi65/cookiecutter,foodszhang/cookiecutter,drgarcia1986/cookiecutter,drgarcia1986/cookiecutter,cguardia/cookiecutter,pjbull/cookiecutter,vincentbernat/cookiecutter,venumech/cookiecutter,willingc/cookiecutter,nhomar/cookiecutter,janusnic/cookiecutter,cichm/cookiecutter,terryjbates/cookiecutter,moi65/cookiecutter,ionelmc/cookiecutter,Vauxoo/cookiecutter,hackebrot/cookiecutter,letolab/cookiecutter,letolab/cookiecutter,cichm/cookiecutter,agconti/cookiecutter,vintasoftware/cookiecutter,foodszhang/cookiecutter,atlassian/cookiecutter,sp1rs/cookiecutter,agconti/cookiecutter,janusnic/cookiecutter,jhermann/cookiecutter,vintasoftware/cookiecutter,stevepiercy/cookiecutter,ionelmc/cookiecutter,Springerle/cookiecutter,lucius-feng/cookiecutter,cguardia/cookiecutter,kkujawinski/cookiecutter,audreyr/cookiecutter,Springerle/cookiecutter
|
Create new test module to convert the first test
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_generate_files
-------------------
Test formerly known from a unittest residing in test_generate.py named
TestGenerateFiles.test_generate_files_nontemplated_exception
"""
def test_generate_files_nontemplated_exception():
pass
|
<commit_before><commit_msg>Create new test module to convert the first test<commit_after>
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_generate_files
-------------------
Test formerly known from a unittest residing in test_generate.py named
TestGenerateFiles.test_generate_files_nontemplated_exception
"""
def test_generate_files_nontemplated_exception():
pass
|
Create new test module to convert the first test#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_generate_files
-------------------
Test formerly known from a unittest residing in test_generate.py named
TestGenerateFiles.test_generate_files_nontemplated_exception
"""
def test_generate_files_nontemplated_exception():
pass
|
<commit_before><commit_msg>Create new test module to convert the first test<commit_after>#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_generate_files
-------------------
Test formerly known from a unittest residing in test_generate.py named
TestGenerateFiles.test_generate_files_nontemplated_exception
"""
def test_generate_files_nontemplated_exception():
pass
|
|
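A hedged sketch of how the placeholder test above might eventually be filled in. The fixture path and the keyword argument are assumptions inferred from the test's name, not part of the record; only the exception and function names come from cookiecutter itself:
import pytest
from cookiecutter import exceptions, generate
def test_generate_files_nontemplated_exception():
    # assumed fixture: an input dir whose name contains no template variable
    with pytest.raises(exceptions.NonTemplatedInputDirException):
        generate.generate_files(repo_dir='tests/files/nontemplated/')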
29e9d3a5fbac2730acd4c2115399556b09fb83e5
|
tools/psycopg2_experiment.py
|
tools/psycopg2_experiment.py
|
#!/usr/bin/env python
'''
A CLI tool for formulating an Abba url using data from PostgreSQL
'''
from __future__ import print_function
import argparse
import psycopg2
import sys
TOOL_DESCRIPTION = '''
Formulates an Abba url using data from PostgreSQL
The query passed to this tool should return three columns, which will
become the label, success count, and trial count in Abba. If the query
flag is not specified, the query will be taken from standard input.
Note that the db parameters are optional, and if not provided psycopg2
will attempt to connect to the default locally-hosted database.
'''
def parse_arguments():
'''
Parse the arguments from the command line for this program
'''
parser = argparse.ArgumentParser(
description=TOOL_DESCRIPTION,
formatter_class=argparse.RawDescriptionHelpFormatter
)
parser.add_argument(
'-d', '--db_params', metavar='PARAMS',
help='A libpq connection string with params for the target database'
)
parser.add_argument(
'-q', '--query',
help='The query which will provide the data for Abba',
)
return parser.parse_args()
def build_url_from_database_query(dsn, query):
'''
Build an Abba URL using data from a PostgreSQL connection and query
'''
url_template = 'http://thumbtack.com/labs/abba#{}'
cursor = psycopg2.connect(dsn).cursor()
cursor.execute(query)
if not cursor.rowcount:
return url_template.format('')
rows = cursor.fetchall()
if len(rows[0]) != 3:
raise ValueError('Query does not return 3 columns of data')
groups_querystr = '&'.join('{}={}%2C{}'.format(*row) for row in rows)
return url_template.format(groups_querystr)
def main():
args = parse_arguments()
query = args.query if args.query is not None else sys.stdin.read()
params = args.db_params if args.db_params else ''
print(build_url_from_database_query(params, query))
if __name__ == '__main__':
main()
|
Add tool for pulling data from PostgreSQL to Abba
|
Add tool for pulling data from PostgreSQL to Abba
|
Python
|
bsd-3-clause
|
thumbtack/abba,thii/abbajs,thumbtack/abba,thumbtack/abba
|
Add tool for pulling data from PostgreSQL to Abba
|
#!/usr/bin/env python
'''
A CLI tool for formulating an Abba url using data from PostgreSQL
'''
from __future__ import print_function
import argparse
import psycopg2
import sys
TOOL_DESCRIPTION = '''
Formulates an Abba url using data from PostgreSQL
The query passed to this tool should return three columns, which will
become the label, success count, and trial count in Abba. If the query
flag is not specified, the query will be taken from standard input.
Note that the db parameters are optional, and if not provided psycopg2
will attempt to connect to the default locally-hosted database.
'''
def parse_arguments():
'''
Parse the arguments from the command line for this program
'''
parser = argparse.ArgumentParser(
description=TOOL_DESCRIPTION,
formatter_class=argparse.RawDescriptionHelpFormatter
)
parser.add_argument(
'-d', '--db_params', metavar='PARAMS',
help='A libpq connection string with params for the target database'
)
parser.add_argument(
'-q', '--query',
help='The query which will provide the data for Abba',
)
return parser.parse_args()
def build_url_from_database_query(dsn, query):
'''
Build an Abba URL using data from a PostgreSQL connection and query
'''
url_template = 'http://thumbtack.com/labs/abba#{}'
cursor = psycopg2.connect(dsn).cursor()
cursor.execute(query)
if not cursor.rowcount:
return url_template.format('')
rows = cursor.fetchall()
if len(rows[0]) != 3:
raise ValueError('Query does not return 3 columns of data')
groups_querystr = '&'.join('{}={}%2C{}'.format(*row) for row in rows)
return url_template.format(groups_querystr)
def main():
args = parse_arguments()
query = args.query if args.query is not None else sys.stdin.read()
params = args.db_params if args.db_params else ''
print(build_url_from_database_query(params, query))
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add tool for pulling data from PostgreSQL to Abba<commit_after>
|
#!/usr/bin/env python
'''
A CLI tool for formulating an Abba url using data from PostgreSQL
'''
from __future__ import print_function
import argparse
import psycopg2
import sys
TOOL_DESCRIPTION = '''
Formulates an Abba url using data from PostgreSQL
The query passed to this tool should return three columns, which will
become the label, success count, and trial count in Abba. If the query
flag is not specified, the query will be taken from standard input.
Note that the db parameters are optional, and if not provided psycopg2
will attempt to connect to the default locally-hosted database.
'''
def parse_arguments():
'''
Parse the arguments from the command line for this program
'''
parser = argparse.ArgumentParser(
description=TOOL_DESCRIPTION,
formatter_class=argparse.RawDescriptionHelpFormatter
)
parser.add_argument(
'-d', '--db_params', metavar='PARAMS',
help='A libpq connection string with params for the target database'
)
parser.add_argument(
'-q', '--query',
help='The query which will provide the data for Abba',
)
return parser.parse_args()
def build_url_from_database_query(dsn, query):
'''
Build an Abba URL using data from a PostgreSQL connection and query
'''
url_template = 'http://thumbtack.com/labs/abba#{}'
cursor = psycopg2.connect(dsn).cursor()
cursor.execute(query)
if not cursor.rowcount:
return url_template.format('')
rows = cursor.fetchall()
if len(rows[0]) != 3:
raise ValueError('Query does not return 3 columns of data')
groups_querystr = '&'.join('{}={}%2C{}'.format(*row) for row in rows)
return url_template.format(groups_querystr)
def main():
args = parse_arguments()
query = args.query if args.query is not None else sys.stdin.read()
params = args.db_params if args.db_params else ''
print(build_url_from_database_query(params, query))
if __name__ == '__main__':
main()
|
Add tool for pulling data from PostgreSQL to Abba#!/usr/bin/env python
'''
A CLI tool for formulating an Abba url using data from PostgreSQL
'''
from __future__ import print_function
import argparse
import psycopg2
import sys
TOOL_DESCRIPTION = '''
Formulates an Abba url using data from PostgreSQL
The query passed to this tool should return three columns, which will
become the label, success count, and trial count in Abba. If the query
flag is not specified, the query will be taken from standard input.
Note that the db parameters are optional, and if not provided psycopg2
will attempt to connect to the default locally-hosted database.
'''
def parse_arguments():
'''
Parse the arguments from the command line for this program
'''
parser = argparse.ArgumentParser(
description=TOOL_DESCRIPTION,
formatter_class=argparse.RawDescriptionHelpFormatter
)
parser.add_argument(
'-d', '--db_params', metavar='PARAMS',
help='A libpq connection string with params for the target database'
)
parser.add_argument(
'-q', '--query',
help='The query which will provide the data for Abba',
)
return parser.parse_args()
def build_url_from_database_query(dsn, query):
'''
Build an Abba URL using data from a PostgreSQL connection and query
'''
url_template = 'http://thumbtack.com/labs/abba#{}'
cursor = psycopg2.connect(dsn).cursor()
cursor.execute(query)
if not cursor.rowcount:
return url_template.format('')
rows = cursor.fetchall()
if len(rows[0]) != 3:
raise ValueError('Query does not return 3 columns of data')
groups_querystr = '&'.join('{}={}%2C{}'.format(*row) for row in rows)
return url_template.format(groups_querystr)
def main():
args = parse_arguments()
query = args.query if args.query is not None else sys.stdin.read()
params = args.db_params if args.db_params else ''
print(build_url_from_database_query(params, query))
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add tool for pulling data from PostgreSQL to Abba<commit_after>#!/usr/bin/env python
'''
A CLI tool for formulating an Abba url using data from PostgreSQL
'''
from __future__ import print_function
import argparse
import psycopg2
import sys
TOOL_DESCRIPTION = '''
Formulates an Abba url using data from PostgreSQL
The query passed to this tool should return three columns, which will
become the label, success count, and trial count in Abba. If the query
flag is not specified, the query will be taken from standard input.
Note that the db parameters are optional, and if not provided psycopg2
will attempt to connect to the default locally-hosted database.
'''
def parse_arguments():
'''
Parse the arguments from the command line for this program
'''
parser = argparse.ArgumentParser(
description=TOOL_DESCRIPTION,
formatter_class=argparse.RawDescriptionHelpFormatter
)
parser.add_argument(
'-d', '--db_params', metavar='PARAMS',
help='A libpq connection string with params for the target database'
)
parser.add_argument(
'-q', '--query',
help='The query which will provide the data for Abba',
)
return parser.parse_args()
def build_url_from_database_query(dsn, query):
'''
Build an Abba URL using data from a PostgreSQL connection and query
'''
url_template = 'http://thumbtack.com/labs/abba#{}'
cursor = psycopg2.connect(dsn).cursor()
cursor.execute(query)
if not cursor.rowcount:
return url_template.format('')
rows = cursor.fetchall()
if len(rows[0]) != 3:
raise ValueError('Query does not return 3 columns of data')
groups_querystr = '&'.join('{}={}%2C{}'.format(*row) for row in rows)
return url_template.format(groups_querystr)
def main():
args = parse_arguments()
query = args.query if args.query is not None else sys.stdin.read()
params = args.db_params if args.db_params else ''
print(build_url_from_database_query(params, query))
if __name__ == '__main__':
main()
|
|
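The querystring assembly inside build_url_from_database_query can be exercised without a database. A stand-in run of the same expression (the labels and counts below are made up for illustration):
rows = [('control', 102, 2047), ('variant', 131, 2055)]
groups_querystr = '&'.join('{}={}%2C{}'.format(*row) for row in rows)
print('http://thumbtack.com/labs/abba#' + groups_querystr)
# -> http://thumbtack.com/labs/abba#control=102%2C2047&variant=131%2C2055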
5f093d9230ac65e8c9a6d8d9a43e01d06729f260
|
phwatch.py
|
phwatch.py
|
import requests, datetime
#todo: argparse
acceptable = [7.5, 8.5] #good data interval
interval = 15 #minutes of samples to check (0.33 samples per minute)
threshold = 2 #number of points outside interval to look for
medium = "water quality"
metric = "pH"
def gethalbyname(data, name):
for d in data:
if d["name"] == name:
return d
baseurl = "http://lewaspedia.enge.vt.edu:8080"
r = requests.get(baseurl+"/sites/stroubles1/metricgroups")
d = gethalbyname(r.json, medium)["_embedded"]
tsurl = gethalbyname(d["metrics"], metric)["_links"]["timeseries"]["href"]
timestamp = (datetime.datetime.now()-datetime.timedelta(minutes=15)).isoformat()
ts = requests.get(baseurl+tsurl, params={'since': timestamp})
unacceptable = []
for value, time in ts.json["data"]:
if value > acceptable[1] or value < acceptable[0]:
unacceptable.append([value, time])
if len(unacceptable) > threshold:
print "{0} unacceptable data points! {1}".format(len(unacceptable), unacceptable)
exit(len(unacceptable))
|
Add script to watch a metric for unexpectedly high or low values
|
Add script to watch a metric for unexpectedly high or low values
|
Python
|
mit
|
LEWASatVT/lewas
|
Add script to watch a metric for unexpectedly high or low values
|
import requests, datetime
#todo: argparse
acceptable = [7.5, 8.5] #good data interval
interval = 15 #minutes of samples to check (0.33 samples per minute)
threshold = 2 #number of points outside interval to look for
medium = "water quality"
metric = "pH"
def gethalbyname(data, name):
for d in data:
if d["name"] == name:
return d
baseurl = "http://lewaspedia.enge.vt.edu:8080"
r = requests.get(baseurl+"/sites/stroubles1/metricgroups")
d = gethalbyname(r.json, medium)["_embedded"]
tsurl = gethalbyname(d["metrics"], metric)["_links"]["timeseries"]["href"]
timestamp = (datetime.datetime.now()-datetime.timedelta(minutes=15)).isoformat()
ts = requests.get(baseurl+tsurl, params={'since': timestamp})
unacceptable = []
for value, time in ts.json["data"]:
if value > acceptable[1] or value < acceptable[0]:
unacceptable.append([value, time])
if len(unacceptable) > threshold:
print "{0} unacceptable data points! {1}".format(len(unacceptable), unacceptable)
exit(len(unacceptable))
|
<commit_before><commit_msg>Add script to watch a metric for unexpectedly high or low values<commit_after>
|
import requests, datetime
#todo: argparse
acceptable = [7.5, 8.5] #good data interval
interval = 15 #minutes of samples to check (0.33 samples per minute)
threshold = 2 #number of points outside interval to look for
medium = "water quality"
metric = "pH"
def gethalbyname(data, name):
for d in data:
if d["name"] == name:
return d
baseurl = "http://lewaspedia.enge.vt.edu:8080"
r = requests.get(baseurl+"/sites/stroubles1/metricgroups")
d = gethalbyname(r.json, medium)["_embedded"]
tsurl = gethalbyname(d["metrics"], metric)["_links"]["timeseries"]["href"]
timestamp = (datetime.datetime.now()-datetime.timedelta(minutes=15)).isoformat()
ts = requests.get(baseurl+tsurl, params={'since': timestamp})
unacceptable = []
for value, time in ts.json["data"]:
if value > acceptable[1] or value < acceptable[0]:
unacceptable.append([value, time])
if len(unacceptable) > threshold:
print "{0} unacceptable data points! {1}".format(len(unacceptable), unacceptable)
exit(len(unacceptable))
|
Add script to watch a metric for unexpectedly high or low valuesimport requests, datetime
#todo: argparse
acceptable = [7.5, 8.5] #good data interval
interval = 15 #minutes of samples to check (0.33 samples per minute)
threshold = 2 #number of points outside interval to look for
medium = "water quality"
metric = "pH"
def gethalbyname(data, name):
for d in data:
if d["name"] == name:
return d
baseurl = "http://lewaspedia.enge.vt.edu:8080"
r = requests.get(baseurl+"/sites/stroubles1/metricgroups")
d = gethalbyname(r.json, medium)["_embedded"]
tsurl = gethalbyname(d["metrics"], metric)["_links"]["timeseries"]["href"]
timestamp = (datetime.datetime.now()-datetime.timedelta(minutes=15)).isoformat()
ts = requests.get(baseurl+tsurl, params={'since': timestamp})
unacceptable = []
for value, time in ts.json["data"]:
if value > acceptable[1] or value < acceptable[0]:
unacceptable.append([value, time])
if len(unacceptable) > threshold:
print "{0} unacceptable data points! {1}".format(len(unacceptable), unacceptable)
exit(len(unacceptable))
|
<commit_before><commit_msg>Add script to watch a metric for unexpectedly high or low values<commit_after>import requests, datetime
#todo: argparse
acceptable = [7.5, 8.5] #good data interval
interval = 15 #minutes of samples to check (0.33 samples per minute)
threshold = 2 #number of points outside interval to look for
medium = "water quality"
metric = "pH"
def gethalbyname(data, name):
for d in data:
if d["name"] == name:
return d
baseurl = "http://lewaspedia.enge.vt.edu:8080"
r = requests.get(baseurl+"/sites/stroubles1/metricgroups")
d = gethalbyname(r.json, medium)["_embedded"]
tsurl = gethalbyname(d["metrics"], metric)["_links"]["timeseries"]["href"]
timestamp = (datetime.datetime.now()-datetime.timedelta(minutes=15)).isoformat()
ts = requests.get(baseurl+tsurl, params={'since': timestamp})
unacceptable = []
for value, time in ts.json["data"]:
if value > acceptable[1] or value < acceptable[0]:
unacceptable.append([value, time])
if len(unacceptable) > threshold:
print "{0} unacceptable data points! {1}".format(len(unacceptable), unacceptable)
exit(len(unacceptable))
|
|
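The acceptance check at the heart of the watcher is easy to test in isolation. A sketch on made-up readings — the interval matches the script, but the samples do not come from the live API:
acceptable = [7.5, 8.5]
samples = [(7.9, '2014-01-01T00:00:00'), (9.1, '2014-01-01T00:03:00'),
           (6.8, '2014-01-01T00:06:00')]
unacceptable = [[v, t] for v, t in samples
                if v > acceptable[1] or v < acceptable[0]]
print("%d unacceptable data points! %s" % (len(unacceptable), unacceptable))  # 2 flagged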
06d7a2c45f6d93870fe8cba74d5fb563c6b62149
|
conf/init_mongodb.py
|
conf/init_mongodb.py
|
import getpass
import hashlib
from pymongo import MongoClient
spider_db = MongoClient().spider
admin_present = False
for user in spider_db.auth.find():
if user["username"] == "admin":
admin_present = True
break
if not admin_present:
password1 = getpass.getpass("Give a password for the admin user: ")
password2 = getpass.getpass("Give the password again: ")
if password1 == password2:
user = {
'username': "admin",
'password': hashlib.sha256(password1.encode("utf-8")).hexdigest(),
'role': ["admin"],
}
spider_db.auth.insert_one(user)
print("Add 'admin' user with password supplied.")
else:
print("Passwords not corresponding...")
else:
print("'admin' user already created.")
|
Add initialisation script to add the admin user.
|
Add initialisation script to add the admin user.
|
Python
|
apache-2.0
|
asteroide/immo_spider,asteroide/immo_spider,asteroide/immo_spider,asteroide/immo_spider
|
Add initialisation script to add the admin user.
|
import getpass
import hashlib
from pymongo import MongoClient
spider_db = MongoClient().spider
admin_present = False
for user in spider_db.auth.find():
if user["username"] == "admin":
admin_present = True
break
if not admin_present:
password1 = getpass.getpass("Give a password for the admin user: ")
password2 = getpass.getpass("Give the password again: ")
if password1 == password2:
user = {
'username': "admin",
'password': hashlib.sha256(password1.encode("utf-8")).hexdigest(),
'role': ["admin"],
}
spider_db.auth.insert_one(user)
print("Add 'admin' user with password supplied.")
else:
print("Passwords not corresponding...")
else:
print("'admin' user already created.")
|
<commit_before><commit_msg>Add initialisation script to add the admin user.<commit_after>
|
import getpass
import hashlib
from pymongo import MongoClient
spider_db = MongoClient().spider
admin_present = False
for user in spider_db.auth.find():
if user["username"] == "admin":
admin_present = True
break
if not admin_present:
password1 = getpass.getpass("Give a password for the admin user: ")
password2 = getpass.getpass("Give the password again: ")
if password1 == password2:
user = {
'username': "admin",
'password': hashlib.sha256(password1.encode("utf-8")).hexdigest(),
'role': ["admin"],
}
spider_db.auth.insert_one(user)
print("Add 'admin' user with password supplied.")
else:
print("Passwords not corresponding...")
else:
print("'admin' user already created.")
|
Add initialisation script to add the admin user.import getpass
import hashlib
from pymongo import MongoClient
spider_db = MongoClient().spider
admin_present = False
for user in spider_db.auth.find():
if user["username"] == "admin":
admin_present = True
break
if not admin_present:
password1 = getpass.getpass("Give a password for the admin user: ")
password2 = getpass.getpass("Give the password again: ")
if password1 == password2:
user = {
'username': "admin",
'password': hashlib.sha256(password1.encode("utf-8")).hexdigest(),
'role': ["admin"],
}
spider_db.auth.insert_one(user)
print("Add 'admin' user with password supplied.")
else:
print("Passwords not corresponding...")
else:
print("'admin' user already created.")
|
<commit_before><commit_msg>Add initialisation script to add the admin user.<commit_after>import getpass
import hashlib
from pymongo import MongoClient
spider_db = MongoClient().spider
admin_present = False
for user in spider_db.auth.find():
if user["username"] == "admin":
admin_present = True
break
if not admin_present:
password1 = getpass.getpass("Give a password for the admin user: ")
password2 = getpass.getpass("Give the password again: ")
if password1 == password2:
user = {
'username': "admin",
'password': hashlib.sha256(password1.encode("utf-8")).hexdigest(),
'role': ["admin"],
}
spider_db.auth.insert_one(user)
print("Add 'admin' user with password supplied.")
else:
print("Passwords not corresponding...")
else:
print("'admin' user already created.")
|
|
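One way to verify the initialisation script did its job, assuming a locally hosted MongoDB with the same database layout; the password checked below is a stand-in, not anything from the record:
import hashlib
from pymongo import MongoClient
user = MongoClient().spider.auth.find_one({'username': 'admin'})
candidate = hashlib.sha256('s3cret'.encode('utf-8')).hexdigest()  # 's3cret' is illustrative
print(user is not None and user['password'] == candidate)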
6da8c10aa9cf849ec6af097c0dae85b3bef930c3
|
django_modelviews/templatetags/modelview_list.py
|
django_modelviews/templatetags/modelview_list.py
|
from django import template
from django.db import models
from django.utils.safestring import mark_safe
register = template.Library()
@register.filter
def model_row(instance, fields):
for name in fields.split(','):
f = instance._meta.get_field(name)
if isinstance(f, models.ForeignKey):
fk = getattr(instance, f.name)
if hasattr(fk, 'get_absolute_url'):
value = u'<a href="%s">%s</a>' % (
fk.get_absolute_url(),
fk)
else:
value = unicode(fk)
elif f.choices:
value = getattr(instance, 'get_%s_display' % f.name)()
else:
value = unicode(getattr(instance, f.name))
yield (f.verbose_name, value)
|
Add simple model_row template filter
|
Add simple model_row template filter
|
Python
|
bsd-3-clause
|
matthiask/towel,matthiask/towel,matthiask/towel,matthiask/towel
|
Add simple model_row template filter
|
from django import template
from django.db import models
from django.utils.safestring import mark_safe
register = template.Library()
@register.filter
def model_row(instance, fields):
for name in fields.split(','):
f = instance._meta.get_field(name)
if isinstance(f, models.ForeignKey):
fk = getattr(instance, f.name)
if hasattr(fk, 'get_absolute_url'):
value = u'<a href="%s">%s</a>' % (
fk.get_absolute_url(),
fk)
else:
value = unicode(fk)
elif f.choices:
value = getattr(instance, 'get_%s_display' % f.name)()
else:
value = unicode(getattr(instance, f.name))
yield (f.verbose_name, value)
|
<commit_before><commit_msg>Add simple model_row template filter<commit_after>
|
from django import template
from django.db import models
from django.utils.safestring import mark_safe
register = template.Library()
@register.filter
def model_row(instance, fields):
for name in fields.split(','):
f = instance._meta.get_field(name)
if isinstance(f, models.ForeignKey):
fk = getattr(instance, f.name)
if hasattr(fk, 'get_absolute_url'):
value = u'<a href="%s">%s</a>' % (
fk.get_absolute_url(),
fk)
else:
value = unicode(fk)
elif f.choices:
value = getattr(instance, 'get_%s_display' % f.name)()
else:
value = unicode(getattr(instance, f.name))
yield (f.verbose_name, value)
|
Add simple model_row template filterfrom django import template
from django.db import models
from django.utils.safestring import mark_safe
register = template.Library()
@register.filter
def model_row(instance, fields):
for name in fields.split(','):
f = instance._meta.get_field(name)
if isinstance(f, models.ForeignKey):
fk = getattr(instance, f.name)
if hasattr(fk, 'get_absolute_url'):
value = u'<a href="%s">%s</a>' % (
fk.get_absolute_url(),
fk)
else:
value = unicode(fk)
elif f.choices:
value = getattr(instance, 'get_%s_display' % f.name)()
else:
value = unicode(getattr(instance, f.name))
yield (f.verbose_name, value)
|
<commit_before><commit_msg>Add simple model_row template filter<commit_after>from django import template
from django.db import models
from django.utils.safestring import mark_safe
register = template.Library()
@register.filter
def model_row(instance, fields):
for name in fields.split(','):
f = instance._meta.get_field(name)
if isinstance(f, models.ForeignKey):
fk = getattr(instance, f.name)
if hasattr(fk, 'get_absolute_url'):
value = u'<a href="%s">%s</a>' % (
fk.get_absolute_url(),
fk)
else:
value = unicode(fk)
elif f.choices:
value = getattr(instance, 'get_%s_display' % f.name)()
else:
value = unicode(getattr(instance, f.name))
yield (f.verbose_name, value)
|
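Since model_row is a template filter, the natural usage sketch is a Django template rather than Python. The template name, context variable, and field names below are assumptions; note that the filter builds raw HTML for foreign keys but never calls mark_safe (despite importing it), so the value would be autoescaped unless the template applies |safe:
{% load modelview_list %}
<table>
  {% for label, value in object|model_row:"name,status,owner" %}
  <tr><th>{{ label }}</th><td>{{ value|safe }}</td></tr>
  {% endfor %}
</table>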