| commit (40-char SHA) | old_file | new_file | old_contents | new_contents | subject | message | lang (Python only) | license (13 values) | repos | prompt | response | prompt_tagged | response_tagged | text | text_tagged |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
e4efa5a447e85c11723991870bd6fc632bc97ed5
|
us_ignite/common/templatetags/common_markdown.py
|
us_ignite/common/templatetags/common_markdown.py
|
from django import template
from django.template.defaultfilters import stringfilter
from django.utils.safestring import mark_safe
from us_ignite.common import output
register = template.Library()
@register.filter(is_safe=True)
@stringfilter
def markdown(value):
return mark_safe(output.to_html(value))
|
Add template tag to render ``markdown``.
|
Add template tag to render ``markdown``.
|
Python
|
bsd-3-clause
|
us-ignite/us_ignite,us-ignite/us_ignite,us-ignite/us_ignite,us-ignite/us_ignite,us-ignite/us_ignite
|
|
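As a usage sketch (not part of the commit): the filter registers under the library name "common_markdown", matching the module's file name, so it could be exercised from Python roughly as below, assuming a configured Django project with the app installed and assuming us_ignite.common.output.to_html converts Markdown to HTML (that helper is not shown in this row).

# Hypothetical smoke test for the filter above.
from django.template import Context, Template

t = Template('{% load common_markdown %}{{ body|markdown }}')
html = t.render(Context({'body': '**bold** text'}))
print(html)  # expected to contain <strong>bold</strong>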
2cdc649d810e49c3879ec055741b1bdd909c55e0
|
feincms/management/commands/rebuild_mptt.py
|
feincms/management/commands/rebuild_mptt.py
|
# ------------------------------------------------------------------------
# coding=utf-8
# $Id$
# ------------------------------------------------------------------------
from django.core.management.base import NoArgsCommand
from django.db import transaction
from feincms.module.page.models import Page
class Command(NoArgsCommand):
help = "Run this manually to rebuild your mptt pointers. Only use in emergencies."
@staticmethod
def seq(start = 1):
"""
Returns an ever-increasing stream of numbers. The starting point can
be freely defined.
"""
while True:
yield start
start += 1
@transaction.commit_manually
def handle_noargs(self, **options):
print "Rebuilding MPTT pointers for Page"
root = 1
changes = set()
for page in Page.objects.filter(parent__isnull=True).order_by('tree_id'):
print " Processing subtree %d at %s" % ( page.tree_id, page.slug )
page.tree_id = root # Renumber tree_id for good measure
self.renumber_mptt_tree(page, self.seq(1))
root += 1
transaction.commit()
def renumber_mptt_tree(self, obj, edge_count):
obj.lft = edge_count.next()
for c in obj.children.order_by('lft', 'rght').all():
self.renumber_mptt_tree(c, edge_count)
obj.rght = edge_count.next()
obj.save()
|
Add a command to rebuild the mptt tree.
|
Add a command to rebuild the mptt tree.
Only for emergencies, to get your tree back into a usable state should anything happen to the node indexes.
|
Python
|
bsd-3-clause
|
mjl/feincms,matthiask/django-content-editor,michaelkuty/feincms,matthiask/feincms2-content,nickburlett/feincms,matthiask/feincms2-content,michaelkuty/feincms,joshuajonah/feincms,joshuajonah/feincms,hgrimelid/feincms,hgrimelid/feincms,michaelkuty/feincms,feincms/feincms,pjdelport/feincms,nickburlett/feincms,matthiask/django-content-editor,feincms/feincms,hgrimelid/feincms,matthiask/django-content-editor,mjl/feincms,feincms/feincms,pjdelport/feincms,joshuajonah/feincms,nickburlett/feincms,nickburlett/feincms,pjdelport/feincms,matthiask/django-content-editor,mjl/feincms,joshuajonah/feincms,matthiask/feincms2-content,michaelkuty/feincms
|
|
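To make the numbering scheme concrete, here is a self-contained sketch of the same depth-first edge counting on a plain Python tree; Node is a hypothetical stand-in for the Django model, and next(counter) replaces the Python 2 counter.next() used in the commit.

# Each node's lft is assigned on entry and rght on exit, from one shared counter.
def seq(start=1):
    while True:
        yield start
        start += 1

class Node(object):
    def __init__(self, name, children=()):
        self.name, self.children = name, list(children)

def renumber(node, counter):
    node.lft = next(counter)   # entering the node
    for child in node.children:
        renumber(child, counter)
    node.rght = next(counter)  # leaving the node

root = Node('root', [Node('a', [Node('a1')]), Node('b')])
renumber(root, seq(1))
print(root.lft, root.rght)  # 1 8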
92fd6c4785523a6891aa02eed460451552fb186e
|
scripts/tag_lyrics.py
|
scripts/tag_lyrics.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# needs mutagen
# grabbed from: http://code.activestate.com/recipes/577138-embed-lyrics-into-mp3-files-using-mutagen-uslt-tag/
# simplified to only work on one file and get lyrics from stdin
import os
import sys
import codecs
from mutagen.mp3 import MP3
from mutagen.id3 import ID3NoHeaderError
from mutagen.id3 import ID3, USLT
TEXT_ENCODING = 'utf8'
TEXT_LANG = 'XXX'
TEXT_DESC = ''
# get the target file name from the first argument
if (len(sys.argv) > 1):
fname = sys.argv[1]
print "fname=" + fname
else:
print 'Give me at least a file name to work on, plus the lyrics from stdin'
print 'Optionally, you can provide the language (3 lowercase letters) of the lyrics and a description'
sys.exit()
if (len(sys.argv) > 2):
TEXT_LANG = sys.argv[2]
if (len(sys.argv) > 3):
TEXT_DESC = sys.argv[3]
print "reading lyrics from standard input ..."
lyrics = sys.stdin.read().strip()
# try to find the right encoding
for enc in ('utf8','iso-8859-1','iso-8859-15','cp1252','cp1251','latin1'):
try:
lyrics = lyrics.decode(enc)
TEXT_DESC = TEXT_DESC.decode(enc)
print enc,
break
except:
pass
print "Adding lyrics to " + fname
print "Language: " + TEXT_LANG
print "Description: " + TEXT_DESC
# create ID3 tag if not exists
try:
tags = ID3(fname)
except ID3NoHeaderError:
print "Adding ID3 header;",
tags = ID3()
# remove old unsynchronized lyrics
if len(tags.getall(u"USLT::'"+TEXT_LANG+"'")) != 0:
print "Removing Lyrics."
tags.delall(u"USLT::'"+TEXT_LANG+"'")
#tags.save(fname) # hm, why?
#tags.add(USLT(encoding=3, lang=u'eng', desc=u'desc', text=lyrics))
# apparently the description is important when more than one
# USLT frames are present
#tags[u"USLT::'eng'"] = (USLT(encoding=3, lang=u'eng', desc=u'desc', text=lyrics))
tags[u"USLT::'"+TEXT_LANG+"'"] = (USLT(encoding=3, lang=TEXT_LANG, desc=TEXT_DESC, text=lyrics))
print 'Added USLT frame to', fname
tags.save(fname)
print 'Done'
|
Add a little script to create test files with lyrics.
|
Add a little script to create test files with lyrics.
git-svn-id: 793bb72743a407948e3701719c462b6a765bc435@3088 35dc7657-300d-0410-a2e5-dc2837fedb53
|
Python
|
lgpl-2.1
|
Distrotech/mpg123,Distrotech/mpg123,Distrotech/mpg123,Distrotech/mpg123,Distrotech/mpg123,Distrotech/mpg123,Distrotech/mpg123
|
|
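A quick way to verify the result (an assumed follow-up, not part of the commit) is to read the frame back with mutagen; ID3.getall returns every USLT frame regardless of language and description.

# Hypothetical check, after 'song.mp3' has been run through the script above.
from mutagen.id3 import ID3

tags = ID3('song.mp3')
for frame in tags.getall('USLT'):
    print(frame.lang, repr(frame.text[:40]))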
ac3ef2ed9d8fd5418a5f7365dde78afc7a9ae29c
|
getMoisture.py
|
getMoisture.py
|
#!/usr/bin/python
import spidev
import time
import RPi.GPIO as GPIO
import datetime
import time
# SPI needs to be set up on the Raspberry Pi first
#GPIO
GPIO.setmode(GPIO.BOARD)
pin = 11
GPIO.setup(pin,GPIO.OUT)
GPIO.output(pin,True)
# Open SPI bus
spi = spidev.SpiDev()
spi.open(0,0)
# Function to read SPI data from MCP3008 chip
# Channel must be an integer 0-7
def ReadChannel(channel):
adc = spi.xfer2([1,(8+channel)<<4,0])
data = ((adc[1]&3) << 8) + adc[2]
return data
# Function to convert data to voltage level,
# rounded to specified number of decimal places.
def ConvertVolts(data,places):
volts = (data * 3.3) / float(1023)
volts = round(volts,places)
return volts
# Define sensor channels
moisture_1_channel = 0
moisture_2_channel = 1
# Define delay between readings
delay = 1
time.sleep(5)
d = datetime.datetime.now()
# Read the moisture sensor data
moisture_1_level = ReadChannel(moisture_1_channel)
moisture_1_volts = ConvertVolts(moisture_1_level,2)
moisture_2_level = ReadChannel(moisture_2_channel)
moisture_2_volts = ConvertVolts(moisture_2_level,2)
#GPIO.output(pin,True)
# Print out results
line = d.strftime("%Y-%m-%d %H:%M:%S,")+str(moisture_1_level)+","+str(moisture_1_volts)+","+str(moisture_2_level)+ "," +str(moisture_2_volts)+"\n"
#print(line)
#print("1 = "+str(moisture_1_level)+ " - " +str(moisture_1_volts))
#print("2 = "+str(moisture_2_level)+ " - " +str(moisture_2_volts))
with open("/home/john/meteor-Data/moisture.csv", "a") as f:
f.write(line)
# Power off the sensor and release the GPIO pins
GPIO.output(pin,False)
GPIO.cleanup()
|
Add script for soil moisture sensors.
|
Add script for soil moisture sensors.
|
Python
|
lgpl-2.1
|
TomasBedrnik/Meteo-Backend,TomasBedrnik/Meteo-Backend,TomasBedrnik/Meteo-Backend
|
|
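The bit arithmetic in ReadChannel follows the MCP3008 protocol: the first byte carries the start bit, the second selects single-ended mode plus the channel in the top nibble, and the 10-bit result spans the low two bits of the second reply byte and all of the third. A worked example, with reply bytes invented for illustration:

# Decode one hypothetical MCP3008 reply.
channel = 1
request = [1, (8 + channel) << 4, 0]   # -> [1, 0x90, 0]
adc = [0, 0b00000010, 0b11010100]      # pretend reply from the chip
data = ((adc[1] & 3) << 8) + adc[2]    # 10-bit value: 2*256 + 212 = 724
volts = round((data * 3.3) / float(1023), 2)
print(data, volts)                     # 724 2.34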
6abdfdd5f33aaaf05df4821ae14f594cb5b826a7
|
src/test/swarm_test.py
|
src/test/swarm_test.py
|
import unittest
import pyoptima as opt
class SwarmTest(unittest.TestCase):
def test_swarm_with_parabola(self):
hyper_params = {'phi_local': 1, 'phi_global': 1, 'omega': 0.01}
params = {'x0': (-1, 1), 'x1': (-1, 1)}
num_particles = 100
s = opt.Swarm(params, hyper_params, num_particles)
s.optimize(opt.parabola, 100)
bst_solution = s.getBestSolution()
for value in bst_solution:
self.assertAlmostEqual(value, 0)
def test_swarm_with_rosen(self):
hyper_params = {'phi_local': 1, 'phi_global': 1, 'omega': 0.1}
params = {'x0': (-1, 1), 'x1': (-1, 1)}
num_particles = 100
s = opt.Swarm(params, hyper_params, num_particles)
s.optimize(opt.rosen, 1000)
bst_solution = s.getBestSolution()
for value in bst_solution:
self.assertAlmostEqual(value, 1)
if __name__ == "__main__":
unittest.main()
|
Add python test for swarm optimisation
|
Add python test for swarm optimisation
This will still need to be hooked into the main build system
|
Python
|
mit
|
samueljackson92/optima,samueljackson92/metaopt,samueljackson92/optima,samueljackson92/metaopt
|
|
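For context, phi_local, phi_global and omega are the coefficients of the standard particle-swarm velocity update; pyoptima's internals are not shown in this row, so the sketch below is the textbook rule rather than its exact implementation.

# One velocity/position step for a single particle.
import random

def step(x, v, best_local, best_global, omega, phi_local, phi_global):
    new_v = [omega * vi
             + phi_local * random.random() * (bl - xi)
             + phi_global * random.random() * (bg - xi)
             for xi, vi, bl, bg in zip(x, v, best_local, best_global)]
    new_x = [xi + vi for xi, vi in zip(x, new_v)]
    return new_x, new_v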
b0c8a5b114837ae9b6f4d7e81fc587ac0d1fc3a4
|
test/test_hpack_structures.py
|
test/test_hpack_structures.py
|
# -*- coding: utf-8 -*-
from hyper.http20.hpack_structures import Reference
class TestReference(object):
"""
Tests of the HPACK reference structure.
"""
def test_references_can_be_created(self):
r = Reference(None)
assert r
def test_two_references_to_the_same_object_compare_equal(self):
a = 'hi'
r1 = Reference(a)
r2 = Reference(a)
assert r1 == r2
def test_two_references_to_equal_but_different_objects_compare_different(self):
a = ['hi'] # Use lists to avoid interning
b = ['hi']
r1 = Reference(a)
r2 = Reference(b)
assert r1 != r2
def test_two_references_to_unequal_objects_compare_different(self):
a = 'hi'
b = 'hi there'
r1 = Reference(a)
r2 = Reference(b)
assert r1 != r2
def test_two_references_to_the_same_object_hash_the_same(self):
a = 'hi'
r1 = Reference(a)
r2 = Reference(a)
assert r1.__hash__() == r2.__hash__()
def test_two_references_to_equal_but_different_objects_hash_differently(self):
a = ['hi'] # Use lists to avoid interning.
b = ['hi']
r1 = Reference(a)
r2 = Reference(b)
assert r1.__hash__() != r2.__hash__()
def test_two_references_to_unequal_objects_hash_differently(self):
a = 'hi'
b = 'hi there'
r1 = Reference(a)
r2 = Reference(b)
assert r1.__hash__() != r2.__hash__()
|
Test the HPACK reference structure.
|
Test the HPACK reference structure.
|
Python
|
mit
|
fredthomsen/hyper,jdecuyper/hyper,plucury/hyper,jdecuyper/hyper,lawnmowerlatte/hyper,plucury/hyper,fredthomsen/hyper,irvind/hyper,masaori335/hyper,masaori335/hyper,Lukasa/hyper,lawnmowerlatte/hyper,irvind/hyper,Lukasa/hyper
|
|
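The tests pin down identity-based semantics: two references compare equal (and hash alike) only when they wrap the very same object. A minimal class that satisfies every assertion above is sketched here; hyper's actual implementation is not shown in this row and may differ.

# Equality and hashing follow object identity, not value.
class Reference(object):
    def __init__(self, obj):
        self.obj = obj

    def __eq__(self, other):
        return self.obj is other.obj

    def __ne__(self, other):  # needed explicitly on Python 2
        return not self == other

    def __hash__(self):
        return id(self.obj)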
a233f685f6cb514420fd534388d51ee92459d886
|
src/diamond/__init__.py
|
src/diamond/__init__.py
|
# coding=utf-8
import os
import sys
import string
import logging
import time
import traceback
import configobj
import socket
import re
import os
import sys
import re
import logging
import time
import datetime
import random
import urllib2
import base64
import csv
import platform
import string
import traceback
import configobj
import socket
from urlparse import urlparse
|
# coding=utf-8
"""
Diamond module init code
"""
import os
import sys
import logging
import time
import traceback
import configobj
import socket
import re
import datetime
import random
import urllib2
import base64
import csv
import platform
from urlparse import urlparse
|
Remove duplicate imports and remove entirely unused string
|
Remove duplicate imports and remove entirely unused string
|
Python
|
mit
|
szibis/Diamond,russss/Diamond,TAKEALOT/Diamond,szibis/Diamond,signalfx/Diamond,dcsquared13/Diamond,python-diamond/Diamond,eMerzh/Diamond-1,jumping/Diamond,codepython/Diamond,socialwareinc/Diamond,actmd/Diamond,cannium/Diamond,Slach/Diamond,eMerzh/Diamond-1,saucelabs/Diamond,zoidbergwill/Diamond,disqus/Diamond,bmhatfield/Diamond,Slach/Diamond,cannium/Diamond,tusharmakkar08/Diamond,actmd/Diamond,saucelabs/Diamond,TAKEALOT/Diamond,jaingaurav/Diamond,MediaMath/Diamond,Ormod/Diamond,TAKEALOT/Diamond,Ssawa/Diamond,Nihn/Diamond-1,timchenxiaoyu/Diamond,bmhatfield/Diamond,mzupan/Diamond,CYBERBUGJR/Diamond,sebbrandt87/Diamond,Basis/Diamond,Netuitive/netuitive-diamond,tellapart/Diamond,gg7/diamond,hvnsweeting/Diamond,skbkontur/Diamond,actmd/Diamond,jriguera/Diamond,MediaMath/Diamond,stuartbfox/Diamond,Precis/Diamond,MichaelDoyle/Diamond,cannium/Diamond,stuartbfox/Diamond,Netuitive/Diamond,TinLe/Diamond,russss/Diamond,CYBERBUGJR/Diamond,dcsquared13/Diamond,TinLe/Diamond,zoidbergwill/Diamond,krbaker/Diamond,Nihn/Diamond-1,signalfx/Diamond,metamx/Diamond,Basis/Diamond,rtoma/Diamond,disqus/Diamond,saucelabs/Diamond,Ormod/Diamond,bmhatfield/Diamond,Ensighten/Diamond,jaingaurav/Diamond,TAKEALOT/Diamond,datafiniti/Diamond,datafiniti/Diamond,sebbrandt87/Diamond,actmd/Diamond,ceph/Diamond,timchenxiaoyu/Diamond,Precis/Diamond,jaingaurav/Diamond,anandbhoraskar/Diamond,Ensighten/Diamond,janisz/Diamond-1,metamx/Diamond,MediaMath/Diamond,acquia/Diamond,CYBERBUGJR/Diamond,Slach/Diamond,thardie/Diamond,signalfx/Diamond,signalfx/Diamond,Precis/Diamond,tusharmakkar08/Diamond,works-mobile/Diamond,tuenti/Diamond,rtoma/Diamond,joel-airspring/Diamond,CYBERBUGJR/Diamond,Ormod/Diamond,codepython/Diamond,joel-airspring/Diamond,Basis/Diamond,krbaker/Diamond,TinLe/Diamond,mfriedenhagen/Diamond,skbkontur/Diamond,eMerzh/Diamond-1,python-diamond/Diamond,krbaker/Diamond,disqus/Diamond,anandbhoraskar/Diamond,rtoma/Diamond,hvnsweeting/Diamond,janisz/Diamond-1,skbkontur/Diamond,hvnsweeting/Diamond,tuenti/Diamond,hamelg/Diamond,szibis/Diamond,stuartbfox/Diamond,janisz/Diamond-1,rtoma/Diamond,dcsquared13/Diamond,socialwareinc/Diamond,timchenxiaoyu/Diamond,sebbrandt87/Diamond,sebbrandt87/Diamond,acquia/Diamond,krbaker/Diamond,EzyInsights/Diamond,anandbhoraskar/Diamond,jumping/Diamond,tellapart/Diamond,EzyInsights/Diamond,thardie/Diamond,gg7/diamond,cannium/Diamond,Nihn/Diamond-1,socialwareinc/Diamond,Slach/Diamond,Basis/Diamond,skbkontur/Diamond,Ensighten/Diamond,janisz/Diamond-1,EzyInsights/Diamond,python-diamond/Diamond,codepython/Diamond,Clever/Diamond,hvnsweeting/Diamond,Ssawa/Diamond,ramjothikumar/Diamond,saucelabs/Diamond,EzyInsights/Diamond,Netuitive/netuitive-diamond,Netuitive/netuitive-diamond,szibis/Diamond,socialwareinc/Diamond,joel-airspring/Diamond,Ssawa/Diamond,gg7/diamond,Ensighten/Diamond,Clever/Diamond,h00dy/Diamond,gg7/diamond,MichaelDoyle/Diamond,bmhatfield/Diamond,Netuitive/netuitive-diamond,Precis/Diamond,joel-airspring/Diamond,ramjothikumar/Diamond,Netuitive/Diamond,MichaelDoyle/Diamond,timchenxiaoyu/Diamond,mfriedenhagen/Diamond,mzupan/Diamond,anandbhoraskar/Diamond,tellapart/Diamond,ramjothikumar/Diamond,datafiniti/Diamond,hamelg/Diamond,ramjothikumar/Diamond,codepython/Diamond,thardie/Diamond,jriguera/Diamond,h00dy/Diamond,h00dy/Diamond,jriguera/Diamond,Clever/Diamond,russss/Diamond,hamelg/Diamond,zoidbergwill/Diamond,mfriedenhagen/Diamond,jaingaurav/Diamond,hamelg/Diamond,Ssawa/Diamond,thardie/Diamond,mzupan/Diamond,tellapart/Diamond,ceph/Diamond,zoidbergwill/Diamond,datafiniti/Diamond,works-mobile/Diamond,Nihn/
Diamond-1,tuenti/Diamond,acquia/Diamond,tusharmakkar08/Diamond,h00dy/Diamond,TinLe/Diamond,jriguera/Diamond,tuenti/Diamond,jumping/Diamond,Netuitive/Diamond,eMerzh/Diamond-1,works-mobile/Diamond,ceph/Diamond,MichaelDoyle/Diamond,Ormod/Diamond,jumping/Diamond,tusharmakkar08/Diamond,dcsquared13/Diamond,Clever/Diamond,ceph/Diamond,stuartbfox/Diamond,acquia/Diamond,works-mobile/Diamond,mfriedenhagen/Diamond,Netuitive/Diamond,metamx/Diamond,russss/Diamond,MediaMath/Diamond,mzupan/Diamond
|
|
ce1f05c9943b365e35758502a38122ffe02c0d85
|
prefix_sums/min_slice.py
|
prefix_sums/min_slice.py
|
# It is required to find the starting position of the slice with the minimal
# average in a numerical sequence. There is a mathematical proof that only
# slices of 2 and 3 elements need to be considered: any longer slice can be
# split into sub-slices of length 2 or 3, at least one of which has an average
# no greater than that of the whole slice. Having that, the solution becomes
# really simple.
def get_prefix_sum(A):
result = [0] * (len(A) + 1)
for i in xrange(1, len(A) + 1):
result[i] = result[i - 1] + A[i - 1]
return result
def solution(A):
pref = get_prefix_sum(A)
min_i = 0
min_slice = float(A[0] + A[1])/2.0
for i in xrange(len(A)):
if i < len(A) - 2:
slice_3_avg = float(pref[i + 3] - pref[i])/3.0
if slice_3_avg < min_slice:
min_slice = slice_3_avg
min_i = i
if i < len(A) - 1:
slice_2_avg = float(pref[i + 2] - pref[i])/2.0
if slice_2_avg < min_slice:
min_slice = slice_2_avg
min_i = i
return min_i
|
Add min avg slice solution.
|
Add min avg slice solution.
|
Python
|
apache-2.0
|
isendel/algorithms
|
|
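A hand-checkable run (input assumed for illustration, not part of the commit): with A = [4, 2, 2, 5, 1, 5, 8] the prefix sums are [0, 4, 6, 8, 13, 14, 19, 27], and the best slice is (2 + 2) / 2 = 2.0 starting at index 1.

print(solution([4, 2, 2, 5, 1, 5, 8]))  # 1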
05864894b876a679726f1063832f70cfecb4325e
|
py/print-binary-tree.py
|
py/print-binary-tree.py
|
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
def printTree(self, root):
"""
:type root: TreeNode
:rtype: List[List[str]]
"""
q = [(root, 1, 0)]
for v, d, c in q:
if v.left:
q.append((v.left, d + 1, c << 1))
if v.right:
q.append((v.right, d + 1, (c << 1) | 1))
maxd = q[-1][1]
table = [[""] * ((1 << maxd) - 1) for _ in xrange(maxd)]
for v, d, c in q:
x = d - 1
y = (1 << (maxd - 1)) - 1
for i in xrange(d - 1):
if c & (1 << i):
y += (1 << (i + (maxd - d)))
else:
y -= (1 << (i + (maxd - d)))
table[x][y] = str(v.val)
return table
|
Add py solution for 655. Print Binary Tree
|
Add py solution for 655. Print Binary Tree
655. Print Binary Tree: https://leetcode.com/problems/print-binary-tree/
|
Python
|
apache-2.0
|
ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode
|
|
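A minimal usage sketch for the Solution class in the record above; the TreeNode definition and the tree shape are illustrative assumptions, not part of the commit, and Python 2 is assumed because the solution calls xrange. Each node's root-to-node path is packed into the bits of c, which the loop then unpacks into a column offset.
# Hypothetical usage sketch (Python 2, Solution as defined above).
class TreeNode(object):
    def __init__(self, x):
        self.val = x
        self.left = None
        self.right = None

root = TreeNode(1)             # tree [1, 2, 3, null, 4]
root.left = TreeNode(2)
root.right = TreeNode(3)
root.left.right = TreeNode(4)

for row in Solution().printTree(root):
    print row                  # e.g. ['', '', '', '1', '', '', '']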
9645b58b73e689efafd50ee2681beb813f3e410d
|
tests/test_complex_dtypes.py
|
tests/test_complex_dtypes.py
|
import logging
import sys
import uuid
import numpy as np
import pytest
import rasterio
logging.basicConfig(stream=sys.stderr, level=logging.DEBUG)
@pytest.fixture(scope='function')
def tempfile():
"""A temporary filename in the GDAL '/vsimem' filesystem"""
return '/vsimem/{}'.format(uuid.uuid4())
def complex_image(height, width, dtype):
"""An array with sequential elements"""
return np.array(
[complex(x, x) for x in range(height * width)],
dtype=dtype).reshape(height, width)
dtypes = ['complex', 'complex64', 'complex128']
@pytest.mark.parametrize("dtype", dtypes)
@pytest.mark.parametrize("height,width", [(20, 20)])
def test_read_array(tempfile, dtype, height, width):
"""_io functions read and write arrays correctly"""
in_img = complex_image(height, width, dtype)
with rasterio.open(tempfile, 'w', driver='GTiff', dtype=dtype,
height=height, width=width, count=1) as dataset:
dataset.write(in_img, 1)
out_img = dataset.read(1)
assert (in_img == out_img).all()
|
Add test of complex types
|
Add test of complex types
|
Python
|
bsd-3-clause
|
brendan-ward/rasterio,brendan-ward/rasterio,brendan-ward/rasterio
|
Add test of complex types
|
import logging
import sys
import uuid
import numpy as np
import pytest
import rasterio
logging.basicConfig(stream=sys.stderr, level=logging.DEBUG)
@pytest.fixture(scope='function')
def tempfile():
"""A temporary filename in the GDAL '/vsimem' filesystem"""
return '/vsimem/{}'.format(uuid.uuid4())
def complex_image(height, width, dtype):
"""An array with sequential elements"""
return np.array(
[complex(x, x) for x in range(height * width)],
dtype=dtype).reshape(height, width)
dtypes = ['complex', 'complex64', 'complex128']
@pytest.mark.parametrize("dtype", dtypes)
@pytest.mark.parametrize("height,width", [(20, 20)])
def test_read_array(tempfile, dtype, height, width):
"""_io functions read and write arrays correctly"""
in_img = complex_image(height, width, dtype)
with rasterio.open(tempfile, 'w', driver='GTiff', dtype=dtype,
height=height, width=width, count=1) as dataset:
dataset.write(in_img, 1)
out_img = dataset.read(1)
assert (in_img == out_img).all()
|
<commit_before><commit_msg>Add test of complex types<commit_after>
|
import logging
import sys
import uuid
import numpy as np
import pytest
import rasterio
logging.basicConfig(stream=sys.stderr, level=logging.DEBUG)
@pytest.fixture(scope='function')
def tempfile():
"""A temporary filename in the GDAL '/vsimem' filesystem"""
return '/vsimem/{}'.format(uuid.uuid4())
def complex_image(height, width, dtype):
"""An array with sequential elements"""
return np.array(
[complex(x, x) for x in range(height * width)],
dtype=dtype).reshape(height, width)
dtypes = ['complex', 'complex64', 'complex128']
@pytest.mark.parametrize("dtype", dtypes)
@pytest.mark.parametrize("height,width", [(20, 20)])
def test_read_array(tempfile, dtype, height, width):
"""_io functions read and write arrays correctly"""
in_img = complex_image(height, width, dtype)
with rasterio.open(tempfile, 'w', driver='GTiff', dtype=dtype,
height=height, width=width, count=1) as dataset:
dataset.write(in_img, 1)
out_img = dataset.read(1)
assert (in_img == out_img).all()
|
Add test of complex typesimport logging
import sys
import uuid
import numpy as np
import pytest
import rasterio
logging.basicConfig(stream=sys.stderr, level=logging.DEBUG)
@pytest.fixture(scope='function')
def tempfile():
"""A temporary filename in the GDAL '/vsimem' filesystem"""
return '/vsimem/{}'.format(uuid.uuid4())
def complex_image(height, width, dtype):
"""An array with sequential elements"""
return np.array(
[complex(x, x) for x in range(height * width)],
dtype=dtype).reshape(height, width)
dtypes = ['complex', 'complex64', 'complex128']
@pytest.mark.parametrize("dtype", dtypes)
@pytest.mark.parametrize("height,width", [(20, 20)])
def test_read_array(tempfile, dtype, height, width):
"""_io functions read and write arrays correctly"""
in_img = complex_image(height, width, dtype)
with rasterio.open(tempfile, 'w', driver='GTiff', dtype=dtype,
height=height, width=width, count=1) as dataset:
dataset.write(in_img, 1)
out_img = dataset.read(1)
assert (in_img == out_img).all()
|
<commit_before><commit_msg>Add test of complex types<commit_after>import logging
import sys
import uuid
import numpy as np
import pytest
import rasterio
logging.basicConfig(stream=sys.stderr, level=logging.DEBUG)
@pytest.fixture(scope='function')
def tempfile():
"""A temporary filename in the GDAL '/vsimem' filesystem"""
return '/vsimem/{}'.format(uuid.uuid4())
def complex_image(height, width, dtype):
"""An array with sequential elements"""
return np.array(
[complex(x, x) for x in range(height * width)],
dtype=dtype).reshape(height, width)
dtypes = ['complex', 'complex64', 'complex128']
@pytest.mark.parametrize("dtype", dtypes)
@pytest.mark.parametrize("height,width", [(20, 20)])
def test_read_array(tempfile, dtype, height, width):
"""_io functions read and write arrays correctly"""
in_img = complex_image(height, width, dtype)
with rasterio.open(tempfile, 'w', driver='GTiff', dtype=dtype,
height=height, width=width, count=1) as dataset:
dataset.write(in_img, 1)
out_img = dataset.read(1)
assert (in_img == out_img).all()
|
|
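The same round trip can be sketched outside pytest; this assumes rasterio with a GDAL build that provides the in-memory /vsimem filesystem, and the path and array shape are illustrative.
# Standalone sketch of the complex-dtype round trip above.
import uuid
import numpy as np
import rasterio

path = '/vsimem/{}'.format(uuid.uuid4())   # in-memory file, nothing touches disk
img = np.array([complex(x, x) for x in range(400)],
               dtype='complex64').reshape(20, 20)
with rasterio.open(path, 'w', driver='GTiff', dtype='complex64',
                   height=20, width=20, count=1) as dataset:
    dataset.write(img, 1)
    assert (dataset.read(1) == img).all()  # values survive the round trip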
f888799f4c88952f3ee77578fc1aface8eb4b067
|
olympiad/bestfit.py
|
olympiad/bestfit.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2014 Fabian M.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Best fit algorithm implementation in Python as solution for problem C1.
# Reads the size of each bin, the amount of numbers and the numbers from
# the standard input and calculates the least amount of bins needed
# to pack these numbers in a bin of the given size.
if __name__ == "__main__":
b, n = [int(input()) for x in range(0, 2)]
ws = sorted([int(input()) for x in range(0, n)], reverse=True)
bins = []
for w in ws:
for x in bins:
if sum(x) + w <= b:
x.append(w)
w = None
break
if w != None:
bins.append([w])
bins = [bins[index] for index, x in sorted(enumerate(map(sum, bins)), key=lambda k: k[1], reverse=True)]
print(len(bins))
|
Add solution for problem C1
|
Add solution for problem C1
|
Python
|
apache-2.0
|
fabianm/olympiad,fabianm/olympiad,fabianm/olympiad
|
Add solution for problem C1
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2014 Fabian M.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Best fit algorithm implementation in Python as solution for problem C1.
# Reads the size of each bin, the amount of numbers and the numbers from
# the standard input and calculates the least amount of bins needed
# to pack these numbers in a bin of the given size.
if __name__ == "__main__":
b, n = [int(input()) for x in range(0, 2)]
ws = sorted([int(input()) for x in range(0, n)], reverse=True)
bins = []
for w in ws:
for x in bins:
if sum(x) + w <= b:
x.append(w)
w = None
break
if w != None:
bins.append([w])
bins = [bins[index] for index, x in sorted(enumerate(map(sum, bins)), key=lambda k: k[1], reverse=True)]
print(len(bins))
|
<commit_before><commit_msg>Add solution for problem C1<commit_after>
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2014 Fabian M.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Best fit algorithm implementation in Python as solution for problem C1.
# Reads the size of each bin, the amount of numbers and the numbers from
# the standard input and calculates the least amount of bins needed
# to pack these numbers in a bin of the given size.
if __name__ == "__main__":
b, n = [int(input()) for x in range(0, 2)]
ws = sorted([int(input()) for x in range(0, n)], reverse=True)
bins = []
for w in ws:
for x in bins:
if sum(x) + w <= b:
x.append(w)
w = None
break
if w != None:
bins.append([w])
bins = [bins[index] for index, x in sorted(enumerate(map(sum, bins)), key=lambda k: k[1], reverse=True)]
print(len(bins))
|
Add solution for problem C1#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2014 Fabian M.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Best fit algorithm implementation in Python as solution for problem C1.
# Reads the size of each bin, the amount of numbers and the numbers from
# the standard input and calculates the least amount of bins needed
# to pack these numbers in a bin of the given size.
if __name__ == "__main__":
b, n = [int(input()) for x in range(0, 2)]
ws = sorted([int(input()) for x in range(0, n)], reverse=True)
bins = []
for w in ws:
for x in bins:
if sum(x) + w <= b:
x.append(w)
w = None
break
if w != None:
bins.append([w])
bins = [bins[index] for index, x in sorted(enumerate(map(sum, bins)), key=lambda k: k[1], reverse=True)]
print(len(bins))
|
<commit_before><commit_msg>Add solution for problem C1<commit_after>#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2014 Fabian M.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Best fit algorithm implementation in Python as solution for problem C1.
# Reads the size of each bin, the amount of numbers and the numbers from
# the standard input and calculates the least amount of bins needed
# to pack these numbers in a bin of the given size.
if __name__ == "__main__":
b, n = [int(input()) for x in range(0, 2)]
ws = sorted([int(input()) for x in range(0, n)], reverse=True)
bins = []
for w in ws:
for x in bins:
if sum(x) + w <= b:
x.append(w)
w = None
break
if w != None:
bins.append([w])
bins = [bins[index] for index, x in sorted(enumerate(map(sum, bins)), key=lambda k: k[1], reverse=True)]
print(len(bins))
|
|
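Despite the file name, the loop above is first-fit decreasing: each weight lands in the first bin with room, not the fullest one. A worked trace on invented input (capacity 10):
# Hypothetical trace of the packing loop with b = 10.
b = 10
ws = sorted([7, 5, 4, 3, 1], reverse=True)   # [7, 5, 4, 3, 1]
bins = []
for w in ws:
    for x in bins:
        if sum(x) + w <= b:   # first bin with room wins
            x.append(w)
            w = None
            break
    if w is not None:
        bins.append([w])
print(len(bins), bins)        # 2 [[7, 3], [5, 4, 1]]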
726cae5bfbb4a53ba979babc87e681b19562112e
|
lib/reinteract/format_escaped.py
|
lib/reinteract/format_escaped.py
|
#
# Very, very partial implementation of g_markup_printf_escaped(). Doesn't
# handle things like a %c with an integer argument that evaluates to
# a markup special character, or special characters in the repr() or str()
# of an object. It also doesn't handle %(name)s type arguments with
# keyword arguments.
#
# To do better at escaping everything, you'd probably want to apply the
# implementation technique of g_markup_printf_escaped(). The main difficulty
# of that is that you need then to be able to split the format string into
# format specifiers and other sections, which means
# a big regular expression encoding the format specifiers defined by
# http://docs.python.org/lib/typesseq-strings.html
#
from gobject import markup_escape_text
def _escape(o):
if isinstance(o, basestring):
return markup_escape_text(o)
else:
return o
def format_escaped(fmt, *args):
return fmt % tuple((_escape(x) for x in args))
if __name__ == '__main__':
assert format_escaped("%s %.4f", "&foo", 4.3) == "&foo 4.3000"
|
Add a simple implementation of functionality like g_markup_printf_escaped()
|
Add a simple implementation of functionality like
g_markup_printf_escaped()
|
Python
|
bsd-2-clause
|
rschroll/reinteract,johnrizzo1/reinteract,jbaayen/reinteract,alexey4petrov/reinteract,alexey4petrov/reinteract,rschroll/reinteract,alexey4petrov/reinteract,jbaayen/reinteract,rschroll/reinteract,jbaayen/reinteract,johnrizzo1/reinteract,johnrizzo1/reinteract
|
Add a simple implementation of functionality like
g_markup_printf_escaped()
|
#
# Very, very partial implementation of g_markup_printf_escaped(). Doesn't
# handle things like a %c with an integer argument that evaluates to
# a markup special character, or special characters in the repr() or str()
# of an object. It also doesn't handle %(name)s type arguments with
# keyword arguments.
#
# To do better at escaping everything, you'd probably want to apply the
# implementation technique of g_markup_printf_escaped(). The main difficulty
# of that is that you need then to be able to split the format string into
# format specifiers and other sections, which means
# a big regular expression encoding the format specifiers defined by
# http://docs.python.org/lib/typesseq-strings.html
#
from gobject import markup_escape_text
def _escape(o):
if isinstance(o, basestring):
return markup_escape_text(o)
else:
return o
def format_escaped(fmt, *args):
return fmt % tuple((_escape(x) for x in args))
if __name__ == '__main__':
assert format_escaped("%s %.4f", "&foo", 4.3) == "&foo 4.3000"
|
<commit_before><commit_msg>Add a simple implementation of functionality like
g_markup_printf_escaped()<commit_after>
|
#
# Very, very partial implementation of g_markup_printf_escaped(). Doesn't
# handle things like a %c with an integer argument that evaluates to
# a markup special character, or special characters in the repr() or str()
# of an object. It also doesn't handle %(name)s type arguments with
# keyword arguments.
#
# To do better at escaping everything, you'd probably want to apply the
# implementation technique of g_markup_printf_escaped(). The main difficulty
# of that is that you need then to be able to split the format string into
# format specifiers and other sections, which means
# a big regular expression encoding the format specifiers defined by
# http://docs.python.org/lib/typesseq-strings.html
#
from gobject import markup_escape_text
def _escape(o):
if isinstance(o, basestring):
return markup_escape_text(o)
else:
return o
def format_escaped(fmt, *args):
return fmt % tuple((_escape(x) for x in args))
if __name__ == '__main__':
assert format_escaped("%s %.4f", "&foo", 4.3) == "&foo 4.3000"
|
Add a simple implementation of functionality like
g_markup_printf_escaped()#
# Very, very partial implementation of g_markup_printf_escaped(). Doesn't
# handle things like a %c with an integer argument that evaluates to
# a markup special character, or special characters in the repr() or str()
# of an object. It also doesn't handle %(name)s type arguments with
# keyword arguments.
#
# To do better at escaping everything, you'd probably want to apply the
# implementation technique of g_markup_printf_escaped(). The main difficulty
# of that is that you need then to be able to split the format string into
# format specifiers and other sections, which means
# a big regular expression encoding the format specifiers defined by
# http://docs.python.org/lib/typesseq-strings.html
#
from gobject import markup_escape_text
def _escape(o):
if isinstance(o, basestring):
return markup_escape_text(o)
else:
return o
def format_escaped(fmt, *args):
return fmt % tuple((_escape(x) for x in args))
if __name__ == '__main__':
assert format_escaped("%s %.4f", "&foo", 4.3) == "&foo 4.3000"
|
<commit_before><commit_msg>Add a simple implementation of functionality like
g_markup_printf_escaped()<commit_after>#
# Very, very partial implementation of g_markup_printf_escaped(). Doesn't
# handle things like a %c with an integer argument that evaluates to
# a markup special character, or special characters in the repr() or str()
# of an object. It also doesn't handle %(name)s type arguments with
# keyword arguments.
#
# To do better at escaping everything, you'd probably want to apply the
# implementation technique of g_markup_printf_escaped(). The main difficulty
# of that is that you need then to be able to split the format string into
# format specifiers and other sections, which means
# a big regular expression encoding the format specifiers defined by
# http://docs.python.org/lib/typesseq-strings.html
#
from gobject import markup_escape_text
def _escape(o):
if isinstance(o, basestring):
return markup_escape_text(o)
else:
return o
def format_escaped(fmt, *args):
return fmt % tuple((_escape(x) for x in args))
if __name__ == '__main__':
assert format_escaped("%s %.4f", "&foo", 4.3) == "&foo 4.3000"
|
|
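A small demo of the behaviour the record documents: markup in the format string is preserved while markup special characters in string arguments are escaped. It assumes format_escaped and pygobject's markup_escape_text are importable; the "Tom & Jerry" argument is invented (Python 2, matching the module).
# Hypothetical demo of format_escaped.
s = format_escaped("<b>%s</b> scored %.1f", "Tom & Jerry", 9.5)
print s   # <b>Tom &amp; Jerry</b> scored 9.5
assert s == "<b>Tom &amp; Jerry</b> scored 9.5"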
fbace843f6febaea28f35511035c8c997ba0fdab
|
queues/priority_queue.py
|
queues/priority_queue.py
|
"""
Implementation of priority queue
"""
class PriorityQueueNode:
def __init__(self, data, priority):
self.data = data
self.priority = priority
def __repr__(self):
return str(self.data) + ": " + str(self.priority)
class PriorityQueue:
def __init__(self):
self.priority_queue_list = []
def size(self):
return len(self.priority_queue_list)
def insert(self, node):
# if queue is empty
if self.size() == 0:
self.priority_queue_list.append(node)
else:
# traverse the queue to find the right place for new node
for index, current in enumerate(self.priority_queue_list):
if current.priority < node.priority:
# if we have traversed the complete queue
if index == self.size() - 1:
# add new node at the end
                        self.priority_queue_list.insert(index + 1, node)
                        break
else:
continue
else:
                    self.priority_queue_list.insert(index, node)
                    break
return True
def delete(self):
# remove the first node from the queue
return self.priority_queue_list.pop(0)
|
Add priority queue implementation using lists
|
Add priority queue implementation using lists
|
Python
|
mit
|
keon/algorithms,amaozhao/algorithms
|
Add priority queue implementation using lists
|
"""
Implementation of priority queue
"""
class PriorityQueueNode:
def __init__(self, data, priority):
self.data = data
self.priority = priority
def __repr__(self):
return str(self.data) + ": " + str(self.priority)
class PriorityQueue:
def __init__(self):
self.priority_queue_list = []
def size(self):
return len(self.priority_queue_list)
def insert(self, node):
# if queue is empty
if self.size() == 0:
self.priority_queue_list.append(node)
else:
# traverse the queue to find the right place for new node
for index, current in enumerate(self.priority_queue_list):
if current.priority < node.priority:
# if we have traversed the complete queue
if index == self.size() - 1:
# add new node at the end
                        self.priority_queue_list.insert(index + 1, node)
                        break
else:
continue
else:
                    self.priority_queue_list.insert(index, node)
                    break
return True
def delete(self):
# remove the first node from the queue
return self.priority_queue_list.pop(0)
|
<commit_before><commit_msg>Add priority queue implementation using lists<commit_after>
|
"""
Implementation of priority queue
"""
class PriorityQueueNode:
def __init__(self, data, priority):
self.data = data
self.priority = priority
def __repr__(self):
return str(self.data) + ": " + str(self.priority)
class PriorityQueue:
def __init__(self):
self.priority_queue_list = []
def size(self):
return len(self.priority_queue_list)
def insert(self, node):
# if queue is empty
if self.size() == 0:
self.priority_queue_list.append(node)
else:
# traverse the queue to find the right place for new node
for index, current in enumerate(self.priority_queue_list):
if current.priority < node.priority:
# if we have traversed the complete queue
if index == self.size() - 1:
# add new node at the end
                        self.priority_queue_list.insert(index + 1, node)
                        break
else:
continue
else:
                    self.priority_queue_list.insert(index, node)
                    break
return True
def delete(self):
# remove the first node from the queue
return self.priority_queue_list.pop(0)
|
Add priority queue implementation using lists"""
Implementation of priority queue
"""
class PriorityQueueNode:
def __init__(self, data, priority):
self.data = data
self.priority = priority
def __repr__(self):
return str(self.data) + ": " + str(self.priority)
class PriorityQueue:
def __init__(self):
self.priority_queue_list = []
def size(self):
return len(self.priority_queue_list)
def insert(self, node):
# if queue is empty
if self.size() == 0:
self.priority_queue_list.append(node)
else:
# traverse the queue to find the right place for new node
for index, current in enumerate(self.priority_queue_list):
if current.priority < node.priority:
# if we have traversed the complete queue
if index == self.size() - 1:
# add new node at the end
                        self.priority_queue_list.insert(index + 1, node)
                        break
else:
continue
else:
                    self.priority_queue_list.insert(index, node)
                    break
return True
def delete(self):
# remove the first node from the queue
return self.priority_queue_list.pop(0)
|
<commit_before><commit_msg>Add priority queue implementation using lists<commit_after>"""
Implementation of priority queue
"""
class PriorityQueueNode:
def __init__(self, data, priority):
self.data = data
self.priority = priority
def __repr__(self):
return str(self.data) + ": " + str(self.priority)
class PriorityQueue:
def __init__(self):
self.priority_queue_list = []
def size(self):
return len(self.priority_queue_list)
def insert(self, node):
# if queue is empty
if self.size() == 0:
self.priority_queue_list.append(node)
else:
# traverse the queue to find the right place for new node
for index, current in enumerate(self.priority_queue_list):
if current.priority < node.priority:
# if we have traversed the complete queue
if index == self.size() - 1:
# add new node at the end
                        self.priority_queue_list.insert(index + 1, node)
                        break
else:
continue
else:
                    self.priority_queue_list.insert(index, node)
                    break
return True
def delete(self):
# remove the first node from the queue
return self.priority_queue_list.pop(0)
|
|
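A usage sketch for the class above with invented data; insert keeps the list in ascending priority order (it scans until it meets an element with an equal or higher priority number), so delete() returns the smallest priority number first.
# Hypothetical usage of PriorityQueue / PriorityQueueNode.
pq = PriorityQueue()
pq.insert(PriorityQueueNode("low", 1))
pq.insert(PriorityQueueNode("high", 9))
pq.insert(PriorityQueueNode("mid", 5))
print(pq.delete())   # low: 1  (front of the list, smallest priority number)
print(pq.delete())   # mid: 5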
d662bf8ddcdd78b6cc75ea7b6731f5ac2379645d
|
rflink1.py
|
rflink1.py
|
import serial
import time
import logging
import re
logging.basicConfig(filename='debug.log',level=logging.DEBUG)
def readlineCR(port):
rv = ""
while True:
ch = port.read().decode()
rv += ch
if ch=='\r':
rv = rv.strip('\r').strip('\n')
return rv
def sendData(data,port):
data="10;"+data+";\r\n"
print("Data Sent:" + data.strip('\r').strip('\n'))
port.write(data.encode())
time.sleep(1)
print("Data Received back:" + repr(readlineCR(port)))
logging.debug(repr(rcv))
def echoData(data,port):
data="11;"+data+";\r\n"
print("Data Sent:" + data.strip('\r').strip('\n'))
port.write(data.encode())
def decodedata(data):
data=re.split(';',data)
print("Third item in list is " + data[2])
print("Forth item in list is " + data[3])
print("Fifth item in list is " + data[4])
print("Sixth item in list is " + data[5])
if data[2]=='DEBUG':
logging.debug(repr(rcv))
port = serial.Serial("/dev/ttyACM0", baudrate=57600, timeout=3.0)
time.sleep(2) # delay for 2 seconds
rcv = readlineCR(port)
print("Data Received:" + repr(rcv))
sendData('REBOOT',port)
time.sleep(2)
sendData('RFUDEBUG=ON',port)
#sendData('RFDEBUG=OFF',port)
sendData('VERSION',port)
#sendData('PING',port)
#sendData('RTS;0f303f;0;OFF',port)
#sendData('RTS;0fb0bf;0;OFF',port)
#sendData('RTS;0f707f;0;OFF',port)
#sendData('RTS;0f717f;0;OFF',port)
#sendData('RTS;0ff0ff;0;OFF',port)
#sendData('RTS;077880;0;OFF',port)
#sendData('Byron;112233;02;OFF',
while True:
rcv = readlineCR(port)
print("Data Received:" + repr(rcv))
decodedata(repr(rcv))
|
Test python script to get data from Rflink.
|
Test python script to get data from Rflink.
|
Python
|
apache-2.0
|
matt2005/pyrflink
|
Test python script to get data from Rflink.
|
import serial
import time
import logging
import re
logging.basicConfig(filename='debug.log',level=logging.DEBUG)
def readlineCR(port):
rv = ""
while True:
ch = port.read().decode()
rv += ch
if ch=='\r':
rv = rv.strip('\r').strip('\n')
return rv
def sendData(data,port):
data="10;"+data+";\r\n"
print("Data Sent:" + data.strip('\r').strip('\n'))
port.write(data.encode())
time.sleep(1)
print("Data Received back:" + repr(readlineCR(port)))
logging.debug(repr(rcv))
def echoData(data,port):
data="11;"+data+";\r\n"
print("Data Sent:" + data.strip('\r').strip('\n'))
port.write(data.encode())
def decodedata(data):
data=re.split(';',data)
print("Third item in list is " + data[2])
print("Forth item in list is " + data[3])
print("Fifth item in list is " + data[4])
print("Sixth item in list is " + data[5])
if data[2]=='DEBUG':
logging.debug(repr(rcv))
port = serial.Serial("/dev/ttyACM0", baudrate=57600, timeout=3.0)
time.sleep(2) # delay for 2 seconds
rcv = readlineCR(port)
print("Data Received:" + repr(rcv))
sendData('REBOOT',port)
time.sleep(2)
sendData('RFUDEBUG=ON',port)
#sendData('RFDEBUG=OFF',port)
sendData('VERSION',port)
#sendData('PING',port)
#sendData('RTS;0f303f;0;OFF',port)
#sendData('RTS;0fb0bf;0;OFF',port)
#sendData('RTS;0f707f;0;OFF',port)
#sendData('RTS;0f717f;0;OFF',port)
#sendData('RTS;0ff0ff;0;OFF',port)
#sendData('RTS;077880;0;OFF',port)
#sendData('Byron;112233;02;OFF',
while True:
rcv = readlineCR(port)
print("Data Received:" + repr(rcv))
decodedata(repr(rcv))
|
<commit_before><commit_msg>Test python script to get data from Rflink.<commit_after>
|
import serial
import time
import logging
import re
logging.basicConfig(filename='debug.log',level=logging.DEBUG)
def readlineCR(port):
rv = ""
while True:
ch = port.read().decode()
rv += ch
if ch=='\r':
rv = rv.strip('\r').strip('\n')
return rv
def sendData(data,port):
data="10;"+data+";\r\n"
print("Data Sent:" + data.strip('\r').strip('\n'))
port.write(data.encode())
time.sleep(1)
print("Data Received back:" + repr(readlineCR(port)))
logging.debug(repr(rcv))
def echoData(data,port):
data="11;"+data+";\r\n"
print("Data Sent:" + data.strip('\r').strip('\n'))
port.write(data.encode())
def decodedata(data):
data=re.split(';',data)
print("Third item in list is " + data[2])
print("Forth item in list is " + data[3])
print("Fifth item in list is " + data[4])
print("Sixth item in list is " + data[5])
if data[2]=='DEBUG':
logging.debug(repr(rcv))
port = serial.Serial("/dev/ttyACM0", baudrate=57600, timeout=3.0)
time.sleep(2) # delay for 2 seconds
rcv = readlineCR(port)
print("Data Received:" + repr(rcv))
sendData('REBOOT',port)
time.sleep(2)
sendData('RFUDEBUG=ON',port)
#sendData('RFDEBUG=OFF',port)
sendData('VERSION',port)
#sendData('PING',port)
#sendData('RTS;0f303f;0;OFF',port)
#sendData('RTS;0fb0bf;0;OFF',port)
#sendData('RTS;0f707f;0;OFF',port)
#sendData('RTS;0f717f;0;OFF',port)
#sendData('RTS;0ff0ff;0;OFF',port)
#sendData('RTS;077880;0;OFF',port)
#sendData('Byron;112233;02;OFF',
while True:
rcv = readlineCR(port)
print("Data Received:" + repr(rcv))
decodedata(repr(rcv))
|
Test python script to get data from Rflink.import serial
import time
import logging
import re
logging.basicConfig(filename='debug.log',level=logging.DEBUG)
def readlineCR(port):
rv = ""
while True:
ch = port.read().decode()
rv += ch
if ch=='\r':
rv = rv.strip('\r').strip('\n')
return rv
def sendData(data,port):
data="10;"+data+";\r\n"
print("Data Sent:" + data.strip('\r').strip('\n'))
port.write(data.encode())
time.sleep(1)
print("Data Received back:" + repr(readlineCR(port)))
logging.debug(repr(rcv))
def echoData(data,port):
data="11;"+data+";\r\n"
print("Data Sent:" + data.strip('\r').strip('\n'))
port.write(data.encode())
def decodedata(data):
data=re.split(';',data)
print("Third item in list is " + data[2])
print("Forth item in list is " + data[3])
print("Fifth item in list is " + data[4])
print("Sixth item in list is " + data[5])
if data[2]=='DEBUG':
logging.debug(repr(rcv))
port = serial.Serial("/dev/ttyACM0", baudrate=57600, timeout=3.0)
time.sleep(2) # delay for 2 seconds
rcv = readlineCR(port)
print("Data Received:" + repr(rcv))
sendData('REBOOT',port)
time.sleep(2)
sendData('RFUDEBUG=ON',port)
#sendData('RFDEBUG=OFF',port)
sendData('VERSION',port)
#sendData('PING',port)
#sendData('RTS;0f303f;0;OFF',port)
#sendData('RTS;0fb0bf;0;OFF',port)
#sendData('RTS;0f707f;0;OFF',port)
#sendData('RTS;0f717f;0;OFF',port)
#sendData('RTS;0ff0ff;0;OFF',port)
#sendData('RTS;077880;0;OFF',port)
#sendData('Byron;112233;02;OFF',
while True:
rcv = readlineCR(port)
print("Data Received:" + repr(rcv))
decodedata(repr(rcv))
|
<commit_before><commit_msg>Test python script to get data from Rflink.<commit_after>import serial
import time
import logging
import re
logging.basicConfig(filename='debug.log',level=logging.DEBUG)
def readlineCR(port):
rv = ""
while True:
ch = port.read().decode()
rv += ch
if ch=='\r':
rv = rv.strip('\r').strip('\n')
return rv
def sendData(data,port):
data="10;"+data+";\r\n"
print("Data Sent:" + data.strip('\r').strip('\n'))
port.write(data.encode())
time.sleep(1)
print("Data Received back:" + repr(readlineCR(port)))
logging.debug(repr(rcv))
def echoData(data,port):
data="11;"+data+";\r\n"
print("Data Sent:" + data.strip('\r').strip('\n'))
port.write(data.encode())
def decodedata(data):
data=re.split(';',data)
print("Third item in list is " + data[2])
print("Forth item in list is " + data[3])
print("Fifth item in list is " + data[4])
print("Sixth item in list is " + data[5])
if data[2]=='DEBUG':
logging.debug(repr(rcv))
port = serial.Serial("/dev/ttyACM0", baudrate=57600, timeout=3.0)
time.sleep(2) # delay for 2 seconds
rcv = readlineCR(port)
print("Data Received:" + repr(rcv))
sendData('REBOOT',port)
time.sleep(2)
sendData('RFUDEBUG=ON',port)
#sendData('RFDEBUG=OFF',port)
sendData('VERSION',port)
#sendData('PING',port)
#sendData('RTS;0f303f;0;OFF',port)
#sendData('RTS;0fb0bf;0;OFF',port)
#sendData('RTS;0f707f;0;OFF',port)
#sendData('RTS;0f717f;0;OFF',port)
#sendData('RTS;0ff0ff;0;OFF',port)
#sendData('RTS;077880;0;OFF',port)
#sendData('Byron;112233;02;OFF',
while True:
rcv = readlineCR(port)
print("Data Received:" + repr(rcv))
decodedata(repr(rcv))
|
|
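decodedata above indexes fields 2 through 5 unconditionally, so a short packet would raise IndexError. A guarded sketch of the same split is shown below; the sample line is an invented stand-in for RFLink's semicolon-delimited output, not a captured packet.
# Hypothetical guarded variant of decodedata (no serial port needed).
import re

def decode_line(line):
    fields = re.split(';', line)
    for i, field in enumerate(fields[2:6], start=3):
        print("Item %d in list is %s" % (i, field))
    if len(fields) > 2 and fields[2] == 'DEBUG':
        print("debug packet, would be logged")

decode_line("20;01;SomeProtocol;ID=1234;SWITCH=02;CMD=ON;")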
4e0af69b8803e798fe1a90ea8c261cbcd85d149d
|
languages/python/talking-clock.py
|
languages/python/talking-clock.py
|
#!/usr/bin/env python3
#
# Talking clock, using my Google Home device as a media player.
#
import pychromecast, uuid, time, datetime
#
# Get list of all Chromecast devices.
# The local computer must be in the same Wi-Fi network.
#
def print_all_chromecasts():
all_cast_devices = pychromecast.get_chromecasts()
print("All Chromecast devices:", all_cast_devices)
#
# Find media controller by unique ID.
#
def find_media_controller(my_uuid):
all_cast_devices = pychromecast.get_chromecasts()
my_cast_device = next(x for x in all_cast_devices if str(x.device.uuid) == my_uuid)
return my_cast_device.media_controller
#
# Open media controller with given IP address, port and unique ID.
#
def open_media_controller(my_uuid, my_ip_addr, my_port):
my_device = pychromecast.DeviceStatus(friendly_name = 'Abra',
model_name = 'Cadabra',
manufacturer = 'Unknown',
uuid = uuid.UUID(my_uuid),
cast_type = 'cast')
my_cast_device = pychromecast.Chromecast(my_ip_addr,
port = my_port,
device = my_device)
print("My Chromecast device:", my_cast_device)
return my_cast_device.media_controller
#
# Play a media file on my Chromecast device.
#
def play_media(mc, url, media_type):
print("Playing media:", url)
mc.play_media(url, content_type = media_type)
mc.block_until_active()
mc.play()
# Wait until done.
while True:
time.sleep(1)
mc.update_status()
if mc.status.player_is_idle:
break
#
# Get time.
#
now = datetime.datetime.today()
hour = now.hour
minute = now.minute
print(f"Time: {hour}:{minute}")
#
# When the time is exact, voice it.
#
if minute == 0:
# Open my media controller.
mc = open_media_controller("c854c4ea-517a-29cf-e083-1e63659ea1c6", "192.168.86.75", 8009)
# Play a chime, to get attention.
play_media(mc, "http://166.84.7.114/pub/media/talking-clock/service-login.oga", "application/ogg")
# Speak the hour.
filename = f"{hour:02d}_ru.mp3"
play_media(mc, "http://166.84.7.114/pub/media/talking-clock/" + filename, "audio/mpeg")
|
Add talking clock via Chromecast.
|
Add talking clock via Chromecast.
|
Python
|
apache-2.0
|
sergev/vak-opensource,sergev/vak-opensource,sergev/vak-opensource,sergev/vak-opensource,sergev/vak-opensource,sergev/vak-opensource,sergev/vak-opensource,sergev/vak-opensource,sergev/vak-opensource,sergev/vak-opensource,sergev/vak-opensource,sergev/vak-opensource,sergev/vak-opensource,sergev/vak-opensource,sergev/vak-opensource,sergev/vak-opensource,sergev/vak-opensource,sergev/vak-opensource,sergev/vak-opensource
|
Add talking clock via Chromecast.
|
#!/usr/bin/env python3
#
# Talking clock, using my Google Home device as a media player.
#
import pychromecast, uuid, time, datetime
#
# Get list of all Chromecast devices.
# The local computer must be in the same Wi-Fi network.
#
def print_all_chromecasts():
all_cast_devices = pychromecast.get_chromecasts()
print("All Chromecast devices:", all_cast_devices)
#
# Find media controller by unique ID.
#
def find_media_controller(my_uuid):
all_cast_devices = pychromecast.get_chromecasts()
my_cast_device = next(x for x in all_cast_devices if str(x.device.uuid) == my_uuid)
return my_cast_device.media_controller
#
# Open media controller with given IP address, port and unique ID.
#
def open_media_controller(my_uuid, my_ip_addr, my_port):
my_device = pychromecast.DeviceStatus(friendly_name = 'Abra',
model_name = 'Cadabra',
manufacturer = 'Unknown',
uuid = uuid.UUID(my_uuid),
cast_type = 'cast')
my_cast_device = pychromecast.Chromecast(my_ip_addr,
port = my_port,
device = my_device)
print("My Chromecast device:", my_cast_device)
return my_cast_device.media_controller
#
# Play a media file on my Chromecast device.
#
def play_media(mc, url, media_type):
print("Playing media:", url)
mc.play_media(url, content_type = media_type)
mc.block_until_active()
mc.play()
# Wait until done.
while True:
time.sleep(1)
mc.update_status()
if mc.status.player_is_idle:
break
#
# Get time.
#
now = datetime.datetime.today()
hour = now.hour
minute = now.minute
print(f"Time: {hour}:{minute}")
#
# When the time is exact, voice it.
#
if minute == 0:
# Open my media controller.
mc = open_media_controller("c854c4ea-517a-29cf-e083-1e63659ea1c6", "192.168.86.75", 8009)
# Play a chime, to get attention.
play_media(mc, "http://166.84.7.114/pub/media/talking-clock/service-login.oga", "application/ogg")
# Speak the hour.
filename = f"{hour:02d}_ru.mp3"
play_media(mc, "http://166.84.7.114/pub/media/talking-clock/" + filename, "audio/mpeg")
|
<commit_before><commit_msg>Add talking clock via Chromecast.<commit_after>
|
#!/usr/bin/env python3
#
# Talking clock, using my Google Home device as a media player.
#
import pychromecast, uuid, time, datetime
#
# Get list of all Chromecast devices.
# The local computer must be in the same Wi-Fi network.
#
def print_all_chromecasts():
all_cast_devices = pychromecast.get_chromecasts()
print("All Chromecast devices:", all_cast_devices)
#
# Find media controller by unique ID.
#
def find_media_controller(my_uuid):
all_cast_devices = pychromecast.get_chromecasts()
my_cast_device = next(x for x in all_cast_devices if str(x.device.uuid) == my_uuid)
return my_cast_device.media_controller
#
# Open media controller with given IP address, port and unique ID.
#
def open_media_controller(my_uuid, my_ip_addr, my_port):
my_device = pychromecast.DeviceStatus(friendly_name = 'Abra',
model_name = 'Cadabra',
manufacturer = 'Unknown',
uuid = uuid.UUID(my_uuid),
cast_type = 'cast')
my_cast_device = pychromecast.Chromecast(my_ip_addr,
port = my_port,
device = my_device)
print("My Chromecast device:", my_cast_device)
return my_cast_device.media_controller
#
# Play a media file on my Chromecast device.
#
def play_media(mc, url, media_type):
print("Playing media:", url)
mc.play_media(url, content_type = media_type)
mc.block_until_active()
mc.play()
# Wait until done.
while True:
time.sleep(1)
mc.update_status()
if mc.status.player_is_idle:
break
#
# Get time.
#
now = datetime.datetime.today()
hour = now.hour
minute = now.minute
print(f"Time: {hour}:{minute}")
#
# When the time is exact, voice it.
#
if minute == 0:
# Open my media controller.
mc = open_media_controller("c854c4ea-517a-29cf-e083-1e63659ea1c6", "192.168.86.75", 8009)
# Play a chime, to get attention.
play_media(mc, "http://166.84.7.114/pub/media/talking-clock/service-login.oga", "application/ogg")
# Speak the hour.
filename = f"{hour:02d}_ru.mp3"
play_media(mc, "http://166.84.7.114/pub/media/talking-clock/" + filename, "audio/mpeg")
|
Add talking clock via Chromecast.#!/usr/bin/env python3
#
# Talking clock, using my Google Home device as a media player.
#
import pychromecast, uuid, time, datetime
#
# Get list of all Chromecast devices.
# The local computer must be in the same Wi-Fi network.
#
def print_all_chromecasts():
all_cast_devices = pychromecast.get_chromecasts()
print("All Chromecast devices:", all_cast_devices)
#
# Find media controller by unique ID.
#
def find_media_controller(my_uuid):
all_cast_devices = pychromecast.get_chromecasts()
my_cast_device = next(x for x in all_cast_devices if str(x.device.uuid) == my_uuid)
return my_cast_device.media_controller
#
# Open media controller with given IP address, port and unique ID.
#
def open_media_controller(my_uuid, my_ip_addr, my_port):
my_device = pychromecast.DeviceStatus(friendly_name = 'Abra',
model_name = 'Cadabra',
manufacturer = 'Unknown',
uuid = uuid.UUID(my_uuid),
cast_type = 'cast')
my_cast_device = pychromecast.Chromecast(my_ip_addr,
port = my_port,
device = my_device)
print("My Chromecast device:", my_cast_device)
return my_cast_device.media_controller
#
# Play a media file on my Chromecast device.
#
def play_media(mc, url, media_type):
print("Playing media:", url)
mc.play_media(url, content_type = media_type)
mc.block_until_active()
mc.play()
# Wait until done.
while True:
time.sleep(1)
mc.update_status()
if mc.status.player_is_idle:
break
#
# Get time.
#
now = datetime.datetime.today()
hour = now.hour
minute = now.minute
print(f"Time: {hour}:{minute}")
#
# When the time is exact, voice it.
#
if minute == 0:
# Open my media controller.
mc = open_media_controller("c854c4ea-517a-29cf-e083-1e63659ea1c6", "192.168.86.75", 8009)
# Play a chime, to get attention.
play_media(mc, "http://166.84.7.114/pub/media/talking-clock/service-login.oga", "application/ogg")
# Speak the hour.
filename = f"{hour:02d}_ru.mp3"
play_media(mc, "http://166.84.7.114/pub/media/talking-clock/" + filename, "audio/mpeg")
|
<commit_before><commit_msg>Add talking clock via Chromecast.<commit_after>#!/usr/bin/env python3
#
# Talking clock, using my Google Home device as a media player.
#
import pychromecast, uuid, time, datetime
#
# Get list of all Chromecast devices.
# The local computer must be in the same Wi-Fi network.
#
def print_all_chromecasts():
all_cast_devices = pychromecast.get_chromecasts()
print("All Chromecast devices:", all_cast_devices)
#
# Find media controller by unique ID.
#
def find_media_controller(my_uuid):
all_cast_devices = pychromecast.get_chromecasts()
my_cast_device = next(x for x in all_cast_devices if str(x.device.uuid) == my_uuid)
return my_cast_device.media_controller
#
# Open media controller with given IP address, port and unique ID.
#
def open_media_controller(my_uuid, my_ip_addr, my_port):
my_device = pychromecast.DeviceStatus(friendly_name = 'Abra',
model_name = 'Cadabra',
manufacturer = 'Unknown',
uuid = uuid.UUID(my_uuid),
cast_type = 'cast')
my_cast_device = pychromecast.Chromecast(my_ip_addr,
port = my_port,
device = my_device)
print("My Chromecast device:", my_cast_device)
return my_cast_device.media_controller
#
# Play a media file on my Chromecast device.
#
def play_media(mc, url, media_type):
print("Playing media:", url)
mc.play_media(url, content_type = media_type)
mc.block_until_active()
mc.play()
# Wait until done.
while True:
time.sleep(1)
mc.update_status()
if mc.status.player_is_idle:
break
#
# Get time.
#
now = datetime.datetime.today()
hour = now.hour
minute = now.minute
print(f"Time: {hour}:{minute}")
#
# When the time is exact, voice it.
#
if minute == 0:
# Open my media controller.
mc = open_media_controller("c854c4ea-517a-29cf-e083-1e63659ea1c6", "192.168.86.75", 8009)
# Play a chime, to get attention.
play_media(mc, "http://166.84.7.114/pub/media/talking-clock/service-login.oga", "application/ogg")
# Speak the hour.
filename = f"{hour:02d}_ru.mp3"
play_media(mc, "http://166.84.7.114/pub/media/talking-clock/" + filename, "audio/mpeg")
|
|
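The script only speaks when minute == 0, so it is presumably launched by an external scheduler such as cron; an in-process alternative can be sketched in pure Python (the sleep-until-the-hour helper below is an invented addition, not part of the commit).
# Hypothetical in-process scheduler: sleep until the next full hour.
import datetime, time

def seconds_until_next_hour():
    now = datetime.datetime.today()
    nxt = (now.replace(minute=0, second=0, microsecond=0)
           + datetime.timedelta(hours=1))
    return (nxt - now).total_seconds()

time.sleep(seconds_until_next_hour())  # then run the announcement logic above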
69978f33eb1b7fb7c24d06da33cec1ee48667bef
|
train/labs/acp-workshop/scripts/centos.py
|
train/labs/acp-workshop/scripts/centos.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
PRIMARY_OS = 'CENTOS-7.0'
CS_ENGINE = '''#!/bin/sh
#
FQDN="{fqdn}"
export DEBIAN_FRONTEND=noninteractive
# locale
sudo locale-gen en_US.UTF-8
# /etc/hostname - /etc/hosts
sed -i "1 c\\127.0.0.1 $FQDN localhost" /etc/hosts
echo $FQDN > /etc/hostname
service hostname restart
sleep 5
yum -y clean all
yum -y upgrade
{dinfo}
reboot
'''
OS_ENGINE = '''#!/bin/sh
#
FQDN="{fqdn}"
export DEBIAN_FRONTEND=noninteractive
# locale
sudo locale-gen en_US.UTF-8
# /etc/hostname - /etc/hosts
sed -i "1 c\\127.0.0.1 $FQDN localhost" /etc/hosts
echo $FQDN > /etc/hostname
service hostname restart
sleep 5
yum -y clean all
yum -y upgrade
curl -sSL https://get.docker.com/ | sh
usermod -aG docker centos
chkconfig docker on
{dinfo}
reboot
'''
def pre_process():
"""Executed before launching instances in AWS"""
pass
def post_process():
"""Executed after launching instances in AWS"""
pass
# Notes
'''
Script requires:
{fqdn}
{dinfo}
'''
|
Add CentOS config for acp-workshop
|
Add CentOS config for acp-workshop
Signed-off-by: Jerry Baker <aaf88dc49a82ab24b325ac267fdcf59a36abcb76@docker.com>
|
Python
|
apache-2.0
|
curtisz/train,curtisz/train,anokun7/train,kizbitz/train,kizbitz/train,danielpalstra/train,danielpalstra/train,anokun7/train
|
Add CentOS config for acp-workshop
Signed-off-by: Jerry Baker <aaf88dc49a82ab24b325ac267fdcf59a36abcb76@docker.com>
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
PRIMARY_OS = 'CENTOS-7.0'
CS_ENGINE = '''#!/bin/sh
#
FQDN="{fqdn}"
export DEBIAN_FRONTEND=noninteractive
# locale
sudo locale-gen en_US.UTF-8
# /etc/hostname - /etc/hosts
sed -i "1 c\\127.0.0.1 $FQDN localhost" /etc/hosts
echo $FQDN > /etc/hostname
service hostname restart
sleep 5
yum -y clean all
yum -y upgrade
{dinfo}
reboot
'''
OS_ENGINE = '''#!/bin/sh
#
FQDN="{fqdn}"
export DEBIAN_FRONTEND=noninteractive
# locale
sudo locale-gen en_US.UTF-8
# /etc/hostname - /etc/hosts
sed -i "1 c\\127.0.0.1 $FQDN localhost" /etc/hosts
echo $FQDN > /etc/hostname
service hostname restart
sleep 5
yum -y clean all
yum -y upgrade
curl -sSL https://get.docker.com/ | sh
usermod -aG docker centos
chkconfig docker on
{dinfo}
reboot
'''
def pre_process():
"""Executed before launching instances in AWS"""
pass
def post_process():
"""Executed after launching instances in AWS"""
pass
# Notes
'''
Script requires:
{fqdn}
{dinfo}
'''
|
<commit_before><commit_msg>Add CentOS config for acp-workshop
Signed-off-by: Jerry Baker <aaf88dc49a82ab24b325ac267fdcf59a36abcb76@docker.com><commit_after>
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
PRIMARY_OS = 'CENTOS-7.0'
CS_ENGINE = '''#!/bin/sh
#
FQDN="{fqdn}"
export DEBIAN_FRONTEND=noninteractive
# locale
sudo locale-gen en_US.UTF-8
# /etc/hostname - /etc/hosts
sed -i "1 c\\127.0.0.1 $FQDN localhost" /etc/hosts
echo $FQDN > /etc/hostname
service hostname restart
sleep 5
yum -y clean all
yum -y upgrade
{dinfo}
reboot
'''
OS_ENGINE = '''#!/bin/sh
#
FQDN="{fqdn}"
export DEBIAN_FRONTEND=noninteractive
# locale
sudo locale-gen en_US.UTF-8
# /etc/hostname - /etc/hosts
sed -i "1 c\\127.0.0.1 $FQDN localhost" /etc/hosts
echo $FQDN > /etc/hostname
service hostname restart
sleep 5
yum -y clean all
yum -y upgrade
curl -sSL https://get.docker.com/ | sh
usermod -aG docker centos
chkconfig docker on
{dinfo}
reboot
'''
def pre_process():
"""Executed before launching instances in AWS"""
pass
def post_process():
"""Executed after launching instances in AWS"""
pass
# Notes
'''
Script requires:
{fqdn}
{dinfo}
'''
|
Add CentOS config for acp-workshop
Signed-off-by: Jerry Baker <aaf88dc49a82ab24b325ac267fdcf59a36abcb76@docker.com>#!/usr/bin/env python
# -*- coding: utf-8 -*-
PRIMARY_OS = 'CENTOS-7.0'
CS_ENGINE = '''#!/bin/sh
#
FQDN="{fqdn}"
export DEBIAN_FRONTEND=noninteractive
# locale
sudo locale-gen en_US.UTF-8
# /etc/hostname - /etc/hosts
sed -i "1 c\\127.0.0.1 $FQDN localhost" /etc/hosts
echo $FQDN > /etc/hostname
service hostname restart
sleep 5
yum -y clean all
yum -y upgrade
{dinfo}
reboot
'''
OS_ENGINE = '''#!/bin/sh
#
FQDN="{fqdn}"
export DEBIAN_FRONTEND=noninteractive
# locale
sudo locale-gen en_US.UTF-8
# /etc/hostname - /etc/hosts
sed -i "1 c\\127.0.0.1 $FQDN localhost" /etc/hosts
echo $FQDN > /etc/hostname
service hostname restart
sleep 5
yum -y clean all
yum -y upgrade
curl -sSL https://get.docker.com/ | sh
usermod -aG docker centos
chkconfig docker on
{dinfo}
reboot
'''
def pre_process():
"""Executed before launching instances in AWS"""
pass
def post_process():
"""Executed after launching instances in AWS"""
pass
# Notes
'''
Script requires:
{fqdn}
{dinfo}
'''
|
<commit_before><commit_msg>Add CentOS config for acp-workshop
Signed-off-by: Jerry Baker <aaf88dc49a82ab24b325ac267fdcf59a36abcb76@docker.com><commit_after>#!/usr/bin/env python
# -*- coding: utf-8 -*-
PRIMARY_OS = 'CENTOS-7.0'
CS_ENGINE = '''#!/bin/sh
#
FQDN="{fqdn}"
export DEBIAN_FRONTEND=noninteractive
# locale
sudo locale-gen en_US.UTF-8
# /etc/hostname - /etc/hosts
sed -i "1 c\\127.0.0.1 $FQDN localhost" /etc/hosts
echo $FQDN > /etc/hostname
service hostname restart
sleep 5
yum -y clean all
yum -y upgrade
{dinfo}
reboot
'''
OS_ENGINE = '''#!/bin/sh
#
FQDN="{fqdn}"
export DEBIAN_FRONTEND=noninteractive
# locale
sudo locale-gen en_US.UTF-8
# /etc/hostname - /etc/hosts
sed -i "1 c\\127.0.0.1 $FQDN localhost" /etc/hosts
echo $FQDN > /etc/hostname
service hostname restart
sleep 5
yum -y clean all
yum -y upgrade
curl -sSL https://get.docker.com/ | sh
usermod -aG docker centos
chkconfig docker on
{dinfo}
reboot
'''
def pre_process():
"""Executed before launching instances in AWS"""
pass
def post_process():
"""Executed after launching instances in AWS"""
pass
# Notes
'''
Script requires:
{fqdn}
{dinfo}
'''
|
|
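CS_ENGINE and OS_ENGINE are str.format templates, which is why the Notes section lists {fqdn} and {dinfo} as required slots; a sketch of how they are presumably rendered (both values invented):
# Hypothetical rendering of the user-data template above.
user_data = OS_ENGINE.format(
    fqdn='node01.example.com',                 # invented instance hostname
    dinfo='docker info > /var/log/dinfo.log',  # invented diagnostics hook
)
assert 'node01.example.com' in user_data       # substituted into the /etc/hosts line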
d03d4bf0ca7ec4d66868f384d6864c9fb456cb84
|
src/test_io.py
|
src/test_io.py
|
import RPi.GPIO as GPIO
import time
# This test is made for a quick check of the gpios
# List of the in channels to test. When pressed they output a message.
in_chanels = []
# A list of output channels to test. These will be switched on and off in a pattern.
out_chanels = []
def switch_called(channel):
print 'Edge detected on channel %s'%channel
for in_channel in in_chanels:
GPIO.setup(in_channel, GPIO.IN, pull_up_down=GPIO.PUD_DOWN )
GPIO.add_event_detect(in_channel, GPIO.RISING, callback=switch_called, bouncetime=300)
for out_channel in out_chanels:
GPIO.setup(out_channel, GPIO.OUT)
GPIO.output(out_channel, GPIO.LOW)
print 'Start output test'
while True:
for out_channel in out_chanels:
time.sleep(1)
GPIO.output(out_channel, not GPIO.input(out_channel))
|
Test for rudimentary io tests added
|
Test for rudimentary io tests added
|
Python
|
apache-2.0
|
baumartig/vpn_switcher,baumartig/vpn_switcher
|
Test for rudimentary io tests added
|
import RPi.GPIO as GPIO
import time
# This test is made for a quick check of the gpios
# List of the in channels to test. When pressed they output a message.
in_chanels = []
# A list of output channels to test. These will be switched on and off in a pattern.
out_chanels = []
def switch_called(channel):
print 'Edge detected on channel %s'%channel
for in_channel in in_chanels:
GPIO.setup(in_channel, GPIO.IN, pull_up_down=GPIO.PUD_DOWN )
GPIO.add_event_detect(in_channel, GPIO.RISING, callback=switch_called, bouncetime=300)
for out_channel in out_chanels:
GPIO.setup(out_channel, GPIO.OUT)
GPIO.output(out_channel, GPIO.LOW)
print 'Start output test'
while True:
for out_channel in out_chanels:
time.sleep(1)
GPIO.output(out_channel, not GPIO.input(out_channel))
|
<commit_before><commit_msg>Test for rudimentary io tests added<commit_after>
|
import RPi.GPIO as GPIO
import time
# This test is made for a quick check of the gpios
# List of the in channels to test. When pressed they output a message.
in_chanels = []
# A list of output channels to test. These will be switched on and off in a pattern.
out_chanels = []
def switch_called(channel):
print 'Edge detected on channel %s'%channel
for in_channel in in_chanels:
GPIO.setup(in_channel, GPIO.IN, pull_up_down=GPIO.PUD_DOWN )
GPIO.add_event_detect(in_channel, GPIO.RISING, callback=switch_called, bouncetime=300)
for out_channel in out_chanels:
GPIO.setup(out_channel, GPIO.OUT)
GPIO.output(out_channel, GPIO.LOW)
print 'Start output test'
while True:
for out_channel in out_chanels:
time.sleep(1)
GPIO.output(out_channel, not GPIO.input(out_channel))
|
Test for rudimentary io tests addedimport RPi.GPIO as GPIO
import time
# This test is made for a quick check of the gpios
# List of the in channels to test. When pressed they output a message.
in_chanels = []
# A list of output channels to test. These will be switched on and off in a pattern.
out_chanels = []
def switch_called(channel):
print 'Edge detected on channel %s'%channel
for in_channel in in_chanels:
GPIO.setup(in_channel, GPIO.IN, pull_up_down=GPIO.PUD_DOWN )
GPIO.add_event_detect(in_channel, GPIO.RISING, callback=switch_called, bouncetime=300)
for out_channel in out_chanels:
GPIO.setup(out_channel, GPIO.OUT)
GPIO.output(out_channel, GPIO.LOW)
print 'Start output test'
while True:
for out_channel in out_chanels:
time.sleep(1)
GPIO.output(out_channel, not GPIO.input(out_channel))
|
<commit_before><commit_msg>Test for rudimentary io tests added<commit_after>import RPi.GPIO as GPIO
import time
# This test is made for a quick check of the gpios
# List of the in channels to test. When pressed they output a message.
in_chanels = []
# A list of output channels to test. These will be switched on and off in a pattern.
out_chanels = []
def switch_called(channel):
print 'Edge detected on channel %s'%channel
for in_channel in in_chanels:
GPIO.setup(in_channel, GPIO.IN, pull_up_down=GPIO.PUD_DOWN )
GPIO.add_event_detect(in_channel, GPIO.RISING, callback=switch_called, bouncetime=300)
for out_channel in out_chanels:
GPIO.setup(out_channel, GPIO.OUT)
GPIO.output(out_channel, GPIO.LOW)
print 'Start output test'
while True:
for out_channel in out_chanels:
time.sleep(1)
GPIO.output(out_channel, not GPIO.input(out_channel))
|
|
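With both channel lists empty the script above does nothing, and it never releases the pins it configures; a sketch with populated outputs and cleanup on Ctrl-C follows (pin numbers invented, BCM numbering assumed — note that RPi.GPIO requires an explicit GPIO.setmode call before any setup).
# Hypothetical populated variant of the blink loop with cleanup.
import RPi.GPIO as GPIO
import time

GPIO.setmode(GPIO.BCM)         # numbering mode must be chosen explicitly
out_chanels = [17, 27]         # invented output pins
for ch in out_chanels:
    GPIO.setup(ch, GPIO.OUT)
    GPIO.output(ch, GPIO.LOW)
try:
    while True:
        for ch in out_chanels:
            time.sleep(1)
            GPIO.output(ch, not GPIO.input(ch))
except KeyboardInterrupt:
    GPIO.cleanup()             # release the pins on exit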
089208e4cb85da5b85c20242097cbe7f0c0e0ece
|
fabfile.py
|
fabfile.py
|
#!/usr/bin/env python2
#-*- coding: utf-8 -*-
import os
from fabric.api import local, task
from pylua.settings import LOG_CONFIG
ROOT_DIR = os.path.dirname(os.path.realpath(__file__))
SRC_DIR = 'pylua'
TESTS_DIR = os.path.join(SRC_DIR, 'test')
LOG_CONFIG_PATH = os.path.join(ROOT_DIR, SRC_DIR, LOG_CONFIG)
@task
def test_all():
local('nosetests -vv --with-timer {} --log-config={}'.format(
TESTS_DIR, LOG_CONFIG_PATH))
@task
def run_test(name=None):
if name is None:
print 'Usage: fab run_test:name=<file>:<Test_Case>.<test_method>'
print ('Sample: fab run_test:name={}/test_json.py:TestJson.'
'test_int_param_py'.format(TESTS_DIR))
return
local('nosetests -vv -s --with-timer --log-config={} {}'.format(
LOG_CONFIG_PATH, name))
|
Add fab file with two tasks: test_all, run_test
|
Add fab file with two tasks: test_all, run_test
|
Python
|
mit
|
malirod/pylua,malirod/pylua
|
Add fab file with two tasks: test_all, run_test
|
#!/usr/bin/env python2
#-*- coding: utf-8 -*-
import os
from fabric.api import local, task
from pylua.settings import LOG_CONFIG
ROOT_DIR = os.path.dirname(os.path.realpath(__file__))
SRC_DIR = 'pylua'
TESTS_DIR = os.path.join(SRC_DIR, 'test')
LOG_CONFIG_PATH = os.path.join(ROOT_DIR, SRC_DIR, LOG_CONFIG)
@task
def test_all():
local('nosetests -vv --with-timer {} --log-config={}'.format(
TESTS_DIR, LOG_CONFIG_PATH))
@task
def run_test(name=None):
if name is None:
print 'Usage: fab run_test:name=<file>:<Test_Case>.<test_method>'
print ('Sample: fab run_test:name={}/test_json.py:TestJson.'
'test_int_param_py'.format(TESTS_DIR))
return
local('nosetests -vv -s --with-timer --log-config={} {}'.format(
LOG_CONFIG_PATH, name))
|
<commit_before><commit_msg>Add fab file with two tasks: test_all, run_test<commit_after>
|
#!/usr/bin/env python2
#-*- coding: utf-8 -*-
import os
from fabric.api import local, task
from pylua.settings import LOG_CONFIG
ROOT_DIR = os.path.dirname(os.path.realpath(__file__))
SRC_DIR = 'pylua'
TESTS_DIR = os.path.join(SRC_DIR, 'test')
LOG_CONFIG_PATH = os.path.join(ROOT_DIR, SRC_DIR, LOG_CONFIG)
@task
def test_all():
local('nosetests -vv --with-timer {} --log-config={}'.format(
TESTS_DIR, LOG_CONFIG_PATH))
@task
def run_test(name=None):
if name is None:
print 'Usage: fab run_test:name=<file>:<Test_Case>.<test_method>'
print ('Sample: fab run_test:name={}/test_json.py:TestJson.'
'test_int_param_py'.format(TESTS_DIR))
return
local('nosetests -vv -s --with-timer --log-config={} {}'.format(
LOG_CONFIG_PATH, name))
|
Add fab file with two tasks: test_all, run_test#!/usr/bin/env python2
#-*- coding: utf-8 -*-
import os
from fabric.api import local, task
from pylua.settings import LOG_CONFIG
ROOT_DIR = os.path.dirname(os.path.realpath(__file__))
SRC_DIR = 'pylua'
TESTS_DIR = os.path.join(SRC_DIR, 'test')
LOG_CONFIG_PATH = os.path.join(ROOT_DIR, SRC_DIR, LOG_CONFIG)
@task
def test_all():
local('nosetests -vv --with-timer {} --log-config={}'.format(
TESTS_DIR, LOG_CONFIG_PATH))
@task
def run_test(name=None):
if name is None:
print 'Usage: fab run_test:name=<file>:<Test_Case>.<test_method>'
print ('Sample: fab run_test:name={}/test_json.py:TestJson.'
'test_int_param_py'.format(TESTS_DIR))
return
local('nosetests -vv -s --with-timer --log-config={} {}'.format(
LOG_CONFIG_PATH, name))
|
<commit_before><commit_msg>Add fab file with two tasks: test_all, run_test<commit_after>#!/usr/bin/env python2
#-*- coding: utf-8 -*-
import os
from fabric.api import local, task
from pylua.settings import LOG_CONFIG
ROOT_DIR = os.path.dirname(os.path.realpath(__file__))
SRC_DIR = 'pylua'
TESTS_DIR = os.path.join(SRC_DIR, 'test')
LOG_CONFIG_PATH = os.path.join(ROOT_DIR, SRC_DIR, LOG_CONFIG)
@task
def test_all():
local('nosetests -vv --with-timer {} --log-config={}'.format(
TESTS_DIR, LOG_CONFIG_PATH))
@task
def run_test(name=None):
if name is None:
print 'Usage: fab run_test:name=<file>:<Test_Case>.<test_method>'
print ('Sample: fab run_test:name={}/test_json.py:TestJson.'
'test_int_param_py'.format(TESTS_DIR))
return
local('nosetests -vv -s --with-timer --log-config={} {}'.format(
LOG_CONFIG_PATH, name))
|
|
d38751f466f2b76f71dc716b85cdd1ffbabd481d
|
cpro/migrations/0015_auto_20170217_0801.py
|
cpro/migrations/0015_auto_20170217_0801.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import cpro.models
class Migration(migrations.Migration):
dependencies = [
('cpro', '0014_auto_20170129_2236'),
]
operations = [
migrations.AlterField(
model_name='card',
name='art_hd_awakened',
field=models.ImageField(upload_to=cpro.models.uploadItem(b'c/art_hd/a'), null=True, verbose_name='Art (HD Awakened)'),
preserve_default=True,
),
migrations.AlterField(
model_name='card',
name='i_skill',
field=models.PositiveIntegerField(null=True, verbose_name='Skill', choices=[(0, 'Lesser Perfect Lock'), (1, 'Greater Perfect Lock'), (2, 'Extreme Perfect Lock'), (3, 'Combo Lock'), (4, 'Healer'), (5, 'Life Lock'), (6, 'Combo Bonus'), (7, 'Perfect Score Bonus'), (8, 'Overload'), (9, 'Score Boost'), (10, 'All Round'), (11, 'Concentration')]),
preserve_default=True,
),
]
|
Add 2 new skills: concentration and all round, with sentences in English and Japanese
|
Add 2 new skills: concentration and all round, with sentences in English and Japanese
|
Python
|
apache-2.0
|
SchoolIdolTomodachi/CinderellaProducers,SchoolIdolTomodachi/CinderellaProducers
|
Add 2 new skills: concentration and all round, with sentences in English and Japanese
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import cpro.models
class Migration(migrations.Migration):
dependencies = [
('cpro', '0014_auto_20170129_2236'),
]
operations = [
migrations.AlterField(
model_name='card',
name='art_hd_awakened',
field=models.ImageField(upload_to=cpro.models.uploadItem(b'c/art_hd/a'), null=True, verbose_name='Art (HD Awakened)'),
preserve_default=True,
),
migrations.AlterField(
model_name='card',
name='i_skill',
field=models.PositiveIntegerField(null=True, verbose_name='Skill', choices=[(0, 'Lesser Perfect Lock'), (1, 'Greater Perfect Lock'), (2, 'Extreme Perfect Lock'), (3, 'Combo Lock'), (4, 'Healer'), (5, 'Life Lock'), (6, 'Combo Bonus'), (7, 'Perfect Score Bonus'), (8, 'Overload'), (9, 'Score Boost'), (10, 'All Round'), (11, 'Concentration')]),
preserve_default=True,
),
]
|
<commit_before><commit_msg>Add 2 new skills: concentration and all round, with sentences in English and Japanese<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import cpro.models
class Migration(migrations.Migration):
dependencies = [
('cpro', '0014_auto_20170129_2236'),
]
operations = [
migrations.AlterField(
model_name='card',
name='art_hd_awakened',
field=models.ImageField(upload_to=cpro.models.uploadItem(b'c/art_hd/a'), null=True, verbose_name='Art (HD Awakened)'),
preserve_default=True,
),
migrations.AlterField(
model_name='card',
name='i_skill',
field=models.PositiveIntegerField(null=True, verbose_name='Skill', choices=[(0, 'Lesser Perfect Lock'), (1, 'Greater Perfect Lock'), (2, 'Extreme Perfect Lock'), (3, 'Combo Lock'), (4, 'Healer'), (5, 'Life Lock'), (6, 'Combo Bonus'), (7, 'Perfect Score Bonus'), (8, 'Overload'), (9, 'Score Boost'), (10, 'All Round'), (11, 'Concentration')]),
preserve_default=True,
),
]
|
Add 2 new skills: concentration and all round, with sentences in English and Japanese# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import cpro.models
class Migration(migrations.Migration):
dependencies = [
('cpro', '0014_auto_20170129_2236'),
]
operations = [
migrations.AlterField(
model_name='card',
name='art_hd_awakened',
field=models.ImageField(upload_to=cpro.models.uploadItem(b'c/art_hd/a'), null=True, verbose_name='Art (HD Awakened)'),
preserve_default=True,
),
migrations.AlterField(
model_name='card',
name='i_skill',
field=models.PositiveIntegerField(null=True, verbose_name='Skill', choices=[(0, 'Lesser Perfect Lock'), (1, 'Greater Perfect Lock'), (2, 'Extreme Perfect Lock'), (3, 'Combo Lock'), (4, 'Healer'), (5, 'Life Lock'), (6, 'Combo Bonus'), (7, 'Perfect Score Bonus'), (8, 'Overload'), (9, 'Score Boost'), (10, 'All Round'), (11, 'Concentration')]),
preserve_default=True,
),
]
|
<commit_before><commit_msg>Add 2 new skills: concentration and all round, with sentences in English and Japanese<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import cpro.models
class Migration(migrations.Migration):
dependencies = [
('cpro', '0014_auto_20170129_2236'),
]
operations = [
migrations.AlterField(
model_name='card',
name='art_hd_awakened',
field=models.ImageField(upload_to=cpro.models.uploadItem(b'c/art_hd/a'), null=True, verbose_name='Art (HD Awakened)'),
preserve_default=True,
),
migrations.AlterField(
model_name='card',
name='i_skill',
field=models.PositiveIntegerField(null=True, verbose_name='Skill', choices=[(0, 'Lesser Perfect Lock'), (1, 'Greater Perfect Lock'), (2, 'Extreme Perfect Lock'), (3, 'Combo Lock'), (4, 'Healer'), (5, 'Life Lock'), (6, 'Combo Bonus'), (7, 'Perfect Score Bonus'), (8, 'Overload'), (9, 'Score Boost'), (10, 'All Round'), (11, 'Concentration')]),
preserve_default=True,
),
]
|
|
1875c5f1a813abda0ecf52b5b2604011fe2f2c15
|
Examples/Scripting/PostJob/Sample/Sample.py
|
Examples/Scripting/PostJob/Sample/Sample.py
|
#Python.NET
###############################################################
# This is a Python.NET/CPython script. #
# To use IronPython, remove "#Python.NET" from the first #
# line of this file. Make sure you don't use the quotes. #
###############################################################
###############################################################
## Imports
###############################################################
from System.IO import *
from Deadline.Scripting import *
import os
###############################################################
## Entry point and other source
###############################################################
def __main__():
""" This is run by Deadline before or after a task depending on which context its used in """
LogInfo("Script ran.")
LogInfo("...And did absolutely nothing")
|
Add a sample Pre/Post job
|
Add a sample Pre/Post job
|
Python
|
apache-2.0
|
ThinkboxSoftware/Deadline,ThinkboxSoftware/Deadline,ThinkboxSoftware/Deadline
|
Add a sample Pre/Post job
|
#Python.NET
###############################################################
# This is a Python.NET/CPython script. #
# To use IronPython, remove "#Python.NET" from the first #
# line of this file. Make sure you don't use the quotes. #
###############################################################
###############################################################
## Imports
###############################################################
from System.IO import *
from Deadline.Scripting import *
import os
###############################################################
## Entry point and other source
###############################################################
def __main__():
""" This is run by Deadline before or after a task depending on which context its used in """
LogInfo("Script ran.")
LogInfo("...And did absolutely nothing")
|
<commit_before><commit_msg>Add a sample Pre/Post job<commit_after>
|
#Python.NET
###############################################################
# This is a Python.NET/CPython script. #
# To use IronPython, remove "#Python.NET" from the first #
# line of this file. Make sure you don't use the quotes. #
###############################################################
###############################################################
## Imports
###############################################################
from System.IO import *
from Deadline.Scripting import *
import os
###############################################################
## Entry point and other source
###############################################################
def __main__():
""" This is run by Deadline before or after a task depending on which context its used in """
LogInfo("Script ran.")
LogInfo("...And did absolutely nothing")
|
Add a sample Pre/Post job#Python.NET
###############################################################
# This is a Python.NET/CPython script. #
# To use IronPython, remove "#Python.NET" from the first #
# line of this file. Make sure you don't use the quotes. #
###############################################################
###############################################################
## Imports
###############################################################
from System.IO import *
from Deadline.Scripting import *
import os
###############################################################
## Entry point and other source
###############################################################
def __main__():
""" This is run by Deadline before or after a task depending on which context its used in """
LogInfo("Script ran.")
LogInfo("...And did absolutely nothing")
|
<commit_before><commit_msg>Add a sample Pre/Post job<commit_after>#Python.NET
###############################################################
# This is a Python.NET/CPython script. #
# To use IronPython, remove "#Python.NET" from the first #
# line of this file. Make sure you don't use the quotes. #
###############################################################
###############################################################
## Imports
###############################################################
from System.IO import *
from Deadline.Scripting import *
import os
###############################################################
## Entry point and other source
###############################################################
def __main__():
""" This is run by Deadline before or after a task depending on which context its used in """
LogInfo("Script ran.")
LogInfo("...And did absolutely nothing")
|
|
8bebea72536a8a6fc480631d737f2426f52a356c
|
Lib/fontTools/ttLib/tables/_k_e_r_n_test.py
|
Lib/fontTools/ttLib/tables/_k_e_r_n_test.py
|
from __future__ import print_function, absolute_import
from fontTools.misc.py23 import *
from fontTools import ttLib
import unittest
from ._k_e_r_n import KernTable_format_0
class MockFont(object):
def getGlyphOrder(self):
return ["glyph00000", "glyph00001", "glyph00002", "glyph00003"]
def getGlyphName(self, glyphID):
return "glyph%.5d" % glyphID
class KernTable_format_0_Test(unittest.TestCase):
def test_decompileBadGlyphId(self):
subtable = KernTable_format_0()
subtable.apple = False
subtable.decompile( b'\x00' * 6
+ b'\x00' + b'\x02' + b'\x00' * 6
+ b'\x00' + b'\x01' + b'\x00' + b'\x03' + b'\x00' + b'\x01'
+ b'\x00' + b'\x01' + b'\xFF' + b'\xFF' + b'\x00' + b'\x02',
MockFont())
self.assertEqual(subtable[("glyph00001", "glyph00003")], 1)
self.assertEqual(subtable[("glyph00001", "glyph65535")], 2)
if __name__ == "__main__":
unittest.main()
|
Test for loading a kern table with a bad glyph id.
|
Test for loading a kern table with a bad glyph id.
|
Python
|
mit
|
googlefonts/fonttools,fonttools/fonttools
|
Test for loading a kern table with a bad glyph id.
|
from __future__ import print_function, absolute_import
from fontTools.misc.py23 import *
from fontTools import ttLib
import unittest
from ._k_e_r_n import KernTable_format_0
class MockFont(object):
def getGlyphOrder(self):
return ["glyph00000", "glyph00001", "glyph00002", "glyph00003"]
def getGlyphName(self, glyphID):
return "glyph%.5d" % glyphID
class KernTable_format_0_Test(unittest.TestCase):
def test_decompileBadGlyphId(self):
subtable = KernTable_format_0()
subtable.apple = False
subtable.decompile( b'\x00' * 6
+ b'\x00' + b'\x02' + b'\x00' * 6
+ b'\x00' + b'\x01' + b'\x00' + b'\x03' + b'\x00' + b'\x01'
+ b'\x00' + b'\x01' + b'\xFF' + b'\xFF' + b'\x00' + b'\x02',
MockFont())
self.assertEqual(subtable[("glyph00001", "glyph00003")], 1)
self.assertEqual(subtable[("glyph00001", "glyph65535")], 2)
if __name__ == "__main__":
unittest.main()
|
<commit_before><commit_msg>Test for loading a kern table with a bad glyph id.<commit_after>
|
from __future__ import print_function, absolute_import
from fontTools.misc.py23 import *
from fontTools import ttLib
import unittest
from ._k_e_r_n import KernTable_format_0
class MockFont(object):
def getGlyphOrder(self):
return ["glyph00000", "glyph00001", "glyph00002", "glyph00003"]
def getGlyphName(self, glyphID):
return "glyph%.5d" % glyphID
class KernTable_format_0_Test(unittest.TestCase):
def test_decompileBadGlyphId(self):
subtable = KernTable_format_0()
subtable.apple = False
subtable.decompile( b'\x00' * 6
+ b'\x00' + b'\x02' + b'\x00' * 6
+ b'\x00' + b'\x01' + b'\x00' + b'\x03' + b'\x00' + b'\x01'
+ b'\x00' + b'\x01' + b'\xFF' + b'\xFF' + b'\x00' + b'\x02',
MockFont())
self.assertEqual(subtable[("glyph00001", "glyph00003")], 1)
self.assertEqual(subtable[("glyph00001", "glyph65535")], 2)
if __name__ == "__main__":
unittest.main()
|
Test for loading a kern table with a bad glyph id.from __future__ import print_function, absolute_import
from fontTools.misc.py23 import *
from fontTools import ttLib
import unittest
from ._k_e_r_n import KernTable_format_0
class MockFont(object):
def getGlyphOrder(self):
return ["glyph00000", "glyph00001", "glyph00002", "glyph00003"]
def getGlyphName(self, glyphID):
return "glyph%.5d" % glyphID
class KernTable_format_0_Test(unittest.TestCase):
def test_decompileBadGlyphId(self):
subtable = KernTable_format_0()
subtable.apple = False
subtable.decompile( b'\x00' * 6
+ b'\x00' + b'\x02' + b'\x00' * 6
+ b'\x00' + b'\x01' + b'\x00' + b'\x03' + b'\x00' + b'\x01'
+ b'\x00' + b'\x01' + b'\xFF' + b'\xFF' + b'\x00' + b'\x02',
MockFont())
self.assertEqual(subtable[("glyph00001", "glyph00003")], 1)
self.assertEqual(subtable[("glyph00001", "glyph65535")], 2)
if __name__ == "__main__":
unittest.main()
|
<commit_before><commit_msg>Test for loading a kern table with a bad glyph id.<commit_after>from __future__ import print_function, absolute_import
from fontTools.misc.py23 import *
from fontTools import ttLib
import unittest
from ._k_e_r_n import KernTable_format_0
class MockFont(object):
def getGlyphOrder(self):
return ["glyph00000", "glyph00001", "glyph00002", "glyph00003"]
def getGlyphName(self, glyphID):
return "glyph%.5d" % glyphID
class KernTable_format_0_Test(unittest.TestCase):
def test_decompileBadGlyphId(self):
subtable = KernTable_format_0()
subtable.apple = False
subtable.decompile( b'\x00' * 6
+ b'\x00' + b'\x02' + b'\x00' * 6
+ b'\x00' + b'\x01' + b'\x00' + b'\x03' + b'\x00' + b'\x01'
+ b'\x00' + b'\x01' + b'\xFF' + b'\xFF' + b'\x00' + b'\x02',
MockFont())
self.assertEqual(subtable[("glyph00001", "glyph00003")], 1)
self.assertEqual(subtable[("glyph00001", "glyph65535")], 2)
if __name__ == "__main__":
unittest.main()
|
|
7d89303fddd12fc88fd04bcf27826c3c801b1eff
|
scripts/import_submissions.py
|
scripts/import_submissions.py
|
#!/usr/bin/env python
# Copyright (C) 2012 Lukas Lalinsky
# Distributed under the MIT license, see the LICENSE file for details.
import json
import logging
from acoustid.script import run_script
from acoustid.data.submission import import_queued_submissions
logger = logging.getLogger(__name__)
def main(script, opts, args):
channel = script.redis.pubsub()
channel.subscribe('channel.submissions')
for message in channel.listen():
ids = json.loads(message)
logger.debug('Got notified about %s new submissions', len(ids))
#conn = script.engine.connect()
#import_queued_submissions(conn, limit=300, index=script.index)
run_script(main)
|
Add a dummy script for continuous submission importing
|
Add a dummy script for continuous submission importing
|
Python
|
mit
|
lalinsky/acoustid-server,lalinsky/acoustid-server,lalinsky/acoustid-server,lalinsky/acoustid-server
|
Add a dummy script for continuous submission importing
|
#!/usr/bin/env python
# Copyright (C) 2012 Lukas Lalinsky
# Distributed under the MIT license, see the LICENSE file for details.
import json
import logging
from acoustid.script import run_script
from acoustid.data.submission import import_queued_submissions
logger = logging.getLogger(__name__)
def main(script, opts, args):
channel = script.redis.pubsub()
channel.subscribe('channel.submissions')
for message in channel.listen():
ids = json.loads(message)
logger.debug('Got notified about %s new submissions', len(ids))
#conn = script.engine.connect()
#import_queued_submissions(conn, limit=300, index=script.index)
run_script(main)
|
<commit_before><commit_msg>Add a dummy script for continuous submission importing<commit_after>
|
#!/usr/bin/env python
# Copyright (C) 2012 Lukas Lalinsky
# Distributed under the MIT license, see the LICENSE file for details.
import json
import logging
from acoustid.script import run_script
from acoustid.data.submission import import_queued_submissions
logger = logging.getLogger(__name__)
def main(script, opts, args):
channel = script.redis.pubsub()
channel.subscribe('channel.submissions')
for message in channel.listen():
ids = json.loads(message)
logger.debug('Got notified about %s new submissions', len(ids))
#conn = script.engine.connect()
#import_queued_submissions(conn, limit=300, index=script.index)
run_script(main)
|
Add a dummy script for continuous submission importing#!/usr/bin/env python
# Copyright (C) 2012 Lukas Lalinsky
# Distributed under the MIT license, see the LICENSE file for details.
import json
import logging
from acoustid.script import run_script
from acoustid.data.submission import import_queued_submissions
logger = logging.getLogger(__name__)
def main(script, opts, args):
channel = script.redis.pubsub()
channel.subscribe('channel.submissions')
for message in channel.listen():
ids = json.loads(message)
logger.debug('Got notified about %s new submissions', len(ids))
#conn = script.engine.connect()
#import_queued_submissions(conn, limit=300, index=script.index)
run_script(main)
|
<commit_before><commit_msg>Add a dummy script for continuous submission importing<commit_after>#!/usr/bin/env python
# Copyright (C) 2012 Lukas Lalinsky
# Distributed under the MIT license, see the LICENSE file for details.
import json
import logging
from acoustid.script import run_script
from acoustid.data.submission import import_queued_submissions
logger = logging.getLogger(__name__)
def main(script, opts, args):
channel = script.redis.pubsub()
channel.subscribe('channel.submissions')
for message in channel.listen():
ids = json.loads(message)
logger.debug('Got notified about %s new submissions', len(ids))
#conn = script.engine.connect()
#import_queued_submissions(conn, limit=300, index=script.index)
run_script(main)
|
|
206454788c6d054f8c562d4d5d13a737d9cb6d27
|
tests/sentry/api/serializers/test_project.py
|
tests/sentry/api/serializers/test_project.py
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import six
from sentry.api.serializers import serialize
from sentry.api.serializers.models.project import (
ProjectWithOrganizationSerializer, ProjectWithTeamSerializer
)
from sentry.testutils import TestCase
class ProjectSerializerTest(TestCase):
def test_simple(self):
user = self.create_user(username='foo')
organization = self.create_organization(owner=user)
team = self.create_team(organization=organization)
project = self.create_project(team=team, organization=organization, name='foo')
result = serialize(project, user)
assert result['slug'] == project.slug
assert result['name'] == project.name
assert result['id'] == six.text_type(project.id)
class ProjectWithTeamSerializerTest(TestCase):
def test_simple(self):
user = self.create_user(username='foo')
organization = self.create_organization(owner=user)
team = self.create_team(organization=organization)
project = self.create_project(team=team, organization=organization, name='foo')
result = serialize(project, user, ProjectWithTeamSerializer())
assert result['slug'] == project.slug
assert result['name'] == project.name
assert result['id'] == six.text_type(project.id)
assert result['team'] == serialize(team, user)
class ProjectWithOrganizationSerializerTest(TestCase):
def test_simple(self):
user = self.create_user(username='foo')
organization = self.create_organization(owner=user)
team = self.create_team(organization=organization)
project = self.create_project(team=team, organization=organization, name='foo')
result = serialize(project, user, ProjectWithOrganizationSerializer())
assert result['slug'] == project.slug
assert result['name'] == project.name
assert result['id'] == six.text_type(project.id)
assert result['organization'] == serialize(organization, user)
|
Add tests for project serializer
|
test(projects): Add tests for project serializer
|
Python
|
bsd-3-clause
|
beeftornado/sentry,ifduyue/sentry,gencer/sentry,ifduyue/sentry,beeftornado/sentry,gencer/sentry,mvaled/sentry,mvaled/sentry,gencer/sentry,looker/sentry,gencer/sentry,beeftornado/sentry,gencer/sentry,mvaled/sentry,looker/sentry,ifduyue/sentry,ifduyue/sentry,mvaled/sentry,looker/sentry,mvaled/sentry,looker/sentry,ifduyue/sentry,mvaled/sentry,looker/sentry
|
test(projects): Add tests for project serializer
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import six
from sentry.api.serializers import serialize
from sentry.api.serializers.models.project import (
ProjectWithOrganizationSerializer, ProjectWithTeamSerializer
)
from sentry.testutils import TestCase
class ProjectSerializerTest(TestCase):
def test_simple(self):
user = self.create_user(username='foo')
organization = self.create_organization(owner=user)
team = self.create_team(organization=organization)
project = self.create_project(team=team, organization=organization, name='foo')
result = serialize(project, user)
assert result['slug'] == project.slug
assert result['name'] == project.name
assert result['id'] == six.text_type(project.id)
class ProjectWithTeamSerializerTest(TestCase):
def test_simple(self):
user = self.create_user(username='foo')
organization = self.create_organization(owner=user)
team = self.create_team(organization=organization)
project = self.create_project(team=team, organization=organization, name='foo')
result = serialize(project, user, ProjectWithTeamSerializer())
assert result['slug'] == project.slug
assert result['name'] == project.name
assert result['id'] == six.text_type(project.id)
assert result['team'] == serialize(team, user)
class ProjectWithOrganizationSerializerTest(TestCase):
def test_simple(self):
user = self.create_user(username='foo')
organization = self.create_organization(owner=user)
team = self.create_team(organization=organization)
project = self.create_project(team=team, organization=organization, name='foo')
result = serialize(project, user, ProjectWithOrganizationSerializer())
assert result['slug'] == project.slug
assert result['name'] == project.name
assert result['id'] == six.text_type(project.id)
assert result['organization'] == serialize(organization, user)
|
<commit_before><commit_msg>test(projects): Add tests for project serializer<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import six
from sentry.api.serializers import serialize
from sentry.api.serializers.models.project import (
ProjectWithOrganizationSerializer, ProjectWithTeamSerializer
)
from sentry.testutils import TestCase
class ProjectSerializerTest(TestCase):
def test_simple(self):
user = self.create_user(username='foo')
organization = self.create_organization(owner=user)
team = self.create_team(organization=organization)
project = self.create_project(team=team, organization=organization, name='foo')
result = serialize(project, user)
assert result['slug'] == project.slug
assert result['name'] == project.name
assert result['id'] == six.text_type(project.id)
class ProjectWithTeamSerializerTest(TestCase):
def test_simple(self):
user = self.create_user(username='foo')
organization = self.create_organization(owner=user)
team = self.create_team(organization=organization)
project = self.create_project(team=team, organization=organization, name='foo')
result = serialize(project, user, ProjectWithTeamSerializer())
assert result['slug'] == project.slug
assert result['name'] == project.name
assert result['id'] == six.text_type(project.id)
assert result['team'] == serialize(team, user)
class ProjectWithOrganizationSerializerTest(TestCase):
def test_simple(self):
user = self.create_user(username='foo')
organization = self.create_organization(owner=user)
team = self.create_team(organization=organization)
project = self.create_project(team=team, organization=organization, name='foo')
result = serialize(project, user, ProjectWithOrganizationSerializer())
assert result['slug'] == project.slug
assert result['name'] == project.name
assert result['id'] == six.text_type(project.id)
assert result['organization'] == serialize(organization, user)
|
test(projects): Add tests for project serializer# -*- coding: utf-8 -*-
from __future__ import absolute_import
import six
from sentry.api.serializers import serialize
from sentry.api.serializers.models.project import (
ProjectWithOrganizationSerializer, ProjectWithTeamSerializer
)
from sentry.testutils import TestCase
class ProjectSerializerTest(TestCase):
def test_simple(self):
user = self.create_user(username='foo')
organization = self.create_organization(owner=user)
team = self.create_team(organization=organization)
project = self.create_project(team=team, organization=organization, name='foo')
result = serialize(project, user)
assert result['slug'] == project.slug
assert result['name'] == project.name
assert result['id'] == six.text_type(project.id)
class ProjectWithTeamSerializerTest(TestCase):
def test_simple(self):
user = self.create_user(username='foo')
organization = self.create_organization(owner=user)
team = self.create_team(organization=organization)
project = self.create_project(team=team, organization=organization, name='foo')
result = serialize(project, user, ProjectWithTeamSerializer())
assert result['slug'] == project.slug
assert result['name'] == project.name
assert result['id'] == six.text_type(project.id)
assert result['team'] == serialize(team, user)
class ProjectWithOrganizationSerializerTest(TestCase):
def test_simple(self):
user = self.create_user(username='foo')
organization = self.create_organization(owner=user)
team = self.create_team(organization=organization)
project = self.create_project(team=team, organization=organization, name='foo')
result = serialize(project, user, ProjectWithOrganizationSerializer())
assert result['slug'] == project.slug
assert result['name'] == project.name
assert result['id'] == six.text_type(project.id)
assert result['organization'] == serialize(organization, user)
|
<commit_before><commit_msg>test(projects): Add tests for project serializer<commit_after># -*- coding: utf-8 -*-
from __future__ import absolute_import
import six
from sentry.api.serializers import serialize
from sentry.api.serializers.models.project import (
ProjectWithOrganizationSerializer, ProjectWithTeamSerializer
)
from sentry.testutils import TestCase
class ProjectSerializerTest(TestCase):
def test_simple(self):
user = self.create_user(username='foo')
organization = self.create_organization(owner=user)
team = self.create_team(organization=organization)
project = self.create_project(team=team, organization=organization, name='foo')
result = serialize(project, user)
assert result['slug'] == project.slug
assert result['name'] == project.name
assert result['id'] == six.text_type(project.id)
class ProjectWithTeamSerializerTest(TestCase):
def test_simple(self):
user = self.create_user(username='foo')
organization = self.create_organization(owner=user)
team = self.create_team(organization=organization)
project = self.create_project(team=team, organization=organization, name='foo')
result = serialize(project, user, ProjectWithTeamSerializer())
assert result['slug'] == project.slug
assert result['name'] == project.name
assert result['id'] == six.text_type(project.id)
assert result['team'] == serialize(team, user)
class ProjectWithOrganizationSerializerTest(TestCase):
def test_simple(self):
user = self.create_user(username='foo')
organization = self.create_organization(owner=user)
team = self.create_team(organization=organization)
project = self.create_project(team=team, organization=organization, name='foo')
result = serialize(project, user, ProjectWithOrganizationSerializer())
assert result['slug'] == project.slug
assert result['name'] == project.name
assert result['id'] == six.text_type(project.id)
assert result['organization'] == serialize(organization, user)
|
|
f5b185fa2bba29efe3c1db2cd6c6a50188be24e3
|
tests/test_unlocking.py
|
tests/test_unlocking.py
|
# Tests for SecretStorage
# Author: Dmitry Shachnev, 2018
# License: BSD
import unittest
from secretstorage import dbus_init, get_any_collection
from secretstorage.util import BUS_NAME
from secretstorage.exceptions import LockedException
@unittest.skipIf(BUS_NAME == "org.freedesktop.secrets",
"This test should only be run with the mocked server.")
class LockingUnlockingTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.connection = dbus_init()
cls.collection = get_any_collection(cls.connection)
def test_lock_unlock(self):
self.collection.lock()
self.assertRaises(LockedException, self.collection.ensure_not_locked)
self.collection.unlock()
self.collection.ensure_not_locked()
|
Add a basic test for locking and unlocking
|
Add a basic test for locking and unlocking
To improve test coverage.
|
Python
|
bsd-3-clause
|
mitya57/secretstorage
|
Add a basic test for locking and unlocking
To improve test coverage.
|
# Tests for SecretStorage
# Author: Dmitry Shachnev, 2018
# License: BSD
import unittest
from secretstorage import dbus_init, get_any_collection
from secretstorage.util import BUS_NAME
from secretstorage.exceptions import LockedException
@unittest.skipIf(BUS_NAME == "org.freedesktop.secrets",
"This test should only be run with the mocked server.")
class LockingUnlockingTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.connection = dbus_init()
cls.collection = get_any_collection(cls.connection)
def test_lock_unlock(self):
self.collection.lock()
self.assertRaises(LockedException, self.collection.ensure_not_locked)
self.collection.unlock()
self.collection.ensure_not_locked()
|
<commit_before><commit_msg>Add a basic test for locking and unlocking
To improve test coverage.<commit_after>
|
# Tests for SecretStorage
# Author: Dmitry Shachnev, 2018
# License: BSD
import unittest
from secretstorage import dbus_init, get_any_collection
from secretstorage.util import BUS_NAME
from secretstorage.exceptions import LockedException
@unittest.skipIf(BUS_NAME == "org.freedesktop.secrets",
"This test should only be run with the mocked server.")
class LockingUnlockingTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.connection = dbus_init()
cls.collection = get_any_collection(cls.connection)
def test_lock_unlock(self):
self.collection.lock()
self.assertRaises(LockedException, self.collection.ensure_not_locked)
self.collection.unlock()
self.collection.ensure_not_locked()
|
Add a basic test for locking and unlocking
To improve test coverage.# Tests for SecretStorage
# Author: Dmitry Shachnev, 2018
# License: BSD
import unittest
from secretstorage import dbus_init, get_any_collection
from secretstorage.util import BUS_NAME
from secretstorage.exceptions import LockedException
@unittest.skipIf(BUS_NAME == "org.freedesktop.secrets",
"This test should only be run with the mocked server.")
class LockingUnlockingTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.connection = dbus_init()
cls.collection = get_any_collection(cls.connection)
def test_lock_unlock(self):
self.collection.lock()
self.assertRaises(LockedException, self.collection.ensure_not_locked)
self.collection.unlock()
self.collection.ensure_not_locked()
|
<commit_before><commit_msg>Add a basic test for locking and unlocking
To improve test coverage.<commit_after># Tests for SecretStorage
# Author: Dmitry Shachnev, 2018
# License: BSD
import unittest
from secretstorage import dbus_init, get_any_collection
from secretstorage.util import BUS_NAME
from secretstorage.exceptions import LockedException
@unittest.skipIf(BUS_NAME == "org.freedesktop.secrets",
"This test should only be run with the mocked server.")
class LockingUnlockingTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.connection = dbus_init()
cls.collection = get_any_collection(cls.connection)
def test_lock_unlock(self):
self.collection.lock()
self.assertRaises(LockedException, self.collection.ensure_not_locked)
self.collection.unlock()
self.collection.ensure_not_locked()
|
|
085efb9bfe476232695cb66ddf28d6c1a6f84c2f
|
core/migrations/0005_auto_20170506_1026.py
|
core/migrations/0005_auto_20170506_1026.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-05-06 14:26
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('core', '0004_auto_20170426_1717'),
]
operations = [
migrations.AlterModelOptions(
name='client',
options={'default_permissions': ('view', 'add', 'change', 'delete'), 'ordering': ['-id']},
),
migrations.AlterModelOptions(
name='entry',
options={'default_permissions': ('view', 'add', 'change', 'delete'), 'ordering': ['-date', '-id'], 'verbose_name_plural': 'Entries'},
),
migrations.AlterModelOptions(
name='project',
options={'default_permissions': ('view', 'add', 'change', 'delete'), 'ordering': ['client', '-id']},
),
migrations.AlterField(
model_name='project',
name='client',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='projects', to='core.Client'),
),
]
|
Add migration for Model changes.
|
Add migration for Model changes.
|
Python
|
bsd-2-clause
|
cdubz/timestrap,cdubz/timestrap,cdubz/timestrap,muhleder/timestrap,Leahelisabeth/timestrap,muhleder/timestrap,Leahelisabeth/timestrap,muhleder/timestrap,overshard/timestrap,overshard/timestrap,Leahelisabeth/timestrap,overshard/timestrap,Leahelisabeth/timestrap
|
Add migration for Model changes.
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-05-06 14:26
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('core', '0004_auto_20170426_1717'),
]
operations = [
migrations.AlterModelOptions(
name='client',
options={'default_permissions': ('view', 'add', 'change', 'delete'), 'ordering': ['-id']},
),
migrations.AlterModelOptions(
name='entry',
options={'default_permissions': ('view', 'add', 'change', 'delete'), 'ordering': ['-date', '-id'], 'verbose_name_plural': 'Entries'},
),
migrations.AlterModelOptions(
name='project',
options={'default_permissions': ('view', 'add', 'change', 'delete'), 'ordering': ['client', '-id']},
),
migrations.AlterField(
model_name='project',
name='client',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='projects', to='core.Client'),
),
]
|
<commit_before><commit_msg>Add migration for Model changes.<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-05-06 14:26
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('core', '0004_auto_20170426_1717'),
]
operations = [
migrations.AlterModelOptions(
name='client',
options={'default_permissions': ('view', 'add', 'change', 'delete'), 'ordering': ['-id']},
),
migrations.AlterModelOptions(
name='entry',
options={'default_permissions': ('view', 'add', 'change', 'delete'), 'ordering': ['-date', '-id'], 'verbose_name_plural': 'Entries'},
),
migrations.AlterModelOptions(
name='project',
options={'default_permissions': ('view', 'add', 'change', 'delete'), 'ordering': ['client', '-id']},
),
migrations.AlterField(
model_name='project',
name='client',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='projects', to='core.Client'),
),
]
|
Add migration for Model changes.# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-05-06 14:26
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('core', '0004_auto_20170426_1717'),
]
operations = [
migrations.AlterModelOptions(
name='client',
options={'default_permissions': ('view', 'add', 'change', 'delete'), 'ordering': ['-id']},
),
migrations.AlterModelOptions(
name='entry',
options={'default_permissions': ('view', 'add', 'change', 'delete'), 'ordering': ['-date', '-id'], 'verbose_name_plural': 'Entries'},
),
migrations.AlterModelOptions(
name='project',
options={'default_permissions': ('view', 'add', 'change', 'delete'), 'ordering': ['client', '-id']},
),
migrations.AlterField(
model_name='project',
name='client',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='projects', to='core.Client'),
),
]
|
<commit_before><commit_msg>Add migration for Model changes.<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-05-06 14:26
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('core', '0004_auto_20170426_1717'),
]
operations = [
migrations.AlterModelOptions(
name='client',
options={'default_permissions': ('view', 'add', 'change', 'delete'), 'ordering': ['-id']},
),
migrations.AlterModelOptions(
name='entry',
options={'default_permissions': ('view', 'add', 'change', 'delete'), 'ordering': ['-date', '-id'], 'verbose_name_plural': 'Entries'},
),
migrations.AlterModelOptions(
name='project',
options={'default_permissions': ('view', 'add', 'change', 'delete'), 'ordering': ['client', '-id']},
),
migrations.AlterField(
model_name='project',
name='client',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='projects', to='core.Client'),
),
]
|
|
3bc86ca5bd302103a57b6e2829d549aa1e243766
|
go/apps/jsbox/tests/test_forms.py
|
go/apps/jsbox/tests/test_forms.py
|
from django.test import TestCase
from go.apps.jsbox.forms import JsboxForm
class JsboxFormTestCase(TestCase):
def test_to_metadata(self):
form = JsboxForm(data={
'javascript': 'x = 1;',
})
self.assertTrue(form.is_valid())
metadata = form.to_metadata()
self.assertEqual(metadata, {
'javascript': 'x = 1;',
'source_url': '',
})
|
Add basic test for jsbox forms.
|
Add basic test for jsbox forms.
|
Python
|
bsd-3-clause
|
praekelt/vumi-go,praekelt/vumi-go,praekelt/vumi-go,praekelt/vumi-go
|
Add basic test for jsbox forms.
|
from django.test import TestCase
from go.apps.jsbox.forms import JsboxForm
class JsboxFormTestCase(TestCase):
def test_to_metadata(self):
form = JsboxForm(data={
'javascript': 'x = 1;',
})
self.assertTrue(form.is_valid())
metadata = form.to_metadata()
self.assertEqual(metadata, {
'javascript': 'x = 1;',
'source_url': '',
})
|
<commit_before><commit_msg>Add basic test for jsbox forms.<commit_after>
|
from django.test import TestCase
from go.apps.jsbox.forms import JsboxForm
class JsboxFormTestCase(TestCase):
def test_to_metadata(self):
form = JsboxForm(data={
'javascript': 'x = 1;',
})
self.assertTrue(form.is_valid())
metadata = form.to_metadata()
self.assertEqual(metadata, {
'javascript': 'x = 1;',
'source_url': '',
})
|
Add basic test for jsbox forms.from django.test import TestCase
from go.apps.jsbox.forms import JsboxForm
class JsboxFormTestCase(TestCase):
def test_to_metadata(self):
form = JsboxForm(data={
'javascript': 'x = 1;',
})
self.assertTrue(form.is_valid())
metadata = form.to_metadata()
self.assertEqual(metadata, {
'javascript': 'x = 1;',
'source_url': '',
})
|
<commit_before><commit_msg>Add basic test for jsbox forms.<commit_after>from django.test import TestCase
from go.apps.jsbox.forms import JsboxForm
class JsboxFormTestCase(TestCase):
def test_to_metadata(self):
form = JsboxForm(data={
'javascript': 'x = 1;',
})
self.assertTrue(form.is_valid())
metadata = form.to_metadata()
self.assertEqual(metadata, {
'javascript': 'x = 1;',
'source_url': '',
})
|
|
a6712ce13b0c6e62488adf0ae13fabf986a1b890
|
imgsort.py
|
imgsort.py
|
#!/usr/bin/env python3
import os
import shutil
from PIL import Image
whitelist = (
(1366, 768),
(1600, 900),
(1680, 1050),
(1920, 1080),
(1920, 1200),
)
def split(directory):
filemap = {dimensions: set() for dimensions in whitelist}
filemap['others'] = set()
makepath = lambda filename: os.path.join(directory, filename)
for filename in os.listdir(directory):
abspath = makepath(filename)
if not os.path.isfile(abspath):
continue
image = Image.open(abspath)
if image.size in whitelist:
filemap[image.size].add(abspath)
else:
filemap['others'].add(abspath)
return filemap
def scatter(filemap, directory, fn):
makepath = lambda resolution: os.path.join(directory, '%dx%d' % resolution)
for resolution in whitelist:
if resolution not in filemap or len(filemap[resolution]) == 0:
continue
abspath = makepath(resolution)
os.makedirs(abspath, exist_ok=True)
for filename in filemap[resolution]:
try:
fn(filename, abspath)
except shutil.Error:
pass
for filename in filemap['others']:
try:
fn(filename, directory)
except shutil.Error:
pass
def main():
import argparse
parser = argparse.ArgumentParser(
description='Automatic wallpaper sorter by image dimensions')
parser.add_argument('origin', type=os.path.abspath)
parser.add_argument('destiny', type=os.path.abspath)
parser.add_argument('-m', '--mv', action='store_const', const=shutil.move, default=shutil.copy)
args = parser.parse_args()
scatter(split(args.origin), args.destiny, args.mv)
if __name__ == '__main__':
main()
|
Add simple script to sort images
|
Add simple script to sort images
|
Python
|
mit
|
ranisalt/imgsort
|
Add simple script to sort images
|
#!/usr/bin/env python3
import os
import shutil
from PIL import Image
whitelist = (
(1366, 768),
(1600, 900),
(1680, 1050),
(1920, 1080),
(1920, 1200),
)
def split(directory):
filemap = {dimensions: set() for dimensions in whitelist}
filemap['others'] = set()
makepath = lambda filename: os.path.join(directory, filename)
for filename in os.listdir(directory):
abspath = makepath(filename)
if not os.path.isfile(abspath):
continue
image = Image.open(abspath)
if image.size in whitelist:
filemap[image.size].add(abspath)
else:
filemap['others'].add(abspath)
return filemap
def scatter(filemap, directory, fn):
makepath = lambda resolution: os.path.join(directory, '%dx%d' % resolution)
for resolution in whitelist:
if resolution not in filemap or len(filemap[resolution]) == 0:
continue
abspath = makepath(resolution)
os.makedirs(abspath, exist_ok=True)
for filename in filemap[resolution]:
try:
fn(filename, abspath)
except shutil.Error:
pass
for filename in filemap['others']:
try:
fn(filename, directory)
except shutil.Error:
pass
def main():
import argparse
parser = argparse.ArgumentParser(
description='Automatic wallpaper sorter by image dimensions')
parser.add_argument('origin', type=os.path.abspath)
parser.add_argument('destiny', type=os.path.abspath)
parser.add_argument('-m', '--mv', action='store_const', const=shutil.move, default=shutil.copy)
args = parser.parse_args()
scatter(split(args.origin), args.destiny, args.mv)
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add simple script to sort images<commit_after>
|
#!/usr/bin/env python3
import os
import shutil
from PIL import Image
whitelist = (
(1366, 768),
(1600, 900),
(1680, 1050),
(1920, 1080),
(1920, 1200),
)
def split(directory):
filemap = {dimensions: set() for dimensions in whitelist}
filemap['others'] = set()
makepath = lambda filename: os.path.join(directory, filename)
for filename in os.listdir(directory):
abspath = makepath(filename)
if not os.path.isfile(abspath):
continue
image = Image.open(abspath)
if image.size in whitelist:
filemap[image.size].add(abspath)
else:
filemap['others'].add(abspath)
return filemap
def scatter(filemap, directory, fn):
makepath = lambda resolution: os.path.join(directory, '%dx%d' % resolution)
for resolution in whitelist:
if resolution not in filemap or len(filemap[resolution]) == 0:
continue
abspath = makepath(resolution)
os.makedirs(abspath, exist_ok=True)
for filename in filemap[resolution]:
try:
fn(filename, abspath)
except shutil.Error:
pass
for filename in filemap['others']:
try:
fn(filename, directory)
except shutil.Error:
pass
def main():
import argparse
parser = argparse.ArgumentParser(
description='Automatic wallpaper sorter by image dimensions')
parser.add_argument('origin', type=os.path.abspath)
parser.add_argument('destiny', type=os.path.abspath)
parser.add_argument('-m', '--mv', action='store_const', const=shutil.move, default=shutil.copy)
args = parser.parse_args()
scatter(split(args.origin), args.destiny, args.mv)
if __name__ == '__main__':
main()
|
Add simple script to sort images#!/usr/bin/env python3
import os
import shutil
from PIL import Image
whitelist = (
(1366, 768),
(1600, 900),
(1680, 1050),
(1920, 1080),
(1920, 1200),
)
def split(directory):
filemap = {dimensions: set() for dimensions in whitelist}
filemap['others'] = set()
makepath = lambda filename: os.path.join(directory, filename)
for filename in os.listdir(directory):
abspath = makepath(filename)
if not os.path.isfile(abspath):
continue
image = Image.open(abspath)
if image.size in whitelist:
filemap[image.size].add(abspath)
else:
filemap['others'].add(abspath)
return filemap
def scatter(filemap, directory, fn):
makepath = lambda resolution: os.path.join(directory, '%dx%d' % resolution)
for resolution in whitelist:
if resolution not in filemap or len(filemap[resolution]) == 0:
continue
abspath = makepath(resolution)
os.makedirs(abspath, exist_ok=True)
for filename in filemap[resolution]:
try:
fn(filename, abspath)
except shutil.Error:
pass
for filename in filemap['others']:
try:
fn(filename, directory)
except shutil.Error:
pass
def main():
import argparse
parser = argparse.ArgumentParser(
description='Automatic wallpaper sorter by image dimensions')
parser.add_argument('origin', type=os.path.abspath)
parser.add_argument('destiny', type=os.path.abspath)
parser.add_argument('-m', '--mv', action='store_const', const=shutil.move, default=shutil.copy)
args = parser.parse_args()
scatter(split(args.origin), args.destiny, args.mv)
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add simple script to sort images<commit_after>#!/usr/bin/env python3
import os
import shutil
from PIL import Image
whitelist = (
(1366, 768),
(1600, 900),
(1680, 1050),
(1920, 1080),
(1920, 1200),
)
def split(directory):
filemap = {dimensions: set() for dimensions in whitelist}
filemap['others'] = set()
makepath = lambda filename: os.path.join(directory, filename)
for filename in os.listdir(directory):
abspath = makepath(filename)
if not os.path.isfile(abspath):
continue
image = Image.open(abspath)
if image.size in whitelist:
filemap[image.size].add(abspath)
else:
filemap['others'].add(abspath)
return filemap
def scatter(filemap, directory, fn):
makepath = lambda resolution: os.path.join(directory, '%dx%d' % resolution)
for resolution in whitelist:
if resolution not in filemap or len(filemap[resolution]) == 0:
continue
abspath = makepath(resolution)
os.makedirs(abspath, exist_ok=True)
for filename in filemap[resolution]:
try:
fn(filename, abspath)
except shutil.Error:
pass
for filename in filemap['others']:
try:
fn(filename, directory)
except shutil.Error:
pass
def main():
import argparse
parser = argparse.ArgumentParser(
description='Automatic wallpaper sorter by image dimensions')
parser.add_argument('origin', type=os.path.abspath)
parser.add_argument('destiny', type=os.path.abspath)
parser.add_argument('-m', '--mv', action='store_const', const=shutil.move, default=shutil.copy)
args = parser.parse_args()
scatter(split(args.origin), args.destiny, args.mv)
if __name__ == '__main__':
main()
|
|
004d8fc6edae142cff7d26e53a79183b1ca29a5b
|
src/greplin/defer/wait.py
|
src/greplin/defer/wait.py
|
# Copyright 2010 Greplin, Inc. All Rights Reserved.
"""Mixin for waiting on deferreds, and cancelling them if needed."""
class WaitMixin(object):
"""Mixin for waiting on deferreds, and cancelling them if needed."""
__currentWait = None
def _wait(self, deferred):
"""Waits for the given deferred."""
self.__currentWait = deferred
if deferred:
return deferred.addBoth(self.__clearWait)
def __clearWait(self, _):
"""Clears the current wait."""
self.__currentWait = None
def _cancelWait(self):
"""Cancels the deferred currently being waited for."""
if self.__currentWait:
self.__currentWait.cancel()
|
Migrate greplin.defer's remaining used code to greplin-twisted-utils
|
Migrate greplin.defer's remaining used code to greplin-twisted-utils
|
Python
|
apache-2.0
|
Cue/greplin-twisted-utils
|
Migrate greplin.defer's remaining used code to greplin-twisted-utils
|
# Copyright 2010 Greplin, Inc. All Rights Reserved.
"""Mixin for waiting on deferreds, and cancelling them if needed."""
class WaitMixin(object):
"""Mixin for waiting on deferreds, and cancelling them if needed."""
__currentWait = None
def _wait(self, deferred):
"""Waits for the given deferred."""
self.__currentWait = deferred
if deferred:
return deferred.addBoth(self.__clearWait)
def __clearWait(self, _):
"""Clears the current wait."""
self.__currentWait = None
def _cancelWait(self):
"""Cancels the deferred currently being waited for."""
if self.__currentWait:
self.__currentWait.cancel()
|
<commit_before><commit_msg>Migrate greplin.defer's remaining used code to greplin-twisted-utils<commit_after>
|
# Copyright 2010 Greplin, Inc. All Rights Reserved.
"""Mixin for waiting on deferreds, and cancelling them if needed."""
class WaitMixin(object):
"""Mixin for waiting on deferreds, and cancelling them if needed."""
__currentWait = None
def _wait(self, deferred):
"""Waits for the given deferred."""
self.__currentWait = deferred
if deferred:
return deferred.addBoth(self.__clearWait)
def __clearWait(self, _):
"""Clears the current wait."""
self.__currentWait = None
def _cancelWait(self):
"""Cancels the deferred currently being waited for."""
if self.__currentWait:
self.__currentWait.cancel()
|
Migrate greplin.defer's remaining used code to greplin-twisted-utils# Copyright 2010 Greplin, Inc. All Rights Reserved.
"""Mixin for waiting on deferreds, and cancelling them if needed."""
class WaitMixin(object):
"""Mixin for waiting on deferreds, and cancelling them if needed."""
__currentWait = None
def _wait(self, deferred):
"""Waits for the given deferred."""
self.__currentWait = deferred
if deferred:
return deferred.addBoth(self.__clearWait)
def __clearWait(self, _):
"""Clears the current wait."""
self.__currentWait = None
def _cancelWait(self):
"""Cancels the deferred currently being waited for."""
if self.__currentWait:
self.__currentWait.cancel()
|
<commit_before><commit_msg>Migrate greplin.defer's remaining used code to greplin-twisted-utils<commit_after># Copyright 2010 Greplin, Inc. All Rights Reserved.
"""Mixin for waiting on deferreds, and cancelling them if needed."""
class WaitMixin(object):
"""Mixin for waiting on deferreds, and cancelling them if needed."""
__currentWait = None
def _wait(self, deferred):
"""Waits for the given deferred."""
self.__currentWait = deferred
if deferred:
return deferred.addBoth(self.__clearWait)
def __clearWait(self, _):
"""Clears the current wait."""
self.__currentWait = None
def _cancelWait(self):
"""Cancels the deferred currently being waited for."""
if self.__currentWait:
self.__currentWait.cancel()
|
|
6a08dc8ae70ebb7d759514991033a54e35ef0a93
|
bin/desi_make_bricks.py
|
bin/desi_make_bricks.py
|
#!/usr/bin/env python
#
# See top-level LICENSE file for Copyright information
#
# -*- coding: utf-8 -*-
import argparse
import desispec.io
def main():
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--fibermap', default = None, metavar = 'FILE',
help = 'Filename containing fibermap to read.')
args = parser.parse_args()
if args.fibermap is None:
print 'Missing required fibermap argument.'
return -1
fibermap,hdr = desispec.io.read_fibermap(args.fibermap)
print fibermap.dtype
if __name__ == '__main__':
main()
|
#!/usr/bin/env python
#
# See top-level LICENSE file for Copyright information
#
# -*- coding: utf-8 -*-
import argparse
import os.path
import glob
import desispec.io
def main():
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--verbose', action = 'store_true',
help = 'Provide verbose reporting of progress.')
parser.add_argument('--night', default = None, metavar = 'YYYYMMDD',
help = 'Night to process in the format YYYYMMDD')
args = parser.parse_args()
if args.night is None:
print 'Missing required night argument.'
return -1
try:
# Loop over exposures available for this night.
for exposure in desispec.io.get_exposures(args.night):
# Ignore exposures with no fibermap, assuming they are calibration data.
fibermap_path = desispec.io.findfile(filetype = 'fibermap', night = args.night,
expid = exposure)
print fibermap_path
if not os.path.exists(fibermap_path):
if args.verbose:
print 'Skipping exposure %d with no fibermap.' % exposure
continue
# Open the fibermap.
fibermap_data,fibermap_hdr = desispec.io.read_fibermap(fibermap_path)
print fibermap_data.dtype
# Look for cframes associated with this exposure.
cframe_path = desispec.io.findfile(filetype = 'cframe',night = args.night,
expid = exposure, camera = '*')
for entry in glob.glob(cframe_path):
print entry
except RuntimeError,e:
print str(e)
return -2
if __name__ == '__main__':
main()
|
Update brick maker to use new meta functionality
|
Update brick maker to use new meta functionality
|
Python
|
bsd-3-clause
|
profxj/desispec,timahutchinson/desispec,profxj/desispec,gdhungana/desispec,desihub/desispec,gdhungana/desispec,timahutchinson/desispec,desihub/desispec
|
#!/usr/bin/env python
#
# See top-level LICENSE file for Copyright information
#
# -*- coding: utf-8 -*-
import argparse
import desispec.io
def main():
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--fibermap', default = None, metavar = 'FILE',
help = 'Filename containing fibermap to read.')
args = parser.parse_args()
if args.fibermap is None:
print 'Missing required fibermap argument.'
return -1
fibermap,hdr = desispec.io.read_fibermap(args.fibermap)
print fibermap.dtype
if __name__ == '__main__':
main()
Update brick maker to use new meta functionality
|
#!/usr/bin/env python
#
# See top-level LICENSE file for Copyright information
#
# -*- coding: utf-8 -*-
import argparse
import os.path
import glob
import desispec.io
def main():
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--verbose', action = 'store_true',
help = 'Provide verbose reporting of progress.')
parser.add_argument('--night', default = None, metavar = 'YYYYMMDD',
help = 'Night to process in the format YYYYMMDD')
args = parser.parse_args()
if args.night is None:
print 'Missing required night argument.'
return -1
try:
# Loop over exposures available for this night.
for exposure in desispec.io.get_exposures(args.night):
# Ignore exposures with no fibermap, assuming they are calibration data.
fibermap_path = desispec.io.findfile(filetype = 'fibermap', night = args.night,
expid = exposure)
print fibermap_path
if not os.path.exists(fibermap_path):
if args.verbose:
print 'Skipping exposure %d with no fibermap.' % exposure
continue
# Open the fibermap.
fibermap_data,fibermap_hdr = desispec.io.read_fibermap(fibermap_path)
print fibermap_data.dtype
# Look for cframes associated with this exposure.
cframe_path = desispec.io.findfile(filetype = 'cframe',night = args.night,
expid = exposure, camera = '*')
for entry in glob.glob(cframe_path):
print entry
except RuntimeError,e:
print str(e)
return -2
if __name__ == '__main__':
main()
|
<commit_before>#!/usr/bin/env python
#
# See top-level LICENSE file for Copyright information
#
# -*- coding: utf-8 -*-
import argparse
import desispec.io
def main():
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--fibermap', default = None, metavar = 'FILE',
help = 'Filename containing fibermap to read.')
args = parser.parse_args()
if args.fibermap is None:
print 'Missing required fibermap argument.'
return -1
fibermap,hdr = desispec.io.read_fibermap(args.fibermap)
print fibermap.dtype
if __name__ == '__main__':
main()
<commit_msg>Update brick maker to use new meta functionality<commit_after>
|
#!/usr/bin/env python
#
# See top-level LICENSE file for Copyright information
#
# -*- coding: utf-8 -*-
import argparse
import os.path
import glob
import desispec.io
def main():
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--verbose', action = 'store_true',
help = 'Provide verbose reporting of progress.')
parser.add_argument('--night', default = None, metavar = 'YYYYMMDD',
help = 'Night to process in the format YYYYMMDD')
args = parser.parse_args()
if args.night is None:
print 'Missing required night argument.'
return -1
try:
# Loop over exposures available for this night.
for exposure in desispec.io.get_exposures(args.night):
# Ignore exposures with no fibermap, assuming they are calibration data.
fibermap_path = desispec.io.findfile(filetype = 'fibermap', night = args.night,
expid = exposure)
print fibermap_path
if not os.path.exists(fibermap_path):
if args.verbose:
print 'Skipping exposure %d with no fibermap.' % exposure
continue
# Open the fibermap.
fibermap_data,fibermap_hdr = desispec.io.read_fibermap(fibermap_path)
print fibermap_data.dtype
# Look for cframes associated with this exposure.
cframe_path = desispec.io.findfile(filetype = 'cframe',night = args.night,
expid = exposure, camera = '*')
for entry in glob.glob(cframe_path):
print entry
except RuntimeError,e:
print str(e)
return -2
if __name__ == '__main__':
main()
|
#!/usr/bin/env python
#
# See top-level LICENSE file for Copyright information
#
# -*- coding: utf-8 -*-
import argparse
import desispec.io
def main():
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--fibermap', default = None, metavar = 'FILE',
help = 'Filename containing fibermap to read.')
args = parser.parse_args()
if args.fibermap is None:
print 'Missing required fibermap argument.'
return -1
fibermap,hdr = desispec.io.read_fibermap(args.fibermap)
print fibermap.dtype
if __name__ == '__main__':
main()
Update brick maker to use new meta functionality#!/usr/bin/env python
#
# See top-level LICENSE file for Copyright information
#
# -*- coding: utf-8 -*-
import argparse
import os.path
import glob
import desispec.io
def main():
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--verbose', action = 'store_true',
help = 'Provide verbose reporting of progress.')
parser.add_argument('--night', default = None, metavar = 'YYYYMMDD',
help = 'Night to process in the format YYYYMMDD')
args = parser.parse_args()
if args.night is None:
print 'Missing required night argument.'
return -1
try:
# Loop over exposures available for this night.
for exposure in desispec.io.get_exposures(args.night):
# Ignore exposures with no fibermap, assuming they are calibration data.
fibermap_path = desispec.io.findfile(filetype = 'fibermap', night = args.night,
expid = exposure)
print fibermap_path
if not os.path.exists(fibermap_path):
if args.verbose:
print 'Skipping exposure %d with no fibermap.' % exposure
continue
# Open the fibermap.
fibermap_data,fibermap_hdr = desispec.io.read_fibermap(fibermap_path)
print fibermap_data.dtype
# Look for cframes associated with this exposure.
cframe_path = desispec.io.findfile(filetype = 'cframe',night = args.night,
expid = exposure, camera = '*')
for entry in glob.glob(cframe_path):
print entry
except RuntimeError,e:
print str(e)
return -2
if __name__ == '__main__':
main()
|
<commit_before>#!/usr/bin/env python
#
# See top-level LICENSE file for Copyright information
#
# -*- coding: utf-8 -*-
import argparse
import desispec.io
def main():
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--fibermap', default = None, metavar = 'FILE',
help = 'Filename containing fibermap to read.')
args = parser.parse_args()
if args.fibermap is None:
print 'Missing required fibermap argument.'
return -1
fibermap,hdr = desispec.io.read_fibermap(args.fibermap)
print fibermap.dtype
if __name__ == '__main__':
main()
<commit_msg>Update brick maker to use new meta functionality<commit_after>#!/usr/bin/env python
#
# See top-level LICENSE file for Copyright information
#
# -*- coding: utf-8 -*-
import argparse
import os.path
import glob
import desispec.io
def main():
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--verbose', action = 'store_true',
help = 'Provide verbose reporting of progress.')
parser.add_argument('--night', default = None, metavar = 'YYYYMMDD',
help = 'Night to process in the format YYYYMMDD')
args = parser.parse_args()
if args.night is None:
print 'Missing required night argument.'
return -1
try:
# Loop over exposures available for this night.
for exposure in desispec.io.get_exposures(args.night):
# Ignore exposures with no fibermap, assuming they are calibration data.
fibermap_path = desispec.io.findfile(filetype = 'fibermap', night = args.night,
expid = exposure)
print fibermap_path
if not os.path.exists(fibermap_path):
if args.verbose:
print 'Skipping exposure %d with no fibermap.' % exposure
continue
# Open the fibermap.
fibermap_data,fibermap_hdr = desispec.io.read_fibermap(fibermap_path)
print fibermap_data.dtype
# Look for cframes associated with this exposure.
cframe_path = desispec.io.findfile(filetype = 'cframe',night = args.night,
expid = exposure, camera = '*')
for entry in glob.glob(cframe_path):
print entry
except RuntimeError,e:
print str(e)
return -2
if __name__ == '__main__':
main()
|
af127fa56d2ce9304034e19ed2e0a598d10bebba
|
tests/tests_cyclus.py
|
tests/tests_cyclus.py
|
#! /usr/bin/env python
import os
from tests_list import sim_files
from cyclus_tools import run_cyclus, db_comparator
"""Tests"""
def test_cyclus():
"""Test for all inputs in sim_files. Checks if reference and current cyclus
    outputs are the same.
WARNING: the tests require cyclus executable to be included in PATH
"""
cwd = os.getcwd()
for sim_input,bench_db in sim_files:
temp_output = [(sim_input, "./output_temp.h5")]
yield run_cyclus("cyclus", cwd, temp_output)
        yield db_comparator(bench_db, "./output_temp.h5")
os.remove("./output_temp.h5")
|
Add main regression test file for cyclus
|
Add main regression test file for cyclus
|
Python
|
bsd-3-clause
|
Baaaaam/cyBaM,gonuke/cycamore,gonuke/cycamore,cyclus/cycaless,Baaaaam/cyCLASS,gonuke/cycamore,rwcarlsen/cycamore,gonuke/cycamore,jlittell/cycamore,rwcarlsen/cycamore,Baaaaam/cyBaM,Baaaaam/cycamore,Baaaaam/cyBaM,jlittell/cycamore,Baaaaam/cycamore,jlittell/cycamore,Baaaaam/cycamore,Baaaaam/cyCLASS,jlittell/cycamore,cyclus/cycaless,rwcarlsen/cycamore,Baaaaam/cyBaM,rwcarlsen/cycamore
|
Add main regression test file for cyclus
|
#! /usr/bin/env python
import os
from tests_list import sim_files
from cyclus_tools import run_cyclus, db_comparator
"""Tests"""
def test_cyclus():
"""Test for all inputs in sim_files. Checks if reference and current cyclus
    outputs are the same.
WARNING: the tests require cyclus executable to be included in PATH
"""
cwd = os.getcwd()
for sim_input,bench_db in sim_files:
temp_output = [(sim_input, "./output_temp.h5")]
yield run_cyclus("cyclus", cwd, temp_output)
        yield db_comparator(bench_db, "./output_temp.h5")
os.remove("./output_temp.h5")
|
<commit_before><commit_msg>Add main regression test file for cyclus<commit_after>
|
#! /usr/bin/env python
import os
from tests_list import sim_files
from cyclus_tools import run_cyclus, db_comparator
"""Tests"""
def test_cyclus():
"""Test for all inputs in sim_files. Checks if reference and current cyclus
    outputs are the same.
WARNING: the tests require cyclus executable to be included in PATH
"""
cwd = os.getcwd()
for sim_input,bench_db in sim_files:
temp_output = [(sim_input, "./output_temp.h5")]
yield run_cyclus("cyclus", cwd, temp_output)
        yield db_comparator(bench_db, "./output_temp.h5")
os.remove("./output_temp.h5")
|
Add main regression test file for cyclus#! /usr/bin/env python
import os
from tests_list import sim_files
from cyclus_tools import run_cyclus, db_comparator
"""Tests"""
def test_cyclus():
"""Test for all inputs in sim_files. Checks if reference and current cyclus
    outputs are the same.
WARNING: the tests require cyclus executable to be included in PATH
"""
cwd = os.getcwd()
for sim_input,bench_db in sim_files:
temp_output = [(sim_input, "./output_temp.h5")]
yield run_cyclus("cyclus", cwd, temp_output)
        yield db_comparator(bench_db, "./output_temp.h5")
os.remove("./output_temp.h5")
|
<commit_before><commit_msg>Add main regression test file for cyclus<commit_after>#! /usr/bin/env python
import os
from tests_list import sim_files
from cyclus_tools import run_cyclus, db_comparator
"""Tests"""
def test_cyclus():
"""Test for all inputs in sim_files. Checks if reference and current cyclus
    outputs are the same.
WARNING: the tests require cyclus executable to be included in PATH
"""
cwd = os.getcwd()
for sim_input,bench_db in sim_files:
temp_output = [(sim_input, "./output_temp.h5")]
yield run_cyclus("cyclus", cwd, temp_output)
        yield db_comparator(bench_db, "./output_temp.h5")
os.remove("./output_temp.h5")
|
|
ad5f851b7959f7bf09d7cd669d8db126fa962982
|
pymatgen/symmetry/tests/test_spacegroup.py
|
pymatgen/symmetry/tests/test_spacegroup.py
|
#!/usr/bin/env python
'''
Created on Mar 12, 2012
'''
from __future__ import division
__author__="Shyue Ping Ong"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyue@mit.edu"
__date__ = "Mar 12, 2012"
import unittest
import os
from pymatgen.core.structure import PeriodicSite
from pymatgen.symmetry.spacegroup import Spacegroup
from pymatgen.io.vaspio import Poscar
from pymatgen.symmetry.spglib_adaptor import SymmetryFinder
import pymatgen
test_dir = os.path.join(os.path.dirname(os.path.abspath(pymatgen.__file__)), '..', 'test_files')
class SpacegroupTest(unittest.TestCase):
def setUp(self):
p = Poscar.from_file(os.path.join(test_dir, 'POSCAR'))
self.structure = p.struct
self.sg1 = SymmetryFinder(self.structure, 0.001).get_spacegroup()
self.sg2 = Spacegroup.from_spacegroup_number(62)
def test_are_symmetrically_equivalent(self):
sites1 = [self.structure[i] for i in [0,1]]
sites2 = [self.structure[i] for i in [2,3]]
self.assertTrue(self.sg1.are_symmetrically_equivalent(sites1, sites2, 1e-3))
self.assertTrue(self.sg2.are_symmetrically_equivalent(sites1, sites2, 1e-3))
sites1 = [self.structure[i] for i in [0,1]]
sites2 = [self.structure[i] for i in [0,2]]
self.assertFalse(self.sg1.are_symmetrically_equivalent(sites1, sites2, 1e-3))
self.assertFalse(self.sg2.are_symmetrically_equivalent(sites1, sites2, 1e-3))
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main()
|
Add a unittest for spacegroup. Still very basic.
|
Add a unittest for spacegroup. Still very basic.
|
Python
|
mit
|
migueldiascosta/pymatgen,yanikou19/pymatgen,sonium0/pymatgen,migueldiascosta/pymatgen,ctoher/pymatgen,sonium0/pymatgen,Dioptas/pymatgen,rousseab/pymatgen,Bismarrck/pymatgen,yanikou19/pymatgen,Bismarrck/pymatgen,sonium0/pymatgen,rousseab/pymatgen,Bismarrck/pymatgen,Dioptas/pymatgen,Bismarrck/pymatgen,ctoher/pymatgen,migueldiascosta/pymatgen,yanikou19/pymatgen,Bismarrck/pymatgen,ctoher/pymatgen,rousseab/pymatgen
|
Add a unittest for spacegroup. Still very basic.
|
#!/usr/bin/env python
'''
Created on Mar 12, 2012
'''
from __future__ import division
__author__="Shyue Ping Ong"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyue@mit.edu"
__date__ = "Mar 12, 2012"
import unittest
import os
from pymatgen.core.structure import PeriodicSite
from pymatgen.symmetry.spacegroup import Spacegroup
from pymatgen.io.vaspio import Poscar
from pymatgen.symmetry.spglib_adaptor import SymmetryFinder
import pymatgen
test_dir = os.path.join(os.path.dirname(os.path.abspath(pymatgen.__file__)), '..', 'test_files')
class SpacegroupTest(unittest.TestCase):
def setUp(self):
p = Poscar.from_file(os.path.join(test_dir, 'POSCAR'))
self.structure = p.struct
self.sg1 = SymmetryFinder(self.structure, 0.001).get_spacegroup()
self.sg2 = Spacegroup.from_spacegroup_number(62)
def test_are_symmetrically_equivalent(self):
sites1 = [self.structure[i] for i in [0,1]]
sites2 = [self.structure[i] for i in [2,3]]
self.assertTrue(self.sg1.are_symmetrically_equivalent(sites1, sites2, 1e-3))
self.assertTrue(self.sg2.are_symmetrically_equivalent(sites1, sites2, 1e-3))
sites1 = [self.structure[i] for i in [0,1]]
sites2 = [self.structure[i] for i in [0,2]]
self.assertFalse(self.sg1.are_symmetrically_equivalent(sites1, sites2, 1e-3))
self.assertFalse(self.sg2.are_symmetrically_equivalent(sites1, sites2, 1e-3))
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main()
|
<commit_before><commit_msg>Add a unittest for spacegroup. Still very basic.<commit_after>
|
#!/usr/bin/env python
'''
Created on Mar 12, 2012
'''
from __future__ import division
__author__="Shyue Ping Ong"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyue@mit.edu"
__date__ = "Mar 12, 2012"
import unittest
import os
from pymatgen.core.structure import PeriodicSite
from pymatgen.symmetry.spacegroup import Spacegroup
from pymatgen.io.vaspio import Poscar
from pymatgen.symmetry.spglib_adaptor import SymmetryFinder
import pymatgen
test_dir = os.path.join(os.path.dirname(os.path.abspath(pymatgen.__file__)), '..', 'test_files')
class SpacegroupTest(unittest.TestCase):
def setUp(self):
p = Poscar.from_file(os.path.join(test_dir, 'POSCAR'))
self.structure = p.struct
self.sg1 = SymmetryFinder(self.structure, 0.001).get_spacegroup()
self.sg2 = Spacegroup.from_spacegroup_number(62)
def test_are_symmetrically_equivalent(self):
sites1 = [self.structure[i] for i in [0,1]]
sites2 = [self.structure[i] for i in [2,3]]
self.assertTrue(self.sg1.are_symmetrically_equivalent(sites1, sites2, 1e-3))
self.assertTrue(self.sg2.are_symmetrically_equivalent(sites1, sites2, 1e-3))
sites1 = [self.structure[i] for i in [0,1]]
sites2 = [self.structure[i] for i in [0,2]]
self.assertFalse(self.sg1.are_symmetrically_equivalent(sites1, sites2, 1e-3))
self.assertFalse(self.sg2.are_symmetrically_equivalent(sites1, sites2, 1e-3))
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main()
|
Add a unittest for spacegroup. Still very basic.#!/usr/bin/env python
'''
Created on Mar 12, 2012
'''
from __future__ import division
__author__="Shyue Ping Ong"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyue@mit.edu"
__date__ = "Mar 12, 2012"
import unittest
import os
from pymatgen.core.structure import PeriodicSite
from pymatgen.symmetry.spacegroup import Spacegroup
from pymatgen.io.vaspio import Poscar
from pymatgen.symmetry.spglib_adaptor import SymmetryFinder
import pymatgen
test_dir = os.path.join(os.path.dirname(os.path.abspath(pymatgen.__file__)), '..', 'test_files')
class SpacegroupTest(unittest.TestCase):
def setUp(self):
p = Poscar.from_file(os.path.join(test_dir, 'POSCAR'))
self.structure = p.struct
self.sg1 = SymmetryFinder(self.structure, 0.001).get_spacegroup()
self.sg2 = Spacegroup.from_spacegroup_number(62)
def test_are_symmetrically_equivalent(self):
sites1 = [self.structure[i] for i in [0,1]]
sites2 = [self.structure[i] for i in [2,3]]
self.assertTrue(self.sg1.are_symmetrically_equivalent(sites1, sites2, 1e-3))
self.assertTrue(self.sg2.are_symmetrically_equivalent(sites1, sites2, 1e-3))
sites1 = [self.structure[i] for i in [0,1]]
sites2 = [self.structure[i] for i in [0,2]]
self.assertFalse(self.sg1.are_symmetrically_equivalent(sites1, sites2, 1e-3))
self.assertFalse(self.sg2.are_symmetrically_equivalent(sites1, sites2, 1e-3))
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main()
|
<commit_before><commit_msg>Add a unittest for spacegroup. Still very basic.<commit_after>#!/usr/bin/env python
'''
Created on Mar 12, 2012
'''
from __future__ import division
__author__="Shyue Ping Ong"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyue@mit.edu"
__date__ = "Mar 12, 2012"
import unittest
import os
from pymatgen.core.structure import PeriodicSite
from pymatgen.symmetry.spacegroup import Spacegroup
from pymatgen.io.vaspio import Poscar
from pymatgen.symmetry.spglib_adaptor import SymmetryFinder
import pymatgen
test_dir = os.path.join(os.path.dirname(os.path.abspath(pymatgen.__file__)), '..', 'test_files')
class SpacegroupTest(unittest.TestCase):
def setUp(self):
p = Poscar.from_file(os.path.join(test_dir, 'POSCAR'))
self.structure = p.struct
self.sg1 = SymmetryFinder(self.structure, 0.001).get_spacegroup()
self.sg2 = Spacegroup.from_spacegroup_number(62)
def test_are_symmetrically_equivalent(self):
sites1 = [self.structure[i] for i in [0,1]]
sites2 = [self.structure[i] for i in [2,3]]
self.assertTrue(self.sg1.are_symmetrically_equivalent(sites1, sites2, 1e-3))
self.assertTrue(self.sg2.are_symmetrically_equivalent(sites1, sites2, 1e-3))
sites1 = [self.structure[i] for i in [0,1]]
sites2 = [self.structure[i] for i in [0,2]]
self.assertFalse(self.sg1.are_symmetrically_equivalent(sites1, sites2, 1e-3))
self.assertFalse(self.sg2.are_symmetrically_equivalent(sites1, sites2, 1e-3))
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main()
|
|
e1e90a8d704666613e6f2f6aaf839724af05cc19
|
tools/create_token.py
|
tools/create_token.py
|
#!/usr/bin/env python
import argparse
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.backends import default_backend
import scitokens
# Arguments:
def add_args():
parser = argparse.ArgumentParser(description='Create a new SciToken')
parser.add_argument('claims', metavar='C', type=str, nargs='+',
help='Claims in the format key=value')
parser.add_argument('--keyfile',
help='Location of the private key file')
parser.add_argument('--key_id', help='The string key identifier')
parser.add_argument('--issuer', help="Issuer for the token")
args = parser.parse_args()
return args
def main():
args = add_args()
print(args)
with open(args.keyfile, "r") as fp:
private_key_contents = fp.read()
loaded_private_key = serialization.load_pem_private_key(
private_key_contents,
password=None, # Hey, it's a sample file committed to disk...
backend=default_backend()
)
token = scitokens.SciToken(key=loaded_private_key, key_id=args.key_id)
for claim in args.claims:
(key, value) = claim.split('=', 1)
token.update_claims({key: value})
serialized_token = token.serialize(issuer=args.issuer)
print(serialized_token)
if __name__ == "__main__":
main()
|
Add CLI tool for creating SciTokens.
|
Add CLI tool for creating SciTokens.
|
Python
|
apache-2.0
|
scitokens/scitokens,scitokens/scitokens
|
Add CLI tool for creating SciTokens.
|
#!/usr/bin/env python
import argparse
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.backends import default_backend
import scitokens
# Arguments:
def add_args():
parser = argparse.ArgumentParser(description='Create a new SciToken')
parser.add_argument('claims', metavar='C', type=str, nargs='+',
help='Claims in the format key=value')
parser.add_argument('--keyfile',
help='Location of the private key file')
parser.add_argument('--key_id', help='The string key identifier')
parser.add_argument('--issuer', help="Issuer for the token")
args = parser.parse_args()
return args
def main():
args = add_args()
print(args)
with open(args.keyfile, "r") as fp:
private_key_contents = fp.read()
loaded_private_key = serialization.load_pem_private_key(
private_key_contents,
password=None, # Hey, it's a sample file committed to disk...
backend=default_backend()
)
token = scitokens.SciToken(key=loaded_private_key, key_id=args.key_id)
for claim in args.claims:
(key, value) = claim.split('=', 1)
token.update_claims({key: value})
serialized_token = token.serialize(issuer=args.issuer)
print(serialized_token)
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Add CLI tool for creating SciTokens.<commit_after>
|
#!/usr/bin/env python
import argparse
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.backends import default_backend
import scitokens
# Arguments:
def add_args():
parser = argparse.ArgumentParser(description='Create a new SciToken')
parser.add_argument('claims', metavar='C', type=str, nargs='+',
help='Claims in the format key=value')
parser.add_argument('--keyfile',
help='Location of the private key file')
parser.add_argument('--key_id', help='The string key identifier')
parser.add_argument('--issuer', help="Issuer for the token")
args = parser.parse_args()
return args
def main():
args = add_args()
print(args)
with open(args.keyfile, "r") as fp:
private_key_contents = fp.read()
loaded_private_key = serialization.load_pem_private_key(
private_key_contents,
password=None, # Hey, it's a sample file committed to disk...
backend=default_backend()
)
token = scitokens.SciToken(key=loaded_private_key, key_id=args.key_id)
for claim in args.claims:
(key, value) = claim.split('=', 1)
token.update_claims({key: value})
serialized_token = token.serialize(issuer=args.issuer)
print(serialized_token)
if __name__ == "__main__":
main()
|
Add CLI tool for creating SciTokens.#!/usr/bin/env python
import argparse
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.backends import default_backend
import scitokens
# Arguments:
def add_args():
parser = argparse.ArgumentParser(description='Create a new SciToken')
parser.add_argument('claims', metavar='C', type=str, nargs='+',
help='Claims in the format key=value')
parser.add_argument('--keyfile',
help='Location of the private key file')
parser.add_argument('--key_id', help='The string key identifier')
parser.add_argument('--issuer', help="Issuer for the token")
args = parser.parse_args()
return args
def main():
args = add_args()
print(args)
with open(args.keyfile, "r") as fp:
private_key_contents = fp.read()
loaded_private_key = serialization.load_pem_private_key(
private_key_contents,
password=None, # Hey, it's a sample file committed to disk...
backend=default_backend()
)
token = scitokens.SciToken(key=loaded_private_key, key_id=args.key_id)
for claim in args.claims:
(key, value) = claim.split('=', 1)
token.update_claims({key: value})
serialized_token = token.serialize(issuer=args.issuer)
print(serialized_token)
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Add CLI tool for creating SciTokens.<commit_after>#!/usr/bin/env python
import argparse
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.backends import default_backend
import scitokens
# Arguments:
def add_args():
parser = argparse.ArgumentParser(description='Create a new SciToken')
parser.add_argument('claims', metavar='C', type=str, nargs='+',
help='Claims in the format key=value')
parser.add_argument('--keyfile',
help='Location of the private key file')
parser.add_argument('--key_id', help='The string key identifier')
parser.add_argument('--issuer', help="Issuer for the token")
args = parser.parse_args()
return args
def main():
args = add_args()
print(args)
with open(args.keyfile, "r") as fp:
private_key_contents = fp.read()
loaded_private_key = serialization.load_pem_private_key(
private_key_contents,
password=None, # Hey, it's a sample file committed to disk...
backend=default_backend()
)
token = scitokens.SciToken(key=loaded_private_key, key_id=args.key_id)
for claim in args.claims:
(key, value) = claim.split('=', 1)
token.update_claims({key: value})
serialized_token = token.serialize(issuer=args.issuer)
print(serialized_token)
if __name__ == "__main__":
main()
|
|
29f32ec5cc3b050d6307688995907d094966b369
|
indra/sources/sofia/make_sofia_ontology.py
|
indra/sources/sofia/make_sofia_ontology.py
|
import sys
import json
from os.path import join, dirname, abspath
from rdflib import Graph, Namespace, Literal
from indra.sources import sofia
# Note that this is just a placeholder, it doesn't resolve as a URL
sofia_ns = Namespace('http://cs.cmu.edu/sofia/')
indra_ns = 'http://sorger.med.harvard.edu/indra/'
indra_rel_ns = Namespace(indra_ns + 'relations/')
isa = indra_rel_ns.term('isa')
def save_ontology(g, path):
with open(path, 'wb') as out_file:
g_bytes = g.serialize(format='nt')
# Replace extra new lines in string and get rid of empty line at end
g_bytes = g_bytes.replace(b'\n\n', b'\n').strip()
# Split into rows and sort
rows = g_bytes.split(b'\n')
rows.sort()
g_bytes = b'\n'.join(rows)
out_file.write(g_bytes)
def build_ontology(ont_json, rdf_path):
G = Graph()
for top_key, entries in ont_json.items():
for entry_key, examples in entries.items():
if '/' in entry_key:
parent, child = entry_key.split('/', maxsplit=1)
parent_term = sofia_ns.term(parent)
child_term = sofia_ns.term(entry_key)
rel = (child_term, isa, parent_term)
G.add(rel)
save_ontology(G, rdf_path)
if __name__ == '__main__':
# Path to a SOFIA ontology JSON file
sofia_ont_json_file = sys.argv[1]
with open(sofia_ont_json_file, 'r') as fh:
sofia_ont_json = json.load(fh)
sofia_rdf_path = join(dirname(abspath(sofia.__file__)),
'sofia_ontology.rdf')
G = build_ontology(sofia_ont_json, sofia_rdf_path)
|
Add script to make SOFIA ontology
|
Add script to make SOFIA ontology
|
Python
|
bsd-2-clause
|
sorgerlab/indra,bgyori/indra,sorgerlab/belpy,pvtodorov/indra,pvtodorov/indra,sorgerlab/belpy,johnbachman/indra,bgyori/indra,johnbachman/belpy,pvtodorov/indra,johnbachman/belpy,sorgerlab/indra,pvtodorov/indra,johnbachman/indra,sorgerlab/indra,johnbachman/belpy,sorgerlab/belpy,johnbachman/indra,bgyori/indra
|
Add script to make SOFIA ontology
|
import sys
import json
from os.path import join, dirname, abspath
from rdflib import Graph, Namespace, Literal
from indra.sources import sofia
# Note that this is just a placeholder, it doesn't resolve as a URL
sofia_ns = Namespace('http://cs.cmu.edu/sofia/')
indra_ns = 'http://sorger.med.harvard.edu/indra/'
indra_rel_ns = Namespace(indra_ns + 'relations/')
isa = indra_rel_ns.term('isa')
def save_ontology(g, path):
with open(path, 'wb') as out_file:
g_bytes = g.serialize(format='nt')
# Replace extra new lines in string and get rid of empty line at end
g_bytes = g_bytes.replace(b'\n\n', b'\n').strip()
# Split into rows and sort
rows = g_bytes.split(b'\n')
rows.sort()
g_bytes = b'\n'.join(rows)
out_file.write(g_bytes)
def build_ontology(ont_json, rdf_path):
G = Graph()
for top_key, entries in ont_json.items():
for entry_key, examples in entries.items():
if '/' in entry_key:
parent, child = entry_key.split('/', maxsplit=1)
parent_term = sofia_ns.term(parent)
child_term = sofia_ns.term(entry_key)
rel = (child_term, isa, parent_term)
G.add(rel)
save_ontology(G, rdf_path)
if __name__ == '__main__':
# Path to a SOFIA ontology JSON file
sofia_ont_json_file = sys.argv[1]
with open(sofia_ont_json_file, 'r') as fh:
sofia_ont_json = json.load(fh)
sofia_rdf_path = join(dirname(abspath(sofia.__file__)),
'sofia_ontology.rdf')
G = build_ontology(sofia_ont_json, sofia_rdf_path)
|
<commit_before><commit_msg>Add script to make SOFIA ontology<commit_after>
|
import sys
import json
from os.path import join, dirname, abspath
from rdflib import Graph, Namespace, Literal
from indra.sources import sofia
# Note that this is just a placeholder, it doesn't resolve as a URL
sofia_ns = Namespace('http://cs.cmu.edu/sofia/')
indra_ns = 'http://sorger.med.harvard.edu/indra/'
indra_rel_ns = Namespace(indra_ns + 'relations/')
isa = indra_rel_ns.term('isa')
def save_ontology(g, path):
with open(path, 'wb') as out_file:
g_bytes = g.serialize(format='nt')
# Replace extra new lines in string and get rid of empty line at end
g_bytes = g_bytes.replace(b'\n\n', b'\n').strip()
# Split into rows and sort
rows = g_bytes.split(b'\n')
rows.sort()
g_bytes = b'\n'.join(rows)
out_file.write(g_bytes)
def build_ontology(ont_json, rdf_path):
G = Graph()
for top_key, entries in ont_json.items():
for entry_key, examples in entries.items():
if '/' in entry_key:
parent, child = entry_key.split('/', maxsplit=1)
parent_term = sofia_ns.term(parent)
child_term = sofia_ns.term(entry_key)
rel = (child_term, isa, parent_term)
G.add(rel)
save_ontology(G, rdf_path)
if __name__ == '__main__':
# Path to a SOFIA ontology JSON file
sofia_ont_json_file = sys.argv[1]
with open(sofia_ont_json_file, 'r') as fh:
sofia_ont_json = json.load(fh)
sofia_rdf_path = join(dirname(abspath(sofia.__file__)),
'sofia_ontology.rdf')
G = build_ontology(sofia_ont_json, sofia_rdf_path)
|
Add script to make SOFIA ontologyimport sys
import json
from os.path import join, dirname, abspath
from rdflib import Graph, Namespace, Literal
from indra.sources import sofia
# Note that this is just a placeholder, it doesn't resolve as a URL
sofia_ns = Namespace('http://cs.cmu.edu/sofia/')
indra_ns = 'http://sorger.med.harvard.edu/indra/'
indra_rel_ns = Namespace(indra_ns + 'relations/')
isa = indra_rel_ns.term('isa')
def save_ontology(g, path):
with open(path, 'wb') as out_file:
g_bytes = g.serialize(format='nt')
# Replace extra new lines in string and get rid of empty line at end
g_bytes = g_bytes.replace(b'\n\n', b'\n').strip()
# Split into rows and sort
rows = g_bytes.split(b'\n')
rows.sort()
g_bytes = b'\n'.join(rows)
out_file.write(g_bytes)
def build_ontology(ont_json, rdf_path):
G = Graph()
for top_key, entries in ont_json.items():
for entry_key, examples in entries.items():
if '/' in entry_key:
parent, child = entry_key.split('/', maxsplit=1)
parent_term = sofia_ns.term(parent)
child_term = sofia_ns.term(entry_key)
rel = (child_term, isa, parent_term)
G.add(rel)
save_ontology(G, rdf_path)
if __name__ == '__main__':
# Path to a SOFIA ontology JSON file
sofia_ont_json_file = sys.argv[1]
with open(sofia_ont_json_file, 'r') as fh:
sofia_ont_json = json.load(fh)
sofia_rdf_path = join(dirname(abspath(sofia.__file__)),
'sofia_ontology.rdf')
G = build_ontology(sofia_ont_json, sofia_rdf_path)
|
<commit_before><commit_msg>Add script to make SOFIA ontology<commit_after>import sys
import json
from os.path import join, dirname, abspath
from rdflib import Graph, Namespace, Literal
from indra.sources import sofia
# Note that this is just a placeholder, it doesn't resolve as a URL
sofia_ns = Namespace('http://cs.cmu.edu/sofia/')
indra_ns = 'http://sorger.med.harvard.edu/indra/'
indra_rel_ns = Namespace(indra_ns + 'relations/')
isa = indra_rel_ns.term('isa')
def save_ontology(g, path):
with open(path, 'wb') as out_file:
g_bytes = g.serialize(format='nt')
# Replace extra new lines in string and get rid of empty line at end
g_bytes = g_bytes.replace(b'\n\n', b'\n').strip()
# Split into rows and sort
rows = g_bytes.split(b'\n')
rows.sort()
g_bytes = b'\n'.join(rows)
out_file.write(g_bytes)
def build_ontology(ont_json, rdf_path):
G = Graph()
for top_key, entries in ont_json.items():
for entry_key, examples in entries.items():
if '/' in entry_key:
parent, child = entry_key.split('/', maxsplit=1)
parent_term = sofia_ns.term(parent)
child_term = sofia_ns.term(entry_key)
rel = (child_term, isa, parent_term)
G.add(rel)
save_ontology(G, rdf_path)
if __name__ == '__main__':
# Path to a SOFIA ontology JSON file
sofia_ont_json_file = sys.argv[1]
with open(sofia_ont_json_file, 'r') as fh:
sofia_ont_json = json.load(fh)
sofia_rdf_path = join(dirname(abspath(sofia.__file__)),
'sofia_ontology.rdf')
G = build_ontology(sofia_ont_json, sofia_rdf_path)
|
|
7a01615d50ec374687a5676e53e103eff9082b3e
|
kivy/core/clipboard/clipboard_xsel.py
|
kivy/core/clipboard/clipboard_xsel.py
|
'''
Clipboard xsel: an implementation of the Clipboard using xsel command line tool.
'''
__all__ = ('ClipboardXsel', )
from kivy.utils import platform
from kivy.core.clipboard import ClipboardBase
if platform != 'linux':
raise SystemError('unsupported platform for xsel clipboard')
try:
import subprocess
p = subprocess.Popen(['xsel'], stdout=subprocess.PIPE)
p.communicate()
except:
raise
class ClipboardXsel(ClipboardBase):
def get(self, mimetype='text/plain'):
p = subprocess.Popen(['xsel', '-bo'], stdout=subprocess.PIPE)
data, _ = p.communicate()
return data
def put(self, data, mimetype='text/plain'):
p = subprocess.Popen(['xsel', '-bi'], stdin=subprocess.PIPE)
p.communicate(data)
def get_types(self):
return list('text/plain',)
|
'''
Clipboard xsel: an implementation of the Clipboard using xsel command line tool.
'''
__all__ = ('ClipboardXsel', )
from kivy.utils import platform
from kivy.core.clipboard import ClipboardBase
if platform != 'linux':
raise SystemError('unsupported platform for xsel clipboard')
try:
import subprocess
p = subprocess.Popen(['xsel'], stdout=subprocess.PIPE)
p.communicate()
except:
raise
class ClipboardXsel(ClipboardBase):
def get(self, mimetype='text/plain'):
p = subprocess.Popen(['xsel', '-bo'], stdout=subprocess.PIPE)
data, _ = p.communicate()
return data
def put(self, data, mimetype='text/plain'):
p = subprocess.Popen(['xsel', '-bi'], stdin=subprocess.PIPE)
p.communicate(data)
def get_types(self):
return [u'text/plain']
|
Fix return get_types for ClipboardXsel
|
Fix return get_types for ClipboardXsel
|
Python
|
mit
|
gonzafirewall/kivy,LogicalDash/kivy,aron-bordin/kivy,jffernandez/kivy,VinGarcia/kivy,vipulroxx/kivy,jehutting/kivy,rafalo1333/kivy,matham/kivy,matham/kivy,MiyamotoAkira/kivy,mSenyor/kivy,kivy/kivy,thezawad/kivy,bionoid/kivy,LogicalDash/kivy,aron-bordin/kivy,bionoid/kivy,bionoid/kivy,KeyWeeUsr/kivy,arlowhite/kivy,Farkal/kivy,adamkh/kivy,arcticshores/kivy,yoelk/kivy,manthansharma/kivy,denys-duchier/kivy,kivy/kivy,jehutting/kivy,Shyam10/kivy,arlowhite/kivy,viralpandey/kivy,KeyWeeUsr/kivy,thezawad/kivy,adamkh/kivy,Farkal/kivy,rafalo1333/kivy,manthansharma/kivy,xpndlabs/kivy,darkopevec/kivy,tony/kivy,LogicalDash/kivy,habibmasuro/kivy,kivy/kivy,adamkh/kivy,angryrancor/kivy,akshayaurora/kivy,jffernandez/kivy,jehutting/kivy,Cheaterman/kivy,gonzafirewall/kivy,jkankiewicz/kivy,ernstp/kivy,jkankiewicz/kivy,arcticshores/kivy,rnixx/kivy,arlowhite/kivy,viralpandey/kivy,darkopevec/kivy,LogicalDash/kivy,adamkh/kivy,iamutkarshtiwari/kivy,gonzafirewall/kivy,manashmndl/kivy,CuriousLearner/kivy,jffernandez/kivy,manashmndl/kivy,cbenhagen/kivy,matham/kivy,rnixx/kivy,autosportlabs/kivy,jegger/kivy,bliz937/kivy,yoelk/kivy,Shyam10/kivy,kived/kivy,kived/kivy,vitorio/kivy,Cheaterman/kivy,akshayaurora/kivy,xiaoyanit/kivy,arcticshores/kivy,youprofit/kivy,edubrunaldi/kivy,akshayaurora/kivy,iamutkarshtiwari/kivy,denys-duchier/kivy,vipulroxx/kivy,janssen/kivy,Ramalus/kivy,ernstp/kivy,angryrancor/kivy,Farkal/kivy,gonzafirewall/kivy,inclement/kivy,Shyam10/kivy,xiaoyanit/kivy,manthansharma/kivy,ernstp/kivy,inclement/kivy,el-ethan/kivy,bionoid/kivy,arcticshores/kivy,Ramalus/kivy,jffernandez/kivy,andnovar/kivy,autosportlabs/kivy,kived/kivy,denys-duchier/kivy,Ramalus/kivy,mSenyor/kivy,jegger/kivy,xpndlabs/kivy,manthansharma/kivy,el-ethan/kivy,bob-the-hamster/kivy,dirkjot/kivy,tony/kivy,ernstp/kivy,el-ethan/kivy,janssen/kivy,cbenhagen/kivy,inclement/kivy,bob-the-hamster/kivy,CuriousLearner/kivy,matham/kivy,dirkjot/kivy,VinGarcia/kivy,viralpandey/kivy,KeyWeeUsr/kivy,angryrancor/kivy,Cheaterman/kivy,iamutkarshtiwari/kivy,edubrunaldi/kivy,andnovar/kivy,edubrunaldi/kivy,janssen/kivy,darkopevec/kivy,MiyamotoAkira/kivy,rnixx/kivy,jegger/kivy,bob-the-hamster/kivy,bhargav2408/kivy,bhargav2408/kivy,yoelk/kivy,vitorio/kivy,darkopevec/kivy,rafalo1333/kivy,manashmndl/kivy,youprofit/kivy,aron-bordin/kivy,jegger/kivy,tony/kivy,janssen/kivy,MiyamotoAkira/kivy,mSenyor/kivy,vitorio/kivy,bliz937/kivy,autosportlabs/kivy,vipulroxx/kivy,habibmasuro/kivy,andnovar/kivy,Cheaterman/kivy,bhargav2408/kivy,vipulroxx/kivy,youprofit/kivy,bliz937/kivy,KeyWeeUsr/kivy,yoelk/kivy,dirkjot/kivy,denys-duchier/kivy,thezawad/kivy,Shyam10/kivy,xiaoyanit/kivy,VinGarcia/kivy,jkankiewicz/kivy,xpndlabs/kivy,MiyamotoAkira/kivy,dirkjot/kivy,bob-the-hamster/kivy,habibmasuro/kivy,jkankiewicz/kivy,aron-bordin/kivy,Farkal/kivy,angryrancor/kivy,CuriousLearner/kivy,cbenhagen/kivy
|
'''
Clipboard xsel: an implementation of the Clipboard using xsel command line tool.
'''
__all__ = ('ClipboardXsel', )
from kivy.utils import platform
from kivy.core.clipboard import ClipboardBase
if platform != 'linux':
raise SystemError('unsupported platform for xsel clipboard')
try:
import subprocess
p = subprocess.Popen(['xsel'], stdout=subprocess.PIPE)
p.communicate()
except:
raise
class ClipboardXsel(ClipboardBase):
def get(self, mimetype='text/plain'):
p = subprocess.Popen(['xsel', '-bo'], stdout=subprocess.PIPE)
data, _ = p.communicate()
return data
def put(self, data, mimetype='text/plain'):
p = subprocess.Popen(['xsel', '-bi'], stdin=subprocess.PIPE)
p.communicate(data)
def get_types(self):
return list('text/plain',)
Fix return get_types for ClipboardXsel
|
'''
Clipboard xsel: an implementation of the Clipboard using xsel command line tool.
'''
__all__ = ('ClipboardXsel', )
from kivy.utils import platform
from kivy.core.clipboard import ClipboardBase
if platform != 'linux':
raise SystemError('unsupported platform for xsel clipboard')
try:
import subprocess
p = subprocess.Popen(['xsel'], stdout=subprocess.PIPE)
p.communicate()
except:
raise
class ClipboardXsel(ClipboardBase):
def get(self, mimetype='text/plain'):
p = subprocess.Popen(['xsel', '-bo'], stdout=subprocess.PIPE)
data, _ = p.communicate()
return data
def put(self, data, mimetype='text/plain'):
p = subprocess.Popen(['xsel', '-bi'], stdin=subprocess.PIPE)
p.communicate(data)
def get_types(self):
return [u'text/plain']
|
<commit_before>'''
Clipboard xsel: an implementation of the Clipboard using xsel command line tool.
'''
__all__ = ('ClipboardXsel', )
from kivy.utils import platform
from kivy.core.clipboard import ClipboardBase
if platform != 'linux':
raise SystemError('unsupported platform for xsel clipboard')
try:
import subprocess
p = subprocess.Popen(['xsel'], stdout=subprocess.PIPE)
p.communicate()
except:
raise
class ClipboardXsel(ClipboardBase):
def get(self, mimetype='text/plain'):
p = subprocess.Popen(['xsel', '-bo'], stdout=subprocess.PIPE)
data, _ = p.communicate()
return data
def put(self, data, mimetype='text/plain'):
p = subprocess.Popen(['xsel', '-bi'], stdin=subprocess.PIPE)
p.communicate(data)
def get_types(self):
return list('text/plain',)
<commit_msg>Fix return get_types for ClipboardXsel<commit_after>
|
'''
Clipboard xsel: an implementation of the Clipboard using xsel command line tool.
'''
__all__ = ('ClipboardXsel', )
from kivy.utils import platform
from kivy.core.clipboard import ClipboardBase
if platform != 'linux':
raise SystemError('unsupported platform for xsel clipboard')
try:
import subprocess
p = subprocess.Popen(['xsel'], stdout=subprocess.PIPE)
p.communicate()
except:
raise
class ClipboardXsel(ClipboardBase):
def get(self, mimetype='text/plain'):
p = subprocess.Popen(['xsel', '-bo'], stdout=subprocess.PIPE)
data, _ = p.communicate()
return data
def put(self, data, mimetype='text/plain'):
p = subprocess.Popen(['xsel', '-bi'], stdin=subprocess.PIPE)
p.communicate(data)
def get_types(self):
return [u'text/plain']
|
'''
Clipboard xsel: an implementation of the Clipboard using xsel command line tool.
'''
__all__ = ('ClipboardXsel', )
from kivy.utils import platform
from kivy.core.clipboard import ClipboardBase
if platform != 'linux':
raise SystemError('unsupported platform for xsel clipboard')
try:
import subprocess
p = subprocess.Popen(['xsel'], stdout=subprocess.PIPE)
p.communicate()
except:
raise
class ClipboardXsel(ClipboardBase):
def get(self, mimetype='text/plain'):
p = subprocess.Popen(['xsel', '-bo'], stdout=subprocess.PIPE)
data, _ = p.communicate()
return data
def put(self, data, mimetype='text/plain'):
p = subprocess.Popen(['xsel', '-bi'], stdin=subprocess.PIPE)
p.communicate(data)
def get_types(self):
return list('text/plain',)
Fix return get_types for ClipboardXsel'''
Clipboard xsel: an implementation of the Clipboard using xsel command line tool.
'''
__all__ = ('ClipboardXsel', )
from kivy.utils import platform
from kivy.core.clipboard import ClipboardBase
if platform != 'linux':
raise SystemError('unsupported platform for xsel clipboard')
try:
import subprocess
p = subprocess.Popen(['xsel'], stdout=subprocess.PIPE)
p.communicate()
except:
raise
class ClipboardXsel(ClipboardBase):
def get(self, mimetype='text/plain'):
p = subprocess.Popen(['xsel', '-bo'], stdout=subprocess.PIPE)
data, _ = p.communicate()
return data
def put(self, data, mimetype='text/plain'):
p = subprocess.Popen(['xsel', '-bi'], stdin=subprocess.PIPE)
p.communicate(data)
def get_types(self):
return [u'text/plain']
|
<commit_before>'''
Clipboard xsel: an implementation of the Clipboard using xsel command line tool.
'''
__all__ = ('ClipboardXsel', )
from kivy.utils import platform
from kivy.core.clipboard import ClipboardBase
if platform != 'linux':
raise SystemError('unsupported platform for xsel clipboard')
try:
import subprocess
p = subprocess.Popen(['xsel'], stdout=subprocess.PIPE)
p.communicate()
except:
raise
class ClipboardXsel(ClipboardBase):
def get(self, mimetype='text/plain'):
p = subprocess.Popen(['xsel', '-bo'], stdout=subprocess.PIPE)
data, _ = p.communicate()
return data
def put(self, data, mimetype='text/plain'):
p = subprocess.Popen(['xsel', '-bi'], stdin=subprocess.PIPE)
p.communicate(data)
def get_types(self):
return list('text/plain',)
<commit_msg>Fix return get_types for ClipboardXsel<commit_after>'''
Clipboard xsel: an implementation of the Clipboard using xsel command line tool.
'''
__all__ = ('ClipboardXsel', )
from kivy.utils import platform
from kivy.core.clipboard import ClipboardBase
if platform != 'linux':
raise SystemError('unsupported platform for xsel clipboard')
try:
import subprocess
p = subprocess.Popen(['xsel'], stdout=subprocess.PIPE)
p.communicate()
except:
raise
class ClipboardXsel(ClipboardBase):
def get(self, mimetype='text/plain'):
p = subprocess.Popen(['xsel', '-bo'], stdout=subprocess.PIPE)
data, _ = p.communicate()
return data
def put(self, data, mimetype='text/plain'):
p = subprocess.Popen(['xsel', '-bi'], stdin=subprocess.PIPE)
p.communicate(data)
def get_types(self):
return [u'text/plain']
|
a71807789bd09181369fff8b18b3ab5544ba58dd
|
locations/spiders/sunloan.py
|
locations/spiders/sunloan.py
|
# -*- coding: utf-8 -*-
import scrapy
import json
import re
from locations.items import GeojsonPointItem
DAYS={
'Monday':'Mo',
'Tuesday':'Tu',
'Wednesday':'We',
'Friday':'Fr',
'Thursday':'Th',
'Saturday':'Sa',
'Sunday':'Su',
}
class SunLoanSpider(scrapy.Spider):
name = "sunloan"
allowed_domains = ["sunloan.com"]
start_urls = (
'https://www.sunloan.com/locations/',
)
download_delay = 0.5
def parse(self, response):
urls = response.xpath('//div[@id="custom-locations-2"]//div[@class="location-box"]/div/p/strong/a/@href').extract()
for url in urls:
yield scrapy.Request(response.urljoin(url), callback=self.parse_store)
def parse_store(self, response):
try:
data = response.xpath('//script[contains(text(),"latitude")]/text()').extract_first()
data = json.loads(data)
except TypeError:
return
except json.JSONDecodeError:
data = data.replace('"hasMap": \r\n', '')
data = json.loads(data)
if not data:
return
properties = {
'lat' : float(data['geo']['latitude']),
'lon' : float(data['geo']['longitude']),
'website' : response.url,
'ref' : response.url,
'addr_full' : data['address']['streetAddress'],
'city' : data['address']['addressLocality'],
'state' : data['address']['addressRegion'],
'postcode' : data['address']['postalCode'],
'country' : 'US',
'name' : data['name'],
}
try:
hours = data['openingHours']
if hours:
properties['opening_hours'] = hours
except:
pass
yield GeojsonPointItem(**properties)
# yield GeojsonPointItem(
# lat=float(data['geo']['latitude']),
# lon=float(data['geo']['longitude']),
# website=response.url,
# ref=response.url,
# #opening_hours=data['openingHours'],
# addr_full=data['address']['streetAddress'],
# city=data['address']['addressLocality'],
# state=data['address']['addressRegion'],
# postcode=data['address']['postalCode'],
# country='US',
# name=data['name'],
# )
|
Add spider for Sun Loan Company
|
Add spider for Sun Loan Company
|
Python
|
mit
|
iandees/all-the-places,iandees/all-the-places,iandees/all-the-places
|
Add spider for Sun Loan Company
|
# -*- coding: utf-8 -*-
import scrapy
import json
import re
from locations.items import GeojsonPointItem
DAYS={
'Monday':'Mo',
'Tuesday':'Tu',
'Wednesday':'We',
'Friday':'Fr',
'Thursday':'Th',
'Saturday':'Sa',
'Sunday':'Su',
}
class SunLoanSpider(scrapy.Spider):
name = "sunloan"
allowed_domains = ["sunloan.com"]
start_urls = (
'https://www.sunloan.com/locations/',
)
download_delay = 0.5
def parse(self, response):
urls = response.xpath('//div[@id="custom-locations-2"]//div[@class="location-box"]/div/p/strong/a/@href').extract()
for url in urls:
yield scrapy.Request(response.urljoin(url), callback=self.parse_store)
def parse_store(self, response):
try:
data = response.xpath('//script[contains(text(),"latitude")]/text()').extract_first()
data = json.loads(data)
except TypeError:
return
except json.JSONDecodeError:
data = data.replace('"hasMap": \r\n', '')
data = json.loads(data)
if not data:
return
properties = {
'lat' : float(data['geo']['latitude']),
'lon' : float(data['geo']['longitude']),
'website' : response.url,
'ref' : response.url,
'addr_full' : data['address']['streetAddress'],
'city' : data['address']['addressLocality'],
'state' : data['address']['addressRegion'],
'postcode' : data['address']['postalCode'],
'country' : 'US',
'name' : data['name'],
}
try:
hours = data['openingHours']
if hours:
properties['opening_hours'] = hours
except:
pass
yield GeojsonPointItem(**properties)
# yield GeojsonPointItem(
# lat=float(data['geo']['latitude']),
# lon=float(data['geo']['longitude']),
# website=response.url,
# ref=response.url,
# #opening_hours=data['openingHours'],
# addr_full=data['address']['streetAddress'],
# city=data['address']['addressLocality'],
# state=data['address']['addressRegion'],
# postcode=data['address']['postalCode'],
# country='US',
# name=data['name'],
# )
|
<commit_before><commit_msg>Add spider for Sun Loan Company<commit_after>
|
# -*- coding: utf-8 -*-
import scrapy
import json
import re
from locations.items import GeojsonPointItem
DAYS={
'Monday':'Mo',
'Tuesday':'Tu',
'Wednesday':'We',
'Friday':'Fr',
'Thursday':'Th',
'Saturday':'Sa',
'Sunday':'Su',
}
class SunLoanSpider(scrapy.Spider):
name = "sunloan"
allowed_domains = ["sunloan.com"]
start_urls = (
'https://www.sunloan.com/locations/',
)
download_delay = 0.5
def parse(self, response):
urls = response.xpath('//div[@id="custom-locations-2"]//div[@class="location-box"]/div/p/strong/a/@href').extract()
for url in urls:
yield scrapy.Request(response.urljoin(url), callback=self.parse_store)
def parse_store(self, response):
try:
data = response.xpath('//script[contains(text(),"latitude")]/text()').extract_first()
data = json.loads(data)
except TypeError:
return
except json.JSONDecodeError:
data = data.replace('"hasMap": \r\n', '')
data = json.loads(data)
if not data:
return
properties = {
'lat' : float(data['geo']['latitude']),
'lon' : float(data['geo']['longitude']),
'website' : response.url,
'ref' : response.url,
'addr_full' : data['address']['streetAddress'],
'city' : data['address']['addressLocality'],
'state' : data['address']['addressRegion'],
'postcode' : data['address']['postalCode'],
'country' : 'US',
'name' : data['name'],
}
try:
hours = data['openingHours']
if hours:
properties['opening_hours'] = hours
except:
pass
yield GeojsonPointItem(**properties)
# yield GeojsonPointItem(
# lat=float(data['geo']['latitude']),
# lon=float(data['geo']['longitude']),
# website=response.url,
# ref=response.url,
# #opening_hours=data['openingHours'],
# addr_full=data['address']['streetAddress'],
# city=data['address']['addressLocality'],
# state=data['address']['addressRegion'],
# postcode=data['address']['postalCode'],
# country='US',
# name=data['name'],
# )
|
Add spider for Sun Loan Company# -*- coding: utf-8 -*-
import scrapy
import json
import re
from locations.items import GeojsonPointItem
DAYS={
'Monday':'Mo',
'Tuesday':'Tu',
'Wednesday':'We',
'Friday':'Fr',
'Thursday':'Th',
'Saturday':'Sa',
'Sunday':'Su',
}
class SunLoanSpider(scrapy.Spider):
name = "sunloan"
allowed_domains = ["sunloan.com"]
start_urls = (
'https://www.sunloan.com/locations/',
)
download_delay = 0.5
def parse(self, response):
urls = response.xpath('//div[@id="custom-locations-2"]//div[@class="location-box"]/div/p/strong/a/@href').extract()
for url in urls:
yield scrapy.Request(response.urljoin(url), callback=self.parse_store)
def parse_store(self, response):
try:
data = response.xpath('//script[contains(text(),"latitude")]/text()').extract_first()
data = json.loads(data)
except TypeError:
return
except json.JSONDecodeError:
data = data.replace('"hasMap": \r\n', '')
data = json.loads(data)
if not data:
return
properties = {
'lat' : float(data['geo']['latitude']),
'lon' : float(data['geo']['longitude']),
'website' : response.url,
'ref' : response.url,
'addr_full' : data['address']['streetAddress'],
'city' : data['address']['addressLocality'],
'state' : data['address']['addressRegion'],
'postcode' : data['address']['postalCode'],
'country' : 'US',
'name' : data['name'],
}
try:
hours = data['openingHours']
if hours:
properties['opening_hours'] = hours
except:
pass
yield GeojsonPointItem(**properties)
# yield GeojsonPointItem(
# lat=float(data['geo']['latitude']),
# lon=float(data['geo']['longitude']),
# website=response.url,
# ref=response.url,
# #opening_hours=data['openingHours'],
# addr_full=data['address']['streetAddress'],
# city=data['address']['addressLocality'],
# state=data['address']['addressRegion'],
# postcode=data['address']['postalCode'],
# country='US',
# name=data['name'],
# )
|
<commit_before><commit_msg>Add spider for Sun Loan Company<commit_after># -*- coding: utf-8 -*-
import scrapy
import json
import re
from locations.items import GeojsonPointItem
DAYS={
'Monday':'Mo',
'Tuesday':'Tu',
'Wednesday':'We',
'Friday':'Fr',
'Thursday':'Th',
'Saturday':'Sa',
'Sunday':'Su',
}
class SunLoanSpider(scrapy.Spider):
name = "sunloan"
allowed_domains = ["sunloan.com"]
start_urls = (
'https://www.sunloan.com/locations/',
)
download_delay = 0.5
def parse(self, response):
urls = response.xpath('//div[@id="custom-locations-2"]//div[@class="location-box"]/div/p/strong/a/@href').extract()
for url in urls:
yield scrapy.Request(response.urljoin(url), callback=self.parse_store)
def parse_store(self, response):
try:
data = response.xpath('//script[contains(text(),"latitude")]/text()').extract_first()
data = json.loads(data)
except TypeError:
return
except json.JSONDecodeError:
data = data.replace('"hasMap": \r\n', '')
data = json.loads(data)
if not data:
return
properties = {
'lat' : float(data['geo']['latitude']),
'lon' : float(data['geo']['longitude']),
'website' : response.url,
'ref' : response.url,
'addr_full' : data['address']['streetAddress'],
'city' : data['address']['addressLocality'],
'state' : data['address']['addressRegion'],
'postcode' : data['address']['postalCode'],
'country' : 'US',
'name' : data['name'],
}
try:
hours = data['openingHours']
if hours:
properties['opening_hours'] = hours
except:
pass
yield GeojsonPointItem(**properties)
# yield GeojsonPointItem(
# lat=float(data['geo']['latitude']),
# lon=float(data['geo']['longitude']),
# website=response.url,
# ref=response.url,
# #opening_hours=data['openingHours'],
# addr_full=data['address']['streetAddress'],
# city=data['address']['addressLocality'],
# state=data['address']['addressRegion'],
# postcode=data['address']['postalCode'],
# country='US',
# name=data['name'],
# )
|
|
66a0622ba63d89b6cfcfa74f7b342f4df55c5045
|
src/sponsors/migrations/0004_auto_20160501_1632.py
|
src/sponsors/migrations/0004_auto_20160501_1632.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.3 on 2016-05-01 16:32
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('sponsors', '0003_auto_20160427_0722'),
]
operations = [
migrations.AlterModelOptions(
name='sponsor',
options={'ordering': ('level', 'name'), 'verbose_name': 'sponsor', 'verbose_name_plural': 'sponsors'},
),
]
|
Add missing migration for sponsors
|
Add missing migration for sponsors
|
Python
|
mit
|
pycontw/pycontw2016,pycontw/pycontw2016,pycontw/pycontw2016,pycontw/pycontw2016
|
Add missing migration for sponsors
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.3 on 2016-05-01 16:32
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('sponsors', '0003_auto_20160427_0722'),
]
operations = [
migrations.AlterModelOptions(
name='sponsor',
options={'ordering': ('level', 'name'), 'verbose_name': 'sponsor', 'verbose_name_plural': 'sponsors'},
),
]
|
<commit_before><commit_msg>Add missing migration for sponsors<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.3 on 2016-05-01 16:32
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('sponsors', '0003_auto_20160427_0722'),
]
operations = [
migrations.AlterModelOptions(
name='sponsor',
options={'ordering': ('level', 'name'), 'verbose_name': 'sponsor', 'verbose_name_plural': 'sponsors'},
),
]
|
Add missing migration for sponsors# -*- coding: utf-8 -*-
# Generated by Django 1.9.3 on 2016-05-01 16:32
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('sponsors', '0003_auto_20160427_0722'),
]
operations = [
migrations.AlterModelOptions(
name='sponsor',
options={'ordering': ('level', 'name'), 'verbose_name': 'sponsor', 'verbose_name_plural': 'sponsors'},
),
]
|
<commit_before><commit_msg>Add missing migration for sponsors<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.9.3 on 2016-05-01 16:32
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('sponsors', '0003_auto_20160427_0722'),
]
operations = [
migrations.AlterModelOptions(
name='sponsor',
options={'ordering': ('level', 'name'), 'verbose_name': 'sponsor', 'verbose_name_plural': 'sponsors'},
),
]
|
|
df8e3a2b6f7c8dbb0b93e06e204e802104aad70f
|
faker/providers/date_time/ru_RU/__init__.py
|
faker/providers/date_time/ru_RU/__init__.py
|
# coding: utf-8
from __future__ import unicode_literals
from .. import Provider as DateTimeProvider
class Provider(DateTimeProvider):
@classmethod
def day_of_week(cls):
day = cls.date('%w')
DAY_NAMES = {
"0": "Воскресенье",
"1": "Понедельник",
"2": "Вторник",
"3": "Среда",
"4": "Четверг",
"5": "Пятница",
"6": "Суббота",
}
return DAY_NAMES[day]
@classmethod
def month_name(cls):
month = cls.month()
MONTH_NAMES = {
"01": "Январь",
"02": "Февраль",
"03": "Март",
"04": "Апрель",
"05": "Май",
"06": "Июнь",
"07": "Июль",
"08": "Август",
"09": "Сентябрь",
"10": "Октябрь",
"11": "Ноябрь",
"12": "Декабрь",
}
return MONTH_NAMES[month]
|
Add russian words for date_time
|
Add russian words for date_time
|
Python
|
mit
|
joke2k/faker,joke2k/faker,danhuss/faker,trtd/faker
|
Add russian words for date_time
|
# coding: utf-8
from __future__ import unicode_literals
from .. import Provider as DateTimeProvider
class Provider(DateTimeProvider):
@classmethod
def day_of_week(cls):
day = cls.date('%w')
DAY_NAMES = {
"0": "Воскресенье",
"1": "Понедельник",
"2": "Вторник",
"3": "Среда",
"4": "Четверг",
"5": "Пятница",
"6": "Суббота",
}
return DAY_NAMES[day]
@classmethod
def month_name(cls):
month = cls.month()
MONTH_NAMES = {
"01": "Январь",
"02": "Февраль",
"03": "Март",
"04": "Апрель",
"05": "Май",
"06": "Июнь",
"07": "Июль",
"08": "Август",
"09": "Сентябрь",
"10": "Октябрь",
"11": "Ноябрь",
"12": "Декабрь",
}
return MONTH_NAMES[month]
|
<commit_before><commit_msg>Add russian words for date_time<commit_after>
|
# coding: utf-8
from __future__ import unicode_literals
from .. import Provider as DateTimeProvider
class Provider(DateTimeProvider):
@classmethod
def day_of_week(cls):
day = cls.date('%w')
DAY_NAMES = {
"0": "Воскресенье",
"1": "Понедельник",
"2": "Вторник",
"3": "Среда",
"4": "Четверг",
"5": "Пятница",
"6": "Суббота",
}
return DAY_NAMES[day]
@classmethod
def month_name(cls):
month = cls.month()
MONTH_NAMES = {
"01": "Январь",
"02": "Февраль",
"03": "Март",
"04": "Апрель",
"05": "Май",
"06": "Июнь",
"07": "Июль",
"08": "Август",
"09": "Сентябрь",
"10": "Октябрь",
"11": "Ноябрь",
"12": "Декабрь",
}
return MONTH_NAMES[month]
|
Add russian words for date_time# coding: utf-8
from __future__ import unicode_literals
from .. import Provider as DateTimeProvider
class Provider(DateTimeProvider):
@classmethod
def day_of_week(cls):
day = cls.date('%w')
DAY_NAMES = {
"0": "Воскресенье",
"1": "Понедельник",
"2": "Вторник",
"3": "Среда",
"4": "Четверг",
"5": "Пятница",
"6": "Суббота",
}
return DAY_NAMES[day]
@classmethod
def month_name(cls):
month = cls.month()
MONTH_NAMES = {
"01": "Январь",
"02": "Февраль",
"03": "Март",
"04": "Апрель",
"05": "Май",
"06": "Июнь",
"07": "Июль",
"08": "Август",
"09": "Сентябрь",
"10": "Октябрь",
"11": "Ноябрь",
"12": "Декабрь",
}
return MONTH_NAMES[month]
|
<commit_before><commit_msg>Add russian words for date_time<commit_after># coding: utf-8
from __future__ import unicode_literals
from .. import Provider as DateTimeProvider
class Provider(DateTimeProvider):
@classmethod
def day_of_week(cls):
day = cls.date('%w')
DAY_NAMES = {
"0": "Воскресенье",
"1": "Понедельник",
"2": "Вторник",
"3": "Среда",
"4": "Четверг",
"5": "Пятница",
"6": "Суббота",
}
return DAY_NAMES[day]
@classmethod
def month_name(cls):
month = cls.month()
MONTH_NAMES = {
"01": "Январь",
"02": "Февраль",
"03": "Март",
"04": "Апрель",
"05": "Май",
"06": "Июнь",
"07": "Июль",
"08": "Август",
"09": "Сентябрь",
"10": "Октябрь",
"11": "Ноябрь",
"12": "Декабрь",
}
return MONTH_NAMES[month]
|
|
d6cf9df23e8c9e3b6a8d261ea79dfe16cfa1dbb1
|
numba/tests/test_redefine.py
|
numba/tests/test_redefine.py
|
from numba import *
import unittest
class TestRedefine(unittest.TestCase):
def test_redefine(self):
def foo(x):
return x + 1
jfoo = jit(int32(int32))(foo)
# Test original function
self.assertEqual(jfoo(1), 2)
jfoo = jit(int32(int32))(foo)
# Test re-compilation
self.assertEqual(jfoo(2), 3)
def foo(x):
return x + 2
jfoo = jit(int32(int32))(foo)
# Test redefinition
self.assertEqual(jfoo(1), 3)
|
Add test for function re-definition
|
Add test for function re-definition
|
Python
|
bsd-2-clause
|
IntelLabs/numba,GaZ3ll3/numba,stuartarchibald/numba,pitrou/numba,seibert/numba,stefanseefeld/numba,ssarangi/numba,jriehl/numba,seibert/numba,gdementen/numba,pombredanne/numba,sklam/numba,stefanseefeld/numba,jriehl/numba,IntelLabs/numba,stefanseefeld/numba,pombredanne/numba,seibert/numba,stonebig/numba,GaZ3ll3/numba,sklam/numba,gmarkall/numba,jriehl/numba,ssarangi/numba,GaZ3ll3/numba,ssarangi/numba,numba/numba,IntelLabs/numba,stuartarchibald/numba,stuartarchibald/numba,gdementen/numba,stefanseefeld/numba,shiquanwang/numba,stonebig/numba,jriehl/numba,stonebig/numba,cpcloud/numba,sklam/numba,ssarangi/numba,pitrou/numba,pombredanne/numba,seibert/numba,pitrou/numba,cpcloud/numba,stefanseefeld/numba,IntelLabs/numba,seibert/numba,pitrou/numba,pombredanne/numba,gmarkall/numba,GaZ3ll3/numba,cpcloud/numba,sklam/numba,jriehl/numba,shiquanwang/numba,pitrou/numba,gmarkall/numba,stuartarchibald/numba,pombredanne/numba,numba/numba,numba/numba,numba/numba,gdementen/numba,stuartarchibald/numba,numba/numba,IntelLabs/numba,gdementen/numba,shiquanwang/numba,GaZ3ll3/numba,cpcloud/numba,cpcloud/numba,ssarangi/numba,gmarkall/numba,stonebig/numba,stonebig/numba,gdementen/numba,sklam/numba,gmarkall/numba
|
Add test for function re-definition
|
from numba import *
import unittest
class TestRedefine(unittest.TestCase):
def test_redefine(self):
def foo(x):
return x + 1
jfoo = jit(int32(int32))(foo)
# Test original function
self.assertEqual(jfoo(1), 2)
jfoo = jit(int32(int32))(foo)
# Test re-compilation
self.assertEqual(jfoo(2), 3)
def foo(x):
return x + 2
jfoo = jit(int32(int32))(foo)
# Test redefinition
self.assertEqual(jfoo(1), 3)
|
<commit_before><commit_msg>Add test for function re-definition<commit_after>
|
from numba import *
import unittest
class TestRedefine(unittest.TestCase):
def test_redefine(self):
def foo(x):
return x + 1
jfoo = jit(int32(int32))(foo)
# Test original function
self.assertEqual(jfoo(1), 2)
jfoo = jit(int32(int32))(foo)
# Test re-compilation
self.assertEqual(jfoo(2), 3)
def foo(x):
return x + 2
jfoo = jit(int32(int32))(foo)
# Test redefinition
self.assertEqual(jfoo(1), 3)
|
Add test for function re-definitionfrom numba import *
import unittest
class TestRedefine(unittest.TestCase):
def test_redefine(self):
def foo(x):
return x + 1
jfoo = jit(int32(int32))(foo)
# Test original function
self.assertEqual(jfoo(1), 2)
jfoo = jit(int32(int32))(foo)
# Test re-compilation
self.assertEqual(jfoo(2), 3)
def foo(x):
return x + 2
jfoo = jit(int32(int32))(foo)
# Test redefinition
self.assertEqual(jfoo(1), 3)
|
<commit_before><commit_msg>Add test for function re-definition<commit_after>from numba import *
import unittest
class TestRedefine(unittest.TestCase):
def test_redefine(self):
def foo(x):
return x + 1
jfoo = jit(int32(int32))(foo)
# Test original function
self.assertEqual(jfoo(1), 2)
jfoo = jit(int32(int32))(foo)
# Test re-compilation
self.assertEqual(jfoo(2), 3)
def foo(x):
return x + 2
jfoo = jit(int32(int32))(foo)
# Test redefinition
self.assertEqual(jfoo(1), 3)
|
|
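A plain-Python sketch of the name-rebinding behaviour the test pins down; make_wrapper is a hypothetical stand-in for jit, which captures the function object it is handed:

def make_wrapper(fn):
    # Stand-in for jit(...): freezes a reference to fn at wrap time.
    return lambda x: fn(x)

def foo(x):
    return x + 1

wrapped = make_wrapper(foo)

def foo(x):  # redefinition; 'wrapped' still holds the old foo
    return x + 2

assert wrapped(1) == 2             # old definition survives in the wrapper
assert make_wrapper(foo)(1) == 3   # re-wrapping picks up the new definition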
956d524a8694bb63532ac5f4a8121f57d75b7ad1
|
test/BuildDir/guess-subdir.py
|
test/BuildDir/guess-subdir.py
|
#!/usr/bin/env python
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
"""
Test that the logic that "guesses" the associated BuildDir for a
subdirectory correctly builds targets in the BuildDir subdirectory.
"""
import TestSCons
test = TestSCons.TestSCons()
test.subdir(['work'], ['work', 'src'])
test.write(['work', 'SConstruct'], """
c_builddir = r'%s'
BuildDir(c_builddir, '.', duplicate=0)
SConscript(c_builddir + '/SConscript')
""" % test.workpath('debug'))
test.write(['work', 'SConscript'], """
SConscript('src/SConscript')
""")
test.write(['work', 'src', 'SConscript'], """
env = Environment(OBJSUFFIX='.obj',
PROGSUFFIX='.exe')
env.Program('test.cpp')
""")
test.write(['work', 'src', 'test.cpp'], """\
#include <stdio.h>
#include <stdlib.h>
int
main(int argc, char *argv[])
{
printf("work/src/test.cpp\\n");
}
""")
test.run(chdir = 'work', arguments = '.')
test.must_exist(test.workpath('debug', 'src', 'test.obj'))
test.must_exist(test.workpath('debug', 'src', 'test.exe'))
test.pass_test()
|
Add a test case for guessing the BuildDir associated with a subdirectory argument.
|
Add a test case for guessing the BuildDir associated with a subdirectory argument.
|
Python
|
mit
|
Distrotech/scons,Distrotech/scons,Distrotech/scons,Distrotech/scons,Distrotech/scons
|
Add a test case for guessing the BuildDir associated with a subdirectory argument.
|
#!/usr/bin/env python
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
"""
Test that the logic that "guesses" the associated BuildDir for a
subdirectory correctly builds targets in the BuildDir subdirectory.
"""
import TestSCons
test = TestSCons.TestSCons()
test.subdir(['work'], ['work', 'src'])
test.write(['work', 'SConstruct'], """
c_builddir = r'%s'
BuildDir(c_builddir, '.', duplicate=0)
SConscript(c_builddir + '/SConscript')
""" % test.workpath('debug'))
test.write(['work', 'SConscript'], """
SConscript('src/SConscript')
""")
test.write(['work', 'src', 'SConscript'], """
env = Environment(OBJSUFFIX='.obj',
PROGSUFFIX='.exe')
env.Program('test.cpp')
""")
test.write(['work', 'src', 'test.cpp'], """\
#include <stdio.h>
#include <stdlib.h>
int
main(int argc, char *argv[])
{
printf("work/src/test.cpp\\n");
}
""")
test.run(chdir = 'work', arguments = '.')
test.must_exist(test.workpath('debug', 'src', 'test.obj'))
test.must_exist(test.workpath('debug', 'src', 'test.exe'))
test.pass_test()
|
<commit_before><commit_msg>Add a test case for guessing the BuildDir associated with a subdirectory argument.<commit_after>
|
#!/usr/bin/env python
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
"""
Test that the logic that "guesses" the associated BuildDir for a
subdirectory correctly builds targets in the BuildDir subdirectory.
"""
import TestSCons
test = TestSCons.TestSCons()
test.subdir(['work'], ['work', 'src'])
test.write(['work', 'SConstruct'], """
c_builddir = r'%s'
BuildDir(c_builddir, '.', duplicate=0)
SConscript(c_builddir + '/SConscript')
""" % test.workpath('debug'))
test.write(['work', 'SConscript'], """
SConscript('src/SConscript')
""")
test.write(['work', 'src', 'SConscript'], """
env = Environment(OBJSUFFIX='.obj',
PROGSUFFIX='.exe')
env.Program('test.cpp')
""")
test.write(['work', 'src', 'test.cpp'], """\
#include <stdio.h>
#include <stdlib.h>
int
main(int argc, char *argv[])
{
printf("work/src/test.cpp\\n");
}
""")
test.run(chdir = 'work', arguments = '.')
test.must_exist(test.workpath('debug', 'src', 'test.obj'))
test.must_exist(test.workpath('debug', 'src', 'test.exe'))
test.pass_test()
|
Add a test case for guessing the BuildDir associated with a subdirectory argument.#!/usr/bin/env python
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
"""
Test that the logic that "guesses" the associated BuildDir for a
subdirectory correctly builds targets in the BuildDir subdirectory.
"""
import TestSCons
test = TestSCons.TestSCons()
test.subdir(['work'], ['work', 'src'])
test.write(['work', 'SConstruct'], """
c_builddir = r'%s'
BuildDir(c_builddir, '.', duplicate=0)
SConscript(c_builddir + '/SConscript')
""" % test.workpath('debug'))
test.write(['work', 'SConscript'], """
SConscript('src/SConscript')
""")
test.write(['work', 'src', 'SConscript'], """
env = Environment(OBJSUFFIX='.obj',
PROGSUFFIX='.exe')
env.Program('test.cpp')
""")
test.write(['work', 'src', 'test.cpp'], """\
#include <stdio.h>
#include <stdlib.h>
int
main(int argc, char *argv[])
{
printf("work/src/test.cpp\\n");
}
""")
test.run(chdir = 'work', arguments = '.')
test.must_exist(test.workpath('debug', 'src', 'test.obj'))
test.must_exist(test.workpath('debug', 'src', 'test.exe'))
test.pass_test()
|
<commit_before><commit_msg>Add a test case for guessing the BuildDir associated with a subdirectory argument.<commit_after>#!/usr/bin/env python
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
"""
Test that the logic that "guesses" the associated BuildDir for a
subdirectory correctly builds targets in the BuildDir subdirectory.
"""
import TestSCons
test = TestSCons.TestSCons()
test.subdir(['work'], ['work', 'src'])
test.write(['work', 'SConstruct'], """
c_builddir = r'%s'
BuildDir(c_builddir, '.', duplicate=0)
SConscript(c_builddir + '/SConscript')
""" % test.workpath('debug'))
test.write(['work', 'SConscript'], """
SConscript('src/SConscript')
""")
test.write(['work', 'src', 'SConscript'], """
env = Environment(OBJSUFFIX='.obj',
PROGSUFFIX='.exe')
env.Program('test.cpp')
""")
test.write(['work', 'src', 'test.cpp'], """\
#include <stdio.h>
#include <stdlib.h>
int
main(int argc, char *argv[])
{
printf("work/src/test.cpp\\n");
}
""")
test.run(chdir = 'work', arguments = '.')
test.must_exist(test.workpath('debug', 'src', 'test.obj'))
test.must_exist(test.workpath('debug', 'src', 'test.exe'))
test.pass_test()
|
|
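The SConstruct idiom under test, reduced to a sketch (paths are illustrative; BuildDir is the spelling this era of SCons uses for what later became VariantDir):

# Map the source tree into ./debug without copying files, then build there.
BuildDir('debug', '.', duplicate=0)
SConscript('debug/SConscript')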
654d2070dc25a288f680b5d93e5eb4662da403eb
|
planner/scrapers/virginia.py
|
planner/scrapers/virginia.py
|
from lxml import etree
from planner.scrapers.base import Scraper
class UVAScraper(Scraper):
"""Scraper for the University of Virginia"""
def scrape(self):
with open('/home/matt/lcp/uva.html', 'r') as f:
text = f.read()
html = etree.HTML(text)
# Filter to a closer div to prevent extracting too much data.
div_list = html.xpath('.//div[contains(@class, "view-events")]')
if len(div_list) != 1:
raise Exception('Expected 1 div, found {}'.format(len(div_list)))
events_div = div_list[0]
for row in events_div.xpath('//tr'):
print('text', row)
# TODO: Get list of objects that has raw dates and text
# TODO: match text against regular expressions to find milestone types
# TODO: create or update milestones.
# TODO: Add concept of academic year to milestones?
# TODO: Refactor to template pattern to reduce mechanical work needed.
|
Add the start of the UVA scraper.
|
Add the start of the UVA scraper.
|
Python
|
bsd-2-clause
|
mblayman/lcp,mblayman/lcp,mblayman/lcp
|
Add the start of the UVA scraper.
|
from lxml import etree
from planner.scrapers.base import Scraper
class UVAScraper(Scraper):
"""Scraper for the University of Virginia"""
def scrape(self):
with open('/home/matt/lcp/uva.html', 'r') as f:
text = f.read()
html = etree.HTML(text)
# Filter to a closer div to prevent extracting too much data.
div_list = html.xpath('.//div[contains(@class, "view-events")]')
if len(div_list) != 1:
raise Exception('Expected 1 div, found {}'.format(len(div_list)))
events_div = div_list[0]
for row in events_div.xpath('//tr'):
print('text', row)
# TODO: Get list of objects that has raw dates and text
# TODO: match text against regular expressions to find milestone types
# TODO: create or update milestones.
# TODO: Add concept of academic year to milestones?
# TODO: Refactor to template pattern to reduce mechanical work needed.
|
<commit_before><commit_msg>Add the start of the UVA scraper.<commit_after>
|
from lxml import etree
from planner.scrapers.base import Scraper
class UVAScraper(Scraper):
"""Scraper for the University of Virginia"""
def scrape(self):
with open('/home/matt/lcp/uva.html', 'r') as f:
text = f.read()
html = etree.HTML(text)
# Filter to a closer div to prevent extracting too much data.
div_list = html.xpath('.//div[contains(@class, "view-events")]')
if len(div_list) != 1:
raise Exception('Expected 1 div, found {}'.format(len(div_list)))
events_div = div_list[0]
for row in events_div.xpath('//tr'):
print('text', row)
# TODO: Get list of objects that has raw dates and text
# TODO: match text against regular expressions to find milestone types
# TODO: create or update milestones.
# TODO: Add concept of academic year to milestones?
# TODO: Refactor to template pattern to reduce mechanical work needed.
|
Add the start of the UVA scraper.from lxml import etree
from planner.scrapers.base import Scraper
class UVAScraper(Scraper):
"""Scraper for the University of Virginia"""
def scrape(self):
with open('/home/matt/lcp/uva.html', 'r') as f:
text = f.read()
html = etree.HTML(text)
# Filter to a closer div to prevent extracting too much data.
div_list = html.xpath('.//div[contains(@class, "view-events")]')
if len(div_list) != 1:
raise Exception('Expected 1 div, found {}'.format(len(div_list)))
events_div = div_list[0]
for row in events_div.xpath('//tr'):
print('text', row)
# TODO: Get list of objects that has raw dates and text
# TODO: match text against regular expressions to find milestone types
# TODO: create or update milestones.
# TODO: Add concept of academic year to milestones?
# TODO: Refactor to template pattern to reduce mechanical work needed.
|
<commit_before><commit_msg>Add the start of the UVA scraper.<commit_after>from lxml import etree
from planner.scrapers.base import Scraper
class UVAScraper(Scraper):
"""Scraper for the University of Virginia"""
def scrape(self):
with open('/home/matt/lcp/uva.html', 'r') as f:
text = f.read()
html = etree.HTML(text)
# Filter to a closer div to prevent extracting too much data.
div_list = html.xpath('.//div[contains(@class, "view-events")]')
if len(div_list) != 1:
raise Exception('Expected 1 div, found {}'.format(len(div_list)))
events_div = div_list[0]
for row in events_div.xpath('//tr'):
print('text', row)
# TODO: Get list of objects that has raw dates and text
# TODO: match text against regular expressions to find milestone types
# TODO: create or update milestones.
# TODO: Add concept of academic year to milestones?
# TODO: Refactor to template pattern to reduce mechanical work needed.
|
|
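A hedged sketch of the first TODO: pulling raw (date, text) pairs out of each table row. The two-cell layout is an assumption, and the relative './/tr' keeps the search scoped to events_div (a bare '//tr' would scan the whole page):

def extract_rows(events_div):
    # Hypothetical helper for the TODO list above.
    for row in events_div.xpath('.//tr'):
        cells = [cell.xpath('string()').strip() for cell in row.xpath('./td')]
        if len(cells) >= 2:
            yield cells[0], cells[1]  # (raw date, milestone text)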
7a8488327ba41275d4b569fc73da617140e64aea
|
sedlex/AddGitHubArticleLinkVisitor.py
|
sedlex/AddGitHubArticleLinkVisitor.py
|
# -*- coding: utf-8 -*-
from AbstractVisitor import AbstractVisitor
class AddGitHubArticleLinkVisitor(AbstractVisitor):
def __init__(self, args):
self.repo = args.github_repository
self.law_id = None
super(AddGitHubArticleLinkVisitor, self).__init__()
def visit_law_reference_node(self, node, post):
if post:
return
self.law_id = node['lawId']
def visit_article_reference_node(self, node, post):
if post:
return
node['githubHistory'] = ('https://github.com/'
+ self.repo + '/commits/master/'
+ 'loi_' + self.law_id + '/'
+ 'Article_' + node['id'] + '.md')
|
Add a visitor to generate the github history link.
|
Add a visitor to generate the github history link.
|
Python
|
agpl-3.0
|
Legilibre/SedLex
|
Add a visitor to generate the github history link.
|
# -*- coding: utf-8 -*-
from AbstractVisitor import AbstractVisitor
class AddGitHubArticleLinkVisitor(AbstractVisitor):
def __init__(self, args):
self.repo = args.github_repository
self.law_id = None
super(AddGitHubArticleLinkVisitor, self).__init__()
def visit_law_reference_node(self, node, post):
if post:
return
self.law_id = node['lawId']
def visit_article_reference_node(self, node, post):
if post:
return
node['githubHistory'] = ('https://github.com/'
+ self.repo + '/commits/master/'
+ 'loi_' + self.law_id + '/'
+ 'Article_' + node['id'] + '.md')
|
<commit_before><commit_msg>Add a visitor to generate the github history link.<commit_after>
|
# -*- coding: utf-8 -*-
from AbstractVisitor import AbstractVisitor
class AddGitHubArticleLinkVisitor(AbstractVisitor):
def __init__(self, args):
self.repo = args.github_repository
self.law_id = None
super(AddGitHubArticleLinkVisitor, self).__init__()
def visit_law_reference_node(self, node, post):
if post:
return
self.law_id = node['lawId']
def visit_article_reference_node(self, node, post):
if post:
return
node['githubHistory'] = ('https://github.com/'
+ self.repo + '/commits/master/'
+ 'loi_' + self.law_id + '/'
+ 'Article_' + node['id'] + '.md')
|
Add a visitor to generate the github history link.# -*- coding: utf-8 -*-
from AbstractVisitor import AbstractVisitor
class AddGitHubArticleLinkVisitor(AbstractVisitor):
def __init__(self, args):
self.repo = args.github_repository
self.law_id = None
super(AddGitHubArticleLinkVisitor, self).__init__()
def visit_law_reference_node(self, node, post):
if post:
return
self.law_id = node['lawId']
def visit_article_reference_node(self, node, post):
if post:
return
node['githubHistory'] = ('https://github.com/'
+ self.repo + '/commits/master/'
+ 'loi_' + self.law_id + '/'
+ 'Article_' + node['id'] + '.md')
|
<commit_before><commit_msg>Add a visitor to generate the github history link.<commit_after># -*- coding: utf-8 -*-
from AbstractVisitor import AbstractVisitor
class AddGitHubArticleLinkVisitor(AbstractVisitor):
def __init__(self, args):
self.repo = args.github_repository
self.law_id = None
super(AddGitHubArticleLinkVisitor, self).__init__()
def visit_law_reference_node(self, node, post):
if post:
return
self.law_id = node['lawId']
def visit_article_reference_node(self, node, post):
if post:
return
node['githubHistory'] = ('https://github.com/'
+ self.repo + '/commits/master/'
+ 'loi_' + self.law_id + '/'
+ 'Article_' + node['id'] + '.md')
|
|
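A hedged driver for the visitor above (class assumed in scope); the repository name and node dicts are made up, and the final URL follows directly from the string concatenation in visit_article_reference_node:

import argparse

args = argparse.Namespace(github_repository='Legilibre/textes')  # hypothetical repo
visitor = AddGitHubArticleLinkVisitor(args)
law = {'lawId': '78-17'}   # minimal stand-ins for parsed AST nodes
article = {'id': '1'}
visitor.visit_law_reference_node(law, post=False)
visitor.visit_article_reference_node(article, post=False)
# article['githubHistory'] ==
#   'https://github.com/Legilibre/textes/commits/master/loi_78-17/Article_1.md'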
6200630904db2452ecfb551d54974acfad978d17
|
synapse/crypto/context_factory.py
|
synapse/crypto/context_factory.py
|
from twisted.internet import reactor, ssl
from OpenSSL import SSL
class ServerContextFactory(ssl.ContextFactory):
"""Factory for PyOpenSSL SSL contexts that are used to handle incoming
connections and to make connections to remote servers."""
def __init__(self, config):
self._context = SSL.Context(SSL.SSLv23_METHOD)
self.configure_context(self._context, config)
@staticmethod
def configure_context(context, config):
context.set_options(SSL.OP_NO_SSLv2 | SSL.OP_NO_SSLv3)
context.use_certificate(config.tls_certificate)
context.use_privatekey(config.tls_private_key)
context.load_tmp_dh(config.tls_dh_params_path)
context.set_cipher_list("!ADH:HIGH+kEDH:!AECDH:HIGH+kEECDH")
def getContext(self):
return self._context
|
Add server TLS context factory
|
Add server TLS context factory
|
Python
|
apache-2.0
|
illicitonion/synapse,illicitonion/synapse,howethomas/synapse,TribeMedia/synapse,iot-factory/synapse,matrix-org/synapse,illicitonion/synapse,iot-factory/synapse,iot-factory/synapse,iot-factory/synapse,rzr/synapse,illicitonion/synapse,TribeMedia/synapse,matrix-org/synapse,rzr/synapse,howethomas/synapse,rzr/synapse,howethomas/synapse,matrix-org/synapse,rzr/synapse,matrix-org/synapse,matrix-org/synapse,TribeMedia/synapse,howethomas/synapse,TribeMedia/synapse,howethomas/synapse,rzr/synapse,matrix-org/synapse,illicitonion/synapse,iot-factory/synapse,TribeMedia/synapse
|
Add server TLS context factory
|
from twisted.internet import reactor, ssl
from OpenSSL import SSL
class ServerContextFactory(ssl.ContextFactory):
"""Factory for PyOpenSSL SSL contexts that are used to handle incoming
connections and to make connections to remote servers."""
def __init__(self, config):
self._context = SSL.Context(SSL.SSLv23_METHOD)
self.configure_context(self._context, config)
@staticmethod
def configure_context(context, config):
context.set_options(SSL.OP_NO_SSLv2 | SSL.OP_NO_SSLv3)
context.use_certificate(config.tls_certificate)
context.use_privatekey(config.tls_private_key)
context.load_tmp_dh(config.tls_dh_params_path)
context.set_cipher_list("!ADH:HIGH+kEDH:!AECDH:HIGH+kEECDH")
def getContext(self):
return self._context
|
<commit_before><commit_msg>Add server TLS context factory<commit_after>
|
from twisted.internet import reactor, ssl
from OpenSSL import SSL
class ServerContextFactory(ssl.ContextFactory):
"""Factory for PyOpenSSL SSL contexts that are used to handle incoming
connections and to make connections to remote servers."""
def __init__(self, config):
self._context = SSL.Context(SSL.SSLv23_METHOD)
self.configure_context(self._context, config)
@staticmethod
def configure_context(context, config):
context.set_options(SSL.OP_NO_SSLv2 | SSL.OP_NO_SSLv3)
context.use_certificate(config.tls_certificate)
context.use_privatekey(config.tls_private_key)
context.load_tmp_dh(config.tls_dh_params_path)
context.set_cipher_list("!ADH:HIGH+kEDH:!AECDH:HIGH+kEECDH")
def getContext(self):
return self._context
|
Add server TLS context factoryfrom twisted.internet import reactor, ssl
from OpenSSL import SSL
class ServerContextFactory(ssl.ContextFactory):
"""Factory for PyOpenSSL SSL contexts that are used to handle incoming
connections and to make connections to remote servers."""
def __init__(self, config):
self._context = SSL.Context(SSL.SSLv23_METHOD)
self.configure_context(self._context, config)
@staticmethod
def configure_context(context, config):
context.set_options(SSL.OP_NO_SSLv2 | SSL.OP_NO_SSLv3)
context.use_certificate(config.tls_certificate)
context.use_privatekey(config.tls_private_key)
context.load_tmp_dh(config.tls_dh_params_path)
context.set_cipher_list("!ADH:HIGH+kEDH:!AECDH:HIGH+kEECDH")
def getContext(self):
return self._context
|
<commit_before><commit_msg>Add server TLS context factory<commit_after>from twisted.internet import reactor, ssl
from OpenSSL import SSL
class ServerContextFactory(ssl.ContextFactory):
"""Factory for PyOpenSSL SSL contexts that are used to handle incoming
connections and to make connections to remote servers."""
def __init__(self, config):
self._context = SSL.Context(SSL.SSLv23_METHOD)
self.configure_context(self._context, config)
@staticmethod
def configure_context(context, config):
context.set_options(SSL.OP_NO_SSLv2 | SSL.OP_NO_SSLv3)
context.use_certificate(config.tls_certificate)
context.use_privatekey(config.tls_private_key)
context.load_tmp_dh(config.tls_dh_params_path)
context.set_cipher_list("!ADH:HIGH+kEDH:!AECDH:HIGH+kEECDH")
def getContext(self):
return self._context
|
|
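The same TLS hardening in a self-contained sketch, minus certificate loading; passing the cipher string as bytes is an assumption about a newer pyOpenSSL:

from OpenSSL import SSL

ctx = SSL.Context(SSL.SSLv23_METHOD)
# Negotiate the best available protocol but refuse the broken SSLv2/SSLv3 floors.
ctx.set_options(SSL.OP_NO_SSLv2 | SSL.OP_NO_SSLv3)
ctx.set_cipher_list(b"!ADH:HIGH+kEDH:!AECDH:HIGH+kEECDH")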
c7daf4548580b0de2aa9ec02cffc17ef3e796b28
|
vertex/test/test_q2qclient.py
|
vertex/test/test_q2qclient.py
|
# Copyright 2005-2008 Divmod, Inc. See LICENSE file for details
# -*- vertex.test.test_q2q.UDPConnection -*-
"""
Tests for L{vertex.q2qclient}.
"""
from twisted.trial import unittest
from twisted.internet.protocol import Factory
from twisted.protocols.amp import Command, AMP, AmpBox
from vertex import q2q, q2qclient
from vertex.test.helpers import FakeQ2QService
class TestCase(unittest.TestCase):
def test_stuff(self):
svc = FakeQ2QService()
serverAddr = q2q.Q2QAddress("domain", "accounts")
server = AMP()
def respond(box):
self.assertEqual(box['_command'], "add_user")
self.assertEqual(box['name'], "user")
self.assertEqual(box['password'], "password")
return AmpBox()
server.amp_ADD_USER = respond
factory = Factory.forProtocol(lambda: server)
chooser = {"identity-admin": factory}
svc.listenQ2Q(serverAddr, chooser, "Admin")
d = q2qclient.enregister(svc, q2q.Q2QAddress("domain", "user"), "password")
svc.flush()
self.successResultOf(d)
|
Add a silly test for vertex.q2qclient.enregister.
|
Add a silly test for vertex.q2qclient.enregister.
|
Python
|
mit
|
insequent/vertex,twisted/vertex,glyph/vertex,insequent/vertex,twisted/vertex
|
Add a silly test for vertex.q2qclient.enregister.
|
# Copyright 2005-2008 Divmod, Inc. See LICENSE file for details
# -*- vertex.test.test_q2q.UDPConnection -*-
"""
Tests for L{vertex.q2qclient}.
"""
from twisted.trial import unittest
from twisted.internet.protocol import Factory
from twisted.protocols.amp import Command, AMP, AmpBox
from vertex import q2q, q2qclient
from vertex.test.helpers import FakeQ2QService
class TestCase(unittest.TestCase):
def test_stuff(self):
svc = FakeQ2QService()
serverAddr = q2q.Q2QAddress("domain", "accounts")
server = AMP()
def respond(box):
self.assertEqual(box['_command'], "add_user")
self.assertEqual(box['name'], "user")
self.assertEqual(box['password'], "password")
return AmpBox()
server.amp_ADD_USER = respond
factory = Factory.forProtocol(lambda: server)
chooser = {"identity-admin": factory}
svc.listenQ2Q(serverAddr, chooser, "Admin")
d = q2qclient.enregister(svc, q2q.Q2QAddress("domain", "user"), "password")
svc.flush()
self.successResultOf(d)
|
<commit_before><commit_msg>Add a silly test for vertex.q2qclient.enregister.<commit_after>
|
# Copyright 2005-2008 Divmod, Inc. See LICENSE file for details
# -*- vertex.test.test_q2q.UDPConnection -*-
"""
Tests for L{vertex.q2qclient}.
"""
from twisted.trial import unittest
from twisted.internet.protocol import Factory
from twisted.protocols.amp import Command, AMP, AmpBox
from vertex import q2q, q2qclient
from vertex.test.helpers import FakeQ2QService
class TestCase(unittest.TestCase):
def test_stuff(self):
svc = FakeQ2QService()
serverAddr = q2q.Q2QAddress("domain", "accounts")
server = AMP()
def respond(box):
self.assertEqual(box['_command'], "add_user")
self.assertEqual(box['name'], "user")
self.assertEqual(box['password'], "password")
return AmpBox()
server.amp_ADD_USER = respond
factory = Factory.forProtocol(lambda: server)
chooser = {"identity-admin": factory}
svc.listenQ2Q(serverAddr, chooser, "Admin")
d = q2qclient.enregister(svc, q2q.Q2QAddress("domain", "user"), "password")
svc.flush()
self.successResultOf(d)
|
Add a silly test for vertex.q2qclient.enregister.# Copyright 2005-2008 Divmod, Inc. See LICENSE file for details
# -*- vertex.test.test_q2q.UDPConnection -*-
"""
Tests for L{vertex.q2qclient}.
"""
from twisted.trial import unittest
from twisted.internet.protocol import Factory
from twisted.protocols.amp import Command, AMP, AmpBox
from vertex import q2q, q2qclient
from vertex.test.helpers import FakeQ2QService
class TestCase(unittest.TestCase):
def test_stuff(self):
svc = FakeQ2QService()
serverAddr = q2q.Q2QAddress("domain", "accounts")
server = AMP()
def respond(box):
self.assertEqual(box['_command'], "add_user")
self.assertEqual(box['name'], "user")
self.assertEqual(box['password'], "password")
return AmpBox()
server.amp_ADD_USER = respond
factory = Factory.forProtocol(lambda: server)
chooser = {"identity-admin": factory}
svc.listenQ2Q(serverAddr, chooser, "Admin")
d = q2qclient.enregister(svc, q2q.Q2QAddress("domain", "user"), "password")
svc.flush()
self.successResultOf(d)
|
<commit_before><commit_msg>Add a silly test for vertex.q2qclient.enregister.<commit_after># Copyright 2005-2008 Divmod, Inc. See LICENSE file for details
# -*- vertex.test.test_q2q.UDPConnection -*-
"""
Tests for L{vertex.q2qclient}.
"""
from twisted.trial import unittest
from twisted.internet.protocol import Factory
from twisted.protocols.amp import Command, AMP, AmpBox
from vertex import q2q, q2qclient
from vertex.test.helpers import FakeQ2QService
class TestCase(unittest.TestCase):
def test_stuff(self):
svc = FakeQ2QService()
serverAddr = q2q.Q2QAddress("domain", "accounts")
server = AMP()
def respond(box):
self.assertEqual(box['_command'], "add_user")
self.assertEqual(box['name'], "user")
self.assertEqual(box['password'], "password")
return AmpBox()
server.amp_ADD_USER = respond
factory = Factory.forProtocol(lambda: server)
chooser = {"identity-admin": factory}
svc.listenQ2Q(serverAddr, chooser, "Admin")
d = q2qclient.enregister(svc, q2q.Q2QAddress("domain", "user"), "password")
svc.flush()
self.successResultOf(d)
|
|
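The synchronous-Deferred idiom the test leans on, in isolation: once svc.flush() has driven the fake network, successResultOf unwraps a Deferred that has already fired (defer.succeed stands in for the enregister result):

from twisted.internet import defer
from twisted.trial import unittest

class SuccessResultExample(unittest.TestCase):
    def test_already_fired(self):
        d = defer.succeed('registered')  # stand-in for the fired enregister Deferred
        self.assertEqual(self.successResultOf(d), 'registered')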
b447470e8f4e8826d87f712c6726bf8855216330
|
ureport/jobs/migrations/0005_add_photos.py
|
ureport/jobs/migrations/0005_add_photos.py
|
# Generated by Django 2.2.5 on 2019-09-26 23:07
from django.db import migrations
def generate_job_block_types(apps, schema_editor):
User = apps.get_model("auth", "User")
root = User.objects.filter(username="root").first()
if not root:
root = User.objects.create(username="root")
DashBlockType = apps.get_model("dashblocks", "DashBlockType")
DashBlockType.objects.get_or_create(
name="Photos",
slug="photos",
description="Photos",
has_title=True,
has_image=True,
has_rich_text=False,
has_summary=False,
has_link=False,
has_gallery=False,
has_color=False,
has_video=False,
has_tags=False,
created_by_id=root.id,
modified_by_id=root.id,
)
class Migration(migrations.Migration):
dependencies = [("jobs", "0004_auto_20170615_1455")]
operations = [migrations.RunPython(generate_job_block_types)]
|
Add migration to add photo blocks type
|
Add migration to add photo blocks type
|
Python
|
agpl-3.0
|
Ilhasoft/ureport,rapidpro/ureport,rapidpro/ureport,Ilhasoft/ureport,Ilhasoft/ureport,rapidpro/ureport,Ilhasoft/ureport,rapidpro/ureport
|
Add migration to add photo blocks type
|
# Generated by Django 2.2.5 on 2019-09-26 23:07
from django.db import migrations
def generate_job_block_types(apps, schema_editor):
User = apps.get_model("auth", "User")
root = User.objects.filter(username="root").first()
if not root:
root = User.objects.create(username="root")
DashBlockType = apps.get_model("dashblocks", "DashBlockType")
DashBlockType.objects.get_or_create(
name="Photos",
slug="photos",
description="Photos",
has_title=True,
has_image=True,
has_rich_text=False,
has_summary=False,
has_link=False,
has_gallery=False,
has_color=False,
has_video=False,
has_tags=False,
created_by_id=root.id,
modified_by_id=root.id,
)
class Migration(migrations.Migration):
dependencies = [("jobs", "0004_auto_20170615_1455")]
operations = [migrations.RunPython(generate_job_block_types)]
|
<commit_before><commit_msg>Add migration to add photo blocks type<commit_after>
|
# Generated by Django 2.2.5 on 2019-09-26 23:07
from django.db import migrations
def generate_job_block_types(apps, schema_editor):
User = apps.get_model("auth", "User")
root = User.objects.filter(username="root").first()
if not root:
root = User.objects.create(username="root")
DashBlockType = apps.get_model("dashblocks", "DashBlockType")
DashBlockType.objects.get_or_create(
name="Photos",
slug="photos",
description="Photos",
has_title=True,
has_image=True,
has_rich_text=False,
has_summary=False,
has_link=False,
has_gallery=False,
has_color=False,
has_video=False,
has_tags=False,
created_by_id=root.id,
modified_by_id=root.id,
)
class Migration(migrations.Migration):
dependencies = [("jobs", "0004_auto_20170615_1455")]
operations = [migrations.RunPython(generate_job_block_types)]
|
Add migration to add photo blocks type# Generated by Django 2.2.5 on 2019-09-26 23:07
from django.db import migrations
def generate_job_block_types(apps, schema_editor):
User = apps.get_model("auth", "User")
root = User.objects.filter(username="root").first()
if not root:
root = User.objects.create(username="root")
DashBlockType = apps.get_model("dashblocks", "DashBlockType")
DashBlockType.objects.get_or_create(
name="Photos",
slug="photos",
description="Photos",
has_title=True,
has_image=True,
has_rich_text=False,
has_summary=False,
has_link=False,
has_gallery=False,
has_color=False,
has_video=False,
has_tags=False,
created_by_id=root.id,
modified_by_id=root.id,
)
class Migration(migrations.Migration):
dependencies = [("jobs", "0004_auto_20170615_1455")]
operations = [migrations.RunPython(generate_job_block_types)]
|
<commit_before><commit_msg>Add migration to add photo blocks type<commit_after># Generated by Django 2.2.5 on 2019-09-26 23:07
from django.db import migrations
def generate_job_block_types(apps, schema_editor):
User = apps.get_model("auth", "User")
root = User.objects.filter(username="root").first()
if not root:
root = User.objects.create(username="root")
DashBlockType = apps.get_model("dashblocks", "DashBlockType")
DashBlockType.objects.get_or_create(
name="Photos",
slug="photos",
description="Photos",
has_title=True,
has_image=True,
has_rich_text=False,
has_summary=False,
has_link=False,
has_gallery=False,
has_color=False,
has_video=False,
has_tags=False,
created_by_id=root.id,
modified_by_id=root.id,
)
class Migration(migrations.Migration):
dependencies = [("jobs", "0004_auto_20170615_1455")]
operations = [migrations.RunPython(generate_job_block_types)]
|
|
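A variant sketch that makes the data migration reversible; generate_job_block_types is the function defined above, and RunPython.noop (Django >= 1.8) merely lets the migration be unapplied:

from django.db import migrations

class Migration(migrations.Migration):
    dependencies = [("jobs", "0004_auto_20170615_1455")]
    operations = [
        # Same forward step, plus an explicit no-op reverse.
        migrations.RunPython(generate_job_block_types, migrations.RunPython.noop),
    ]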
3c01ff0ecee7abdd25a5812c5d3a9bfcb1df732d
|
tests/test_units/test_route_escapes.py
|
tests/test_units/test_route_escapes.py
|
import unittest
from routes.route import Route
class TestRouteEscape(unittest.TestCase):
def test_normal_route(self):
r = Route('test', '/foo/bar')
self.assertEqual(r.routelist, ['/foo/bar'])
def test_route_with_backslash(self):
r = Route('test', '/foo\\\\bar')
self.assertEqual(r.routelist, ['/foo\\bar'])
def test_route_with_random_escapes(self):
r = Route('test', '\\/f\\oo\\/ba\\r')
self.assertEqual(r.routelist, ['\\/f\\oo\\/ba\\r'])
def test_route_with_colon(self):
r = Route('test', '/foo:bar/baz')
self.assertEqual(
r.routelist, ['/foo', {'name': 'bar', 'type': ':'}, '/', 'baz'])
def test_route_with_escaped_colon(self):
r = Route('test', '/foo\\:bar/baz')
self.assertEqual(r.routelist, ['/foo:bar/baz'])
def test_route_with_both_colons(self):
r = Route('test', '/prefix/escaped\\:escaped/foo=:notescaped/bar=42')
self.assertEqual(
r.routelist, ['/prefix/escaped:escaped/foo=',
{'name': 'notescaped', 'type': ':'}, '/', 'bar=42'])
def test_route_with_all_escapes(self):
r = Route('test', '/hmm\\:\\*\\{\\}*star/{brackets}/:colon')
self.assertEqual(
r.routelist, ['/hmm:*{}', {'name': 'star', 'type': '*'}, '/',
{'name': 'brackets', 'type': ':'}, '/',
{'name': 'colon', 'type': ':'}])
|
Add tests for backslash escapes in route paths
|
Add tests for backslash escapes in route paths
Co-Authored-By: Stephen Finucane <06fa905d7f2aaced6dc72e9511c71a2a51e8aead@that.guru>
|
Python
|
mit
|
bbangert/routes,webknjaz/routes
|
Add tests for backslash escapes in route paths
Co-Authored-By: Stephen Finucane <06fa905d7f2aaced6dc72e9511c71a2a51e8aead@that.guru>
|
import unittest
from routes.route import Route
class TestRouteEscape(unittest.TestCase):
def test_normal_route(self):
r = Route('test', '/foo/bar')
self.assertEqual(r.routelist, ['/foo/bar'])
def test_route_with_backslash(self):
r = Route('test', '/foo\\\\bar')
self.assertEqual(r.routelist, ['/foo\\bar'])
def test_route_with_random_escapes(self):
r = Route('test', '\\/f\\oo\\/ba\\r')
self.assertEqual(r.routelist, ['\\/f\\oo\\/ba\\r'])
def test_route_with_colon(self):
r = Route('test', '/foo:bar/baz')
self.assertEqual(
r.routelist, ['/foo', {'name': 'bar', 'type': ':'}, '/', 'baz'])
def test_route_with_escaped_colon(self):
r = Route('test', '/foo\\:bar/baz')
self.assertEqual(r.routelist, ['/foo:bar/baz'])
def test_route_with_both_colons(self):
r = Route('test', '/prefix/escaped\\:escaped/foo=:notescaped/bar=42')
self.assertEqual(
r.routelist, ['/prefix/escaped:escaped/foo=',
{'name': 'notescaped', 'type': ':'}, '/', 'bar=42'])
def test_route_with_all_escapes(self):
r = Route('test', '/hmm\\:\\*\\{\\}*star/{brackets}/:colon')
self.assertEqual(
r.routelist, ['/hmm:*{}', {'name': 'star', 'type': '*'}, '/',
{'name': 'brackets', 'type': ':'}, '/',
{'name': 'colon', 'type': ':'}])
|
<commit_before><commit_msg>Add tests for backslash escapes in route paths
Co-Authored-By: Stephen Finucane <06fa905d7f2aaced6dc72e9511c71a2a51e8aead@that.guru><commit_after>
|
import unittest
from routes.route import Route
class TestRouteEscape(unittest.TestCase):
def test_normal_route(self):
r = Route('test', '/foo/bar')
self.assertEqual(r.routelist, ['/foo/bar'])
def test_route_with_backslash(self):
r = Route('test', '/foo\\\\bar')
self.assertEqual(r.routelist, ['/foo\\bar'])
def test_route_with_random_escapes(self):
r = Route('test', '\\/f\\oo\\/ba\\r')
self.assertEqual(r.routelist, ['\\/f\\oo\\/ba\\r'])
def test_route_with_colon(self):
r = Route('test', '/foo:bar/baz')
self.assertEqual(
r.routelist, ['/foo', {'name': 'bar', 'type': ':'}, '/', 'baz'])
def test_route_with_escaped_colon(self):
r = Route('test', '/foo\\:bar/baz')
self.assertEqual(r.routelist, ['/foo:bar/baz'])
def test_route_with_both_colons(self):
r = Route('test', '/prefix/escaped\\:escaped/foo=:notescaped/bar=42')
self.assertEqual(
r.routelist, ['/prefix/escaped:escaped/foo=',
{'name': 'notescaped', 'type': ':'}, '/', 'bar=42'])
def test_route_with_all_escapes(self):
r = Route('test', '/hmm\\:\\*\\{\\}*star/{brackets}/:colon')
self.assertEqual(
r.routelist, ['/hmm:*{}', {'name': 'star', 'type': '*'}, '/',
{'name': 'brackets', 'type': ':'}, '/',
{'name': 'colon', 'type': ':'}])
|
Add tests for backslash escapes in route paths
Co-Authored-By: Stephen Finucane <06fa905d7f2aaced6dc72e9511c71a2a51e8aead@that.guru>import unittest
from routes.route import Route
class TestRouteEscape(unittest.TestCase):
def test_normal_route(self):
r = Route('test', '/foo/bar')
self.assertEqual(r.routelist, ['/foo/bar'])
def test_route_with_backslash(self):
r = Route('test', '/foo\\\\bar')
self.assertEqual(r.routelist, ['/foo\\bar'])
def test_route_with_random_escapes(self):
r = Route('test', '\\/f\\oo\\/ba\\r')
self.assertEqual(r.routelist, ['\\/f\\oo\\/ba\\r'])
def test_route_with_colon(self):
r = Route('test', '/foo:bar/baz')
self.assertEqual(
r.routelist, ['/foo', {'name': 'bar', 'type': ':'}, '/', 'baz'])
def test_route_with_escaped_colon(self):
r = Route('test', '/foo\\:bar/baz')
self.assertEqual(r.routelist, ['/foo:bar/baz'])
def test_route_with_both_colons(self):
r = Route('test', '/prefix/escaped\\:escaped/foo=:notescaped/bar=42')
self.assertEqual(
r.routelist, ['/prefix/escaped:escaped/foo=',
{'name': 'notescaped', 'type': ':'}, '/', 'bar=42'])
def test_route_with_all_escapes(self):
r = Route('test', '/hmm\\:\\*\\{\\}*star/{brackets}/:colon')
self.assertEqual(
r.routelist, ['/hmm:*{}', {'name': 'star', 'type': '*'}, '/',
{'name': 'brackets', 'type': ':'}, '/',
{'name': 'colon', 'type': ':'}])
|
<commit_before><commit_msg>Add tests for backslash escapes in route paths
Co-Authored-By: Stephen Finucane <06fa905d7f2aaced6dc72e9511c71a2a51e8aead@that.guru><commit_after>import unittest
from routes.route import Route
class TestRouteEscape(unittest.TestCase):
def test_normal_route(self):
r = Route('test', '/foo/bar')
self.assertEqual(r.routelist, ['/foo/bar'])
def test_route_with_backslash(self):
r = Route('test', '/foo\\\\bar')
self.assertEqual(r.routelist, ['/foo\\bar'])
def test_route_with_random_escapes(self):
r = Route('test', '\\/f\\oo\\/ba\\r')
self.assertEqual(r.routelist, ['\\/f\\oo\\/ba\\r'])
def test_route_with_colon(self):
r = Route('test', '/foo:bar/baz')
self.assertEqual(
r.routelist, ['/foo', {'name': 'bar', 'type': ':'}, '/', 'baz'])
def test_route_with_escaped_colon(self):
r = Route('test', '/foo\\:bar/baz')
self.assertEqual(r.routelist, ['/foo:bar/baz'])
def test_route_with_both_colons(self):
r = Route('test', '/prefix/escaped\\:escaped/foo=:notescaped/bar=42')
self.assertEqual(
r.routelist, ['/prefix/escaped:escaped/foo=',
{'name': 'notescaped', 'type': ':'}, '/', 'bar=42'])
def test_route_with_all_escapes(self):
r = Route('test', '/hmm\\:\\*\\{\\}*star/{brackets}/:colon')
self.assertEqual(
r.routelist, ['/hmm:*{}', {'name': 'star', 'type': '*'}, '/',
{'name': 'brackets', 'type': ':'}, '/',
{'name': 'colon', 'type': ':'}])
|
|
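The escaping behaviour poked at interactively; the expected output lines are copied from the assertions above rather than re-derived:

from routes.route import Route

print(Route('t', '/foo\\:bar/baz').routelist)
# ['/foo:bar/baz']  -- the backslash suppresses the ':' variable marker
print(Route('t', '/foo:bar/baz').routelist)
# ['/foo', {'name': 'bar', 'type': ':'}, '/', 'baz']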
52c34e5cd6222de35ab750e6feb88be4ef8498f1
|
vumi/middleware/session_length.py
|
vumi/middleware/session_length.py
|
# -*- test-case-name: vumi.middleware.tests.test_session_length -*-
from twisted.internet.defer import inlineCallbacks
from vumi.middleware.base import BaseMiddleware
from vumi.persist.txredis_manager import TxRedisManager
class SessionLengthMiddleware(BaseMiddleware):
""" Middleware for storing the session length in the message.
Session length is stored if the end of the session is reached.
Configuration option:
:param dict redis:
Redis configuration parameters.
"""
@inlineCallbacks
def setup_middleware(self):
r_config = self.config.get('redis_manager', {})
self.redis = yield TxRedisManager.from_config(r_config)
@inlineCallbacks
def teardown_middleware(self):
yield self.redis.close_manager()
|
Add basic framework for middleware.
|
Add basic framework for middleware.
|
Python
|
bsd-3-clause
|
TouK/vumi,TouK/vumi,harrissoerja/vumi,vishwaprakashmishra/xmatrix,vishwaprakashmishra/xmatrix,TouK/vumi,harrissoerja/vumi,harrissoerja/vumi,vishwaprakashmishra/xmatrix
|
Add basic framework for middleware.
|
# -*- test-case-name: vumi.middleware.tests.test_session_length -*-
from twisted.internet.defer import inlineCallbacks
from vumi.middleware.base import BaseMiddleware
from vumi.persist.txredis_manager import TxRedisManager
class SessionLengthMiddleware(BaseMiddleware):
""" Middleware for storing the session length in the message.
Session length is stored if the end of the session is reached.
Configuration option:
:param dict redis:
Redis configuration parameters.
"""
@inlineCallbacks
def setup_middleware(self):
r_config = self.config.get('redis_manager', {})
self.redis = yield TxRedisManager.from_config(r_config)
@inlineCallbacks
def teardown_middleware(self):
yield self.redis.close_manager()
|
<commit_before><commit_msg>Add basic framework for middleware.<commit_after>
|
# -*- test-case-name: vumi.middleware.tests.test_session_length -*-
from twisted.internet.defer import inlineCallbacks
from vumi.middleware.base import BaseMiddleware
from vumi.persist.txredis_manager import TxRedisManager
class SessionLengthMiddleware(BaseMiddleware):
""" Middleware for storing the session length in the message.
Session length is stored if the end of the session is reached.
Configuration option:
:param dict redis:
Redis configuration parameters.
"""
@inlineCallbacks
def setup_middleware(self):
r_config = self.config.get('redis_manager', {})
self.redis = yield TxRedisManager.from_config(r_config)
@inlineCallbacks
def teardown_middleware(self):
yield self.redis.close_manager()
|
Add basic framework for middleware.# -*- test-case-name: vumi.middleware.tests.test_session_length -*-
from twisted.internet.defer import inlineCallbacks
from vumi.middleware.base import BaseMiddleware
from vumi.persist.txredis_manager import TxRedisManager
class SessionLengthMiddleware(BaseMiddleware):
""" Middleware for storing the session length in the message.
Session length is stored if the end of the session is reached.
Configuration option:
:param dict redis:
Redis configuration parameters.
"""
@inlineCallbacks
def setup_middleware(self):
r_config = self.config.get('redis_manager', {})
self.redis = yield TxRedisManager.from_config(r_config)
@inlineCallbacks
def teardown_middleware(self):
yield self.redis.close_manager()
|
<commit_before><commit_msg>Add basic framework for middleware.<commit_after># -*- test-case-name: vumi.middleware.tests.test_session_length -*-
from twisted.internet.defer import inlineCallbacks
from vumi.middleware.base import BaseMiddleware
from vumi.persist.txredis_manager import TxRedisManager
class SessionLengthMiddleware(BaseMiddleware):
""" Middleware for storing the session length in the message.
Session length is stored if the end of the session is reached.
Configuration option:
:param dict redis:
Redis configuration parameters.
"""
@inlineCallbacks
def setup_middleware(self):
r_config = self.config.get('redis_manager', {})
self.redis = yield TxRedisManager.from_config(r_config)
@inlineCallbacks
def teardown_middleware(self):
yield self.redis.close_manager()
|
|
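A minimal sketch of the TxRedisManager lifecycle the middleware wraps, run under a Twisted reactor; the key written is purely illustrative, and 'redis_manager' mirrors the config key read above:

from twisted.internet import defer
from vumi.persist.txredis_manager import TxRedisManager

@defer.inlineCallbacks
def demo(config):
    redis = yield TxRedisManager.from_config(config.get('redis_manager', {}))
    try:
        yield redis.set('session:example:start', '1234567890')  # illustrative key
    finally:
        yield redis.close_manager()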
8eb21692d01f474b2dd8f0d7e05cb0713dbb0475
|
CodeFights/growingPlant.py
|
CodeFights/growingPlant.py
|
#!/usr/local/bin/python
# Code Fights Growing Plant Problem
def growingPlant(upSpeed, downSpeed, desiredHeight):
if upSpeed >= desiredHeight:
return 1
else:
return (desiredHeight // (upSpeed - downSpeed))
def main():
tests = [
[100, 10, 910, 10],
[10, 9, 4, 1]
]
for t in tests:
res = growingPlant(t[0], t[1], t[2])
ans = t[3]
if ans == res:
print("PASSED: growingPlant({}, {}, {}) returned {}"
.format(t[0], t[1], t[2], res))
else:
print(("FAILED: growingPlant({}, {}, {}) returned {},"
"answer: {}").format(t[0], t[1], t[2], res, ans))
if __name__ == '__main__':
main()
|
Solve Code Fights growing plant problem
|
Solve Code Fights growing plant problem
|
Python
|
mit
|
HKuz/Test_Code
|
Solve Code Fights growing plant problem
|
#!/usr/local/bin/python
# Code Fights Growing Plant Problem
def growingPlant(upSpeed, downSpeed, desiredHeight):
if upSpeed >= desiredHeight:
return 1
else:
return (desiredHeight // (upSpeed - downSpeed))
def main():
tests = [
[100, 10, 910, 10],
[10, 9, 4, 1]
]
for t in tests:
res = growingPlant(t[0], t[1], t[2])
ans = t[3]
if ans == res:
print("PASSED: growingPlant({}, {}, {}) returned {}"
.format(t[0], t[1], t[2], res))
else:
print(("FAILED: growingPlant({}, {}, {}) returned {},"
"answer: {}").format(t[0], t[1], t[2], res, ans))
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Solve Code Fights growing plant problem<commit_after>
|
#!/usr/local/bin/python
# Code Fights Growing Plant Problem
def growingPlant(upSpeed, downSpeed, desiredHeight):
if upSpeed >= desiredHeight:
return 1
else:
return (desiredHeight // (upSpeed - downSpeed))
def main():
tests = [
[100, 10, 910, 10],
[10, 9, 4, 1]
]
for t in tests:
res = growingPlant(t[0], t[1], t[2])
ans = t[3]
if ans == res:
print("PASSED: growingPlant({}, {}, {}) returned {}"
.format(t[0], t[1], t[2], res))
else:
print(("FAILED: growingPlant({}, {}, {}) returned {},"
"answer: {}").format(t[0], t[1], t[2], res, ans))
if __name__ == '__main__':
main()
|
Solve Code Fights growing plant problem#!/usr/local/bin/python
# Code Fights Growing Plant Problem
def growingPlant(upSpeed, downSpeed, desiredHeight):
if upSpeed >= desiredHeight:
return 1
else:
return (desiredHeight // (upSpeed - downSpeed))
def main():
tests = [
[100, 10, 910, 10],
[10, 9, 4, 1]
]
for t in tests:
res = growingPlant(t[0], t[1], t[2])
ans = t[3]
if ans == res:
print("PASSED: growingPlant({}, {}, {}) returned {}"
.format(t[0], t[1], t[2], res))
else:
print(("FAILED: growingPlant({}, {}, {}) returned {},"
"answer: {}").format(t[0], t[1], t[2], res, ans))
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Solve Code Fights growing plant problem<commit_after>#!/usr/local/bin/python
# Code Fights Growing Plant Problem
def growingPlant(upSpeed, downSpeed, desiredHeight):
if upSpeed >= desiredHeight:
return 1
else:
return (desiredHeight // (upSpeed - downSpeed))
def main():
tests = [
[100, 10, 910, 10],
[10, 9, 4, 1]
]
for t in tests:
res = growingPlant(t[0], t[1], t[2])
ans = t[3]
if ans == res:
print("PASSED: growingPlant({}, {}, {}) returned {}"
.format(t[0], t[1], t[2], res))
else:
print(("FAILED: growingPlant({}, {}, {}) returned {},"
"answer: {}").format(t[0], t[1], t[2], res, ans))
if __name__ == '__main__':
main()
|
|
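Worth noting as a hedged alternative: the integer-division shortcut above satisfies both bundled tests, but a general solution counts the first day's climb separately (the third assert shows where the two disagree):

import math

def growing_plant_days(up, down, desired):
    if up >= desired:
        return 1
    # After day 1 the plant nets (up - down) per day until a daytime
    # climb first reaches the target height.
    return int(math.ceil((desired - up) / float(up - down))) + 1

assert growing_plant_days(100, 10, 910) == 10
assert growing_plant_days(10, 9, 4) == 1
assert growing_plant_days(10, 9, 100) == 91   # the shortcut would return 100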
824eea86b5166bce428c466c867708a9c8b15fb0
|
tests/lows_and_highs.py
|
tests/lows_and_highs.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
from vector import cb, plot_peaks
import peakutils.peak
def plot_peaks_lows_highs(x, highs, lows, algorithm=None, mph=None, mpd=None):
"""Plot results of the peak dectection."""
try:
import matplotlib.pyplot as plt
except ImportError:
print('matplotlib is not available.')
return
_, ax = plt.subplots(1, 1, figsize=(8, 4))
ax.plot(x, 'b', lw=1)
if highs.size:
label = 'high peak'
label = label + 's' if highs.size > 1 else label
ax.plot(highs, x[highs], '+', mfc=None, mec='r', mew=2, ms=8,
label='%d %s' % (highs.size, label))
ax.legend(loc='best', framealpha=.5, numpoints=1)
if lows.size:
label = 'low peak'
label = label + 's' if lows.size > 1 else label
ax.plot(lows, x[lows], '+', mfc=None, mec='g', mew=2, ms=8,
label='%d %s' % (lows.size, label))
ax.legend(loc='best', framealpha=.5, numpoints=1)
ax.set_xlim(-.02*x.size, x.size*1.02-1)
ymin, ymax = x[np.isfinite(x)].min(), x[np.isfinite(x)].max()
yrange = ymax - ymin if ymax > ymin else 1
ax.set_ylim(ymin - 0.1*yrange, ymax + 0.1*yrange)
ax.set_xlabel('Data #', fontsize=14)
ax.set_ylabel('Amplitude', fontsize=14)
ax.set_title('%s (mph=%s, mpd=%s)' % (algorithm, mph, mpd))
plt.show()
threshold = 0.02
min_dist = 150
print('Detect high peaks with minimum height and distance filters.')
highs = peakutils.peak.indexes(
np.array(cb),
thres=threshold/max(cb), min_dist=min_dist
)
print('High peaks are: %s' % (highs))
print('Detect low peaks with minimum height and distance filters.')
# Invert the signal.
cbInverted = cb * -1
lows = peakutils.peak.indexes(
np.array(cbInverted),
thres=threshold/max(cbInverted), min_dist=min_dist
)
print('Low peaks are: %s' % (lows))
plot_peaks_lows_highs(
np.array(cb),
highs,
lows,
mph=threshold, mpd=min_dist, algorithm='peakutils.peak.indexes'
)
|
Add example for high and low peak detection
|
Add example for high and low peak detection
|
Python
|
mit
|
MonsieurV/py-findpeaks,MonsieurV/py-findpeaks
|
Add example for high and low peak detection
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
from vector import cb, plot_peaks
import peakutils.peak
def plot_peaks_lows_highs(x, highs, lows, algorithm=None, mph=None, mpd=None):
"""Plot results of the peak dectection."""
try:
import matplotlib.pyplot as plt
except ImportError:
print('matplotlib is not available.')
return
_, ax = plt.subplots(1, 1, figsize=(8, 4))
ax.plot(x, 'b', lw=1)
if highs.size:
label = 'high peak'
label = label + 's' if highs.size > 1 else label
ax.plot(highs, x[highs], '+', mfc=None, mec='r', mew=2, ms=8,
label='%d %s' % (highs.size, label))
ax.legend(loc='best', framealpha=.5, numpoints=1)
if lows.size:
label = 'low peak'
label = label + 's' if lows.size > 1 else label
ax.plot(lows, x[lows], '+', mfc=None, mec='g', mew=2, ms=8,
label='%d %s' % (lows.size, label))
ax.legend(loc='best', framealpha=.5, numpoints=1)
ax.set_xlim(-.02*x.size, x.size*1.02-1)
ymin, ymax = x[np.isfinite(x)].min(), x[np.isfinite(x)].max()
yrange = ymax - ymin if ymax > ymin else 1
ax.set_ylim(ymin - 0.1*yrange, ymax + 0.1*yrange)
ax.set_xlabel('Data #', fontsize=14)
ax.set_ylabel('Amplitude', fontsize=14)
ax.set_title('%s (mph=%s, mpd=%s)' % (algorithm, mph, mpd))
plt.show()
threshold = 0.02
min_dist = 150
print('Detect high peaks with minimum height and distance filters.')
highs = peakutils.peak.indexes(
np.array(cb),
thres=threshold/max(cb), min_dist=min_dist
)
print('High peaks are: %s' % (highs))
print('Detect low peaks with minimum height and distance filters.')
# Invert the signal.
cbInverted = cb * -1
lows = peakutils.peak.indexes(
np.array(cbInverted),
thres=threshold/max(cbInverted), min_dist=min_dist
)
print('Low peaks are: %s' % (lows))
plot_peaks_lows_highs(
np.array(cb),
highs,
lows,
mph=threshold, mpd=min_dist, algorithm='peakutils.peak.indexes'
)
|
<commit_before><commit_msg>Add example for high and low peak detection<commit_after>
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
from vector import cb, plot_peaks
import peakutils.peak
def plot_peaks_lows_highs(x, highs, lows, algorithm=None, mph=None, mpd=None):
"""Plot results of the peak dectection."""
try:
import matplotlib.pyplot as plt
except ImportError:
print('matplotlib is not available.')
return
_, ax = plt.subplots(1, 1, figsize=(8, 4))
ax.plot(x, 'b', lw=1)
if highs.size:
label = 'high peak'
label = label + 's' if highs.size > 1 else label
ax.plot(highs, x[highs], '+', mfc=None, mec='r', mew=2, ms=8,
label='%d %s' % (highs.size, label))
ax.legend(loc='best', framealpha=.5, numpoints=1)
if lows.size:
label = 'low peak'
label = label + 's' if lows.size > 1 else label
ax.plot(lows, x[lows], '+', mfc=None, mec='g', mew=2, ms=8,
label='%d %s' % (lows.size, label))
ax.legend(loc='best', framealpha=.5, numpoints=1)
ax.set_xlim(-.02*x.size, x.size*1.02-1)
ymin, ymax = x[np.isfinite(x)].min(), x[np.isfinite(x)].max()
yrange = ymax - ymin if ymax > ymin else 1
ax.set_ylim(ymin - 0.1*yrange, ymax + 0.1*yrange)
ax.set_xlabel('Data #', fontsize=14)
ax.set_ylabel('Amplitude', fontsize=14)
ax.set_title('%s (mph=%s, mpd=%s)' % (algorithm, mph, mpd))
plt.show()
threshold = 0.02
min_dist = 150
print('Detect high peaks with minimum height and distance filters.')
highs = peakutils.peak.indexes(
np.array(cb),
thres=threshold/max(cb), min_dist=min_dist
)
print('High peaks are: %s' % (highs))
print('Detect low peaks with minimum height and distance filters.')
# Invert the signal.
cbInverted = cb * -1
lows = peakutils.peak.indexes(
np.array(cbInverted),
thres=threshold/max(cbInverted), min_dist=min_dist
)
print('Low peaks are: %s' % (lows))
plot_peaks_lows_highs(
np.array(cb),
highs,
lows,
mph=threshold, mpd=min_dist, algorithm='peakutils.peak.indexes'
)
|
Add example for high and low peak detection#!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
from vector import cb, plot_peaks
import peakutils.peak
def plot_peaks_lows_highs(x, highs, lows, algorithm=None, mph=None, mpd=None):
"""Plot results of the peak dectection."""
try:
import matplotlib.pyplot as plt
except ImportError:
print('matplotlib is not available.')
return
_, ax = plt.subplots(1, 1, figsize=(8, 4))
ax.plot(x, 'b', lw=1)
if highs.size:
label = 'high peak'
label = label + 's' if highs.size > 1 else label
ax.plot(highs, x[highs], '+', mfc=None, mec='r', mew=2, ms=8,
label='%d %s' % (highs.size, label))
ax.legend(loc='best', framealpha=.5, numpoints=1)
if lows.size:
label = 'low peak'
label = label + 's' if lows.size > 1 else label
ax.plot(lows, x[lows], '+', mfc=None, mec='g', mew=2, ms=8,
label='%d %s' % (lows.size, label))
ax.legend(loc='best', framealpha=.5, numpoints=1)
ax.set_xlim(-.02*x.size, x.size*1.02-1)
ymin, ymax = x[np.isfinite(x)].min(), x[np.isfinite(x)].max()
yrange = ymax - ymin if ymax > ymin else 1
ax.set_ylim(ymin - 0.1*yrange, ymax + 0.1*yrange)
ax.set_xlabel('Data #', fontsize=14)
ax.set_ylabel('Amplitude', fontsize=14)
ax.set_title('%s (mph=%s, mpd=%s)' % (algorithm, mph, mpd))
plt.show()
threshold = 0.02
min_dist = 150
print('Detect high peaks with minimum height and distance filters.')
highs = peakutils.peak.indexes(
np.array(cb),
thres=threshold/max(cb), min_dist=min_dist
)
print('High peaks are: %s' % (highs))
print('Detect low peaks with minimum height and distance filters.')
# Invert the signal.
cbInverted = cb * -1
lows = peakutils.peak.indexes(
np.array(cbInverted),
thres=threshold/max(cbInverted), min_dist=min_dist
)
print('Low peaks are: %s' % (lows))
plot_peaks_lows_highs(
np.array(cb),
highs,
lows,
mph=threshold, mpd=min_dist, algorithm='peakutils.peak.indexes'
)
|
<commit_before><commit_msg>Add example for high and low peak detection<commit_after>#!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
from vector import cb, plot_peaks
import peakutils.peak
def plot_peaks_lows_highs(x, highs, lows, algorithm=None, mph=None, mpd=None):
"""Plot results of the peak dectection."""
try:
import matplotlib.pyplot as plt
except ImportError:
print('matplotlib is not available.')
return
_, ax = plt.subplots(1, 1, figsize=(8, 4))
ax.plot(x, 'b', lw=1)
if highs.size:
label = 'high peak'
label = label + 's' if highs.size > 1 else label
ax.plot(highs, x[highs], '+', mfc=None, mec='r', mew=2, ms=8,
label='%d %s' % (highs.size, label))
ax.legend(loc='best', framealpha=.5, numpoints=1)
if lows.size:
label = 'low peak'
label = label + 's' if lows.size > 1 else label
ax.plot(lows, x[lows], '+', mfc=None, mec='g', mew=2, ms=8,
label='%d %s' % (lows.size, label))
ax.legend(loc='best', framealpha=.5, numpoints=1)
ax.set_xlim(-.02*x.size, x.size*1.02-1)
ymin, ymax = x[np.isfinite(x)].min(), x[np.isfinite(x)].max()
yrange = ymax - ymin if ymax > ymin else 1
ax.set_ylim(ymin - 0.1*yrange, ymax + 0.1*yrange)
ax.set_xlabel('Data #', fontsize=14)
ax.set_ylabel('Amplitude', fontsize=14)
ax.set_title('%s (mph=%s, mpd=%s)' % (algorithm, mph, mpd))
plt.show()
threshold = 0.02
min_dist = 150
print('Detect high peaks with minimum height and distance filters.')
highs = peakutils.peak.indexes(
np.array(cb),
thres=threshold/max(cb), min_dist=min_dist
)
print('High peaks are: %s' % (highs))
print('Detect low peaks with minimum height and distance filters.')
# Invert the signal.
cbInverted = cb * -1
lows = peakutils.peak.indexes(
np.array(cbInverted),
thres=threshold/max(cbInverted), min_dist=min_dist
)
print('Low peaks are: %s' % (lows))
plot_peaks_lows_highs(
np.array(cb),
highs,
lows,
mph=threshold, mpd=min_dist, algorithm='peakutils.peak.indexes'
)
|
|
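The inversion trick is not specific to peakutils: any detector that only finds maxima can locate minima on the negated signal. A minimal sketch, assuming SciPy is available (scipy.signal.find_peaks is not part of the record above) and using hand-picked sample values:
import numpy as np
from scipy.signal import find_peaks  # assumption: SciPy >= 1.1 installed
signal = np.array([0., 1., 0., -2., 0., 3., 0., -1., 0.])  # hypothetical data
# High peaks: local maxima above a height threshold.
highs, _ = find_peaks(signal, height=0.5, distance=2)
# Low peaks: negate the signal so minima become maxima, then detect again.
lows, _ = find_peaks(-signal, height=0.5, distance=2)
print(highs)  # [1 5] -- indices of the local maxima
print(lows)   # [3 7] -- indices of the local minima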
73f891d7eebb5d524cee2da606bc0d195aeea890
|
tests/test_checkinfo.py
|
tests/test_checkinfo.py
|
from itertools import combinations
from string import ascii_lowercase
from botbot import checkinfo as ci
def test_bidirectional_problem_serialization(tmpdir):
info = ci.CheckResult(tmpdir.strpath)
for probs in combinations(ascii_lowercase, 3):
info.add_problem(''.join(probs))
old_probs = info.problems
ps = info.serialize_problems()
info.decode_probstr(ps)
assert info.problems == old_probs
|
Add new test for CheckResult
|
Add new test for CheckResult
|
Python
|
mit
|
jackstanek/BotBot,jackstanek/BotBot
|
Add new test for CheckResult
|
from itertools import combinations
from string import ascii_lowercase
from botbot import checkinfo as ci
def test_bidirectional_problem_serialization(tmpdir):
info = ci.CheckResult(tmpdir.strpath)
for probs in combinations(ascii_lowercase, 3):
info.add_problem(''.join(probs))
old_probs = info.problems
ps = info.serialize_problems()
info.decode_probstr(ps)
assert info.problems == old_probs
|
<commit_before><commit_msg>Add new test for CheckResult<commit_after>
|
from itertools import combinations
from string import ascii_lowercase
from botbot import checkinfo as ci
def test_bidirectional_problem_serialization(tmpdir):
info = ci.CheckResult(tmpdir.strpath)
for probs in combinations(ascii_lowercase, 3):
info.add_problem(''.join(probs))
old_probs = info.problems
ps = info.serialize_problems()
info.decode_probstr(ps)
assert info.problems == old_probs
|
Add new test for CheckResultfrom itertools import combinations
from string import ascii_lowercase
from botbot import checkinfo as ci
def test_bidirectional_problem_serialization(tmpdir):
info = ci.CheckResult(tmpdir.strpath)
for probs in combinations(ascii_lowercase, 3):
info.add_problem(''.join(probs))
old_probs = info.problems
ps = info.serialize_problems()
info.decode_probstr(ps)
assert info.problems == old_probs
|
<commit_before><commit_msg>Add new test for CheckResult<commit_after>from itertools import combinations
from string import ascii_lowercase
from botbot import checkinfo as ci
def test_bidirectional_problem_serialization(tmpdir):
info = ci.CheckResult(tmpdir.strpath)
for probs in combinations(ascii_lowercase, 3):
info.add_problem(''.join(probs))
old_probs = info.problems
ps = info.serialize_problems()
info.decode_probstr(ps)
assert info.problems == old_probs
|
|
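The assertion above is a round-trip property: serializing and then decoding must reproduce the original problem set. The same pattern in a self-contained sketch, with hypothetical serialize/deserialize helpers standing in for CheckResult's methods:
def serialize(problems):
    # Hypothetical encoder: join sorted problem codes with commas.
    return ','.join(sorted(problems))
def deserialize(probstr):
    # Inverse of serialize; the empty string maps back to an empty set.
    return set(probstr.split(',')) if probstr else set()
def test_bidirectional_serialization():
    original = {'abc', 'abd', 'xyz'}
    assert deserialize(serialize(original)) == original
test_bidirectional_serialization()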
9a2a8100b566d2332db958979b776a10ac59f38a
|
alexa_parser.py
|
alexa_parser.py
|
import csv
import requests
import zipfile
import os
import pika
from os.path import expanduser
BASEPATH = os.path.abspath(os.path.dirname(os.path.abspath(__file__)))
ALEXA_FILE = BASEPATH + "/alexa.zip"
connection = pika.BlockingConnection(pika.ConnectionParameters('localhost'))
channel = connection.channel()
channel.queue_declare(queue='mozcompat')
r = requests.get('http://s3.amazonaws.com/alexa-static/top-1m.csv.zip')
with open(ALEXA_FILE, 'wb') as f:
f.write(r.content)
with open(ALEXA_FILE, 'rb') as f:
z = zipfile.ZipFile(f)
for name in z.namelist():
outpath = BASEPATH
z.extract(name, outpath)
with open(BASEPATH + '/top-1m.csv', 'rb') as f:
for site in [line.split(',')[1] for line in f.read().split('\n')[:10]]:
site = "http://%s" % site
print site
channel.basic_publish(exchange='', routing_key='mozcompat', body=site)
|
Add Alexa top 1 Million fetcher to push into RabbitMQ
|
Add Alexa top 1 Million fetcher to push into RabbitMQ
|
Python
|
mpl-2.0
|
seiflotfy/compatipede,seiflotfy/compatipede,seiflotfy/compatipede,seiflotfy/compatipede
|
Add Alexa top 1 Million fetcher to push into RabbitMQ
|
import csv
import requests
import zipfile
import os
import pika
from os.path import expanduser
BASEPATH = os.path.abspath(os.path.dirname(os.path.abspath(__file__)))
ALEXA_FILE = BASEPATH + "/alexa.zip"
connection = pika.BlockingConnection(pika.ConnectionParameters('localhost'))
channel = connection.channel()
channel.queue_declare(queue='mozcompat')
r = requests.get('http://s3.amazonaws.com/alexa-static/top-1m.csv.zip')
with open(ALEXA_FILE, 'wb') as f:
f.write(r.content)
with open(ALEXA_FILE, 'rb') as f:
z = zipfile.ZipFile(f)
for name in z.namelist():
outpath = BASEPATH
z.extract(name, outpath)
with open(BASEPATH + '/top-1m.csv', 'rb') as f:
for site in [line.split(',')[1] for line in f.read().split('\n')[:10]]:
site = "http://%s" % site
print site
channel.basic_publish(exchange='', routing_key='mozcompat', body=site)
|
<commit_before><commit_msg>Add Alexa top 1 Million fetcher to push into RabbitMQ<commit_after>
|
import csv
import requests
import zipfile
import os
import pika
from os.path import expanduser
BASEPATH = os.path.abspath(os.path.dirname(os.path.abspath(__file__)))
ALEXA_FILE = BASEPATH + "/alexa.zip"
connection = pika.BlockingConnection(pika.ConnectionParameters('localhost'))
channel = connection.channel()
channel.queue_declare(queue='mozcompat')
r = requests.get('http://s3.amazonaws.com/alexa-static/top-1m.csv.zip')
with open(ALEXA_FILE, 'wb') as f:
f.write(r.content)
with open(ALEXA_FILE, 'rb') as f:
z = zipfile.ZipFile(f)
for name in z.namelist():
outpath = BASEPATH
z.extract(name, outpath)
with open(BASEPATH + '/top-1m.csv', 'rb') as f:
for site in [line.split(',')[1] for line in f.read().split('\n')[:10]]:
site = "http://%s" % site
print site
channel.basic_publish(exchange='', routing_key='mozcompat', body=site)
|
Add Alexa top 1 Million fetcher to push into RabbitMQimport csv
import requests
import zipfile
import os
import pika
from os.path import expanduser
BASEPATH = os.path.abspath(os.path.dirname(os.path.abspath(__file__)))
ALEXA_FILE = BASEPATH + "/alexa.zip"
connection = pika.BlockingConnection(pika.ConnectionParameters('localhost'))
channel = connection.channel()
channel.queue_declare(queue='mozcompat')
r = requests.get('http://s3.amazonaws.com/alexa-static/top-1m.csv.zip')
with open(ALEXA_FILE, 'wb') as f:
f.write(r.content)
with open(ALEXA_FILE, 'rb') as f:
z = zipfile.ZipFile(f)
for name in z.namelist():
outpath = BASEPATH
z.extract(name, outpath)
with open(BASEPATH + '/top-1m.csv', 'rb') as f:
for site in [line.split(',')[1] for line in f.read().split('\n')[:10]]:
site = "http://%s" % site
print site
channel.basic_publish(exchange='', routing_key='mozcompat', body=site)
|
<commit_before><commit_msg>Add Alexa top 1 Million fetcher to push into RabbitMQ<commit_after>import csv
import requests
import zipfile
import os
import pika
from os.path import expanduser
BASEPATH = os.path.abspath(os.path.dirname(os.path.abspath(__file__)))
ALEXA_FILE = BASEPATH + "/alexa.zip"
connection = pika.BlockingConnection(pika.ConnectionParameters('localhost'))
channel = connection.channel()
channel.queue_declare(queue='mozcompat')
r = requests.get('http://s3.amazonaws.com/alexa-static/top-1m.csv.zip')
with open(ALEXA_FILE, 'wb') as f:
f.write(r.content)
with open(ALEXA_FILE, 'rb') as f:
z = zipfile.ZipFile(f)
for name in z.namelist():
outpath = BASEPATH
z.extract(name, outpath)
with open(BASEPATH + '/top-1m.csv', 'rb') as f:
for site in [line.split(',')[1] for line in f.read().split('\n')[:10]]:
site = "http://%s" % site
print site
channel.basic_publish(exchange='', routing_key='mozcompat', body=site)
|
|
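The script above is Python 2 (bare print) and splits CSV lines by hand. A hedged Python 3 sketch of the same fetch-and-publish loop; the URL, archive member name, and queue name come from the record, while the csv/io handling is illustrative:
import csv
import io
import zipfile
import pika
import requests
resp = requests.get('http://s3.amazonaws.com/alexa-static/top-1m.csv.zip')
archive = zipfile.ZipFile(io.BytesIO(resp.content))
connection = pika.BlockingConnection(pika.ConnectionParameters('localhost'))
channel = connection.channel()
channel.queue_declare(queue='mozcompat')
with archive.open('top-1m.csv') as f:
    reader = csv.reader(io.TextIOWrapper(f, encoding='utf-8'))
    for _, row in zip(range(10), reader):  # first 10 rows only
        site = 'http://%s' % row[1]        # column 1 holds the domain
        print(site)
        channel.basic_publish(exchange='', routing_key='mozcompat', body=site)
connection.close()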
8510d648730059518ca6953aa716d59cc9f853ca
|
pybug/matlab/__init__.py
|
pybug/matlab/__init__.py
|
from numpy import gradient as np_gradient, reshape as np_reshape
def gradient(f, *varargs):
"""
Return the gradient of an N-dimensional array.
The gradient is computed using central differences in the interior and first differences at
the boundaries. The returned gradient hence has the same shape as the input array. This matches
Matlab's functionality, which is quoted as:
"The first output FX is always the gradient along the 2nd
dimension of F, going across columns. The second output FY is always
the gradient along the 1st dimension of F, going across rows. For the
third output FZ and the outputs that follow, the Nth output is the
gradient along the Nth dimension of F."
:param f:array_like
An N-dimensional array containing samples of a scalar function.
:param varargs: scalars
0, 1, or N scalars specifying the sample distances in each direction,
that is: dx, dy, dz, ... The default distance is 1.
:return: ndarray
N arrays of the same shape as f giving the derivative of f with respect to each
dimension. In order to match Matlab, the first output is along the second
dimension (dF/dx for images) and the second output is along the first dimension (dF/dy).
"""
gradients = np_gradient(f, *varargs)
if len(f.shape) > 1:
        gradients[:2] = gradients[1::-1]  # swap only the first two outputs
return gradients
def reshape(a, newshape):
"""
Gives a new shape to an array without changing its data. Assumes Fortran ordering to match
Matlab.
:param a: array_like
Array to be reshaped.
:param newshape: int or tuple of ints
The new shape should be compatible with the original shape. If an integer,
then the result will be a 1-D array of that length. One shape dimension can be -1.
In this case, the value is inferred from the length of the array and remaining dimensions.
:return: ndarray
This will be a new view object if possible; otherwise, it will be a copy.
"""
return np_reshape(a, newshape, order='F')
|
Add gradient and reshape functions to match Matlab
|
Add gradient and reshape functions to match Matlab
|
Python
|
bsd-3-clause
|
jabooth/menpo-archive,yuxiang-zhou/menpo,grigorisg9gr/menpo,jabooth/menpo-archive,mozata/menpo,patricksnape/menpo,grigorisg9gr/menpo,jabooth/menpo-archive,patricksnape/menpo,mozata/menpo,grigorisg9gr/menpo,menpo/menpo,menpo/menpo,mozata/menpo,yuxiang-zhou/menpo,menpo/menpo,jabooth/menpo-archive,patricksnape/menpo,yuxiang-zhou/menpo,mozata/menpo
|
Add gradient and reshape functions to match Matlab
|
from numpy import gradient as np_gradient, reshape as np_reshape
def gradient(f, *varargs):
"""
Return the gradient of an N-dimensional array.
The gradient is computed using central differences in the interior and first differences at
the boundaries. The returned gradient hence has the same shape as the input array. This matches
Matlab's functionality, which is quoted as:
"The first output FX is always the gradient along the 2nd
dimension of F, going across columns. The second output FY is always
the gradient along the 1st dimension of F, going across rows. For the
third output FZ and the outputs that follow, the Nth output is the
gradient along the Nth dimension of F."
:param f:array_like
An N-dimensional array containing samples of a scalar function.
:param varargs: scalars
0, 1, or N scalars specifying the sample distances in each direction,
that is: dx, dy, dz, ... The default distance is 1.
:return: ndarray
N arrays of the same shape as f giving the derivative of f with respect to each
dimension. In order to match Matlab, the first output is along the second
dimension (dF/dx for images) and the second output is along the first dimension (dF/dy).
"""
gradients = np_gradient(f, *varargs)
if len(f.shape) > 1:
        gradients[:2] = gradients[1::-1]  # swap only the first two outputs
return gradients
def reshape(a, newshape):
"""
Gives a new shape to an array without changing its data. Assumes Fortran ordering to match
Matlab.
:param a: array_like
Array to be reshaped.
:param newshape: int or tuple of ints
The new shape should be compatible with the original shape. If an integer,
then the result will be a 1-D array of that length. One shape dimension can be -1.
In this case, the value is inferred from the length of the array and remaining dimensions.
:return: ndarray
This will be a new view object if possible; otherwise, it will be a copy.
"""
return np_reshape(a, newshape, order='F')
|
<commit_before><commit_msg>Add gradient and reshape functions to match Matlab<commit_after>
|
from numpy import gradient as np_gradient, reshape as np_reshape
def gradient(f, *varargs):
"""
Return the gradient of an N-dimensional array.
The gradient is computed using central differences in the interior and first differences at
the boundaries. The returned gradient hence has the same shape as the input array. This matches
Matlab's functionality, which is quoted as:
"The first output FX is always the gradient along the 2nd
dimension of F, going across columns. The second output FY is always
the gradient along the 1st dimension of F, going across rows. For the
third output FZ and the outputs that follow, the Nth output is the
gradient along the Nth dimension of F."
:param f:array_like
An N-dimensional array containing samples of a scalar function.
:param varargs: scalars
0, 1, or N scalars specifying the sample distances in each direction,
that is: dx, dy, dz, ... The default distance is 1.
:return: ndarray
N arrays of the same shape as f giving the derivative of f with respect to each
dimension. In order to match Matlab, the first output is along the second
dimension (dF/dx for images) and the second output is along the first dimension (dF/dy).
"""
gradients = np_gradient(f, *varargs)
if len(f.shape) > 1:
        gradients[:2] = gradients[1::-1]  # swap only the first two outputs
return gradients
def reshape(a, newshape):
"""
Gives a new shape to an array without changing its data. Assumes Fortran ordering to match
Matlab.
:param a: array_like
Array to be reshaped.
:param newshape: int or tuple of ints
The new shape should be compatible with the original shape. If an integer,
then the result will be a 1-D array of that length. One shape dimension can be -1.
In this case, the value is inferred from the length of the array and remaining dimensions.
:return: ndarray
This will be a new view object if possible; otherwise, it will be a copy.
"""
return np_reshape(a, newshape, order='F')
|
Add gradient and reshape functions to match Matlabfrom numpy import gradient as np_gradient, reshape as np_reshape
def gradient(f, *varargs):
"""
Return the gradient of an N-dimensional array.
The gradient is computed using central differences in the interior and first differences at
the boundaries. The returned gradient hence has the same shape as the input array. This matches
Matlab's functionality, which is quoted as:
"The first output FX is always the gradient along the 2nd
dimension of F, going across columns. The second output FY is always
the gradient along the 1st dimension of F, going across rows. For the
third output FZ and the outputs that follow, the Nth output is the
gradient along the Nth dimension of F."
:param f:array_like
An N-dimensional array containing samples of a scalar function.
:param varargs: scalars
0, 1, or N scalars specifying the sample distances in each direction,
that is: dx, dy, dz, ... The default distance is 1.
:return: ndarray
N arrays of the same shape as f giving the derivative of f with respect to each
dimension. In order to match Matlab, the first output is along the second
dimension (dF/dx for images) and the second output is along the first dimension (dF/dy).
"""
gradients = np_gradient(f, *varargs)
if len(f.shape) > 1:
        gradients[:2] = gradients[1::-1]  # swap only the first two outputs
return gradients
def reshape(a, newshape):
"""
Gives a new shape to an array without changing its data. Assumes Fortran ordering to match
Matlab.
:param a: array_like
Array to be reshaped.
:param newshape: int or tuple of ints
The new shape should be compatible with the original shape. If an integer,
then the result will be a 1-D array of that length. One shape dimension can be -1.
In this case, the value is inferred from the length of the array and remaining dimensions.
:return: ndarray
This will be a new view object if possible; otherwise, it will be a copy.
"""
return np_reshape(a, newshape, order='F')
|
<commit_before><commit_msg>Add gradient and reshape functions to match Matlab<commit_after>from numpy import gradient as np_gradient, reshape as np_reshape
def gradient(f, *varargs):
"""
Return the gradient of an N-dimensional array.
The gradient is computed using central differences in the interior and first differences at
the boundaries. The returned gradient hence has the same shape as the input array. This matches
Matlab's functionality, which is quoted as:
"The first output FX is always the gradient along the 2nd
dimension of F, going across columns. The second output FY is always
the gradient along the 1st dimension of F, going across rows. For the
third output FZ and the outputs that follow, the Nth output is the
gradient along the Nth dimension of F."
:param f:array_like
An N-dimensional array containing samples of a scalar function.
:param varargs: scalars
0, 1, or N scalars specifying the sample distances in each direction,
that is: dx, dy, dz, ... The default distance is 1.
:return: ndarray
N arrays of the same shape as f giving the derivative of f with respect to each
dimension. In order to match Matlab, the first output is along the second
dimension (dF/dx for images) and the second output is along the first dimension (dF/dy).
"""
gradients = np_gradient(f, *varargs)
if len(f.shape) > 1:
        gradients[:2] = gradients[1::-1]  # swap only the first two outputs
return gradients
def reshape(a, newshape):
"""
Gives a new shape to an array without changing its data. Assumes Fortran ordering to match
Matlab.
:param a: array_like
Array to be reshaped.
:param newshape: int or tuple of ints
The new shape should be compatible with the original shape. If an integer,
then the result will be a 1-D array of that length. One shape dimension can be -1.
In this case, the value is inferred from the length of the array and remaining dimensions.
:return: ndarray
This will be a new view object if possible; otherwise, it will be a copy.
"""
return np_reshape(a, newshape, order='F')
|
|
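To see concretely why the wrapper reverses the first two outputs, compare np.gradient's native axis ordering with the Matlab convention on a 2-D array (values chosen by hand, not taken from the record):
import numpy as np
f = np.array([[1., 2., 4.],
              [2., 4., 8.]])
dy, dx = np.gradient(f)        # NumPy yields the axis-0 (row) gradient first
fx, fy = np.gradient(f)[::-1]  # reversing the pair mimics Matlab's FX-first order
assert np.allclose(fx, dx) and np.allclose(fy, dy)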
80966a18fbe92839370c90271d1118a0ac621501
|
examples/check_equal.py
|
examples/check_equal.py
|
#!/usr/bin/env python3
from segpy.reader import create_reader
from segpy.trace_header import TraceHeaderRev1
from segpy.types import Int16
from segpy.writer import write_segy
from segpy.header import are_equal, field
class CustomTraceHeader(TraceHeaderRev1):
unassigned_1 = field(
Int16, offset=233, default=0, documentation="Unassigned 1")
unassigned_2 = field(
Int16, offset=235, default=0, documentation="Unassigned 2")
unassigned_3 = field(
Int16, offset=237, default=0, documentation="Unassigned 3")
unassigned_4 = field(
Int16, offset=239, default=0, documentation="Unassigned 4")
in_filename = "data/rth.segy"
out_filename = "data/rth_out2.segy"
in_file = open(in_filename, 'rb')
with open(out_filename, 'wb') as out_file:
segy_reader_in = create_reader(in_file, trace_header_format=CustomTraceHeader)
write_segy(out_file, segy_reader_in, trace_header_format=CustomTraceHeader)
out_file = open(out_filename, 'rb')
segy_reader_out = create_reader(out_file, trace_header_format=CustomTraceHeader)
for trace_index in segy_reader_in.trace_indexes():
trace_offset = segy_reader_in._trace_offset_catalog[trace_index]
print(trace_index, hex(trace_offset))
head0 = segy_reader_in.trace_header(trace_index)
head1 = segy_reader_out.trace_header(trace_index)
assert are_equal(head0, head1), "Error {}".format(trace_index)
data0 = segy_reader_in.trace_samples(trace_index)
data1 = segy_reader_out.trace_samples(trace_index)
assert data0==data1
|
Add a small program for checking whether SEG Y files roundtrip through the data structure flawlessly.
|
Add a small program for checking whether SEG Y files roundtrip through the data structure flawlessly.
|
Python
|
agpl-3.0
|
hohogpb/segpy,Kramer477/segpy,kwinkunks/segpy,abingham/segpy,kjellkongsvik/segpy,asbjorn/segpy
|
Add a small program for checking whether SEG Y files roundtrip through the data structure flawlessly.
|
#!/usr/bin/env python3
from segpy.reader import create_reader
from segpy.trace_header import TraceHeaderRev1
from segpy.types import Int16
from segpy.writer import write_segy
from segpy.header import are_equal, field
class CustomTraceHeader(TraceHeaderRev1):
unassigned_1 = field(
Int16, offset=233, default=0, documentation="Unassigned 1")
unassigned_2 = field(
Int16, offset=235, default=0, documentation="Unassigned 2")
unassigned_3 = field(
Int16, offset=237, default=0, documentation="Unassigned 3")
unassigned_4 = field(
Int16, offset=239, default=0, documentation="Unassigned 4")
in_filename = "data/rth.segy"
out_filename = "data/rth_out2.segy"
in_file = open(in_filename, 'rb')
with open(out_filename, 'wb') as out_file:
segy_reader_in = create_reader(in_file, trace_header_format=CustomTraceHeader)
write_segy(out_file, segy_reader_in, trace_header_format=CustomTraceHeader)
out_file = open(out_filename, 'rb')
segy_reader_out = create_reader(out_file, trace_header_format=CustomTraceHeader)
for trace_index in segy_reader_in.trace_indexes():
trace_offset = segy_reader_in._trace_offset_catalog[trace_index]
print(trace_index, hex(trace_offset))
head0 = segy_reader_in.trace_header(trace_index)
head1 = segy_reader_out.trace_header(trace_index)
assert are_equal(head0, head1), "Error {}".format(trace_index)
data0 = segy_reader_in.trace_samples(trace_index)
data1 = segy_reader_out.trace_samples(trace_index)
assert data0==data1
|
<commit_before><commit_msg>Add a small program for checking whether SEG Y files roundtrip through the data structure flawlessly.<commit_after>
|
#!/usr/bin/env python3
from segpy.reader import create_reader
from segpy.trace_header import TraceHeaderRev1
from segpy.types import Int16
from segpy.writer import write_segy
from segpy.header import are_equal, field
class CustomTraceHeader(TraceHeaderRev1):
unassigned_1 = field(
Int16, offset=233, default=0, documentation="Unassigned 1")
unassigned_2 = field(
Int16, offset=235, default=0, documentation="Unassigned 2")
unassigned_3 = field(
Int16, offset=237, default=0, documentation="Unassigned 3")
unassigned_4 = field(
Int16, offset=239, default=0, documentation="Unassigned 4")
in_filename = "data/rth.segy"
out_filename = "data/rth_out2.segy"
in_file = open(in_filename, 'rb')
with open(out_filename, 'wb') as out_file:
segy_reader_in = create_reader(in_file, trace_header_format=CustomTraceHeader)
write_segy(out_file, segy_reader_in, trace_header_format=CustomTraceHeader)
out_file = open(out_filename, 'rb')
segy_reader_out = create_reader(out_file, trace_header_format=CustomTraceHeader)
for trace_index in segy_reader_in.trace_indexes():
trace_offset = segy_reader_in._trace_offset_catalog[trace_index]
print(trace_index, hex(trace_offset))
head0 = segy_reader_in.trace_header(trace_index)
head1 = segy_reader_out.trace_header(trace_index)
assert are_equal(head0, head1), "Error {}".format(trace_index)
data0 = segy_reader_in.trace_samples(trace_index)
data1 = segy_reader_out.trace_samples(trace_index)
assert data0==data1
|
Add a small program for checking whether SEG Y files roundtrip through the data structure flawlessly.#!/usr/bin/env python3
from segpy.reader import create_reader
from segpy.trace_header import TraceHeaderRev1
from segpy.types import Int16
from segpy.writer import write_segy
from segpy.header import are_equal, field
class CustomTraceHeader(TraceHeaderRev1):
unassigned_1 = field(
Int16, offset=233, default=0, documentation="Unassigned 1")
unassigned_2 = field(
Int16, offset=235, default=0, documentation="Unassigned 2")
unassigned_3 = field(
Int16, offset=237, default=0, documentation="Unassigned 3")
unassigned_4 = field(
Int16, offset=239, default=0, documentation="Unassigned 4")
in_filename = "data/rth.segy"
out_filename = "data/rth_out2.segy"
in_file = open(in_filename, 'rb')
with open(out_filename, 'wb') as out_file:
segy_reader_in = create_reader(in_file, trace_header_format=CustomTraceHeader)
write_segy(out_file, segy_reader_in, trace_header_format=CustomTraceHeader)
out_file = open(out_filename, 'rb')
segy_reader_out = create_reader(out_file, trace_header_format=CustomTraceHeader)
for trace_index in segy_reader_in.trace_indexes():
trace_offset = segy_reader_in._trace_offset_catalog[trace_index]
print(trace_index, hex(trace_offset))
head0 = segy_reader_in.trace_header(trace_index)
head1 = segy_reader_out.trace_header(trace_index)
assert are_equal(head0, head1), "Error {}".format(trace_index)
data0 = segy_reader_in.trace_samples(trace_index)
data1 = segy_reader_out.trace_samples(trace_index)
assert data0==data1
|
<commit_before><commit_msg>Add a small program for checking whether SEG Y files roundtrip through the data structure flawlessly.<commit_after>#!/usr/bin/env python3
from segpy.reader import create_reader
from segpy.trace_header import TraceHeaderRev1
from segpy.types import Int16
from segpy.writer import write_segy
from segpy.header import are_equal, field
class CustomTraceHeader(TraceHeaderRev1):
unassigned_1 = field(
Int16, offset=233, default=0, documentation="Unassigned 1")
unassigned_2 = field(
Int16, offset=235, default=0, documentation="Unassigned 2")
unassigned_3 = field(
Int16, offset=237, default=0, documentation="Unassigned 3")
unassigned_4 = field(
Int16, offset=239, default=0, documentation="Unassigned 4")
in_filename = "data/rth.segy"
out_filename = "data/rth_out2.segy"
in_file = open(in_filename, 'rb')
with open(out_filename, 'wb') as out_file:
segy_reader_in = create_reader(in_file, trace_header_format=CustomTraceHeader)
write_segy(out_file, segy_reader_in, trace_header_format=CustomTraceHeader)
out_file = open(out_filename, 'rb')
segy_reader_out = create_reader(out_file, trace_header_format=CustomTraceHeader)
for trace_index in segy_reader_in.trace_indexes():
trace_offset = segy_reader_in._trace_offset_catalog[trace_index]
print(trace_index, hex(trace_offset))
head0 = segy_reader_in.trace_header(trace_index)
head1 = segy_reader_out.trace_header(trace_index)
assert are_equal(head0, head1), "Error {}".format(trace_index)
data0 = segy_reader_in.trace_samples(trace_index)
data1 = segy_reader_out.trace_samples(trace_index)
assert data0==data1
|
|
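A coarser complement to the field-by-field comparison above is hashing both files. A minimal sketch reusing the record's filenames; byte identity is stricter than value equality, since a writer may legitimately normalize padding, so treat a hash mismatch as a prompt for the header/sample comparison rather than a failure by itself:
import hashlib
def sha256_of(path):
    h = hashlib.sha256()
    with open(path, 'rb') as f:
        for chunk in iter(lambda: f.read(1 << 20), b''):  # read in 1 MiB chunks
            h.update(chunk)
    return h.hexdigest()
print(sha256_of('data/rth.segy') == sha256_of('data/rth_out2.segy'))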
68c3ee25d505c680a08ef53de24c4b920099e081
|
scripts/clear_invalid_unclaimed_records.py
|
scripts/clear_invalid_unclaimed_records.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Script to remove unclaimed records for confirmed users. Once a user has
confirmed their email address, all their unclaimed records should be cleared
so that their full name shows up correctly on all projects.
To run: ::
$ python -m scripts.clear_invalid_unclaimed_records
"""
import sys
import logging
from modularodm import Q
from nose.tools import * # noqa (PEP8 asserts)
from website.app import init_app
from framework.auth.core import User
from tests.base import OsfTestCase
from tests.factories import UserFactory, ProjectFactory
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
QUERY = Q('date_confirmed', 'ne', None) & Q('unclaimed_records', 'ne', {})
def do_migration(dry=False):
"""Clear unclaimed_records for confirmed users."""
n_migrated = 0
for user in get_targets():
n_migrated += 1
logger.info('Clearing unclaimed records for {0!r}'.format(user))
if not dry:
user.unclaimed_records = {}
user.save()
logger.info('Migrated {0} records.'.format(n_migrated))
return n_migrated
def get_targets():
"""Return a QuerySet containing confirmed Users who have unclaimed records."""
return User.find(QUERY)
def main():
init_app(routes=False)
if 'dry' in sys.argv:
do_migration(dry=True)
else:
do_migration(dry=False)
class TestClearInvalidUnclaimedRecords(OsfTestCase):
def setUp(self):
OsfTestCase.setUp(self)
self.referrer = UserFactory()
self.project = ProjectFactory(creator=self.referrer)
def test_get_targets(self):
user = UserFactory.build()
user.add_unclaimed_record(self.project, self.referrer, 'foo')
user.save()
assert_true(user.is_confirmed())
targets = list(get_targets())
assert_in(user, targets)
def test_do_migration(self):
user = UserFactory.build()
user.add_unclaimed_record(self.project, self.referrer, 'foo')
user.save()
assert_true(user.is_confirmed())
assert_equal(len(user.unclaimed_records.keys()), 1)
do_migration()
assert_equal(len(user.unclaimed_records.keys()), 0)
if __name__ == '__main__':
main()
|
Add migration script to clear invalid unclaimed records
|
Add migration script to clear invalid unclaimed records
Confirmed users should not have unclaimed records
|
Python
|
apache-2.0
|
danielneis/osf.io,bdyetton/prettychart,petermalcolm/osf.io,CenterForOpenScience/osf.io,jnayak1/osf.io,lamdnhan/osf.io,icereval/osf.io,icereval/osf.io,zkraime/osf.io,icereval/osf.io,cldershem/osf.io,chrisseto/osf.io,dplorimer/osf,doublebits/osf.io,MerlinZhang/osf.io,DanielSBrown/osf.io,kwierman/osf.io,hmoco/osf.io,lyndsysimon/osf.io,TomHeatwole/osf.io,emetsger/osf.io,caneruguz/osf.io,monikagrabowska/osf.io,leb2dg/osf.io,barbour-em/osf.io,brandonPurvis/osf.io,baylee-d/osf.io,GaryKriebel/osf.io,asanfilippo7/osf.io,samchrisinger/osf.io,himanshuo/osf.io,acshi/osf.io,Johnetordoff/osf.io,GageGaskins/osf.io,abought/osf.io,caseyrygt/osf.io,RomanZWang/osf.io,jnayak1/osf.io,brianjgeiger/osf.io,MerlinZhang/osf.io,haoyuchen1992/osf.io,KAsante95/osf.io,RomanZWang/osf.io,haoyuchen1992/osf.io,leb2dg/osf.io,njantrania/osf.io,wearpants/osf.io,alexschiller/osf.io,bdyetton/prettychart,kwierman/osf.io,zamattiac/osf.io,monikagrabowska/osf.io,caseyrygt/osf.io,felliott/osf.io,ZobairAlijan/osf.io,mluo613/osf.io,felliott/osf.io,jolene-esposito/osf.io,cslzchen/osf.io,cwisecarver/osf.io,laurenrevere/osf.io,billyhunt/osf.io,billyhunt/osf.io,kch8qx/osf.io,caseyrollins/osf.io,RomanZWang/osf.io,asanfilippo7/osf.io,Nesiehr/osf.io,mfraezz/osf.io,barbour-em/osf.io,mfraezz/osf.io,asanfilippo7/osf.io,abought/osf.io,danielneis/osf.io,binoculars/osf.io,doublebits/osf.io,HarryRybacki/osf.io,jeffreyliu3230/osf.io,mfraezz/osf.io,alexschiller/osf.io,GageGaskins/osf.io,lyndsysimon/osf.io,alexschiller/osf.io,kushG/osf.io,saradbowman/osf.io,samanehsan/osf.io,reinaH/osf.io,erinspace/osf.io,Ghalko/osf.io,jmcarp/osf.io,danielneis/osf.io,ticklemepierce/osf.io,lamdnhan/osf.io,leb2dg/osf.io,pattisdr/osf.io,bdyetton/prettychart,bdyetton/prettychart,zachjanicki/osf.io,mluo613/osf.io,DanielSBrown/osf.io,mluo613/osf.io,amyshi188/osf.io,ckc6cz/osf.io,monikagrabowska/osf.io,caneruguz/osf.io,doublebits/osf.io,cosenal/osf.io,zamattiac/osf.io,sloria/osf.io,himanshuo/osf.io,cslzchen/osf.io,chennan47/osf.io,caseyrygt/osf.io,SSJohns/osf.io,ZobairAlijan/osf.io,mluke93/osf.io,Johnetordoff/osf.io,jolene-esposito/osf.io,aaxelb/osf.io,monikagrabowska/osf.io,hmoco/osf.io,jeffreyliu3230/osf.io,zamattiac/osf.io,arpitar/osf.io,caseyrollins/osf.io,chrisseto/osf.io,HalcyonChimera/osf.io,felliott/osf.io,Ghalko/osf.io,HalcyonChimera/osf.io,mluke93/osf.io,revanthkolli/osf.io,HalcyonChimera/osf.io,kushG/osf.io,KAsante95/osf.io,mluke93/osf.io,ZobairAlijan/osf.io,baylee-d/osf.io,CenterForOpenScience/osf.io,GaryKriebel/osf.io,fabianvf/osf.io,adlius/osf.io,lyndsysimon/osf.io,erinspace/osf.io,ckc6cz/osf.io,ckc6cz/osf.io,zachjanicki/osf.io,barbour-em/osf.io,GaryKriebel/osf.io,sbt9uc/osf.io,billyhunt/osf.io,AndrewSallans/osf.io,MerlinZhang/osf.io,KAsante95/osf.io,billyhunt/osf.io,monikagrabowska/osf.io,fabianvf/osf.io,doublebits/osf.io,brianjgeiger/osf.io,jolene-esposito/osf.io,billyhunt/osf.io,sbt9uc/osf.io,asanfilippo7/osf.io,Nesiehr/osf.io,arpitar/osf.io,HarryRybacki/osf.io,jmcarp/osf.io,abought/osf.io,crcresearch/osf.io,rdhyee/osf.io,SSJohns/osf.io,emetsger/osf.io,alexschiller/osf.io,kwierman/osf.io,emetsger/osf.io,jinluyuan/osf.io,pattisdr/osf.io,revanthkolli/osf.io,petermalcolm/osf.io,fabianvf/osf.io,hmoco/osf.io,cldershem/osf.io,adlius/osf.io,brandonPurvis/osf.io,sbt9uc/osf.io,kushG/osf.io,njantrania/osf.io,GageGaskins/osf.io,MerlinZhang/osf.io,sbt9uc/osf.io,acshi/osf.io,lamdnhan/osf.io,reinaH/osf.io,brandonPurvis/osf.io,GageGaskins/osf.io,kushG/osf.io,samchrisinger/osf.io,erinspace/osf.io,SSJohns/osf.io,Johnetordoff/osf.io,jnayak1/osf.io,leb2dg/osf.io,rdhyee/osf.io,l
aurenrevere/osf.io,chennan47/osf.io,mfraezz/osf.io,acshi/osf.io,pattisdr/osf.io,HarryRybacki/osf.io,wearpants/osf.io,amyshi188/osf.io,TomBaxter/osf.io,aaxelb/osf.io,zkraime/osf.io,ticklemepierce/osf.io,brandonPurvis/osf.io,ZobairAlijan/osf.io,dplorimer/osf,brianjgeiger/osf.io,sloria/osf.io,reinaH/osf.io,cosenal/osf.io,brandonPurvis/osf.io,cosenal/osf.io,TomBaxter/osf.io,jnayak1/osf.io,cslzchen/osf.io,reinaH/osf.io,KAsante95/osf.io,Ghalko/osf.io,wearpants/osf.io,cldershem/osf.io,DanielSBrown/osf.io,jeffreyliu3230/osf.io,HalcyonChimera/osf.io,TomHeatwole/osf.io,kch8qx/osf.io,CenterForOpenScience/osf.io,Nesiehr/osf.io,RomanZWang/osf.io,mluke93/osf.io,saradbowman/osf.io,mattclark/osf.io,zkraime/osf.io,cosenal/osf.io,barbour-em/osf.io,cwisecarver/osf.io,emetsger/osf.io,alexschiller/osf.io,fabianvf/osf.io,njantrania/osf.io,himanshuo/osf.io,laurenrevere/osf.io,AndrewSallans/osf.io,mattclark/osf.io,DanielSBrown/osf.io,dplorimer/osf,mattclark/osf.io,acshi/osf.io,jinluyuan/osf.io,kch8qx/osf.io,jinluyuan/osf.io,haoyuchen1992/osf.io,doublebits/osf.io,Ghalko/osf.io,rdhyee/osf.io,GaryKriebel/osf.io,kch8qx/osf.io,chrisseto/osf.io,binoculars/osf.io,zachjanicki/osf.io,amyshi188/osf.io,samanehsan/osf.io,petermalcolm/osf.io,jolene-esposito/osf.io,GageGaskins/osf.io,amyshi188/osf.io,SSJohns/osf.io,jinluyuan/osf.io,jeffreyliu3230/osf.io,ckc6cz/osf.io,rdhyee/osf.io,haoyuchen1992/osf.io,aaxelb/osf.io,RomanZWang/osf.io,cwisecarver/osf.io,TomHeatwole/osf.io,kwierman/osf.io,Nesiehr/osf.io,sloria/osf.io,adlius/osf.io,cldershem/osf.io,petermalcolm/osf.io,acshi/osf.io,abought/osf.io,aaxelb/osf.io,samanehsan/osf.io,jmcarp/osf.io,baylee-d/osf.io,chennan47/osf.io,adlius/osf.io,crcresearch/osf.io,CenterForOpenScience/osf.io,njantrania/osf.io,revanthkolli/osf.io,mluo613/osf.io,caseyrygt/osf.io,chrisseto/osf.io,caneruguz/osf.io,caneruguz/osf.io,ticklemepierce/osf.io,danielneis/osf.io,himanshuo/osf.io,caseyrollins/osf.io,revanthkolli/osf.io,zamattiac/osf.io,KAsante95/osf.io,crcresearch/osf.io,samchrisinger/osf.io,samanehsan/osf.io,felliott/osf.io,jmcarp/osf.io,zachjanicki/osf.io,wearpants/osf.io,arpitar/osf.io,binoculars/osf.io,lyndsysimon/osf.io,arpitar/osf.io,lamdnhan/osf.io,samchrisinger/osf.io,HarryRybacki/osf.io,TomHeatwole/osf.io,zkraime/osf.io,kch8qx/osf.io,TomBaxter/osf.io,Johnetordoff/osf.io,hmoco/osf.io,brianjgeiger/osf.io,ticklemepierce/osf.io,mluo613/osf.io,cslzchen/osf.io,dplorimer/osf,cwisecarver/osf.io
|
Add migration script to clear invalid unclaimed records
Confirmed users should not have unclaimed records
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Script to remove unclaimed records for confirmed users. Once a user has
confirmed their email address, all their unclaimed records should be cleared
so that their full name shows up correctly on all projects.
To run: ::
$ python -m scripts.clear_invalid_unclaimed_records
"""
import sys
import logging
from modularodm import Q
from nose.tools import * # noqa (PEP8 asserts)
from website.app import init_app
from framework.auth.core import User
from tests.base import OsfTestCase
from tests.factories import UserFactory, ProjectFactory
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
QUERY = Q('date_confirmed', 'ne', None) & Q('unclaimed_records', 'ne', {})
def do_migration(dry=False):
"""Clear unclaimed_records for confirmed users."""
n_migrated = 0
for user in get_targets():
n_migrated += 1
logger.info('Clearing unclaimed records for {0!r}'.format(user))
if not dry:
user.unclaimed_records = {}
user.save()
logger.info('Migrated {0} records.'.format(n_migrated))
return n_migrated
def get_targets():
"""Return a QuerySet containing confirmed Users who have unclaimed records."""
return User.find(QUERY)
def main():
init_app(routes=False)
if 'dry' in sys.argv:
do_migration(dry=True)
else:
do_migration(dry=False)
class TestClearInvalidUnclaimedRecords(OsfTestCase):
def setUp(self):
OsfTestCase.setUp(self)
self.referrer = UserFactory()
self.project = ProjectFactory(creator=self.referrer)
def test_get_targets(self):
user = UserFactory.build()
user.add_unclaimed_record(self.project, self.referrer, 'foo')
user.save()
assert_true(user.is_confirmed())
targets = list(get_targets())
assert_in(user, targets)
def test_do_migration(self):
user = UserFactory.build()
user.add_unclaimed_record(self.project, self.referrer, 'foo')
user.save()
assert_true(user.is_confirmed())
assert_equal(len(user.unclaimed_records.keys()), 1)
do_migration()
assert_equal(len(user.unclaimed_records.keys()), 0)
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add migration script to clear invalid unclaimed records
Confirmed users should not have unclaimed records<commit_after>
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Script to remove unclaimed records for confirmed users. Once a user has
confirmed their email address, all their unclaimed records should be cleared
so that their full name shows up correctly on all projects.
To run: ::
$ python -m scripts.clear_invalid_unclaimed_records
"""
import sys
import logging
from modularodm import Q
from nose.tools import * # noqa (PEP8 asserts)
from website.app import init_app
from framework.auth.core import User
from tests.base import OsfTestCase
from tests.factories import UserFactory, ProjectFactory
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
QUERY = Q('date_confirmed', 'ne', None) & Q('unclaimed_records', 'ne', {})
def do_migration(dry=False):
"""Clear unclaimed_records for confirmed users."""
n_migrated = 0
for user in get_targets():
n_migrated += 1
logger.info('Clearing unclaimed records for {0!r}'.format(user))
if not dry:
user.unclaimed_records = {}
user.save()
logger.info('Migrated {0} records.'.format(n_migrated))
return n_migrated
def get_targets():
"""Return a QuerySet containing confirmed Users who have unclaimed records."""
return User.find(QUERY)
def main():
init_app(routes=False)
if 'dry' in sys.argv:
do_migration(dry=True)
else:
do_migration(dry=False)
class TestClearInvalidUnclaimedRecords(OsfTestCase):
def setUp(self):
OsfTestCase.setUp(self)
self.referrer = UserFactory()
self.project = ProjectFactory(creator=self.referrer)
def test_get_targets(self):
user = UserFactory.build()
user.add_unclaimed_record(self.project, self.referrer, 'foo')
user.save()
assert_true(user.is_confirmed())
targets = list(get_targets())
assert_in(user, targets)
def test_do_migration(self):
user = UserFactory.build()
user.add_unclaimed_record(self.project, self.referrer, 'foo')
user.save()
assert_true(user.is_confirmed())
assert_equal(len(user.unclaimed_records.keys()), 1)
do_migration()
assert_equal(len(user.unclaimed_records.keys()), 0)
if __name__ == '__main__':
main()
|
Add migration script to clear invalid unclaimed records
Confirmed users should not have unclaimed records#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Script to remove unclaimed records for confirmed users. Once a user has
confirmed their email address, all their unclaimed records should be cleared
so that their full name shows up correctly on all projects.
To run: ::
$ python -m scripts.clear_invalid_unclaimed_records
"""
import sys
import logging
from modularodm import Q
from nose.tools import * # noqa (PEP8 asserts)
from website.app import init_app
from framework.auth.core import User
from tests.base import OsfTestCase
from tests.factories import UserFactory, ProjectFactory
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
QUERY = Q('date_confirmed', 'ne', None) & Q('unclaimed_records', 'ne', {})
def do_migration(dry=False):
"""Clear unclaimed_records for confirmed users."""
n_migrated = 0
for user in get_targets():
n_migrated += 1
logger.info('Clearing unclaimed records for {0!r}'.format(user))
if not dry:
user.unclaimed_records = {}
user.save()
logger.info('Migrated {0} records.'.format(n_migrated))
return n_migrated
def get_targets():
"""Return a QuerySet containing confirmed Users who have unclaimed records."""
return User.find(QUERY)
def main():
init_app(routes=False)
if 'dry' in sys.argv:
do_migration(dry=True)
else:
do_migration(dry=False)
class TestClearInvalidUnclaimedRecords(OsfTestCase):
def setUp(self):
OsfTestCase.setUp(self)
self.referrer = UserFactory()
self.project = ProjectFactory(creator=self.referrer)
def test_get_targets(self):
user = UserFactory.build()
user.add_unclaimed_record(self.project, self.referrer, 'foo')
user.save()
assert_true(user.is_confirmed())
targets = list(get_targets())
assert_in(user, targets)
def test_do_migration(self):
user = UserFactory.build()
user.add_unclaimed_record(self.project, self.referrer, 'foo')
user.save()
assert_true(user.is_confirmed())
assert_equal(len(user.unclaimed_records.keys()), 1)
do_migration()
assert_equal(len(user.unclaimed_records.keys()), 0)
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add migration script to clear invalid unclaimed records
Confirmed users should not have unclaimed records<commit_after>#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Script to remove unclaimed records for confirmed users. Once a user has
confirmed their email address, all their unclaimed records should be cleared
so that their full name shows up correctly on all projects.
To run: ::
$ python -m scripts.clear_invalid_unclaimed_records
"""
import sys
import logging
from modularodm import Q
from nose.tools import * # noqa (PEP8 asserts)
from website.app import init_app
from framework.auth.core import User
from tests.base import OsfTestCase
from tests.factories import UserFactory, ProjectFactory
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
QUERY = Q('date_confirmed', 'ne', None) & Q('unclaimed_records', 'ne', {})
def do_migration(dry=False):
"""Clear unclaimed_records for confirmed users."""
n_migrated = 0
for user in get_targets():
n_migrated += 1
logger.info('Clearing unclaimed records for {0!r}'.format(user))
if not dry:
user.unclaimed_records = {}
user.save()
logger.info('Migrated {0} records.'.format(n_migrated))
return n_migrated
def get_targets():
"""Return a QuerySet containing confirmed Users who have unclaimed records."""
return User.find(QUERY)
def main():
init_app(routes=False)
if 'dry' in sys.argv:
do_migration(dry=True)
else:
do_migration(dry=False)
class TestClearInvalidUnclaimedRecords(OsfTestCase):
def setUp(self):
OsfTestCase.setUp(self)
self.referrer = UserFactory()
self.project = ProjectFactory(creator=self.referrer)
def test_get_targets(self):
user = UserFactory.build()
user.add_unclaimed_record(self.project, self.referrer, 'foo')
user.save()
assert_true(user.is_confirmed())
targets = list(get_targets())
assert_in(user, targets)
def test_do_migration(self):
user = UserFactory.build()
user.add_unclaimed_record(self.project, self.referrer, 'foo')
user.save()
assert_true(user.is_confirmed())
assert_equal(len(user.unclaimed_records.keys()), 1)
do_migration()
assert_equal(len(user.unclaimed_records.keys()), 0)
if __name__ == '__main__':
main()
|
|
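The dry-run switch above is a reusable migration pattern: enumerate and log the pending work, and mutate only when dry is off. A stripped-down sketch with a hypothetical mutation standing in for clearing and saving a user:
import logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
def migrate(records, dry=False):
    """Log every pending change; only mutate when dry is False."""
    n = 0
    for rec in records:
        n += 1
        logger.info('clearing %r%s', rec, ' (dry run)' if dry else '')
        if not dry:
            rec.clear()  # hypothetical mutation; the real script empties and saves the user
    return n
migrate([{'a': 1}, {'b': 2}], dry=True)  # logs twice, mutates nothing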
1073485bc5ceadf771b09501e3302b792de75691
|
portal/migrations/versions/9ebdfeb28bef_.py
|
portal/migrations/versions/9ebdfeb28bef_.py
|
"""Correct rank of CRV baseline
Revision ID: 9ebdfeb28bef
Revises: a679f493bcdd
Create Date: 2017-10-09 10:16:40.584279
"""
from alembic import op
from sqlalchemy.orm import sessionmaker
from portal.models.questionnaire_bank import QuestionnaireBank
# revision identifiers, used by Alembic.
revision = '9ebdfeb28bef'
down_revision = 'a679f493bcdd'
def upgrade():
# site_persistence isn't handling small changes - wants to destroy and
# recreate, which breaks foreign key constraints to existing results, etc.
# https://github.com/uwcirg/ePROMs-site-config/pull/71 shows the desired
# rank corrections.
# On CRV_baseline:
# rank questionnaire
# 0: epic26
# 1: eproms_add
# 2: comorb
desired_order = ['epic26', 'eproms_add', 'comorb']
Session = sessionmaker()
bind = op.get_bind()
session = Session(bind=bind)
qb = session.query(QuestionnaireBank).filter(
QuestionnaireBank.name == 'CRV_baseline').one()
found_order = []
for q in qb.questionnaires:
found_order.append(q.name)
if found_order == desired_order:
# in correct order, done
return
# correct rank collides with existing - move out of way (+100)
for rank, name in enumerate(desired_order):
match = [q for q in qb.questionnaires if q.name == name]
assert len(match) == 1
match[0].rank = rank + 100
session.commit()
# now restore to desired rank
for rank, name in enumerate(desired_order):
match = [q for q in qb.questionnaires if q.name == name]
assert len(match) == 1
match[0].rank = rank
session.commit()
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
pass
# ### end Alembic commands ###
|
Correct crv baseline questionnaire rank w/ migration (FKs prevent site_persistence fix from working)
|
Correct crv baseline questionnaire rank w/ migration
(FKs prevent site_persistence fix from working)
|
Python
|
bsd-3-clause
|
uwcirg/true_nth_usa_portal,uwcirg/true_nth_usa_portal,uwcirg/true_nth_usa_portal,uwcirg/true_nth_usa_portal
|
Correct crv baseline questionnaire rank w/ migration
(FKs prevent site_persistence fix from working)
|
"""Correct rank of CRV baseline
Revision ID: 9ebdfeb28bef
Revises: a679f493bcdd
Create Date: 2017-10-09 10:16:40.584279
"""
from alembic import op
from sqlalchemy.orm import sessionmaker
from portal.models.questionnaire_bank import QuestionnaireBank
# revision identifiers, used by Alembic.
revision = '9ebdfeb28bef'
down_revision = 'a679f493bcdd'
def upgrade():
# site_persistence isn't handling small changes - wants to destroy and
# recreate, which breaks foreign key constraints to existing results, etc.
# https://github.com/uwcirg/ePROMs-site-config/pull/71 shows the desired
# rank corrections.
# On CRV_baseline:
# rank questionnaire
# 0: epic26
# 1: eproms_add
# 2: comorb
desired_order = ['epic26', 'eproms_add', 'comorb']
Session = sessionmaker()
bind = op.get_bind()
session = Session(bind=bind)
qb = session.query(QuestionnaireBank).filter(
QuestionnaireBank.name == 'CRV_baseline').one()
found_order = []
for q in qb.questionnaires:
found_order.append(q.name)
if found_order == desired_order:
# in correct order, done
return
# correct rank collides with existing - move out of way (+100)
for rank, name in enumerate(desired_order):
match = [q for q in qb.questionnaires if q.name == name]
assert len(match) == 1
match[0].rank = rank + 100
session.commit()
# now restore to desired rank
for rank, name in enumerate(desired_order):
match = [q for q in qb.questionnaires if q.name == name]
assert len(match) == 1
match[0].rank = rank
session.commit()
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
pass
# ### end Alembic commands ###
|
<commit_before><commit_msg>Correct crv baseline questionnaire rank w/ migration
(FKs prevent site_persistence fix from working)<commit_after>
|
"""Correct rank of CRV baseline
Revision ID: 9ebdfeb28bef
Revises: a679f493bcdd
Create Date: 2017-10-09 10:16:40.584279
"""
from alembic import op
from sqlalchemy.orm import sessionmaker
from portal.models.questionnaire_bank import QuestionnaireBank
# revision identifiers, used by Alembic.
revision = '9ebdfeb28bef'
down_revision = 'a679f493bcdd'
def upgrade():
# site_persistence isn't handling small changes - wants to destroy and
# recreate, which breaks foreign key constraints to existing results, etc.
# https://github.com/uwcirg/ePROMs-site-config/pull/71 shows the desired
# rank corrections.
# On CRV_baseline:
# rank questionnaire
# 0: epic26
# 1: eproms_add
# 2: comorb
desired_order = ['epic26', 'eproms_add', 'comorb']
Session = sessionmaker()
bind = op.get_bind()
session = Session(bind=bind)
qb = session.query(QuestionnaireBank).filter(
QuestionnaireBank.name == 'CRV_baseline').one()
found_order = []
for q in qb.questionnaires:
found_order.append(q.name)
if found_order == desired_order:
# in correct order, done
return
# correct rank collides with existing - move out of way (+100)
for rank, name in enumerate(desired_order):
match = [q for q in qb.questionnaires if q.name == name]
assert len(match) == 1
match[0].rank = rank + 100
session.commit()
# now restore to desired rank
for rank, name in enumerate(desired_order):
match = [q for q in qb.questionnaires if q.name == name]
assert len(match) == 1
match[0].rank = rank
session.commit()
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
pass
# ### end Alembic commands ###
|
Correct crv baseline questionnaire rank w/ migration
(FKs prevent site_persistence fix from working)"""Correct rank of CRV baseline
Revision ID: 9ebdfeb28bef
Revises: a679f493bcdd
Create Date: 2017-10-09 10:16:40.584279
"""
from alembic import op
from sqlalchemy.orm import sessionmaker
from portal.models.questionnaire_bank import QuestionnaireBank
# revision identifiers, used by Alembic.
revision = '9ebdfeb28bef'
down_revision = 'a679f493bcdd'
def upgrade():
# site_persistence isn't handling small changes - wants to destroy and
# recreate, which breaks foreign key constraints to existing results, etc.
# https://github.com/uwcirg/ePROMs-site-config/pull/71 shows the desired
# rank corrections.
# On CRV_baseline:
# rank questionnaire
# 0: epic26
# 1: eproms_add
# 2: comorb
desired_order = ['epic26', 'eproms_add', 'comorb']
Session = sessionmaker()
bind = op.get_bind()
session = Session(bind=bind)
qb = session.query(QuestionnaireBank).filter(
QuestionnaireBank.name == 'CRV_baseline').one()
found_order = []
for q in qb.questionnaires:
found_order.append(q.name)
if found_order == desired_order:
# in correct order, done
return
# correct rank collides with existing - move out of way (+100)
for rank, name in enumerate(desired_order):
match = [q for q in qb.questionnaires if q.name == name]
assert len(match) == 1
match[0].rank = rank + 100
session.commit()
# now restore to desired rank
for rank, name in enumerate(desired_order):
match = [q for q in qb.questionnaires if q.name == name]
assert len(match) == 1
match[0].rank = rank
session.commit()
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
pass
# ### end Alembic commands ###
|
<commit_before><commit_msg>Correct crv baseline questionnaire rank w/ migration
(FKs prevent site_persistence fix from working)<commit_after>"""Correct rank of CRV baseline
Revision ID: 9ebdfeb28bef
Revises: a679f493bcdd
Create Date: 2017-10-09 10:16:40.584279
"""
from alembic import op
from sqlalchemy.orm import sessionmaker
from portal.models.questionnaire_bank import QuestionnaireBank
# revision identifiers, used by Alembic.
revision = '9ebdfeb28bef'
down_revision = 'a679f493bcdd'
def upgrade():
# site_persistence isn't handling small changes - wants to destroy and
# recreate, which breaks foreign key constraints to existing results, etc.
# https://github.com/uwcirg/ePROMs-site-config/pull/71 shows the desired
# rank corrections.
# On CRV_baseline:
# rank questionnaire
# 0: epic26
# 1: eproms_add
# 2: comorb
desired_order = ['epic26', 'eproms_add', 'comorb']
Session = sessionmaker()
bind = op.get_bind()
session = Session(bind=bind)
qb = session.query(QuestionnaireBank).filter(
QuestionnaireBank.name == 'CRV_baseline').one()
found_order = []
for q in qb.questionnaires:
found_order.append(q.name)
if found_order == desired_order:
# in correct order, done
return
# correct rank collides with existing - move out of way (+100)
for rank, name in enumerate(desired_order):
match = [q for q in qb.questionnaires if q.name == name]
assert len(match) == 1
match[0].rank = rank + 100
session.commit()
# now restore to desired rank
for rank, name in enumerate(desired_order):
match = [q for q in qb.questionnaires if q.name == name]
assert len(match) == 1
match[0].rank = rank
session.commit()
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
pass
# ### end Alembic commands ###
|
|
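The +100 detour exists because assigning the final ranks directly would collide with ranks still occupied mid-shuffle (rank is effectively unique per questionnaire bank). The same two-phase idea reduced to plain Python, with hypothetical items carrying name and rank attributes:
from types import SimpleNamespace
def reorder(items, desired_names, offset=100):
    """Two-phase reorder for a rank column under a uniqueness constraint.
    Phase 1 parks every item at rank + offset so no final rank is occupied;
    phase 2 assigns the real ranks. Against a database each phase would be
    its own commit, exactly as in the migration above.
    """
    by_name = {item.name: item for item in items}
    for rank, name in enumerate(desired_names):
        by_name[name].rank = rank + offset   # phase 1: move out of the way
    for rank, name in enumerate(desired_names):
        by_name[name].rank = rank            # phase 2: settle the final ranks
qs = [SimpleNamespace(name=n, rank=r)
      for r, n in enumerate(['comorb', 'epic26', 'eproms_add'])]
reorder(qs, ['epic26', 'eproms_add', 'comorb'])
print([(q.name, q.rank) for q in qs])  # [('comorb', 2), ('epic26', 0), ('eproms_add', 1)]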
91abe26fd0b06339b3283d1dbe096edfdc52dadb
|
numpy-array-of-tuple.py
|
numpy-array-of-tuple.py
|
# Numpy converts a list of tuples *not* into an array of tuples, but into a 2D
# array instead.
list_of_tuples = [(1, 2), (3, 4)]
import numpy as np
print('list of tuples:', list_of_tuples, 'type:', type(list_of_tuples))
A = np.array(list_of_tuples)
print('numpy array of tuples:', A, 'type:', type(A))
# It makes computing unique rows trickier than it should be:
unique_A, indices_to_A = np.unique(list_of_tuples, return_inverse=True)
print('naive numpy unique:', unique_A, 'and indices:', indices_to_A) # WRONG!
# Workaround to do np.unique by row (http://stackoverflow.com/a/8024764/3438463)
A_by_row = np.empty(len(list_of_tuples), object)
A_by_row[:] = list_of_tuples
unique_A, indices_to_A = np.unique(A_by_row, return_inverse=True)
print('unique tuples:', unique_A, 'and indices:', indices_to_A)
|
Add numpy array of tuples
|
Add numpy array of tuples
|
Python
|
mit
|
cmey/surprising-snippets,cmey/surprising-snippets
|
Add numpy array of tuples
|
# Numpy converts a list of tuples *not* into an array of tuples, but into a 2D
# array instead.
list_of_tuples = [(1, 2), (3, 4)]
import numpy as np
print('list of tuples:', list_of_tuples, 'type:', type(list_of_tuples))
A = np.array(list_of_tuples)
print('numpy array of tuples:', A, 'type:', type(A))
# It makes computing unique rows trickier than it should be:
unique_A, indices_to_A = np.unique(list_of_tuples, return_inverse=True)
print('naive numpy unique:', unique_A, 'and indices:', indices_to_A) # WRONG!
# Workaround to do np.unique by row (http://stackoverflow.com/a/8024764/3438463)
A_by_row = np.empty(len(list_of_tuples), object)
A_by_row[:] = list_of_tuples
unique_A, indices_to_A = np.unique(A_by_row, return_inverse=True)
print('unique tuples:', unique_A, 'and indices:', indices_to_A)
|
<commit_before><commit_msg>Add numpy array of tuples<commit_after>
|
# Numpy converts a list of tuples *not* into an array of tuples, but into a 2D
# array instead.
list_of_tuples = [(1, 2), (3, 4)]
import numpy as np
print('list of tuples:', list_of_tuples, 'type:', type(list_of_tuples))
A = np.array(list_of_tuples)
print('numpy array of tuples:', A, 'type:', type(A))
# It makes computing unique rows trickier than it should:
unique_A, indices_to_A = np.unique(list_of_tuples, return_inverse=True)
print('naive numpy unique:', unique_A, 'and indices:', indices_to_A) # WRONG!
# Workaround to do np.unique by row (http://stackoverflow.com/a/8024764/3438463)
A_by_row = np.empty(len(list_of_tuples), object)
A_by_row[:] = list_of_tuples
unique_A, indices_to_A = np.unique(A_by_row, return_inverse=True)
print('unique tuples:', unique_A, 'and indices:', indices_to_A)
|
Add numpy array of tuples# Numpy converts a list of tuples *not* into an array of tuples, but into a 2D
# array instead.
list_of_tuples = [(1, 2), (3, 4)]
import numpy as np
print('list of tuples:', list_of_tuples, 'type:', type(list_of_tuples))
A = np.array(list_of_tuples)
print('numpy array of tuples:', A, 'type:', type(A))
# It makes computing unique rows trickier than it should:
unique_A, indices_to_A = np.unique(list_of_tuples, return_inverse=True)
print('naive numpy unique:', unique_A, 'and indices:', indices_to_A) # WRONG!
# Workaround to do np.unique by row (http://stackoverflow.com/a/8024764/3438463)
A_by_row = np.empty(len(list_of_tuples), object)
A_by_row[:] = list_of_tuples
unique_A, indices_to_A = np.unique(A_by_row, return_inverse=True)
print('unique tuples:', unique_A, 'and indices:', indices_to_A)
|
<commit_before><commit_msg>Add numpy array of tuples<commit_after># Numpy converts a list of tuples *not* into an array of tuples, but into a 2D
# array instead.
list_of_tuples = [(1, 2), (3, 4)]
import numpy as np
print('list of tuples:', list_of_tuples, 'type:', type(list_of_tuples))
A = np.array(list_of_tuples)
print('numpy array of tuples:', A, 'type:', type(A))
# It makes computing unique rows trickier than it should:
unique_A, indices_to_A = np.unique(list_of_tuples, return_inverse=True)
print('naive numpy unique:', unique_A, 'and indices:', indices_to_A) # WRONG!
# Workaround to do np.unique by row (http://stackoverflow.com/a/8024764/3438463)
A_by_row = np.empty(len(list_of_tuples), object)
A_by_row[:] = list_of_tuples
unique_A, indices_to_A = np.unique(A_by_row, return_inverse=True)
print('unique tuples:', unique_A, 'and indices:', indices_to_A)
|
|
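The object-array workaround above predates np.unique growing an axis argument; on NumPy 1.13 or later, unique rows (and the inverse indices) come straight from the 2-D array. A short sketch, assuming a reasonably recent NumPy:

import numpy as np

pairs = np.array([(1, 2), (3, 4), (1, 2)])
# axis=0 treats each row as a single element, so no dtype tricks are needed.
unique_rows, indices = np.unique(pairs, axis=0, return_inverse=True)
print(unique_rows)  # [[1 2]
                    #  [3 4]]
print(indices)      # [0 1 0]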
45c4b092a7753c3a5dd7c36baf2bcfc17b32a871
|
blink_strip/python_example/flash_example.py
|
blink_strip/python_example/flash_example.py
|
import serial
import time
class blinkyBoard:
def init(self, port, baud):
self.serial = serial.Serial(port, baud)
self.serial.open()
def sendPixel(self,r,g,b):
data = bytearray()
data.append(0x80 | (r>>1))
data.append(0x80 | (g>>1))
data.append(0x80 | (b>>1))
self.serial.write(data)
self.serial.flush()
def sendBreak(self):
data = bytearray()
for i in range(0,8):
data.append(0x00)
self.serial.write(data)
self.serial.flush()
b = blinkyBoard()
b.init('/dev/cu.usbmodemfd121', 57600)
b.sendBreak()
while True:
b.sendPixel(0,0,255)
for i in range(0,30):
b.sendPixel(255, 0, 0)
b.sendPixel(0,0,255)
b.sendBreak()
time.sleep(.01)
b.sendPixel(255,0,0)
for i in range(0,30):
b.sendPixel(0, 0, 255)
b.sendPixel(255,0,0)
b.sendBreak()
time.sleep(.01)
|
Add python data transmission example.
|
Add python data transmission example.
|
Python
|
apache-2.0
|
Blinkinlabs/BlinkyTape,Blinkinlabs/BlinkyTape,Blinkinlabs/BlinkyTape
|
Add python data transmission example.
|
import serial
import time
class blinkyBoard:
def init(self, port, baud):
self.serial = serial.Serial(port, baud)
self.serial.open()
def sendPixel(self,r,g,b):
data = bytearray()
data.append(0x80 | (r>>1))
data.append(0x80 | (g>>1))
data.append(0x80 | (b>>1))
self.serial.write(data)
self.serial.flush()
def sendBreak(self):
data = bytearray()
for i in range(0,8):
data.append(0x00)
self.serial.write(data)
self.serial.flush()
b = blinkyBoard()
b.init('/dev/cu.usbmodemfd121', 57600)
b.sendBreak()
while True:
b.sendPixel(0,0,255)
for i in range(0,30):
b.sendPixel(255, 0, 0)
b.sendPixel(0,0,255)
b.sendBreak()
time.sleep(.01)
b.sendPixel(255,0,0)
for i in range(0,30):
b.sendPixel(0, 0, 255)
b.sendPixel(255,0,0)
b.sendBreak()
time.sleep(.01)
|
<commit_before><commit_msg>Add python data transmission example.<commit_after>
|
import serial
import time
class blinkyBoard:
def init(self, port, baud):
self.serial = serial.Serial(port, baud)
self.serial.open()
def sendPixel(self,r,g,b):
data = bytearray()
data.append(0x80 | (r>>1))
data.append(0x80 | (g>>1))
data.append(0x80 | (b>>1))
self.serial.write(data)
self.serial.flush()
def sendBreak(self):
data = bytearray()
for i in range(0,8):
data.append(0x00)
self.serial.write(data)
self.serial.flush()
b = blinkyBoard()
b.init('/dev/cu.usbmodemfd121', 57600)
b.sendBreak()
while True:
b.sendPixel(0,0,255)
for i in range(0,30):
b.sendPixel(255, 0, 0)
b.sendPixel(0,0,255)
b.sendBreak()
time.sleep(.01)
b.sendPixel(255,0,0)
for i in range(0,30):
b.sendPixel(0, 0, 255)
b.sendPixel(255,0,0)
b.sendBreak()
time.sleep(.01)
|
Add python data transmission example.import serial
import time
class blinkyBoard:
def init(self, port, baud):
self.serial = serial.Serial(port, baud)
self.serial.open()
def sendPixel(self,r,g,b):
data = bytearray()
data.append(0x80 | (r>>1))
data.append(0x80 | (g>>1))
data.append(0x80 | (b>>1))
self.serial.write(data)
self.serial.flush()
def sendBreak(self):
data = bytearray()
for i in range(0,8):
data.append(0x00)
self.serial.write(data)
self.serial.flush()
b = blinkyBoard()
b.init('/dev/cu.usbmodemfd121', 57600)
b.sendBreak()
while True:
b.sendPixel(0,0,255)
for i in range(0,30):
b.sendPixel(255, 0, 0)
b.sendPixel(0,0,255)
b.sendBreak()
time.sleep(.01)
b.sendPixel(255,0,0)
for i in range(0,30):
b.sendPixel(0, 0, 255)
b.sendPixel(255,0,0)
b.sendBreak()
time.sleep(.01)
|
<commit_before><commit_msg>Add python data transmission example.<commit_after>import serial
import time
class blinkyBoard:
def init(self, port, baud):
self.serial = serial.Serial(port, baud)
self.serial.open()
def sendPixel(self,r,g,b):
data = bytearray()
data.append(0x80 | (r>>1))
data.append(0x80 | (g>>1))
data.append(0x80 | (b>>1))
self.serial.write(data)
self.serial.flush()
def sendBreak(self):
data = bytearray()
for i in range(0,8):
data.append(0x00)
self.serial.write(data)
self.serial.flush()
b = blinkyBoard()
b.init('/dev/cu.usbmodemfd121', 57600)
b.sendBreak()
while True:
b.sendPixel(0,0,255)
for i in range(0,30):
b.sendPixel(255, 0, 0)
b.sendPixel(0,0,255)
b.sendBreak()
time.sleep(.01)
b.sendPixel(255,0,0)
for i in range(0,30):
b.sendPixel(0, 0, 255)
b.sendPixel(255,0,0)
b.sendBreak()
time.sleep(.01)
|
|
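The wire format in the example above can be checked without hardware: each color byte is halved and sent with the high bit set, while a break is eight zero bytes, so the strip can tell pixel data from resync. A sketch that exercises just the framing with bytearrays (no serial port involved):

def encode_pixel(r, g, b):
    # The high bit flags a data byte; each channel keeps only 7 bits.
    return bytearray([0x80 | (r >> 1), 0x80 | (g >> 1), 0x80 | (b >> 1)])

def encode_break():
    # Eight 0x00 bytes (high bit clear) resynchronize the strip.
    return bytearray(8)

frame = encode_pixel(255, 0, 0) + encode_pixel(0, 0, 255) + encode_break()
assert all(byte & 0x80 for byte in frame[:6])  # pixel bytes are flagged
assert all(byte == 0 for byte in frame[6:])    # break bytes are zero
print(frame.hex())  # ff80808080ff0000000000000000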
4bf03eaf81f8d4c28e3b3b89c7442a787361eb5e
|
scripts/structure_mlsp2013_dataset.py
|
scripts/structure_mlsp2013_dataset.py
|
import csv
def test():
with open("CVfolds_2.txt", newline='') as id2set, open("rec_id2filename.txt", newline='') as id2file, open("rec_labels_test_hidden.txt", newline='') as id2label:
with open("file2label.csv", 'w', newline='') as file2label:
readId2Label = csv.reader(id2label)
readId2Set = csv.reader(id2set)
readId2File = csv.reader(id2file)
file2labelwriter = csv.writer(file2label)
id2file = {}
for r in readId2File:
if r[0] == 'rec_id':
print("Reading id to file...")
else:
id2file[r[0]] = r[1]
print("Done reading id to file.")
nb_samples = 0
nb_bird_present = 0
print("Creating file to labels csv...")
for (id2label, id2set) in zip(readId2Label, readId2Set):
if(id2set[0] != id2label[0]):
raise ValueError
iden = id2set[0]
if(id2set[1] == '0'):
nb_samples += 1
if(len(id2label) > 1):
labels = id2label[1:]
nb_bird_present += 1
f = id2file[iden]
file2labelwriter.writerow([f] + labels)
else:
file2labelwriter.writerow([f])
print("Number of training samples: ", nb_samples)
print("Number of training samples with birds present: ", nb_bird_present)
|
Add a script which structures the mlsp2013 data
|
Add a script which structures the mlsp2013 data
- creates a csv file which maps a file name to a label set
|
Python
|
mit
|
johnmartinsson/bird-species-classification,johnmartinsson/bird-species-classification
|
Add a script which structures the mlsp2013 data
- creates a csv file which maps a file name to a label set
|
import csv
def test():
with open("CVfolds_2.txt", newline='') as id2set, open("rec_id2filename.txt", newline='') as id2file, open("rec_labels_test_hidden.txt", newline='') as id2label:
with open("file2label.csv", 'w', newline='') as file2label:
readId2Label = csv.reader(id2label)
readId2Set = csv.reader(id2set)
readId2File = csv.reader(id2file)
file2labelwriter = csv.writer(file2label)
id2file = {}
for r in readId2File:
if r[0] == 'rec_id':
print("Reading id to file...")
else:
id2file[r[0]] = r[1]
print("Done reading id to file.")
nb_samples = 0
nb_bird_present = 0
print("Creating file to labels csv...")
for (id2label, id2set) in zip(readId2Label, readId2Set):
if(id2set[0] != id2label[0]):
raise ValueError
iden = id2set[0]
if(id2set[1] == '0'):
nb_samples += 1
if(len(id2label) > 1):
labels = id2label[1:]
nb_bird_present += 1
f = id2file[iden]
file2labelwriter.writerow([f] + labels)
else:
file2labelwriter.writerow([f])
print("Number of training samples: ", nb_samples)
print("Number of training samples with birds present: ", nb_bird_present)
|
<commit_before><commit_msg>Add a script which structures the mlsp2013 data
- creates a csv file which maps a file name to a label set<commit_after>
|
import csv
def test():
with open("CVfolds_2.txt", newline='') as id2set, open("rec_id2filename.txt", newline='') as id2file, open("rec_labels_test_hidden.txt", newline='') as id2label:
with open("file2label.csv", 'w', newline='') as file2label:
readId2Label = csv.reader(id2label)
readId2Set = csv.reader(id2set)
readId2File = csv.reader(id2file)
file2labelwriter = csv.writer(file2label)
id2file = {}
for r in readId2File:
if r[0] == 'rec_id':
print("Reading id to file...")
else:
id2file[r[0]] = r[1]
print("Done reading id to file.")
nb_samples = 0
nb_bird_present = 0
print("Creating file to labels csv...")
for (id2label, id2set) in zip(readId2Label, readId2Set):
if(id2set[0] != id2label[0]):
raise ValueError
iden = id2set[0]
if(id2set[1] == '0'):
nb_samples += 1
if(len(id2label) > 1):
labels = id2label[1:]
nb_bird_present += 1
f = id2file[iden]
file2labelwriter.writerow([f] + labels)
else:
file2labelwriter.writerow([f])
print("Number of training samples: ", nb_samples)
print("Number of training samples with birds present: ", nb_bird_present)
|
Add a script which structures the mlsp2013 data
- creates a csv file which maps a file name to a label setimport csv
def test():
with open("CVfolds_2.txt", newline='') as id2set, open("rec_id2filename.txt", newline='') as id2file, open("rec_labels_test_hidden.txt", newline='') as id2label:
with open("file2label.csv", 'w', newline='') as file2label:
readId2Label = csv.reader(id2label)
readId2Set = csv.reader(id2set)
readId2File = csv.reader(id2file)
file2labelwriter = csv.writer(file2label)
id2file = {}
for r in readId2File:
if r[0] == 'rec_id':
print("Reading id to file...")
else:
id2file[r[0]] = r[1]
print("Done reading id to file.")
nb_samples = 0
nb_bird_present = 0
print("Creating file to labels csv...")
for (id2label, id2set) in zip(readId2Label, readId2Set):
if(id2set[0] != id2label[0]):
raise ValueError
iden = id2set[0]
if(id2set[1] == '0'):
nb_samples += 1
if(len(id2label) > 1):
labels = id2label[1:]
nb_bird_present += 1
f = id2file[iden]
file2labelwriter.writerow([f] + labels)
else:
file2labelwriter.writerow([f])
print("Number of training samples: ", nb_samples)
print("Number of training samples with birds present: ", nb_bird_present)
|
<commit_before><commit_msg>Add a script which structures the mlsp2013 data
- creates a csv file which maps a file name to a label set<commit_after>import csv
def test():
with open("CVfolds_2.txt", newline='') as id2set, open("rec_id2filename.txt", newline='') as id2file, open("rec_labels_test_hidden.txt", newline='') as id2label:
with open("file2label.csv", 'w', newline='') as file2label:
readId2Label = csv.reader(id2label)
readId2Set = csv.reader(id2set)
readId2File = csv.reader(id2file)
file2labelwriter = csv.writer(file2label)
id2file = {}
for r in readId2File:
if r[0] == 'rec_id':
print("Reading id to file...")
else:
id2file[r[0]] = r[1]
print("Done reading id to file.")
nb_samples = 0
nb_bird_present = 0
print("Creating file to labels csv...")
for (id2label, id2set) in zip(readId2Label, readId2Set):
if(id2set[0] != id2label[0]):
raise ValueError
iden = id2set[0]
if(id2set[1] == '0'):
nb_samples += 1
if(len(id2label) > 1):
labels = id2label[1:]
nb_bird_present += 1
f = id2file[iden]
file2labelwriter.writerow([f] + labels)
else:
file2labelwriter.writerow([f])
print("Number of training samples: ", nb_samples)
print("Number of training samples with birds present: ", nb_bird_present)
|
|
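The script above is a three-way join keyed on rec_id: one file maps ids to filenames, one marks fold membership, one carries label sets. The same join on tiny in-memory data, with io.StringIO standing in for the real MLSP 2013 files:

import csv
import io

id2file_txt = 'rec_id,filename\n0,rec_a\n1,rec_b\n'
id2set_txt = '0,0\n1,1\n'        # second column: 0 means training fold
id2label_txt = '0,3,7\n1\n'      # columns after rec_id are class labels

id2file = {row[0]: row[1]
           for row in csv.reader(io.StringIO(id2file_txt))
           if row[0] != 'rec_id'}

for labels, fold in zip(csv.reader(io.StringIO(id2label_txt)),
                        csv.reader(io.StringIO(id2set_txt))):
    assert labels[0] == fold[0]  # rows must line up by rec_id
    if fold[1] == '0':           # keep only training recordings
        print([id2file[labels[0]]] + labels[1:])
# ['rec_a', '3', '7']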
dc399c7834b918ea99baa92d1cdededa43623d87
|
ovp_projects/migrations/0033_auto_20170208_2118.py
|
ovp_projects/migrations/0033_auto_20170208_2118.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-02-08 21:18
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('ovp_projects', '0032_auto_20170206_1905'),
]
operations = [
migrations.AlterField(
model_name='apply',
name='email',
field=models.CharField(blank=True, max_length=190, null=True, verbose_name='email'),
),
]
|
Add missing migrations for last commit
|
Add missing migrations for last commit
|
Python
|
agpl-3.0
|
OpenVolunteeringPlatform/django-ovp-projects,OpenVolunteeringPlatform/django-ovp-projects
|
Add missing migrations for last commit
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-02-08 21:18
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('ovp_projects', '0032_auto_20170206_1905'),
]
operations = [
migrations.AlterField(
model_name='apply',
name='email',
field=models.CharField(blank=True, max_length=190, null=True, verbose_name='email'),
),
]
|
<commit_before><commit_msg>Add missing migrations for last commit<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-02-08 21:18
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('ovp_projects', '0032_auto_20170206_1905'),
]
operations = [
migrations.AlterField(
model_name='apply',
name='email',
field=models.CharField(blank=True, max_length=190, null=True, verbose_name='email'),
),
]
|
Add missing migrations for last commit# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-02-08 21:18
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('ovp_projects', '0032_auto_20170206_1905'),
]
operations = [
migrations.AlterField(
model_name='apply',
name='email',
field=models.CharField(blank=True, max_length=190, null=True, verbose_name='email'),
),
]
|
<commit_before><commit_msg>Add missing migrations for last commit<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-02-08 21:18
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('ovp_projects', '0032_auto_20170206_1905'),
]
operations = [
migrations.AlterField(
model_name='apply',
name='email',
field=models.CharField(blank=True, max_length=190, null=True, verbose_name='email'),
),
]
|
|
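The migration does not say why max_length is 190, but a plausible motivation is MySQL's index key limit: older InnoDB row formats cap a single-column index at 767 bytes, and utf8mb4 reserves 4 bytes per character, which is why indexed CharFields are often kept at or below 191 characters. The arithmetic:

# 767-byte index key limit / 4 bytes per utf8mb4 character
print(767 // 4)  # 191 -- hence max_length values such as 190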
f320ff11f74de4df9933fc25db6e9fabbcb89f81
|
api/bots/followup/test_followup.py
|
api/bots/followup/test_followup.py
|
#!/usr/bin/env python
from __future__ import absolute_import
from __future__ import print_function
import os
import sys
our_dir = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, os.path.normpath(os.path.join(our_dir)))
# For dev setups, we can find the API in the repo itself.
if os.path.exists(os.path.join(our_dir, '..')):
sys.path.insert(0, '..')
from bots_test_lib import BotTestCase
class TestFollowUpBot(BotTestCase):
bot_name = "followup"
def test_bot(self):
expected_send_reply = {
"": 'Please specify the message you want to send to followup stream after @mention-bot'
}
self.check_expected_responses(expected_send_reply, expected_method='send_reply')
expected_send_message = {
"foo": {
'type': 'stream',
'to': 'followup',
'subject': 'foo_sender@zulip.com',
'content': 'from foo_sender@zulip.com: foo',
},
"I have completed my task": {
'type': 'stream',
'to': 'followup',
'subject': 'foo_sender@zulip.com',
'content': 'from foo_sender@zulip.com: I have completed my task',
},
}
self.check_expected_responses(expected_send_message, expected_method='send_message')
|
Add tests for followup bot.
|
bots: Add tests for followup bot.
'followup' bot has different message handling behavior for
different messages.
For usual messages it calls 'send_message' function of
'BotHandlerApi' class.
For empty messages it calls 'send_reply' function of
'BotHandlerApi' class.
|
Python
|
apache-2.0
|
verma-varsha/zulip,eeshangarg/zulip,vabs22/zulip,punchagan/zulip,tommyip/zulip,vaidap/zulip,synicalsyntax/zulip,vabs22/zulip,punchagan/zulip,kou/zulip,vabs22/zulip,timabbott/zulip,vaidap/zulip,amanharitsh123/zulip,andersk/zulip,shubhamdhama/zulip,jrowan/zulip,rht/zulip,Galexrt/zulip,rht/zulip,rht/zulip,andersk/zulip,brockwhittaker/zulip,timabbott/zulip,showell/zulip,synicalsyntax/zulip,andersk/zulip,brainwane/zulip,mahim97/zulip,rht/zulip,mahim97/zulip,brainwane/zulip,eeshangarg/zulip,zulip/zulip,jackrzhang/zulip,tommyip/zulip,kou/zulip,zulip/zulip,zulip/zulip,synicalsyntax/zulip,Galexrt/zulip,mahim97/zulip,timabbott/zulip,eeshangarg/zulip,synicalsyntax/zulip,andersk/zulip,jackrzhang/zulip,dhcrzf/zulip,shubhamdhama/zulip,showell/zulip,verma-varsha/zulip,jrowan/zulip,timabbott/zulip,shubhamdhama/zulip,mahim97/zulip,hackerkid/zulip,kou/zulip,showell/zulip,zulip/zulip,amanharitsh123/zulip,brockwhittaker/zulip,brainwane/zulip,kou/zulip,hackerkid/zulip,timabbott/zulip,brockwhittaker/zulip,verma-varsha/zulip,shubhamdhama/zulip,tommyip/zulip,vaidap/zulip,jackrzhang/zulip,synicalsyntax/zulip,dhcrzf/zulip,amanharitsh123/zulip,tommyip/zulip,zulip/zulip,jrowan/zulip,andersk/zulip,brockwhittaker/zulip,verma-varsha/zulip,Galexrt/zulip,dhcrzf/zulip,zulip/zulip,dhcrzf/zulip,amanharitsh123/zulip,rishig/zulip,amanharitsh123/zulip,showell/zulip,hackerkid/zulip,Galexrt/zulip,shubhamdhama/zulip,Galexrt/zulip,kou/zulip,rishig/zulip,jrowan/zulip,eeshangarg/zulip,verma-varsha/zulip,punchagan/zulip,mahim97/zulip,eeshangarg/zulip,andersk/zulip,dhcrzf/zulip,punchagan/zulip,timabbott/zulip,rishig/zulip,rishig/zulip,brainwane/zulip,dhcrzf/zulip,andersk/zulip,dhcrzf/zulip,jackrzhang/zulip,shubhamdhama/zulip,showell/zulip,punchagan/zulip,synicalsyntax/zulip,tommyip/zulip,eeshangarg/zulip,brainwane/zulip,jrowan/zulip,mahim97/zulip,brockwhittaker/zulip,vabs22/zulip,vaidap/zulip,punchagan/zulip,brainwane/zulip,shubhamdhama/zulip,eeshangarg/zulip,jrowan/zulip,Galexrt/zulip,jackrzhang/zulip,showell/zulip,hackerkid/zulip,hackerkid/zulip,zulip/zulip,vaidap/zulip,rht/zulip,timabbott/zulip,tommyip/zulip,brainwane/zulip,Galexrt/zulip,rishig/zulip,kou/zulip,vaidap/zulip,jackrzhang/zulip,synicalsyntax/zulip,verma-varsha/zulip,jackrzhang/zulip,rht/zulip,hackerkid/zulip,rishig/zulip,rht/zulip,rishig/zulip,tommyip/zulip,punchagan/zulip,showell/zulip,kou/zulip,vabs22/zulip,vabs22/zulip,brockwhittaker/zulip,hackerkid/zulip,amanharitsh123/zulip
|
bots: Add tests for followup bot.
'followup' bot has different message handling behavior for
different messages.
For usual messages it calls 'send_message' function of
'BotHandlerApi' class.
For empty messages it calls 'send_reply' function of
'BotHandlerApi' class.
|
#!/usr/bin/env python
from __future__ import absolute_import
from __future__ import print_function
import os
import sys
our_dir = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, os.path.normpath(os.path.join(our_dir)))
# For dev setups, we can find the API in the repo itself.
if os.path.exists(os.path.join(our_dir, '..')):
sys.path.insert(0, '..')
from bots_test_lib import BotTestCase
class TestFollowUpBot(BotTestCase):
bot_name = "followup"
def test_bot(self):
expected_send_reply = {
"": 'Please specify the message you want to send to followup stream after @mention-bot'
}
self.check_expected_responses(expected_send_reply, expected_method='send_reply')
expected_send_message = {
"foo": {
'type': 'stream',
'to': 'followup',
'subject': 'foo_sender@zulip.com',
'content': 'from foo_sender@zulip.com: foo',
},
"I have completed my task": {
'type': 'stream',
'to': 'followup',
'subject': 'foo_sender@zulip.com',
'content': 'from foo_sender@zulip.com: I have completed my task',
},
}
self.check_expected_responses(expected_send_message, expected_method='send_message')
|
<commit_before><commit_msg>bots: Add tests for followup bot.
'followup' bot has different message handling behavior for
different messages.
For usual messages it calls 'send_message' function of
'BotHandlerApi' class.
For empty messages it calls 'send_reply' function of
'BotHandlerApi' class.<commit_after>
|
#!/usr/bin/env python
from __future__ import absolute_import
from __future__ import print_function
import os
import sys
our_dir = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, os.path.normpath(os.path.join(our_dir)))
# For dev setups, we can find the API in the repo itself.
if os.path.exists(os.path.join(our_dir, '..')):
sys.path.insert(0, '..')
from bots_test_lib import BotTestCase
class TestFollowUpBot(BotTestCase):
bot_name = "followup"
def test_bot(self):
expected_send_reply = {
"": 'Please specify the message you want to send to followup stream after @mention-bot'
}
self.check_expected_responses(expected_send_reply, expected_method='send_reply')
expected_send_message = {
"foo": {
'type': 'stream',
'to': 'followup',
'subject': 'foo_sender@zulip.com',
'content': 'from foo_sender@zulip.com: foo',
},
"I have completed my task": {
'type': 'stream',
'to': 'followup',
'subject': 'foo_sender@zulip.com',
'content': 'from foo_sender@zulip.com: I have completed my task',
},
}
self.check_expected_responses(expected_send_message, expected_method='send_message')
|
bots: Add tests for followup bot.
'followup' bot has different message handling behavior for
different messages.
For usual messages it calls 'send_message' function of
'BotHandlerApi' class.
For empty messages it calls 'send_reply' function of
'BotHandlerApi' class.#!/usr/bin/env python
from __future__ import absolute_import
from __future__ import print_function
import os
import sys
our_dir = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, os.path.normpath(os.path.join(our_dir)))
# For dev setups, we can find the API in the repo itself.
if os.path.exists(os.path.join(our_dir, '..')):
sys.path.insert(0, '..')
from bots_test_lib import BotTestCase
class TestFollowUpBot(BotTestCase):
bot_name = "followup"
def test_bot(self):
expected_send_reply = {
"": 'Please specify the message you want to send to followup stream after @mention-bot'
}
self.check_expected_responses(expected_send_reply, expected_method='send_reply')
expected_send_message = {
"foo": {
'type': 'stream',
'to': 'followup',
'subject': 'foo_sender@zulip.com',
'content': 'from foo_sender@zulip.com: foo',
},
"I have completed my task": {
'type': 'stream',
'to': 'followup',
'subject': 'foo_sender@zulip.com',
'content': 'from foo_sender@zulip.com: I have completed my task',
},
}
self.check_expected_responses(expected_send_message, expected_method='send_message')
|
<commit_before><commit_msg>bots: Add tests for followup bot.
'followup' bot has different message handling behavior for
different messages.
For usual messages it calls 'send_message' function of
'BotHandlerApi' class.
For empty messages it calls 'send_reply' function of
'BotHandlerApi' class.<commit_after>#!/usr/bin/env python
from __future__ import absolute_import
from __future__ import print_function
import os
import sys
our_dir = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, os.path.normpath(os.path.join(our_dir)))
# For dev setups, we can find the API in the repo itself.
if os.path.exists(os.path.join(our_dir, '..')):
sys.path.insert(0, '..')
from bots_test_lib import BotTestCase
class TestFollowUpBot(BotTestCase):
bot_name = "followup"
def test_bot(self):
expected_send_reply = {
"": 'Please specify the message you want to send to followup stream after @mention-bot'
}
self.check_expected_responses(expected_send_reply, expected_method='send_reply')
expected_send_message = {
"foo": {
'type': 'stream',
'to': 'followup',
'subject': 'foo_sender@zulip.com',
'content': 'from foo_sender@zulip.com: foo',
},
"I have completed my task": {
'type': 'stream',
'to': 'followup',
'subject': 'foo_sender@zulip.com',
'content': 'from foo_sender@zulip.com: I have completed my task',
},
}
self.check_expected_responses(expected_send_message, expected_method='send_message')
|
|
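The expectations above imply a handler shaped roughly like the sketch below: empty content earns a usage reply, anything else is forwarded to the 'followup' stream keyed by the sender's email. This is a hypothetical reconstruction from the test data, not the actual followup bot source:

class FollowupHandler(object):
    def handle_message(self, message, client, state_handler=None):
        if message['content'] == '':
            client.send_reply(
                message,
                'Please specify the message you want to send to followup '
                'stream after @mention-bot')
            return
        client.send_message(dict(
            type='stream',
            to='followup',
            subject=message['sender_email'],
            content='from %s: %s' % (message['sender_email'],
                                     message['content']),
        ))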
0f9697c94c75e86edacfaa7982a5128779ee82e7
|
ideascube/search/migrations/0002_reindex.py
|
ideascube/search/migrations/0002_reindex.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
from ideascube.search.utils import create_index_table
reindex = lambda *args: create_index_table(force=True)
class Migration(migrations.Migration):
dependencies = [('search', '0001_initial')]
operations = [
migrations.RunPython(reindex, reindex),
]
|
Add a migration script that reindex the contents
|
Add a migration script that reindex the contents
|
Python
|
agpl-3.0
|
ideascube/ideascube,ideascube/ideascube,ideascube/ideascube,ideascube/ideascube
|
Add a migration script that reindex the contents
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
from ideascube.search.utils import create_index_table
reindex = lambda *args: create_index_table(force=True)
class Migration(migrations.Migration):
dependencies = [('search', '0001_initial')]
operations = [
migrations.RunPython(reindex, reindex),
]
|
<commit_before><commit_msg>Add a migration script that reindex the contents<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
from ideascube.search.utils import create_index_table
reindex = lambda *args: create_index_table(force=True)
class Migration(migrations.Migration):
dependencies = [('search', '0001_initial')]
operations = [
migrations.RunPython(reindex, reindex),
]
|
Add a migration script that reindex the contents# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
from ideascube.search.utils import create_index_table
reindex = lambda *args: create_index_table(force=True)
class Migration(migrations.Migration):
dependencies = [('search', '0001_initial')]
operations = [
migrations.RunPython(reindex, reindex),
]
|
<commit_before><commit_msg>Add a migration script that reindex the contents<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
from ideascube.search.utils import create_index_table
reindex = lambda *args: create_index_table(force=True)
class Migration(migrations.Migration):
dependencies = [('search', '0001_initial')]
operations = [
migrations.RunPython(reindex, reindex),
]
|
|
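RunPython calls its forward and reverse functions with two positional arguments, the historical apps registry and the schema editor; the lambda above swallows both with *args because create_index_table needs neither. A tiny stand-alone illustration of that calling convention:

reindex = lambda *args: print('reindexing (ignored %d args)' % len(args))

apps, schema_editor = object(), object()  # stand-ins for Django's objects
reindex(apps, schema_editor)              # reindexing (ignored 2 args)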
93642ed71a2a0ec8fe96858b20c319c061706f39
|
cleanup_repo.py
|
cleanup_repo.py
|
#!/usr/bin/env python
import subprocess
import os.path
import sys
import re
from contextlib import contextmanager
TAGS_RE = re.compile('.+/tags/(.+)')
def git(*args):
p = subprocess.Popen(['git'] + list(args), stdout=subprocess.PIPE)
output = p.communicate()[0]
if p.returncode != 0:
raise Exception('git failed, err code {}'.format(p.returncode))
return output
@contextmanager
def chdir(path):
cwd = os.path.abspath(os.getcwd())
try:
os.chdir(path)
yield
finally:
os.chdir(cwd)
def name_of(remote):
return remote.rsplit('/', 1)[1]
def get_branches_and_tags():
output = git('branch', '-r')
branches, tags = [], []
for line in output.splitlines():
line = line.strip()
m = TAGS_RE.match(line)
t = tags if m is not None else branches
t.append(line)
return branches, tags
@contextmanager
def checkout(branch, remote=None):
# if remote is not None -> create local branch from remote
if remote is not None:
git('checkout', '-b', branch, remote)
else:
git('checkout', branch)
yield
git('checkout', 'master')
def branchtag_to_tag(tag_name, remote_tag):
with checkout(tag_name, remote_tag):
pass
git('tag', tag_name, tag_name)
git('branch', '-D', tag_name)
def cleanup(repo):
with chdir(repo):
branches, tags = get_branches_and_tags()
for branch in branches:
# trunk is automatically remapped to master by git svn
if name_of(branch) == 'trunk':
continue
with checkout(name_of(branch), branch):
pass
for tag in tags:
branchtag_to_tag(name_of(tag), tag)
if __name__ == '__main__':
cleanup(sys.argv[1])
|
Add little script to do basic cleanup after git svn clone
|
Add little script to do basic cleanup after git svn clone
|
Python
|
mit
|
develersrl/git-externals,develersrl/git-externals,develersrl/git-externals
|
Add little script to do basic cleanup after git svn clone
|
#!/usr/bin/env python
import subprocess
import os.path
import sys
import re
from contextlib import contextmanager
TAGS_RE = re.compile('.+/tags/(.+)')
def git(*args):
p = subprocess.Popen(['git'] + list(args), stdout=subprocess.PIPE)
output = p.communicate()[0]
if p.returncode != 0:
raise Exception('git failed, err code {}'.format(p.returncode))
return output
@contextmanager
def chdir(path):
cwd = os.path.abspath(os.getcwd())
try:
os.chdir(path)
yield
finally:
os.chdir(cwd)
def name_of(remote):
return remote.rsplit('/', 1)[1]
def get_branches_and_tags():
output = git('branch', '-r')
branches, tags = [], []
for line in output.splitlines():
line = line.strip()
m = TAGS_RE.match(line)
t = tags if m is not None else branches
t.append(line)
return branches, tags
@contextmanager
def checkout(branch, remote=None):
# if remote is not None -> create local branch from remote
if remote is not None:
git('checkout', '-b', branch, remote)
else:
git('checkout', branch)
yield
git('checkout', 'master')
def branchtag_to_tag(tag_name, remote_tag):
with checkout(tag_name, remote_tag):
pass
git('tag', tag_name, tag_name)
git('branch', '-D', tag_name)
def cleanup(repo):
with chdir(repo):
branches, tags = get_branches_and_tags()
for branch in branches:
# trunk is automatically remapped to master by git svn
if name_of(branch) == 'trunk':
continue
with checkout(name_of(branch), branch):
pass
for tag in tags:
branchtag_to_tag(name_of(tag), tag)
if __name__ == '__main__':
cleanup(sys.argv[1])
|
<commit_before><commit_msg>Add little script to do basic cleanup after git svn clone<commit_after>
|
#!/usr/bin/env python
import subprocess
import os.path
import sys
import re
from contextlib import contextmanager
TAGS_RE = re.compile('.+/tags/(.+)')
def git(*args):
p = subprocess.Popen(['git'] + list(args), stdout=subprocess.PIPE)
output = p.communicate()[0]
if p.returncode != 0:
raise Exception('git failed, err code {}'.format(p.returncode))
return output
@contextmanager
def chdir(path):
cwd = os.path.abspath(os.getcwd())
try:
os.chdir(path)
yield
finally:
os.chdir(cwd)
def name_of(remote):
return remote.rsplit('/', 1)[1]
def get_branches_and_tags():
output = git('branch', '-r')
branches, tags = [], []
for line in output.splitlines():
line = line.strip()
m = TAGS_RE.match(line)
t = tags if m is not None else branches
t.append(line)
return branches, tags
@contextmanager
def checkout(branch, remote=None):
# if remote is not None -> create local branch from remote
if remote is not None:
git('checkout', '-b', branch, remote)
else:
git('checkout', branch)
yield
git('checkout', 'master')
def branchtag_to_tag(tag_name, remote_tag):
with checkout(tag_name, remote_tag):
pass
git('tag', tag_name, tag_name)
git('branch', '-D', tag_name)
def cleanup(repo):
with chdir(repo):
branches, tags = get_branches_and_tags()
for branch in branches:
# trunk is automatically remapped to master by git svn
if name_of(branch) == 'trunk':
continue
with checkout(name_of(branch), branch):
pass
for tag in tags:
branchtag_to_tag(name_of(tag), tag)
if __name__ == '__main__':
cleanup(sys.argv[1])
|
Add little script to do basic cleanup after git svn clone#!/usr/bin/env python
import subprocess
import os.path
import sys
import re
from contextlib import contextmanager
TAGS_RE = re.compile('.+/tags/(.+)')
def git(*args):
p = subprocess.Popen(['git'] + list(args), stdout=subprocess.PIPE)
output = p.communicate()[0]
if p.returncode != 0:
raise Exception('git failed, err code {}'.format(p.returncode))
return output
@contextmanager
def chdir(path):
cwd = os.path.abspath(os.getcwd())
try:
os.chdir(path)
yield
finally:
os.chdir(cwd)
def name_of(remote):
return remote.rsplit('/', 1)[1]
def get_branches_and_tags():
output = git('branch', '-r')
branches, tags = [], []
for line in output.splitlines():
line = line.strip()
m = TAGS_RE.match(line)
t = tags if m is not None else branches
t.append(line)
return branches, tags
@contextmanager
def checkout(branch, remote=None):
# if remote is not None -> create local branch from remote
if remote is not None:
git('checkout', '-b', branch, remote)
else:
git('checkout', branch)
yield
git('checkout', 'master')
def branchtag_to_tag(tag_name, remote_tag):
with checkout(tag_name, remote_tag):
pass
git('tag', tag_name, tag_name)
git('branch', '-D', tag_name)
def cleanup(repo):
with chdir(repo):
branches, tags = get_branches_and_tags()
for branch in branches:
# trunk is automatically remapped to master by git svn
if name_of(branch) == 'trunk':
continue
with checkout(name_of(branch), branch):
pass
for tag in tags:
branchtag_to_tag(name_of(tag), tag)
if __name__ == '__main__':
cleanup(sys.argv[1])
|
<commit_before><commit_msg>Add little script to do basic cleanup after git svn clone<commit_after>#!/usr/bin/env python
import subprocess
import os.path
import sys
import re
from contextlib import contextmanager
TAGS_RE = re.compile('.+/tags/(.+)')
def git(*args):
p = subprocess.Popen(['git'] + list(args), stdout=subprocess.PIPE)
output = p.communicate()[0]
if p.returncode != 0:
raise Exception('git failed, err code {}'.format(p.returncode))
return output
@contextmanager
def chdir(path):
cwd = os.path.abspath(os.getcwd())
try:
os.chdir(path)
yield
finally:
os.chdir(cwd)
def name_of(remote):
return remote.rsplit('/', 1)[1]
def get_branches_and_tags():
output = git('branch', '-r')
branches, tags = [], []
for line in output.splitlines():
line = line.strip()
m = TAGS_RE.match(line)
t = tags if m is not None else branches
t.append(line)
return branches, tags
@contextmanager
def checkout(branch, remote=None):
# if remote is not None -> create local branch from remote
if remote is not None:
git('checkout', '-b', branch, remote)
else:
git('checkout', branch)
yield
git('checkout', 'master')
def branchtag_to_tag(tag_name, remote_tag):
with checkout(tag_name, remote_tag):
pass
git('tag', tag_name, tag_name)
git('branch', '-D', tag_name)
def cleanup(repo):
with chdir(repo):
branches, tags = get_branches_and_tags()
for branch in branches:
# trunk is automatically remapped to master by git svn
if name_of(branch) == 'trunk':
continue
with checkout(name_of(branch), branch):
pass
for tag in tags:
branchtag_to_tag(name_of(tag), tag)
if __name__ == '__main__':
cleanup(sys.argv[1])
|
|
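The classification above rests on two small helpers: TAGS_RE decides whether a remote ref came from svn's tags/ area, and name_of keeps the part after the last slash. A quick stand-alone check on made-up ref names:

import re

TAGS_RE = re.compile('.+/tags/(.+)')

def name_of(remote):
    return remote.rsplit('/', 1)[1]

for ref in ('svn/trunk', 'svn/branches/feature-x', 'svn/tags/v1.2'):
    kind = 'tag' if TAGS_RE.match(ref) else 'branch'
    print(ref, '->', kind, name_of(ref))
# svn/trunk -> branch trunk
# svn/branches/feature-x -> branch feature-x
# svn/tags/v1.2 -> tag v1.2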
3055bc1e261c4f3eef7eb24d704cb9c19a8d4723
|
sfepy/discrete/dg/dg_conditions.py
|
sfepy/discrete/dg/dg_conditions.py
|
from __future__ import absolute_import
import numpy as nm
from sfepy.base.base import basestr, Container, Struct
from sfepy.discrete.functions import Function
from sfepy.discrete.conditions import Condition, PeriodicBC, EssentialBC
import six
class DGPeriodicBC(PeriodicBC):
...
class DGEssentialBC(EssentialBC):
...
|
Reformat source files, add some docstrings.
|
Reformat source files, add some docstrings.
|
Python
|
bsd-3-clause
|
sfepy/sfepy,sfepy/sfepy,vlukes/sfepy,BubuLK/sfepy,BubuLK/sfepy,vlukes/sfepy,sfepy/sfepy,BubuLK/sfepy,rc/sfepy,rc/sfepy,vlukes/sfepy,rc/sfepy
|
Reformat source files, add some docstrings.
|
from __future__ import absolute_import
import numpy as nm
from sfepy.base.base import basestr, Container, Struct
from sfepy.discrete.functions import Function
from sfepy.discrete.conditions import Condition, PeriodicBC, EssentialBC
import six
class DGPeriodicBC(PeriodicBC):
...
class DGEssentialBC(EssentialBC):
...
|
<commit_before><commit_msg>Reformat source files, add some docstrings.<commit_after>
|
from __future__ import absolute_import
import numpy as nm
from sfepy.base.base import basestr, Container, Struct
from sfepy.discrete.functions import Function
from sfepy.discrete.conditions import Condition, PeriodicBC, EssentialBC
import six
class DGPeriodicBC(PeriodicBC):
...
class DGEssentialBC(EssentialBC):
...
|
Reformat source files, add some docstrings.from __future__ import absolute_import
import numpy as nm
from sfepy.base.base import basestr, Container, Struct
from sfepy.discrete.functions import Function
from sfepy.discrete.conditions import Condition, PeriodicBC, EssentialBC
import six
class DGPeriodicBC(PeriodicBC):
...
class DGEssentialBC(EssentialBC):
...
|
<commit_before><commit_msg>Reformat source files, add some docstrings.<commit_after>from __future__ import absolute_import
import numpy as nm
from sfepy.base.base import basestr, Container, Struct
from sfepy.discrete.functions import Function
from sfepy.discrete.conditions import Condition, PeriodicBC, EssentialBC
import six
class DGPeriodicBC(PeriodicBC):
...
class DGEssentialBC(EssentialBC):
...
|
|
10fc0e0b85edb04c676fd75e95ab539084063d13
|
python/lisa_workbook.py
|
python/lisa_workbook.py
|
chapters, problems_per_page = list(map(int, input().strip().split(' ')))
problems_per_chapter = list(map(int, input().strip().split(' ')))
pages = [[]]
special_problems = 0
for chapter, problems in enumerate(problems_per_chapter):
current_problem = 1
if len(pages[-1]) != 0:
pages.append([])
while current_problem <= problems:
if len(pages[-1]) == problems_per_page:
pages.append([])
if current_problem == len(pages):
special_problems += 1
pages[-1].append(current_problem)
current_problem += 1
print(special_problems)
|
Solve lisa workbook w/ too many nested loops
|
Solve lisa workbook w/ too many nested loops
|
Python
|
mit
|
rootulp/hackerrank,rootulp/hackerrank,rootulp/hackerrank,rootulp/hackerrank,rootulp/hackerrank,rootulp/hackerrank
|
Solve lisa workbook w/ too many nested loops
|
chapters, problems_per_page = list(map(int, input().strip().split(' ')))
problems_per_chapter = list(map(int, input().strip().split(' ')))
pages = [[]]
special_problems = 0
for chapter, problems in enumerate(problems_per_chapter):
current_problem = 1
if len(pages[-1]) != 0:
pages.append([])
while current_problem <= problems:
if len(pages[-1]) == problems_per_page:
pages.append([])
if current_problem == len(pages):
special_problems += 1
pages[-1].append(current_problem)
current_problem += 1
print(special_problems)
|
<commit_before><commit_msg>Solve lisa workbook w/ too many nested loops<commit_after>
|
chapters, problems_per_page = list(map(int, input().strip().split(' ')))
problems_per_chapter = list(map(int, input().strip().split(' ')))
pages = [[]]
special_problems = 0
for chapter, problems in enumerate(problems_per_chapter):
current_problem = 1
if len(pages[-1]) != 0:
pages.append([])
while current_problem <= problems:
if len(pages[-1]) == problems_per_page:
pages.append([])
if current_problem == len(pages):
special_problems += 1
pages[-1].append(current_problem)
current_problem += 1
print(special_problems)
|
Solve lisa workbook w/ too many nested loopschapters, problems_per_page = list(map(int, input().strip().split(' ')))
problems_per_chapter = list(map(int, input().strip().split(' ')))
pages = [[]]
special_problems = 0
for chapter, problems in enumerate(problems_per_chapter):
current_problem = 1
if len(pages[-1]) != 0:
pages.append([])
while current_problem <= problems:
if len(pages[-1]) == problems_per_page:
pages.append([])
if current_problem == len(pages):
special_problems += 1
pages[-1].append(current_problem)
current_problem += 1
print(special_problems)
|
<commit_before><commit_msg>Solve lisa workbook w/ too many nested loops<commit_after>chapters, problems_per_page = list(map(int, input().strip().split(' ')))
problems_per_chapter = list(map(int, input().strip().split(' ')))
pages = [[]]
special_problems = 0
for chapter, problems in enumerate(problems_per_chapter):
current_problem = 1
if len(pages[-1]) != 0:
pages.append([])
while current_problem <= problems:
if len(pages[-1]) == problems_per_page:
pages.append([])
if current_problem == len(pages):
special_problems += 1
pages[-1].append(current_problem)
current_problem += 1
print(special_problems)
|
|
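The nested loops can be flattened: each chapter contributes ceil(problems / k) pages, and a page is special exactly when its page number falls inside the range of problem numbers printed on it. A sketch of the same count done per page range instead of per problem, checked against the HackerRank sample (k=3, chapters 4 2 6 1 10, answer 4):

def special_pages(k, problems_per_chapter):
    special, page = 0, 1
    for problems in problems_per_chapter:
        for start in range(1, problems + 1, k):
            end = min(start + k - 1, problems)
            if start <= page <= end:  # page number lands on this page
                special += 1
            page += 1
    return special

print(special_pages(3, [4, 2, 6, 1, 10]))  # 4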
f88e5b7438617fdd10a25951fc442f59a8def1ca
|
zerver/migrations/0018_realm_emoji_message.py
|
zerver/migrations/0018_realm_emoji_message.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.core.validators
class Migration(migrations.Migration):
dependencies = [
('zerver', '0017_userprofile_bot_type'),
]
operations = [
migrations.AlterField(
model_name='realmemoji',
name='name',
field=models.TextField(validators=[django.core.validators.MinLengthValidator(1), django.core.validators.RegexValidator(regex=b'^[0-9a-zA-Z.\\-_]+(?<![.\\-_])$', message='Invalid characters in Emoji name')]),
),
]
|
Add missing no-op migration for realm_emoji.
|
Add missing no-op migration for realm_emoji.
Because of how Django's migration system works, changing the error
message attached to a model field's validator results in an extra
migration.
|
Python
|
apache-2.0
|
joyhchen/zulip,sharmaeklavya2/zulip,vaidap/zulip,timabbott/zulip,ahmadassaf/zulip,mohsenSy/zulip,arpith/zulip,AZtheAsian/zulip,zulip/zulip,niftynei/zulip,rht/zulip,dhcrzf/zulip,vabs22/zulip,vaidap/zulip,calvinleenyc/zulip,calvinleenyc/zulip,umkay/zulip,synicalsyntax/zulip,SmartPeople/zulip,KingxBanana/zulip,niftynei/zulip,amyliu345/zulip,peguin40/zulip,brainwane/zulip,amanharitsh123/zulip,sonali0901/zulip,showell/zulip,jackrzhang/zulip,tommyip/zulip,reyha/zulip,amanharitsh123/zulip,rht/zulip,jrowan/zulip,ahmadassaf/zulip,aakash-cr7/zulip,umkay/zulip,ryanbackman/zulip,zacps/zulip,souravbadami/zulip,calvinleenyc/zulip,kou/zulip,sup95/zulip,paxapy/zulip,verma-varsha/zulip,dawran6/zulip,paxapy/zulip,KingxBanana/zulip,aakash-cr7/zulip,vabs22/zulip,Diptanshu8/zulip,cosmicAsymmetry/zulip,rishig/zulip,dhcrzf/zulip,joyhchen/zulip,umkay/zulip,zulip/zulip,showell/zulip,sonali0901/zulip,zulip/zulip,timabbott/zulip,ahmadassaf/zulip,andersk/zulip,dhcrzf/zulip,joyhchen/zulip,vaidap/zulip,samatdav/zulip,samatdav/zulip,souravbadami/zulip,kou/zulip,samatdav/zulip,souravbadami/zulip,kou/zulip,eeshangarg/zulip,shubhamdhama/zulip,aakash-cr7/zulip,Juanvulcano/zulip,umkay/zulip,kou/zulip,ahmadassaf/zulip,sup95/zulip,sup95/zulip,susansls/zulip,j831/zulip,samatdav/zulip,SmartPeople/zulip,eeshangarg/zulip,shubhamdhama/zulip,joyhchen/zulip,vaidap/zulip,krtkmj/zulip,isht3/zulip,samatdav/zulip,dattatreya303/zulip,vikas-parashar/zulip,jphilipsen05/zulip,punchagan/zulip,timabbott/zulip,grave-w-grave/zulip,rishig/zulip,eeshangarg/zulip,synicalsyntax/zulip,TigorC/zulip,jphilipsen05/zulip,calvinleenyc/zulip,sup95/zulip,jrowan/zulip,dawran6/zulip,jackrzhang/zulip,dhcrzf/zulip,arpith/zulip,amyliu345/zulip,calvinleenyc/zulip,Jianchun1/zulip,mohsenSy/zulip,shubhamdhama/zulip,punchagan/zulip,jainayush975/zulip,dattatreya303/zulip,PhilSk/zulip,dhcrzf/zulip,Jianchun1/zulip,eeshangarg/zulip,zacps/zulip,susansls/zulip,isht3/zulip,ryanbackman/zulip,niftynei/zulip,reyha/zulip,shubhamdhama/zulip,grave-w-grave/zulip,susansls/zulip,joyhchen/zulip,JPJPJPOPOP/zulip,jrowan/zulip,christi3k/zulip,christi3k/zulip,jphilipsen05/zulip,vabs22/zulip,brainwane/zulip,andersk/zulip,synicalsyntax/zulip,sonali0901/zulip,tommyip/zulip,ryanbackman/zulip,vabs22/zulip,amanharitsh123/zulip,vaidap/zulip,krtkmj/zulip,jainayush975/zulip,cosmicAsymmetry/zulip,shubhamdhama/zulip,rishig/zulip,j831/zulip,zacps/zulip,SmartPeople/zulip,mahim97/zulip,jainayush975/zulip,tommyip/zulip,Galexrt/zulip,reyha/zulip,ryanbackman/zulip,peguin40/zulip,Jianchun1/zulip,zacps/zulip,mahim97/zulip,amanharitsh123/zulip,JPJPJPOPOP/zulip,niftynei/zulip,jphilipsen05/zulip,ryanbackman/zulip,jackrzhang/zulip,brockwhittaker/zulip,Galexrt/zulip,JPJPJPOPOP/zulip,krtkmj/zulip,cosmicAsymmetry/zulip,blaze225/zulip,jphilipsen05/zulip,jackrzhang/zulip,sup95/zulip,andersk/zulip,christi3k/zulip,dattatreya303/zulip,umkay/zulip,Diptanshu8/zulip,peguin40/zulip,brainwane/zulip,showell/zulip,peguin40/zulip,isht3/zulip,timabbott/zulip,krtkmj/zulip,jainayush975/zulip,amyliu345/zulip,showell/zulip,PhilSk/zulip,brainwane/zulip,Diptanshu8/zulip,ahmadassaf/zulip,zacps/zulip,verma-varsha/zulip,jainayush975/zulip,Juanvulcano/zulip,jrowan/zulip,souravbadami/zulip,hackerkid/zulip,cosmicAsymmetry/zulip,AZtheAsian/zulip,AZtheAsian/zulip,amanharitsh123/zulip,mohsenSy/zulip,synicalsyntax/zulip,krtkmj/zulip,brockwhittaker/zulip,amyliu345/zulip,zulip/zulip,ahmadassaf/zulip,susansls/zulip,j831/zulip,rht/zulip,AZtheAsian/zulip,JPJPJPOPOP/zulip,isht3/zulip,JPJPJPOPOP/zulip,jrowan/zulip,amanharitsh123/zulip,sharmaeklavya2/zulip,tommyip/zulip,souravbadami/zulip,TigorC/zulip,susansls/zulip,andersk/zulip,joyhchen/zulip,cosmicAsymmetry/zulip,punchagan/zulip,dawran6/zulip,grave-w-grave/zulip,PhilSk/zulip,arpith/zulip,christi3k/zulip,Juanvulcano/zulip,vikas-parashar/zulip,timabbott/zulip,brainwane/zulip,vabs22/zulip,KingxBanana/zulip,AZtheAsian/zulip,j831/zulip,arpith/zulip,Juanvulcano/zulip,andersk/zulip,rht/zulip,arpith/zulip,dawran6/zulip,blaze225/zulip,Diptanshu8/zulip,SmartPeople/zulip,aakash-cr7/zulip,Galexrt/zulip,PhilSk/zulip,vikas-parashar/zulip,verma-varsha/zulip,TigorC/zulip,rishig/zulip,mahim97/zulip,blaze225/zulip,JPJPJPOPOP/zulip,KingxBanana/zulip,zulip/zulip,timabbott/zulip
|
Add missing no-op migration for realm_emoji.
Because of how Django's migration system works, changing the error
message attached to a model field's validator results in an extra
migration.
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.core.validators
class Migration(migrations.Migration):
dependencies = [
('zerver', '0017_userprofile_bot_type'),
]
operations = [
migrations.AlterField(
model_name='realmemoji',
name='name',
field=models.TextField(validators=[django.core.validators.MinLengthValidator(1), django.core.validators.RegexValidator(regex=b'^[0-9a-zA-Z.\\-_]+(?<![.\\-_])$', message='Invalid characters in Emoji name')]),
),
]
|
<commit_before><commit_msg>Add missing no-op migration for realm_emoji.
Because of how Django's migration system works, changing the error
message attached to a model field's validator results in an extra
migration.<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.core.validators
class Migration(migrations.Migration):
dependencies = [
('zerver', '0017_userprofile_bot_type'),
]
operations = [
migrations.AlterField(
model_name='realmemoji',
name='name',
field=models.TextField(validators=[django.core.validators.MinLengthValidator(1), django.core.validators.RegexValidator(regex=b'^[0-9a-zA-Z.\\-_]+(?<![.\\-_])$', message='Invalid characters in Emoji name')]),
),
]
|
Add missing no-op migration for realm_emoji.
Because of how Django's migration system works, changing the error
message attached to a model field's validator results in an extra
migration.# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.core.validators
class Migration(migrations.Migration):
dependencies = [
('zerver', '0017_userprofile_bot_type'),
]
operations = [
migrations.AlterField(
model_name='realmemoji',
name='name',
field=models.TextField(validators=[django.core.validators.MinLengthValidator(1), django.core.validators.RegexValidator(regex=b'^[0-9a-zA-Z.\\-_]+(?<![.\\-_])$', message='Invalid characters in Emoji name')]),
),
]
|
<commit_before><commit_msg>Add missing no-op migration for realm_emoji.
Because of how Django's migration system works, changing the error
message attached to a model field's validator results in an extra
migration.<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.core.validators
class Migration(migrations.Migration):
dependencies = [
('zerver', '0017_userprofile_bot_type'),
]
operations = [
migrations.AlterField(
model_name='realmemoji',
name='name',
field=models.TextField(validators=[django.core.validators.MinLengthValidator(1), django.core.validators.RegexValidator(regex=b'^[0-9a-zA-Z.\\-_]+(?<![.\\-_])$', message='Invalid characters in Emoji name')]),
),
]
|
|
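The regex being migrated is easy to exercise on its own: it allows ASCII letters, digits, dots, dashes and underscores, and the lookbehind rejects names that end in a separator. A sketch using plain re with the same pattern (as a str rather than the migration's bytes literal):

import re

EMOJI_NAME = re.compile(r'^[0-9a-zA-Z.\-_]+(?<![.\-_])$')

for name in ('thumbs_up', 'party-parrot', 'bad_name_', 'no spaces'):
    print(name, '->', 'valid' if EMOJI_NAME.match(name) else 'invalid')
# thumbs_up -> valid
# party-parrot -> valid
# bad_name_ -> invalid
# no spaces -> invalid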
5968c21c2c2b0e8305229fe5976d9d13786390e5
|
notifications/schedule_posted.py
|
notifications/schedule_posted.py
|
from consts.notification_type import NotificationType
from notifications.base_notification import BaseNotification
class SchedulePostedNotification(BaseNotification):
def __init__(self, event):
self.event = event
def _build_dict(self):
data = {}
data['message_type'] = NotificationType.type_names[NotificationType.SCHEDULE_POSTED]
data['message_data'] = {}
data['message_data']['event_key'] = self.event.key_name
data['message_data']['event_name'] = self.event.name
data['message_data']['first_match_time'] = self.event.matches[0].time
return data
|
Add notification for schedule being posted
|
Add notification for schedule being posted
|
Python
|
mit
|
fangeugene/the-blue-alliance,phil-lopreiato/the-blue-alliance,nwalters512/the-blue-alliance,synth3tk/the-blue-alliance,verycumbersome/the-blue-alliance,the-blue-alliance/the-blue-alliance,verycumbersome/the-blue-alliance,1fish2/the-blue-alliance,josephbisch/the-blue-alliance,tsteward/the-blue-alliance,tsteward/the-blue-alliance,bvisness/the-blue-alliance,1fish2/the-blue-alliance,nwalters512/the-blue-alliance,josephbisch/the-blue-alliance,1fish2/the-blue-alliance,jaredhasenklein/the-blue-alliance,bvisness/the-blue-alliance,fangeugene/the-blue-alliance,fangeugene/the-blue-alliance,bdaroz/the-blue-alliance,phil-lopreiato/the-blue-alliance,1fish2/the-blue-alliance,nwalters512/the-blue-alliance,josephbisch/the-blue-alliance,fangeugene/the-blue-alliance,synth3tk/the-blue-alliance,bdaroz/the-blue-alliance,bvisness/the-blue-alliance,jaredhasenklein/the-blue-alliance,nwalters512/the-blue-alliance,the-blue-alliance/the-blue-alliance,verycumbersome/the-blue-alliance,jaredhasenklein/the-blue-alliance,tsteward/the-blue-alliance,fangeugene/the-blue-alliance,josephbisch/the-blue-alliance,bdaroz/the-blue-alliance,tsteward/the-blue-alliance,josephbisch/the-blue-alliance,bdaroz/the-blue-alliance,phil-lopreiato/the-blue-alliance,the-blue-alliance/the-blue-alliance,jaredhasenklein/the-blue-alliance,bdaroz/the-blue-alliance,the-blue-alliance/the-blue-alliance,nwalters512/the-blue-alliance,the-blue-alliance/the-blue-alliance,synth3tk/the-blue-alliance,verycumbersome/the-blue-alliance,synth3tk/the-blue-alliance,bvisness/the-blue-alliance,1fish2/the-blue-alliance,synth3tk/the-blue-alliance,1fish2/the-blue-alliance,synth3tk/the-blue-alliance,tsteward/the-blue-alliance,verycumbersome/the-blue-alliance,bvisness/the-blue-alliance,josephbisch/the-blue-alliance,nwalters512/the-blue-alliance,fangeugene/the-blue-alliance,tsteward/the-blue-alliance,jaredhasenklein/the-blue-alliance,jaredhasenklein/the-blue-alliance,phil-lopreiato/the-blue-alliance,phil-lopreiato/the-blue-alliance,verycumbersome/the-blue-alliance,phil-lopreiato/the-blue-alliance,bdaroz/the-blue-alliance,the-blue-alliance/the-blue-alliance,bvisness/the-blue-alliance
|
Add notification for schedule being posted
|
from consts.notification_type import NotificationType
from notifications.base_notification import BaseNotification
class SchedulePostedNotification(BaseNotification):
def __init__(self, event):
self.event = event
def _build_dict(self):
data = {}
data['message_type'] = NotificationType.type_names[NotificationType.SCHEDULE_POSTED]
data['message_data'] = {}
data['message_data']['event_key'] = self.event.key_name
data['message_data']['event_name'] = self.event.name
data['message_data']['first_match_time'] = self.event.matches[0].time
return data
|
<commit_before><commit_msg>Add notification for schedule being posted<commit_after>
|
from consts.notification_type import NotificationType
from notifications.base_notification import BaseNotification
class SchedulePostedNotification(BaseNotification):
def __init__(self, event):
self.event = event
def _build_dict(self):
data = {}
data['message_type'] = NotificationType.type_names[NotificationType.SCHEDULE_POSTED]
data['message_data'] = {}
data['message_data']['event_key'] = self.event.key_name
data['message_data']['event_name'] = self.event.name
data['message_data']['first_match_time'] = self.event.matches[0].time
return data
|
Add notification for schedule being postedfrom consts.notification_type import NotificationType
from notifications.base_notification import BaseNotification
class SchedulePostedNotification(BaseNotification):
def __init__(self, event):
self.event = event
def _build_dict(self):
data = {}
data['message_type'] = NotificationType.type_names[NotificationType.SCHEDULE_POSTED]
data['message_data'] = {}
data['message_data']['event_key'] = self.event.key_name
data['message_data']['event_name'] = self.event.name
data['message_data']['first_match_time'] = self.event.matches[0].time
return data
|
<commit_before><commit_msg>Add notification for schedule being posted<commit_after>from consts.notification_type import NotificationType
from notifications.base_notification import BaseNotification
class SchedulePostedNotification(BaseNotification):
def __init__(self, event):
self.event = event
def _build_dict(self):
data = {}
data['message_type'] = NotificationType.type_names[NotificationType.SCHEDULE_POSTED]
data['message_data'] = {}
data['message_data']['event_key'] = self.event.key_name
data['message_data']['event_name'] = self.event.name
data['message_data']['first_match_time'] = self.event.matches[0].time
return data
|
|
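A minimal sketch of the payload _build_dict() above produces; the event key, name, and match time are illustrative values, and the 'schedule_posted' string is an assumed entry of NotificationType.type_names, not taken from the commit.
import datetime
# Hypothetical values only -- standing in for a real TBA event model.
expected = {
    'message_type': 'schedule_posted',  # assumed value of NotificationType.type_names[NotificationType.SCHEDULE_POSTED]
    'message_data': {
        'event_key': '2014casj',
        'event_name': 'Silicon Valley Regional',
        'first_match_time': datetime.datetime(2014, 3, 1, 9, 0),
    },
}
print(expected['message_data']['event_key'])  # 2014casj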
4aa37bafa437614542c58f6d8ad40d9d8670df95
|
test/test_logger.py
|
test/test_logger.py
|
# encoding: utf-8
"""
.. codeauthor:: Tsuyoshi Hombashi <tsuyoshi.hombashi@gmail.com>
"""
from __future__ import print_function
from __future__ import unicode_literals
import logbook
from pytablewriter import (
set_logger,
set_log_level,
)
import pytest
class Test_set_logger(object):
@pytest.mark.parametrize(["value"], [
[True],
[False],
])
def test_smoke(self, value):
set_logger(value)
class Test_set_log_level(object):
@pytest.mark.parametrize(["value"], [
[logbook.CRITICAL],
[logbook.ERROR],
[logbook.WARNING],
[logbook.NOTICE],
[logbook.INFO],
[logbook.DEBUG],
[logbook.TRACE],
[logbook.NOTSET],
])
def test_smoke(self, value):
set_log_level(value)
@pytest.mark.parametrize(["value", "expected"], [
[None, LookupError],
["unexpected", LookupError],
])
def test_exception(self, value, expected):
with pytest.raises(expected):
set_log_level(value)
|
Add test cases for the logger
|
Add test cases for the logger
|
Python
|
mit
|
thombashi/pytablewriter
|
Add test cases for the logger
|
# encoding: utf-8
"""
.. codeauthor:: Tsuyoshi Hombashi <tsuyoshi.hombashi@gmail.com>
"""
from __future__ import print_function
from __future__ import unicode_literals
import logbook
from pytablewriter import (
set_logger,
set_log_level,
)
import pytest
class Test_set_logger(object):
@pytest.mark.parametrize(["value"], [
[True],
[False],
])
def test_smoke(self, value):
set_logger(value)
class Test_set_log_level(object):
@pytest.mark.parametrize(["value"], [
[logbook.CRITICAL],
[logbook.ERROR],
[logbook.WARNING],
[logbook.NOTICE],
[logbook.INFO],
[logbook.DEBUG],
[logbook.TRACE],
[logbook.NOTSET],
])
def test_smoke(self, value):
set_log_level(value)
@pytest.mark.parametrize(["value", "expected"], [
[None, LookupError],
["unexpected", LookupError],
])
def test_exception(self, value, expected):
with pytest.raises(expected):
set_log_level(value)
|
<commit_before><commit_msg>Add test cases for the logger<commit_after>
|
# encoding: utf-8
"""
.. codeauthor:: Tsuyoshi Hombashi <tsuyoshi.hombashi@gmail.com>
"""
from __future__ import print_function
from __future__ import unicode_literals
import logbook
from pytablewriter import (
set_logger,
set_log_level,
)
import pytest
class Test_set_logger(object):
@pytest.mark.parametrize(["value"], [
[True],
[False],
])
def test_smoke(self, value):
set_logger(value)
class Test_set_log_level(object):
@pytest.mark.parametrize(["value"], [
[logbook.CRITICAL],
[logbook.ERROR],
[logbook.WARNING],
[logbook.NOTICE],
[logbook.INFO],
[logbook.DEBUG],
[logbook.TRACE],
[logbook.NOTSET],
])
def test_smoke(self, value):
set_log_level(value)
@pytest.mark.parametrize(["value", "expected"], [
[None, LookupError],
["unexpected", LookupError],
])
def test_exception(self, value, expected):
with pytest.raises(expected):
set_log_level(value)
|
Add test cases for the logger# encoding: utf-8
"""
.. codeauthor:: Tsuyoshi Hombashi <tsuyoshi.hombashi@gmail.com>
"""
from __future__ import print_function
from __future__ import unicode_literals
import logbook
from pytablewriter import (
set_logger,
set_log_level,
)
import pytest
class Test_set_logger(object):
@pytest.mark.parametrize(["value"], [
[True],
[False],
])
def test_smoke(self, value):
set_logger(value)
class Test_set_log_level(object):
@pytest.mark.parametrize(["value"], [
[logbook.CRITICAL],
[logbook.ERROR],
[logbook.WARNING],
[logbook.NOTICE],
[logbook.INFO],
[logbook.DEBUG],
[logbook.TRACE],
[logbook.NOTSET],
])
def test_smoke(self, value):
set_log_level(value)
@pytest.mark.parametrize(["value", "expected"], [
[None, LookupError],
["unexpected", LookupError],
])
def test_exception(self, value, expected):
with pytest.raises(expected):
set_log_level(value)
|
<commit_before><commit_msg>Add test cases for the logger<commit_after># encoding: utf-8
"""
.. codeauthor:: Tsuyoshi Hombashi <tsuyoshi.hombashi@gmail.com>
"""
from __future__ import print_function
from __future__ import unicode_literals
import logbook
from pytablewriter import (
set_logger,
set_log_level,
)
import pytest
class Test_set_logger(object):
@pytest.mark.parametrize(["value"], [
[True],
[False],
])
def test_smoke(self, value):
set_logger(value)
class Test_set_log_level(object):
@pytest.mark.parametrize(["value"], [
[logbook.CRITICAL],
[logbook.ERROR],
[logbook.WARNING],
[logbook.NOTICE],
[logbook.INFO],
[logbook.DEBUG],
[logbook.TRACE],
[logbook.NOTSET],
])
def test_smoke(self, value):
set_log_level(value)
@pytest.mark.parametrize(["value", "expected"], [
[None, LookupError],
["unexpected", LookupError],
])
def test_exception(self, value, expected):
with pytest.raises(expected):
set_log_level(value)
|
|
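A short usage sketch of the two functions these tests smoke-check; it assumes pytablewriter and logbook are installed, and relies only on the public names the tests above import.
import logbook
import pytablewriter
pytablewriter.set_logger(True)                # turn library logging on
pytablewriter.set_log_level(logbook.DEBUG)    # accepts any logbook level constant
pytablewriter.set_log_level(logbook.WARNING)  # quiet it down again
pytablewriter.set_logger(False)               # disable logging entirely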
71cd3709ba08bf87825816099e203eae57c05752
|
dictionaries/scripts/create_english_superset_addendum.py
|
dictionaries/scripts/create_english_superset_addendum.py
|
import optparse
import os
optparser = optparse.OptionParser()
optparser.add_option("-n", "--new_files", dest="new_files", default="", help="Comma separated list of new files")
optparser.add_option("-d", "--directory", dest="directory", default="dictionaries/", help="Directory with dictionaries")
optparser.add_option("-i", "--start_suffix", dest="start_suffix", type=int, help="Start suffix, i.e. the last suffix + 1")
(opts, _) = optparser.parse_args()
full_path = os.path.abspath(opts.directory)
new_filenames = opts.new_files.split(',')
existing_english_words = set()
for filename in os.listdir(full_path):
if filename.startswith('dict') and not filename in new_filenames:
for line in open(full_path+'/'+filename, encoding='utf-8'):
translations = line.strip().split('\t')
foreign_word = translations[0]
# skip the first word because it is the foreign word
for word in translations[1:]:
existing_english_words.add(word)
new_english_words = set()
for filename in new_filenames:
for line in open(full_path + '/' + filename, encoding='utf-8'):
translations = line.strip().split('\t')
foreign_word = translations[0]
# skip the first word because it is the foreign word
for word in translations[1:]:
if word not in existing_english_words:
new_english_words.add(word)
all_english_words_list = list(new_english_words)
words_per_batch = 10000
words_by_batch = [all_english_words_list[i:i+words_per_batch] for i in range(0, len(all_english_words_list), words_per_batch)]
start_suffix = opts.start_suffix or 0
for i, word_batch in enumerate(words_by_batch):
with open(full_path+'/english.superset'+"{0:0=2d}".format(start_suffix+i), 'w', encoding='utf-8') as text_file:
text_file.write("\n".join(word_batch))
|
Add script that creates an addendum to the English superset, to account for new dictionaries being added. Ensures we don't already have the English words in the existing set before adding them in
|
Add script that creates an addendum to the English superset, to account for new dictionaries being added. Ensures we don't already have the English words in the existing set before adding them in
|
Python
|
mit
|
brendandc/multilingual-google-image-scraper
|
Add script that creates an addendum to the English superset, to account for new dictionaries being added. Ensures we don't already have the English words in the existing set before adding them in
|
import optparse
import os
optparser = optparse.OptionParser()
optparser.add_option("-n", "--new_files", dest="new_files", default="", help="Comma separated list of new files")
optparser.add_option("-d", "--directory", dest="directory", default="dictionaries/", help="Directory with dictionaries")
optparser.add_option("-i", "--start_suffix", dest="start_suffix", type=int, help="Start suffix, i.e. the last suffix + 1")
(opts, _) = optparser.parse_args()
full_path = os.path.abspath(opts.directory)
new_filenames = opts.new_files.split(',')
existing_english_words = set()
for filename in os.listdir(full_path):
if filename.startswith('dict') and not filename in new_filenames:
for line in open(full_path+'/'+filename, encoding='utf-8'):
translations = line.strip().split('\t')
foreign_word = translations[0]
# skip the first word because it is the foreign word
for word in translations[1:]:
existing_english_words.add(word)
new_english_words = set()
for filename in new_filenames:
for line in open(full_path + '/' + filename, encoding='utf-8'):
translations = line.strip().split('\t')
foreign_word = translations[0]
# skip the first word because it is the foreign word
for word in translations[1:]:
if word not in existing_english_words:
new_english_words.add(word)
all_english_words_list = list(new_english_words)
words_per_batch = 10000
words_by_batch = [all_english_words_list[i:i+words_per_batch] for i in range(0, len(all_english_words_list), words_per_batch)]
start_suffix = opts.start_suffix or 0
for i, word_batch in enumerate(words_by_batch):
with open(full_path+'/english.superset'+"{0:0=2d}".format(start_suffix+i), 'w', encoding='utf-8') as text_file:
text_file.write("\n".join(word_batch))
|
<commit_before><commit_msg>Add script that creates an addendum to the English superset, to account for new dictionaries being added. Ensures we don't already have the English words in the existing set before adding them in<commit_after>
|
import optparse
import os
optparser = optparse.OptionParser()
optparser.add_option("-n", "--new_files", dest="new_files", default="", help="Comma separated list of new files")
optparser.add_option("-d", "--directory", dest="directory", default="dictionaries/", help="Directory with dictionaries")
optparser.add_option("-i", "--start_suffix", dest="start_suffix", type=int, help="Start suffix, i.e. the last suffix + 1")
(opts, _) = optparser.parse_args()
full_path = os.path.abspath(opts.directory)
new_filenames = opts.new_files.split(',')
existing_english_words = set()
for filename in os.listdir(full_path):
if filename.startswith('dict') and not filename in new_filenames:
for line in open(full_path+'/'+filename, encoding='utf-8'):
translations = line.strip().split('\t')
foreign_word = translations[0]
# skip the first word because it is the foreign word
for word in translations[1:]:
existing_english_words.add(word)
new_english_words = set()
for filename in new_filenames:
for line in open(full_path + '/' + filename, encoding='utf-8'):
translations = line.strip().split('\t')
foreign_word = translations[0]
# skip the first word because it is the foreign word
for word in translations[1:]:
if word not in existing_english_words:
new_english_words.add(word)
all_english_words_list = list(new_english_words)
words_per_batch = 10000
words_by_batch = [all_english_words_list[i:i+words_per_batch] for i in range(0, len(all_english_words_list), words_per_batch)]
start_suffix = opts.start_suffix or 0
for i, word_batch in enumerate(words_by_batch):
with open(full_path+'/english.superset'+"{0:0=2d}".format(start_suffix+i), 'w', encoding='utf-8') as text_file:
text_file.write("\n".join(word_batch))
|
Add script that creates an addendum to the English superset, to account for new dictionaries being added. Ensures we don't already have the English words in the existing set before adding them inimport optparse
import os
optparser = optparse.OptionParser()
optparser.add_option("-n", "--new_files", dest="new_files", default="", help="Comma separated list of new files")
optparser.add_option("-d", "--directory", dest="directory", default="dictionaries/", help="Directory with dictionaries")
optparser.add_option("-i", "--start_suffix", dest="start_suffix", type=int, help="Start suffix, i.e. the last suffix + 1")
(opts, _) = optparser.parse_args()
full_path = os.path.abspath(opts.directory)
new_filenames = opts.new_files.split(',')
existing_english_words = set()
for filename in os.listdir(full_path):
if filename.startswith('dict') and not filename in new_filenames:
for line in open(full_path+'/'+filename, encoding='utf-8'):
translations = line.strip().split('\t')
foreign_word = translations[0]
# skip the first word because it is the foreign word
for word in translations[1:]:
existing_english_words.add(word)
new_english_words = set()
for filename in new_filenames:
for line in open(full_path + '/' + filename, encoding='utf-8'):
translations = line.strip().split('\t')
foreign_word = translations[0]
# skip the first word because it is the foreign word
for word in translations[1:]:
if word not in existing_english_words:
new_english_words.add(word)
all_english_words_list = list(new_english_words)
words_per_batch = 10000
words_by_batch = [all_english_words_list[i:i+words_per_batch] for i in range(0, len(all_english_words_list), words_per_batch)]
start_suffix = opts.start_suffix or 0
for i, word_batch in enumerate(words_by_batch):
with open(full_path+'/english.superset'+"{0:0=2d}".format(start_suffix+i), 'w', encoding='utf-8') as text_file:
text_file.write("\n".join(word_batch))
|
<commit_before><commit_msg>Add script that creates an addendum to the English superset, to account for new dictionaries being added. Ensures we don't already have the English words in the existing set before adding them in<commit_after>import optparse
import os
optparser = optparse.OptionParser()
optparser.add_option("-n", "--new_files", dest="new_files", default="", help="Comma separated list of new files")
optparser.add_option("-d", "--directory", dest="directory", default="dictionaries/", help="Directory with dictionaries")
optparser.add_option("-i", "--start_suffix", dest="start_suffix", type=int, help="Start suffix, i.e. the last suffix + 1")
(opts, _) = optparser.parse_args()
full_path = os.path.abspath(opts.directory)
new_filenames = opts.new_files.split(',')
existing_english_words = set()
for filename in os.listdir(full_path):
if filename.startswith('dict') and not filename in new_filenames:
for line in open(full_path+'/'+filename, encoding='utf-8'):
translations = line.strip().split('\t')
foreign_word = translations[0]
# skip the first word because it is the foreign word
for word in translations[1:]:
existing_english_words.add(word)
new_english_words = set()
for filename in new_filenames:
for line in open(full_path + '/' + filename, encoding='utf-8'):
translations = line.strip().split('\t')
foreign_word = translations[0]
# skip the first word because it is the foreign word
for word in translations[1:]:
if word not in existing_english_words:
new_english_words.add(word)
all_english_words_list = list(new_english_words)
words_per_batch = 10000
words_by_batch = [all_english_words_list[i:i+words_per_batch] for i in range(0, len(all_english_words_list), words_per_batch)]
start_suffix = opts.start_suffix or 0
for i, word_batch in enumerate(words_by_batch):
with open(full_path+'/english.superset'+"{0:0=2d}".format(start_suffix+i), 'w', encoding='utf-8') as text_file:
text_file.write("\n".join(word_batch))
|
|
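A sketch of how the script above might be invoked, plus a standalone check of its batching idiom; the dictionary filenames and suffix are hypothetical.
# Hypothetical invocation:
#   python create_english_superset_addendum.py \
#       --new_files dictDeEn.txt,dictFrEn.txt --directory dictionaries/ --start_suffix 12
# would write dictionaries/english.superset12, english.superset13, ... in 10000-word batches.
# The batching idiom used above, demonstrated standalone:
words = ['word{}'.format(i) for i in range(25000)]
words_per_batch = 10000
batches = [words[i:i + words_per_batch] for i in range(0, len(words), words_per_batch)]
print([len(b) for b in batches])  # [10000, 10000, 5000]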
a68699fbea88b541c6ce8bd519c3886b0fa8f197
|
scripts/etherbone_perf.py
|
scripts/etherbone_perf.py
|
import time
import cProfile
import argparse
from utils import memread, memwrite
def run(wb, rw, n, *, burst, profile=True):
datas = list(range(n))
ctx = locals()
ctx['wb'] = wb
ctx['memread'] = memread
ctx['memwrite'] = memwrite
fname = 'tmp/profiling/{}_0x{:x}_b{}.profile'.format(rw, n, burst)
command = {
'memread': 'memread(wb, n, burst=burst)',
'memwrite': 'memwrite(wb, datas, burst=burst)',
}[rw]
def runner():
if profile:
cProfile.runctx(command, {}, ctx, fname)
else:
if rw == 'memread':
x = len(memread(wb, n, burst=burst))
print(x)
else:
memwrite(wb, datas, burst=burst)
measure(runner, 4*n)
def measure(runner, nbytes):
start = time.time()
runner()
elapsed = time.time() - start
bytes_per_sec = nbytes / elapsed
print('Elapsed = {:.3f} sec'.format(elapsed))
def human(val):
if val > 2**20:
return (val / 2**20, 'M')
elif val > 2**10:
return (val / 2**10, 'K')
return (val, '')
print('Size = {:.3f} {}B'.format(*human(nbytes)))
print('Speed = {:.3f} {}Bps'.format(*human(bytes_per_sec)))
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Measure EtherBone bridge performance')
parser.add_argument('rw', choices=['memread', 'memwrite'], help='Transfer type')
    parser.add_argument('n', help='Number of 32-bit words transferred')
parser.add_argument('--burst', help='Burst size')
parser.add_argument('--profile', action='store_true', help='Profile the code with cProfile')
args = parser.parse_args()
from litex import RemoteClient
wb = RemoteClient()
wb.open()
run(wb, args.rw, int(args.n, 0), burst=int(args.burst, 0), profile=args.profile)
wb.close()
|
Add a script for EtherBone transfers profiling
|
Add a script for EtherBone transfers profiling
|
Python
|
apache-2.0
|
antmicro/litex-rowhammer-tester,antmicro/litex-rowhammer-tester,antmicro/litex-rowhammer-tester
|
Add a script for EtherBone transfers profiling
|
import time
import cProfile
import argparse
from utils import memread, memwrite
def run(wb, rw, n, *, burst, profile=True):
datas = list(range(n))
ctx = locals()
ctx['wb'] = wb
ctx['memread'] = memread
ctx['memwrite'] = memwrite
fname = 'tmp/profiling/{}_0x{:x}_b{}.profile'.format(rw, n, burst)
command = {
'memread': 'memread(wb, n, burst=burst)',
'memwrite': 'memwrite(wb, datas, burst=burst)',
}[rw]
def runner():
if profile:
cProfile.runctx(command, {}, ctx, fname)
else:
if rw == 'memread':
x = len(memread(wb, n, burst=burst))
print(x)
else:
memwrite(wb, datas, burst=burst)
measure(runner, 4*n)
def measure(runner, nbytes):
start = time.time()
runner()
elapsed = time.time() - start
bytes_per_sec = nbytes / elapsed
print('Elapsed = {:.3f} sec'.format(elapsed))
def human(val):
if val > 2**20:
return (val / 2**20, 'M')
elif val > 2**10:
return (val / 2**10, 'K')
return (val, '')
print('Size = {:.3f} {}B'.format(*human(nbytes)))
print('Speed = {:.3f} {}Bps'.format(*human(bytes_per_sec)))
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Measure EtherBone bridge performance')
parser.add_argument('rw', choices=['memread', 'memwrite'], help='Transfer type')
    parser.add_argument('n', help='Number of 32-bit words transferred')
parser.add_argument('--burst', help='Burst size')
parser.add_argument('--profile', action='store_true', help='Profile the code with cProfile')
args = parser.parse_args()
from litex import RemoteClient
wb = RemoteClient()
wb.open()
run(wb, args.rw, int(args.n, 0), burst=int(args.burst, 0), profile=args.profile)
wb.close()
|
<commit_before><commit_msg>Add a script for EtherBone transfers profiling<commit_after>
|
import time
import cProfile
import argparse
from utils import memread, memwrite
def run(wb, rw, n, *, burst, profile=True):
datas = list(range(n))
ctx = locals()
ctx['wb'] = wb
ctx['memread'] = memread
ctx['memwrite'] = memwrite
fname = 'tmp/profiling/{}_0x{:x}_b{}.profile'.format(rw, n, burst)
command = {
'memread': 'memread(wb, n, burst=burst)',
'memwrite': 'memwrite(wb, datas, burst=burst)',
}[rw]
def runner():
if profile:
cProfile.runctx(command, {}, ctx, fname)
else:
if rw == 'memread':
x = len(memread(wb, n, burst=burst))
print(x)
else:
memwrite(wb, datas, burst=burst)
measure(runner, 4*n)
def measure(runner, nbytes):
start = time.time()
runner()
elapsed = time.time() - start
bytes_per_sec = nbytes / elapsed
print('Elapsed = {:.3f} sec'.format(elapsed))
def human(val):
if val > 2**20:
return (val / 2**20, 'M')
elif val > 2**10:
return (val / 2**10, 'K')
return (val, '')
print('Size = {:.3f} {}B'.format(*human(nbytes)))
print('Speed = {:.3f} {}Bps'.format(*human(bytes_per_sec)))
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Measure EtherBone bridge performance')
parser.add_argument('rw', choices=['memread', 'memwrite'], help='Transfer type')
    parser.add_argument('n', help='Number of 32-bit words transferred')
parser.add_argument('--burst', help='Burst size')
parser.add_argument('--profile', action='store_true', help='Profile the code with cProfile')
args = parser.parse_args()
from litex import RemoteClient
wb = RemoteClient()
wb.open()
run(wb, args.rw, int(args.n, 0), burst=int(args.burst, 0), profile=args.profile)
wb.close()
|
Add a script for EtherBone transfers profilingimport time
import cProfile
import argparse
from utils import memread, memwrite
def run(wb, rw, n, *, burst, profile=True):
datas = list(range(n))
ctx = locals()
ctx['wb'] = wb
ctx['memread'] = memread
ctx['memwrite'] = memwrite
fname = 'tmp/profiling/{}_0x{:x}_b{}.profile'.format(rw, n, burst)
command = {
'memread': 'memread(wb, n, burst=burst)',
'memwrite': 'memwrite(wb, datas, burst=burst)',
}[rw]
def runner():
if profile:
cProfile.runctx(command, {}, ctx, fname)
else:
if rw == 'memread':
x = len(memread(wb, n, burst=burst))
print(x)
else:
memwrite(wb, datas, burst=burst)
measure(runner, 4*n)
def measure(runner, nbytes):
start = time.time()
runner()
elapsed = time.time() - start
bytes_per_sec = nbytes / elapsed
print('Elapsed = {:.3f} sec'.format(elapsed))
def human(val):
if val > 2**20:
return (val / 2**20, 'M')
elif val > 2**10:
return (val / 2**10, 'K')
return (val, '')
print('Size = {:.3f} {}B'.format(*human(nbytes)))
print('Speed = {:.3f} {}Bps'.format(*human(bytes_per_sec)))
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Measure EtherBone bridge performance')
parser.add_argument('rw', choices=['memread', 'memwrite'], help='Transfer type')
    parser.add_argument('n', help='Number of 32-bit words transferred')
parser.add_argument('--burst', help='Burst size')
parser.add_argument('--profile', action='store_true', help='Profile the code with cProfile')
args = parser.parse_args()
from litex import RemoteClient
wb = RemoteClient()
wb.open()
run(wb, args.rw, int(args.n, 0), burst=int(args.burst, 0), profile=args.profile)
wb.close()
|
<commit_before><commit_msg>Add a script for EtherBone transfers profiling<commit_after>import time
import cProfile
import argparse
from utils import memread, memwrite
def run(wb, rw, n, *, burst, profile=True):
datas = list(range(n))
ctx = locals()
ctx['wb'] = wb
ctx['memread'] = memread
ctx['memwrite'] = memwrite
fname = 'tmp/profiling/{}_0x{:x}_b{}.profile'.format(rw, n, burst)
command = {
'memread': 'memread(wb, n, burst=burst)',
'memwrite': 'memwrite(wb, datas, burst=burst)',
}[rw]
def runner():
if profile:
cProfile.runctx(command, {}, ctx, fname)
else:
if rw == 'memread':
x = len(memread(wb, n, burst=burst))
print(x)
else:
memwrite(wb, datas, burst=burst)
measure(runner, 4*n)
def measure(runner, nbytes):
start = time.time()
runner()
elapsed = time.time() - start
bytes_per_sec = nbytes / elapsed
print('Elapsed = {:.3f} sec'.format(elapsed))
def human(val):
if val > 2**20:
return (val / 2**20, 'M')
elif val > 2**10:
return (val / 2**10, 'K')
return (val, '')
print('Size = {:.3f} {}B'.format(*human(nbytes)))
print('Speed = {:.3f} {}Bps'.format(*human(bytes_per_sec)))
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Measure EtherBone bridge performance')
parser.add_argument('rw', choices=['memread', 'memwrite'], help='Transfer type')
    parser.add_argument('n', help='Number of 32-bit words transferred')
parser.add_argument('--burst', help='Burst size')
parser.add_argument('--profile', action='store_true', help='Profile the code with cProfile')
args = parser.parse_args()
from litex import RemoteClient
wb = RemoteClient()
wb.open()
run(wb, args.rw, int(args.n, 0), burst=int(args.burst, 0), profile=args.profile)
wb.close()
|
|
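A standalone check of the throughput arithmetic in measure() and human() above; the transfer size and elapsed time are made-up numbers.
def human(val):  # same helper as in the script above
    if val > 2**20:
        return (val / 2**20, 'M')
    elif val > 2**10:
        return (val / 2**10, 'K')
    return (val, '')
nbytes = 4 * 0x10000   # 0x10000 32-bit words -> 262144 bytes
elapsed = 0.125        # assumed measurement, in seconds
print('Speed = {:.3f} {}Bps'.format(*human(nbytes / elapsed)))  # Speed = 2.000 MBps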
0a75aa4bbb9396c448b4cef58a42068d30933a95
|
tests/formatter/test_rawer.py
|
tests/formatter/test_rawer.py
|
import unittest, argparse
from echolalia.formatter.rawer import Formatter
class RawerTestCase(unittest.TestCase):
def setUp(self):
self.parser = argparse.ArgumentParser()
self.data = [{chr(i): i - 96} for i in xrange(97, 123)]
self.formatter = Formatter()
def test_add_args(self):
self.assertEqual(self.formatter.add_args(self.parser), self.parser)
def test_marshall(self):
args = self.parser.parse_args([])
result = self.formatter.marshall(args, self.data)
self.assertEqual(result, self.data)
|
Add tests for formatter raw
|
Add tests for formatter raw
|
Python
|
mit
|
eiri/echolalia-prototype
|
Add tests for formatter raw
|
import unittest, argparse
from echolalia.formatter.rawer import Formatter
class RawerTestCase(unittest.TestCase):
def setUp(self):
self.parser = argparse.ArgumentParser()
self.data = [{chr(i): i - 96} for i in xrange(97, 123)]
self.formatter = Formatter()
def test_add_args(self):
self.assertEqual(self.formatter.add_args(self.parser), self.parser)
def test_marshall(self):
args = self.parser.parse_args([])
result = self.formatter.marshall(args, self.data)
self.assertEqual(result, self.data)
|
<commit_before><commit_msg>Add tests for formatter raw<commit_after>
|
import unittest, argparse
from echolalia.formatter.rawer import Formatter
class RawerTestCase(unittest.TestCase):
def setUp(self):
self.parser = argparse.ArgumentParser()
self.data = [{chr(i): i - 96} for i in xrange(97, 123)]
self.formatter = Formatter()
def test_add_args(self):
self.assertEqual(self.formatter.add_args(self.parser), self.parser)
def test_marshall(self):
args = self.parser.parse_args([])
result = self.formatter.marshall(args, self.data)
self.assertEqual(result, self.data)
|
Add tests for formatter rawimport unittest, argparse
from echolalia.formatter.rawer import Formatter
class RawerTestCase(unittest.TestCase):
def setUp(self):
self.parser = argparse.ArgumentParser()
self.data = [{chr(i): i - 96} for i in xrange(97, 123)]
self.formatter = Formatter()
def test_add_args(self):
self.assertEqual(self.formatter.add_args(self.parser), self.parser)
def test_marshall(self):
args = self.parser.parse_args([])
result = self.formatter.marshall(args, self.data)
self.assertEqual(result, self.data)
|
<commit_before><commit_msg>Add tests for formatter raw<commit_after>import unittest, argparse
from echolalia.formatter.rawer import Formatter
class RawerTestCase(unittest.TestCase):
def setUp(self):
self.parser = argparse.ArgumentParser()
self.data = [{chr(i): i - 96} for i in xrange(97, 123)]
self.formatter = Formatter()
def test_add_args(self):
self.assertEqual(self.formatter.add_args(self.parser), self.parser)
def test_marshall(self):
args = self.parser.parse_args([])
result = self.formatter.marshall(args, self.data)
self.assertEqual(result, self.data)
|
|
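The Formatter under test is not included in this record; a minimal stand-in consistent with both assertions would look like the sketch below (the real echolalia implementation may differ).
class Formatter(object):
    def add_args(self, parser):
        # the raw formatter registers no options, so the parser comes back unchanged
        return parser
    def marshall(self, args, data):
        # pass the data through untouched
        return data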
546bc7ffab0a0ca287550d97946d3e8b12b6c59a
|
tools/distribute.py
|
tools/distribute.py
|
#!/usr/bin/python2
# Copyright (c) 2015 Kenneth Henderick <kenneth@ketronic.be>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
Functionality to distribute the scripts to the live location and restart the service
* For testing purposes only
* Do not use without understanding the consequences
* Running untested code might light your house on fire
* Use with caution
* Seriously, use with caution
"""
import os
from subprocess import check_output
if __name__ == '__main__':
this_directory = os.path.dirname(os.path.abspath(__file__))
print 'Copying files to local node...'
commands = []
for filename in ['clean.py', 'helper.py', 'manager.py', 'zfs.py']:
commands.append('cp {0}/../scripts/{1} /usr/lib/zfs-snap-manager/{1}'.format(this_directory, filename))
commands.append('rm -f /usr/lib/zfs-snap-manager/{0}c'.format(filename))
commands.append('systemctl restart zfs-snap-manager')
for command in commands:
check_output(command, shell=True)
|
Add script to copy the files to their correct location (e.g. during testing)
|
Add script to copy the files to their correct location (e.g. during testing)
|
Python
|
mit
|
khenderick/zfs-snap-manager,tylerjl/zfs-snap-manager
|
Add script to copy the files to their correct location (e.g. during testing)
|
#!/usr/bin/python2
# Copyright (c) 2015 Kenneth Henderick <kenneth@ketronic.be>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
Functionality to distribute the scripts to the live location and restart the service
* For testing purposes only
* Do not use without understanding the consequences
* Running untested code might light your house on fire
* Use with caution
* Seriously, use with caution
"""
import os
from subprocess import check_output
if __name__ == '__main__':
this_directory = os.path.dirname(os.path.abspath(__file__))
print 'Copying files to local node...'
commands = []
for filename in ['clean.py', 'helper.py', 'manager.py', 'zfs.py']:
commands.append('cp {0}/../scripts/{1} /usr/lib/zfs-snap-manager/{1}'.format(this_directory, filename))
commands.append('rm -f /usr/lib/zfs-snap-manager/{0}c'.format(filename))
commands.append('systemctl restart zfs-snap-manager')
for command in commands:
check_output(command, shell=True)
|
<commit_before><commit_msg>Add script to copy the files to their correct location (e.g. during testing)<commit_after>
|
#!/usr/bin/python2
# Copyright (c) 2015 Kenneth Henderick <kenneth@ketronic.be>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
Functionality to distribute the scripts to the live location and restart the service
* For testing purposes only
* Do not use without understanding the consequences
* Running untested code might light your house on fire
* Use with caution
* Seriously, use with caution
"""
import os
from subprocess import check_output
if __name__ == '__main__':
this_directory = os.path.dirname(os.path.abspath(__file__))
print 'Copying files to local node...'
commands = []
for filename in ['clean.py', 'helper.py', 'manager.py', 'zfs.py']:
commands.append('cp {0}/../scripts/{1} /usr/lib/zfs-snap-manager/{1}'.format(this_directory, filename))
commands.append('rm -f /usr/lib/zfs-snap-manager/{0}c'.format(filename))
commands.append('systemctl restart zfs-snap-manager')
for command in commands:
check_output(command, shell=True)
|
Add script to copy the files to their correct location (e.g. during testing)#!/usr/bin/python2
# Copyright (c) 2015 Kenneth Henderick <kenneth@ketronic.be>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
Functionality to distribute the scripts to the live location and restart the service
* For testing purposes only
* Do not use without understanding the consequences
* Running untested code might light your house on fire
* Use with caution
* Seriously, use with caution
"""
import os
from subprocess import check_output
if __name__ == '__main__':
this_directory = os.path.dirname(os.path.abspath(__file__))
print 'Copying files to local node...'
commands = []
for filename in ['clean.py', 'helper.py', 'manager.py', 'zfs.py']:
commands.append('cp {0}/../scripts/{1} /usr/lib/zfs-snap-manager/{1}'.format(this_directory, filename))
commands.append('rm -f /usr/lib/zfs-snap-manager/{0}c'.format(filename))
commands.append('systemctl restart zfs-snap-manager')
for command in commands:
check_output(command, shell=True)
|
<commit_before><commit_msg>Add script to copy the files to their correct location (e.g. during testing)<commit_after>#!/usr/bin/python2
# Copyright (c) 2015 Kenneth Henderick <kenneth@ketronic.be>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
Functionality to distribute the scripts to the live location and restart the service
* For testing purposes only
* Do not use without understanding the consequences
* Running untested code might light your house on fire
* Use with caution
* Seriously, use with caution
"""
import os
from subprocess import check_output
if __name__ == '__main__':
this_directory = os.path.dirname(os.path.abspath(__file__))
print 'Copying files to local node...'
commands = []
for filename in ['clean.py', 'helper.py', 'manager.py', 'zfs.py']:
commands.append('cp {0}/../scripts/{1} /usr/lib/zfs-snap-manager/{1}'.format(this_directory, filename))
commands.append('rm -f /usr/lib/zfs-snap-manager/{0}c'.format(filename))
commands.append('systemctl restart zfs-snap-manager')
for command in commands:
check_output(command, shell=True)
|
|
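A dry-run sketch of the command list the script above assembles, printing instead of executing; the checkout path is hypothetical.
this_directory = '/home/user/zfs-snap-manager/tools'  # assumed checkout location
commands = []
for filename in ['clean.py', 'helper.py', 'manager.py', 'zfs.py']:
    commands.append('cp {0}/../scripts/{1} /usr/lib/zfs-snap-manager/{1}'.format(this_directory, filename))
    commands.append('rm -f /usr/lib/zfs-snap-manager/{0}c'.format(filename))
commands.append('systemctl restart zfs-snap-manager')
for command in commands:
    print(command)  # dry run only; the script itself runs these via check_output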
60125c0852780d88ec29757c8a11a5846a4e1bba
|
portal/migrations/versions/63262fe95b9c_.py
|
portal/migrations/versions/63262fe95b9c_.py
|
from alembic import op
import sqlalchemy as sa
"""empty message
Revision ID: 63262fe95b9c
Revises: 13a45e9375d7
Create Date: 2018-02-28 13:17:47.248361
"""
# revision identifiers, used by Alembic.
revision = '63262fe95b9c'
down_revision = '13a45e9375d7'
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('organization_research_protocols',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('organization_id', sa.Integer(), nullable=False),
sa.Column('research_protocol_id', sa.Integer(), nullable=False),
sa.Column('retired_as_of', sa.DateTime(), nullable=True),
sa.ForeignKeyConstraint(['organization_id'], ['organizations.id'], ondelete='CASCADE'),
sa.ForeignKeyConstraint(['research_protocol_id'], ['research_protocols.id'], ondelete='CASCADE'),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('organization_id', 'research_protocol_id', name='_organization_research_protocol')
)
op.drop_constraint(u'organizations_rp_id_fkey', 'organizations', type_='foreignkey')
op.drop_column(u'organizations', 'research_protocol_id')
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column(u'organizations', sa.Column('research_protocol_id', sa.INTEGER(), autoincrement=False, nullable=True))
op.create_foreign_key(u'organizations_rp_id_fkey', 'organizations', 'research_protocols', ['research_protocol_id'], ['id'])
op.drop_table('organization_research_protocols')
# ### end Alembic commands ###
|
Upgrade script to add many:many relationship between orgs and rps.
|
Upgrade script to add many:many relationship between orgs and rps.
|
Python
|
bsd-3-clause
|
uwcirg/true_nth_usa_portal,uwcirg/true_nth_usa_portal,uwcirg/true_nth_usa_portal,uwcirg/true_nth_usa_portal
|
Upgrade script to add many:many relationship between orgs and rps.
|
from alembic import op
import sqlalchemy as sa
"""empty message
Revision ID: 63262fe95b9c
Revises: 13a45e9375d7
Create Date: 2018-02-28 13:17:47.248361
"""
# revision identifiers, used by Alembic.
revision = '63262fe95b9c'
down_revision = '13a45e9375d7'
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('organization_research_protocols',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('organization_id', sa.Integer(), nullable=False),
sa.Column('research_protocol_id', sa.Integer(), nullable=False),
sa.Column('retired_as_of', sa.DateTime(), nullable=True),
sa.ForeignKeyConstraint(['organization_id'], ['organizations.id'], ondelete='CASCADE'),
sa.ForeignKeyConstraint(['research_protocol_id'], ['research_protocols.id'], ondelete='CASCADE'),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('organization_id', 'research_protocol_id', name='_organization_research_protocol')
)
op.drop_constraint(u'organizations_rp_id_fkey', 'organizations', type_='foreignkey')
op.drop_column(u'organizations', 'research_protocol_id')
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column(u'organizations', sa.Column('research_protocol_id', sa.INTEGER(), autoincrement=False, nullable=True))
op.create_foreign_key(u'organizations_rp_id_fkey', 'organizations', 'research_protocols', ['research_protocol_id'], ['id'])
op.drop_table('organization_research_protocols')
# ### end Alembic commands ###
|
<commit_before><commit_msg>Upgrade script to add many:many relationship between orgs and rps.<commit_after>
|
from alembic import op
import sqlalchemy as sa
"""empty message
Revision ID: 63262fe95b9c
Revises: 13a45e9375d7
Create Date: 2018-02-28 13:17:47.248361
"""
# revision identifiers, used by Alembic.
revision = '63262fe95b9c'
down_revision = '13a45e9375d7'
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('organization_research_protocols',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('organization_id', sa.Integer(), nullable=False),
sa.Column('research_protocol_id', sa.Integer(), nullable=False),
sa.Column('retired_as_of', sa.DateTime(), nullable=True),
sa.ForeignKeyConstraint(['organization_id'], ['organizations.id'], ondelete='CASCADE'),
sa.ForeignKeyConstraint(['research_protocol_id'], ['research_protocols.id'], ondelete='CASCADE'),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('organization_id', 'research_protocol_id', name='_organization_research_protocol')
)
op.drop_constraint(u'organizations_rp_id_fkey', 'organizations', type_='foreignkey')
op.drop_column(u'organizations', 'research_protocol_id')
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column(u'organizations', sa.Column('research_protocol_id', sa.INTEGER(), autoincrement=False, nullable=True))
op.create_foreign_key(u'organizations_rp_id_fkey', 'organizations', 'research_protocols', ['research_protocol_id'], ['id'])
op.drop_table('organization_research_protocols')
# ### end Alembic commands ###
|
Upgrade script to add many:many relationship between orgs and rps.from alembic import op
import sqlalchemy as sa
"""empty message
Revision ID: 63262fe95b9c
Revises: 13a45e9375d7
Create Date: 2018-02-28 13:17:47.248361
"""
# revision identifiers, used by Alembic.
revision = '63262fe95b9c'
down_revision = '13a45e9375d7'
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('organization_research_protocols',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('organization_id', sa.Integer(), nullable=False),
sa.Column('research_protocol_id', sa.Integer(), nullable=False),
sa.Column('retired_as_of', sa.DateTime(), nullable=True),
sa.ForeignKeyConstraint(['organization_id'], ['organizations.id'], ondelete='CASCADE'),
sa.ForeignKeyConstraint(['research_protocol_id'], ['research_protocols.id'], ondelete='CASCADE'),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('organization_id', 'research_protocol_id', name='_organization_research_protocol')
)
op.drop_constraint(u'organizations_rp_id_fkey', 'organizations', type_='foreignkey')
op.drop_column(u'organizations', 'research_protocol_id')
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column(u'organizations', sa.Column('research_protocol_id', sa.INTEGER(), autoincrement=False, nullable=True))
op.create_foreign_key(u'organizations_rp_id_fkey', 'organizations', 'research_protocols', ['research_protocol_id'], ['id'])
op.drop_table('organization_research_protocols')
# ### end Alembic commands ###
|
<commit_before><commit_msg>Upgrade script to add many:many relationship between orgs and rps.<commit_after>from alembic import op
import sqlalchemy as sa
"""empty message
Revision ID: 63262fe95b9c
Revises: 13a45e9375d7
Create Date: 2018-02-28 13:17:47.248361
"""
# revision identifiers, used by Alembic.
revision = '63262fe95b9c'
down_revision = '13a45e9375d7'
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('organization_research_protocols',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('organization_id', sa.Integer(), nullable=False),
sa.Column('research_protocol_id', sa.Integer(), nullable=False),
sa.Column('retired_as_of', sa.DateTime(), nullable=True),
sa.ForeignKeyConstraint(['organization_id'], ['organizations.id'], ondelete='CASCADE'),
sa.ForeignKeyConstraint(['research_protocol_id'], ['research_protocols.id'], ondelete='CASCADE'),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('organization_id', 'research_protocol_id', name='_organization_research_protocol')
)
op.drop_constraint(u'organizations_rp_id_fkey', 'organizations', type_='foreignkey')
op.drop_column(u'organizations', 'research_protocol_id')
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column(u'organizations', sa.Column('research_protocol_id', sa.INTEGER(), autoincrement=False, nullable=True))
op.create_foreign_key(u'organizations_rp_id_fkey', 'organizations', 'research_protocols', ['research_protocol_id'], ['id'])
op.drop_table('organization_research_protocols')
# ### end Alembic commands ###
|
|
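For reference, the usual way to apply and revert this revision from the command line (standard Alembic usage, not part of the migration file):
#   alembic upgrade 63262fe95b9c     # adds organization_research_protocols, drops the old FK column
#   alembic downgrade 13a45e9375d7   # restores organizations.research_protocol_id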
d61862b138858721d6baf06ab2ad29ebb566f09b
|
tests/t_bad_acceptor_name.py
|
tests/t_bad_acceptor_name.py
|
#!/usr/bin/python
# Copyright (C) 2015 - mod_auth_gssapi contributors, see COPYING for license.
import os
import requests
from stat import ST_MODE
from requests_kerberos import HTTPKerberosAuth, OPTIONAL
if __name__ == '__main__':
sess = requests.Session()
url = 'http://%s/bad_acceptor_name/' % os.environ['NSS_WRAPPER_HOSTNAME']
r = sess.get(url, auth=HTTPKerberosAuth(delegate=True))
if r.status_code != 200:
raise ValueError('Bad Acceptor Name failed')
|
Add test to check when an acceptor name is bad
|
Add test to check when an acceptor name is bad
Had this in my tree but forgot to add to the commit.
Related to #131
Signed-off-by: Simo Sorce <65f99581a93cf30dafc32b5c178edc6b0294a07f@redhat.com>
|
Python
|
mit
|
frenche/mod_auth_gssapi,frenche/mod_auth_gssapi,frenche/mod_auth_gssapi,frenche/mod_auth_gssapi
|
Add test to check when an acceptor name is bad
Had this in my tree but forgot to add to the commit.
Related to #131
Signed-off-by: Simo Sorce <65f99581a93cf30dafc32b5c178edc6b0294a07f@redhat.com>
|
#!/usr/bin/python
# Copyright (C) 2015 - mod_auth_gssapi contributors, see COPYING for license.
import os
import requests
from stat import ST_MODE
from requests_kerberos import HTTPKerberosAuth, OPTIONAL
if __name__ == '__main__':
sess = requests.Session()
url = 'http://%s/bad_acceptor_name/' % os.environ['NSS_WRAPPER_HOSTNAME']
r = sess.get(url, auth=HTTPKerberosAuth(delegate=True))
if r.status_code != 200:
raise ValueError('Bad Acceptor Name failed')
|
<commit_before><commit_msg>Add test to check when an acceptor name is bad
Had this in my tree but forgot to add to the commit.
Related to #131
Signed-off-by: Simo Sorce <65f99581a93cf30dafc32b5c178edc6b0294a07f@redhat.com><commit_after>
|
#!/usr/bin/python
# Copyright (C) 2015 - mod_auth_gssapi contributors, see COPYING for license.
import os
import requests
from stat import ST_MODE
from requests_kerberos import HTTPKerberosAuth, OPTIONAL
if __name__ == '__main__':
sess = requests.Session()
url = 'http://%s/bad_acceptor_name/' % os.environ['NSS_WRAPPER_HOSTNAME']
r = sess.get(url, auth=HTTPKerberosAuth(delegate=True))
if r.status_code != 200:
raise ValueError('Bad Acceptor Name failed')
|
Add test to check when an acceptor name is bad
Had this in my tree but forgot to add to the commit.
Related to #131
Signed-off-by: Simo Sorce <65f99581a93cf30dafc32b5c178edc6b0294a07f@redhat.com>#!/usr/bin/python
# Copyright (C) 2015 - mod_auth_gssapi contributors, see COPYING for license.
import os
import requests
from stat import ST_MODE
from requests_kerberos import HTTPKerberosAuth, OPTIONAL
if __name__ == '__main__':
sess = requests.Session()
url = 'http://%s/bad_acceptor_name/' % os.environ['NSS_WRAPPER_HOSTNAME']
r = sess.get(url, auth=HTTPKerberosAuth(delegate=True))
if r.status_code != 200:
raise ValueError('Bad Acceptor Name failed')
|
<commit_before><commit_msg>Add test to check when an acceptor name is bad
Had this in my tree but forgot to add to the commit.
Related to #131
Signed-off-by: Simo Sorce <65f99581a93cf30dafc32b5c178edc6b0294a07f@redhat.com><commit_after>#!/usr/bin/python
# Copyright (C) 2015 - mod_auth_gssapi contributors, see COPYING for license.
import os
import requests
from stat import ST_MODE
from requests_kerberos import HTTPKerberosAuth, OPTIONAL
if __name__ == '__main__':
sess = requests.Session()
url = 'http://%s/bad_acceptor_name/' % os.environ['NSS_WRAPPER_HOSTNAME']
r = sess.get(url, auth=HTTPKerberosAuth(delegate=True))
if r.status_code != 200:
raise ValueError('Bad Acceptor Name failed')
|
|
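A small sketch of the environment the test above expects; the hostname value is an assumption about the nss_wrapper-based harness, not something fixed by the commit.
import os
os.environ.setdefault('NSS_WRAPPER_HOSTNAME', 'www.example.com')  # the harness normally sets this
url = 'http://%s/bad_acceptor_name/' % os.environ['NSS_WRAPPER_HOSTNAME']
print(url)  # http://www.example.com/bad_acceptor_name/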
1870cc1f25426bee9b1b4a66f2167c5cd474cd23
|
openstack/tests/functional/network/v2/test_dvr_router.py
|
openstack/tests/functional/network/v2/test_dvr_router.py
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from openstack.network.v2 import router
from openstack.tests.functional import base
class TestDVRRouter(base.BaseFunctionalTest):
NAME = uuid.uuid4().hex
UPDATE_NAME = uuid.uuid4().hex
ID = None
@classmethod
def setUpClass(cls):
super(TestDVRRouter, cls).setUpClass()
sot = cls.conn.network.create_router(name=cls.NAME, distributed=True)
assert isinstance(sot, router.Router)
cls.assertIs(cls.NAME, sot.name)
cls.ID = sot.id
@classmethod
def tearDownClass(cls):
sot = cls.conn.network.delete_router(cls.ID, ignore_missing=False)
cls.assertIs(None, sot)
def test_find(self):
sot = self.conn.network.find_router(self.NAME)
self.assertEqual(self.ID, sot.id)
def test_get(self):
sot = self.conn.network.get_router(self.ID)
self.assertEqual(self.NAME, sot.name)
self.assertEqual(self.ID, sot.id)
self.assertTrue(sot.is_distributed)
def test_list(self):
names = [o.name for o in self.conn.network.routers()]
self.assertIn(self.NAME, names)
dvr = [o.is_distributed for o in self.conn.network.routers()]
self.assertTrue(dvr)
def test_update(self):
sot = self.conn.network.update_router(self.ID, name=self.UPDATE_NAME)
self.assertEqual(self.UPDATE_NAME, sot.name)
|
Add functional tests for DVR router.
|
Add functional tests for DVR router.
Change-Id: Iafa10dc02626a40a90c48f74c363d977ac4f512f
|
Python
|
apache-2.0
|
stackforge/python-openstacksdk,dtroyer/python-openstacksdk,dtroyer/python-openstacksdk,briancurtin/python-openstacksdk,briancurtin/python-openstacksdk,openstack/python-openstacksdk,stackforge/python-openstacksdk,openstack/python-openstacksdk
|
Add functional tests for DVR router.
Change-Id: Iafa10dc02626a40a90c48f74c363d977ac4f512f
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from openstack.network.v2 import router
from openstack.tests.functional import base
class TestDVRRouter(base.BaseFunctionalTest):
NAME = uuid.uuid4().hex
UPDATE_NAME = uuid.uuid4().hex
ID = None
@classmethod
def setUpClass(cls):
super(TestDVRRouter, cls).setUpClass()
sot = cls.conn.network.create_router(name=cls.NAME, distributed=True)
assert isinstance(sot, router.Router)
cls.assertIs(cls.NAME, sot.name)
cls.ID = sot.id
@classmethod
def tearDownClass(cls):
sot = cls.conn.network.delete_router(cls.ID, ignore_missing=False)
cls.assertIs(None, sot)
def test_find(self):
sot = self.conn.network.find_router(self.NAME)
self.assertEqual(self.ID, sot.id)
def test_get(self):
sot = self.conn.network.get_router(self.ID)
self.assertEqual(self.NAME, sot.name)
self.assertEqual(self.ID, sot.id)
self.assertTrue(sot.is_distributed)
def test_list(self):
names = [o.name for o in self.conn.network.routers()]
self.assertIn(self.NAME, names)
dvr = [o.is_distributed for o in self.conn.network.routers()]
self.assertTrue(dvr)
def test_update(self):
sot = self.conn.network.update_router(self.ID, name=self.UPDATE_NAME)
self.assertEqual(self.UPDATE_NAME, sot.name)
|
<commit_before><commit_msg>Add functional tests for DVR router.
Change-Id: Iafa10dc02626a40a90c48f74c363d977ac4f512f<commit_after>
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from openstack.network.v2 import router
from openstack.tests.functional import base
class TestDVRRouter(base.BaseFunctionalTest):
NAME = uuid.uuid4().hex
UPDATE_NAME = uuid.uuid4().hex
ID = None
@classmethod
def setUpClass(cls):
super(TestDVRRouter, cls).setUpClass()
sot = cls.conn.network.create_router(name=cls.NAME, distributed=True)
assert isinstance(sot, router.Router)
cls.assertIs(cls.NAME, sot.name)
cls.ID = sot.id
@classmethod
def tearDownClass(cls):
sot = cls.conn.network.delete_router(cls.ID, ignore_missing=False)
cls.assertIs(None, sot)
def test_find(self):
sot = self.conn.network.find_router(self.NAME)
self.assertEqual(self.ID, sot.id)
def test_get(self):
sot = self.conn.network.get_router(self.ID)
self.assertEqual(self.NAME, sot.name)
self.assertEqual(self.ID, sot.id)
self.assertTrue(sot.is_distributed)
def test_list(self):
names = [o.name for o in self.conn.network.routers()]
self.assertIn(self.NAME, names)
dvr = [o.is_distributed for o in self.conn.network.routers()]
self.assertTrue(dvr)
def test_update(self):
sot = self.conn.network.update_router(self.ID, name=self.UPDATE_NAME)
self.assertEqual(self.UPDATE_NAME, sot.name)
|
Add functional tests for DVR router.
Change-Id: Iafa10dc02626a40a90c48f74c363d977ac4f512f# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from openstack.network.v2 import router
from openstack.tests.functional import base
class TestDVRRouter(base.BaseFunctionalTest):
NAME = uuid.uuid4().hex
UPDATE_NAME = uuid.uuid4().hex
ID = None
@classmethod
def setUpClass(cls):
super(TestDVRRouter, cls).setUpClass()
sot = cls.conn.network.create_router(name=cls.NAME, distributed=True)
assert isinstance(sot, router.Router)
cls.assertIs(cls.NAME, sot.name)
cls.ID = sot.id
@classmethod
def tearDownClass(cls):
sot = cls.conn.network.delete_router(cls.ID, ignore_missing=False)
cls.assertIs(None, sot)
def test_find(self):
sot = self.conn.network.find_router(self.NAME)
self.assertEqual(self.ID, sot.id)
def test_get(self):
sot = self.conn.network.get_router(self.ID)
self.assertEqual(self.NAME, sot.name)
self.assertEqual(self.ID, sot.id)
self.assertTrue(sot.is_distributed)
def test_list(self):
names = [o.name for o in self.conn.network.routers()]
self.assertIn(self.NAME, names)
dvr = [o.is_distributed for o in self.conn.network.routers()]
self.assertTrue(dvr)
def test_update(self):
sot = self.conn.network.update_router(self.ID, name=self.UPDATE_NAME)
self.assertEqual(self.UPDATE_NAME, sot.name)
|
<commit_before><commit_msg>Add functional tests for DVR router.
Change-Id: Iafa10dc02626a40a90c48f74c363d977ac4f512f<commit_after># Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from openstack.network.v2 import router
from openstack.tests.functional import base
class TestDVRRouter(base.BaseFunctionalTest):
NAME = uuid.uuid4().hex
UPDATE_NAME = uuid.uuid4().hex
ID = None
@classmethod
def setUpClass(cls):
super(TestDVRRouter, cls).setUpClass()
sot = cls.conn.network.create_router(name=cls.NAME, distributed=True)
assert isinstance(sot, router.Router)
cls.assertIs(cls.NAME, sot.name)
cls.ID = sot.id
@classmethod
def tearDownClass(cls):
sot = cls.conn.network.delete_router(cls.ID, ignore_missing=False)
cls.assertIs(None, sot)
def test_find(self):
sot = self.conn.network.find_router(self.NAME)
self.assertEqual(self.ID, sot.id)
def test_get(self):
sot = self.conn.network.get_router(self.ID)
self.assertEqual(self.NAME, sot.name)
self.assertEqual(self.ID, sot.id)
self.assertTrue(sot.is_distributed)
def test_list(self):
names = [o.name for o in self.conn.network.routers()]
self.assertIn(self.NAME, names)
dvr = [o.is_distributed for o in self.conn.network.routers()]
self.assertTrue(dvr)
def test_update(self):
sot = self.conn.network.update_router(self.ID, name=self.UPDATE_NAME)
self.assertEqual(self.UPDATE_NAME, sot.name)
|
|
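A side note on the fixture pattern in this record, sketched below under the assumption of stock unittest: assertion helpers such as assertIs are instance methods, so class-level hooks like setUpClass generally have to fall back on plain asserts, while individual tests can use self.assert* as usual.

import unittest

class FixtureAssertionSketch(unittest.TestCase):
    # Illustrative resource standing in for the router above.
    @classmethod
    def setUpClass(cls):
        cls.resource = {'name': 'demo', 'distributed': True}
        # No TestCase instance exists yet, so use a plain assert here.
        assert cls.resource['distributed'] is True

    def test_flag(self):
        # Inside a test method the instance helpers are available.
        self.assertTrue(self.resource['distributed'])

if __name__ == '__main__':
    unittest.main()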
f27382d63224f55998320e496a82aea32d82319b
|
04/test_sum_sector_ids.py
|
04/test_sum_sector_ids.py
|
import unittest
from sum_sector_ids import (extract_checksum,
create_checksum,
is_valid_checksum,
extract_sector_id,
sum_sector_ids)
class TestSumSectorIds(unittest.TestCase):
def setUp(self):
self.valid_room_a = 'aaaaa-bbb-z-y-x-123[abxyz]'
self.valid_room_b = 'a-b-c-d-e-f-g-h-987[abcde]'
self.invalid_room = 'not-a-real-room-404[oarel]'
def test_extract_checksum(self):
assert extract_checksum(self.valid_room_a) == 'abxyz'
assert extract_checksum(self.valid_room_b) == 'abcde'
assert extract_checksum(self.invalid_room) == 'oarel'
def test_create_checksum(self):
assert create_checksum(self.valid_room_a) == 'abxyz'
assert create_checksum(self.valid_room_b) == 'abcde'
assert create_checksum(self.invalid_room) == 'aelor'
def test_is_valid_checksum(self):
assert is_valid_checksum(self.valid_room_a) == True
assert is_valid_checksum(self.valid_room_b) == True
assert is_valid_checksum(self.invalid_room) == False
def test_extract_sector_id(self):
assert extract_sector_id(self.valid_room_a) == '123'
assert extract_sector_id(self.valid_room_b) == '987'
def test_sum_sector_ids(self):
assert sum_sector_ids([self.valid_room_a, self.valid_room_b,
self.invalid_room]) == 1110
|
Add tests for summing sector ids.
|
Add tests for summing sector ids.
|
Python
|
mit
|
machinelearningdeveloper/aoc_2016
|
Add tests for summing sector ids.
|
import unittest
from sum_sector_ids import (extract_checksum,
create_checksum,
is_valid_checksum,
extract_sector_id,
sum_sector_ids)
class TestSumSectorIds(unittest.TestCase):
def setUp(self):
self.valid_room_a = 'aaaaa-bbb-z-y-x-123[abxyz]'
self.valid_room_b = 'a-b-c-d-e-f-g-h-987[abcde]'
self.invalid_room = 'not-a-real-room-404[oarel]'
def test_extract_checksum(self):
assert extract_checksum(self.valid_room_a) == 'abxyz'
assert extract_checksum(self.valid_room_b) == 'abcde'
assert extract_checksum(self.invalid_room) == 'oarel'
def test_create_checksum(self):
assert create_checksum(self.valid_room_a) == 'abxyz'
assert create_checksum(self.valid_room_b) == 'abcde'
assert create_checksum(self.invalid_room) == 'aelor'
def test_is_valid_checksum(self):
assert is_valid_checksum(self.valid_room_a) == True
assert is_valid_checksum(self.valid_room_b) == True
assert is_valid_checksum(self.invalid_room) == False
def test_extract_sector_id(self):
assert extract_sector_id(self.valid_room_a) == '123'
assert extract_sector_id(self.valid_room_b) == '987'
def test_sum_sector_ids(self):
assert sum_sector_ids([self.valid_room_a, self.valid_room_b,
self.invalid_room]) == 1110
|
<commit_before><commit_msg>Add tests for summing sector ids.<commit_after>
|
import unittest
from sum_sector_ids import (extract_checksum,
create_checksum,
is_valid_checksum,
extract_sector_id,
sum_sector_ids)
class TestSumSectorIds(unittest.TestCase):
def setUp(self):
self.valid_room_a = 'aaaaa-bbb-z-y-x-123[abxyz]'
self.valid_room_b = 'a-b-c-d-e-f-g-h-987[abcde]'
self.invalid_room = 'not-a-real-room-404[oarel]'
def test_extract_checksum(self):
assert extract_checksum(self.valid_room_a) == 'abxyz'
assert extract_checksum(self.valid_room_b) == 'abcde'
assert extract_checksum(self.invalid_room) == 'oarel'
def test_create_checksum(self):
assert create_checksum(self.valid_room_a) == 'abxyz'
assert create_checksum(self.valid_room_b) == 'abcde'
assert create_checksum(self.invalid_room) == 'aelor'
def test_is_valid_checksum(self):
assert is_valid_checksum(self.valid_room_a) == True
assert is_valid_checksum(self.valid_room_b) == True
assert is_valid_checksum(self.invalid_room) == False
def test_extract_sector_id(self):
assert extract_sector_id(self.valid_room_a) == '123'
assert extract_sector_id(self.valid_room_b) == '987'
def test_sum_sector_ids(self):
assert sum_sector_ids([self.valid_room_a, self.valid_room_b,
self.invalid_room]) == 1110
|
Add tests for summing sector ids.
import unittest
from sum_sector_ids import (extract_checksum,
create_checksum,
is_valid_checksum,
extract_sector_id,
sum_sector_ids)
class TestSumSectorIds(unittest.TestCase):
def setUp(self):
self.valid_room_a = 'aaaaa-bbb-z-y-x-123[abxyz]'
self.valid_room_b = 'a-b-c-d-e-f-g-h-987[abcde]'
self.invalid_room = 'not-a-real-room-404[oarel]'
def test_extract_checksum(self):
assert extract_checksum(self.valid_room_a) == 'abxyz'
assert extract_checksum(self.valid_room_b) == 'abcde'
assert extract_checksum(self.invalid_room) == 'oarel'
def test_create_checksum(self):
assert create_checksum(self.valid_room_a) == 'abxyz'
assert create_checksum(self.valid_room_b) == 'abcde'
assert create_checksum(self.invalid_room) == 'aelor'
def test_is_valid_checksum(self):
assert is_valid_checksum(self.valid_room_a) == True
assert is_valid_checksum(self.valid_room_b) == True
assert is_valid_checksum(self.invalid_room) == False
def test_extract_sector_id(self):
assert extract_sector_id(self.valid_room_a) == '123'
assert extract_sector_id(self.valid_room_b) == '987'
def test_sum_sector_ids(self):
assert sum_sector_ids([self.valid_room_a, self.valid_room_b,
self.invalid_room]) == 1110
|
<commit_before><commit_msg>Add tests for summing sector ids.<commit_after>import unittest
from sum_sector_ids import (extract_checksum,
create_checksum,
is_valid_checksum,
extract_sector_id,
sum_sector_ids)
class TestSumSectorIds(unittest.TestCase):
def setUp(self):
self.valid_room_a = 'aaaaa-bbb-z-y-x-123[abxyz]'
self.valid_room_b = 'a-b-c-d-e-f-g-h-987[abcde]'
self.invalid_room = 'not-a-real-room-404[oarel]'
def test_extract_checksum(self):
assert extract_checksum(self.valid_room_a) == 'abxyz'
assert extract_checksum(self.valid_room_b) == 'abcde'
assert extract_checksum(self.invalid_room) == 'oarel'
def test_create_checksum(self):
assert create_checksum(self.valid_room_a) == 'abxyz'
assert create_checksum(self.valid_room_b) == 'abcde'
assert create_checksum(self.invalid_room) == 'aelor'
def test_is_valid_checksum(self):
assert is_valid_checksum(self.valid_room_a) == True
assert is_valid_checksum(self.valid_room_b) == True
assert is_valid_checksum(self.invalid_room) == False
def test_extract_sector_id(self):
assert extract_sector_id(self.valid_room_a) == '123'
assert extract_sector_id(self.valid_room_b) == '987'
def test_sum_sector_ids(self):
assert sum_sector_ids([self.valid_room_a, self.valid_room_b,
self.invalid_room]) == 1110
|
|
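One implementation consistent with every assertion in this record might look like the sketch below; the function names come from the test's import list, everything else is assumed. Note the final alphabetical sort of the selected letters, which is what makes 'not-a-real-room-404[oarel]' fail validation here.

import re
from collections import Counter

def extract_checksum(room):
    # Bracketed suffix, e.g. 'abxyz' from '...-123[abxyz]'.
    return re.search(r'\[([a-z]+)\]', room).group(1)

def extract_sector_id(room):
    # The tests compare against strings, so no int() here.
    return re.search(r'(\d+)', room).group(1)

def create_checksum(room):
    name = room.split('[')[0].rsplit('-', 1)[0].replace('-', '')
    # Five most common letters, ties broken alphabetically...
    top5 = sorted(Counter(name).items(), key=lambda kv: (-kv[1], kv[0]))[:5]
    # ...then emitted in alphabetical order -- the step that makes
    # 'not-a-real-room-404[oarel]' invalid here ('aelor' != 'oarel').
    return ''.join(sorted(letter for letter, _ in top5))

def is_valid_checksum(room):
    return extract_checksum(room) == create_checksum(room)

def sum_sector_ids(rooms):
    # 123 + 987 = 1110 for the two valid rooms in the test.
    return sum(int(extract_sector_id(r)) for r in rooms if is_valid_checksum(r))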
238c550d8131f3b35dae437182c924191ff08b72
|
tools/find_scan_roots.py
|
tools/find_scan_roots.py
|
#!/usr/bin/env python
# Copyright (C) 2018 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Takes output of
# adb shell 'find /data -print0 | xargs -0 ls -ldZ' | awk '{print $5 " " $9}'
# in standard input and generates list of directories we need to scan to cover
# the labels given on the command line.
import sys
import argparse
class Node(object):
def __init__(self, name, label=None):
self.name = name
self.label = label
self.marked = False
self.children = {}
def Find(self, components):
if not components:
return self
child = components[0]
if child in self.children:
return self.children[child].Find(components[1:])
n = Node(child)
self.children[child] = n
return n
def __iter__(self):
for child in self.children.itervalues():
yield self.name + '/' + child.name, child
for p, ch in child:
yield self.name + '/' + p, ch
def Mark(self, labels):
# Either incorrect label or already marked, we do not need to scan
# this path.
if self.marked or self.label not in labels:
return False
self.marked = True
for child in self.children.itervalues():
child.Mark(labels)
return True
def BuildTree(stream=sys.stdin):
root = Node("")
for line in stream:
line = line.strip()
if not line:
continue
label, path = line.split(' ', 1)
# u:object_r:system_data_file:s0 -> system_data_file.
sanitized_label = label.split(':')[2]
# Strip leading slash.
components = path[1:].split('/')
n = root.Find(components)
n.label = sanitized_label
return root
def main():
parser = argparse.ArgumentParser()
parser.add_argument('labels', metavar='L', type=str, nargs='+',
help='labels we want to find')
args = parser.parse_args()
root = BuildTree()
for fullpath, elem in root:
if elem.Mark(args.labels):
print fullpath
if __name__ == '__main__':
main()
|
Add tool to find scan roots.
|
Add tool to find scan roots.
Bug: 73625480
Change-Id: I93b405feb999aaed9675d2ea52712663fa83c9e0
|
Python
|
apache-2.0
|
google/perfetto,google/perfetto,google/perfetto,google/perfetto,google/perfetto,google/perfetto,google/perfetto,google/perfetto
|
Add tool to find scan roots.
Bug: 73625480
Change-Id: I93b405feb999aaed9675d2ea52712663fa83c9e0
|
#!/usr/bin/env python
# Copyright (C) 2018 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Takes output of
# adb shell 'find /data -print0 | xargs -0 ls -ldZ' | awk '{print $5 " " $9}'
# in standard input and generates list of directories we need to scan to cover
# the labels given on the command line.
import sys
import argparse
class Node(object):
def __init__(self, name, label=None):
self.name = name
self.label = label
self.marked = False
self.children = {}
def Find(self, components):
if not components:
return self
child = components[0]
if child in self.children:
return self.children[child].Find(components[1:])
n = Node(child)
self.children[child] = n
return n
def __iter__(self):
for child in self.children.itervalues():
yield self.name + '/' + child.name, child
for p, ch in child:
yield self.name + '/' + p, ch
def Mark(self, labels):
# Either incorrect label or already marked, we do not need to scan
# this path.
if self.marked or self.label not in labels:
return False
self.marked = True
for child in self.children.itervalues():
child.Mark(labels)
return True
def BuildTree(stream=sys.stdin):
root = Node("")
for line in stream:
line = line.strip()
if not line:
continue
label, path = line.split(' ', 1)
# u:object_r:system_data_file:s0 -> system_data_file.
sanitized_label = label.split(':')[2]
# Strip leading slash.
components = path[1:].split('/')
n = root.Find(components)
n.label = sanitized_label
return root
def main():
parser = argparse.ArgumentParser()
parser.add_argument('labels', metavar='L', type=str, nargs='+',
help='labels we want to find')
args = parser.parse_args()
root = BuildTree()
for fullpath, elem in root:
if elem.Mark(args.labels):
print fullpath
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add tool to find scan roots.
Bug: 73625480
Change-Id: I93b405feb999aaed9675d2ea52712663fa83c9e0<commit_after>
|
#!/usr/bin/env python
# Copyright (C) 2018 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Takes output of
# adb shell 'find /data -print0 | xargs -0 ls -ldZ' | awk '{print $5 " " $9}'
# in standard input and generates list of directories we need to scan to cover
# the labels given on the command line.
import sys
import argparse
class Node(object):
def __init__(self, name, label=None):
self.name = name
self.label = label
self.marked = False
self.children = {}
def Find(self, components):
if not components:
return self
child = components[0]
if child in self.children:
return self.children[child].Find(components[1:])
n = Node(child)
self.children[child] = n
return n
def __iter__(self):
for child in self.children.itervalues():
yield self.name + '/' + child.name, child
for p, ch in child:
yield self.name + '/' + p, ch
def Mark(self, labels):
# Either incorrect label or already marked, we do not need to scan
# this path.
if self.marked or self.label not in labels:
return False
self.marked = True
for child in self.children.itervalues():
child.Mark(labels)
return True
def BuildTree(stream=sys.stdin):
root = Node("")
for line in stream:
line = line.strip()
if not line:
continue
label, path = line.split(' ', 1)
# u:object_r:system_data_file:s0 -> system_data_file.
sanitized_label = label.split(':')[2]
# Strip leading slash.
components = path[1:].split('/')
n = root.Find(components)
n.label = sanitized_label
return root
def main():
parser = argparse.ArgumentParser()
parser.add_argument('labels', metavar='L', type=str, nargs='+',
help='labels we want to find')
args = parser.parse_args()
root = BuildTree()
for fullpath, elem in root:
if elem.Mark(args.labels):
print fullpath
if __name__ == '__main__':
main()
|
Add tool to find scan roots.
Bug: 73625480
Change-Id: I93b405feb999aaed9675d2ea52712663fa83c9e0
#!/usr/bin/env python
# Copyright (C) 2018 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Takes output of
# adb shell 'find /data -print0 | xargs -0 ls -ldZ' | awk '{print $5 " " $9}'
# in standard input and generates list of directories we need to scan to cover
# the labels given on the command line.
import sys
import argparse
class Node(object):
def __init__(self, name, label=None):
self.name = name
self.label = label
self.marked = False
self.children = {}
def Find(self, components):
if not components:
return self
child = components[0]
if child in self.children:
return self.children[child].Find(components[1:])
n = Node(child)
self.children[child] = n
return n
def __iter__(self):
for child in self.children.itervalues():
yield self.name + '/' + child.name, child
for p, ch in child:
yield self.name + '/' + p, ch
def Mark(self, labels):
# Either incorrect label or already marked, we do not need to scan
# this path.
if self.marked or self.label not in labels:
return False
self.marked = True
for child in self.children.itervalues():
child.Mark(labels)
return True
def BuildTree(stream=sys.stdin):
root = Node("")
for line in stream:
line = line.strip()
if not line:
continue
label, path = line.split(' ', 1)
# u:object_r:system_data_file:s0 -> system_data_file.
sanitized_label = label.split(':')[2]
# Strip leading slash.
components = path[1:].split('/')
n = root.Find(components)
n.label = sanitized_label
return root
def main():
parser = argparse.ArgumentParser()
parser.add_argument('labels', metavar='L', type=str, nargs='+',
help='labels we want to find')
args = parser.parse_args()
root = BuildTree()
for fullpath, elem in root:
if elem.Mark(args.labels):
print fullpath
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add tool to find scan roots.
Bug: 73625480
Change-Id: I93b405feb999aaed9675d2ea52712663fa83c9e0<commit_after>#!/usr/bin/env python
# Copyright (C) 2018 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Takes output of
# adb shell 'find /data -print0 | xargs -0 ls -ldZ' | awk '{print $5 " " $9}'
# in standard input and generates list of directories we need to scan to cover
# the labels given on the command line.
import sys
import argparse
class Node(object):
def __init__(self, name, label=None):
self.name = name
self.label = label
self.marked = False
self.children = {}
def Find(self, components):
if not components:
return self
child = components[0]
if child in self.children:
return self.children[child].Find(components[1:])
n = Node(child)
self.children[child] = n
return n
def __iter__(self):
for child in self.children.itervalues():
yield self.name + '/' + child.name, child
for p, ch in child:
yield self.name + '/' + p, ch
def Mark(self, labels):
# Either incorrect label or already marked, we do not need to scan
# this path.
if self.marked or self.label not in labels:
return False
self.marked = True
for child in self.children.itervalues():
child.Mark(labels)
return True
def BuildTree(stream=sys.stdin):
root = Node("")
for line in stream:
line = line.strip()
if not line:
continue
label, path = line.split(' ', 1)
# u:object_r:system_data_file:s0 -> system_data_file.
sanitized_label = label.split(':')[2]
# Strip leading slash.
components = path[1:].split('/')
n = root.Find(components)
n.label = sanitized_label
return root
def main():
parser = argparse.ArgumentParser()
parser.add_argument('labels', metavar='L', type=str, nargs='+',
help='labels we want to find')
args = parser.parse_args()
root = BuildTree()
for fullpath, elem in root:
if elem.Mark(args.labels):
print fullpath
if __name__ == '__main__':
main()
|
|
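A hypothetical smoke test for the traversal logic in this record, feeding a small inline sample instead of the adb pipeline; BuildTree and Node are the script's own definitions, and the script targets Python 2 (note itervalues and the print statement).

import io

SAMPLE = u"""u:object_r:system_data_file:s0 /data/system
u:object_r:app_data_file:s0 /data/data/com.example
u:object_r:app_data_file:s0 /data/data/com.example/cache
"""

root = BuildTree(io.StringIO(SAMPLE))
for fullpath, node in root:
    if node.Mark(['app_data_file']):
        # Only /data/data/com.example prints: its child is already covered.
        print(fullpath)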
7eac12bb8fe31b397c8598af14973f2337ca8c53
|
cla_public/apps/contact/tests/test_notes.py
|
cla_public/apps/contact/tests/test_notes.py
|
import unittest
from werkzeug.datastructures import MultiDict
from cla_public.app import create_app
from cla_public.apps.contact.forms import ContactForm
def submit(**kwargs):
return ContactForm(MultiDict(kwargs), csrf_enabled=False)
class NotesTest(unittest.TestCase):
def setUp(self):
app = create_app('config/testing.py')
app.test_request_context().push()
def validate_notes(self, notes):
form = submit(
extra_notes=notes)
form.validate()
return u'Your notes must be 4000 characters or less' not in \
form.extra_notes.errors
def test_notes_max_length(self):
longest_allowed = 'x' * 4000
self.assertTrue(self.validate_notes(longest_allowed))
too_long = longest_allowed + 'x'
self.assertFalse(self.validate_notes(too_long))
|
Add Python test for notes length validation
|
Add Python test for notes length validation
|
Python
|
mit
|
ministryofjustice/cla_public,ministryofjustice/cla_public,ministryofjustice/cla_public,ministryofjustice/cla_public
|
Add Python test for notes length validation
|
import unittest
from werkzeug.datastructures import MultiDict
from cla_public.app import create_app
from cla_public.apps.contact.forms import ContactForm
def submit(**kwargs):
return ContactForm(MultiDict(kwargs), csrf_enabled=False)
class NotesTest(unittest.TestCase):
def setUp(self):
app = create_app('config/testing.py')
app.test_request_context().push()
def validate_notes(self, notes):
form = submit(
extra_notes=notes)
form.validate()
return u'Your notes must be 4000 characters or less' not in \
form.extra_notes.errors
def test_notes_max_length(self):
longest_allowed = 'x' * 4000
self.assertTrue(self.validate_notes(longest_allowed))
too_long = longest_allowed + 'x'
self.assertFalse(self.validate_notes(too_long))
|
<commit_before><commit_msg>Add Python test for notes length validation<commit_after>
|
import unittest
from werkzeug.datastructures import MultiDict
from cla_public.app import create_app
from cla_public.apps.contact.forms import ContactForm
def submit(**kwargs):
return ContactForm(MultiDict(kwargs), csrf_enabled=False)
class NotesTest(unittest.TestCase):
def setUp(self):
app = create_app('config/testing.py')
app.test_request_context().push()
def validate_notes(self, notes):
form = submit(
extra_notes=notes)
form.validate()
return u'Your notes must be 4000 characters or less' not in \
form.extra_notes.errors
def test_notes_max_length(self):
longest_allowed = 'x' * 4000
self.assertTrue(self.validate_notes(longest_allowed))
too_long = longest_allowed + 'x'
self.assertFalse(self.validate_notes(too_long))
|
Add Python test for notes length validation
import unittest
from werkzeug.datastructures import MultiDict
from cla_public.app import create_app
from cla_public.apps.contact.forms import ContactForm
def submit(**kwargs):
return ContactForm(MultiDict(kwargs), csrf_enabled=False)
class NotesTest(unittest.TestCase):
def setUp(self):
app = create_app('config/testing.py')
app.test_request_context().push()
def validate_notes(self, notes):
form = submit(
extra_notes=notes)
form.validate()
return u'Your notes must be 4000 characters or less' not in \
form.extra_notes.errors
def test_notes_max_length(self):
longest_allowed = 'x' * 4000
self.assertTrue(self.validate_notes(longest_allowed))
too_long = longest_allowed + 'x'
self.assertFalse(self.validate_notes(too_long))
|
<commit_before><commit_msg>Add Python test for notes length validation<commit_after>import unittest
from werkzeug.datastructures import MultiDict
from cla_public.app import create_app
from cla_public.apps.contact.forms import ContactForm
def submit(**kwargs):
return ContactForm(MultiDict(kwargs), csrf_enabled=False)
class NotesTest(unittest.TestCase):
def setUp(self):
app = create_app('config/testing.py')
app.test_request_context().push()
def validate_notes(self, notes):
form = submit(
extra_notes=notes)
form.validate()
return u'Your notes must be 4000 characters or less' not in \
form.extra_notes.errors
def test_notes_max_length(self):
longest_allowed = 'x' * 4000
self.assertTrue(self.validate_notes(longest_allowed))
too_long = longest_allowed + 'x'
self.assertFalse(self.validate_notes(too_long))
|
|
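For context, the form-side rule this test exercises could be expressed with a WTForms length validator along these lines; the field name and error message mirror the test, while the class itself is only a sketch.

from wtforms import Form, TextAreaField
from wtforms.validators import Length

class ContactFormSketch(Form):
    # The test submits up to 4000 characters and expects this message
    # on the 4001st.
    extra_notes = TextAreaField(
        'Notes',
        validators=[Length(
            max=4000,
            message=u'Your notes must be 4000 characters or less')])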
7f4de89928de1580dceedff3c1a581660e70f954
|
tests/sentry/utils/test_cursors.py
|
tests/sentry/utils/test_cursors.py
|
from __future__ import absolute_import
from mock import Mock
from sentry.utils.cursors import build_cursor, Cursor
def build_mock(**attrs):
obj = Mock()
for key, value in attrs.items():
setattr(obj, key, value)
obj.__repr__ = lambda x: repr(attrs)
return obj
def test_build_cursor():
event1 = build_mock(id=1.1, message='one')
event2 = build_mock(id=1.1, message='two')
event3 = build_mock(id=2.1, message='three')
results = [event1, event2, event3]
cursor = build_cursor(results, key='id', limit=1)
assert isinstance(cursor.next, Cursor)
assert cursor.next
assert isinstance(cursor.prev, Cursor)
assert not cursor.prev
assert list(cursor) == [event1]
cursor = build_cursor(results, key='id', limit=1, cursor=cursor.next)
assert isinstance(cursor.next, Cursor)
assert cursor.next
assert isinstance(cursor.prev, Cursor)
assert cursor.prev
assert list(cursor) == [event2]
cursor = build_cursor(results, key='id', limit=1, cursor=cursor.next)
assert isinstance(cursor.next, Cursor)
assert cursor.next
assert isinstance(cursor.prev, Cursor)
assert cursor.prev
assert list(cursor) == [event3]
|
Add test exhibiting cursor failure
|
Add test exhibiting cursor failure
|
Python
|
bsd-3-clause
|
BayanGroup/sentry,kevinlondon/sentry,imankulov/sentry,fotinakis/sentry,BuildingLink/sentry,BuildingLink/sentry,mvaled/sentry,fuziontech/sentry,nicholasserra/sentry,fotinakis/sentry,jean/sentry,kevinlondon/sentry,ngonzalvez/sentry,mitsuhiko/sentry,ifduyue/sentry,fotinakis/sentry,alexm92/sentry,zenefits/sentry,alexm92/sentry,jean/sentry,gencer/sentry,nicholasserra/sentry,jean/sentry,mvaled/sentry,mvaled/sentry,Kryz/sentry,korealerts1/sentry,daevaorn/sentry,Natim/sentry,zenefits/sentry,hongliang5623/sentry,looker/sentry,gencer/sentry,mvaled/sentry,ngonzalvez/sentry,beeftornado/sentry,looker/sentry,Kryz/sentry,hongliang5623/sentry,JamesMura/sentry,beeftornado/sentry,zenefits/sentry,mvaled/sentry,beeftornado/sentry,songyi199111/sentry,gencer/sentry,felixbuenemann/sentry,songyi199111/sentry,jean/sentry,wong2/sentry,felixbuenemann/sentry,hongliang5623/sentry,Natim/sentry,fotinakis/sentry,alexm92/sentry,zenefits/sentry,looker/sentry,songyi199111/sentry,gencer/sentry,Kryz/sentry,daevaorn/sentry,BayanGroup/sentry,wong2/sentry,BayanGroup/sentry,fuziontech/sentry,looker/sentry,imankulov/sentry,korealerts1/sentry,JackDanger/sentry,nicholasserra/sentry,JamesMura/sentry,ifduyue/sentry,korealerts1/sentry,gencer/sentry,fuziontech/sentry,JackDanger/sentry,wong2/sentry,felixbuenemann/sentry,kevinlondon/sentry,looker/sentry,mitsuhiko/sentry,JamesMura/sentry,daevaorn/sentry,mvaled/sentry,jean/sentry,imankulov/sentry,JamesMura/sentry,BuildingLink/sentry,zenefits/sentry,JamesMura/sentry,ifduyue/sentry,Natim/sentry,BuildingLink/sentry,JackDanger/sentry,ngonzalvez/sentry,ifduyue/sentry,daevaorn/sentry,BuildingLink/sentry,ifduyue/sentry
|
Add test exhibiting cursor failure
|
from __future__ import absolute_import
from mock import Mock
from sentry.utils.cursors import build_cursor, Cursor
def build_mock(**attrs):
obj = Mock()
for key, value in attrs.items():
setattr(obj, key, value)
obj.__repr__ = lambda x: repr(attrs)
return obj
def test_build_cursor():
event1 = build_mock(id=1.1, message='one')
event2 = build_mock(id=1.1, message='two')
event3 = build_mock(id=2.1, message='three')
results = [event1, event2, event3]
cursor = build_cursor(results, key='id', limit=1)
assert isinstance(cursor.next, Cursor)
assert cursor.next
assert isinstance(cursor.prev, Cursor)
assert not cursor.prev
assert list(cursor) == [event1]
cursor = build_cursor(results, key='id', limit=1, cursor=cursor.next)
assert isinstance(cursor.next, Cursor)
assert cursor.next
assert isinstance(cursor.prev, Cursor)
assert cursor.prev
assert list(cursor) == [event2]
cursor = build_cursor(results, key='id', limit=1, cursor=cursor.next)
assert isinstance(cursor.next, Cursor)
assert cursor.next
assert isinstance(cursor.prev, Cursor)
assert cursor.prev
assert list(cursor) == [event3]
|
<commit_before><commit_msg>Add test exhibiting cursor failure<commit_after>
|
from __future__ import absolute_import
from mock import Mock
from sentry.utils.cursors import build_cursor, Cursor
def build_mock(**attrs):
obj = Mock()
for key, value in attrs.items():
setattr(obj, key, value)
obj.__repr__ = lambda x: repr(attrs)
return obj
def test_build_cursor():
event1 = build_mock(id=1.1, message='one')
event2 = build_mock(id=1.1, message='two')
event3 = build_mock(id=2.1, message='three')
results = [event1, event2, event3]
cursor = build_cursor(results, key='id', limit=1)
assert isinstance(cursor.next, Cursor)
assert cursor.next
assert isinstance(cursor.prev, Cursor)
assert not cursor.prev
assert list(cursor) == [event1]
cursor = build_cursor(results, key='id', limit=1, cursor=cursor.next)
assert isinstance(cursor.next, Cursor)
assert cursor.next
assert isinstance(cursor.prev, Cursor)
assert cursor.prev
assert list(cursor) == [event2]
cursor = build_cursor(results, key='id', limit=1, cursor=cursor.next)
assert isinstance(cursor.next, Cursor)
assert cursor.next
assert isinstance(cursor.prev, Cursor)
assert cursor.prev
assert list(cursor) == [event3]
|
Add test exhibiting cursor failure
from __future__ import absolute_import
from mock import Mock
from sentry.utils.cursors import build_cursor, Cursor
def build_mock(**attrs):
obj = Mock()
for key, value in attrs.items():
setattr(obj, key, value)
obj.__repr__ = lambda x: repr(attrs)
return obj
def test_build_cursor():
event1 = build_mock(id=1.1, message='one')
event2 = build_mock(id=1.1, message='two')
event3 = build_mock(id=2.1, message='three')
results = [event1, event2, event3]
cursor = build_cursor(results, key='id', limit=1)
assert isinstance(cursor.next, Cursor)
assert cursor.next
assert isinstance(cursor.prev, Cursor)
assert not cursor.prev
assert list(cursor) == [event1]
cursor = build_cursor(results, key='id', limit=1, cursor=cursor.next)
assert isinstance(cursor.next, Cursor)
assert cursor.next
assert isinstance(cursor.prev, Cursor)
assert cursor.prev
assert list(cursor) == [event2]
cursor = build_cursor(results, key='id', limit=1, cursor=cursor.next)
assert isinstance(cursor.next, Cursor)
assert cursor.next
assert isinstance(cursor.prev, Cursor)
assert cursor.prev
assert list(cursor) == [event3]
|
<commit_before><commit_msg>Add test exhibiting cursor failure<commit_after>from __future__ import absolute_import
from mock import Mock
from sentry.utils.cursors import build_cursor, Cursor
def build_mock(**attrs):
obj = Mock()
for key, value in attrs.items():
setattr(obj, key, value)
obj.__repr__ = lambda x: repr(attrs)
return obj
def test_build_cursor():
event1 = build_mock(id=1.1, message='one')
event2 = build_mock(id=1.1, message='two')
event3 = build_mock(id=2.1, message='three')
results = [event1, event2, event3]
cursor = build_cursor(results, key='id', limit=1)
assert isinstance(cursor.next, Cursor)
assert cursor.next
assert isinstance(cursor.prev, Cursor)
assert not cursor.prev
assert list(cursor) == [event1]
cursor = build_cursor(results, key='id', limit=1, cursor=cursor.next)
assert isinstance(cursor.next, Cursor)
assert cursor.next
assert isinstance(cursor.prev, Cursor)
assert cursor.prev
assert list(cursor) == [event2]
cursor = build_cursor(results, key='id', limit=1, cursor=cursor.next)
assert isinstance(cursor.next, Cursor)
assert cursor.next
assert isinstance(cursor.prev, Cursor)
assert cursor.prev
assert list(cursor) == [event3]
|
|
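The failure being exhibited is the classic duplicate-key pagination problem: two events share id=1.1, so a cursor carrying only the key value cannot tell them apart. A minimal sketch of the value-plus-offset scheme that handles this, using plain tuples rather than Sentry's Cursor type:

events = [('one', 1.1), ('two', 1.1), ('three', 2.1)]

def page(rows, limit, cursor=(float('-inf'), 0)):
    value, offset = cursor
    # Offset skips rows already consumed within a run of equal keys.
    eligible = [r for r in rows if r[1] >= value][offset:]
    window = eligible[:limit]
    if not window:
        return window, None
    last = window[-1][1]
    next_offset = sum(1 for r in window if r[1] == last)
    if value == last:
        next_offset += offset  # still inside the same run of duplicates
    return window, (last, next_offset)

cursor = (float('-inf'), 0)
for _ in range(3):
    window, cursor = page(events, 1, cursor)
    print(window)  # [('one', 1.1)], then [('two', 1.1)], then [('three', 2.1)]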
dd287c4edfb29e848eaaba93c769ebfaab0db58c
|
tests/non_nose_tests/test_interop_client.py
|
tests/non_nose_tests/test_interop_client.py
|
from time import sleep
from SUASSystem import InteropClientConverter
if __name__ == '__main__':
interop_client = InteropClientConverter()
while True:
print(interop_client.get_obstacles())
sleep(1)
|
Add test for communicating with the interoperability server. This will allow us to verify we can make a connection with the judges' server at competition
|
Add test for communicating with the interoperability server. This will allow us to verify we can make a connection with the judges' server at competition
|
Python
|
mit
|
FlintHill/SUAS-Competition,FlintHill/SUAS-Competition,FlintHill/SUAS-Competition,FlintHill/SUAS-Competition,FlintHill/SUAS-Competition
|
Add test for communicating with the interoperability server. This will allow us to verify we can make a connection with the judges' server at competition
|
from time import sleep
from SUASSystem import InteropClientConverter
if __name__ == '__main__':
interop_client = InteropClientConverter()
while True:
print(interop_client.get_obstacles())
sleep(1)
|
<commit_before><commit_msg>Add test for communicating with the interoperability server. This will allow us to verify we can make a connection with the judges' server at competition<commit_after>
|
from time import sleep
from SUASSystem import InteropClientConverter
if __name__ == '__main__':
interop_client = InteropClientConverter()
while True:
print(interop_client.get_obstacles())
sleep(1)
|
Add test for communicating with the interoperability server. This will allow us to verify we can make a connection with the judges' server at competition
from time import sleep
from SUASSystem import InteropClientConverter
if __name__ == '__main__':
interop_client = InteropClientConverter()
while True:
print(interop_client.get_obstacles())
sleep(1)
|
<commit_before><commit_msg>Add test for communicating with the interoperability server. This will allow us to verify we can make a connection with the judges' server at competition<commit_after>from time import sleep
from SUASSystem import InteropClientConverter
if __name__ == '__main__':
interop_client = InteropClientConverter()
while True:
print(interop_client.get_obstacles())
sleep(1)
|
|
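A slightly more defensive variant of that poll loop, purely as a sketch: the client object and get_obstacles call are taken from the record, while the wrapper function is assumed. It bounds the rate and exits cleanly on Ctrl-C instead of raising a traceback.

from time import sleep

def poll_obstacles(client, interval=1.0):
    try:
        while True:
            print(client.get_obstacles())
            sleep(interval)
    except KeyboardInterrupt:
        # Manual tests get interrupted by hand; end quietly.
        print('stopping obstacle poll')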
3592cd622e61ee689b096c99d46b6d936109e383
|
tests/qtcore/qstring_buffer_protocol_test.py
|
tests/qtcore/qstring_buffer_protocol_test.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
'''Tests QString implementation of Python buffer protocol'''
import unittest
from os.path import isdir
from PySide.QtCore import QString
class QStringBufferProtocolTest(unittest.TestCase):
'''Tests QString implementation of Python buffer protocol'''
def testQStringBufferProtocol(self):
#Tests QString implementation of Python buffer protocol using the os.path.isdir
#function which an unicode object or other object implementing the Python buffer protocol
os_path_isdir_function_correctly_called_with_a_qstring = True
try:
isdir(QString('/tmp'))
except:
os_path_isdir_function_correctly_called_with_a_qstring = False
self.assertTrue(os_path_isdir_function_correctly_called_with_a_qstring)
if __name__ == '__main__':
unittest.main()
|
Revert "We do not support character buffer protocol on QStrings."
|
Revert "We do not support character buffer protocol on QStrings."
This reverts commit 1a7cbb2473327abad936447c47818ee13df2992c.
|
Python
|
lgpl-2.1
|
M4rtinK/pyside-bb10,gbaty/pyside2,qtproject/pyside-pyside,M4rtinK/pyside-android,M4rtinK/pyside-bb10,PySide/PySide,qtproject/pyside-pyside,M4rtinK/pyside-android,IronManMark20/pyside2,pankajp/pyside,pankajp/pyside,IronManMark20/pyside2,enthought/pyside,M4rtinK/pyside-bb10,enthought/pyside,qtproject/pyside-pyside,enthought/pyside,RobinD42/pyside,BadSingleton/pyside2,PySide/PySide,M4rtinK/pyside-bb10,IronManMark20/pyside2,gbaty/pyside2,M4rtinK/pyside-android,RobinD42/pyside,enthought/pyside,enthought/pyside,enthought/pyside,RobinD42/pyside,M4rtinK/pyside-android,gbaty/pyside2,RobinD42/pyside,M4rtinK/pyside-android,M4rtinK/pyside-android,qtproject/pyside-pyside,BadSingleton/pyside2,gbaty/pyside2,pankajp/pyside,pankajp/pyside,M4rtinK/pyside-bb10,IronManMark20/pyside2,RobinD42/pyside,enthought/pyside,BadSingleton/pyside2,qtproject/pyside-pyside,BadSingleton/pyside2,M4rtinK/pyside-bb10,PySide/PySide,RobinD42/pyside,RobinD42/pyside,pankajp/pyside,PySide/PySide,IronManMark20/pyside2,PySide/PySide,BadSingleton/pyside2,gbaty/pyside2
|
Revert "We do not support character buffer protocol on QStrings."
This reverts commit 1a7cbb2473327abad936447c47818ee13df2992c.
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
'''Tests QString implementation of Python buffer protocol'''
import unittest
from os.path import isdir
from PySide.QtCore import QString
class QStringBufferProtocolTest(unittest.TestCase):
'''Tests QString implementation of Python buffer protocol'''
def testQStringBufferProtocol(self):
#Tests QString implementation of Python buffer protocol using the os.path.isdir
#function which an unicode object or other object implementing the Python buffer protocol
os_path_isdir_function_correctly_called_with_a_qstring = True
try:
isdir(QString('/tmp'))
except:
os_path_isdir_function_correctly_called_with_a_qstring = False
self.assertTrue(os_path_isdir_function_correctly_called_with_a_qstring)
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Revert "We do not support character buffer protocol on QStrings."
This reverts commit 1a7cbb2473327abad936447c47818ee13df2992c.<commit_after>
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
'''Tests QString implementation of Python buffer protocol'''
import unittest
from os.path import isdir
from PySide.QtCore import QString
class QStringBufferProtocolTest(unittest.TestCase):
'''Tests QString implementation of Python buffer protocol'''
def testQStringBufferProtocol(self):
#Tests QString implementation of Python buffer protocol using the os.path.isdir
#function which an unicode object or other object implementing the Python buffer protocol
os_path_isdir_function_correctly_called_with_a_qstring = True
try:
isdir(QString('/tmp'))
except:
os_path_isdir_function_correctly_called_with_a_qstring = False
self.assertTrue(os_path_isdir_function_correctly_called_with_a_qstring)
if __name__ == '__main__':
unittest.main()
|
Revert "We do not support character buffer protocol on QStrings."
This reverts commit 1a7cbb2473327abad936447c47818ee13df2992c.
#!/usr/bin/python
# -*- coding: utf-8 -*-
'''Tests QString implementation of Python buffer protocol'''
import unittest
from os.path import isdir
from PySide.QtCore import QString
class QStringBufferProtocolTest(unittest.TestCase):
'''Tests QString implementation of Python buffer protocol'''
def testQStringBufferProtocol(self):
#Tests QString implementation of Python buffer protocol using the os.path.isdir
#function which an unicode object or other object implementing the Python buffer protocol
os_path_isdir_function_correctly_called_with_a_qstring = True
try:
isdir(QString('/tmp'))
except:
os_path_isdir_function_correctly_called_with_a_qstring = False
self.assertTrue(os_path_isdir_function_correctly_called_with_a_qstring)
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Revert "We do not support character buffer protocol on QStrings."
This reverts commit 1a7cbb2473327abad936447c47818ee13df2992c.<commit_after>#!/usr/bin/python
# -*- coding: utf-8 -*-
'''Tests QString implementation of Python buffer protocol'''
import unittest
from os.path import isdir
from PySide.QtCore import QString
class QStringBufferProtocolTest(unittest.TestCase):
'''Tests QString implementation of Python buffer protocol'''
def testQStringBufferProtocol(self):
#Tests QString implementation of Python buffer protocol using the os.path.isdir
#function which an unicode object or other object implementing the Python buffer protocol
os_path_isdir_function_correctly_called_with_a_qstring = True
try:
isdir(QString('/tmp'))
except:
os_path_isdir_function_correctly_called_with_a_qstring = False
self.assertTrue(os_path_isdir_function_correctly_called_with_a_qstring)
if __name__ == '__main__':
unittest.main()
|
|
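Loosely, "implements the Python buffer protocol" means an object can hand out its underlying bytes without copying. The memoryview built-in gives a rough feel for the contract the test expects QString to satisfy; this is only an analogy, since the test itself targets the old Python 2 character buffer consumed by os.path.isdir.

data = b'/tmp'
view = memoryview(data)           # bytes expose a buffer, so this succeeds
assert view.tobytes() == b'/tmp'

try:
    memoryview(3.14)              # floats expose no buffer
except TypeError as exc:
    print('no buffer protocol:', exc)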
64f13a006412112830e1d7cc2ff71ea32cb4c1b6
|
migrations/versions/0150_another_letter_org.py
|
migrations/versions/0150_another_letter_org.py
|
"""empty message
Revision ID: 0150_another_letter_org
Revises: 0149_add_crown_to_services
Create Date: 2017-06-29 12:44:16.815039
"""
# revision identifiers, used by Alembic.
revision = '0150_another_letter_org'
down_revision = '0149_add_crown_to_services'
from alembic import op
NEW_ORGANISATIONS = [
('006', 'DWP (Welsh)'),
('007', 'Department for Communities'),
('008', 'Marine Management Organisation'),
]
def upgrade():
for numeric_id, name in NEW_ORGANISATIONS:
op.execute("""
INSERT
INTO dvla_organisation
VALUES ('{}', '{}')
""".format(numeric_id, name))
def downgrade():
for numeric_id, _ in NEW_ORGANISATIONS:
op.execute("""
DELETE
FROM dvla_organisation
WHERE id = '{}'
""".format(numeric_id))
|
Add a new organisation for letter branding
|
Add a new organisation for letter branding
|
Python
|
mit
|
alphagov/notifications-api,alphagov/notifications-api
|
Add a new organisation for letter branding
|
"""empty message
Revision ID: 0150_another_letter_org
Revises: 0149_add_crown_to_services
Create Date: 2017-06-29 12:44:16.815039
"""
# revision identifiers, used by Alembic.
revision = '0150_another_letter_org'
down_revision = '0149_add_crown_to_services'
from alembic import op
NEW_ORGANISATIONS = [
('006', 'DWP (Welsh)'),
('007', 'Department for Communities'),
('008', 'Marine Management Organisation'),
]
def upgrade():
for numeric_id, name in NEW_ORGANISATIONS:
op.execute("""
INSERT
INTO dvla_organisation
VALUES ('{}', '{}')
""".format(numeric_id, name))
def downgrade():
for numeric_id, _ in NEW_ORGANISATIONS:
op.execute("""
DELETE
FROM dvla_organisation
WHERE id = '{}'
""".format(numeric_id))
|
<commit_before><commit_msg>Add a new organisation for letter branding<commit_after>
|
"""empty message
Revision ID: 0150_another_letter_org
Revises: 0149_add_crown_to_services
Create Date: 2017-06-29 12:44:16.815039
"""
# revision identifiers, used by Alembic.
revision = '0150_another_letter_org'
down_revision = '0149_add_crown_to_services'
from alembic import op
NEW_ORGANISATIONS = [
('006', 'DWP (Welsh)'),
('007', 'Department for Communities'),
('008', 'Marine Management Organisation'),
]
def upgrade():
for numeric_id, name in NEW_ORGANISATIONS:
op.execute("""
INSERT
INTO dvla_organisation
VALUES ('{}', '{}')
""".format(numeric_id, name))
def downgrade():
for numeric_id, _ in NEW_ORGANISATIONS:
op.execute("""
DELETE
FROM dvla_organisation
WHERE id = '{}'
""".format(numeric_id))
|
Add a new organisation for letter branding
"""empty message
Revision ID: 0150_another_letter_org
Revises: 0149_add_crown_to_services
Create Date: 2017-06-29 12:44:16.815039
"""
# revision identifiers, used by Alembic.
revision = '0150_another_letter_org'
down_revision = '0149_add_crown_to_services'
from alembic import op
NEW_ORGANISATIONS = [
('006', 'DWP (Welsh)'),
('007', 'Department for Communities'),
('008', 'Marine Management Organisation'),
]
def upgrade():
for numeric_id, name in NEW_ORGANISATIONS:
op.execute("""
INSERT
INTO dvla_organisation
VALUES ('{}', '{}')
""".format(numeric_id, name))
def downgrade():
for numeric_id, _ in NEW_ORGANISATIONS:
op.execute("""
DELETE
FROM dvla_organisation
WHERE id = '{}'
""".format(numeric_id))
|
<commit_before><commit_msg>Add a new organisation for letter branding<commit_after>"""empty message
Revision ID: 0150_another_letter_org
Revises: 0149_add_crown_to_services
Create Date: 2017-06-29 12:44:16.815039
"""
# revision identifiers, used by Alembic.
revision = '0150_another_letter_org'
down_revision = '0149_add_crown_to_services'
from alembic import op
NEW_ORGANISATIONS = [
('006', 'DWP (Welsh)'),
('007', 'Department for Communities'),
('008', 'Marine Management Organisation'),
]
def upgrade():
for numeric_id, name in NEW_ORGANISATIONS:
op.execute("""
INSERT
INTO dvla_organisation
VALUES ('{}', '{}')
""".format(numeric_id, name))
def downgrade():
for numeric_id, _ in NEW_ORGANISATIONS:
op.execute("""
DELETE
FROM dvla_organisation
WHERE id = '{}'
""".format(numeric_id))
|
|
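The migration above interpolates values straight into the SQL; with fixed, hard-coded rows that is harmless, but a bound-parameter version is a small change. A sketch, assuming standard SQLAlchemy text clauses, with NEW_ORGANISATIONS and op as defined in the migration:

import sqlalchemy as sa

def upgrade():
    for numeric_id, name in NEW_ORGANISATIONS:
        # Bound parameters instead of str.format interpolation.
        op.execute(
            sa.text("INSERT INTO dvla_organisation VALUES (:id, :name)")
            .bindparams(id=numeric_id, name=name))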
cb87c6e2c58593d5ef5710c4d7db0c18831e354c
|
tools/clear_zk.py
|
tools/clear_zk.py
|
#!/usr/bin/env python
import contextlib
import os
import re
import sys
top_dir = os.path.abspath(os.path.join(os.path.dirname(__file__),
os.pardir))
sys.path.insert(0, top_dir)
from taskflow.utils import kazoo_utils
@contextlib.contextmanager
def finalize_client(client):
try:
yield client
finally:
kazoo_utils.finalize_client(client)
def iter_children(client, path):
if client.exists(path):
for child_path in client.get_children(path):
if path == "/":
child_path = "/%s" % (child_path)
else:
child_path = "%s/%s" % (path, child_path)
yield child_path
for child_child_path in iter_children(client, child_path):
yield child_child_path
def main():
conf = {}
if len(sys.argv) > 1:
conf['hosts'] = sys.argv[1:]
with finalize_client(kazoo_utils.make_client(conf)) as client:
client.start(timeout=1.0)
children = list(iter_children(client, "/taskflow"))
for child_path in reversed(children):
if not re.match(r"^/taskflow/(.*?)-test/(.*)$", child_path):
continue
print("Deleting %s" % child_path)
client.delete(child_path)
if __name__ == "__main__":
main()
|
Add a helper tool which clears zookeeper test dirs
|
Add a helper tool which clears zookeeper test dirs
Create a tool that can clear any leftover garbage left
by the testing of taskflow unit tests with zookeeper for
when this is needed (for example, a test does not clean up
correctly on its own).
Change-Id: Icfaf28273b76a6ca27683d174f111fba2858f055
|
Python
|
apache-2.0
|
openstack/taskflow,pombredanne/taskflow-1,junneyang/taskflow,junneyang/taskflow,jimbobhickville/taskflow,openstack/taskflow,jimbobhickville/taskflow,pombredanne/taskflow-1
|
Add a helper tool which clears zookeeper test dirs
Create a tool that can clear any leftover garbage left
by the testing of taskflow unit tests with zookeeper for
when this is needed (for example, a test does not clean up
correctly on its own).
Change-Id: Icfaf28273b76a6ca27683d174f111fba2858f055
|
#!/usr/bin/env python
import contextlib
import os
import re
import sys
top_dir = os.path.abspath(os.path.join(os.path.dirname(__file__),
os.pardir))
sys.path.insert(0, top_dir)
from taskflow.utils import kazoo_utils
@contextlib.contextmanager
def finalize_client(client):
try:
yield client
finally:
kazoo_utils.finalize_client(client)
def iter_children(client, path):
if client.exists(path):
for child_path in client.get_children(path):
if path == "/":
child_path = "/%s" % (child_path)
else:
child_path = "%s/%s" % (path, child_path)
yield child_path
for child_child_path in iter_children(client, child_path):
yield child_child_path
def main():
conf = {}
if len(sys.argv) > 1:
conf['hosts'] = sys.argv[1:]
with finalize_client(kazoo_utils.make_client(conf)) as client:
client.start(timeout=1.0)
children = list(iter_children(client, "/taskflow"))
for child_path in reversed(children):
if not re.match(r"^/taskflow/(.*?)-test/(.*)$", child_path):
continue
print("Deleting %s" % child_path)
client.delete(child_path)
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Add a helper tool which clears zookeeper test dirs
Create a tool that can clear any leftover garbage left
by the testing of taskflow unit tests with zookeeper for
when this is needed (for example, a test does not clean up
correctly on its own).
Change-Id: Icfaf28273b76a6ca27683d174f111fba2858f055<commit_after>
|
#!/usr/bin/env python
import contextlib
import os
import re
import sys
top_dir = os.path.abspath(os.path.join(os.path.dirname(__file__),
os.pardir))
sys.path.insert(0, top_dir)
from taskflow.utils import kazoo_utils
@contextlib.contextmanager
def finalize_client(client):
try:
yield client
finally:
kazoo_utils.finalize_client(client)
def iter_children(client, path):
if client.exists(path):
for child_path in client.get_children(path):
if path == "/":
child_path = "/%s" % (child_path)
else:
child_path = "%s/%s" % (path, child_path)
yield child_path
for child_child_path in iter_children(client, child_path):
yield child_child_path
def main():
conf = {}
if len(sys.argv) > 1:
conf['hosts'] = sys.argv[1:]
with finalize_client(kazoo_utils.make_client(conf)) as client:
client.start(timeout=1.0)
children = list(iter_children(client, "/taskflow"))
for child_path in reversed(children):
if not re.match(r"^/taskflow/(.*?)-test/(.*)$", child_path):
continue
print("Deleting %s" % child_path)
client.delete(child_path)
if __name__ == "__main__":
main()
|
Add a helper tool which clears zookeeper test dirs
Create a tool that can clear any leftover garbage left
by the testing of taskflow unit tests with zookeeper for
when this is needed (for example, a test does not clean up
correctly on its own).
Change-Id: Icfaf28273b76a6ca27683d174f111fba2858f055
#!/usr/bin/env python
import contextlib
import os
import re
import sys
top_dir = os.path.abspath(os.path.join(os.path.dirname(__file__),
os.pardir))
sys.path.insert(0, top_dir)
from taskflow.utils import kazoo_utils
@contextlib.contextmanager
def finalize_client(client):
try:
yield client
finally:
kazoo_utils.finalize_client(client)
def iter_children(client, path):
if client.exists(path):
for child_path in client.get_children(path):
if path == "/":
child_path = "/%s" % (child_path)
else:
child_path = "%s/%s" % (path, child_path)
yield child_path
for child_child_path in iter_children(client, child_path):
yield child_child_path
def main():
conf = {}
if len(sys.argv) > 1:
conf['hosts'] = sys.argv[1:]
with finalize_client(kazoo_utils.make_client(conf)) as client:
client.start(timeout=1.0)
children = list(iter_children(client, "/taskflow"))
for child_path in reversed(children):
if not re.match(r"^/taskflow/(.*?)-test/(.*)$", child_path):
continue
print("Deleting %s" % child_path)
client.delete(child_path)
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Add a helper tool which clears zookeeper test dirs
Create a tool that can clear any leftover garbage left
by the testing of taskflow unit tests with zookeeper for
when this is needed (for example, a test does not clean up
correctly on its own).
Change-Id: Icfaf28273b76a6ca27683d174f111fba2858f055<commit_after>#!/usr/bin/env python
import contextlib
import os
import re
import sys
top_dir = os.path.abspath(os.path.join(os.path.dirname(__file__),
os.pardir))
sys.path.insert(0, top_dir)
from taskflow.utils import kazoo_utils
@contextlib.contextmanager
def finalize_client(client):
try:
yield client
finally:
kazoo_utils.finalize_client(client)
def iter_children(client, path):
if client.exists(path):
for child_path in client.get_children(path):
if path == "/":
child_path = "/%s" % (child_path)
else:
child_path = "%s/%s" % (path, child_path)
yield child_path
for child_child_path in iter_children(client, child_path):
yield child_child_path
def main():
conf = {}
if len(sys.argv) > 1:
conf['hosts'] = sys.argv[1:]
with finalize_client(kazoo_utils.make_client(conf)) as client:
client.start(timeout=1.0)
children = list(iter_children(client, "/taskflow"))
for child_path in reversed(children):
if not re.match(r"^/taskflow/(.*?)-test/(.*)$", child_path):
continue
print("Deleting %s" % child_path)
client.delete(child_path)
if __name__ == "__main__":
main()
|
|
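A hypothetical dry-run companion to main(): same traversal and same path filter, but it only reports what would be deleted. finalize_client, iter_children, kazoo_utils and re all come from the script itself.

def dry_run(hosts=None):
    conf = {'hosts': hosts} if hosts else {}
    with finalize_client(kazoo_utils.make_client(conf)) as client:
        client.start(timeout=1.0)
        for child_path in reversed(list(iter_children(client, "/taskflow"))):
            if re.match(r"^/taskflow/(.*?)-test/(.*)$", child_path):
                # Report only; no client.delete() in the dry run.
                print("Would delete %s" % child_path)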
b1bf21e7331ef4c6191ac27a7621deb04b086ffc
|
tests/test_tileutils.py
|
tests/test_tileutils.py
|
import pytest
import numpy as np
from pytilemap.tileutils import posFromLonLat, lonLatFromPos
LATITUDES = np.arange(-90, 90).astype(np.float64)
LONGITUDES = np.arange(-180, 180).astype(np.float64)
ZOOMS = np.arange(1, 20).astype(np.int64)
def referencePosFromLonLat(lon, lat, zoom, tileSize):
tx = (lon + 180.0) / 360.0
ty = (1.0 - np.log(np.tan(np.deg2rad(lat)) + 1.0 / np.cos(np.deg2rad(lat))) / np.pi) / 2.0
zn = (1 << zoom) * float(tileSize)
tx *= zn
ty *= zn
return tx, ty
def referenceLonLatFromPos(x, y, zoom, tileSize):
tx = x / tileSize
ty = y / tileSize
zn = 1 << zoom
lon = tx / zn * 360.0 - 180.0
n = np.pi - np.pi * 2. * ty / zn
lat = np.rad2deg(np.arctan(0.5 * (np.exp(n) - np.exp(-n))))
return lon, lat
@pytest.mark.parametrize('lons,lats,zooms', [(LONGITUDES[::2], LATITUDES[::2], ZOOMS[::2])])
def test_posFromLonLat_viceversa(lons, lats, zooms):
for lon in lons:
for lat in lats:
for zoom in zooms:
tx, ty = posFromLonLat(lon, lat, zoom, 256)
refTx, refTy = posFromLonLat(lon, lat, zoom, 256)
assert tx == refTx
assert ty == refTy
ll = lonLatFromPos(tx, ty, zoom, 256)
assert abs(ll[0] - lon) < 1e-13
assert abs(ll[1] - lat) < 1e-13
|
Add tests for tileutils module
|
Add tests for tileutils module
|
Python
|
mit
|
allebacco/PyTileMap
|
Add tests for tileutils module
|
import pytest
import numpy as np
from pytilemap.tileutils import posFromLonLat, lonLatFromPos
LATITUDES = np.arange(-90, 90).astype(np.float64)
LONGITUDES = np.arange(-180, 180).astype(np.float64)
ZOOMS = np.arange(1, 20).astype(np.int64)
def referencePosFromLonLat(lon, lat, zoom, tileSize):
tx = (lon + 180.0) / 360.0
ty = (1.0 - np.log(np.tan(np.deg2rad(lat)) + 1.0 / np.cos(np.deg2rad(lat))) / np.pi) / 2.0
zn = (1 << zoom) * float(tileSize)
tx *= zn
ty *= zn
return tx, ty
def referenceLonLatFromPos(x, y, zoom, tileSize):
tx = x / tileSize
ty = y / tileSize
zn = 1 << zoom
lon = tx / zn * 360.0 - 180.0
n = np.pi - np.pi * 2. * ty / zn
lat = np.rad2deg(np.arctan(0.5 * (np.exp(n) - np.exp(-n))))
return lon, lat
@pytest.mark.parametrize('lons,lats,zooms', [(LONGITUDES[::2], LATITUDES[::2], ZOOMS[::2])])
def test_posFromLonLat_viceversa(lons, lats, zooms):
for lon in lons:
for lat in lats:
for zoom in zooms:
tx, ty = posFromLonLat(lon, lat, zoom, 256)
refTx, refTy = posFromLonLat(lon, lat, zoom, 256)
assert tx == refTx
assert ty == refTy
ll = lonLatFromPos(tx, ty, zoom, 256)
assert abs(ll[0] - lon) < 1e-13
assert abs(ll[1] - lat) < 1e-13
|
<commit_before><commit_msg>Add tests for tileutils module<commit_after>
|
import pytest
import numpy as np
from pytilemap.tileutils import posFromLonLat, lonLatFromPos
LATITUDES = np.arange(-90, 90).astype(np.float64)
LONGITUDES = np.arange(-180, 180).astype(np.float64)
ZOOMS = np.arange(1, 20).astype(np.int64)
def referencePosFromLonLat(lon, lat, zoom, tileSize):
tx = (lon + 180.0) / 360.0
ty = (1.0 - np.log(np.tan(np.deg2rad(lat)) + 1.0 / np.cos(np.deg2rad(lat))) / np.pi) / 2.0
zn = (1 << zoom) * float(tileSize)
tx *= zn
ty *= zn
return tx, ty
def referenceLonLatFromPos(x, y, zoom, tileSize):
tx = x / tileSize
ty = y / tileSize
zn = 1 << zoom
lon = tx / zn * 360.0 - 180.0
n = np.pi - np.pi * 2. * ty / zn
lat = np.rad2deg(np.arctan(0.5 * (np.exp(n) - np.exp(-n))))
return lon, lat
@pytest.mark.parametrize('lons,lats,zooms', [(LONGITUDES[::2], LATITUDES[::2], ZOOMS[::2])])
def test_posFromLonLat_viceversa(lons, lats, zooms):
for lon in lons:
for lat in lats:
for zoom in zooms:
tx, ty = posFromLonLat(lon, lat, zoom, 256)
refTx, refTy = posFromLonLat(lon, lat, zoom, 256)
assert tx == refTx
assert ty == refTy
ll = lonLatFromPos(tx, ty, zoom, 256)
assert abs(ll[0] - lon) < 1e-13
assert abs(ll[1] - lat) < 1e-13
|
Add tests for tileutils module
import pytest
import numpy as np
from pytilemap.tileutils import posFromLonLat, lonLatFromPos
LATITUDES = np.arange(-90, 90).astype(np.float64)
LONGITUDES = np.arange(-180, 180).astype(np.float64)
ZOOMS = np.arange(1, 20).astype(np.int64)
def referencePosFromLonLat(lon, lat, zoom, tileSize):
tx = (lon + 180.0) / 360.0
ty = (1.0 - np.log(np.tan(np.deg2rad(lat)) + 1.0 / np.cos(np.deg2rad(lat))) / np.pi) / 2.0
zn = (1 << zoom) * float(tileSize)
tx *= zn
ty *= zn
return tx, ty
def referenceLonLatFromPos(x, y, zoom, tileSize):
tx = x / tileSize
ty = y / tileSize
zn = 1 << zoom
lon = tx / zn * 360.0 - 180.0
n = np.pi - np.pi * 2. * ty / zn
lat = np.rad2deg(np.arctan(0.5 * (np.exp(n) - np.exp(-n))))
return lon, lat
@pytest.mark.parametrize('lons,lats,zooms', [(LONGITUDES[::2], LATITUDES[::2], ZOOMS[::2])])
def test_posFromLonLat_viceversa(lons, lats, zooms):
for lon in lons:
for lat in lats:
for zoom in zooms:
tx, ty = posFromLonLat(lon, lat, zoom, 256)
refTx, refTy = posFromLonLat(lon, lat, zoom, 256)
assert tx == refTx
assert ty == refTy
ll = lonLatFromPos(tx, ty, zoom, 256)
assert abs(ll[0] - lon) < 1e-13
assert abs(ll[1] - lat) < 1e-13
|
<commit_before><commit_msg>Add tests for tileutils module<commit_after>import pytest
import numpy as np
from pytilemap.tileutils import posFromLonLat, lonLatFromPos
LATITUDES = np.arange(-90, 90).astype(np.float64)
LONGITUDES = np.arange(-180, 180).astype(np.float64)
ZOOMS = np.arange(1, 20).astype(np.int64)
def referencePosFromLonLat(lon, lat, zoom, tileSize):
tx = (lon + 180.0) / 360.0
ty = (1.0 - np.log(np.tan(np.deg2rad(lat)) + 1.0 / np.cos(np.deg2rad(lat))) / np.pi) / 2.0
zn = (1 << zoom) * float(tileSize)
tx *= zn
ty *= zn
return tx, ty
def referenceLonLatFromPos(x, y, zoom, tileSize):
tx = x / tileSize
ty = y / tileSize
zn = 1 << zoom
lon = tx / zn * 360.0 - 180.0
n = np.pi - np.pi * 2. * ty / zn
lat = np.rad2deg(np.arctan(0.5 * (np.exp(n) - np.exp(-n))))
return lon, lat
@pytest.mark.parametrize('lons,lats,zooms', [(LONGITUDES[::2], LATITUDES[::2], ZOOMS[::2])])
def test_posFromLonLat_viceversa(lons, lats, zooms):
for lon in lons:
for lat in lats:
for zoom in zooms:
tx, ty = posFromLonLat(lon, lat, zoom, 256)
                refTx, refTy = referencePosFromLonLat(lon, lat, zoom, 256)
assert tx == refTx
assert ty == refTy
ll = lonLatFromPos(tx, ty, zoom, 256)
assert abs(ll[0] - lon) < 1e-13
assert abs(ll[1] - lat) < 1e-13
|
|
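A quick round-trip sketch of the two converters exercised by the tests above; the import path comes from the record, while the coordinates, zoom level, and tolerance are arbitrary illustrative values:

# Convert one lon/lat to global pixel coordinates and back (pytilemap.tileutils).
from pytilemap.tileutils import posFromLonLat, lonLatFromPos

zoom, tile_size = 10, 256
x, y = posFromLonLat(-73.98, 40.75, zoom, tile_size)   # lon/lat -> pixel position
lon, lat = lonLatFromPos(x, y, zoom, tile_size)        # pixel position -> lon/lat
assert abs(lon + 73.98) < 1e-9 and abs(lat - 40.75) < 1e-9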
cfe5f9240c2ccbb41c1dd3e0ea7b22ffddf80a09
|
app/api_v1/serializers.py
|
app/api_v1/serializers.py
|
"""This module defines the format used by marshall to map the models."""
from flask_restful import fields
bucketlistitem_serializer = {
'id': fields.Integer,
'item_name': fields.String,
'priority': fields.String,
'done': fields.Boolean,
'date_created': fields.DateTime,
'date_modified': fields.DateTime
}
bucketlist_serializer = {
'id': fields.Integer,
'list_name': fields.String,
'bucketlist_items': fields.Nested(bucketlistitem_serializer),
'created_by': fields.Integer
}
user_serializer = {
'id': fields.Integer,
'username': fields.String,
'bucketlists': fields.Nested(bucketlist_serializer)
}
|
Add marshal fields to map with models.
|
[Feature] Add marshal fields to map with models.
|
Python
|
mit
|
andela-akiura/bucketlist
|
[Feature] Add marshal fields to map with models.
|
"""This module defines the format used by marshall to map the models."""
from flask_restful import fields
bucketlistitem_serializer = {
'id': fields.Integer,
'item_name': fields.String,
'priority': fields.String,
'done': fields.Boolean,
'date_created': fields.DateTime,
'date_modified': fields.DateTime
}
bucketlist_serializer = {
'id': fields.Integer,
'list_name': fields.String,
'bucketlist_items': fields.Nested(bucketlistitem_serializer),
'created_by': fields.Integer
}
user_serializer = {
'id': fields.Integer,
'username': fields.String,
'bucketlists': fields.Nested(bucketlist_serializer)
}
|
<commit_before><commit_msg>[Feature] Add marshal fields to map with models.<commit_after>
|
"""This module defines the format used by marshall to map the models."""
from flask_restful import fields
bucketlistitem_serializer = {
'id': fields.Integer,
'item_name': fields.String,
'priority': fields.String,
'done': fields.Boolean,
'date_created': fields.DateTime,
'date_modified': fields.DateTime
}
bucketlist_serializer = {
'id': fields.Integer,
'list_name': fields.String,
'bucketlist_items': fields.Nested(bucketlistitem_serializer),
'created_by': fields.Integer
}
user_serializer = {
'id': fields.Integer,
'username': fields.String,
'bucketlists': fields.Nested(bucketlist_serializer)
}
|
[Feature] Add marshal fields to map with models."""This module defines the format used by marshal to map the models."""
from flask_restful import fields
bucketlistitem_serializer = {
'id': fields.Integer,
'item_name': fields.String,
'priority': fields.String,
'done': fields.Boolean,
'date_created': fields.DateTime,
'date_modified': fields.DateTime
}
bucketlist_serializer = {
'id': fields.Integer,
'list_name': fields.String,
'bucketlist_items': fields.Nested(bucketlistitem_serializer),
'created_by': fields.Integer
}
user_serializer = {
'id': fields.Integer,
'username': fields.String,
'bucketlists': fields.Nested(bucketlist_serializer)
}
|
<commit_before><commit_msg>[Feature] Add marshal fields to map with models.<commit_after>"""This module defines the format used by marshal to map the models."""
from flask_restful import fields
bucketlistitem_serializer = {
'id': fields.Integer,
'item_name': fields.String,
'priority': fields.String,
'done': fields.Boolean,
'date_created': fields.DateTime,
'date_modified': fields.DateTime
}
bucketlist_serializer = {
'id': fields.Integer,
'list_name': fields.String,
'bucketlist_items': fields.Nested(bucketlistitem_serializer),
'created_by': fields.Integer
}
user_serializer = {
'id': fields.Integer,
'username': fields.String,
'bucketlists': fields.Nested(bucketlist_serializer)
}
|
|
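These field maps are meant to be applied with flask_restful's marshal helper; a minimal sketch, where FakeUser is a hypothetical stand-in for the SQLAlchemy model:

# Applying user_serializer to a stub object; output shape only, values illustrative.
from flask_restful import marshal
from app.api_v1.serializers import user_serializer

class FakeUser:
    id = 1
    username = 'alice'
    bucketlists = []  # would normally hold Bucketlist model instances

print(marshal(FakeUser(), user_serializer))
# roughly: {'id': 1, 'username': 'alice', 'bucketlists': []}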
779f4323636d2476b3e51b6e57f2c130e9d4d466
|
src/tools/make_dates.py
|
src/tools/make_dates.py
|
#!/usr/bin/env python
# coding: utf-8
"""Make dates on second Tuesday of the month for full year"""
import datetime
import sys
def second_tuesday(year, month):
"""Find the second Tuesday in a month."""
date = datetime.date(year, month, 1)
seen = 0
day = datetime.timedelta(days=1)
    while True:
        if date.weekday() == 1:
            seen += 1
            if seen == 2:
                return date
        date += day
def make_dates(year):
"""Make list of second Tuesdays of `year`."""
return [second_tuesday(year, month) for month in range(1, 13)]
def show_date_table(year, comment_out=True):
"""Print out a table with all second Tuesdays of `year`.
    `comment_out` comments out all lines with `#` so it does not show
up in markdown.
"""
comment = '# ' if comment_out else ''
for date in make_dates(year):
print(f'{comment}{date:%d.%m.%Y} | TBD |')
def main(year=None):
"""Show table of second Tuesdays for `year`.
`year` needs to be supplied via command line if `None`.
"""
if year is None:
try:
year = int(sys.argv[1])
except (IndexError, ValueError):
            print('Must specify year as first command line argument.')
return
show_date_table(year)
if __name__ == '__main__':
main()
|
Add tool to make dates for second Tuesday in a month
|
Add tool to make dates for second Tuesday in a month
|
Python
|
mit
|
LPUG/LPUG.github.io,LPUG/LPUG.github.io
|
Add tool to make dates for second Tuesday in a month
|
#!/usr/bin/env python
# coding: utf-8
"""Make dates on second Tuesday of the month for full year"""
import datetime
import sys
def second_tuesday(year, month):
"""Find the second Tuesday in a month."""
date = datetime.date(year, month, 1)
seen = 0
day = datetime.timedelta(days=1)
    while True:
        if date.weekday() == 1:
            seen += 1
            if seen == 2:
                return date
        date += day
def make_dates(year):
"""Make list of second Tuesdays of `year`."""
return [second_tuesday(year, month) for month in range(1, 13)]
def show_date_table(year, comment_out=True):
"""Print out a table with all second Tuesdays of `year`.
    `comment_out` comments out all lines with `#` so it does not show
up in markdown.
"""
comment = '# ' if comment_out else ''
for date in make_dates(year):
print(f'{comment}{date:%d.%m.%Y} | TBD |')
def main(year=None):
"""Show table of second Tuesdays for `year`.
`year` needs to be supplied via command line if `None`.
"""
if year is None:
try:
year = int(sys.argv[1])
except (IndexError, ValueError):
            print('Must specify year as first command line argument.')
return
show_date_table(year)
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add tool to make dates for second Tuesday in a month<commit_after>
|
#!/usr/bin/env python
# coding: utf-8
"""Make dates on second Tuesday of the month for full year"""
import datetime
import sys
def second_tuesday(year, month):
"""Find the second Tuesday in a month."""
date = datetime.date(year, month, 1)
seen = 0
day = datetime.timedelta(days=1)
    while True:
        if date.weekday() == 1:
            seen += 1
            if seen == 2:
                return date
        date += day
def make_dates(year):
"""Make list of second Tuesdays of `year`."""
return [second_tuesday(year, month) for month in range(1, 13)]
def show_date_table(year, comment_out=True):
"""Print out a table with all second Tuesdays of `year`.
    `comment_out` comments out all lines with `#` so it does not show
up in markdown.
"""
comment = '# ' if comment_out else ''
for date in make_dates(year):
print(f'{comment}{date:%d.%m.%Y} | TBD |')
def main(year=None):
"""Show table of second Tuesdays for `year`.
`year` needs to be supplied via command line if `None`.
"""
if year is None:
try:
year = int(sys.argv[1])
except (IndexError, ValueError):
            print('Must specify year as first command line argument.')
return
show_date_table(year)
if __name__ == '__main__':
main()
|
Add tool to make dates for second Tuesday in a month#!/usr/bin/env python
# coding: utf-8
"""Make dates on second Tuesday of the month for full year"""
import datetime
import sys
def second_tuesday(year, month):
"""Find the second Tuesday in a month."""
date = datetime.date(year, month, 1)
seen = 0
day = datetime.timedelta(days=1)
    while True:
        if date.weekday() == 1:
            seen += 1
            if seen == 2:
                return date
        date += day
def make_dates(year):
"""Make list of second Tuesdays of `year`."""
return [second_tuesday(year, month) for month in range(1, 13)]
def show_date_table(year, comment_out=True):
"""Print out a table with all second Tuesdays of `year`.
    `comment_out` comments out all lines with `#` so it does not show
up in markdown.
"""
comment = '# ' if comment_out else ''
for date in make_dates(year):
print(f'{comment}{date:%d.%m.%Y} | TBD |')
def main(year=None):
"""Show table of second Tuesdays for `year`.
`year` needs to be supplied via command line if `None`.
"""
if year is None:
try:
year = int(sys.argv[1])
except (IndexError, ValueError):
            print('Must specify year as first command line argument.')
return
show_date_table(year)
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add tool to make dates for second Tuesday in a month<commit_after>#!/usr/bin/env python
# coding: utf-8
"""Make dates on second Tuesday of the month for full year"""
import datetime
import sys
def second_tuesday(year, month):
"""Find the second Tuesday in a month."""
date = datetime.date(year, month, 1)
seen = 0
day = datetime.timedelta(days=1)
    while True:
        if date.weekday() == 1:
            seen += 1
            if seen == 2:
                return date
        date += day
def make_dates(year):
"""Make list of second Tuesdays of `year`."""
return [second_tuesday(year, month) for month in range(1, 13)]
def show_date_table(year, comment_out=True):
"""Print out a table with all second Tuesdays of `year`.
    `comment_out` comments out all lines with `#` so it does not show
up in markdown.
"""
comment = '# ' if comment_out else ''
for date in make_dates(year):
print(f'{comment}{date:%d.%m.%Y} | TBD |')
def main(year=None):
"""Show table of second Tuesdays for `year`.
`year` needs to be supplied via command line if `None`.
"""
if year is None:
try:
year = int(sys.argv[1])
except (IndexError, ValueError):
            print('Must specify year as first command line argument.')
return
show_date_table(year)
if __name__ == '__main__':
main()
|
|
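The helpers can also be used directly, bypassing the CLI; the module name and the year below are illustrative:

# Direct use of the date helpers (assumes the file is importable as make_dates).
from make_dates import second_tuesday, make_dates

print(second_tuesday(2024, 1))                    # 2024-01-09
print([d.isoformat() for d in make_dates(2024)])  # twelve dates, one per month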
af9bf2a6db96b069ef62054a4ab09f66f2dd048a
|
plugin_manager.py
|
plugin_manager.py
|
"""
Copyright 2011 Ryan Fobel
This file is part of dmf_control_board.
Microdrop is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Microdrop is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Microdrop. If not, see <http://www.gnu.org/licenses/>.
"""
from pyutilib.component.core import Interface, ExtensionPoint, \
SingletonPlugin, implements
import pyutilib.component.loader
class PluginManager():
def __init__(self):
pyutilib.component.loader.PluginGlobals.load_services(
path="plugins", auto_disable=False)
class IPlugin(Interface):
def edit_options():
"""
Edit the options for this plugin.
"""
pass
def on_app_init(app=None):
"""
Handler called once when the Microdrop application starts.
Plugins should store a reference to the app object if they need to
access other components.
"""
pass
def on_app_exit(self):
"""
        Handler called just before the Microdrop application exits.
"""
pass
def on_protocol_update():
"""
Handler called whenever the current protocol step changes.
"""
pass
def on_delete_protocol_step():
"""
Handler called whenever a protocol step is deleted.
"""
pass
def on_insert_protocol_step():
"""
Handler called whenever a protocol step is inserted.
"""
pass
def on_protocol_run():
"""
Handler called when a protocol starts running.
"""
pass
def on_protocol_pause():
"""
Handler called when a protocol is paused.
"""
pass
|
Add PluginManager class and IPlugin interface
|
Add PluginManager class and IPlugin interface
This file should have been included in commit 46c1c31a2bf3c3d953d35195667ccc1af7df3c04
References #4
|
Python
|
bsd-3-clause
|
wheeler-microfluidics/microdrop
|
Add PluginManager class and IPlugin interface
This file should have been included in commit 46c1c31a2bf3c3d953d35195667ccc1af7df3c04
References #4
|
"""
Copyright 2011 Ryan Fobel
This file is part of dmf_control_board.
Microdrop is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Microdrop is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Microdrop. If not, see <http://www.gnu.org/licenses/>.
"""
from pyutilib.component.core import Interface, ExtensionPoint, \
SingletonPlugin, implements
import pyutilib.component.loader
class PluginManager():
def __init__(self):
pyutilib.component.loader.PluginGlobals.load_services(
path="plugins", auto_disable=False)
class IPlugin(Interface):
def edit_options():
"""
Edit the options for this plugin.
"""
pass
def on_app_init(app=None):
"""
Handler called once when the Microdrop application starts.
Plugins should store a reference to the app object if they need to
access other components.
"""
pass
def on_app_exit(self):
"""
        Handler called just before the Microdrop application exits.
"""
pass
def on_protocol_update():
"""
Handler called whenever the current protocol step changes.
"""
pass
def on_delete_protocol_step():
"""
Handler called whenever a protocol step is deleted.
"""
pass
def on_insert_protocol_step():
"""
Handler called whenever a protocol step is inserted.
"""
pass
def on_protocol_run():
"""
Handler called when a protocol starts running.
"""
pass
def on_protocol_pause():
"""
Handler called when a protocol is paused.
"""
pass
|
<commit_before><commit_msg>Add PluginManager class and IPlugin interface
This file should have been included in commit 46c1c31a2bf3c3d953d35195667ccc1af7df3c04
References #4<commit_after>
|
"""
Copyright 2011 Ryan Fobel
This file is part of dmf_control_board.
Microdrop is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Microdrop is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Microdrop. If not, see <http://www.gnu.org/licenses/>.
"""
from pyutilib.component.core import Interface, ExtensionPoint, \
SingletonPlugin, implements
import pyutilib.component.loader
class PluginManager():
def __init__(self):
pyutilib.component.loader.PluginGlobals.load_services(
path="plugins", auto_disable=False)
class IPlugin(Interface):
def edit_options():
"""
Edit the options for this plugin.
"""
pass
def on_app_init(app=None):
"""
Handler called once when the Microdrop application starts.
Plugins should store a reference to the app object if they need to
access other components.
"""
pass
def on_app_exit(self):
"""
        Handler called just before the Microdrop application exits.
"""
pass
def on_protocol_update():
"""
Handler called whenever the current protocol step changes.
"""
pass
def on_delete_protocol_step():
"""
Handler called whenever a protocol step is deleted.
"""
pass
def on_insert_protocol_step():
"""
Handler called whenever a protocol step is inserted.
"""
pass
def on_protocol_run():
"""
Handler called when a protocol starts running.
"""
pass
def on_protocol_pause():
"""
Handler called when a protocol is paused.
"""
pass
|
Add PluginManager class and IPlugin interface
This file should have been included in commit 46c1c31a2bf3c3d953d35195667ccc1af7df3c04
References #4"""
Copyright 2011 Ryan Fobel
This file is part of dmf_control_board.
Microdrop is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Microdrop is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Microdrop. If not, see <http://www.gnu.org/licenses/>.
"""
from pyutilib.component.core import Interface, ExtensionPoint, \
SingletonPlugin, implements
import pyutilib.component.loader
class PluginManager():
def __init__(self):
pyutilib.component.loader.PluginGlobals.load_services(
path="plugins", auto_disable=False)
class IPlugin(Interface):
def edit_options():
"""
Edit the options for this plugin.
"""
pass
def on_app_init(app=None):
"""
Handler called once when the Microdrop application starts.
Plugins should store a reference to the app object if they need to
access other components.
"""
pass
def on_app_exit(self):
"""
        Handler called just before the Microdrop application exits.
"""
pass
def on_protocol_update():
"""
Handler called whenever the current protocol step changes.
"""
pass
def on_delete_protocol_step():
"""
Handler called whenever a protocol step is deleted.
"""
pass
def on_insert_protocol_step():
"""
Handler called whenever a protocol step is inserted.
"""
pass
def on_protocol_run():
"""
Handler called when a protocol starts running.
"""
pass
def on_protocol_pause():
"""
Handler called when a protocol is paused.
"""
pass
|
<commit_before><commit_msg>Add PluginManager class and IPlugin interface
This file should have been included in commit 46c1c31a2bf3c3d953d35195667ccc1af7df3c04
References #4<commit_after>"""
Copyright 2011 Ryan Fobel
This file is part of dmf_control_board.
Microdrop is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Microdrop is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Microdrop. If not, see <http://www.gnu.org/licenses/>.
"""
from pyutilib.component.core import Interface, ExtensionPoint, \
SingletonPlugin, implements
import pyutilib.component.loader
class PluginManager():
def __init__(self):
pyutilib.component.loader.PluginGlobals.load_services(
path="plugins", auto_disable=False)
class IPlugin(Interface):
def edit_options():
"""
Edit the options for this plugin.
"""
pass
def on_app_init(app=None):
"""
Handler called once when the Microdrop application starts.
Plugins should store a reference to the app object if they need to
access other components.
"""
pass
def on_app_exit(self):
"""
        Handler called just before the Microdrop application exits.
"""
pass
def on_protocol_update():
"""
Handler called whenever the current protocol step changes.
"""
pass
def on_delete_protocol_step():
"""
Handler called whenever a protocol step is deleted.
"""
pass
def on_insert_protocol_step():
"""
Handler called whenever a protocol step is inserted.
"""
pass
def on_protocol_run():
"""
Handler called when a protocol starts running.
"""
pass
def on_protocol_pause():
"""
Handler called when a protocol is paused.
"""
pass
|
|
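For context, a plugin that this manager would load from the "plugins" path could look like the sketch below; the class name and messages are hypothetical, only the pyutilib idiom is taken from the record:

# Hypothetical plugin implementing the IPlugin interface declared above.
from pyutilib.component.core import SingletonPlugin, implements
from plugin_manager import IPlugin

class LoggingPlugin(SingletonPlugin):
    implements(IPlugin)

    def on_app_init(self, app=None):
        self.app = app  # keep the app reference to reach other components
        print('app initialised')

    def on_protocol_run(self):
        print('protocol started')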
c3eba3d5983472da3434d4c6acb4a7ab15036ee6
|
python/generators/fibonacci.py
|
python/generators/fibonacci.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2012 Jérémie DECOCK (http://www.jdhp.org)
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
def fibonacci(max_value):
"""Fibonacci series
The sum of two elements defines the next."""
a, b = 0, 1
while b < max_value:
yield b
a, b = b, a+b
def main():
"""Main function"""
for x in fibonacci(10):
print x
if __name__ == '__main__':
main()
|
Add a new snippet 'python/generators' (Fibonacci).
|
Add a new snippet 'python/generators' (Fibonacci).
|
Python
|
mit
|
jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets
|
Add a new snippet 'python/generators' (Fibonacci).
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2012 Jérémie DECOCK (http://www.jdhp.org)
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
def fibonacci(max_value):
"""Fibonacci series
The sum of two elements defines the next."""
a, b = 0, 1
while b < max_value:
yield b
a, b = b, a+b
def main():
"""Main function"""
for x in fibonacci(10):
print x
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add a new snippet 'python/generators' (Fibonacci).<commit_after>
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2012 Jérémie DECOCK (http://www.jdhp.org)
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
def fibonacci(max_value):
"""Fibonacci series
The sum of two elements defines the next."""
a, b = 0, 1
while b < max_value:
yield b
a, b = b, a+b
def main():
"""Main function"""
for x in fibonacci(10):
print x
if __name__ == '__main__':
main()
|
Add a new snippet 'python/generators' (Fibonacci).#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2012 Jérémie DECOCK (http://www.jdhp.org)
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
def fibonacci(max_value):
"""Fibonacci series
The sum of two elements defines the next."""
a, b = 0, 1
while b < max_value:
yield b
a, b = b, a+b
def main():
"""Main function"""
for x in fibonacci(10):
print x
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add a new snippet 'python/generators' (Fibonacci).<commit_after>#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2012 Jérémie DECOCK (http://www.jdhp.org)
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
def fibonacci(max_value):
"""Fibonacci series
The sum of two elements defines the next."""
a, b = 0, 1
while b < max_value:
yield b
a, b = b, a+b
def main():
"""Main function"""
for x in fibonacci(10):
print x
if __name__ == '__main__':
main()
|
|
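A short driver makes the lazy evaluation visible; it runs under both Python 2 and 3, and the printed values follow directly from the definition:

# Consuming the generator incrementally; state is kept between next() calls.
gen = fibonacci(100)
print(next(gen))   # 1
print(next(gen))   # 1
print(list(gen))   # [2, 3, 5, 8, 13, 21, 34, 55, 89]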
50dd055975732755f2bcb9424bd2d5df9e1ae87a
|
kboard/core/tests/test_context_processors.py
|
kboard/core/tests/test_context_processors.py
|
from django.test import TestCase
from django.http import HttpRequest
from core.context_processors import navbar
from board.models import Board
class TestNavbarContextProcessor(TestCase):
def test_return_board_list_correctly(self):
test_board = Board.objects.create(
slug='slug',
name='board'
)
request = HttpRequest()
response = navbar(request)
boards = response['boards']
self.assertEqual(boards[0].slug, test_board.slug)
self.assertEqual(boards[0].name, test_board.name)
|
Add unit test of navbar context processor
|
Add unit test of navbar context processor
|
Python
|
mit
|
guswnsxodlf/k-board,hyesun03/k-board,darjeeling/k-board,cjh5414/kboard,hyesun03/k-board,guswnsxodlf/k-board,cjh5414/kboard,cjh5414/kboard,kboard/kboard,guswnsxodlf/k-board,kboard/kboard,kboard/kboard,hyesun03/k-board
|
Add unit test of navbar context processor
|
from django.test import TestCase
from django.http import HttpRequest
from core.context_processors import navbar
from board.models import Board
class TestNavbarContextProcessor(TestCase):
def test_return_board_list_correctly(self):
test_board = Board.objects.create(
slug='slug',
name='board'
)
request = HttpRequest()
response = navbar(request)
boards = response['boards']
self.assertEqual(boards[0].slug, test_board.slug)
self.assertEqual(boards[0].name, test_board.name)
|
<commit_before><commit_msg>Add unit test of navbar context processor<commit_after>
|
from django.test import TestCase
from django.http import HttpRequest
from core.context_processors import navbar
from board.models import Board
class TestNavbarContextProcessor(TestCase):
def test_return_board_list_correctly(self):
test_board = Board.objects.create(
slug='slug',
name='board'
)
request = HttpRequest()
response = navbar(request)
boards = response['boards']
self.assertEqual(boards[0].slug, test_board.slug)
self.assertEqual(boards[0].name, test_board.name)
|
Add unit test of navbar context processorfrom django.test import TestCase
from django.http import HttpRequest
from core.context_processors import navbar
from board.models import Board
class TestNavbarContextProcessor(TestCase):
def test_return_board_list_correctly(self):
test_board = Board.objects.create(
slug='slug',
name='board'
)
request = HttpRequest()
response = navbar(request)
boards = response['boards']
self.assertEqual(boards[0].slug, test_board.slug)
self.assertEqual(boards[0].name, test_board.name)
|
<commit_before><commit_msg>Add unit test of navbar context processor<commit_after>from django.test import TestCase
from django.http import HttpRequest
from core.context_processors import navbar
from board.models import Board
class TestNavbarContextProcessor(TestCase):
def test_return_board_list_correctly(self):
test_board = Board.objects.create(
slug='slug',
name='board'
)
request = HttpRequest()
response = navbar(request)
boards = response['boards']
self.assertEqual(boards[0].slug, test_board.slug)
self.assertEqual(boards[0].name, test_board.name)
|
|
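The test fixes the expected contract of the context processor; an implementation consistent with it would be roughly the following sketch (the real core/context_processors.py may differ):

# Hypothetical core/context_processors.py satisfying the test above.
from board.models import Board

def navbar(request):
    return {'boards': Board.objects.all()}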
b0a4a44d7c69875dc5890a8c62a25a8ae3c7900f
|
openfisca_core/scripts/find_placeholders.py
|
openfisca_core/scripts/find_placeholders.py
|
# -*- coding: utf-8 -*-
import os
import fnmatch
import sys
from bs4 import BeautifulSoup
def find_param_files(input_dir):
param_files = []
for root, dirnames, filenames in os.walk(input_dir):
for filename in fnmatch.filter(filenames, '*.xml'):
param_files.append(os.path.join(root, filename))
return param_files
def find_placeholders(filename_input):
with open(filename_input, 'r') as f:
xml_content = f.read()
xml_parsed = BeautifulSoup(xml_content, "lxml-xml")
placeholders = xml_parsed.find_all('PLACEHOLDER')
output_list = []
for placeholder in placeholders:
parent_list = list(placeholder.parents)[:-1]
path = '.'.join([p.attrs['code'] for p in parent_list if 'code' in p.attrs][::-1])
deb = placeholder.attrs['deb']
output_list.append((deb, path))
output_list = sorted(output_list, key = lambda x: x[0])
return output_list
if __name__ == "__main__":
print('''find_placeholders.py : Find nodes PLACEHOLDER in xml parameter files
Usage :
python find_placeholders /dir/to/search
''')
assert(len(sys.argv) == 2)
input_dir = sys.argv[1]
param_files = find_param_files(input_dir)
for filename_input in param_files:
output_list = find_placeholders(filename_input)
print('File {}'.format(filename_input))
for deb, path in output_list:
print('{} {}'.format(deb, path))
print('\n')
|
Add a script to find placeholders in legislation
|
Add a script to find placeholders in legislation
This script requires BeautifulSoup (bs4)
|
Python
|
agpl-3.0
|
openfisca/openfisca-core,openfisca/openfisca-core
|
Add a script to find placeholders in legislation
This script requires BeautifulSoup (bs4)
|
# -*- coding: utf-8 -*-
import os
import fnmatch
import sys
from bs4 import BeautifulSoup
def find_param_files(input_dir):
param_files = []
for root, dirnames, filenames in os.walk(input_dir):
for filename in fnmatch.filter(filenames, '*.xml'):
param_files.append(os.path.join(root, filename))
return param_files
def find_placeholders(filename_input):
with open(filename_input, 'r') as f:
xml_content = f.read()
xml_parsed = BeautifulSoup(xml_content, "lxml-xml")
placeholders = xml_parsed.find_all('PLACEHOLDER')
output_list = []
for placeholder in placeholders:
parent_list = list(placeholder.parents)[:-1]
path = '.'.join([p.attrs['code'] for p in parent_list if 'code' in p.attrs][::-1])
deb = placeholder.attrs['deb']
output_list.append((deb, path))
output_list = sorted(output_list, key = lambda x: x[0])
return output_list
if __name__ == "__main__":
print('''find_placeholders.py : Find nodes PLACEHOLDER in xml parameter files
Usage :
python find_placeholders /dir/to/search
''')
assert(len(sys.argv) == 2)
input_dir = sys.argv[1]
param_files = find_param_files(input_dir)
for filename_input in param_files:
output_list = find_placeholders(filename_input)
print('File {}'.format(filename_input))
for deb, path in output_list:
print('{} {}'.format(deb, path))
print('\n')
|
<commit_before><commit_msg>Add a script to find placeholders in legislation
This script requires BeautifulSoup (bs4)<commit_after>
|
# -*- coding: utf-8 -*-
import os
import fnmatch
import sys
from bs4 import BeautifulSoup
def find_param_files(input_dir):
param_files = []
for root, dirnames, filenames in os.walk(input_dir):
for filename in fnmatch.filter(filenames, '*.xml'):
param_files.append(os.path.join(root, filename))
return param_files
def find_placeholders(filename_input):
with open(filename_input, 'r') as f:
xml_content = f.read()
xml_parsed = BeautifulSoup(xml_content, "lxml-xml")
placeholders = xml_parsed.find_all('PLACEHOLDER')
output_list = []
for placeholder in placeholders:
parent_list = list(placeholder.parents)[:-1]
path = '.'.join([p.attrs['code'] for p in parent_list if 'code' in p.attrs][::-1])
deb = placeholder.attrs['deb']
output_list.append((deb, path))
output_list = sorted(output_list, key = lambda x: x[0])
return output_list
if __name__ == "__main__":
print('''find_placeholders.py : Find nodes PLACEHOLDER in xml parameter files
Usage :
python find_placeholders /dir/to/search
''')
assert(len(sys.argv) == 2)
input_dir = sys.argv[1]
param_files = find_param_files(input_dir)
for filename_input in param_files:
output_list = find_placeholders(filename_input)
print('File {}'.format(filename_input))
for deb, path in output_list:
print('{} {}'.format(deb, path))
print('\n')
|
Add a script to find placeholders in legislation
This script requires BeautifulSoup (bs4)# -*- coding: utf-8 -*-
import os
import fnmatch
import sys
from bs4 import BeautifulSoup
def find_param_files(input_dir):
param_files = []
for root, dirnames, filenames in os.walk(input_dir):
for filename in fnmatch.filter(filenames, '*.xml'):
param_files.append(os.path.join(root, filename))
return param_files
def find_placeholders(filename_input):
with open(filename_input, 'r') as f:
xml_content = f.read()
xml_parsed = BeautifulSoup(xml_content, "lxml-xml")
placeholders = xml_parsed.find_all('PLACEHOLDER')
output_list = []
for placeholder in placeholders:
parent_list = list(placeholder.parents)[:-1]
path = '.'.join([p.attrs['code'] for p in parent_list if 'code' in p.attrs][::-1])
deb = placeholder.attrs['deb']
output_list.append((deb, path))
output_list = sorted(output_list, key = lambda x: x[0])
return output_list
if __name__ == "__main__":
print('''find_placeholders.py : Find nodes PLACEHOLDER in xml parameter files
Usage :
python find_placeholders /dir/to/search
''')
assert(len(sys.argv) == 2)
input_dir = sys.argv[1]
param_files = find_param_files(input_dir)
for filename_input in param_files:
output_list = find_placeholders(filename_input)
print('File {}'.format(filename_input))
for deb, path in output_list:
print('{} {}'.format(deb, path))
print('\n')
|
<commit_before><commit_msg>Add a script to find placeholders in legislation
This script requires BeautifulSoup (bs4)<commit_after># -*- coding: utf-8 -*-
import os
import fnmatch
import sys
from bs4 import BeautifulSoup
def find_param_files(input_dir):
param_files = []
for root, dirnames, filenames in os.walk(input_dir):
for filename in fnmatch.filter(filenames, '*.xml'):
param_files.append(os.path.join(root, filename))
return param_files
def find_placeholders(filename_input):
with open(filename_input, 'r') as f:
xml_content = f.read()
xml_parsed = BeautifulSoup(xml_content, "lxml-xml")
placeholders = xml_parsed.find_all('PLACEHOLDER')
output_list = []
for placeholder in placeholders:
parent_list = list(placeholder.parents)[:-1]
path = '.'.join([p.attrs['code'] for p in parent_list if 'code' in p.attrs][::-1])
deb = placeholder.attrs['deb']
output_list.append((deb, path))
output_list = sorted(output_list, key = lambda x: x[0])
return output_list
if __name__ == "__main__":
print('''find_placeholders.py : Find nodes PLACEHOLDER in xml parameter files
Usage :
python find_placeholders /dir/to/search
''')
assert(len(sys.argv) == 2)
input_dir = sys.argv[1]
param_files = find_param_files(input_dir)
for filename_input in param_files:
output_list = find_placeholders(filename_input)
print('File {}'.format(filename_input))
for deb, path in output_list:
print('{} {}'.format(deb, path))
print('\n')
|
|
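The parent-walking logic can be demonstrated on an inline XML fragment without any file I/O; the XML content below is made up, and bs4 with the lxml parser is assumed to be installed:

# Same path reconstruction as find_placeholders(), on an inline fragment.
from bs4 import BeautifulSoup

xml = '<NODE code="impot"><NODE code="bareme"><PLACEHOLDER deb="2016-01-01"/></NODE></NODE>'
placeholder = BeautifulSoup(xml, 'lxml-xml').find('PLACEHOLDER')
codes = [p.attrs['code'] for p in list(placeholder.parents)[:-1] if 'code' in p.attrs]
print('.'.join(codes[::-1]), placeholder.attrs['deb'])  # impot.bareme 2016-01-01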
31a453c2fe3a668f4c826a2b5bced7ee50f58f2e
|
samples/http2/simple_server.py
|
samples/http2/simple_server.py
|
"""A simple HTTP/2 file server."""
import asyncio
import logging
import sys
from concurrent.futures.thread import ThreadPoolExecutor
import os.path
import urllib.parse
from http import HTTPStatus
from http2 import HttpError, Protocol
LOG = logging.getLogger(__name__)
class Handler:
def __init__(self, root_path=os.path.curdir, *, loop=None):
self.root_path = root_path
self.executor = ThreadPoolExecutor(max_workers=8)
self.loop = loop or asyncio.get_event_loop()
async def __call__(self, request, response):
try:
path = request.headers.get(b':path')
if path is None:
raise HttpError(HTTPStatus.BAD_REQUEST)
path = urllib.parse.unquote(path.decode('ascii'))
assert path.startswith('/')
local_path = os.path.join(self.root_path, path[1:])
if not os.path.isfile(local_path):
raise HttpError(HTTPStatus.NOT_FOUND)
LOG.info('GET %s', path)
with open(local_path, 'rb') as data:
contents = await self.loop.run_in_executor(
self.executor, data.read)
response.headers[b':status'] = b'200'
await response.write(contents)
await response.close()
except HttpError:
raise
except Exception:
LOG.exception('error when processing request')
raise HttpError(HTTPStatus.INTERNAL_SERVER_ERROR)
def main(argv):
if len(argv) < 2 or argv[1] == '-h':
        print('Usage: %s [-h] port' % argv[0])
return 0
logging.basicConfig(level=logging.DEBUG)
loop = asyncio.get_event_loop()
handler = Handler()
server = loop.run_until_complete(loop.create_server(
lambda: Protocol(lambda: handler),
'0.0.0.0',
int(argv[1]),
))
print('Serving on port %s' % argv[1])
try:
loop.run_forever()
except KeyboardInterrupt:
pass
finally:
server.close()
loop.run_until_complete(server.wait_closed())
loop.close()
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv))
|
Add a simple HTTP/2 file server for testing
|
Add a simple HTTP/2 file server for testing
|
Python
|
mit
|
clchiou/garage,clchiou/garage,clchiou/garage,clchiou/garage
|
Add a simple HTTP/2 file server for testing
|
"""A simple HTTP/2 file server."""
import asyncio
import logging
import sys
from concurrent.futures.thread import ThreadPoolExecutor
import os.path
import urllib.parse
from http import HTTPStatus
from http2 import HttpError, Protocol
LOG = logging.getLogger(__name__)
class Handler:
def __init__(self, root_path=os.path.curdir, *, loop=None):
self.root_path = root_path
self.executor = ThreadPoolExecutor(max_workers=8)
self.loop = loop or asyncio.get_event_loop()
async def __call__(self, request, response):
try:
path = request.headers.get(b':path')
if path is None:
raise HttpError(HTTPStatus.BAD_REQUEST)
path = urllib.parse.unquote(path.decode('ascii'))
assert path.startswith('/')
local_path = os.path.join(self.root_path, path[1:])
if not os.path.isfile(local_path):
raise HttpError(HTTPStatus.NOT_FOUND)
LOG.info('GET %s', path)
with open(local_path, 'rb') as data:
contents = await self.loop.run_in_executor(
self.executor, data.read)
response.headers[b':status'] = b'200'
await response.write(contents)
await response.close()
except HttpError:
raise
except Exception:
LOG.exception('error when processing request')
raise HttpError(HTTPStatus.INTERNAL_SERVER_ERROR)
def main(argv):
if len(argv) < 2 or argv[1] == '-h':
        print('Usage: %s [-h] port' % argv[0])
return 0
logging.basicConfig(level=logging.DEBUG)
loop = asyncio.get_event_loop()
handler = Handler()
server = loop.run_until_complete(loop.create_server(
lambda: Protocol(lambda: handler),
'0.0.0.0',
int(argv[1]),
))
print('Serving on port %s' % argv[1])
try:
loop.run_forever()
except KeyboardInterrupt:
pass
finally:
server.close()
loop.run_until_complete(server.wait_closed())
loop.close()
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv))
|
<commit_before><commit_msg>Add a simple HTTP/2 file server for testing<commit_after>
|
"""A simple HTTP/2 file server."""
import asyncio
import logging
import sys
from concurrent.futures.thread import ThreadPoolExecutor
import os.path
import urllib.parse
from http import HTTPStatus
from http2 import HttpError, Protocol
LOG = logging.getLogger(__name__)
class Handler:
def __init__(self, root_path=os.path.curdir, *, loop=None):
self.root_path = root_path
self.executor = ThreadPoolExecutor(max_workers=8)
self.loop = loop or asyncio.get_event_loop()
async def __call__(self, request, response):
try:
path = request.headers.get(b':path')
if path is None:
raise HttpError(HTTPStatus.BAD_REQUEST)
path = urllib.parse.unquote(path.decode('ascii'))
assert path.startswith('/')
local_path = os.path.join(self.root_path, path[1:])
if not os.path.isfile(local_path):
raise HttpError(HTTPStatus.NOT_FOUND)
LOG.info('GET %s', path)
with open(local_path, 'rb') as data:
contents = await self.loop.run_in_executor(
self.executor, data.read)
response.headers[b':status'] = b'200'
await response.write(contents)
await response.close()
except HttpError:
raise
except Exception:
LOG.exception('error when processing request')
raise HttpError(HTTPStatus.INTERNAL_SERVER_ERROR)
def main(argv):
if len(argv) < 2 or argv[1] == '-h':
        print('Usage: %s [-h] port' % argv[0])
return 0
logging.basicConfig(level=logging.DEBUG)
loop = asyncio.get_event_loop()
handler = Handler()
server = loop.run_until_complete(loop.create_server(
lambda: Protocol(lambda: handler),
'0.0.0.0',
int(argv[1]),
))
print('Serving on port %s' % argv[1])
try:
loop.run_forever()
except KeyboardInterrupt:
pass
finally:
server.close()
loop.run_until_complete(server.wait_closed())
loop.close()
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv))
|
Add a simple HTTP/2 file server for testing"""A simple HTTP/2 file server."""
import asyncio
import logging
import sys
from concurrent.futures.thread import ThreadPoolExecutor
import os.path
import urllib.parse
from http import HTTPStatus
from http2 import HttpError, Protocol
LOG = logging.getLogger(__name__)
class Handler:
def __init__(self, root_path=os.path.curdir, *, loop=None):
self.root_path = root_path
self.executor = ThreadPoolExecutor(max_workers=8)
self.loop = loop or asyncio.get_event_loop()
async def __call__(self, request, response):
try:
path = request.headers.get(b':path')
if path is None:
raise HttpError(HTTPStatus.BAD_REQUEST)
path = urllib.parse.unquote(path.decode('ascii'))
assert path.startswith('/')
local_path = os.path.join(self.root_path, path[1:])
if not os.path.isfile(local_path):
raise HttpError(HTTPStatus.NOT_FOUND)
LOG.info('GET %s', path)
with open(local_path, 'rb') as data:
contents = await self.loop.run_in_executor(
self.executor, data.read)
response.headers[b':status'] = b'200'
await response.write(contents)
await response.close()
except HttpError:
raise
except Exception:
LOG.exception('error when processing request')
raise HttpError(HTTPStatus.INTERNAL_SERVER_ERROR)
def main(argv):
if len(argv) < 2 or argv[1] == '-h':
        print('Usage: %s [-h] port' % argv[0])
return 0
logging.basicConfig(level=logging.DEBUG)
loop = asyncio.get_event_loop()
handler = Handler()
server = loop.run_until_complete(loop.create_server(
lambda: Protocol(lambda: handler),
'0.0.0.0',
int(argv[1]),
))
print('Serving on port %s' % argv[1])
try:
loop.run_forever()
except KeyboardInterrupt:
pass
finally:
server.close()
loop.run_until_complete(server.wait_closed())
loop.close()
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv))
|
<commit_before><commit_msg>Add a simple HTTP/2 file server for testing<commit_after>"""A simple HTTP/2 file server."""
import asyncio
import logging
import sys
from concurrent.futures.thread import ThreadPoolExecutor
import os.path
import urllib.parse
from http import HTTPStatus
from http2 import HttpError, Protocol
LOG = logging.getLogger(__name__)
class Handler:
def __init__(self, root_path=os.path.curdir, *, loop=None):
self.root_path = root_path
self.executor = ThreadPoolExecutor(max_workers=8)
self.loop = loop or asyncio.get_event_loop()
async def __call__(self, request, response):
try:
path = request.headers.get(b':path')
if path is None:
raise HttpError(HTTPStatus.BAD_REQUEST)
path = urllib.parse.unquote(path.decode('ascii'))
assert path.startswith('/')
local_path = os.path.join(self.root_path, path[1:])
if not os.path.isfile(local_path):
raise HttpError(HTTPStatus.NOT_FOUND)
LOG.info('GET %s', path)
with open(local_path, 'rb') as data:
contents = await self.loop.run_in_executor(
self.executor, data.read)
response.headers[b':status'] = b'200'
await response.write(contents)
await response.close()
except HttpError:
raise
except Exception:
LOG.exception('error when processing request')
raise HttpError(HTTPStatus.INTERNAL_SERVER_ERROR)
def main(argv):
if len(argv) < 2 or argv[1] == '-h':
        print('Usage: %s [-h] port' % argv[0])
return 0
logging.basicConfig(level=logging.DEBUG)
loop = asyncio.get_event_loop()
handler = Handler()
server = loop.run_until_complete(loop.create_server(
lambda: Protocol(lambda: handler),
'0.0.0.0',
int(argv[1]),
))
print('Serving on port %s' % argv[1])
try:
loop.run_forever()
except KeyboardInterrupt:
pass
finally:
server.close()
loop.run_until_complete(server.wait_closed())
loop.close()
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv))
|
|
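The Handler/Protocol pair can also be mounted programmatically on a custom root directory; a sketch, where the module name simple_server and all paths are illustrative:

# Serving /srv/files on localhost:8443 with the classes from the record above.
import asyncio
from http2 import Protocol
from simple_server import Handler

loop = asyncio.get_event_loop()
handler = Handler(root_path='/srv/files', loop=loop)
server = loop.run_until_complete(
    loop.create_server(lambda: Protocol(lambda: handler), '127.0.0.1', 8443))
loop.run_forever()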
bfe38efc63ead69eb57c74b882f4072dd80cd469
|
alert.py
|
alert.py
|
class Alert:
def __init__(self, description, rule, action):
self.description = description
self.rule = rule
self.action = action
self.exchange = None
def connect(self, exchange):
self.exchange = exchange
dependent_stocks = self.rule.depends_on()
for stock in dependent_stocks:
exchange[stock].updated.connect(self.check_rule)
def check_rule(self, stock):
if self.rule.matches(self.exchange):
self.action.execute(self.description)
|
Add Alert class as well as connect and check_rule methods.
|
Add Alert class as well as connect and check_rule methods.
|
Python
|
mit
|
bsmukasa/stock_alerter
|
Add Alert class as well as connect and check_rule methods.
|
class Alert:
def __init__(self, description, rule, action):
self.description = description
self.rule = rule
self.action = action
self.exchange = None
def connect(self, exchange):
self.exchange = exchange
dependent_stocks = self.rule.depends_on()
for stock in dependent_stocks:
exchange[stock].updated.connect(self.check_rule)
def check_rule(self, stock):
if self.rule.matches(self.exchange):
self.action.execute(self.description)
|
<commit_before><commit_msg>Add Alert class as well as connect and check_rule methods.<commit_after>
|
class Alert:
def __init__(self, description, rule, action):
self.description = description
self.rule = rule
self.action = action
self.exchange = None
def connect(self, exchange):
self.exchange = exchange
dependent_stocks = self.rule.depends_on()
for stock in dependent_stocks:
exchange[stock].updated.connect(self.check_rule)
def check_rule(self, stock):
if self.rule.matches(self.exchange):
self.action.execute(self.description)
|
Add Alert class as well as connect and check_rule methods.class Alert:
def __init__(self, description, rule, action):
self.description = description
self.rule = rule
self.action = action
self.exchange = None
def connect(self, exchange):
self.exchange = exchange
dependent_stocks = self.rule.depends_on()
for stock in dependent_stocks:
exchange[stock].updated.connect(self.check_rule)
def check_rule(self, stock):
if self.rule.matches(self.exchange):
self.action.execute(self.description)
|
<commit_before><commit_msg>Add Alert class as well as connect and check_rule methods.<commit_after>class Alert:
def __init__(self, description, rule, action):
self.description = description
self.rule = rule
self.action = action
self.exchange = None
def connect(self, exchange):
self.exchange = exchange
dependent_stocks = self.rule.depends_on()
for stock in dependent_stocks:
exchange[stock].updated.connect(self.check_rule)
def check_rule(self, stock):
if self.rule.matches(self.exchange):
self.action.execute(self.description)
|
|
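Exercising Alert end to end needs only small stand-ins for the rule and the action; every class below is a hypothetical stub, not part of the project:

# Minimal stubs driving the Alert class above.
class AlwaysMatches:
    def depends_on(self):
        return []               # no stocks to subscribe to

    def matches(self, exchange):
        return True

class PrintAction:
    def execute(self, description):
        print('ALERT:', description)

alert = Alert('GOOG crossed threshold', AlwaysMatches(), PrintAction())
alert.connect({})       # empty exchange, so nothing gets wired
alert.check_rule(None)  # prints: ALERT: GOOG crossed threshold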
52467da69ebab6f2884bfd791d7e18bd4e593266
|
aovek.py
|
aovek.py
|
import argparse
import json
import tensorflow as tf
from aovek.preprocess.download_dataset import download_dataset
from aovek.preprocess.data_processing import DataProcessing
from aovek.training.train import Train
from aovek.visualization.predict import Predict
from aovek.validate.eval_metrics import EvalMetrics
parser = argparse.ArgumentParser(description='4ovek')
parser._action_groups.pop()
required = parser.add_argument_group('required arguments')
optional = parser.add_argument_group('optional arguments')
optional.add_argument('-dataset_download', help='Download dataset.',
action='store_true')
optional.add_argument('-processes_dataset', help='Processes dataset.',
action='store_true')
optional.add_argument('-train', help='Train convolutional neural network.',
action='store_true')
optional.add_argument('-predict', help='Make predictions for entire dataset.',
action='store_true')
optional.add_argument('-evaluate', help='Evaluate trained model.',
action='store_true')
required.add_argument('-config_file', help='Path to config file.',
required=True)
def dataset_download(config):
download_dataset(config)
def processes_dataset(config):
dp = DataProcessing(config)
dp.set_normalizer(1)
dp.pickle_dataset()
print('Taken time: {}'.format(str(dp.get_time())))
def train(config):
config['network']['model_binary_data_file'] = './model/54/model.h5'
config['network']['json_model_structure'] = './model/54/model.json'
train = Train(config)
def predict(config):
with tf.Session():
predict = Predict(config)
predict.make_predictions_for_optimizers()
def evaluate(config):
with tf.Session():
eval_metrics = EvalMetrics(config)
eval_metrics.eval_pickles_metrics()
if __name__ == '__main__':
args = parser.parse_args()
config_file = args.config_file
with open(config_file) as c_f:
config = json.load(c_f)
if args.dataset_download:
dataset_download(config)
elif args.processes_dataset:
processes_dataset(config)
elif args.train:
train(config)
elif args.predict:
predict(config)
elif args.evaluate:
evaluate(config)
|
Add file for control person detection
|
Add file for control person detection
|
Python
|
mit
|
nikolaystanishev/traffic-sign-recognition
|
Add file for control person detection
|
import argparse
import json
import tensorflow as tf
from aovek.preprocess.download_dataset import download_dataset
from aovek.preprocess.data_processing import DataProcessing
from aovek.training.train import Train
from aovek.visualization.predict import Predict
from aovek.validate.eval_metrics import EvalMetrics
parser = argparse.ArgumentParser(description='4ovek')
parser._action_groups.pop()
required = parser.add_argument_group('required arguments')
optional = parser.add_argument_group('optional arguments')
optional.add_argument('-dataset_download', help='Download dataset.',
action='store_true')
optional.add_argument('-processes_dataset', help='Processes dataset.',
action='store_true')
optional.add_argument('-train', help='Train convolutional neural network.',
action='store_true')
optional.add_argument('-predict', help='Make predictions for entire dataset.',
action='store_true')
optional.add_argument('-evaluate', help='Evaluate trained model.',
action='store_true')
required.add_argument('-config_file', help='Path to config file.',
required=True)
def dataset_download(config):
download_dataset(config)
def processes_dataset(config):
dp = DataProcessing(config)
dp.set_normalizer(1)
dp.pickle_dataset()
print('Taken time: {}'.format(str(dp.get_time())))
def train(config):
config['network']['model_binary_data_file'] = './model/54/model.h5'
config['network']['json_model_structure'] = './model/54/model.json'
train = Train(config)
def predict(config):
with tf.Session():
predict = Predict(config)
predict.make_predictions_for_optimizers()
def evaluate(config):
with tf.Session():
eval_metrics = EvalMetrics(config)
eval_metrics.eval_pickles_metrics()
if __name__ == '__main__':
args = parser.parse_args()
config_file = args.config_file
with open(config_file) as c_f:
config = json.load(c_f)
if args.dataset_download:
dataset_download(config)
elif args.processes_dataset:
processes_dataset(config)
elif args.train:
train(config)
elif args.predict:
predict(config)
elif args.evaluate:
evaluate(config)
|
<commit_before><commit_msg>Add file for control person detection<commit_after>
|
import argparse
import json
import tensorflow as tf
from aovek.preprocess.download_dataset import download_dataset
from aovek.preprocess.data_processing import DataProcessing
from aovek.training.train import Train
from aovek.visualization.predict import Predict
from aovek.validate.eval_metrics import EvalMetrics
parser = argparse.ArgumentParser(description='4ovek')
parser._action_groups.pop()
required = parser.add_argument_group('required arguments')
optional = parser.add_argument_group('optional arguments')
optional.add_argument('-dataset_download', help='Download dataset.',
action='store_true')
optional.add_argument('-processes_dataset', help='Processes dataset.',
action='store_true')
optional.add_argument('-train', help='Train convolutional neural network.',
action='store_true')
optional.add_argument('-predict', help='Make predictions for entire dataset.',
action='store_true')
optional.add_argument('-evaluate', help='Evaluate trained model.',
action='store_true')
required.add_argument('-config_file', help='Path to config file.',
required=True)
def dataset_download(config):
download_dataset(config)
def processes_dataset(config):
dp = DataProcessing(config)
dp.set_normalizer(1)
dp.pickle_dataset()
print('Taken time: {}'.format(str(dp.get_time())))
def train(config):
config['network']['model_binary_data_file'] = './model/54/model.h5'
config['network']['json_model_structure'] = './model/54/model.json'
train = Train(config)
def predict(config):
with tf.Session():
predict = Predict(config)
predict.make_predictions_for_optimizers()
def evaluate(config):
with tf.Session():
eval_metrics = EvalMetrics(config)
eval_metrics.eval_pickles_metrics()
if __name__ == '__main__':
args = parser.parse_args()
config_file = args.config_file
with open(config_file) as c_f:
config = json.load(c_f)
if args.dataset_download:
dataset_download(config)
elif args.processes_dataset:
processes_dataset(config)
elif args.train:
train(config)
elif args.predict:
predict(config)
elif args.evaluate:
evaluate(config)
|
Add file for control person detectionimport argparse
import json
import tensorflow as tf
from aovek.preprocess.download_dataset import download_dataset
from aovek.preprocess.data_processing import DataProcessing
from aovek.training.train import Train
from aovek.visualization.predict import Predict
from aovek.validate.eval_metrics import EvalMetrics
parser = argparse.ArgumentParser(description='4ovek')
parser._action_groups.pop()
required = parser.add_argument_group('required arguments')
optional = parser.add_argument_group('optional arguments')
optional.add_argument('-dataset_download', help='Download dataset.',
action='store_true')
optional.add_argument('-processes_dataset', help='Processes dataset.',
action='store_true')
optional.add_argument('-train', help='Train convolutional neural network.',
action='store_true')
optional.add_argument('-predict', help='Make predictions for entire dataset.',
action='store_true')
optional.add_argument('-evaluate', help='Evaluate trained model.',
action='store_true')
required.add_argument('-config_file', help='Path to config file.',
required=True)
def dataset_download(config):
download_dataset(config)
def processes_dataset(config):
dp = DataProcessing(config)
dp.set_normalizer(1)
dp.pickle_dataset()
print('Taken time: {}'.format(str(dp.get_time())))
def train(config):
config['network']['model_binary_data_file'] = './model/54/model.h5'
config['network']['json_model_structure'] = './model/54/model.json'
train = Train(config)
def predict(config):
with tf.Session():
predict = Predict(config)
predict.make_predictions_for_optimizers()
def evaluate(config):
with tf.Session():
eval_metrics = EvalMetrics(config)
eval_metrics.eval_pickles_metrics()
if __name__ == '__main__':
args = parser.parse_args()
config_file = args.config_file
with open(config_file) as c_f:
config = json.load(c_f)
if args.dataset_download:
dataset_download(config)
elif args.processes_dataset:
processes_dataset(config)
elif args.train:
train(config)
elif args.predict:
predict(config)
elif args.evaluate:
evaluate(config)
|
<commit_before><commit_msg>Add file for control person detection<commit_after>import argparse
import json
import tensorflow as tf
from aovek.preprocess.download_dataset import download_dataset
from aovek.preprocess.data_processing import DataProcessing
from aovek.training.train import Train
from aovek.visualization.predict import Predict
from aovek.validate.eval_metrics import EvalMetrics
parser = argparse.ArgumentParser(description='4ovek')
parser._action_groups.pop()
required = parser.add_argument_group('required arguments')
optional = parser.add_argument_group('optional arguments')
optional.add_argument('-dataset_download', help='Download dataset.',
action='store_true')
optional.add_argument('-processes_dataset', help='Processes dataset.',
action='store_true')
optional.add_argument('-train', help='Train convolutional neural network.',
action='store_true')
optional.add_argument('-predict', help='Make predictions for entire dataset.',
action='store_true')
optional.add_argument('-evaluate', help='Evaluate trained model.',
action='store_true')
required.add_argument('-config_file', help='Path to config file.',
required=True)
def dataset_download(config):
download_dataset(config)
def processes_dataset(config):
dp = DataProcessing(config)
dp.set_normalizer(1)
dp.pickle_dataset()
print('Taken time: {}'.format(str(dp.get_time())))
def train(config):
config['network']['model_binary_data_file'] = './model/54/model.h5'
config['network']['json_model_structure'] = './model/54/model.json'
train = Train(config)
def predict(config):
with tf.Session():
predict = Predict(config)
predict.make_predictions_for_optimizers()
def evaluate(config):
with tf.Session():
eval_metrics = EvalMetrics(config)
eval_metrics.eval_pickles_metrics()
if __name__ == '__main__':
args = parser.parse_args()
config_file = args.config_file
with open(config_file) as c_f:
config = json.load(c_f)
if args.dataset_download:
dataset_download(config)
elif args.processes_dataset:
processes_dataset(config)
elif args.train:
train(config)
elif args.predict:
predict(config)
elif args.evaluate:
evaluate(config)
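Editorial note, not part of this record: the CLI above dispatches through a chain of elif branches. A table-driven variant keeps each flag's name and handler in one place; a minimal sketch, with a stand-in handler in place of the record's aovek functions:

import argparse
import json

def handle(config):
    print('running with', config)  # stand-in for dataset_download/train/etc.

ACTIONS = {'dataset_download': handle, 'processes_dataset': handle,
           'train': handle, 'predict': handle, 'evaluate': handle}

def main():
    parser = argparse.ArgumentParser(description='dispatch sketch')
    for name in ACTIONS:
        parser.add_argument('-' + name, action='store_true')
    parser.add_argument('-config_file', required=True)
    args = parser.parse_args()
    with open(args.config_file) as c_f:
        config = json.load(c_f)
    for name, handler in ACTIONS.items():
        if getattr(args, name):  # argparse exposes '-train' as args.train
            handler(config)
            break

if __name__ == '__main__':
    main()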
|
|
a694c8449f63cbe37ceadc98a5836cfaff8fa992
|
tests/RemoveEpsilonRules/Reverse/OverTwoRuleTest.py
|
tests/RemoveEpsilonRules/Reverse/OverTwoRuleTest.py
|
#!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 20.08.2017 16:01
:Licence GNUv3
Part of grammpy-transforms
"""
from unittest import TestCase, main
from grammpy import *
from grammpy_transforms import ContextFree, InverseContextFree
from pyparsers import cyk
class S(Nonterminal): pass
class A(Nonterminal): pass
class B(Nonterminal): pass
class C(Nonterminal): pass
class RuleS0A(Rule): rule = ([S], [0, A])
class RuleABC(Rule): rule = ([A], [B, C])
class RuleBC(Rule): rule = ([B], [C])
class RuleCEps(Rule): rule = ([C], [EPS])
"""
S->1B A->1B A->eps B->eps B->1C C->11
ToEpsilon: A,B
S->1B A->1B A->eps B->eps B->1C C->11 S->1 A->1
------ ------ ++++ ++++
"""
class SimpleTestoverUnitRule(TestCase):
def test_simpleTestOverUnitRule(self):
g = Grammar(terminals=[0, 1, 2, 3],
nonterminals=[S, A, B, C],
rules=[RuleS0A, RuleABC, RuleBC, RuleCEps],
start_symbol=S)
gr = ContextFree.transform_to_chomsky_normal_form(ContextFree.remove_unit_rules(ContextFree.remove_rules_with_epsilon(g)))
pars = cyk(gr, [0])
s = InverseContextFree.epsilon_rules_restore(InverseContextFree.unit_rules_restore(InverseContextFree.transform_from_chomsky_normal_form(pars)))
self.assertIsInstance(s, S)
self.assertIsInstance(s.to_rule, RuleS0A)
self.assertIsInstance(s.to_rule.to_symbols[0], Terminal)
self.assertEqual(s.to_rule.to_symbols[0].s, 0)
a = s.to_rule.to_symbols[1]
self.assertIsInstance(a, A)
self.assertIsInstance(a.to_rule, RuleABC)
c = a.to_rule.to_symbols[1]
self.assertIsInstance(c, C)
self.assertIsInstance(c.to_rule, RuleCEps)
self.assertIs(c.to_rule.to_symbols[0], EPS)
b = a.to_rule.to_symbols[0]
self.assertIsInstance(b, B)
self.assertIsInstance(b.to_rule, RuleBC)
c = b.to_rule.to_symbols[0]
self.assertIsInstance(c, C)
self.assertIsInstance(c.to_rule, RuleCEps)
self.assertIs(c.to_rule.to_symbols[0], EPS)
if __name__ == '__main__':
main()
|
Add more complicated test of epsilon rules restoration
|
Add more complicated test of epsilon rules restoration
|
Python
|
mit
|
PatrikValkovic/grammpy
|
Add more complicated test of epsilon rules restoration
|
#!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 20.08.2017 16:01
:Licence GNUv3
Part of grammpy-transforms
"""
from unittest import TestCase, main
from grammpy import *
from grammpy_transforms import ContextFree, InverseContextFree
from pyparsers import cyk
class S(Nonterminal): pass
class A(Nonterminal): pass
class B(Nonterminal): pass
class C(Nonterminal): pass
class RuleS0A(Rule): rule = ([S], [0, A])
class RuleABC(Rule): rule = ([A], [B, C])
class RuleBC(Rule): rule = ([B], [C])
class RuleCEps(Rule): rule = ([C], [EPS])
"""
S->1B A->1B A->eps B->eps B->1C C->11
ToEpsilon: A,B
S->1B A->1B A->eps B->eps B->1C C->11 S->1 A->1
------ ------ ++++ ++++
"""
class SimpleTestoverUnitRule(TestCase):
def test_simpleTestOverUnitRule(self):
g = Grammar(terminals=[0, 1, 2, 3],
nonterminals=[S, A, B, C],
rules=[RuleS0A, RuleABC, RuleBC, RuleCEps],
start_symbol=S)
gr = ContextFree.transform_to_chomsky_normal_form(ContextFree.remove_unit_rules(ContextFree.remove_rules_with_epsilon(g)))
pars = cyk(gr, [0])
s = InverseContextFree.epsilon_rules_restore(InverseContextFree.unit_rules_restore(InverseContextFree.transform_from_chomsky_normal_form(pars)))
self.assertIsInstance(s, S)
self.assertIsInstance(s.to_rule, RuleS0A)
self.assertIsInstance(s.to_rule.to_symbols[0], Terminal)
self.assertEqual(s.to_rule.to_symbols[0].s, 0)
a = s.to_rule.to_symbols[1]
self.assertIsInstance(a, A)
self.assertIsInstance(a.to_rule, RuleABC)
c = a.to_rule.to_symbols[1]
self.assertIsInstance(c, C)
self.assertIsInstance(c.to_rule, RuleCEps)
self.assertIs(c.to_rule.to_symbols[0], EPS)
b = a.to_rule.to_symbols[0]
self.assertIsInstance(b, B)
self.assertIsInstance(b.to_rule, RuleBC)
c = b.to_rule.to_symbols[0]
self.assertIsInstance(c, C)
self.assertIsInstance(c.to_rule, RuleCEps)
self.assertIs(c.to_rule.to_symbols[0], EPS)
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add more complicated test of epsilon rules restoration<commit_after>
|
#!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 20.08.2017 16:01
:Licence GNUv3
Part of grammpy-transforms
"""
from unittest import TestCase, main
from grammpy import *
from grammpy_transforms import ContextFree, InverseContextFree
from pyparsers import cyk
class S(Nonterminal): pass
class A(Nonterminal): pass
class B(Nonterminal): pass
class C(Nonterminal): pass
class RuleS0A(Rule): rule = ([S], [0, A])
class RuleABC(Rule): rule = ([A], [B, C])
class RuleBC(Rule): rule = ([B], [C])
class RuleCEps(Rule): rule = ([C], [EPS])
"""
S->1B A->1B A->eps B->eps B->1C C->11
ToEpsilon: A,B
S->1B A->1B A->eps B->eps B->1C C->11 S->1 A->1
------ ------ ++++ ++++
"""
class SimpleTestoverUnitRule(TestCase):
def test_simpleTestOverUnitRule(self):
g = Grammar(terminals=[0, 1, 2, 3],
nonterminals=[S, A, B, C],
rules=[RuleS0A, RuleABC, RuleBC, RuleCEps],
start_symbol=S)
gr = ContextFree.transform_to_chomsky_normal_form(ContextFree.remove_unit_rules(ContextFree.remove_rules_with_epsilon(g)))
pars = cyk(gr, [0])
s = InverseContextFree.epsilon_rules_restore(InverseContextFree.unit_rules_restore(InverseContextFree.transform_from_chomsky_normal_form(pars)))
self.assertIsInstance(s, S)
self.assertIsInstance(s.to_rule, RuleS0A)
self.assertIsInstance(s.to_rule.to_symbols[0], Terminal)
self.assertEqual(s.to_rule.to_symbols[0].s, 0)
a = s.to_rule.to_symbols[1]
self.assertIsInstance(a, A)
self.assertIsInstance(a.to_rule, RuleABC)
c = a.to_rule.to_symbols[1]
self.assertIsInstance(c, C)
self.assertIsInstance(c.to_rule, RuleCEps)
self.assertIs(c.to_rule.to_symbols[0], EPS)
b = a.to_rule.to_symbols[0]
self.assertIsInstance(b, B)
self.assertIsInstance(b.to_rule, RuleBC)
c = b.to_rule.to_symbols[0]
self.assertIsInstance(c, C)
self.assertIsInstance(c.to_rule, RuleCEps)
self.assertIs(c.to_rule.to_symbols[0], EPS)
if __name__ == '__main__':
main()
|
Add more complicated test of epsilon rules restoration#!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 20.08.2017 16:01
:Licence GNUv3
Part of grammpy-transforms
"""
from unittest import TestCase, main
from grammpy import *
from grammpy_transforms import ContextFree, InverseContextFree
from pyparsers import cyk
class S(Nonterminal): pass
class A(Nonterminal): pass
class B(Nonterminal): pass
class C(Nonterminal): pass
class RuleS0A(Rule): rule = ([S], [0, A])
class RuleABC(Rule): rule = ([A], [B, C])
class RuleBC(Rule): rule = ([B], [C])
class RuleCEps(Rule): rule = ([C], [EPS])
"""
S->1B A->1B A->eps B->eps B->1C C->11
ToEpsilon: A,B
S->1B A->1B A->eps B->eps B->1C C->11 S->1 A->1
------ ------ ++++ ++++
"""
class SimpleTestoverUnitRule(TestCase):
def test_simpleTestOverUnitRule(self):
g = Grammar(terminals=[0, 1, 2, 3],
nonterminals=[S, A, B, C],
rules=[RuleS0A, RuleABC, RuleBC, RuleCEps],
start_symbol=S)
gr = ContextFree.transform_to_chomsky_normal_form(ContextFree.remove_unit_rules(ContextFree.remove_rules_with_epsilon(g)))
pars = cyk(gr, [0])
s = InverseContextFree.epsilon_rules_restore(InverseContextFree.unit_rules_restore(InverseContextFree.transform_from_chomsky_normal_form(pars)))
self.assertIsInstance(s, S)
self.assertIsInstance(s.to_rule, RuleS0A)
self.assertIsInstance(s.to_rule.to_symbols[0], Terminal)
self.assertEqual(s.to_rule.to_symbols[0].s, 0)
a = s.to_rule.to_symbols[1]
self.assertIsInstance(a, A)
self.assertIsInstance(a.to_rule, RuleABC)
c = a.to_rule.to_symbols[1]
self.assertIsInstance(c, C)
self.assertIsInstance(c.to_rule, RuleCEps)
self.assertIs(c.to_rule.to_symbols[0], EPS)
b = a.to_rule.to_symbols[0]
self.assertIsInstance(b, B)
self.assertIsInstance(b.to_rule, RuleBC)
c = b.to_rule.to_symbols[0]
self.assertIsInstance(c, C)
self.assertIsInstance(c.to_rule, RuleCEps)
self.assertIs(c.to_rule.to_symbols[0], EPS)
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add more complicated test of epsilon rules restoration<commit_after>#!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 20.08.2017 16:01
:Licence GNUv3
Part of grammpy-transforms
"""
from unittest import TestCase, main
from grammpy import *
from grammpy_transforms import ContextFree, InverseContextFree
from pyparsers import cyk
class S(Nonterminal): pass
class A(Nonterminal): pass
class B(Nonterminal): pass
class C(Nonterminal): pass
class RuleS0A(Rule): rule = ([S], [0, A])
class RuleABC(Rule): rule = ([A], [B, C])
class RuleBC(Rule): rule = ([B], [C])
class RuleCEps(Rule): rule = ([C], [EPS])
"""
S->1B A->1B A->eps B->eps B->1C C->11
ToEpsilon: A,B
S->1B A->1B A->eps B->eps B->1C C->11 S->1 A->1
------ ------ ++++ ++++
"""
class SimpleTestoverUnitRule(TestCase):
def test_simpleTestOverUnitRule(self):
g = Grammar(terminals=[0, 1, 2, 3],
nonterminals=[S, A, B, C],
rules=[RuleS0A, RuleABC, RuleBC, RuleCEps],
start_symbol=S)
gr = ContextFree.transform_to_chomsky_normal_form(ContextFree.remove_unit_rules(ContextFree.remove_rules_with_epsilon(g)))
pars = cyk(gr, [0])
s = InverseContextFree.epsilon_rules_restore(InverseContextFree.unit_rules_restore(InverseContextFree.transform_from_chomsky_normal_form(pars)))
self.assertIsInstance(s, S)
self.assertIsInstance(s.to_rule, RuleS0A)
self.assertIsInstance(s.to_rule.to_symbols[0], Terminal)
self.assertEqual(s.to_rule.to_symbols[0].s, 0)
a = s.to_rule.to_symbols[1]
self.assertIsInstance(a, A)
self.assertIsInstance(a.to_rule, RuleABC)
c = a.to_rule.to_symbols[1]
self.assertIsInstance(c, C)
self.assertIsInstance(c.to_rule, RuleCEps)
self.assertIs(c.to_rule.to_symbols[0], EPS)
b = a.to_rule.to_symbols[0]
self.assertIsInstance(b, B)
self.assertIsInstance(b.to_rule, RuleBC)
c = b.to_rule.to_symbols[0]
self.assertIsInstance(c, C)
self.assertIsInstance(c.to_rule, RuleCEps)
self.assertIs(c.to_rule.to_symbols[0], EPS)
if __name__ == '__main__':
main()
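Editorial note: the same normalize -> CYK -> restore chain recurs in these grammpy tests. As a sketch built only from the calls used above (nothing beyond them is assumed), the round trip can be factored into one helper:

from grammpy_transforms import ContextFree, InverseContextFree
from pyparsers import cyk

def parse_with_cnf(grammar, sequence):
    # Normalize: drop epsilon rules, then unit rules, then convert to CNF.
    g = ContextFree.remove_rules_with_epsilon(grammar)
    g = ContextFree.remove_unit_rules(g)
    g = ContextFree.transform_to_chomsky_normal_form(g)
    # Parse, then undo the transformations on the parse tree in reverse order.
    tree = cyk(g, sequence)
    tree = InverseContextFree.transform_from_chomsky_normal_form(tree)
    tree = InverseContextFree.unit_rules_restore(tree)
    tree = InverseContextFree.epsilon_rules_restore(tree)
    return tree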
|
|
a6e92e2464a50a7c4159958e1b60c0aa2dea96a9
|
res/os_x_app_creation.py
|
res/os_x_app_creation.py
|
#! /usr/bin/env python
import os
import shutil
import subprocess
MAIN_WINDOW_UI_FILE = 'src/opencmiss/neon/ui/ui_mainwindow.py'
def remove_parent_of_menubar():
with open(MAIN_WINDOW_UI_FILE, 'r+') as f:
s = f.read()
f.seek(0)
c = s.replace('self.menubar = QtGui.QMenuBar(MainWindow)', 'self.menubar = QtGui.QMenuBar(None)')
f.write(c)
f.truncate()
def create_softlink_to_zinc(directory):
subprocess.call(['ln', '-s', directory, 'zinc'])
def execute_py2app_build():
subprocess.call(['python', 'setup.py2app.py', 'py2app'])
def rename_app():
shutil.move('neon.app', 'Neon.app')
shutil.move('Neon.app/Contents/MacOS/neon', 'Neon.app/Contents/MacOS/Neon')
with open('Neon.app/Contents/Info.plist', 'r+') as f:
s = f.read()
f.seek(0)
c = s.replace('neon', 'Neon')
f.write(c)
f.truncate()
def mv_app(destination):
target_location = os.path.join(destination, 'Neon.app')
if os.path.exists(target_location):
shutil.rmtree(target_location)
shutil.move('Neon.app', target_location)
def rm_build_dist():
shutil.rmtree('build')
shutil.rmtree('dist')
def rm_softlink():
os.remove('zinc')
def undo_code_change():
subprocess.call(['git', 'co', '--', MAIN_WINDOW_UI_FILE])
def main():
import opencmiss.zinc.context
directory = os.path.dirname(opencmiss.zinc.context.__file__)
base_dir = os.path.realpath(os.path.join(os.path.dirname(os.path.realpath(__file__)), '..'))
destination = os.environ['HOME']
pwd = os.getcwd()
os.chdir(base_dir)
remove_parent_of_menubar()
os.chdir(os.path.join(base_dir, 'src', 'opencmiss'))
create_softlink_to_zinc(directory)
os.chdir(os.path.join(base_dir, 'src'))
execute_py2app_build()
os.chdir(os.path.join(base_dir, 'src', 'dist'))
rename_app()
mv_app(destination)
os.chdir(os.path.join(base_dir, 'src'))
rm_build_dist()
os.chdir(os.path.join(base_dir, 'src', 'opencmiss'))
rm_softlink()
os.chdir(base_dir)
undo_code_change()
os.chdir(pwd)
print('Created new Neon application at: ', os.path.join(destination, 'Neon.app'))
if __name__ == '__main__':
main()
|
Add script to automate the creation of the Neon app.
|
Add script to automate the creation of the Neon app.
|
Python
|
apache-2.0
|
alan-wu/neon
|
Add script to automate the creation of the Neon app.
|
#! /usr/bin/env python
import os
import shutil
import subprocess
MAIN_WINDOW_UI_FILE = 'src/opencmiss/neon/ui/ui_mainwindow.py'
def remove_parent_of_menubar():
with open(MAIN_WINDOW_UI_FILE, 'r+') as f:
s = f.read()
f.seek(0)
c = s.replace('self.menubar = QtGui.QMenuBar(MainWindow)', 'self.menubar = QtGui.QMenuBar(None)')
f.write(c)
f.truncate()
def create_softlink_to_zinc(directory):
subprocess.call(['ln', '-s', directory, 'zinc'])
def execute_py2app_build():
subprocess.call(['python', 'setup.py2app.py', 'py2app'])
def rename_app():
shutil.move('neon.app', 'Neon.app')
shutil.move('Neon.app/Contents/MacOS/neon', 'Neon.app/Contents/MacOS/Neon')
with open('Neon.app/Contents/Info.plist', 'r+') as f:
s = f.read()
f.seek(0)
c = s.replace('neon', 'Neon')
f.write(c)
f.truncate()
def mv_app(destination):
target_location = os.path.join(destination, 'Neon.app')
if os.path.exists(target_location):
shutil.rmtree(target_location)
shutil.move('Neon.app', target_location)
def rm_build_dist():
shutil.rmtree('build')
shutil.rmtree('dist')
def rm_softlink():
os.remove('zinc')
def undo_code_change():
subprocess.call(['git', 'co', '--', MAIN_WINDOW_UI_FILE])
def main():
import opencmiss.zinc.context
directory = os.path.dirname(opencmiss.zinc.context.__file__)
base_dir = os.path.realpath(os.path.join(os.path.dirname(os.path.realpath(__file__)), '..'))
destination = os.environ['HOME']
pwd = os.getcwd()
os.chdir(base_dir)
remove_parent_of_menubar()
os.chdir(os.path.join(base_dir, 'src', 'opencmiss'))
create_softlink_to_zinc(directory)
os.chdir(os.path.join(base_dir, 'src'))
execute_py2app_build()
os.chdir(os.path.join(base_dir, 'src', 'dist'))
rename_app()
mv_app(destination)
os.chdir(os.path.join(base_dir, 'src'))
rm_build_dist()
os.chdir(os.path.join(base_dir, 'src', 'opencmiss'))
rm_softlink()
os.chdir(base_dir)
undo_code_change()
os.chdir(pwd)
print('Created new Neon application at: ', os.path.join(destination, 'Neon.app'))
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add script to automate the creation of the Neon app.<commit_after>
|
#! /usr/bin/env python
import os
import shutil
import subprocess
MAIN_WINDOW_UI_FILE = 'src/opencmiss/neon/ui/ui_mainwindow.py'
def remove_parent_of_menubar():
with open(MAIN_WINDOW_UI_FILE, 'r+') as f:
s = f.read()
f.seek(0)
c = s.replace('self.menubar = QtGui.QMenuBar(MainWindow)', 'self.menubar = QtGui.QMenuBar(None)')
f.write(c)
f.truncate()
def create_softlink_to_zinc(directory):
subprocess.call(['ln', '-s', directory, 'zinc'])
def execute_py2app_build():
subprocess.call(['python', 'setup.py2app.py', 'py2app'])
def rename_app():
shutil.move('neon.app', 'Neon.app')
shutil.move('Neon.app/Contents/MacOS/neon', 'Neon.app/Contents/MacOS/Neon')
with open('Neon.app/Contents/Info.plist', 'r+') as f:
s = f.read()
f.seek(0)
c = s.replace('neon', 'Neon')
f.write(c)
f.truncate()
def mv_app(destination):
target_location = os.path.join(destination, 'Neon.app')
if os.path.exists(target_location):
shutil.rmtree(target_location)
shutil.move('Neon.app', target_location)
def rm_build_dist():
shutil.rmtree('build')
shutil.rmtree('dist')
def rm_softlink():
os.remove('zinc')
def undo_code_change():
subprocess.call(['git', 'co', '--', MAIN_WINDOW_UI_FILE])
def main():
import opencmiss.zinc.context
directory = os.path.dirname(opencmiss.zinc.context.__file__)
base_dir = os.path.realpath(os.path.join(os.path.dirname(os.path.realpath(__file__)), '..'))
destination = os.environ['HOME']
pwd = os.getcwd()
os.chdir(base_dir)
remove_parent_of_menubar()
os.chdir(os.path.join(base_dir, 'src', 'opencmiss'))
create_softlink_to_zinc(directory)
os.chdir(os.path.join(base_dir, 'src'))
execute_py2app_build()
os.chdir(os.path.join(base_dir, 'src', 'dist'))
rename_app()
mv_app(destination)
os.chdir(os.path.join(base_dir, 'src'))
rm_build_dist()
os.chdir(os.path.join(base_dir, 'src', 'opencmiss'))
rm_softlink()
os.chdir(base_dir)
undo_code_change()
os.chdir(pwd)
print('Created new Neon application at: ', os.path.join(destination, 'Neon.app'))
if __name__ == '__main__':
main()
|
Add script to automate the creation of the Neon app.#! /usr/bin/env python
import os
import shutil
import subprocess
MAIN_WINDOW_UI_FILE = 'src/opencmiss/neon/ui/ui_mainwindow.py'
def remove_parent_of_menubar():
with open(MAIN_WINDOW_UI_FILE, 'r+') as f:
s = f.read()
f.seek(0)
c = s.replace('self.menubar = QtGui.QMenuBar(MainWindow)', 'self.menubar = QtGui.QMenuBar(None)')
f.write(c)
f.truncate()
def create_softlink_to_zinc(directory):
subprocess.call(['ln', '-s', directory, 'zinc'])
def execute_py2app_build():
subprocess.call(['python', 'setup.py2app.py', 'py2app'])
def rename_app():
shutil.move('neon.app', 'Neon.app')
shutil.move('Neon.app/Contents/MacOS/neon', 'Neon.app/Contents/MacOS/Neon')
with open('Neon.app/Contents/Info.plist', 'r+') as f:
s = f.read()
f.seek(0)
c = s.replace('neon', 'Neon')
f.write(c)
f.truncate()
def mv_app(destination):
target_location = os.path.join(destination, 'Neon.app')
if os.path.exists(target_location):
shutil.rmtree(target_location)
shutil.move('Neon.app', target_location)
def rm_build_dist():
shutil.rmtree('build')
shutil.rmtree('dist')
def rm_softlink():
os.remove('zinc')
def undo_code_change():
subprocess.call(['git', 'co', '--', MAIN_WINDOW_UI_FILE])
def main():
import opencmiss.zinc.context
directory = os.path.dirname(opencmiss.zinc.context.__file__)
base_dir = os.path.realpath(os.path.join(os.path.dirname(os.path.realpath(__file__)), '..'))
destination = os.environ['HOME']
pwd = os.getcwd()
os.chdir(base_dir)
remove_parent_of_menubar()
os.chdir(os.path.join(base_dir, 'src', 'opencmiss'))
create_softlink_to_zinc(directory)
os.chdir(os.path.join(base_dir, 'src'))
execute_py2app_build()
os.chdir(os.path.join(base_dir, 'src', 'dist'))
rename_app()
mv_app(destination)
os.chdir(os.path.join(base_dir, 'src'))
rm_build_dist()
os.chdir(os.path.join(base_dir, 'src', 'opencmiss'))
rm_softlink()
os.chdir(base_dir)
undo_code_change()
os.chdir(pwd)
print('Created new Neon application at: ', os.path.join(destination, 'Neon.app'))
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add script to automate the creation of the Neon app.<commit_after>#! /usr/bin/env python
import os
import shutil
import subprocess
MAIN_WINDOW_UI_FILE = 'src/opencmiss/neon/ui/ui_mainwindow.py'
def remove_parent_of_menubar():
with open(MAIN_WINDOW_UI_FILE, 'r+') as f:
s = f.read()
f.seek(0)
c = s.replace('self.menubar = QtGui.QMenuBar(MainWindow)', 'self.menubar = QtGui.QMenuBar(None)')
f.write(c)
f.truncate()
def create_softlink_to_zinc(directory):
subprocess.call(['ln', '-s', directory, 'zinc'])
def execute_py2app_build():
subprocess.call(['python', 'setup.py2app.py', 'py2app'])
def rename_app():
shutil.move('neon.app', 'Neon.app')
shutil.move('Neon.app/Contents/MacOS/neon', 'Neon.app/Contents/MacOS/Neon')
with open('Neon.app/Contents/Info.plist', 'r+') as f:
s = f.read()
f.seek(0)
c = s.replace('neon', 'Neon')
f.write(c)
f.truncate()
def mv_app(destination):
target_location = os.path.join(destination, 'Neon.app')
if os.path.exists(target_location):
shutil.rmtree(target_location)
shutil.move('Neon.app', target_location)
def rm_build_dist():
shutil.rmtree('build')
shutil.rmtree('dist')
def rm_softlink():
os.remove('zinc')
def undo_code_change():
subprocess.call(['git', 'co', '--', MAIN_WINDOW_UI_FILE])
def main():
import opencmiss.zinc.context
directory = os.path.dirname(opencmiss.zinc.context.__file__)
base_dir = os.path.realpath(os.path.join(os.path.dirname(os.path.realpath(__file__)), '..'))
destination = os.environ['HOME']
pwd = os.getcwd()
os.chdir(base_dir)
remove_parent_of_menubar()
os.chdir(os.path.join(base_dir, 'src', 'opencmiss'))
create_softlink_to_zinc(directory)
os.chdir(os.path.join(base_dir, 'src'))
execute_py2app_build()
os.chdir(os.path.join(base_dir, 'src', 'dist'))
rename_app()
mv_app(destination)
os.chdir(os.path.join(base_dir, 'src'))
rm_build_dist()
os.chdir(os.path.join(base_dir, 'src', 'opencmiss'))
rm_softlink()
os.chdir(base_dir)
undo_code_change()
os.chdir(pwd)
print('Created new Neon application at: ', os.path.join(destination, 'Neon.app'))
if __name__ == '__main__':
main()
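Editorial note: the script above changes directories repeatedly and restores the original one by hand at the end. A sketch (generic Python, not from the record) of a context manager that makes each change exception-safe:

import os
from contextlib import contextmanager

@contextmanager
def pushd(path):
    """Temporarily change the working directory, restoring it on exit."""
    previous = os.getcwd()
    os.chdir(path)
    try:
        yield
    finally:
        os.chdir(previous)

# e.g. with pushd(os.path.join(base_dir, 'src')): execute_py2app_build()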
|
|
519cb3ebf56c7841240779e170fb3bc657d87830
|
tempest/tests/test_list_tests.py
|
tempest/tests/test_list_tests.py
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import re
import subprocess
from tempest.tests import base
class TestTestList(base.TestCase):
def test_no_import_errors(self):
import_failures = []
p = subprocess.Popen(['testr', 'list-tests'], stdout=subprocess.PIPE)
ids = p.stdout.read()
ids = ids.split('\n')
for test_id in ids:
if re.match('(\w+\.){3}\w+', test_id):
if not test_id.startswith('tempest.'):
fail_id = test_id.split('unittest.loader.ModuleImport'
'Failure.')[1]
import_failures.append(fail_id)
error_message = ("The following tests have import failures and aren't"
" being run with test filters %s" % import_failures)
self.assertFalse(import_failures, error_message)
|
Add tempest unit test to verify the test list
|
Add tempest unit test to verify the test list
If there is an error in a test file it could potentially not get run
in the tox jobs used for gating. This commit adds a test which looks
for ImportFailures in the test list to ensure that all the test files
will get run in the gate.
Change-Id: Ia0a5831810d04f2201bd856039362b4a30f39319
|
Python
|
apache-2.0
|
FujitsuEnablingSoftwareTechnologyGmbH/tempest,hayderimran7/tempest,tonyli71/tempest,alinbalutoiu/tempest,xbezdick/tempest,danielmellado/tempest,openstack/tempest,eggmaster/tempest,FujitsuEnablingSoftwareTechnologyGmbH/tempest,xbezdick/tempest,bigswitch/tempest,afaheem88/tempest,eggmaster/tempest,pczerkas/tempest,redhat-cip/tempest,Vaidyanath/tempest,manasi24/jiocloud-tempest-qatempest,cloudbase/lis-tempest,manasi24/jiocloud-tempest-qatempest,cisco-openstack/tempest,hpcloud-mon/tempest,vedujoshi/os_tempest,hpcloud-mon/tempest,tudorvio/tempest,roopali8/tempest,armando-migliaccio/tempest,zsoltdudas/lis-tempest,rzarzynski/tempest,cloudbase/lis-tempest,jamielennox/tempest,BeenzSyed/tempest,akash1808/tempest,Mirantis/tempest,zsoltdudas/lis-tempest,tonyli71/tempest,Juraci/tempest,JioCloud/tempest,dkalashnik/tempest,vedujoshi/os_tempest,queria/my-tempest,vedujoshi/tempest,roopali8/tempest,bigswitch/tempest,vedujoshi/tempest,JioCloud/tempest,alinbalutoiu/tempest,hayderimran7/tempest,ntymtsiv/tempest,Lilywei123/tempest,yamt/tempest,Vaidyanath/tempest,masayukig/tempest,NexusIS/tempest,redhat-cip/tempest,vmahuli/tempest,varunarya10/tempest,CiscoSystems/tempest,sebrandon1/tempest,afaheem88/tempest_neutron,Lilywei123/tempest,izadorozhna/tempest,neerja28/Tempest,CiscoSystems/tempest,vmahuli/tempest,Tesora/tesora-tempest,afaheem88/tempest_neutron,adkerr/tempest,afaheem88/tempest,Juniper/tempest,flyingfish007/tempest,LIS/lis-tempest,LIS/lis-tempest,openstack/tempest,queria/my-tempest,manasi24/tempest,sebrandon1/tempest,dkalashnik/tempest,pandeyop/tempest,NexusIS/tempest,rakeshmi/tempest,varunarya10/tempest,rakeshmi/tempest,Mirantis/tempest,manasi24/tempest,ebagdasa/tempest,adkerr/tempest,rzarzynski/tempest,ntymtsiv/tempest,masayukig/tempest,nunogt/tempest,armando-migliaccio/tempest,BeenzSyed/tempest,tudorvio/tempest,jaspreetw/tempest,Juraci/tempest,cisco-openstack/tempest,izadorozhna/tempest,akash1808/tempest,yamt/tempest,neerja28/Tempest,pandeyop/tempest,Tesora/tesora-tempest,flyingfish007/tempest,jamielennox/tempest,danielmellado/tempest,nunogt/tempest,ebagdasa/tempest,pczerkas/tempest,jaspreetw/tempest,Juniper/tempest
|
Add tempest unit test to verify the test list
If there is an error in a test file it could potentially not get run
in the tox jobs used for gating. This commit adds a test which looks
for ImportFailures in the test list to ensure that all the test files
will get run in the gate.
Change-Id: Ia0a5831810d04f2201bd856039362b4a30f39319
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import re
import subprocess
from tempest.tests import base
class TestTestList(base.TestCase):
def test_no_import_errors(self):
import_failures = []
p = subprocess.Popen(['testr', 'list-tests'], stdout=subprocess.PIPE)
ids = p.stdout.read()
ids = ids.split('\n')
for test_id in ids:
if re.match('(\w+\.){3}\w+', test_id):
if not test_id.startswith('tempest.'):
fail_id = test_id.split('unittest.loader.ModuleImport'
'Failure.')[1]
import_failures.append(fail_id)
error_message = ("The following tests have import failures and aren't"
" being run with test filters %s" % import_failures)
self.assertFalse(import_failures, error_message)
|
<commit_before><commit_msg>Add tempest unit test to verify the test list
If there is an error in a test file it could potentially not get run
in the tox jobs used for gating. This commit adds a test which looks
for ImportFailures in the test list to ensure that all the test files
will get run in the gate.
Change-Id: Ia0a5831810d04f2201bd856039362b4a30f39319<commit_after>
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import re
import subprocess
from tempest.tests import base
class TestTestList(base.TestCase):
def test_no_import_errors(self):
import_failures = []
p = subprocess.Popen(['testr', 'list-tests'], stdout=subprocess.PIPE)
ids = p.stdout.read()
ids = ids.split('\n')
for test_id in ids:
if re.match('(\w+\.){3}\w+', test_id):
if not test_id.startswith('tempest.'):
fail_id = test_id.split('unittest.loader.ModuleImport'
'Failure.')[1]
import_failures.append(fail_id)
error_message = ("The following tests have import failures and aren't"
" being run with test filters %s" % import_failures)
self.assertFalse(import_failures, error_message)
|
Add tempest unit test to verify the test list
If there is an error in a test file it could potentially not get run
in the tox jobs used for gating. This commit adds a test which looks
for ImportFailures in the test list to ensure that all the test files
will get run in the gate.
Change-Id: Ia0a5831810d04f2201bd856039362b4a30f39319# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import re
import subprocess
from tempest.tests import base
class TestTestList(base.TestCase):
def test_no_import_errors(self):
import_failures = []
p = subprocess.Popen(['testr', 'list-tests'], stdout=subprocess.PIPE)
ids = p.stdout.read()
ids = ids.split('\n')
for test_id in ids:
if re.match('(\w+\.){3}\w+', test_id):
if not test_id.startswith('tempest.'):
fail_id = test_id.split('unittest.loader.ModuleImport'
'Failure.')[1]
import_failures.append(fail_id)
error_message = ("The following tests have import failures and aren't"
" being run with test filters %s" % import_failures)
self.assertFalse(import_failures, error_message)
|
<commit_before><commit_msg>Add tempest unit test to verify the test list
If there is an error in a test file it could potentially not get run
in the tox jobs used for gating. This commit adds a test which looks
for ImportFailures in the test list to ensure that all the test files
will get run in the gate.
Change-Id: Ia0a5831810d04f2201bd856039362b4a30f39319<commit_after># vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import re
import subprocess
from tempest.tests import base
class TestTestList(base.TestCase):
def test_no_import_errors(self):
import_failures = []
p = subprocess.Popen(['testr', 'list-tests'], stdout=subprocess.PIPE)
ids = p.stdout.read()
ids = ids.split('\n')
for test_id in ids:
if re.match('(\w+\.){3}\w+', test_id):
if not test_id.startswith('tempest.'):
fail_id = test_id.split('unittest.loader.ModuleImport'
'Failure.')[1]
import_failures.append(fail_id)
error_message = ("The following tests have import failures and aren't"
" being run with test filters %s" % import_failures)
self.assertFalse(import_failures, error_message)
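Editorial note: a behavior-equivalent sketch of the same check using a raw-string pattern and subprocess.check_output (assuming Python 2, which this record targets, where check_output returns str):

import re
import subprocess

def list_import_failures():
    # Test ids that failed to import, per `testr list-tests`.
    output = subprocess.check_output(['testr', 'list-tests'])
    failures = []
    for test_id in output.split('\n'):
        if re.match(r'(\w+\.){3}\w+', test_id) and not test_id.startswith('tempest.'):
            failures.append(test_id.split('unittest.loader.ModuleImportFailure.')[1])
    return failures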
|
|
0aee93c8730625ea0efa5a6ff1e48b29f0161dd5
|
buhmm/tests/test_misc.py
|
buhmm/tests/test_misc.py
|
from nose.tools import *
import numpy as np
import buhmm.misc as module
def test_logspace_int_smoke():
# Smoke test
x = module.logspace_int(2000, 50)
y = np.array([
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 11,
13, 14, 17, 19, 22, 25, 29, 33, 38, 43, 49,
56, 65, 74, 84, 96, 110, 125, 143, 164, 187, 213,
243, 277, 316, 361, 412, 470, 536, 612, 698, 796, 908,
1035, 1181, 1347, 1537, 1753, 1999])
np.testing.assert_allclose(x, y)
def test_logspace_int_corner():
# Corner cases
x = module.logspace_int(2000, 0)
np.testing.assert_allclose(x, [])
x = module.logspace_int(2000, 1)
np.testing.assert_allclose(x, [0])
# Must have nonzero limit
assert_raises(Exception, module.logspace_int, 0)
# Not enough integers
assert_raises(Exception, module.logspace_int, 3, 10)
|
Add unit tests for logspace_int.
|
Add unit tests for logspace_int.
|
Python
|
mit
|
chebee7i/buhmm
|
Add unit tests for logspace_int.
|
from nose.tools import *
import numpy as np
import buhmm.misc as module
def test_logspace_int_smoke():
# Smoke test
x = module.logspace_int(2000, 50)
y = np.array([
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 11,
13, 14, 17, 19, 22, 25, 29, 33, 38, 43, 49,
56, 65, 74, 84, 96, 110, 125, 143, 164, 187, 213,
243, 277, 316, 361, 412, 470, 536, 612, 698, 796, 908,
1035, 1181, 1347, 1537, 1753, 1999])
np.testing.assert_allclose(x, y)
def test_logspace_int_corner():
# Corner cases
x = module.logspace_int(2000, 0)
np.testing.assert_allclose(x, [])
x = module.logspace_int(2000, 1)
np.testing.assert_allclose(x, [0])
# Must have nonzero limit
assert_raises(Exception, module.logspace_int, 0)
# Not enough integers
assert_raises(Exception, module.logspace_int, 3, 10)
|
<commit_before><commit_msg>Add unit tests for logspace_int.<commit_after>
|
from nose.tools import *
import numpy as np
import buhmm.misc as module
def test_logspace_int_smoke():
# Smoke test
x = module.logspace_int(2000, 50)
y = np.array([
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 11,
13, 14, 17, 19, 22, 25, 29, 33, 38, 43, 49,
56, 65, 74, 84, 96, 110, 125, 143, 164, 187, 213,
243, 277, 316, 361, 412, 470, 536, 612, 698, 796, 908,
1035, 1181, 1347, 1537, 1753, 1999])
np.testing.assert_allclose(x, y)
def test_logspace_int_corner():
# Corner cases
x = module.logspace_int(2000, 0)
np.testing.assert_allclose(x, [])
x = module.logspace_int(2000, 1)
np.testing.assert_allclose(x, [0])
# Must have nonzero limit
assert_raises(Exception, module.logspace_int, 0)
# Not enough integers
assert_raises(Exception, module.logspace_int, 3, 10)
|
Add unit tests for logspace_int.from nose.tools import *
import numpy as np
import buhmm.misc as module
def test_logspace_int_smoke():
# Smoke test
x = module.logspace_int(2000, 50)
y = np.array([
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 11,
13, 14, 17, 19, 22, 25, 29, 33, 38, 43, 49,
56, 65, 74, 84, 96, 110, 125, 143, 164, 187, 213,
243, 277, 316, 361, 412, 470, 536, 612, 698, 796, 908,
1035, 1181, 1347, 1537, 1753, 1999])
np.testing.assert_allclose(x, y)
def test_logspace_int_corner():
# Corner cases
x = module.logspace_int(2000, 0)
np.testing.assert_allclose(x, [])
x = module.logspace_int(2000, 1)
np.testing.assert_allclose(x, [0])
# Must have nonzero limit
assert_raises(Exception, module.logspace_int, 0)
# Not enough integers
assert_raises(Exception, module.logspace_int, 3, 10)
|
<commit_before><commit_msg>Add unit tests for logspace_int.<commit_after>from nose.tools import *
import numpy as np
import buhmm.misc as module
def test_logspace_int_smoke():
# Smoke test
x = module.logspace_int(2000, 50)
y = np.array([
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 11,
13, 14, 17, 19, 22, 25, 29, 33, 38, 43, 49,
56, 65, 74, 84, 96, 110, 125, 143, 164, 187, 213,
243, 277, 316, 361, 412, 470, 536, 612, 698, 796, 908,
1035, 1181, 1347, 1537, 1753, 1999])
np.testing.assert_allclose(x, y)
def test_logspace_int_corner():
# Corner cases
x = module.logspace_int(2000, 0)
np.testing.assert_allclose(x, [])
x = module.logspace_int(2000, 1)
np.testing.assert_allclose(x, [0])
# Must have nonzero limit
assert_raises(Exception, module.logspace_int, 0)
# Not enough integers
assert_raises(Exception, module.logspace_int, 3, 10)
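Editorial note: module.logspace_int itself is not included in this record. The following is only a sketch consistent with the tested corner cases (it bumps collisions upward to enforce uniqueness, and is not guaranteed to reproduce the exact values in the smoke test):

import numpy as np

def logspace_int(limit, num=50):
    """Return num unique, roughly log-spaced integers in [0, limit)."""
    if limit <= 0:
        raise ValueError('limit must be positive')
    if num == 0:
        return np.array([], dtype=int)
    if num > limit:
        raise ValueError('not enough integers below limit')
    points = np.logspace(0, np.log10(limit), num=num)
    values = []
    previous = -1
    for p in points:
        value = max(int(round(p)) - 1, previous + 1)  # shift to 0-based, skip duplicates
        values.append(value)
        previous = value
    return np.array(values)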
|
|
9a811ffcd7bb743b4c739667187d31f32429338a
|
tests/test_exception_wrapping.py
|
tests/test_exception_wrapping.py
|
import safe
def test_simple_exception():
class MockResponse(object):
def json(self):
return {'status': False,
'method': 'synchronize',
'module': 'cluster',
'error': {'message': 'Example error'}}
exception = safe.library.raise_from_json(MockResponse())
assert str(exception) == 'Example error'
|
Add a very simple test
|
Add a very simple test
|
Python
|
mpl-2.0
|
sangoma/safepy2,leonardolang/safepy2
|
Add a very simple test
|
import safe
def test_simple_exception():
class MockResponse(object):
def json(self):
return {'status': False,
'method': 'synchronize',
'module': 'cluster',
'error': {'message': 'Example error'}}
exception = safe.library.raise_from_json(MockResponse())
assert str(exception) == 'Example error'
|
<commit_before><commit_msg>Add a very simple test<commit_after>
|
import safe
def test_simple_exception():
class MockResponse(object):
def json(self):
return {'status': False,
'method': 'synchronize',
'module': 'cluster',
'error': {'message': 'Example error'}}
exception = safe.library.raise_from_json(MockResponse())
assert str(exception) == 'Example error'
|
Add a very simple testimport safe
def test_simple_exception():
class MockResponse(object):
def json(self):
return {'status': False,
'method': 'synchronize',
'module': 'cluster',
'error': {'message': 'Example error'}}
exception = safe.library.raise_from_json(MockResponse())
assert str(exception) == 'Example error'
|
<commit_before><commit_msg>Add a very simple test<commit_after>import safe
def test_simple_exception():
class MockResponse(object):
def json(self):
return {'status': False,
'method': 'synchronize',
'module': 'cluster',
'error': {'message': 'Example error'}}
exception = safe.library.raise_from_json(MockResponse())
assert str(exception) == 'Example error'
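Editorial note: safe.library.raise_from_json is exercised but not shown here. A sketch of a compatible shape — the SafeError name is an assumption, only the payload layout comes from the test. Note the test assigns the return value, so despite the name the sketch returns the exception rather than raising it:

class SafeError(Exception):
    """Hypothetical error type wrapping the API's error message."""

def raise_from_json(response):
    payload = response.json()
    if not payload.get('status'):
        return SafeError(payload['error']['message'])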
|
|
8ef0f8058854b2ef55d2d42bbe84487a9aadae12
|
.ycm_extra_conf.py
|
.ycm_extra_conf.py
|
def FlagsForFile(filename, **kwargs):
return {
'flags': [
'-x', 'c',
'-g', '-Wall', '-Wextra',
'-D_REENTRANT', '-D__NO_MATH_INLINES', '-fsigned-char'
],
}
|
Add build flags for YouCompleteMe.
|
Add build flags for YouCompleteMe.
Add a .ycm_extra_conf.py script to return the same CFLAGS
we pass for `make debug`. These are passed to libclang
so symbol lookup works correctly.
Note this doesn't pick up changes to the build config,
including non-default locations for the ogg headers,
but it's better than nothing.
|
Python
|
bsd-3-clause
|
ShiftMediaProject/vorbis,ShiftMediaProject/vorbis,ShiftMediaProject/vorbis,ShiftMediaProject/vorbis,ShiftMediaProject/vorbis,ShiftMediaProject/vorbis
|
Add build flags for YouCompleteMe.
Add a .ycm_extra_conf.py script to return the same CFLAGS
we pass for `make debug`. These are passed to libclang
so symbol lookup works correctly.
Note this doesn't pick up changes to the build config,
including non-default locations for the ogg headers,
but it's better than nothing.
|
def FlagsForFile(filename, **kwargs):
return {
'flags': [
'-x', 'c',
'-g', '-Wall', '-Wextra',
'-D_REENTRANT', '-D__NO_MATH_INLINES', '-fsigned-char'
],
}
|
<commit_before><commit_msg>Add build flags for YouCompleteMe.
Add a .ycm_extra_conf.py script to return the same CFLAGS
we pass for `make debug`. These are passed to libclang
so symbol lookup works correctly.
Note this doesn't pick up changes to the build config,
including non-default locations for the ogg headers,
but it's better than nothing.<commit_after>
|
def FlagsForFile(filename, **kwargs):
return {
'flags': [
'-x', 'c',
'-g', '-Wall', '-Wextra',
'-D_REENTRANT', '-D__NO_MATH_INLINES', '-fsigned-char'
],
}
|
Add build flags for YouCompleteMe.
Add a .ycm_extra_conf.py script to return the same CFLAGS
we pass for `make debug`. These are passed to libclang
so symbol lookup works correctly.
Note this doesn't pick up changes to the build config,
including non-default locations for the ogg headers,
but it's better than nothing.def FlagsForFile(filename, **kwargs):
return {
'flags': [
'-x', 'c',
'-g', '-Wall', '-Wextra',
'-D_REENTRANT', '-D__NO_MATH_INLINES', '-fsigned-char'
],
}
|
<commit_before><commit_msg>Add build flags for YouCompleteMe.
Add a .ycm_extra_conf.py script to return the same CFLAGS
we pass for `make debug`. These are passed to libclang
so symbol lookup works correctly.
Note this doesn't pick up changes to the build config,
including non-default locations for the ogg headers,
but it's better than nothing.<commit_after>def FlagsForFile(filename, **kwargs):
return {
'flags': [
'-x', 'c',
'-g', '-Wall', '-Wextra',
'-D_REENTRANT', '-D__NO_MATH_INLINES', '-fsigned-char'
],
}
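Editorial note: the commit message concedes these flags miss non-default ogg header locations. A sketch of the same hook with an include path added (the '/opt/ogg/include' path is a placeholder, not from the record):

def FlagsForFile(filename, **kwargs):
    return {
        'flags': [
            '-x', 'c',
            '-g', '-Wall', '-Wextra',
            '-D_REENTRANT', '-D__NO_MATH_INLINES', '-fsigned-char',
            '-I', '/opt/ogg/include',  # placeholder for a non-default ogg location
        ],
    }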
|
|
ae7a2a3809ebe54c13a9ced4da5bbd48cc8d0e3a
|
Apollonian/main.py
|
Apollonian/main.py
|
# Circle Inversion Fractals (Apollonian Gasket) (Escape-time Algorithm)
# FB36 - 20131031
import math
import random
from collections import deque
from PIL import Image
imgx = 512 * 2
imgy = 512 * 2
image = Image.new("RGB", (imgx, imgy))
pixels = image.load()
#n = random.randint(3, 6) # of main circles
n = 3
a = math.pi * 2.0 / n
r = math.sin(a) / math.sin((math.pi - a) / 2.0) / 2.0 # r of main circles
h = math.sqrt(1.0 - r * r)
xa = -h; xb = h; ya = -h; yb = h # viewing area
cx = [0.0]; cy = [0.0]; cr = [1.0 - r] # center circle
for i in range(n): # add main circles
cx.append(math.cos(a * i))
cy.append(math.sin(a * i))
cr.append(r)
maxIt = 128 # of iterations
for ky in range(imgy):
for kx in range(imgx):
x = float(kx) / (imgx - 1) * (xb - xa) + xa
y = float(ky) / (imgy - 1) * (yb - ya) + ya
queue = deque([])
queue.append((x, y, 0))
while len(queue) > 0: # iterate points until none left
(x, y, i) = queue.popleft()
for k in range(n + 1):
dx = x - cx[k]; dy = y - cy[k]
d = math.hypot(dx, dy)
if d <= cr[k]:
dx = dx / d; dy = dy / d
dnew = cr[k] ** 2.0 / d
xnew = dnew * dx + cx[k]
ynew = dnew * dy + cy[k]
if xnew >= xa and xnew <= xb and ynew >= ya and ynew <= yb:
if i + 1 == maxIt: break
queue.append((xnew, ynew, i + 1))
pixels[kx, ky] = (i % 16 * 16 , i % 8 * 32, i % 4 * 64)
image.save('result.png')
|
Add "Circle Inversion Fractals (Apollonian Gasket)"
|
Add "Circle Inversion Fractals (Apollonian Gasket)"
|
Python
|
mit
|
leovp/graphics
|
Add "Circle Inversion Fractals (Apollonian Gasket)"
|
# Circle Inversion Fractals (Apollonian Gasket) (Escape-time Algorithm)
# FB36 - 20131031
import math
import random
from collections import deque
from PIL import Image
imgx = 512 * 2
imgy = 512 * 2
image = Image.new("RGB", (imgx, imgy))
pixels = image.load()
#n = random.randint(3, 6) # of main circles
n = 3
a = math.pi * 2.0 / n
r = math.sin(a) / math.sin((math.pi - a) / 2.0) / 2.0 # r of main circles
h = math.sqrt(1.0 - r * r)
xa = -h; xb = h; ya = -h; yb = h # viewing area
cx = [0.0]; cy = [0.0]; cr = [1.0 - r] # center circle
for i in range(n): # add main circles
cx.append(math.cos(a * i))
cy.append(math.sin(a * i))
cr.append(r)
maxIt = 128 # of iterations
for ky in range(imgy):
for kx in range(imgx):
x = float(kx) / (imgx - 1) * (xb - xa) + xa
y = float(ky) / (imgy - 1) * (yb - ya) + ya
queue = deque([])
queue.append((x, y, 0))
while len(queue) > 0: # iterate points until none left
(x, y, i) = queue.popleft()
for k in range(n + 1):
dx = x - cx[k]; dy = y - cy[k]
d = math.hypot(dx, dy)
if d <= cr[k]:
dx = dx / d; dy = dy / d
dnew = cr[k] ** 2.0 / d
xnew = dnew * dx + cx[k]
ynew = dnew * dy + cy[k]
if xnew >= xa and xnew <= xb and ynew >= ya and ynew <= yb:
if i + 1 == maxIt: break
queue.append((xnew, ynew, i + 1))
pixels[kx, ky] = (i % 16 * 16 , i % 8 * 32, i % 4 * 64)
image.save('result.png')
|
<commit_before><commit_msg>Add "Circle Inversion Fractals (Apollonian Gasket)"<commit_after>
|
# Circle Inversion Fractals (Apollonian Gasket) (Escape-time Algorithm)
# FB36 - 20131031
import math
import random
from collections import deque
from PIL import Image
imgx = 512 * 2
imgy = 512 * 2
image = Image.new("RGB", (imgx, imgy))
pixels = image.load()
#n = random.randint(3, 6) # of main circles
n = 3
a = math.pi * 2.0 / n
r = math.sin(a) / math.sin((math.pi - a) / 2.0) / 2.0 # r of main circles
h = math.sqrt(1.0 - r * r)
xa = -h; xb = h; ya = -h; yb = h # viewing area
cx = [0.0]; cy = [0.0]; cr = [1.0 - r] # center circle
for i in range(n): # add main circles
cx.append(math.cos(a * i))
cy.append(math.sin(a * i))
cr.append(r)
maxIt = 128 # of iterations
for ky in range(imgy):
for kx in range(imgx):
x = float(kx) / (imgx - 1) * (xb - xa) + xa
y = float(ky) / (imgy - 1) * (yb - ya) + ya
queue = deque([])
queue.append((x, y, 0))
while len(queue) > 0: # iterate points until none left
(x, y, i) = queue.popleft()
for k in range(n + 1):
dx = x - cx[k]; dy = y - cy[k]
d = math.hypot(dx, dy)
if d <= cr[k]:
dx = dx / d; dy = dy / d
dnew = cr[k] ** 2.0 / d
xnew = dnew * dx + cx[k]
ynew = dnew * dy + cy[k]
if xnew >= xa and xnew <= xb and ynew >= ya and ynew <= yb:
if i + 1 == maxIt: break
queue.append((xnew, ynew, i + 1))
pixels[kx, ky] = (i % 16 * 16 , i % 8 * 32, i % 4 * 64)
image.save('result.png')
|
Add "Circle Inversion Fractals (Apollonian Gasket)"# Circle Inversion Fractals (Apollonian Gasket) (Escape-time Algorithm)
# FB36 - 20131031
import math
import random
from collections import deque
from PIL import Image
imgx = 512 * 2
imgy = 512 * 2
image = Image.new("RGB", (imgx, imgy))
pixels = image.load()
#n = random.randint(3, 6) # of main circles
n = 3
a = math.pi * 2.0 / n
r = math.sin(a) / math.sin((math.pi - a) / 2.0) / 2.0 # r of main circles
h = math.sqrt(1.0 - r * r)
xa = -h; xb = h; ya = -h; yb = h # viewing area
cx = [0.0]; cy = [0.0]; cr = [1.0 - r] # center circle
for i in range(n): # add main circles
cx.append(math.cos(a * i))
cy.append(math.sin(a * i))
cr.append(r)
maxIt = 128 # of iterations
for ky in range(imgy):
for kx in range(imgx):
x = float(kx) / (imgx - 1) * (xb - xa) + xa
y = float(ky) / (imgy - 1) * (yb - ya) + ya
queue = deque([])
queue.append((x, y, 0))
while len(queue) > 0: # iterate points until none left
(x, y, i) = queue.popleft()
for k in range(n + 1):
dx = x - cx[k]; dy = y - cy[k]
d = math.hypot(dx, dy)
if d <= cr[k]:
dx = dx / d; dy = dy / d
dnew = cr[k] ** 2.0 / d
xnew = dnew * dx + cx[k]
ynew = dnew * dy + cy[k]
if xnew >= xa and xnew <= xb and ynew >= ya and ynew <= yb:
if i + 1 == maxIt: break
queue.append((xnew, ynew, i + 1))
pixels[kx, ky] = (i % 16 * 16 , i % 8 * 32, i % 4 * 64)
image.save('result.png')
|
<commit_before><commit_msg>Add "Circle Inversion Fractals (Apollonian Gasket)"<commit_after># Circle Inversion Fractals (Apollonian Gasket) (Escape-time Algorithm)
# FB36 - 20131031
import math
import random
from collections import deque
from PIL import Image
imgx = 512 * 2
imgy = 512 * 2
image = Image.new("RGB", (imgx, imgy))
pixels = image.load()
#n = random.randint(3, 6) # of main circles
n = 3
a = math.pi * 2.0 / n
r = math.sin(a) / math.sin((math.pi - a) / 2.0) / 2.0 # r of main circles
h = math.sqrt(1.0 - r * r)
xa = -h; xb = h; ya = -h; yb = h # viewing area
cx = [0.0]; cy = [0.0]; cr = [1.0 - r] # center circle
for i in range(n): # add main circles
cx.append(math.cos(a * i))
cy.append(math.sin(a * i))
cr.append(r)
maxIt = 128 # of iterations
for ky in range(imgy):
for kx in range(imgx):
x = float(kx) / (imgx - 1) * (xb - xa) + xa
y = float(ky) / (imgy - 1) * (yb - ya) + ya
queue = deque([])
queue.append((x, y, 0))
while len(queue) > 0: # iterate points until none left
(x, y, i) = queue.popleft()
for k in range(n + 1):
dx = x - cx[k]; dy = y - cy[k]
d = math.hypot(dx, dy)
if d <= cr[k]:
dx = dx / d; dy = dy / d
dnew = cr[k] ** 2.0 / d
xnew = dnew * dx + cx[k]
ynew = dnew * dy + cy[k]
if xnew >= xa and xnew <= xb and ynew >= ya and ynew <= yb:
if i + 1 == maxIt: break
queue.append((xnew, ynew, i + 1))
pixels[kx, ky] = (i % 16 * 16 , i % 8 * 32, i % 4 * 64)
image.save('result.png')
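Editorial note: the inner loop implements circle inversion. With circle center c, radius r, and d = |p - c|, the code's dnew = r^2 / d places the image of point p at

p' = c + \frac{r^{2}}{\lVert p - c \rVert^{2}}\,(p - c)

which is exactly xnew = dnew*dx + cx, ynew = dnew*dy + cy once (dx, dy) has been normalized by d.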
|
|
59ca3b5e97e2186f439f3f2fc82259ba56a3b78f
|
numpy/typing/tests/data/pass/modules.py
|
numpy/typing/tests/data/pass/modules.py
|
import numpy as np
np.char
np.ctypeslib
np.emath
np.fft
np.lib
np.linalg
np.ma
np.matrixlib
np.polynomial
np.random
np.rec
np.testing
np.version
np.__all__
np.__path__
np.__version__
np.__git_version__
np.__NUMPY_SETUP__
np.__deprecated_attrs__
np.__expired_functions__
|
Add module-based tests to the `pass` tests
|
TST: Add module-based tests to the `pass` tests
|
Python
|
bsd-3-clause
|
endolith/numpy,jakirkham/numpy,anntzer/numpy,simongibbons/numpy,anntzer/numpy,numpy/numpy,numpy/numpy,seberg/numpy,mattip/numpy,charris/numpy,seberg/numpy,jakirkham/numpy,simongibbons/numpy,mhvk/numpy,mhvk/numpy,pdebuyl/numpy,pbrod/numpy,pbrod/numpy,pdebuyl/numpy,endolith/numpy,seberg/numpy,pdebuyl/numpy,charris/numpy,mhvk/numpy,numpy/numpy,mhvk/numpy,rgommers/numpy,madphysicist/numpy,charris/numpy,mhvk/numpy,mattip/numpy,madphysicist/numpy,pbrod/numpy,madphysicist/numpy,anntzer/numpy,madphysicist/numpy,pbrod/numpy,numpy/numpy,rgommers/numpy,simongibbons/numpy,rgommers/numpy,endolith/numpy,anntzer/numpy,madphysicist/numpy,pbrod/numpy,pdebuyl/numpy,simongibbons/numpy,simongibbons/numpy,mattip/numpy,jakirkham/numpy,jakirkham/numpy,seberg/numpy,jakirkham/numpy,endolith/numpy,charris/numpy,mattip/numpy,rgommers/numpy
|
TST: Add module-based tests to the `pass` tests
|
import numpy as np
np.char
np.ctypeslib
np.emath
np.fft
np.lib
np.linalg
np.ma
np.matrixlib
np.polynomial
np.random
np.rec
np.testing
np.version
np.__all__
np.__path__
np.__version__
np.__git_version__
np.__NUMPY_SETUP__
np.__deprecated_attrs__
np.__expired_functions__
|
<commit_before><commit_msg>TST: Add module-based tests to the `pass` tests<commit_after>
|
import numpy as np
np.char
np.ctypeslib
np.emath
np.fft
np.lib
np.linalg
np.ma
np.matrixlib
np.polynomial
np.random
np.rec
np.testing
np.version
np.__all__
np.__path__
np.__version__
np.__git_version__
np.__NUMPY_SETUP__
np.__deprecated_attrs__
np.__expired_functions__
|
TST: Add module-based tests to the `pass` testsimport numpy as np
np.char
np.ctypeslib
np.emath
np.fft
np.lib
np.linalg
np.ma
np.matrixlib
np.polynomial
np.random
np.rec
np.testing
np.version
np.__all__
np.__path__
np.__version__
np.__git_version__
np.__NUMPY_SETUP__
np.__deprecated_attrs__
np.__expired_functions__
|
<commit_before><commit_msg>TST: Add module-based tests to the `pass` tests<commit_after>import numpy as np
np.char
np.ctypeslib
np.emath
np.fft
np.lib
np.linalg
np.ma
np.matrixlib
np.polynomial
np.random
np.rec
np.testing
np.version
np.__all__
np.__path__
np.__version__
np.__git_version__
np.__NUMPY_SETUP__
np.__deprecated_attrs__
np.__expired_functions__
|
|
8ff7becc414e2969cf89468c6af95c0356abebae
|
src/axe-shrink.py
|
src/axe-shrink.py
|
#!/usr/bin/env python
# Given an axe trace that fails a given consistency model, try to find
# the smallest subset of the trace that also fails the model. This
# makes it easier to determine *why* a trace does not satisfy a model.
# This simple shrinker is only really effective when each store of a
# value to an address appears textually before the loads of that value
# from that address. Fortunately, this property is quite likely for
# real hardware trace-generators (but sadly not for the random traces
# present in the 'tests' directory). This shrinker is also rather slow

# for large traces.
import subprocess
import sys
# Check args
if len(sys.argv) != 3:
print "Usage: axe-shrink.py [MODEL] [FILE]"
sys.exit()
# Open trace
f = open(sys.argv[2], 'r')
if f == None:
print "File not found: ", sys.argv[2]
sys.exit()
# Read trace
lines = []
omit = []
omitted = 0
for line in f:
lines.append(line)
omit.append(False)
# Play a trace to axe: return true if axe responds 'NO';
# otherwise return false.
def play():
try:
p = subprocess.Popen(['axe', 'check', sys.argv[1], '-'],
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
for i in range(0, len(lines)):
if not omit[i]: p.stdin.write(lines[i] + '\n')
p.stdin.close()
out = p.stdout.read()
if out == "NO\n": return True
else: return False
except subprocess.CalledProcessError:
return False
# Simple shrinking procedure
def shrink():
global omit, omitted
total = len(lines)
for i in reversed(range(0, len(lines))):
print >> sys.stderr, "Omitted", omitted, "of", total, " \r",
if not omit[i]:
omit[i] = True
if not play(): omit[i] = False
else: omitted = omitted + 1
# Shrink until fixed-point
def shrinkFixedPoint():
count = 0
while True:
before = omitted
print >> sys.stderr, "Pass", count
shrink()
print >> sys.stderr
after = omitted
count = count+1
if before == after: break
# Shrink and print trace
shrinkFixedPoint()
sys.stderr.flush()
for i in range(0, len(lines)):
if not omit[i]: print lines[i],
|
Add a rudimentary trace shrinker, implemented in python
|
Add a rudimentary trace shrinker, implemented in python
|
Python
|
apache-2.0
|
CTSRD-CHERI/axe,CTSRD-CHERI/axe,CTSRD-CHERI/axe,CTSRD-CHERI/axe
|
Add a rudimentary trace shrinker, implemented in python
|
#!/usr/bin/env python
# Given an axe trace that fails a given consistency model, try to find
# the smallest subset of the trace that also fails the model. This
# makes it easier to determine *why* a trace does not satisfy a model.
# This simple shrinker is only really effective when each store of a
# value to an address appears textually before the loads of that value
# from that address. Fortunately, this property is quite likely for
# real hardware trace-generators (but sadly not for the random traces
# present in the 'tests' directory). This shrinker is also rather slow
# for large traces.
import subprocess
import sys
# Check args
if len(sys.argv) != 3:
print "Usage: axe-shrink.py [MODEL] [FILE]"
sys.exit()
# Open trace
f = open(sys.argv[2], 'r')
if f == None:
print "File not found: ", sys.argv[2]
sys.exit()
# Read trace
lines = []
omit = []
omitted = 0
for line in f:
lines.append(line)
omit.append(False)
# Play a trace to axe: return true if axe responds 'NO';
# otherwise return false.
def play():
try:
p = subprocess.Popen(['axe', 'check', sys.argv[1], '-'],
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
for i in range(0, len(lines)):
if not omit[i]: p.stdin.write(lines[i] + '\n')
p.stdin.close()
out = p.stdout.read()
if out == "NO\n": return True
else: return False
except subprocess.CalledProcessError:
return False
# Simple shrinking procedure
def shrink():
global omit, omitted
total = len(lines)
for i in reversed(range(0, len(lines))):
print >> sys.stderr, "Omitted", omitted, "of", total, " \r",
if not omit[i]:
omit[i] = True
if not play(): omit[i] = False
else: omitted = omitted + 1
# Shrink until fixed-point
def shrinkFixedPoint():
count = 0
while True:
before = omitted
print >> sys.stderr, "Pass", count
shrink()
print >> sys.stderr
after = omitted
count = count+1
if before == after: break
# Shrink and print trace
shrinkFixedPoint()
sys.stderr.flush()
for i in range(0, len(lines)):
if not omit[i]: print lines[i],
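Editorial note: the pass above removes one line at a time and reruns to a fixed point, so large traces pay one oracle call per line per pass. A chunked reduction in the spirit of delta debugging (ddmin) usually needs far fewer calls; a sketch only, with a stand-in oracle in place of play():

def ddmin(lines, still_fails):
    """Shrink lines to a smaller list for which still_fails(lines) stays True."""
    chunk = len(lines) // 2
    while chunk >= 1:
        i = 0
        while i < len(lines):
            candidate = lines[:i] + lines[i + chunk:]
            if candidate and still_fails(candidate):
                lines = candidate  # keep the reduction, retry at the same index
            else:
                i += chunk
        chunk //= 2
    return lines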
|
<commit_before><commit_msg>Add a rudimentary trace shrinker, implemented in python<commit_after>
|
#!/usr/bin/env python
# Given an axe trace that fails a given consistency model, try to find
# the smallest subset of the trace that also fails the model. This
# makes it easier to determine *why* a trace does not satisfy a model.
# This simple shrinker is only really effective when each store of a
# value to an address appears textually before the loads of that value
# from that address. Fortunately, this property is quite likely for
# real hardware trace-generators (but sadly not for the random traces
# present in the 'tests' directory). This shrinker is also rather slow
# for large traces.
import subprocess
import sys
# Check args
if len(sys.argv) != 3:
print "Usage: axe-shrink.py [MODEL] [FILE]"
sys.exit()
# Open trace
f = open(sys.argv[2], 'r')
if f is None:
print "File not found: ", sys.argv[2]
sys.exit()
# Read trace
lines = []
omit = []
omitted = 0
for line in f:
lines.append(line)
omit.append(False)
# Play a trace to axe: return true if axe responds 'NO';
# otherwise return false.
def play():
try:
p = subprocess.Popen(['axe', 'check', sys.argv[1], '-'],
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
for i in range(0, len(lines)):
if not omit[i]: p.stdin.write(lines[i] + '\n')
p.stdin.close()
out = p.stdout.read()
if out == "NO\n": return True
else: return False
except subprocess.CalledProcessError:
return False
# Simple shrinking procedure
def shrink():
global omit, omitted
total = len(lines)
for i in reversed(range(0, len(lines))):
print >> sys.stderr, "Omitted", omitted, "of", total, " \r",
if not omit[i]:
omit[i] = True
if not play(): omit[i] = False
else: omitted = omitted + 1
# Shrink until fixed-point
def shrinkFixedPoint():
count = 0
while True:
before = omitted
print >> sys.stderr, "Pass", count
shrink()
print >> sys.stderr
after = omitted
count = count+1
if before == after: break
# Shrink and print trace
shrinkFixedPoint()
sys.stderr.flush()
for i in range(0, len(lines)):
if not omit[i]: print lines[i],
|
Add a rudimentary trace shrinker, implemented in python#!/usr/bin/env python
# Given an axe trace that fails a given consistency model, try to find
# the smallest subset of the trace that also fails the model. This
# makes it easier to determine *why* a trace does not satisfy a model.
# This simple shrinker is only really effective when each store of a
# value to an address appears textually before the loads of that value
# from that address. Fortunately, this property is quite likely for
# real hardware trace-generators (but sadly not for the random traces
# present in the 'tests' directory). This shrinker is also rather slow
# for large traces.
import subprocess
import sys
# Check args
if len(sys.argv) != 3:
print "Usage: axe-shrink.py [MODEL] [FILE]"
sys.exit()
# Open trace
f = open(sys.argv[2], 'r')
if f is None:
print "File not found: ", sys.argv[2]
sys.exit()
# Read trace
lines = []
omit = []
omitted = 0
for line in f:
lines.append(line)
omit.append(False)
# Play a trace to axe: return true if axe responds 'NO';
# otherwise return false.
def play():
try:
p = subprocess.Popen(['axe', 'check', sys.argv[1], '-'],
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
for i in range(0, len(lines)):
if not omit[i]: p.stdin.write(lines[i] + '\n')
p.stdin.close()
out = p.stdout.read()
if out == "NO\n": return True
else: return False
except subprocess.CalledProcessError:
return False
# Simple shrinking procedure
def shrink():
global omit, omitted
total = len(lines)
for i in reversed(range(0, len(lines))):
print >> sys.stderr, "Omitted", omitted, "of", total, " \r",
if not omit[i]:
omit[i] = True
if not play(): omit[i] = False
else: omitted = omitted + 1
# Shrink until fixed-point
def shrinkFixedPoint():
count = 0
while True:
before = omitted
print >> sys.stderr, "Pass", count
shrink()
print >> sys.stderr
after = omitted
count = count+1
if before == after: break
# Shrink and print trace
shrinkFixedPoint()
sys.stderr.flush()
for i in range(0, len(lines)):
if not omit[i]: print lines[i],
|
<commit_before><commit_msg>Add a rudimentary trace shrinker, implemented in python<commit_after>#!/usr/bin/env python
# Given an axe trace that fails a given consistency model, try to find
# the smallest subset of the trace that also fails the model. This
# makes it easier to determine *why* a trace does not satisfy a model.
# This simple shrinker is only really effective when each store of a
# value to an address appears textually before the loads of that value
# from that address. Fortunately, this property is quite likely for
# real hardware trace-generators (but sadly not for the random traces
# present in the 'tests' directory). This shrinker is also rather slow
# for large traces.
import subprocess
import sys
# Check args
if len(sys.argv) != 3:
print "Usage: axe-shrink.py [MODEL] [FILE]"
sys.exit()
# Open trace
f = open(sys.argv[2], 'r')
if f is None:
print "File not found: ", sys.argv[2]
sys.exit()
# Read trace
lines = []
omit = []
omitted = 0
for line in f:
lines.append(line)
omit.append(False)
# Play a trace to axe: return true if axe responds 'NO';
# otherwise return false.
def play():
try:
p = subprocess.Popen(['axe', 'check', sys.argv[1], '-'],
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
for i in range(0, len(lines)):
if not omit[i]: p.stdin.write(lines[i] + '\n')
p.stdin.close()
out = p.stdout.read()
if out == "NO\n": return True
else: return False
except subprocess.CalledProcessError:
return False
# Simple shrinking procedure
def shrink():
global omit, omitted
total = len(lines)
for i in reversed(range(0, len(lines))):
print >> sys.stderr, "Omitted", omitted, "of", total, " \r",
if not omit[i]:
omit[i] = True
if not play(): omit[i] = False
else: omitted = omitted + 1
# Shrink until fixed-point
def shrinkFixedPoint():
count = 0
while True:
before = omitted
print >> sys.stderr, "Pass", count
shrink()
print >> sys.stderr
after = omitted
count = count+1
if before == after: break
# Shrink and print trace
shrinkFixedPoint()
sys.stderr.flush()
for i in range(0, len(lines)):
if not omit[i]: print lines[i],
|
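The shrinking procedure above is a greedy delta-reduction run to a fixed point: omit one trace line at a time, and keep the omission only if axe still answers 'NO'. Below is a minimal, self-contained Python 3 sketch of that loop, with a stand-in predicate still_fails in place of the real axe subprocess call; the predicate, its trigger condition, and the sample trace lines are illustrative assumptions, not part of the dataset row above.

# Greedy fixed-point shrinker sketch (Python 3). The predicate
# still_fails stands in for piping the kept lines to `axe check`.
def still_fails(kept_lines):
    # Assumption: the failure reproduces whenever both of these
    # lines survive; a real predicate would invoke axe instead.
    return "M[0] := 1" in kept_lines and "M[0] == 2" in kept_lines

def shrink_fixed_point(lines):
    omit = [False] * len(lines)
    while True:
        removed_this_pass = 0
        for i in reversed(range(len(lines))):
            if omit[i]:
                continue
            omit[i] = True  # tentatively drop line i
            kept = [l for j, l in enumerate(lines) if not omit[j]]
            if still_fails(kept):
                removed_this_pass += 1  # line i was irrelevant
            else:
                omit[i] = False  # line i is needed to reproduce
        if removed_this_pass == 0:  # fixed point reached
            return [l for j, l in enumerate(lines) if not omit[j]]

trace = ["M[0] := 1", "M[1] := 7", "M[0] == 2", "M[1] == 7"]
print(shrink_fixed_point(trace))  # -> ['M[0] := 1', 'M[0] == 2']

On this sample the four-line trace shrinks to the two lines that reproduce the failure, mirroring how axe-shrink.py pares a failing trace down to a minimal witness.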