| column | type |
|---|---|
| commit | string, 40 chars |
| old_file | string, 4-118 chars |
| new_file | string, 4-118 chars |
| old_contents | string, 0-2.94k chars |
| new_contents | string, 1-4.43k chars |
| subject | string, 15-444 chars |
| message | string, 16-3.45k chars |
| lang | 1 class |
| license | 13 classes |
| repos | string, 5-43.2k chars |
| prompt | string, 17-4.58k chars |
| response | string, 1-4.43k chars |
| prompt_tagged | string, 58-4.62k chars |
| response_tagged | string, 1-4.43k chars |
| text | string, 132-7.29k chars |
| text_tagged | string, 173-7.33k chars |
d0786d711e7fa49afff973d27e0689c3e42ed575
|
ospan/csv_to_sesame.py
|
ospan/csv_to_sesame.py
|
import argparse
from collections import namedtuple
import csv
from itertools import groupby
class Loop(object):
def __init__(self, name, repeat=1, cycles=None, description="A Loop",
item="sequence", order="random", variable_rows=None):
        self.name = name
        self.repeat = repeat  # honor the argument instead of hardcoding 1
        self.variable_rows = variable_rows or []
        # Default to one cycle per row; use self.variable_rows so a None
        # argument doesn't crash len().
        self.cycles = cycles or len(self.variable_rows)
self.description = description
self.item = item
self.order = order
def __str__(self):
column_order = ";".join(variable.name for variable
in self.variable_rows[0])
s = """define loop {name}
\tset repeat "{repeat}"
\tset description "{description}"
\tset item "{item}"
\tset column_order "{column_order}"
\tset cycles "{cycles}"
\tset order "{order}"
""".format(
name=self.name,
repeat=self.repeat,
description=self.description,
item=self.item,
column_order=column_order,
cycles=self.cycles,
order=self.order)
# Now print each variable
for c, variable_row in enumerate(self.variable_rows):
for variable in variable_row:
s += '\tsetcycle {cycle} {name} "{value}"\n'.format(
cycle=c, name=variable.name, value=variable.value)
s += "\trun {item}\n".format(item=self.item)
return s
Variable = namedtuple("Variable", ["name", "value"])
def line_to_variables(line):
    # Unused placeholder; row parsing currently happens inline in run().
    pass
def run(infile, outfile):
    # Open the input in a context manager too, so the handle is closed.
    with open(infile, 'r') as csv_in, open(outfile, 'w') as out:
        csv_file = csv.DictReader(csv_in)
        # groupby only merges *consecutive* rows, so the CSV must already
        # be sorted (or at least grouped) by Loop_Name.
        for (loop_name, group) in groupby(
                csv_file, lambda x: x['Loop_Name']):
variable_rows = []
for row in group:
variable_rows.append([Variable(name=key, value=val)
for (key, val) in row.items()
if key != "Loop_Name"])
loop = Loop(loop_name, variable_rows=variable_rows)
out.write(str(loop))
out.write("\n")
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description='Convert a CSV file to loops.')
parser.add_argument('infile',
help=("The csv file to parse. The csv file should "
"have a column named 'Loop_Name' which will "
"be used to name the loop."))
parser.add_argument('outfile',
help="The file to write to")
args = parser.parse_args()
run(args.infile, args.outfile)
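
A minimal usage sketch, assuming a small input file (the file names, column names, and values below are hypothetical, not part of the commit):

# Build a tiny CSV the script can consume, then convert it.
import csv

rows = [
    {"Loop_Name": "trial_loop", "word": "cat", "color": "red"},
    {"Loop_Name": "trial_loop", "word": "dog", "color": "blue"},
]
with open("trials.csv", "w", newline="") as f:
    writer = csv.DictWriter(f, fieldnames=["Loop_Name", "word", "color"])
    writer.writeheader()
    writer.writerows(rows)

# python csv_to_sesame.py trials.csv trials.os
# should then emit one "define loop trial_loop" block containing
# setcycle 0/1 lines for word and color, followed by "run sequence".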
|
Add script to turn a CSV file into OpenSesame style Loops
|
Add script to turn a CSV file into OpenSesame style Loops
|
Python
|
mit
|
sbuss/operation-span-test
|
2adcb39fa52e4be6dd800a56747f29ed6d76d9be
|
corpus_curl_opt_http_auth.py
|
corpus_curl_opt_http_auth.py
|
#!/usr/bin/env python
# Allow using strings to represent CURLOPT_HTTPAUTH values when
# generating corpus files. The original defines live in curl.h.
from enum import Enum
class CurlOptHttpAuth(Enum):
#define CURLAUTH_NONE ((unsigned long)0)
CURLAUTH_NONE = 0
#define CURLAUTH_BASIC (((unsigned long)1)<<0)
CURLAUTH_BASIC = 1
#define CURLAUTH_DIGEST (((unsigned long)1)<<1)
CURLAUTH_DIGEST = 2
#define CURLAUTH_NEGOTIATE (((unsigned long)1)<<2)
CURLAUTH_NEGOTIATE = 4
#define CURLAUTH_NTLM (((unsigned long)1)<<3)
CURLAUTH_NTLM = 8
#define CURLAUTH_DIGEST_IE (((unsigned long)1)<<4)
CURLAUTH_DIGEST_IE = 16
#define CURLAUTH_NTLM_WB (((unsigned long)1)<<5)
CURLAUTH_NTLM_WB = 32
#define CURLAUTH_BEARER (((unsigned long)1)<<6)
CURLAUTH_BEARER = 64
#define CURLAUTH_AWS_SIGV4 (((unsigned long)1)<<7)
CURLAUTH_AWS_SIGV4 = 128
#define CURLAUTH_ONLY (((unsigned long)1)<<31)
CURLAUTH_ONLY = 2147483648
#define CURLAUTH_ANY (~CURLAUTH_DIGEST_IE)
CURLAUTH_ANY = 4294967279
#define CURLAUTH_ANYSAFE (~(CURLAUTH_BASIC|CURLAUTH_DIGEST_IE))
CURLAUTH_ANYSAFE = 4294967278
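
A small sketch of the string-to-value lookup the enum enables (the lookups are standard Enum behavior; the bitmask combination is an assumption about how corpus generators might use it):

# Look a value up by its curl.h name, as a corpus generator would.
auth = CurlOptHttpAuth["CURLAUTH_DIGEST"]
print(auth.value)  # 2

# The underlying values are still plain bitmask integers.
combined = CurlOptHttpAuth.CURLAUTH_BASIC.value | CurlOptHttpAuth.CURLAUTH_NTLM.value
print(combined)  # 9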
|
Add enum for HTTP auth strings
|
corpus: Add enum for HTTP auth strings
Taken from curl/curl.h and converted to Python, for convenience when
writing corpus seed cases.
|
Python
|
mit
|
curl/curl-fuzzer,curl/curl-fuzzer,curl/curl-fuzzer,curl/curl-fuzzer
|
696c1016284a4930f9df5379a5e01abce21df72c
|
InvenTree/company/migrations/0020_auto_20200413_0839.py
|
InvenTree/company/migrations/0020_auto_20200413_0839.py
|
# Generated by Django 2.2.10 on 2020-04-13 08:39
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('company', '0019_auto_20200413_0642'),
]
operations = [
migrations.AlterField(
model_name='supplierpart',
name='supplier',
field=models.ForeignKey(help_text='Select supplier', limit_choices_to={'is_supplier': True}, on_delete=django.db.models.deletion.CASCADE, related_name='supplied_parts', to='company.Company'),
),
]
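
For context, a hedged sketch of what the new related_name exposes; only the model and field names come from the migration, and the query assumes a populated database:

# With related_name='supplied_parts', the reverse relation from Company
# to SupplierPart reads naturally:
#
#   company = Company.objects.filter(is_supplier=True).first()
#   parts = company.supplied_parts.all()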
|
Change related name for SupplierPart.supplier
|
Change related name for SupplierPart.supplier
(cherry picked from commit 635c4339e06db392f373f46539df38c914683d5a)
|
Python
|
mit
|
inventree/InvenTree,inventree/InvenTree,SchrodingersGat/InvenTree,SchrodingersGat/InvenTree,inventree/InvenTree,SchrodingersGat/InvenTree,SchrodingersGat/InvenTree,inventree/InvenTree
|
010376704d66e3fadffe50709d479d31a2e8eb14
|
tool/collect-macaddr4ilo.py
|
tool/collect-macaddr4ilo.py
|
#!/usr/bin/env python3
import re
import sys
from optparse import OptionParser

import pexpect

macs = []
err = 0
op = OptionParser("fancheck [options] ip")
op.add_option("-l", "--log", action="store_true", dest="log", default=False,
              help="whole logs to stdout")
op.add_option("-u", "--user", action="store", dest="user", type="string",
              default=None, help="access user name")
op.add_option("-p", "--password", action="store", dest="password",
              type="string", default=None, help="access user password")
(ops, ip) = op.parse_args()
if len(ip) != 1 or ops.user is None or ops.password is None:
    op.print_help()
    sys.exit(1)
# encoding= makes pexpect return str instead of bytes under Python 3.
px = pexpect.spawn("ssh " + ops.user + "@" + ip[0], encoding="utf-8")
if ops.log:
    px.logfile_read = sys.stdout
px.expect("password: ")
px.send(ops.password + "\n")
px.expect(r"-> ")
px.send("show /system1/network1/Integrated_NICs\r")
px.expect(r"-> ")
for res in px.before.split("\n"):
    m = re.search(r"Port[0-9]NIC.*", res)
    if m:
        macs.append(m.group())
px.send("exit\r")
px.expect(pexpect.EOF)
if err == 0:
    for i in macs:
        print(i)
else:
    print("ERROR")
|
Add collection mac tool for ilo
|
[Add] Add collection mac tool for ilo
|
Python
|
mit
|
konono/equlipse,konono/equlipse
|
d3bafe885920e7b1d21e6d97456b39c471ed48d0
|
api/webview/tests.py
|
api/webview/tests.py
|
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "api.api.settings")

import django
django.setup()  # configure apps before importing view code that uses models

import pytest
from django.test import TestCase
from rest_framework.test import APIRequestFactory

from api.webview.views import DocumentList
# TODO - make this work without Django.
class APIViewTests(TestCase):
def setUp(self):
self.factory = APIRequestFactory()
@pytest.mark.postgres
def test_document_view(self):
view = DocumentList.as_view()
request = self.factory.get(
'/documents/'
)
response = view(request)
self.assertEqual(response.status_code, 200)
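
A brief note on invocation (the marker registration is assumed to live in the project's pytest configuration, which is not shown here):

# The postgres marker lets DB-backed tests be selected or skipped:
#   pytest -m postgres api/webview/tests.py
#   pytest -m "not postgres"   # skip tests that need a database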
|
Move api test back to django
|
Move api test back to django
|
Python
|
apache-2.0
|
CenterForOpenScience/scrapi,mehanig/scrapi,erinspace/scrapi,fabianvf/scrapi,felliott/scrapi,mehanig/scrapi,erinspace/scrapi,CenterForOpenScience/scrapi,fabianvf/scrapi,felliott/scrapi
|
dd6287903ccddf24edf600e3d30fba61efd9478f
|
distarray/core/tests/test_distributed_array_protocol.py
|
distarray/core/tests/test_distributed_array_protocol.py
|
import unittest
import distarray as da
from distarray.mpi.mpibase import create_comm_of_size, InvalidCommSizeError
class TestDistributedArrayProtocol(unittest.TestCase):
def setUp(self):
try:
comm = create_comm_of_size(4)
except InvalidCommSizeError:
            raise unittest.SkipTest('Must run with comm size >= 4.')
else:
self.larr = da.LocalArray((16,16),
grid_shape=(4,),
comm=comm, buf=None, offset=0)
def test_has_export(self):
self.assertTrue(hasattr(self.larr, '__distarray__'))
def test_export_keys(self):
required_keys = set(("buffer", "dimdata"))
export_data = self.larr.__distarray__()
exported_keys = set(export_data.keys())
self.assertEqual(required_keys, exported_keys)
def test_export_buffer(self):
"""See if we actually export a buffer."""
export_data = self.larr.__distarray__()
memoryview(export_data['buffer'])
def test_round_trip(self):
new_larr = da.localarray(self.larr)
self.assertEqual(new_larr.local_array, self.larr.local_array)
if __name__ == '__main__':
try:
unittest.main()
except SystemExit:
pass
|
import unittest
import distarray as da
from distarray.mpi.mpibase import create_comm_of_size, InvalidCommSizeError
class TestDistributedArrayProtocol(unittest.TestCase):
def setUp(self):
try:
comm = create_comm_of_size(4)
except InvalidCommSizeError:
            raise unittest.SkipTest('Must run with comm size >= 4.')
else:
self.larr = da.LocalArray((16,16),
grid_shape=(4,),
comm=comm, buf=None, offset=0)
def test_has_export(self):
self.assertTrue(hasattr(self.larr, '__distarray__'))
def test_export_keys(self):
required_keys = set(("buffer", "dimdata"))
export_data = self.larr.__distarray__()
exported_keys = set(export_data.keys())
self.assertEqual(required_keys, exported_keys)
def test_export_buffer(self):
"""See if we actually export a buffer."""
export_data = self.larr.__distarray__()
memoryview(export_data['buffer'])
@unittest.skip("Import not yet implemented.")
def test_round_trip(self):
new_larr = da.fromdap(self.larr)
self.assertIs(new_larr.local_array, self.larr.local_array)
if __name__ == '__main__':
try:
unittest.main()
except SystemExit:
pass
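
For orientation, a sketch of the export shape the tests exercise; only the two key names are checked, so the field contents here are assumptions drawn from the distributed array protocol, not from this commit:

# Rough shape of larr.__distarray__() as exercised above (sketch):
export = {
    'buffer': memoryview(bytearray(16 * 16 * 8)),   # raw local data
    'dimdata': (
        {'dist_type': 'b', 'size': 16},  # per-dimension metadata
        {'dist_type': 'b', 'size': 16},
    ),
}
assert set(export) == {'buffer', 'dimdata'}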
|
Modify round-trip test to use new function name.
|
Modify round-trip test to use new function name.
We've temporarily settled on `fromdap()` for importing from `__distarray__` interfaces.
|
Python
|
bsd-3-clause
|
RaoUmer/distarray,enthought/distarray,RaoUmer/distarray,enthought/distarray
|
35f9f8ad0aa3b7474aeeb4094ff13b8312b9d326
|
experiments/reproduce-srcnn/9-1-5/run.py
|
experiments/reproduce-srcnn/9-1-5/run.py
|
from toolbox.datasets.sr import load_data
from toolbox.models import srcnn
from toolbox.training import train
(x_train, y_train), (x_test, y_test) = load_data()
model = srcnn(x_train.shape[1:], f1=9, f2=1, f3=5)
train(model, x_train, y_train, validation_data=(x_test, y_test),
nb_epoch=2, resume=True, save_dir='save')
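
The toolbox internals aren't part of this commit, so as a hedged sketch: a 9-1-5 SRCNN in the standard three-layer form from the SRCNN paper might look like this in Keras-style code:

from keras.models import Sequential
from keras.layers import Conv2D

def srcnn_sketch(input_shape, f1=9, f2=1, f3=5):
    model = Sequential()
    # Patch extraction and representation.
    model.add(Conv2D(64, (f1, f1), activation='relu', padding='same',
                     input_shape=input_shape))
    # Non-linear mapping (the 1x1 middle layer).
    model.add(Conv2D(32, (f2, f2), activation='relu', padding='same'))
    # Reconstruction.
    model.add(Conv2D(1, (f3, f3), padding='same'))
    return model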
|
Add a simple experiment script
|
Add a simple experiment script
|
Python
|
mit
|
qobilidop/srcnn,qobilidop/srcnn
|
e60d0fd405a93f517fd7478b862dfae5bfb87d4f
|
demos/mapbox.py
|
demos/mapbox.py
|
#!/usr/bin/env python
# To run this example, you need to set the GI_TYPELIB_PATH environment
# variable to point to the gir directory:
#
# export GI_TYPELIB_PATH=$GI_TYPELIB_PATH:/usr/local/lib/girepository-1.0/
from gi.repository import GtkClutter
from gi.repository import GObject, Gtk, Champlain, GtkChamplain
ACCESS_TOKEN = "PUT YOUR ACCESS TOKEN HERE!!!"
CACHE_SIZE = 100000000 # size of cache stored on disk
MEMORY_CACHE_SIZE = 100 # in-memory cache size (tiles stored in memory)
MIN_ZOOM = 0
MAX_ZOOM = 19
TILE_SIZE = 256
LICENSE_TEXT = ""
LICENSE_URI = "https://www.mapbox.com/tos/"
def create_cached_source():
factory = Champlain.MapSourceFactory.dup_default()
tile_source = Champlain.NetworkTileSource.new_full(
"mapbox",
"mapbox",
LICENSE_TEXT,
LICENSE_URI,
MIN_ZOOM,
MAX_ZOOM,
TILE_SIZE,
Champlain.MapProjection.MAP_PROJECTION_MERCATOR,
"https://a.tiles.mapbox.com/v4/mapbox.streets/#Z#/#X#/#Y#.png?access_token=" + ACCESS_TOKEN,
Champlain.ImageRenderer())
tile_size = tile_source.get_tile_size()
error_source = factory.create_error_source(tile_size)
file_cache = Champlain.FileCache.new_full(CACHE_SIZE, None, Champlain.ImageRenderer())
memory_cache = Champlain.MemoryCache.new_full(MEMORY_CACHE_SIZE, Champlain.ImageRenderer())
source_chain = Champlain.MapSourceChain()
# tile is retrieved in this order:
# memory_cache -> file_cache -> tile_source -> error_source
# the first source that contains the tile returns it
source_chain.push(error_source)
source_chain.push(tile_source)
source_chain.push(file_cache)
source_chain.push(memory_cache)
return source_chain
GtkClutter.init([])
window = Gtk.Window(type=Gtk.WindowType.TOPLEVEL)
window.connect("destroy", Gtk.main_quit)
widget = GtkChamplain.Embed()
widget.set_size_request(640, 480)
view = widget.get_view()
view.set_map_source(create_cached_source())
window.add(widget)
window.show_all()
Gtk.main()
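
One detail worth spelling out: libchamplain substitutes the #Z#, #X#, and #Y# placeholders per tile, so a request might expand as follows (the token value is hypothetical):

# URI template expansion for zoom 2, tile x=1, y=3:
#   https://a.tiles.mapbox.com/v4/mapbox.streets/2/1/3.png?access_token=<token>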
|
Add python demo demonstrating custom tile source
|
Add python demo demonstrating custom tile source
|
Python
|
lgpl-2.1
|
GNOME/libchamplain,GNOME/libchamplain
|
8ba9f82373ef0995bd3a18d6bc0153a2f7e71bcb
|
setup.py
|
setup.py
|
#!/usr/bin/env python
import sys
import os
from distutils.core import setup
sys.path.insert(0, os.path.dirname(__file__))
from wutils import get_version, generate_version_py
generate_version_py(force=False)
setup(name='pybindgen',
version=get_version(),
description='Python Bindings Generator',
author='Gustavo Carneiro',
author_email='gjcarneiro@gmail.com',
url='https://launchpad.net/pybindgen',
packages=['pybindgen', 'pybindgen.typehandlers', 'pybindgen.typehandlers.ctypeparser'],
)
|
#!/usr/bin/env python
import sys
import os
from distutils.core import setup
sys.path.insert(0, os.path.dirname(__file__))
from wutils import get_version, generate_version_py
generate_version_py(force=False)
setup(name='PyBindGen',
version=get_version(),
description='Python Bindings Generator',
author='Gustavo Carneiro',
author_email='gjcarneiro@gmail.com',
url='https://launchpad.net/pybindgen',
packages=['pybindgen', 'pybindgen.typehandlers', 'pybindgen.typehandlers.ctypeparser'],
)
|
Revert back to PyBindGen as package name because it's what is already registered in PyPI.
|
Revert back to PyBindGen as package name because it's what is already registered in PyPI.
|
Python
|
lgpl-2.1
|
caramucho/pybindgen,caramucho/pybindgen,caramucho/pybindgen,cawka/pybindgen,cawka/pybindgen,cawka/pybindgen,cawka/pybindgen,caramucho/pybindgen
|
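The rename above turns on one detail: the package index treats differently-cased names as the same project, so 'PyBindGen' and 'pybindgen' collide on PyPI. An illustrative sketch of the PEP 503 normalization rule that today's index applies (not part of the record):

import re

def normalize(name):
    # PEP 503: lowercase and collapse runs of '-', '_', '.' into a single '-'
    return re.sub(r"[-_.]+", "-", name).lower()

assert normalize("PyBindGen") == normalize("pybindgen") == "pybindgen"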
e8339d89a3c69938ddd8ddb7c775edc56c8524ff
|
asyncpg/encodings.py
|
asyncpg/encodings.py
|
'''Map PostgreSQL encoding names to Python encoding names
https://www.postgresql.org/docs/current/static/multibyte.html#CHARSET-TABLE
'''
_map = {
'abc': 'cp1258',
'alt': 'cp866',
'euc_cn': 'euccn',
'euc_jp': 'eucjp',
'euc_kr': 'euckr',
'koi8r': 'koi8_r',
'koi8u': 'koi8_u',
'shift_jis_2004': 'euc_jis_2004',
'sjis': 'shift_jis',
'sql_ascii': 'ascii',
'vscii': 'cp1258',
'tcvn': 'cp1258',
'tcvn5712': 'cp1258',
'unicode': 'utf_8',
'win': 'cp1521',
'win1250': 'cp1250',
'win1251': 'cp1251',
'win1252': 'cp1252',
'win1253': 'cp1253',
'win1254': 'cp1254',
'win1255': 'cp1255',
'win1256': 'cp1256',
'win1257': 'cp1257',
'win1258': 'cp1258',
'win866': 'cp866',
'win874': 'cp874',
'win932': 'cp932',
'win936': 'cp936',
'win949': 'cp949',
'win950': 'cp950',
'windows1250': 'cp1250',
'windows1251': 'cp1251',
'windows1252': 'cp1252',
'windows1253': 'cp1253',
'windows1254': 'cp1254',
'windows1255': 'cp1255',
'windows1256': 'cp1256',
'windows1257': 'cp1257',
'windows1258': 'cp1258',
'windows866': 'cp866',
'windows874': 'cp874',
'windows932': 'cp932',
'windows936': 'cp936',
'windows949': 'cp949',
'windows950': 'cp950',
}
def get_python_encoding(pg_encoding):
return _map.get(pg_encoding, pg_encoding)
|
Add a module to map Postgres encoding names to Python
|
Add a module to map Postgres encoding names to Python
|
Python
|
apache-2.0
|
MagicStack/asyncpg,MagicStack/asyncpg
|
Add a module to map Postgres encoding names to Python
|
'''Map PostgreSQL encoding names to Python encoding names
https://www.postgresql.org/docs/current/static/multibyte.html#CHARSET-TABLE
'''
_map = {
'abc': 'cp1258',
'alt': 'cp866',
'euc_cn': 'euccn',
'euc_jp': 'eucjp',
'euc_kr': 'euckr',
'koi8r': 'koi8_r',
'koi8u': 'koi8_u',
'shift_jis_2004': 'euc_jis_2004',
'sjis': 'shift_jis',
'sql_ascii': 'ascii',
'vscii': 'cp1258',
'tcvn': 'cp1258',
'tcvn5712': 'cp1258',
'unicode': 'utf_8',
'win': 'cp1521',
'win1250': 'cp1250',
'win1251': 'cp1251',
'win1252': 'cp1252',
'win1253': 'cp1253',
'win1254': 'cp1254',
'win1255': 'cp1255',
'win1256': 'cp1256',
'win1257': 'cp1257',
'win1258': 'cp1258',
'win866': 'cp866',
'win874': 'cp874',
'win932': 'cp932',
'win936': 'cp936',
'win949': 'cp949',
'win950': 'cp950',
'windows1250': 'cp1250',
'windows1251': 'cp1251',
'windows1252': 'cp1252',
'windows1253': 'cp1253',
'windows1254': 'cp1254',
'windows1255': 'cp1255',
'windows1256': 'cp1256',
'windows1257': 'cp1257',
'windows1258': 'cp1258',
'windows866': 'cp866',
'windows874': 'cp874',
'windows932': 'cp932',
'windows936': 'cp936',
'windows949': 'cp949',
'windows950': 'cp950',
}
def get_python_encoding(pg_encoding):
return _map.get(pg_encoding, pg_encoding)
|
<commit_before><commit_msg>Add a module to map Postgres encoding names to Python<commit_after>
|
'''Map PostgreSQL encoding names to Python encoding names
https://www.postgresql.org/docs/current/static/multibyte.html#CHARSET-TABLE
'''
_map = {
'abc': 'cp1258',
'alt': 'cp866',
'euc_cn': 'euccn',
'euc_jp': 'eucjp',
'euc_kr': 'euckr',
'koi8r': 'koi8_r',
'koi8u': 'koi8_u',
'shift_jis_2004': 'euc_jis_2004',
'sjis': 'shift_jis',
'sql_ascii': 'ascii',
'vscii': 'cp1258',
'tcvn': 'cp1258',
'tcvn5712': 'cp1258',
'unicode': 'utf_8',
'win': 'cp1521',
'win1250': 'cp1250',
'win1251': 'cp1251',
'win1252': 'cp1252',
'win1253': 'cp1253',
'win1254': 'cp1254',
'win1255': 'cp1255',
'win1256': 'cp1256',
'win1257': 'cp1257',
'win1258': 'cp1258',
'win866': 'cp866',
'win874': 'cp874',
'win932': 'cp932',
'win936': 'cp936',
'win949': 'cp949',
'win950': 'cp950',
'windows1250': 'cp1250',
'windows1251': 'cp1251',
'windows1252': 'cp1252',
'windows1253': 'cp1253',
'windows1254': 'cp1254',
'windows1255': 'cp1255',
'windows1256': 'cp1256',
'windows1257': 'cp1257',
'windows1258': 'cp1258',
'windows866': 'cp866',
'windows874': 'cp874',
'windows932': 'cp932',
'windows936': 'cp936',
'windows949': 'cp949',
'windows950': 'cp950',
}
def get_python_encoding(pg_encoding):
return _map.get(pg_encoding, pg_encoding)
|
Add a module to map Postgres encoding names to Python'''Map PostgreSQL encoding names to Python encoding names
https://www.postgresql.org/docs/current/static/multibyte.html#CHARSET-TABLE
'''
_map = {
'abc': 'cp1258',
'alt': 'cp866',
'euc_cn': 'euccn',
'euc_jp': 'eucjp',
'euc_kr': 'euckr',
'koi8r': 'koi8_r',
'koi8u': 'koi8_u',
'shift_jis_2004': 'euc_jis_2004',
'sjis': 'shift_jis',
'sql_ascii': 'ascii',
'vscii': 'cp1258',
'tcvn': 'cp1258',
'tcvn5712': 'cp1258',
'unicode': 'utf_8',
'win': 'cp1521',
'win1250': 'cp1250',
'win1251': 'cp1251',
'win1252': 'cp1252',
'win1253': 'cp1253',
'win1254': 'cp1254',
'win1255': 'cp1255',
'win1256': 'cp1256',
'win1257': 'cp1257',
'win1258': 'cp1258',
'win866': 'cp866',
'win874': 'cp874',
'win932': 'cp932',
'win936': 'cp936',
'win949': 'cp949',
'win950': 'cp950',
'windows1250': 'cp1250',
'windows1251': 'cp1251',
'windows1252': 'cp1252',
'windows1253': 'cp1253',
'windows1254': 'cp1254',
'windows1255': 'cp1255',
'windows1256': 'cp1256',
'windows1257': 'cp1257',
'windows1258': 'cp1258',
'windows866': 'cp866',
'windows874': 'cp874',
'windows932': 'cp932',
'windows936': 'cp936',
'windows949': 'cp949',
'windows950': 'cp950',
}
def get_python_encoding(pg_encoding):
return _map.get(pg_encoding, pg_encoding)
|
<commit_before><commit_msg>Add a module to map Postgres encoding names to Python<commit_after>'''Map PostgreSQL encoding names to Python encoding names
https://www.postgresql.org/docs/current/static/multibyte.html#CHARSET-TABLE
'''
_map = {
'abc': 'cp1258',
'alt': 'cp866',
'euc_cn': 'euccn',
'euc_jp': 'eucjp',
'euc_kr': 'euckr',
'koi8r': 'koi8_r',
'koi8u': 'koi8_u',
'shift_jis_2004': 'euc_jis_2004',
'sjis': 'shift_jis',
'sql_ascii': 'ascii',
'vscii': 'cp1258',
'tcvn': 'cp1258',
'tcvn5712': 'cp1258',
'unicode': 'utf_8',
'win': 'cp1521',
'win1250': 'cp1250',
'win1251': 'cp1251',
'win1252': 'cp1252',
'win1253': 'cp1253',
'win1254': 'cp1254',
'win1255': 'cp1255',
'win1256': 'cp1256',
'win1257': 'cp1257',
'win1258': 'cp1258',
'win866': 'cp866',
'win874': 'cp874',
'win932': 'cp932',
'win936': 'cp936',
'win949': 'cp949',
'win950': 'cp950',
'windows1250': 'cp1250',
'windows1251': 'cp1251',
'windows1252': 'cp1252',
'windows1253': 'cp1253',
'windows1254': 'cp1254',
'windows1255': 'cp1255',
'windows1256': 'cp1256',
'windows1257': 'cp1257',
'windows1258': 'cp1258',
'windows866': 'cp866',
'windows874': 'cp874',
'windows932': 'cp932',
'windows936': 'cp936',
'windows949': 'cp949',
'windows950': 'cp950',
}
def get_python_encoding(pg_encoding):
return _map.get(pg_encoding, pg_encoding)
|
|
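A short usage sketch for the table above; the import path is assumed from the record's file name, and the keys are lowercase, so callers should fold case first:

import codecs
from asyncpg.encodings import get_python_encoding  # path assumed from the record

# Resolve a PostgreSQL server_encoding value to a codec Python can load.
codec = codecs.lookup(get_python_encoding('win1252'))
print(codec.name)  # cp1252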
52409670ac2d8f6161cdf884465fa75454280d1a
|
examples/pi2.py
|
examples/pi2.py
|
#!/usr/bin/python
from __future__ import division
import random
import sys
import mrs
class SamplePi(mrs.MapReduce):
"""A less numerically-intensive version of the pi calculator."""
def map(self, key, value):
inside = 0
outside = 0
for _ in range(int(value)):
x = random.random() - 0.5
y = random.random() - 0.5
if x * x + y * y > 0.25:
outside += 1
else:
inside += 1
yield (str(True), str(inside))
yield (str(False), str(outside))
def reduce(self, key, values):
values = list(values)
yield str(sum(int(x) for x in values))
def run(self, job):
points = self.opts.num_points
tasks = self.opts.num_tasks
kvpairs = ((str(i * points), str(points)) for i in range(tasks))
source = job.local_data(kvpairs, splits=tasks)
intermediate = job.map_data(source, self.map)
source.close()
output = job.reduce_data(intermediate, self.reduce)
intermediate.close()
job.wait(output)
output.fetchall()
for key, value in output.data():
if key == 'True':
inside = int(value)
else:
outside = int(value)
pi = 4 * inside / (inside + outside)
print pi
sys.stdout.flush()
return 0
def update_parser(parser):
parser.add_option('-p', '--points',
dest='num_points', type='int',
help='Number of points for each map task',
default=1000)
parser.add_option('-t', '--tasks',
dest='num_tasks', type='int',
help='Number of map tasks to use',
default=40)
return parser
if __name__ == '__main__':
mrs.main(SamplePi, update_parser)
|
Add a pi example using random.random instead of the halton sequence
|
Add a pi example using random.random instead of the halton sequence
|
Python
|
apache-2.0
|
byu-aml-lab/mrs-mapreduce,kseppi/mrs-mapreduce
|
Add a pi example using random.random instead of the halton sequence
|
#!/usr/bin/python
from __future__ import division
import random
import sys
import mrs
class SamplePi(mrs.MapReduce):
"""A less numerically-intensive version of the pi calculator."""
def map(self, key, value):
inside = 0
outside = 0
for _ in range(int(value)):
x = random.random() - 0.5
y = random.random() - 0.5
if x * x + y * y > 0.25:
outside += 1
else:
inside += 1
yield (str(True), str(inside))
yield (str(False), str(outside))
def reduce(self, key, values):
values = list(values)
yield str(sum(int(x) for x in values))
def run(self, job):
points = self.opts.num_points
tasks = self.opts.num_tasks
kvpairs = ((str(i * points), str(points)) for i in range(tasks))
source = job.local_data(kvpairs, splits=tasks)
intermediate = job.map_data(source, self.map)
source.close()
output = job.reduce_data(intermediate, self.reduce)
intermediate.close()
job.wait(output)
output.fetchall()
for key, value in output.data():
if key == 'True':
inside = int(value)
else:
outside = int(value)
pi = 4 * inside / (inside + outside)
print pi
sys.stdout.flush()
return 0
def update_parser(parser):
parser.add_option('-p', '--points',
dest='num_points', type='int',
help='Number of points for each map task',
default=1000)
parser.add_option('-t', '--tasks',
dest='num_tasks', type='int',
help='Number of map tasks to use',
default=40)
return parser
if __name__ == '__main__':
mrs.main(SamplePi, update_parser)
|
<commit_before><commit_msg>Add a pi example using random.random instead of the halton sequence<commit_after>
|
#!/usr/bin/python
from __future__ import division
import random
import sys
import mrs
class SamplePi(mrs.MapReduce):
"""A less numerically-intensive version of the pi calculator."""
def map(self, key, value):
inside = 0
outside = 0
for _ in range(int(value)):
x = random.random() - 0.5
y = random.random() - 0.5
if x * x + y * y > 0.25:
outside += 1
else:
inside += 1
yield (str(True), str(inside))
yield (str(False), str(outside))
def reduce(self, key, values):
values = list(values)
yield str(sum(int(x) for x in values))
def run(self, job):
points = self.opts.num_points
tasks = self.opts.num_tasks
kvpairs = ((str(i * points), str(points)) for i in range(tasks))
source = job.local_data(kvpairs, splits=tasks)
intermediate = job.map_data(source, self.map)
source.close()
output = job.reduce_data(intermediate, self.reduce)
intermediate.close()
job.wait(output)
output.fetchall()
for key, value in output.data():
if key == 'True':
inside = int(value)
else:
outside = int(value)
pi = 4 * inside / (inside + outside)
print pi
sys.stdout.flush()
return 0
def update_parser(parser):
parser.add_option('-p', '--points',
dest='num_points', type='int',
help='Number of points for each map task',
default=1000)
parser.add_option('-t', '--tasks',
dest='num_tasks', type='int',
help='Number of map tasks to use',
default=40)
return parser
if __name__ == '__main__':
mrs.main(SamplePi, update_parser)
|
Add a pi example using random.random instead of the halton sequence#!/usr/bin/python
from __future__ import division
import random
import sys
import mrs
class SamplePi(mrs.MapReduce):
"""A less numerically-intensive version of the pi calculator."""
def map(self, key, value):
inside = 0
outside = 0
for _ in range(int(value)):
x = random.random() - 0.5
y = random.random() - 0.5
if x * x + y * y > 0.25:
outside += 1
else:
inside += 1
yield (str(True), str(inside))
yield (str(False), str(outside))
def reduce(self, key, values):
values = list(values)
yield str(sum(int(x) for x in values))
def run(self, job):
points = self.opts.num_points
tasks = self.opts.num_tasks
kvpairs = ((str(i * points), str(points)) for i in range(tasks))
source = job.local_data(kvpairs, splits=tasks)
intermediate = job.map_data(source, self.map)
source.close()
output = job.reduce_data(intermediate, self.reduce)
intermediate.close()
job.wait(output)
output.fetchall()
for key, value in output.data():
if key == 'True':
inside = int(value)
else:
outside = int(value)
pi = 4 * inside / (inside + outside)
print pi
sys.stdout.flush()
return 0
def update_parser(parser):
parser.add_option('-p', '--points',
dest='num_points', type='int',
help='Number of points for each map task',
default=1000)
parser.add_option('-t', '--tasks',
dest='num_tasks', type='int',
help='Number of map tasks to use',
default=40)
return parser
if __name__ == '__main__':
mrs.main(SamplePi, update_parser)
|
<commit_before><commit_msg>Add a pi example using random.random instead of the halton sequence<commit_after>#!/usr/bin/python
from __future__ import division
import random
import sys
import mrs
class SamplePi(mrs.MapReduce):
"""A less numerically-intensive version of the pi calculator."""
def map(self, key, value):
inside = 0
outside = 0
for _ in range(int(value)):
x = random.random() - 0.5
y = random.random() - 0.5
if x * x + y * y > 0.25:
outside += 1
else:
inside += 1
yield (str(True), str(inside))
yield (str(False), str(outside))
def reduce(self, key, values):
values = list(values)
yield str(sum(int(x) for x in values))
def run(self, job):
points = self.opts.num_points
tasks = self.opts.num_tasks
kvpairs = ((str(i * points), str(points)) for i in range(tasks))
source = job.local_data(kvpairs, splits=tasks)
intermediate = job.map_data(source, self.map)
source.close()
output = job.reduce_data(intermediate, self.reduce)
intermediate.close()
job.wait(output)
output.fetchall()
for key, value in output.data():
if key == 'True':
inside = int(value)
else:
outside = int(value)
pi = 4 * inside / (inside + outside)
print pi
sys.stdout.flush()
return 0
def update_parser(parser):
parser.add_option('-p', '--points',
dest='num_points', type='int',
help='Number of points for each map task',
default=1000)
parser.add_option('-t', '--tasks',
dest='num_tasks', type='int',
help='Number of map tasks to use',
default=40)
return parser
if __name__ == '__main__':
mrs.main(SamplePi, update_parser)
|
|
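Stripped of the Mrs framework, the map task above is plain Monte Carlo sampling. A self-contained sketch of the same estimator (written for Python 3, unlike the record's Python 2 style):

import random

def estimate_pi(samples=100000):
    # A point uniform in the unit square lands inside the inscribed
    # circle of radius 0.5 with probability pi / 4.
    inside = sum(
        (random.random() - 0.5) ** 2 + (random.random() - 0.5) ** 2 <= 0.25
        for _ in range(samples)
    )
    return 4 * inside / samples

print(estimate_pi())  # roughly 3.14, tightening as samples grows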
8f056d2f0138c3757f55c393a0b81c2959778190
|
apps/notifications/tests/test_templatetags.py
|
apps/notifications/tests/test_templatetags.py
|
"""
Test suite for the template tags of the notifications app.
"""
from django.test import TestCase
from django.template import (engines,
TemplateSyntaxError)
from django.contrib.auth import get_user_model
from ..models import Notification
class NotificationsViewsTestCase(TestCase):
"""
Test suite for the views.
"""
def setUp(self):
"""
Create a new user named "johndoe" with password "illpassword".
"""
self.user1 = get_user_model().objects.create_user(username='johndoe',
password='illpassword',
email='john.doe@example.com')
self.user2 = get_user_model().objects.create_user(username='johnsmith',
password='illpassword',
email='john.smith@example.com')
self.notif1 = Notification.objects.create(recipient=self.user1,
title='Test 1',
message='Test 1',
message_html='<p>Test 1</p>')
self.notif2 = Notification.objects.create(recipient=self.user2,
title='Test 2',
message='Test 2',
message_html='<p>Test 2</p>')
self.notif3 = Notification.objects.create(recipient=self.user1,
title='Test 3',
message='Test 3',
message_html='<p>Test 3</p>',
unread=False)
def test_notifications_count_include_tag(self):
"""
        Test if the ``notifications_count`` template tag works when used as an include tag.
"""
template_code = "{% load notifications %}{% notifications_count %}"
template = engines['django'].from_string(template_code)
html = template.render({'user': self.user1})
self.assertEqual('1', html)
def test_notifications_count_assignment_tag(self):
"""
        Test if the ``notifications_count`` template tag works when used as an assignment tag.
"""
template_code = "{% load notifications %}{% notifications_count as foobar %}#{{ foobar }}"
template = engines['django'].from_string(template_code)
html = template.render({'user': self.user1})
self.assertEqual('#1', html)
def test_notifications_count_bad_argc(self):
"""
        Test if the ``notifications_count`` template tag raises an error on a bad argument count.
"""
with self.assertRaises(TemplateSyntaxError):
template_code = "{% load notifications %}{% notifications_count 1 2 3 4 %}"
template = engines['django'].from_string(template_code)
template.render({'user': self.user1})
def test_notifications_count_bad_argv(self):
"""
        Test if the ``notifications_count`` template tag raises an error on bad argument placement.
"""
with self.assertRaises(TemplateSyntaxError):
template_code = "{% load notifications %}{% notifications_count foo bar %}"
template = engines['django'].from_string(template_code)
template.render({'user': self.user1})
|
Add tests suite for template tags of notifications app
|
Add tests suite for template tags of notifications app
|
Python
|
agpl-3.0
|
TamiaLab/carnetdumaker,TamiaLab/carnetdumaker,TamiaLab/carnetdumaker,TamiaLab/carnetdumaker
|
Add tests suite for template tags of notifications app
|
"""
Test suite for the template tags of the notifications app.
"""
from django.test import TestCase
from django.template import (engines,
TemplateSyntaxError)
from django.contrib.auth import get_user_model
from ..models import Notification
class NotificationsViewsTestCase(TestCase):
"""
Test suite for the views.
"""
def setUp(self):
"""
Create a new user named "johndoe" with password "illpassword".
"""
self.user1 = get_user_model().objects.create_user(username='johndoe',
password='illpassword',
email='john.doe@example.com')
self.user2 = get_user_model().objects.create_user(username='johnsmith',
password='illpassword',
email='john.smith@example.com')
self.notif1 = Notification.objects.create(recipient=self.user1,
title='Test 1',
message='Test 1',
message_html='<p>Test 1</p>')
self.notif2 = Notification.objects.create(recipient=self.user2,
title='Test 2',
message='Test 2',
message_html='<p>Test 2</p>')
self.notif3 = Notification.objects.create(recipient=self.user1,
title='Test 3',
message='Test 3',
message_html='<p>Test 3</p>',
unread=False)
def test_notifications_count_include_tag(self):
"""
        Test if the ``notifications_count`` template tag works when used as an include tag.
"""
template_code = "{% load notifications %}{% notifications_count %}"
template = engines['django'].from_string(template_code)
html = template.render({'user': self.user1})
self.assertEqual('1', html)
def test_notifications_count_assignment_tag(self):
"""
        Test if the ``notifications_count`` template tag works when used as an assignment tag.
"""
template_code = "{% load notifications %}{% notifications_count as foobar %}#{{ foobar }}"
template = engines['django'].from_string(template_code)
html = template.render({'user': self.user1})
self.assertEqual('#1', html)
def test_notifications_count_bad_argc(self):
"""
        Test if the ``notifications_count`` template tag raises an error on a bad argument count.
"""
with self.assertRaises(TemplateSyntaxError):
template_code = "{% load notifications %}{% notifications_count 1 2 3 4 %}"
template = engines['django'].from_string(template_code)
template.render({'user': self.user1})
def test_notifications_count_bad_argv(self):
"""
        Test if the ``notifications_count`` template tag raises an error on bad argument placement.
"""
with self.assertRaises(TemplateSyntaxError):
template_code = "{% load notifications %}{% notifications_count foo bar %}"
template = engines['django'].from_string(template_code)
template.render({'user': self.user1})
|
<commit_before><commit_msg>Add tests suite for template tags of notifications app<commit_after>
|
"""
Test suite for the template tags of the notifications app.
"""
from django.test import TestCase
from django.template import (engines,
TemplateSyntaxError)
from django.contrib.auth import get_user_model
from ..models import Notification
class NotificationsViewsTestCase(TestCase):
"""
Test suite for the views.
"""
def setUp(self):
"""
Create a new user named "johndoe" with password "illpassword".
"""
self.user1 = get_user_model().objects.create_user(username='johndoe',
password='illpassword',
email='john.doe@example.com')
self.user2 = get_user_model().objects.create_user(username='johnsmith',
password='illpassword',
email='john.smith@example.com')
self.notif1 = Notification.objects.create(recipient=self.user1,
title='Test 1',
message='Test 1',
message_html='<p>Test 1</p>')
self.notif2 = Notification.objects.create(recipient=self.user2,
title='Test 2',
message='Test 2',
message_html='<p>Test 2</p>')
self.notif3 = Notification.objects.create(recipient=self.user1,
title='Test 3',
message='Test 3',
message_html='<p>Test 3</p>',
unread=False)
def test_notifications_count_include_tag(self):
"""
        Test if the ``notifications_count`` template tag works when used as an include tag.
"""
template_code = "{% load notifications %}{% notifications_count %}"
template = engines['django'].from_string(template_code)
html = template.render({'user': self.user1})
self.assertEqual('1', html)
def test_notifications_count_assignment_tag(self):
"""
        Test if the ``notifications_count`` template tag works when used as an assignment tag.
"""
template_code = "{% load notifications %}{% notifications_count as foobar %}#{{ foobar }}"
template = engines['django'].from_string(template_code)
html = template.render({'user': self.user1})
self.assertEqual('#1', html)
def test_notifications_count_bad_argc(self):
"""
        Test if the ``notifications_count`` template tag raises an error on a bad argument count.
"""
with self.assertRaises(TemplateSyntaxError):
template_code = "{% load notifications %}{% notifications_count 1 2 3 4 %}"
template = engines['django'].from_string(template_code)
template.render({'user': self.user1})
def test_notifications_count_bad_argv(self):
"""
        Test if the ``notifications_count`` template tag raises an error on bad argument placement.
"""
with self.assertRaises(TemplateSyntaxError):
template_code = "{% load notifications %}{% notifications_count foo bar %}"
template = engines['django'].from_string(template_code)
template.render({'user': self.user1})
|
Add tests suite for template tags of notifications app"""
Test suite for the template tags of the notifications app.
"""
from django.test import TestCase
from django.template import (engines,
TemplateSyntaxError)
from django.contrib.auth import get_user_model
from ..models import Notification
class NotificationsViewsTestCase(TestCase):
"""
Test suite for the views.
"""
def setUp(self):
"""
Create a new user named "johndoe" with password "illpassword".
"""
self.user1 = get_user_model().objects.create_user(username='johndoe',
password='illpassword',
email='john.doe@example.com')
self.user2 = get_user_model().objects.create_user(username='johnsmith',
password='illpassword',
email='john.smith@example.com')
self.notif1 = Notification.objects.create(recipient=self.user1,
title='Test 1',
message='Test 1',
message_html='<p>Test 1</p>')
self.notif2 = Notification.objects.create(recipient=self.user2,
title='Test 2',
message='Test 2',
message_html='<p>Test 2</p>')
self.notif3 = Notification.objects.create(recipient=self.user1,
title='Test 3',
message='Test 3',
message_html='<p>Test 3</p>',
unread=False)
def test_notifications_count_include_tag(self):
"""
        Test if the ``notifications_count`` template tag works when used as an include tag.
"""
template_code = "{% load notifications %}{% notifications_count %}"
template = engines['django'].from_string(template_code)
html = template.render({'user': self.user1})
self.assertEqual('1', html)
def test_notifications_count_assignment_tag(self):
"""
        Test if the ``notifications_count`` template tag works when used as an assignment tag.
"""
template_code = "{% load notifications %}{% notifications_count as foobar %}#{{ foobar }}"
template = engines['django'].from_string(template_code)
html = template.render({'user': self.user1})
self.assertEqual('#1', html)
def test_notifications_count_bad_argc(self):
"""
        Test if the ``notifications_count`` template tag raises an error on a bad argument count.
"""
with self.assertRaises(TemplateSyntaxError):
template_code = "{% load notifications %}{% notifications_count 1 2 3 4 %}"
template = engines['django'].from_string(template_code)
template.render({'user': self.user1})
def test_notifications_count_bad_argv(self):
"""
        Test if the ``notifications_count`` template tag raises an error on bad argument placement.
"""
with self.assertRaises(TemplateSyntaxError):
template_code = "{% load notifications %}{% notifications_count foo bar %}"
template = engines['django'].from_string(template_code)
template.render({'user': self.user1})
|
<commit_before><commit_msg>Add tests suite for template tags of notifications app<commit_after>"""
Test suite for the template tags of the notifications app.
"""
from django.test import TestCase
from django.template import (engines,
TemplateSyntaxError)
from django.contrib.auth import get_user_model
from ..models import Notification
class NotificationsViewsTestCase(TestCase):
"""
Test suite for the views.
"""
def setUp(self):
"""
Create a new user named "johndoe" with password "illpassword".
"""
self.user1 = get_user_model().objects.create_user(username='johndoe',
password='illpassword',
email='john.doe@example.com')
self.user2 = get_user_model().objects.create_user(username='johnsmith',
password='illpassword',
email='john.smith@example.com')
self.notif1 = Notification.objects.create(recipient=self.user1,
title='Test 1',
message='Test 1',
message_html='<p>Test 1</p>')
self.notif2 = Notification.objects.create(recipient=self.user2,
title='Test 2',
message='Test 2',
message_html='<p>Test 2</p>')
self.notif3 = Notification.objects.create(recipient=self.user1,
title='Test 3',
message='Test 3',
message_html='<p>Test 3</p>',
unread=False)
def test_notifications_count_include_tag(self):
"""
        Test if the ``notifications_count`` template tag works when used as an include tag.
"""
template_code = "{% load notifications %}{% notifications_count %}"
template = engines['django'].from_string(template_code)
html = template.render({'user': self.user1})
self.assertEqual('1', html)
def test_notifications_count_assignment_tag(self):
"""
        Test if the ``notifications_count`` template tag works when used as an assignment tag.
"""
template_code = "{% load notifications %}{% notifications_count as foobar %}#{{ foobar }}"
template = engines['django'].from_string(template_code)
html = template.render({'user': self.user1})
self.assertEqual('#1', html)
def test_notifications_count_bad_argc(self):
"""
        Test if the ``notifications_count`` template tag raises an error on a bad argument count.
"""
with self.assertRaises(TemplateSyntaxError):
template_code = "{% load notifications %}{% notifications_count 1 2 3 4 %}"
template = engines['django'].from_string(template_code)
template.render({'user': self.user1})
def test_notifications_count_bad_argv(self):
"""
        Test if the ``notifications_count`` template tag raises an error on bad argument placement.
"""
with self.assertRaises(TemplateSyntaxError):
template_code = "{% load notifications %}{% notifications_count foo bar %}"
template = engines['django'].from_string(template_code)
template.render({'user': self.user1})
|
|
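The record contains only the tests; the templatetags module they exercise is not shown. A hypothetical registration consistent with the tested behaviour (Django's simple_tag supports both inline output and 'as var' assignment, and rejects stray arguments with TemplateSyntaxError); the 'notifications' reverse accessor is an assumption, not taken from the record:

from django import template

register = template.Library()

@register.simple_tag(takes_context=True)
def notifications_count(context):
    # Unread-notification count for the user in the render context;
    # the related name 'notifications' is assumed here.
    user = context['user']
    return user.notifications.filter(unread=True).count()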
cff10711c9c613fad39e7a0eec5c37c3c17a7b94
|
zip2wd_mp/demographics2input.py
|
zip2wd_mp/demographics2input.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import csv
import argparse
def parse_command_line():
"""Parse command line arguments
"""
parser = argparse.ArgumentParser(description="Demographics data to Inputs")
parser.add_argument('input', type=str, help='Demographics CSV file')
parser.add_argument("-o", "--output", type=str, default="inputs.csv",
help="ZIP2WD CSV input file (default: 'inputs.csv')")
    parser.add_argument('year', type=int, help='Year to query')
return parser.parse_args()
if __name__ == "__main__":
args = parse_command_line()
zipcode = set()
with open(args.input) as f:
reader = csv.DictReader(f)
for r in reader:
zipcode.add(r['zip_code'])
with open(args.output, 'wb') as o:
writer = csv.DictWriter(o, fieldnames=['uniqid', 'zip',
'from.year', 'from.month',
'from.day', 'to.year',
'to.month', 'to.day'])
writer.writeheader()
for i, z in enumerate(zipcode):
r = {'uniqid': i,
'zip': z,
'from.year': args.year,
'from.month': '01',
'from.day': '01',
'to.year': args.year,
'to.month': '12',
'to.day': '31'}
writer.writerow(r)
print("Total unique zip code = {:d}".format(i))
|
Add a simple script to get unique zip codes from demographics file and save as zip2wd inputs.
|
Add a simple script to get unique zip codes from demographics file and save as zip2wd inputs.
|
Python
|
mit
|
soodoku/get-weather-data,soodoku/get-weather-data
|
Add a simple script to get unique zip codes from demographics file and save as zip2wd inputs.
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import csv
import argparse
def parse_command_line():
"""Parse command line arguments
"""
parser = argparse.ArgumentParser(description="Demographics data to Inputs")
parser.add_argument('input', type=str, help='Demographics CSV file')
parser.add_argument("-o", "--output", type=str, default="inputs.csv",
help="ZIP2WD CSV input file (default: 'inputs.csv')")
    parser.add_argument('year', type=int, help='Year to query')
return parser.parse_args()
if __name__ == "__main__":
args = parse_command_line()
zipcode = set()
with open(args.input) as f:
reader = csv.DictReader(f)
for r in reader:
zipcode.add(r['zip_code'])
with open(args.output, 'wb') as o:
writer = csv.DictWriter(o, fieldnames=['uniqid', 'zip',
'from.year', 'from.month',
'from.day', 'to.year',
'to.month', 'to.day'])
writer.writeheader()
for i, z in enumerate(zipcode):
r = {'uniqid': i,
'zip': z,
'from.year': args.year,
'from.month': '01',
'from.day': '01',
'to.year': args.year,
'to.month': '12',
'to.day': '31'}
writer.writerow(r)
print("Total unique zip code = {:d}".format(i))
|
<commit_before><commit_msg>Add a simple script to get unique zip codes from demographics file and save as zip2wd inputs.<commit_after>
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import csv
import argparse
def parse_command_line():
"""Parse command line arguments
"""
parser = argparse.ArgumentParser(description="Demographics data to Inputs")
parser.add_argument('input', type=str, help='Demographics CSV file')
parser.add_argument("-o", "--output", type=str, default="inputs.csv",
help="ZIP2WD CSV input file (default: 'inputs.csv')")
    parser.add_argument('year', type=int, help='Year to query')
return parser.parse_args()
if __name__ == "__main__":
args = parse_command_line()
zipcode = set()
with open(args.input) as f:
reader = csv.DictReader(f)
for r in reader:
zipcode.add(r['zip_code'])
with open(args.output, 'wb') as o:
writer = csv.DictWriter(o, fieldnames=['uniqid', 'zip',
'from.year', 'from.month',
'from.day', 'to.year',
'to.month', 'to.day'])
writer.writeheader()
for i, z in enumerate(zipcode):
r = {'uniqid': i,
'zip': z,
'from.year': args.year,
'from.month': '01',
'from.day': '01',
'to.year': args.year,
'to.month': '12',
'to.day': '31'}
writer.writerow(r)
print("Total unique zip code = {:d}".format(i))
|
Add a simple script to get unique zip codes from demographics file and save as zip2wd inputs.#!/usr/bin/env python
# -*- coding: utf-8 -*-
import csv
import argparse
def parse_command_line():
"""Parse command line arguments
"""
parser = argparse.ArgumentParser(description="Demographics data to Inputs")
parser.add_argument('input', type=str, help='Demographics CSV file')
parser.add_argument("-o", "--output", type=str, default="inputs.csv",
help="ZIP2WD CSV input file (default: 'inputs.csv')")
    parser.add_argument('year', type=int, help='Year to query')
return parser.parse_args()
if __name__ == "__main__":
args = parse_command_line()
zipcode = set()
with open(args.input) as f:
reader = csv.DictReader(f)
for r in reader:
zipcode.add(r['zip_code'])
with open(args.output, 'wb') as o:
writer = csv.DictWriter(o, fieldnames=['uniqid', 'zip',
'from.year', 'from.month',
'from.day', 'to.year',
'to.month', 'to.day'])
writer.writeheader()
for i, z in enumerate(zipcode):
r = {'uniqid': i,
'zip': z,
'from.year': args.year,
'from.month': '01',
'from.day': '01',
'to.year': args.year,
'to.month': '12',
'to.day': '31'}
writer.writerow(r)
print("Total unique zip code = {:d}".format(i))
|
<commit_before><commit_msg>Add a simple script to get unique zip codes from demographics file and save as zip2wd inputs.<commit_after>#!/usr/bin/env python
# -*- coding: utf-8 -*-
import csv
import argparse
def parse_command_line():
"""Parse command line arguments
"""
parser = argparse.ArgumentParser(description="Demographics data to Inputs")
parser.add_argument('input', type=str, help='Demographics CSV file')
parser.add_argument("-o", "--output", type=str, default="inputs.csv",
help="ZIP2WD CSV input file (default: 'inputs.csv')")
    parser.add_argument('year', type=int, help='Year to query')
return parser.parse_args()
if __name__ == "__main__":
args = parse_command_line()
zipcode = set()
with open(args.input) as f:
reader = csv.DictReader(f)
for r in reader:
zipcode.add(r['zip_code'])
with open(args.output, 'wb') as o:
writer = csv.DictWriter(o, fieldnames=['uniqid', 'zip',
'from.year', 'from.month',
'from.day', 'to.year',
'to.month', 'to.day'])
writer.writeheader()
for i, z in enumerate(zipcode):
r = {'uniqid': i,
'zip': z,
'from.year': args.year,
'from.month': '01',
'from.day': '01',
'to.year': args.year,
'to.month': '12',
'to.day': '31'}
writer.writerow(r)
print("Total unique zip code = {:d}".format(i))
|
|
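For reference, the inputs.csv rows the script emits have this shape (values invented for illustration; Python 3 I/O here, whereas the record opens the file 'wb' in Python 2 style):

import csv
import io

fieldnames = ['uniqid', 'zip', 'from.year', 'from.month', 'from.day',
              'to.year', 'to.month', 'to.day']
buf = io.StringIO()
writer = csv.DictWriter(buf, fieldnames=fieldnames)
writer.writeheader()
writer.writerow({'uniqid': 0, 'zip': '90210',
                 'from.year': 2015, 'from.month': '01', 'from.day': '01',
                 'to.year': 2015, 'to.month': '12', 'to.day': '31'})
print(buf.getvalue())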
46f61c87b8afe586cacc12a91d3f94c89253365f
|
functional/tests/compute/v2/test_security_group.py
|
functional/tests/compute/v2/test_security_group.py
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from functional.common import test
class SecurityGroupTests(test.TestCase):
"""Functional tests for security group. """
NAME = uuid.uuid4().hex
OTHER_NAME = uuid.uuid4().hex
HEADERS = ['Name']
FIELDS = ['name']
@classmethod
def setUpClass(cls):
opts = cls.get_show_opts(cls.FIELDS)
raw_output = cls.openstack('security group create ' + cls.NAME + opts)
expected = cls.NAME + '\n'
cls.assertOutput(expected, raw_output)
@classmethod
def tearDownClass(cls):
# Rename test
opts = cls.get_show_opts(cls.FIELDS)
raw_output = cls.openstack('security group set --name ' +
cls.OTHER_NAME + ' ' + cls.NAME + opts)
cls.assertOutput(cls.OTHER_NAME + "\n", raw_output)
# Delete test
raw_output = cls.openstack('security group delete ' + cls.OTHER_NAME)
cls.assertOutput('', raw_output)
def test_security_group_list(self):
opts = self.get_list_opts(self.HEADERS)
raw_output = self.openstack('security group list' + opts)
self.assertIn(self.NAME, raw_output)
def test_security_group_set(self):
opts = self.get_show_opts(['description', 'name'])
raw_output = self.openstack('security group set --description NSA ' +
self.NAME + opts)
self.assertEqual("NSA\n" + self.NAME + "\n", raw_output)
def test_security_group_show(self):
opts = self.get_show_opts(self.FIELDS)
raw_output = self.openstack('security group show ' + self.NAME + opts)
self.assertEqual(self.NAME + "\n", raw_output)
|
Add functional tests for security group CRUD
|
Add functional tests for security group CRUD
Change-Id: Ib5bbd46c0454d7dbb541354d515430922569c994
|
Python
|
apache-2.0
|
dtroyer/python-openstackclient,openstack/python-openstackclient,redhat-openstack/python-openstackclient,BjoernT/python-openstackclient,redhat-openstack/python-openstackclient,dtroyer/python-openstackclient,BjoernT/python-openstackclient,openstack/python-openstackclient
|
Add functional tests for security group CRUD
Change-Id: Ib5bbd46c0454d7dbb541354d515430922569c994
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from functional.common import test
class SecurityGroupTests(test.TestCase):
"""Functional tests for security group. """
NAME = uuid.uuid4().hex
OTHER_NAME = uuid.uuid4().hex
HEADERS = ['Name']
FIELDS = ['name']
@classmethod
def setUpClass(cls):
opts = cls.get_show_opts(cls.FIELDS)
raw_output = cls.openstack('security group create ' + cls.NAME + opts)
expected = cls.NAME + '\n'
cls.assertOutput(expected, raw_output)
@classmethod
def tearDownClass(cls):
# Rename test
opts = cls.get_show_opts(cls.FIELDS)
raw_output = cls.openstack('security group set --name ' +
cls.OTHER_NAME + ' ' + cls.NAME + opts)
cls.assertOutput(cls.OTHER_NAME + "\n", raw_output)
# Delete test
raw_output = cls.openstack('security group delete ' + cls.OTHER_NAME)
cls.assertOutput('', raw_output)
def test_security_group_list(self):
opts = self.get_list_opts(self.HEADERS)
raw_output = self.openstack('security group list' + opts)
self.assertIn(self.NAME, raw_output)
def test_security_group_set(self):
opts = self.get_show_opts(['description', 'name'])
raw_output = self.openstack('security group set --description NSA ' +
self.NAME + opts)
self.assertEqual("NSA\n" + self.NAME + "\n", raw_output)
def test_security_group_show(self):
opts = self.get_show_opts(self.FIELDS)
raw_output = self.openstack('security group show ' + self.NAME + opts)
self.assertEqual(self.NAME + "\n", raw_output)
|
<commit_before><commit_msg>Add functional tests for security group CRUD
Change-Id: Ib5bbd46c0454d7dbb541354d515430922569c994<commit_after>
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from functional.common import test
class SecurityGroupTests(test.TestCase):
"""Functional tests for security group. """
NAME = uuid.uuid4().hex
OTHER_NAME = uuid.uuid4().hex
HEADERS = ['Name']
FIELDS = ['name']
@classmethod
def setUpClass(cls):
opts = cls.get_show_opts(cls.FIELDS)
raw_output = cls.openstack('security group create ' + cls.NAME + opts)
expected = cls.NAME + '\n'
cls.assertOutput(expected, raw_output)
@classmethod
def tearDownClass(cls):
# Rename test
opts = cls.get_show_opts(cls.FIELDS)
raw_output = cls.openstack('security group set --name ' +
cls.OTHER_NAME + ' ' + cls.NAME + opts)
cls.assertOutput(cls.OTHER_NAME + "\n", raw_output)
# Delete test
raw_output = cls.openstack('security group delete ' + cls.OTHER_NAME)
cls.assertOutput('', raw_output)
def test_security_group_list(self):
opts = self.get_list_opts(self.HEADERS)
raw_output = self.openstack('security group list' + opts)
self.assertIn(self.NAME, raw_output)
def test_security_group_set(self):
opts = self.get_show_opts(['description', 'name'])
raw_output = self.openstack('security group set --description NSA ' +
self.NAME + opts)
self.assertEqual("NSA\n" + self.NAME + "\n", raw_output)
def test_security_group_show(self):
opts = self.get_show_opts(self.FIELDS)
raw_output = self.openstack('security group show ' + self.NAME + opts)
self.assertEqual(self.NAME + "\n", raw_output)
|
Add functional tests for security group CRUD
Change-Id: Ib5bbd46c0454d7dbb541354d515430922569c994# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from functional.common import test
class SecurityGroupTests(test.TestCase):
"""Functional tests for security group. """
NAME = uuid.uuid4().hex
OTHER_NAME = uuid.uuid4().hex
HEADERS = ['Name']
FIELDS = ['name']
@classmethod
def setUpClass(cls):
opts = cls.get_show_opts(cls.FIELDS)
raw_output = cls.openstack('security group create ' + cls.NAME + opts)
expected = cls.NAME + '\n'
cls.assertOutput(expected, raw_output)
@classmethod
def tearDownClass(cls):
# Rename test
opts = cls.get_show_opts(cls.FIELDS)
raw_output = cls.openstack('security group set --name ' +
cls.OTHER_NAME + ' ' + cls.NAME + opts)
cls.assertOutput(cls.OTHER_NAME + "\n", raw_output)
# Delete test
raw_output = cls.openstack('security group delete ' + cls.OTHER_NAME)
cls.assertOutput('', raw_output)
def test_security_group_list(self):
opts = self.get_list_opts(self.HEADERS)
raw_output = self.openstack('security group list' + opts)
self.assertIn(self.NAME, raw_output)
def test_security_group_set(self):
opts = self.get_show_opts(['description', 'name'])
raw_output = self.openstack('security group set --description NSA ' +
self.NAME + opts)
self.assertEqual("NSA\n" + self.NAME + "\n", raw_output)
def test_security_group_show(self):
opts = self.get_show_opts(self.FIELDS)
raw_output = self.openstack('security group show ' + self.NAME + opts)
self.assertEqual(self.NAME + "\n", raw_output)
|
<commit_before><commit_msg>Add functional tests for security group CRUD
Change-Id: Ib5bbd46c0454d7dbb541354d515430922569c994<commit_after># Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from functional.common import test
class SecurityGroupTests(test.TestCase):
"""Functional tests for security group. """
NAME = uuid.uuid4().hex
OTHER_NAME = uuid.uuid4().hex
HEADERS = ['Name']
FIELDS = ['name']
@classmethod
def setUpClass(cls):
opts = cls.get_show_opts(cls.FIELDS)
raw_output = cls.openstack('security group create ' + cls.NAME + opts)
expected = cls.NAME + '\n'
cls.assertOutput(expected, raw_output)
@classmethod
def tearDownClass(cls):
# Rename test
opts = cls.get_show_opts(cls.FIELDS)
raw_output = cls.openstack('security group set --name ' +
cls.OTHER_NAME + ' ' + cls.NAME + opts)
cls.assertOutput(cls.OTHER_NAME + "\n", raw_output)
# Delete test
raw_output = cls.openstack('security group delete ' + cls.OTHER_NAME)
cls.assertOutput('', raw_output)
def test_security_group_list(self):
opts = self.get_list_opts(self.HEADERS)
raw_output = self.openstack('security group list' + opts)
self.assertIn(self.NAME, raw_output)
def test_security_group_set(self):
opts = self.get_show_opts(['description', 'name'])
raw_output = self.openstack('security group set --description NSA ' +
self.NAME + opts)
self.assertEqual("NSA\n" + self.NAME + "\n", raw_output)
def test_security_group_show(self):
opts = self.get_show_opts(self.FIELDS)
raw_output = self.openstack('security group show ' + self.NAME + opts)
self.assertEqual(self.NAME + "\n", raw_output)
|
|
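The get_show_opts and get_list_opts helpers live in functional.common.test, which the record does not include. Plausible shapes, guessed from how the assertions read one bare value per output line (hypothetical, not the project's actual code):

def get_show_opts(fields):
    # e.g. ['description', 'name'] -> ' -f value -c description -c name'
    return ' -f value ' + ' '.join('-c ' + f for f in fields)

def get_list_opts(headers):
    return ' -f csv ' + ' '.join('-c ' + h for h in headers)

print(get_show_opts(['description', 'name']))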
b46b8cd1a6f31947ccc3a58722147d5d4b7d97e7
|
test_slashes.py
|
test_slashes.py
|
import os
import unittest
import stack
class TestSlashConvert(unittest.TestCase):
def test_relative(self):
# start with unix-style
source = "test/sub \n dir/file.txt"
# convert to OS-specific separators; this is a no-op on unix-like systems
os_slash = os.path.normpath(source)
# check that the convert to unix-style gets us back to the start
unix_slash = stack._to_unix_slashes(os_slash)
self.assertEquals(unix_slash, source)
if __name__ == '__main__':
unittest.main()
|
Add unit test for path conversion.
|
Add unit test for path conversion.
|
Python
|
mit
|
bbinet/pillarstack
|
Add unit test for path conversion.
|
import os
import unittest
import stack
class TestSlashConvert(unittest.TestCase):
def test_relative(self):
# start with unix-style
source = "test/sub \n dir/file.txt"
# convert to OS-specific separators; this is a no-op on unix-like systems
os_slash = os.path.normpath(source)
# check that the convert to unix-style gets us back to the start
unix_slash = stack._to_unix_slashes(os_slash)
self.assertEquals(unix_slash, source)
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add unit test for path conversion.<commit_after>
|
import os
import unittest
import stack
class TestSlashConvert(unittest.TestCase):
def test_relative(self):
# start with unix-style
source = "test/sub \n dir/file.txt"
# convert to OS-specific separators; this is a no-op on unix-like systems
os_slash = os.path.normpath(source)
# check that the convert to unix-style gets us back to the start
unix_slash = stack._to_unix_slashes(os_slash)
self.assertEquals(unix_slash, source)
if __name__ == '__main__':
unittest.main()
|
Add unit test for path conversion.import os
import unittest
import stack
class TestSlashConvert(unittest.TestCase):
def test_relative(self):
# start with unix-style
source = "test/sub \n dir/file.txt"
# convert to OS-specific separators; this is a no-op on unix-like systems
os_slash = os.path.normpath(source)
# check that the convert to unix-style gets us back to the start
unix_slash = stack._to_unix_slashes(os_slash)
self.assertEquals(unix_slash, source)
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add unit test for path conversion.<commit_after>import os
import unittest
import stack
class TestSlashConvert(unittest.TestCase):
def test_relative(self):
# start with unix-style
source = "test/sub \n dir/file.txt"
# convert to OS-specific separators; this is a no-op on unix-like systems
os_slash = os.path.normpath(source)
# check that the convert to unix-style gets us back to the start
unix_slash = stack._to_unix_slashes(os_slash)
self.assertEquals(unix_slash, source)
if __name__ == '__main__':
unittest.main()
|
|
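The stack module under test is not part of the record. An implementation consistent with what the test asserts could be as small as this (an assumption, not the project's actual code):

import os

def _to_unix_slashes(path):
    # Undo os.path.normpath's separator choice; a no-op where os.sep is '/'.
    return path.replace(os.sep, '/')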
8f7fe11bc02c7cadf6f531c064a560968dc2b003
|
readthedocs/core/management/commands/set_metadata.py
|
readthedocs/core/management/commands/set_metadata.py
|
import logging
from optparse import make_option
from django.core.management.base import BaseCommand
from django.conf import settings
from projects import tasks
from projects.models import Project
log = logging.getLogger(__name__)
class Command(BaseCommand):
def handle(self, *args, **options):
queryset = Project.objects.all()
for p in queryset:
log.info("Generating metadata for %s" % p)
try:
tasks.update_static_metadata(p.pk)
except Exception:
log.error('Build failed for %s' % p, exc_info=True)
|
Add command to generate metadata
|
Add command to generate metadata
|
Python
|
mit
|
kenshinthebattosai/readthedocs.org,clarkperkins/readthedocs.org,tddv/readthedocs.org,d0ugal/readthedocs.org,Tazer/readthedocs.org,kenwang76/readthedocs.org,emawind84/readthedocs.org,sils1297/readthedocs.org,fujita-shintaro/readthedocs.org,stevepiercy/readthedocs.org,espdev/readthedocs.org,emawind84/readthedocs.org,LukasBoersma/readthedocs.org,hach-que/readthedocs.org,agjohnson/readthedocs.org,VishvajitP/readthedocs.org,sid-kap/readthedocs.org,wijerasa/readthedocs.org,emawind84/readthedocs.org,espdev/readthedocs.org,d0ugal/readthedocs.org,agjohnson/readthedocs.org,nikolas/readthedocs.org,attakei/readthedocs-oauth,mhils/readthedocs.org,fujita-shintaro/readthedocs.org,safwanrahman/readthedocs.org,nikolas/readthedocs.org,tddv/readthedocs.org,davidfischer/readthedocs.org,gjtorikian/readthedocs.org,kdkeyser/readthedocs.org,istresearch/readthedocs.org,singingwolfboy/readthedocs.org,clarkperkins/readthedocs.org,CedarLogic/readthedocs.org,takluyver/readthedocs.org,hach-que/readthedocs.org,titiushko/readthedocs.org,dirn/readthedocs.org,d0ugal/readthedocs.org,LukasBoersma/readthedocs.org,raven47git/readthedocs.org,techtonik/readthedocs.org,stevepiercy/readthedocs.org,soulshake/readthedocs.org,kenwang76/readthedocs.org,istresearch/readthedocs.org,sunnyzwh/readthedocs.org,GovReady/readthedocs.org,sid-kap/readthedocs.org,kenwang76/readthedocs.org,jerel/readthedocs.org,KamranMackey/readthedocs.org,Tazer/readthedocs.org,davidfischer/readthedocs.org,kenshinthebattosai/readthedocs.org,attakei/readthedocs-oauth,kenshinthebattosai/readthedocs.org,wanghaven/readthedocs.org,rtfd/readthedocs.org,asampat3090/readthedocs.org,pombredanne/readthedocs.org,cgourlay/readthedocs.org,d0ugal/readthedocs.org,jerel/readthedocs.org,rtfd/readthedocs.org,CedarLogic/readthedocs.org,raven47git/readthedocs.org,michaelmcandrew/readthedocs.org,takluyver/readthedocs.org,techtonik/readthedocs.org,laplaceliu/readthedocs.org,mhils/readthedocs.org,sid-kap/readthedocs.org,agjohnson/readthedocs.org,singingwolfboy/readthedocs.org,safwanrahman/readthedocs.org,fujita-shintaro/readthedocs.org,titiushko/readthedocs.org,attakei/readthedocs-oauth,nikolas/readthedocs.org,clarkperkins/readthedocs.org,sils1297/readthedocs.org,istresearch/readthedocs.org,sunnyzwh/readthedocs.org,stevepiercy/readthedocs.org,pombredanne/readthedocs.org,clarkperkins/readthedocs.org,SteveViss/readthedocs.org,KamranMackey/readthedocs.org,rtfd/readthedocs.org,laplaceliu/readthedocs.org,kdkeyser/readthedocs.org,emawind84/readthedocs.org,michaelmcandrew/readthedocs.org,safwanrahman/readthedocs.org,attakei/readthedocs-oauth,davidfischer/readthedocs.org,VishvajitP/readthedocs.org,Tazer/readthedocs.org,VishvajitP/readthedocs.org,raven47git/readthedocs.org,wanghaven/readthedocs.org,espdev/readthedocs.org,fujita-shintaro/readthedocs.org,SteveViss/readthedocs.org,hach-que/readthedocs.org,sils1297/readthedocs.org,cgourlay/readthedocs.org,mrshoki/readthedocs.org,gjtorikian/readthedocs.org,LukasBoersma/readthedocs.org,techtonik/readthedocs.org,cgourlay/readthedocs.org,SteveViss/readthedocs.org,laplaceliu/readthedocs.org,dirn/readthedocs.org,titiushko/readthedocs.org,kenwang76/readthedocs.org,rtfd/readthedocs.org,tddv/readthedocs.org,jerel/readthedocs.org,CedarLogic/readthedocs.org,asampat3090/readthedocs.org,kenshinthebattosai/readthedocs.org,KamranMackey/readthedocs.org,atsuyim/readthedocs.org,GovReady/readthedocs.org,wijerasa/readthedocs.org,techtonik/readthedocs.org,mrshoki/readthedocs.org,atsuyim/readthedocs.org,sunnyzwh/readthedocs.org,cgourlay/readthedocs.org,LukasBoersma/readthedocs.org,sunnyzwh/readthedocs.org,GovReady/readthedocs.org,mrshoki/readthedocs.org,sid-kap/readthedocs.org,titiushko/readthedocs.org,Carreau/readthedocs.org,takluyver/readthedocs.org,Carreau/readthedocs.org,takluyver/readthedocs.org,CedarLogic/readthedocs.org,laplaceliu/readthedocs.org,soulshake/readthedocs.org,espdev/readthedocs.org,davidfischer/readthedocs.org,soulshake/readthedocs.org,royalwang/readthedocs.org,kdkeyser/readthedocs.org,agjohnson/readthedocs.org,safwanrahman/readthedocs.org,wijerasa/readthedocs.org,atsuyim/readthedocs.org,hach-que/readthedocs.org,royalwang/readthedocs.org,gjtorikian/readthedocs.org,dirn/readthedocs.org,soulshake/readthedocs.org,michaelmcandrew/readthedocs.org,asampat3090/readthedocs.org,atsuyim/readthedocs.org,kdkeyser/readthedocs.org,royalwang/readthedocs.org,sils1297/readthedocs.org,stevepiercy/readthedocs.org,singingwolfboy/readthedocs.org,Carreau/readthedocs.org,wanghaven/readthedocs.org
|
Add command to generate metadata
|
import logging
from optparse import make_option
from django.core.management.base import BaseCommand
from django.conf import settings
from projects import tasks
from projects.models import Project
log = logging.getLogger(__name__)
class Command(BaseCommand):
def handle(self, *args, **options):
queryset = Project.objects.all()
for p in queryset:
log.info("Generating metadata for %s" % p)
try:
tasks.update_static_metadata(p.pk)
except Exception:
log.error('Build failed for %s' % p, exc_info=True)
|
<commit_before><commit_msg>Add command to generate metadata<commit_after>
|
import logging
from optparse import make_option
from django.core.management.base import BaseCommand
from django.conf import settings
from projects import tasks
from projects.models import Project
log = logging.getLogger(__name__)
class Command(BaseCommand):
def handle(self, *args, **options):
queryset = Project.objects.all()
for p in queryset:
log.info("Generating metadata for %s" % p)
try:
tasks.update_static_metadata(p.pk)
except Exception:
log.error('Build failed for %s' % p, exc_info=True)
|
Add command to generate metadataimport logging
from optparse import make_option
from django.core.management.base import BaseCommand
from django.conf import settings
from projects import tasks
from projects.models import Project
log = logging.getLogger(__name__)
class Command(BaseCommand):
def handle(self, *args, **options):
queryset = Project.objects.all()
for p in queryset:
log.info("Generating metadata for %s" % p)
try:
tasks.update_static_metadata(p.pk)
except Exception:
log.error('Build failed for %s' % p, exc_info=True)
|
<commit_before><commit_msg>Add command to generate metadata<commit_after>import logging
from optparse import make_option
from django.core.management.base import BaseCommand
from django.conf import settings
from projects import tasks
from projects.models import Project
log = logging.getLogger(__name__)
class Command(BaseCommand):
def handle(self, *args, **options):
queryset = Project.objects.all()
for p in queryset:
log.info("Generating metadata for %s" % p)
try:
tasks.update_static_metadata(p.pk)
except Exception:
log.error('Build failed for %s' % p, exc_info=True)
|
|
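The command above walks every `Project` and calls `tasks.update_static_metadata` directly, logging failures rather than aborting (note the `make_option` and `settings` imports are unused leftovers). A minimal, framework-free sketch of the same fail-soft loop; the names here are illustrative, not from the readthedocs codebase:

```python
import logging

log = logging.getLogger(__name__)

def process_all(items, task):
    """Run task(item) for every item; log failures and keep going."""
    failures = 0
    for item in items:
        log.info("Generating metadata for %s", item)
        try:
            task(item)
        except Exception:
            log.error("Build failed for %s", item, exc_info=True)
            failures += 1
    return failures

# e.g. process_all(Project.objects.all(), lambda p: tasks.update_static_metadata(p.pk))
```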
b9dac640d1e715d33354029ebf46ad3387f22bfe
|
CodeFights/mergingVines.py
|
CodeFights/mergingVines.py
|
#!/usr/local/bin/python
# Code Fights Merging Vines Problem
from functools import reduce
import math
def compose(functions):
return reduce(lambda f, g: lambda x: f(g(x)), functions, lambda x: x)
def functionsComposition(functions, x):
return compose(map(eval, functions))(x)
def mergingVines(vines, n):
def nTimes(n):
pass
@nTimes(n)
def sumOnce(vines):
res = [vines[i] + vines[i + 1] for i in range(0, len(vines) - 1, 2)]
if len(vines) % 2 == 1:
res.append(vines[-1])
return res
return sumOnce(vines)
def main():
tests = [
[["abs", "math.sin", "lambda x: 3 * x / 2"], math.pi, 1],
[["math.sin", "math.cos", "lambda x: x * 2", "lambda x: x ** 2"],
1, math.sin(math.cos((1**2) * 2))],
[["lambda z: z", "lambda z: 1.0 * z / 13"], -1000, (-1000 / 13) * 1.0],
[["float"], 1000, 1000],
[["abs"], -20, 20]
]
for t in tests:
res = functionsComposition(t[0], t[1])
ans = t[2]
if ans == res:
print("PASSED: functionsComposition({}, {}) returned {}"
.format(t[0], t[1], res))
else:
print("FAILED: functionsComposition({}, {}) returned {},"
"answer: {}".format(t[0], t[1], res, ans))
if __name__ == '__main__':
main()
|
Set up Code Fights merging vines problem
|
Set up Code Fights merging vines problem
|
Python
|
mit
|
HKuz/Test_Code
|
Set up Code Fights merging vines problem
|
#!/usr/local/bin/python
# Code Fights Merging Vines Problem
from functools import reduce
import math
def compose(functions):
return reduce(lambda f, g: lambda x: f(g(x)), functions, lambda x: x)
def functionsComposition(functions, x):
return compose(map(eval, functions))(x)
def mergingVines(vines, n):
def nTimes(n):
pass
@nTimes(n)
def sumOnce(vines):
res = [vines[i] + vines[i + 1] for i in range(0, len(vines) - 1, 2)]
if len(vines) % 2 == 1:
res.append(vines[-1])
return res
return sumOnce(vines)
def main():
tests = [
[["abs", "math.sin", "lambda x: 3 * x / 2"], math.pi, 1],
[["math.sin", "math.cos", "lambda x: x * 2", "lambda x: x ** 2"],
1, math.sin(math.cos((1**2) * 2))],
[["lambda z: z", "lambda z: 1.0 * z / 13"], -1000, (-1000 / 13) * 1.0],
[["float"], 1000, 1000],
[["abs"], -20, 20]
]
for t in tests:
res = functionsComposition(t[0], t[1])
ans = t[2]
if ans == res:
print("PASSED: functionsComposition({}, {}) returned {}"
.format(t[0], t[1], res))
else:
print("FAILED: functionsComposition({}, {}) returned {},"
"answer: {}".format(t[0], t[1], res, ans))
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Set up Code Fights merging vines problem<commit_after>
|
#!/usr/local/bin/python
# Code Fights Merging Vines Problem
from functools import reduce
import math
def compose(functions):
return reduce(lambda f, g: lambda x: f(g(x)), functions, lambda x: x)
def functionsComposition(functions, x):
return compose(map(eval, functions))(x)
def mergingVines(vines, n):
def nTimes(n):
pass
@nTimes(n)
def sumOnce(vines):
res = [vines[i] + vines[i + 1] for i in range(0, len(vines) - 1, 2)]
if len(vines) % 2 == 1:
res.append(vines[-1])
return res
return sumOnce(vines)
def main():
tests = [
[["abs", "math.sin", "lambda x: 3 * x / 2"], math.pi, 1],
[["math.sin", "math.cos", "lambda x: x * 2", "lambda x: x ** 2"],
1, math.sin(math.cos((1**2) * 2))],
[["lambda z: z", "lambda z: 1.0 * z / 13"], -1000, (-1000 / 13) * 1.0],
[["float"], 1000, 1000],
[["abs"], -20, 20]
]
for t in tests:
res = functionsComposition(t[0], t[1])
ans = t[2]
if ans == res:
print("PASSED: functionsComposition({}, {}) returned {}"
.format(t[0], t[1], res))
else:
print("FAILED: functionsComposition({}, {}) returned {},"
"answer: {}".format(t[0], t[1], res, ans))
if __name__ == '__main__':
main()
|
Set up Code Fights merging vines problem#!/usr/local/bin/python
# Code Fights Merging Vines Problem
from functools import reduce
import math
def compose(functions):
return reduce(lambda f, g: lambda x: f(g(x)), functions, lambda x: x)
def functionsComposition(functions, x):
return compose(map(eval, functions))(x)
def mergingVines(vines, n):
def nTimes(n):
pass
@nTimes(n)
def sumOnce(vines):
res = [vines[i] + vines[i + 1] for i in range(0, len(vines) - 1, 2)]
if len(vines) % 2 == 1:
res.append(vines[-1])
return res
return sumOnce(vines)
def main():
tests = [
[["abs", "math.sin", "lambda x: 3 * x / 2"], math.pi, 1],
[["math.sin", "math.cos", "lambda x: x * 2", "lambda x: x ** 2"],
1, math.sin(math.cos((1**2) * 2))],
[["lambda z: z", "lambda z: 1.0 * z / 13"], -1000, (-1000 / 13) * 1.0],
[["float"], 1000, 1000],
[["abs"], -20, 20]
]
for t in tests:
res = functionsComposition(t[0], t[1])
ans = t[2]
if ans == res:
print("PASSED: functionsComposition({}, {}) returned {}"
.format(t[0], t[1], res))
else:
print("FAILED: functionsComposition({}, {}) returned {},"
"answer: {}".format(t[0], t[1], res, ans))
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Set up Code Fights merging vines problem<commit_after>#!/usr/local/bin/python
# Code Fights Merging Vines Problem
from functools import reduce
import math
def compose(functions):
return reduce(lambda f, g: lambda x: f(g(x)), functions, lambda x: x)
def functionsComposition(functions, x):
return compose(map(eval, functions))(x)
def mergingVines(vines, n):
def nTimes(n):
pass
@nTimes(n)
def sumOnce(vines):
res = [vines[i] + vines[i + 1] for i in range(0, len(vines) - 1, 2)]
if len(vines) % 2 == 1:
res.append(vines[-1])
return res
return sumOnce(vines)
def main():
tests = [
[["abs", "math.sin", "lambda x: 3 * x / 2"], math.pi, 1],
[["math.sin", "math.cos", "lambda x: x * 2", "lambda x: x ** 2"],
1, math.sin(math.cos((1**2) * 2))],
[["lambda z: z", "lambda z: 1.0 * z / 13"], -1000, (-1000 / 13) * 1.0],
[["float"], 1000, 1000],
[["abs"], -20, 20]
]
for t in tests:
res = functionsComposition(t[0], t[1])
ans = t[2]
if ans == res:
print("PASSED: functionsComposition({}, {}) returned {}"
.format(t[0], t[1], res))
else:
print("FAILED: functionsComposition({}, {}) returned {},"
"answer: {}".format(t[0], t[1], res, ans))
if __name__ == '__main__':
main()
|
|
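One caveat on the record above: `nTimes` is a stub, so `nTimes(n)` returns `None` and the `@nTimes(n)` line raises `TypeError` as soon as `mergingVines` runs; only `functionsComposition` is actually exercised by `main()`. A possible completion of the decorator factory, assuming it is meant to feed the one-pass merge its own output `n` times:

```python
from functools import wraps

def n_times(n):
    """Decorator factory: apply the wrapped function n times, feeding results forward."""
    def decorate(step):
        @wraps(step)
        def wrapper(value):
            for _ in range(n):
                value = step(value)
            return value
        return wrapper
    return decorate

@n_times(2)
def sum_once(vines):
    res = [vines[i] + vines[i + 1] for i in range(0, len(vines) - 1, 2)]
    if len(vines) % 2 == 1:
        res.append(vines[-1])
    return res

print(sum_once([1, 2, 3, 4, 5]))  # two merge passes: [1,2,3,4,5] -> [3,7,5] -> [10, 5]
```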
52d45a55c61c229f9d99b3f437b76a363a34b481
|
i8c/tests/test_externals.py
|
i8c/tests/test_externals.py
|
from i8c.tests import TestCase
SOURCE = """\
define test::externals_test returns int
extern ptr a_symbol
extern func int, int (ptr) a_function
call
return
"""
class TestExternals(TestCase):
def test_externals(self):
"""Check that externals work."""
tree, output = self.compile(SOURCE)
self.assertEqual(["GNU_i8call"], output.operations)
|
Add a basic check for externals
|
Add a basic check for externals
|
Python
|
lgpl-2.1
|
gbenson/i8c
|
Add a basic check for externals
|
from i8c.tests import TestCase
SOURCE = """\
define test::externals_test returns int
extern ptr a_symbol
extern func int, int (ptr) a_function
call
return
"""
class TestExternals(TestCase):
def test_externals(self):
"""Check that externals work."""
tree, output = self.compile(SOURCE)
self.assertEqual(["GNU_i8call"], output.operations)
|
<commit_before><commit_msg>Add a basic check for externals<commit_after>
|
from i8c.tests import TestCase
SOURCE = """\
define test::externals_test returns int
extern ptr a_symbol
extern func int, int (ptr) a_function
call
return
"""
class TestExternals(TestCase):
def test_externals(self):
"""Check that externals work."""
tree, output = self.compile(SOURCE)
self.assertEqual(["GNU_i8call"], output.operations)
|
Add a basic check for externalsfrom i8c.tests import TestCase
SOURCE = """\
define test::externals_test returns int
extern ptr a_symbol
extern func int, int (ptr) a_function
call
return
"""
class TestExternals(TestCase):
def test_externals(self):
"""Check that externals work."""
tree, output = self.compile(SOURCE)
self.assertEqual(["GNU_i8call"], output.operations)
|
<commit_before><commit_msg>Add a basic check for externals<commit_after>from i8c.tests import TestCase
SOURCE = """\
define test::externals_test returns int
extern ptr a_symbol
extern func int, int (ptr) a_function
call
return
"""
class TestExternals(TestCase):
def test_externals(self):
"""Check that externals work."""
tree, output = self.compile(SOURCE)
self.assertEqual(["GNU_i8call"], output.operations)
|
|
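Should more extern forms need coverage later, the `compile` helper inherited from `i8c.tests.TestCase` can drive a small table of sources. A hedged sketch reusing the record's source and expected operations; any additional cases would be placeholders until verified against the compiler:

```python
from i8c.tests import TestCase

SOURCE = """\
define test::externals_test returns int
extern ptr a_symbol
extern func int, int (ptr) a_function
call
return
"""

class TestExternalsTable(TestCase):
    CASES = [(SOURCE, ["GNU_i8call"])]

    def test_cases(self):
        for source, expected_ops in self.CASES:
            tree, output = self.compile(source)
            self.assertEqual(expected_ops, output.operations)
```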
281695fed73f9aa9eee0a1007388cc5ae4768bc8
|
examples/translations/spanish_test_1.py
|
examples/translations/spanish_test_1.py
|
# Spanish Language Test - Python 3 Only!
from seleniumbase.translate.spanish import CasoDePrueba
class MiClaseDePrueba(CasoDePrueba):
def test_ejemplo_1(self):
self.abrir_url("https://es.wikipedia.org/wiki/")
self.verificar_texto("Wikipedia")
self.verificar_elemento('[title="Visitar la página principal"]')
self.actualizar_texto("#searchInput", "Parc d'Atraccions Tibidabo")
self.haga_clic("#searchButton")
self.verificar_texto("Tibidabo", "#firstHeading")
self.verificar_elemento('img[alt*="Tibidabo"]')
self.actualizar_texto("#searchInput", "Palma de Mallorca")
self.haga_clic("#searchButton")
self.verificar_texto("Palma de Mallorca", "#firstHeading")
self.verificar_elemento('img[alt*="Palma"]')
self.volver()
self.verificar_verdad("Tibidabo" in self.obtener_url_actual())
self.adelante()
self.verificar_verdad("Mallorca" in self.obtener_url_actual())
|
Add an example test in Spanish
|
Add an example test in Spanish
|
Python
|
mit
|
mdmintz/SeleniumBase,seleniumbase/SeleniumBase,seleniumbase/SeleniumBase,mdmintz/SeleniumBase,seleniumbase/SeleniumBase,seleniumbase/SeleniumBase,mdmintz/SeleniumBase,mdmintz/SeleniumBase
|
Add an example test in Spanish
|
# Spanish Language Test - Python 3 Only!
from seleniumbase.translate.spanish import CasoDePrueba
class MiClaseDePrueba(CasoDePrueba):
def test_ejemplo_1(self):
self.abrir_url("https://es.wikipedia.org/wiki/")
self.verificar_texto("Wikipedia")
self.verificar_elemento('[title="Visitar la página principal"]')
self.actualizar_texto("#searchInput", "Parc d'Atraccions Tibidabo")
self.haga_clic("#searchButton")
self.verificar_texto("Tibidabo", "#firstHeading")
self.verificar_elemento('img[alt*="Tibidabo"]')
self.actualizar_texto("#searchInput", "Palma de Mallorca")
self.haga_clic("#searchButton")
self.verificar_texto("Palma de Mallorca", "#firstHeading")
self.verificar_elemento('img[alt*="Palma"]')
self.volver()
self.verificar_verdad("Tibidabo" in self.obtener_url_actual())
self.adelante()
self.verificar_verdad("Mallorca" in self.obtener_url_actual())
|
<commit_before><commit_msg>Add an example test in Spanish<commit_after>
|
# Spanish Language Test - Python 3 Only!
from seleniumbase.translate.spanish import CasoDePrueba
class MiClaseDePrueba(CasoDePrueba):
def test_ejemplo_1(self):
self.abrir_url("https://es.wikipedia.org/wiki/")
self.verificar_texto("Wikipedia")
self.verificar_elemento('[title="Visitar la página principal"]')
self.actualizar_texto("#searchInput", "Parc d'Atraccions Tibidabo")
self.haga_clic("#searchButton")
self.verificar_texto("Tibidabo", "#firstHeading")
self.verificar_elemento('img[alt*="Tibidabo"]')
self.actualizar_texto("#searchInput", "Palma de Mallorca")
self.haga_clic("#searchButton")
self.verificar_texto("Palma de Mallorca", "#firstHeading")
self.verificar_elemento('img[alt*="Palma"]')
self.volver()
self.verificar_verdad("Tibidabo" in self.obtener_url_actual())
self.adelante()
self.verificar_verdad("Mallorca" in self.obtener_url_actual())
|
Add an example test in Spanish# Spanish Language Test - Python 3 Only!
from seleniumbase.translate.spanish import CasoDePrueba
class MiClaseDePrueba(CasoDePrueba):
def test_ejemplo_1(self):
self.abrir_url("https://es.wikipedia.org/wiki/")
self.verificar_texto("Wikipedia")
self.verificar_elemento('[title="Visitar la página principal"]')
self.actualizar_texto("#searchInput", "Parc d'Atraccions Tibidabo")
self.haga_clic("#searchButton")
self.verificar_texto("Tibidabo", "#firstHeading")
self.verificar_elemento('img[alt*="Tibidabo"]')
self.actualizar_texto("#searchInput", "Palma de Mallorca")
self.haga_clic("#searchButton")
self.verificar_texto("Palma de Mallorca", "#firstHeading")
self.verificar_elemento('img[alt*="Palma"]')
self.volver()
self.verificar_verdad("Tibidabo" in self.obtener_url_actual())
self.adelante()
self.verificar_verdad("Mallorca" in self.obtener_url_actual())
|
<commit_before><commit_msg>Add an example test in Spanish<commit_after># Spanish Language Test - Python 3 Only!
from seleniumbase.translate.spanish import CasoDePrueba
class MiClaseDePrueba(CasoDePrueba):
def test_ejemplo_1(self):
self.abrir_url("https://es.wikipedia.org/wiki/")
self.verificar_texto("Wikipedia")
self.verificar_elemento('[title="Visitar la página principal"]')
self.actualizar_texto("#searchInput", "Parc d'Atraccions Tibidabo")
self.haga_clic("#searchButton")
self.verificar_texto("Tibidabo", "#firstHeading")
self.verificar_elemento('img[alt*="Tibidabo"]')
self.actualizar_texto("#searchInput", "Palma de Mallorca")
self.haga_clic("#searchButton")
self.verificar_texto("Palma de Mallorca", "#firstHeading")
self.verificar_elemento('img[alt*="Palma"]')
self.volver()
self.verificar_verdad("Tibidabo" in self.obtener_url_actual())
self.adelante()
self.verificar_verdad("Mallorca" in self.obtener_url_actual())
|
|
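For readers comparing against the English API, the translated methods map one-to-one onto SeleniumBase's standard `BaseCase` calls. A sketch of the presumed English equivalent of the first half of the test; the method names follow SeleniumBase's usual API and are worth checking against the installed version:

```python
from seleniumbase import BaseCase

class MyTestClass(BaseCase):
    def test_example_1(self):
        self.open("https://es.wikipedia.org/wiki/")    # abrir_url
        self.assert_text("Wikipedia")                  # verificar_texto
        self.assert_element('[title="Visitar la página principal"]')
        self.update_text("#searchInput", "Parc d'Atraccions Tibidabo")
        self.click("#searchButton")                    # haga_clic
        self.assert_text("Tibidabo", "#firstHeading")
        self.go_back()                                 # volver
        self.assert_true("Tibidabo" in self.get_current_url())
```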
dcb63e9153091052f8bcb253885e2f124ca68f18
|
vtwt/block.py
|
vtwt/block.py
|
import sys
from twisted.internet.defer import inlineCallbacks, gatherResults
from twisted.plugin import IPlugin
from twisted.python import usage
from zope.interface import implements
from jersey import log
from vtwt import cli
class BlockOptions(cli.Options):
def parseArgs(self, *names):
if not names:
raise usage.error("No one to block ;(")
self["blockees"] = names
class Blocker(cli.Command):
def execute(self):
return gatherResults(map(self._block, self.config["blockees"]))
@inlineCallbacks
def _block(self, user):
print "blocking: {0}".format(user)
yield self.vtwt.block(user)
print "{0} blocked {1}".format(self.config["user"], user)
class BlockLoader(cli.CommandFactory):
implements(IPlugin)
description = "Block the given user"
name = "block"
shortcut = "B"
options = BlockOptions
command = Blocker
loader = BlockLoader()
|
Add a flag to print debuggy logging info vtwt/cli.py
|
Add a flag to print debuggy logging info vtwt/cli.py
|
Python
|
bsd-3-clause
|
olix0r/vtwt
|
Add a flag to print debuggy logging info vtwt/cli.py
|
import sys
from twisted.internet.defer import inlineCallbacks, gatherResults
from twisted.plugin import IPlugin
from twisted.python import usage
from zope.interface import implements
from jersey import log
from vtwt import cli
class BlockOptions(cli.Options):
def parseArgs(self, *names):
if not names:
raise usage.error("No one to block ;(")
self["blockees"] = names
class Blocker(cli.Command):
def execute(self):
return gatherResults(map(self._block, self.config["blockees"]))
@inlineCallbacks
def _block(self, user):
print "blocking: {0}".format(user)
yield self.vtwt.block(user)
print "{0} blocked {1}".format(self.config["user"], user)
class BlockLoader(cli.CommandFactory):
implements(IPlugin)
description = "Block the given user"
name = "block"
shortcut = "B"
options = BlockOptions
command = Blocker
loader = BlockLoader()
|
<commit_before><commit_msg>Add a flag to print debuggy logging info vtwt/cli.py<commit_after>
|
import sys
from twisted.internet.defer import inlineCallbacks, gatherResults
from twisted.plugin import IPlugin
from twisted.python import usage
from zope.interface import implements
from jersey import log
from vtwt import cli
class BlockOptions(cli.Options):
def parseArgs(self, *names):
if not names:
raise usage.error("No one to block ;(")
self["blockees"] = names
class Blocker(cli.Command):
def execute(self):
return gatherResults(map(self._block, self.config["blockees"]))
@inlineCallbacks
def _block(self, user):
print "blocking: {0}".format(user)
yield self.vtwt.block(user)
print "{0} blocked {1}".format(self.config["user"], user)
class BlockLoader(cli.CommandFactory):
implements(IPlugin)
description = "Block the given user"
name = "block"
shortcut = "B"
options = BlockOptions
command = Blocker
loader = BlockLoader()
|
Add a flag to print debuggy logging info vtwt/cli.pyimport sys
from twisted.internet.defer import inlineCallbacks, gatherResults
from twisted.plugin import IPlugin
from twisted.python import usage
from zope.interface import implements
from jersey import log
from vtwt import cli
class BlockOptions(cli.Options):
def parseArgs(self, *names):
if not names:
raise usage.error("No one to block ;(")
self["blockees"] = names
class Blocker(cli.Command):
def execute(self):
return gatherResults(map(self._block, self.config["blockees"]))
@inlineCallbacks
def _block(self, user):
print "blocking: {0}".format(user)
yield self.vtwt.block(user)
print "{0} blocked {1}".format(self.config["user"], user)
class BlockLoader(cli.CommandFactory):
implements(IPlugin)
description = "Block the given user"
name = "block"
shortcut = "B"
options = BlockOptions
command = Blocker
loader = BlockLoader()
|
<commit_before><commit_msg>Add a flag to print debuggy logging info vtwt/cli.py<commit_after>import sys
from twisted.internet.defer import inlineCallbacks, gatherResults
from twisted.plugin import IPlugin
from twisted.python import usage
from zope.interface import implements
from jersey import log
from vtwt import cli
class BlockOptions(cli.Options):
def parseArgs(self, *names):
if not names:
raise usage.error("No one to block ;(")
self["blockees"] = names
class Blocker(cli.Command):
def execute(self):
return gatherResults(map(self._block, self.config["blockees"]))
@inlineCallbacks
def _block(self, user):
print "blocking: {0}".format(user)
yield self.vtwt.block(user)
print "{0} blocked {1}".format(self.config["user"], user)
class BlockLoader(cli.CommandFactory):
implements(IPlugin)
description = "Block the given user"
name = "block"
shortcut = "B"
options = BlockOptions
command = Blocker
loader = BlockLoader()
|
|
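Two details worth flagging in the record: the code is Python 2 (`print` statements, `zope.interface.implements`), and `usage.error` is presumably meant to be `twisted.python.usage.UsageError`, the exception Twisted's option parser actually defines. A minimal corrected options class, subclassing `usage.Options` directly here to stay self-contained (the repo's version derives from `cli.Options`):

```python
from twisted.python import usage

class BlockOptions(usage.Options):
    def parseArgs(self, *names):
        if not names:
            raise usage.UsageError("No one to block ;(")
        self["blockees"] = names
```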
576dd4204bb546c1944723742d63687e91099e81
|
tests/app/test_metrics.py
|
tests/app/test_metrics.py
|
import re
from tests.helpers import BaseApplicationTest
def load_prometheus_metrics(response_bytes):
return dict(re.findall(rb"(\w+{.+?}) (\d+)", response_bytes))
class TestMetrics(BaseApplicationTest):
def test_metrics_page_accessible(self):
metrics_response = self.client.get('/_metrics')
assert metrics_response.status_code == 200
def test_metrics_page_contents(self):
metrics_response = self.client.get('/_metrics')
results = load_prometheus_metrics(metrics_response.data)
assert (
b'http_server_requests_total{code="200",host="localhost",method="GET",path="/"}'
) in results
class TestMetricsPageRegistersPageViews(BaseApplicationTest):
def test_metrics_page_registers_page_views(self):
expected_metric_name = (
b'http_server_requests_total{code="200",host="localhost",method="GET",path="/"}'
)
res = self.client.get('/')
assert res.status_code == 200
metrics_response = self.client.get('/_metrics')
results = load_prometheus_metrics(metrics_response.data)
assert expected_metric_name in results
def test_metrics_page_registers_multiple_page_views(self):
expected_metric_name = (
b'http_server_requests_total{code="200",host="localhost",method="GET",path="/"}'
)
initial_metrics_response = self.client.get('/_metrics')
initial_results = load_prometheus_metrics(initial_metrics_response.data)
initial_metric_value = int(initial_results.get(expected_metric_name, 0))
for _ in range(3):
res = self.client.get('/')
assert res.status_code == 200
metrics_response = self.client.get('/_metrics')
results = load_prometheus_metrics(metrics_response.data)
metric_value = int(results.get(expected_metric_name, 0))
assert expected_metric_name in results
assert metric_value - initial_metric_value == 3
|
Add standard tests for metrics
|
Add standard tests for metrics
|
Python
|
mit
|
alphagov/digitalmarketplace-search-api,alphagov/digitalmarketplace-search-api
|
Add standard tests for metrics
|
import re
from tests.helpers import BaseApplicationTest
def load_prometheus_metrics(response_bytes):
return dict(re.findall(rb"(\w+{.+?}) (\d+)", response_bytes))
class TestMetrics(BaseApplicationTest):
def test_metrics_page_accessible(self):
metrics_response = self.client.get('/_metrics')
assert metrics_response.status_code == 200
def test_metrics_page_contents(self):
metrics_response = self.client.get('/_metrics')
results = load_prometheus_metrics(metrics_response.data)
assert (
b'http_server_requests_total{code="200",host="localhost",method="GET",path="/"}'
) in results
class TestMetricsPageRegistersPageViews(BaseApplicationTest):
def test_metrics_page_registers_page_views(self):
expected_metric_name = (
b'http_server_requests_total{code="200",host="localhost",method="GET",path="/"}'
)
res = self.client.get('/')
assert res.status_code == 200
metrics_response = self.client.get('/_metrics')
results = load_prometheus_metrics(metrics_response.data)
assert expected_metric_name in results
def test_metrics_page_registers_multiple_page_views(self):
expected_metric_name = (
b'http_server_requests_total{code="200",host="localhost",method="GET",path="/"}'
)
initial_metrics_response = self.client.get('/_metrics')
initial_results = load_prometheus_metrics(initial_metrics_response.data)
initial_metric_value = int(initial_results.get(expected_metric_name, 0))
for _ in range(3):
res = self.client.get('/')
assert res.status_code == 200
metrics_response = self.client.get('/_metrics')
results = load_prometheus_metrics(metrics_response.data)
metric_value = int(results.get(expected_metric_name, 0))
assert expected_metric_name in results
assert metric_value - initial_metric_value == 3
|
<commit_before><commit_msg>Add standard tests for metrics<commit_after>
|
import re
from tests.helpers import BaseApplicationTest
def load_prometheus_metrics(response_bytes):
return dict(re.findall(rb"(\w+{.+?}) (\d+)", response_bytes))
class TestMetrics(BaseApplicationTest):
def test_metrics_page_accessible(self):
metrics_response = self.client.get('/_metrics')
assert metrics_response.status_code == 200
def test_metrics_page_contents(self):
metrics_response = self.client.get('/_metrics')
results = load_prometheus_metrics(metrics_response.data)
assert (
b'http_server_requests_total{code="200",host="localhost",method="GET",path="/"}'
) in results
class TestMetricsPageRegistersPageViews(BaseApplicationTest):
def test_metrics_page_registers_page_views(self):
expected_metric_name = (
b'http_server_requests_total{code="200",host="localhost",method="GET",path="/"}'
)
res = self.client.get('/')
assert res.status_code == 200
metrics_response = self.client.get('/_metrics')
results = load_prometheus_metrics(metrics_response.data)
assert expected_metric_name in results
def test_metrics_page_registers_multiple_page_views(self):
expected_metric_name = (
b'http_server_requests_total{code="200",host="localhost",method="GET",path="/"}'
)
initial_metrics_response = self.client.get('/_metrics')
initial_results = load_prometheus_metrics(initial_metrics_response.data)
initial_metric_value = int(initial_results.get(expected_metric_name, 0))
for _ in range(3):
res = self.client.get('/')
assert res.status_code == 200
metrics_response = self.client.get('/_metrics')
results = load_prometheus_metrics(metrics_response.data)
metric_value = int(results.get(expected_metric_name, 0))
assert expected_metric_name in results
assert metric_value - initial_metric_value == 3
|
Add standard tests for metricsimport re
from tests.helpers import BaseApplicationTest
def load_prometheus_metrics(response_bytes):
return dict(re.findall(rb"(\w+{.+?}) (\d+)", response_bytes))
class TestMetrics(BaseApplicationTest):
def test_metrics_page_accessible(self):
metrics_response = self.client.get('/_metrics')
assert metrics_response.status_code == 200
def test_metrics_page_contents(self):
metrics_response = self.client.get('/_metrics')
results = load_prometheus_metrics(metrics_response.data)
assert (
b'http_server_requests_total{code="200",host="localhost",method="GET",path="/"}'
) in results
class TestMetricsPageRegistersPageViews(BaseApplicationTest):
def test_metrics_page_registers_page_views(self):
expected_metric_name = (
b'http_server_requests_total{code="200",host="localhost",method="GET",path="/"}'
)
res = self.client.get('/')
assert res.status_code == 200
metrics_response = self.client.get('/_metrics')
results = load_prometheus_metrics(metrics_response.data)
assert expected_metric_name in results
def test_metrics_page_registers_multiple_page_views(self):
expected_metric_name = (
b'http_server_requests_total{code="200",host="localhost",method="GET",path="/"}'
)
initial_metrics_response = self.client.get('/_metrics')
initial_results = load_prometheus_metrics(initial_metrics_response.data)
initial_metric_value = int(initial_results.get(expected_metric_name, 0))
for _ in range(3):
res = self.client.get('/')
assert res.status_code == 200
metrics_response = self.client.get('/_metrics')
results = load_prometheus_metrics(metrics_response.data)
metric_value = int(results.get(expected_metric_name, 0))
assert expected_metric_name in results
assert metric_value - initial_metric_value == 3
|
<commit_before><commit_msg>Add standard tests for metrics<commit_after>import re
from tests.helpers import BaseApplicationTest
def load_prometheus_metrics(response_bytes):
return dict(re.findall(rb"(\w+{.+?}) (\d+)", response_bytes))
class TestMetrics(BaseApplicationTest):
def test_metrics_page_accessible(self):
metrics_response = self.client.get('/_metrics')
assert metrics_response.status_code == 200
def test_metrics_page_contents(self):
metrics_response = self.client.get('/_metrics')
results = load_prometheus_metrics(metrics_response.data)
assert (
b'http_server_requests_total{code="200",host="localhost",method="GET",path="/"}'
) in results
class TestMetricsPageRegistersPageViews(BaseApplicationTest):
def test_metrics_page_registers_page_views(self):
expected_metric_name = (
b'http_server_requests_total{code="200",host="localhost",method="GET",path="/"}'
)
res = self.client.get('/')
assert res.status_code == 200
metrics_response = self.client.get('/_metrics')
results = load_prometheus_metrics(metrics_response.data)
assert expected_metric_name in results
def test_metrics_page_registers_multiple_page_views(self):
expected_metric_name = (
b'http_server_requests_total{code="200",host="localhost",method="GET",path="/"}'
)
initial_metrics_response = self.client.get('/_metrics')
initial_results = load_prometheus_metrics(initial_metrics_response.data)
initial_metric_value = int(initial_results.get(expected_metric_name, 0))
for _ in range(3):
res = self.client.get('/')
assert res.status_code == 200
metrics_response = self.client.get('/_metrics')
results = load_prometheus_metrics(metrics_response.data)
metric_value = int(results.get(expected_metric_name, 0))
assert expected_metric_name in results
assert metric_value - initial_metric_value == 3
|
|
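`load_prometheus_metrics` leans on a lazy regex over the Prometheus exposition format and returns bytes for both keys and values, hence the `int(...)` wrappers in the tests. A quick standalone check (note `\d+` only matches integer samples, so float-valued metrics would be skipped):

```python
import re

def load_prometheus_metrics(response_bytes):
    return dict(re.findall(rb"(\w+{.+?}) (\d+)", response_bytes))

sample = (
    b'http_server_requests_total{code="200",host="localhost",method="GET",path="/"} 3\n'
    b'http_server_requests_total{code="404",host="localhost",method="GET",path="/x"} 1\n'
)
metrics = load_prometheus_metrics(sample)
key = b'http_server_requests_total{code="200",host="localhost",method="GET",path="/"}'
assert int(metrics[key]) == 3  # int() accepts ASCII digit bytes
```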
97832925b88406434ba3e4e5c8f4b8870f303816
|
producerTrump.py
|
producerTrump.py
|
from pyspark import SparkConf, SparkContext
from pyspark.streaming import StreamingContext
from pyspark.streaming.kafka import KafkaUtils
from kafka import SimpleProducer, KafkaClient
from kafka import KafkaProducer
from operator import add
import sys
import json
from twitter import Twitter, OAuth, TwitterHTTPError, TwitterStream
import os
os.environ['PYSPARK_SUBMIT_ARGS'] = '--jars $SPARK_HOME/jars/spark-streaming-kafka-0-8-assembly_2.11.jar pyspark-shell'
def read_credentials():
file_name = "credentials.json"
try:
with open(file_name) as data_file:
return json.load(data_file)
except:
print ("Cannot load "+data_file)
return None
def producer1():
sc = SparkContext(appName="ProducerTrump")
ssc = StreamingContext(sc, 180)
kvs = KafkaUtils.createDirectStream(ssc, ["trump"], {"metadata.broker.list": "localhost:9092"})
kvs.foreachRDD(send)
producer.flush()
ssc.start()
ssc.awaitTermination()
def send(message):
iterator = twitter_stream.statuses.sample()
count=0
for tweet in iterator:
producer.send('trump', bytes(json.dumps(tweet, indent=6), "ascii"))
count+=1
print("Tweet sent")
if(count==20000):
break
if __name__ == "__main__":
print("Starting to read tweets")
credentials = read_credentials()
oauth = OAuth(credentials['ACCESS_TOKEN'], credentials['ACCESS_SECRET'], credentials['CONSUMER_KEY'], credentials['CONSUMER_SECRET'])
twitter_stream = TwitterStream(auth=oauth)
producer = KafkaProducer(bootstrap_servers='localhost:9092')
producer1()
|
Add new producer to test
|
Add new producer to test
|
Python
|
mit
|
andresher/bigdata-project2
|
Add new producer to test
|
from pyspark import SparkConf, SparkContext
from pyspark.streaming import StreamingContext
from pyspark.streaming.kafka import KafkaUtils
from kafka import SimpleProducer, KafkaClient
from kafka import KafkaProducer
from operator import add
import sys
import json
from twitter import Twitter, OAuth, TwitterHTTPError, TwitterStream
import os
os.environ['PYSPARK_SUBMIT_ARGS'] = '--jars $SPARK_HOME/jars/spark-streaming-kafka-0-8-assembly_2.11.jar pyspark-shell'
def read_credentials():
file_name = "credentials.json"
try:
with open(file_name) as data_file:
return json.load(data_file)
except:
print ("Cannot load "+data_file)
return None
def producer1():
sc = SparkContext(appName="ProducerTrump")
ssc = StreamingContext(sc, 180)
kvs = KafkaUtils.createDirectStream(ssc, ["trump"], {"metadata.broker.list": "localhost:9092"})
kvs.foreachRDD(send)
producer.flush()
ssc.start()
ssc.awaitTermination()
def send(message):
iterator = twitter_stream.statuses.sample()
count=0
for tweet in iterator:
producer.send('trump', bytes(json.dumps(tweet, indent=6), "ascii"))
count+=1
print("Tweet sent")
if(count==20000):
break
if __name__ == "__main__":
print("Starting to read tweets")
credentials = read_credentials()
oauth = OAuth(credentials['ACCESS_TOKEN'], credentials['ACCESS_SECRET'], credentials['CONSUMER_KEY'], credentials['CONSUMER_SECRET'])
twitter_stream = TwitterStream(auth=oauth)
producer = KafkaProducer(bootstrap_servers='localhost:9092')
producer1()
|
<commit_before><commit_msg>Add new producer to test<commit_after>
|
from pyspark import SparkConf, SparkContext
from pyspark.streaming import StreamingContext
from pyspark.streaming.kafka import KafkaUtils
from kafka import SimpleProducer, KafkaClient
from kafka import KafkaProducer
from operator import add
import sys
import json
from twitter import Twitter, OAuth, TwitterHTTPError, TwitterStream
import os
os.environ['PYSPARK_SUBMIT_ARGS'] = '--jars $SPARK_HOME/jars/spark-streaming-kafka-0-8-assembly_2.11.jar pyspark-shell'
def read_credentials():
file_name = "credentials.json"
try:
with open(file_name) as data_file:
return json.load(data_file)
except:
print ("Cannot load "+data_file)
return None
def producer1():
sc = SparkContext(appName="ProducerTrump")
ssc = StreamingContext(sc, 180)
kvs = KafkaUtils.createDirectStream(ssc, ["trump"], {"metadata.broker.list": "localhost:9092"})
kvs.foreachRDD(send)
producer.flush()
ssc.start()
ssc.awaitTermination()
def send(message):
iterator = twitter_stream.statuses.sample()
count=0
for tweet in iterator:
producer.send('trump', bytes(json.dumps(tweet, indent=6), "ascii"))
count+=1
print("Tweet sent")
if(count==20000):
break
if __name__ == "__main__":
print("Starting to read tweets")
credentials = read_credentials()
oauth = OAuth(credentials['ACCESS_TOKEN'], credentials['ACCESS_SECRET'], credentials['CONSUMER_KEY'], credentials['CONSUMER_SECRET'])
twitter_stream = TwitterStream(auth=oauth)
producer = KafkaProducer(bootstrap_servers='localhost:9092')
producer1()
|
Add new producer to testfrom pyspark import SparkConf, SparkContext
from pyspark.streaming import StreamingContext
from pyspark.streaming.kafka import KafkaUtils
from kafka import SimpleProducer, KafkaClient
from kafka import KafkaProducer
from operator import add
import sys
import json
from twitter import Twitter, OAuth, TwitterHTTPError, TwitterStream
import os
os.environ['PYSPARK_SUBMIT_ARGS'] = '--jars $SPARK_HOME/jars/spark-streaming-kafka-0-8-assembly_2.11.jar pyspark-shell'
def read_credentials():
file_name = "credentials.json"
try:
with open(file_name) as data_file:
return json.load(data_file)
except:
print ("Cannot load "+data_file)
return None
def producer1():
sc = SparkContext(appName="ProducerTrump")
ssc = StreamingContext(sc, 180)
kvs = KafkaUtils.createDirectStream(ssc, ["trump"], {"metadata.broker.list": "localhost:9092"})
kvs.foreachRDD(send)
producer.flush()
ssc.start()
ssc.awaitTermination()
def send(message):
iterator = twitter_stream.statuses.sample()
count=0
for tweet in iterator:
producer.send('trump', bytes(json.dumps(tweet, indent=6), "ascii"))
count+=1
print("Tweet sent")
if(count==20000):
break
if __name__ == "__main__":
print("Starting to read tweets")
credentials = read_credentials()
oauth = OAuth(credentials['ACCESS_TOKEN'], credentials['ACCESS_SECRET'], credentials['CONSUMER_KEY'], credentials['CONSUMER_SECRET'])
twitter_stream = TwitterStream(auth=oauth)
producer = KafkaProducer(bootstrap_servers='localhost:9092')
producer1()
|
<commit_before><commit_msg>Add new producer to test<commit_after>from pyspark import SparkConf, SparkContext
from pyspark.streaming import StreamingContext
from pyspark.streaming.kafka import KafkaUtils
from kafka import SimpleProducer, KafkaClient
from kafka import KafkaProducer
from operator import add
import sys
import json
from twitter import Twitter, OAuth, TwitterHTTPError, TwitterStream
import os
os.environ['PYSPARK_SUBMIT_ARGS'] = '--jars $SPARK_HOME/jars/spark-streaming-kafka-0-8-assembly_2.11.jar pyspark-shell'
def read_credentials():
file_name = "credentials.json"
try:
with open(file_name) as data_file:
return json.load(data_file)
except:
print ("Cannot load "+data_file)
return None
def producer1():
sc = SparkContext(appName="ProducerTrump")
ssc = StreamingContext(sc, 180)
kvs = KafkaUtils.createDirectStream(ssc, ["trump"], {"metadata.broker.list": "localhost:9092"})
kvs.foreachRDD(send)
producer.flush()
ssc.start()
ssc.awaitTermination()
def send(message):
iterator = twitter_stream.statuses.sample()
count=0
for tweet in iterator:
producer.send('trump', bytes(json.dumps(tweet, indent=6), "ascii"))
count+=1
print("Tweet sent")
if(count==20000):
break
if __name__ == "__main__":
print("Starting to read tweets")
credentials = read_credentials()
oauth = OAuth(credentials['ACCESS_TOKEN'], credentials['ACCESS_SECRET'], credentials['CONSUMER_KEY'], credentials['CONSUMER_SECRET'])
twitter_stream = TwitterStream(auth=oauth)
producer = KafkaProducer(bootstrap_servers='localhost:9092')
producer1()
|
|
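Beyond the `file_name`/`data_file` fix applied in the except block above (`data_file` is unbound when `open()` itself fails), note that `send` ignores the RDD it receives and `producer.flush()` runs once before the streaming context even starts. If the goal is simply to push sampled tweets into Kafka, the Spark plumbing is unnecessary; a plain producer loop using the same `twitter` and `kafka-python` APIs as the record might look like:

```python
import json
from kafka import KafkaProducer
from twitter import OAuth, TwitterStream

def stream_to_kafka(credentials, topic="trump", limit=20000):
    oauth = OAuth(credentials['ACCESS_TOKEN'], credentials['ACCESS_SECRET'],
                  credentials['CONSUMER_KEY'], credentials['CONSUMER_SECRET'])
    producer = KafkaProducer(bootstrap_servers='localhost:9092')
    for count, tweet in enumerate(TwitterStream(auth=oauth).statuses.sample(), 1):
        # utf-8 avoids the UnicodeEncodeError that bytes(..., "ascii") risks on real tweets
        producer.send(topic, json.dumps(tweet).encode('utf-8'))
        if count == limit:
            break
    producer.flush()  # flush once, after everything is queued
```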
dbeca32771b5a3afc419aa3e9e6cf949e669b928
|
praatio/tgio.py
|
praatio/tgio.py
|
class WrongPraatioVersion(Exception):
pass
raise WrongPraatioVersion(
"\n\nWARNING: You've tried to import 'tgio' which was renamed 'textgrid' in praatio 5.x.\n"
"Many other, breaking changes were made.\n"
"You will need to modify your code to use praatio 5.0.\n"
"If you would like to use your code without changes, please run the following "
"two instructions from the command line to install praatio 4.x:\n\n"
"pip uninstall praatio\n"
"pip install 'praatio<5'"
)
|
Add upgrade warning for users expecting praatio 4.x
|
Add upgrade warning for users expecting praatio 4.x
|
Python
|
mit
|
timmahrt/praatIO
|
Add upgrade warning for users expecting praatio 4.x
|
class WrongPraatioVersion(Exception):
pass
raise WrongPraatioVersion(
"\n\nWARNING: You've tried to import 'tgio' which was renamed 'textgrid' in praatio 5.x.\n"
"Many other, breaking changes were made.\n"
"You will need to modify your code to use praatio 5.0.\n"
"If you would like to use your code without changes, please run the following "
"two instructions from the command line to install praatio 4.x:\n\n"
"pip uninstall praatio\n"
"pip install 'praatio<5'"
)
|
<commit_before><commit_msg>Add upgrade warning for users expecting praatio 4.x<commit_after>
|
class WrongPraatioVersion(Exception):
pass
raise WrongPraatioVersion(
"\n\nWARNING: You've tried to import 'tgio' which was renamed 'textgrid' in praatio 5.x.\n"
"Many other, breaking changes were made.\n"
"You will need to modify your code to use praatio 5.0.\n"
"If you would like to use your code without changes, please run the following "
"two instructions from the command line to install praatio 4.x:\n\n"
"pip uninstall praatio\n"
"pip install 'praatio<5'"
)
|
Add upgrade warning for users expecting praatio 4.xclass WrongPraatioVersion(Exception):
pass
raise WrongPraatioVersion(
"\n\nWARNING: You've tried to import 'tgio' which was renamed 'textgrid' in praatio 5.x.\n"
"Many other, breaking changes were made.\n"
"You will need to modify your code to use praatio 5.0.\n"
"If you would like to use your code without changes, please run the following "
"two instructions from the command line to install praatio 4.x:\n\n"
"pip uninstall praatio\n"
"pip install 'praatio<5'"
)
|
<commit_before><commit_msg>Add upgrade warning for users expecting praatio 4.x<commit_after>class WrongPraatioVersion(Exception):
pass
raise WrongPraatioVersion(
"\n\nWARNING: You've tried to import 'tgio' which was renamed 'textgrid' in praatio 5.x.\n"
"Many other, breaking changes were made.\n"
"You will need to modify your code to use praatio 5.0.\n"
"If you would like to use your code without changes, please run the following "
"two instructions from the command line to install praatio 4.x:\n\n"
"pip uninstall praatio\n"
"pip install 'praatio<5'"
)
|
|
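For code tripping this guard, the migration is the rename the message describes. A hedged sketch of both sides; the exact 5.x call signature should be checked against the praatio docs:

```python
# praatio 4.x style (now raises WrongPraatioVersion at import time):
#     from praatio import tgio
#     tg = tgio.openTextgrid("example.TextGrid")

# praatio 5.x replacement, per the rename notice in the record:
from praatio import textgrid

tg = textgrid.openTextgrid("example.TextGrid", includeEmptyIntervals=False)
```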
d88006087c7428295dafad5e27aab69c000cb278
|
pyedgar/form10.py
|
pyedgar/form10.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Utilities for interacting with edgar forms.
"""
# import os
# import re
import datetime as dt
import logging
from . import localstore
from . import forms
# from . import plaintext
# from .htmlparse import RE_HTML_TAGS, convert_html_to_text, html_ent_re_sub
# from .. import exceptions as EX
__logger = logging.getLogger(__name__)
def get_headers(cik, accession, file_date):
orig_fname = localstore.get_filing_path(cik, accession)
with open(orig_fname) as fh:
orig_txt = fh.read(10000)
keep_hdrs = 'filing-date fiscal-year-end form-type period fyend fyear quarter'.split()
hdrs = {k:forms.get_header(orig_txt, k) for k in keep_hdrs}
for k in keep_hdrs:
if hdrs[k] == '':
pass
# print("We failed to find {}".format(k))
tmp = hdrs.get('filing-date', None)
try:
hdrs['fdate'] = dt.datetime(int(tmp[:4]), int(tmp[4:6]), int(tmp[-2:]))
except ValueError:
hdrs['fdate'] = file_date
tmp = hdrs.get('period', None)
try:
hdrs['pdate'] = dt.datetime(int(tmp[:4]), int(tmp[4:6]), int(tmp[-2:]))
except ValueError:
pass
tmp = hdrs.get('fiscal-year-end', None)
try:
hdrs['fyend'] = dt.datetime(hdrs.get('pdate', file_date-dt.timedelta(90)).year,
int(tmp[:2]), int(tmp[-2:]))
if hdrs['fyend'] > hdrs.get('pdate', file_date-dt.timedelta(90)):
hdrs['fyend'] = dt.datetime(hdrs['fyend'].year - 1,
hdrs['fyend'].month,
hdrs['fyend'].day)
except ValueError:
pass
try:
hdrs['quarter'] = round((hdrs.get('pdate', file_date-dt.timedelta(90)) -
hdrs['fyend']).days/90)
hdrs['fyear'] = hdrs['fyend'].year
except TypeError:
pass
hdrs['cik'] = cik
hdrs['accession'] = forms.get_header(orig_txt, 'accession-number')
return hdrs
|
Add custom date and period to form 10
|
Add custom date and period to form 10
|
Python
|
mit
|
gaulinmp/pyedgar
|
Add custom date and period to form 10
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Utilities for interacting with edgar forms.
"""
# import os
# import re
import datetime as dt
import logging
from . import localstore
from . import forms
# from . import plaintext
# from .htmlparse import RE_HTML_TAGS, convert_html_to_text, html_ent_re_sub
# from .. import exceptions as EX
__logger = logging.getLogger(__name__)
def get_headers(cik, accession, file_date):
orig_fname = localstore.get_filing_path(cik, accession)
with open(orig_fname) as fh:
orig_txt = fh.read(10000)
keep_hdrs = 'filing-date fiscal-year-end form-type period fyend fyear quarter'.split()
hdrs = {k:forms.get_header(orig_txt, k) for k in keep_hdrs}
for k in keep_hdrs:
if hdrs[k] == '':
pass
# print("We failed to find {}".format(k))
tmp = hdrs.get('filing-date', None)
try:
hdrs['fdate'] = dt.datetime(int(tmp[:4]), int(tmp[4:6]), int(tmp[-2:]))
except ValueError:
hdrs['fdate'] = file_date
tmp = hdrs.get('period', None)
try:
hdrs['pdate'] = dt.datetime(int(tmp[:4]), int(tmp[4:6]), int(tmp[-2:]))
except ValueError:
pass
tmp = hdrs.get('fiscal-year-end', None)
try:
hdrs['fyend'] = dt.datetime(hdrs.get('pdate', file_date-dt.timedelta(90)).year,
int(tmp[:2]), int(tmp[-2:]))
if hdrs['fyend'] > hdrs.get('pdate', file_date-dt.timedelta(90)):
hdrs['fyend'] = dt.datetime(hdrs['fyend'].year - 1,
hdrs['fyend'].month,
hdrs['fyend'].day)
except ValueError:
pass
try:
hdrs['quarter'] = round((hdrs.get('pdate', file_date-dt.timedelta(90)) -
hdrs['fyend']).days/90)
hdrs['fyear'] = hdrs['fyend'].year
except TypeError:
pass
hdrs['cik'] = cik
hdrs['accession'] = forms.get_header(orig_txt, 'accession-number')
return hdrs
|
<commit_before><commit_msg>Add custom date and period to form 10<commit_after>
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Utilities for interacting with edgar forms.
"""
# import os
# import re
import datetime as dt
import logging
from . import localstore
from . import forms
# from . import plaintext
# from .htmlparse import RE_HTML_TAGS, convert_html_to_text, html_ent_re_sub
# from .. import exceptions as EX
__logger = logging.getLogger(__name__)
def get_headers(cik, accession, file_date):
orig_fname = localstore.get_filing_path(cik, accession)
with open(orig_fname) as fh:
orig_txt = fh.read(10000)
keep_hdrs = 'filing-date fiscal-year-end form-type period fyend fyear quarter'.split()
hdrs = {k:forms.get_header(orig_txt, k) for k in keep_hdrs}
for k in keep_hdrs:
if hdrs[k] == '':
pass
# print("We failed to find {}".format(k))
tmp = hdrs.get('filing-date', None)
try:
hdrs['fdate'] = dt.datetime(int(tmp[:4]), int(tmp[4:6]), int(tmp[-2:]))
except ValueError:
hdrs['fdate'] = file_date
tmp = hdrs.get('period', None)
try:
hdrs['pdate'] = dt.datetime(int(tmp[:4]), int(tmp[4:6]), int(tmp[-2:]))
except ValueError:
pass
tmp = hdrs.get('fiscal-year-end', None)
try:
hdrs['fyend'] = dt.datetime(hdrs.get('pdate', file_date-dt.timedelta(90)).year,
int(tmp[:2]), int(tmp[-2:]))
if hdrs['fyend'] > hdrs.get('pdate', file_date-dt.timedelta(90)):
hdrs['fyend'] = dt.datetime(hdrs['fyend'].year - 1,
hdrs['fyend'].month,
hdrs['fyend'].day)
except ValueError:
pass
try:
hdrs['quarter'] = round((hdrs.get('pdate', file_date-dt.timedelta(90)) -
hdrs['fyend']).days/90)
hdrs['fyear'] = hdrs['fyend'].year
except TypeError:
pass
hdrs['cik'] = cik
hdrs['accession'] = forms.get_header(orig_txt, 'accession-number')
return hdrs
|
Add custom date and period to form 10#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Utilities for interacting with edgar forms.
"""
# import os
# import re
import datetime as dt
import logging
from . import localstore
from . import forms
# from . import plaintext
# from .htmlparse import RE_HTML_TAGS, convert_html_to_text, html_ent_re_sub
# from .. import exceptions as EX
__logger = logging.getLogger(__name__)
def get_headers(cik, accession, file_date):
orig_fname = localstore.get_filing_path(cik, accession)
with open(orig_fname) as fh:
orig_txt = fh.read(10000)
keep_hdrs = 'filing-date fiscal-year-end form-type period fyend fyear quarter'.split()
hdrs = {k:forms.get_header(orig_txt, k) for k in keep_hdrs}
for k in keep_hdrs:
if hdrs[k] == '':
pass
# print("We failed to find {}".format(k))
tmp = hdrs.get('filing-date', None)
try:
hdrs['fdate'] = dt.datetime(int(tmp[:4]), int(tmp[4:6]), int(tmp[-2:]))
except ValueError:
hdrs['fdate'] = file_date
tmp = hdrs.get('period', None)
try:
hdrs['pdate'] = dt.datetime(int(tmp[:4]), int(tmp[4:6]), int(tmp[-2:]))
except ValueError:
pass
tmp = hdrs.get('fiscal-year-end', None)
try:
hdrs['fyend'] = dt.datetime(hdrs.get('pdate', file_date-dt.timedelta(90)).year,
int(tmp[:2]), int(tmp[-2:]))
if hdrs['fyend'] > hdrs.get('pdate', file_date-dt.timedelta(90)):
hdrs['fyend'] = dt.datetime(hdrs['fyend'].year - 1,
hdrs['fyend'].month,
hdrs['fyend'].day)
except ValueError:
pass
try:
hdrs['quarter'] = round((hdrs.get('pdate', file_date-dt.timedelta(90)) -
hdrs['fyend']).days/90)
hdrs['fyear'] = hdrs['fyend'].year
except TypeError:
pass
hdrs['cik'] = cik
hdrs['accession'] = forms.get_header(orig_txt, 'accession-number')
return hdrs
|
<commit_before><commit_msg>Add custom date and period to form 10<commit_after>#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Utilities for interacting with edgar forms.
"""
# import os
# import re
import datetime as dt
import logging
from . import localstore
from . import forms
# from . import plaintext
# from .htmlparse import RE_HTML_TAGS, convert_html_to_text, html_ent_re_sub
# from .. import exceptions as EX
__logger = logging.getLogger(__name__)
def get_headers(cik, accession, file_date):
orig_fname = localstore.get_filing_path(cik, accession)
with open(orig_fname) as fh:
orig_txt = fh.read(10000)
keep_hdrs = 'filing-date fiscal-year-end form-type period fyend fyear quarter'.split()
hdrs = {k:forms.get_header(orig_txt, k) for k in keep_hdrs}
for k in keep_hdrs:
if hdrs[k] == '':
pass
# print("We failed to find {}".format(k))
tmp = hdrs.get('filing-date', None)
try:
hdrs['fdate'] = dt.datetime(int(tmp[:4]), int(tmp[4:6]), int(tmp[-2:]))
except ValueError:
hdrs['fdate'] = file_date
tmp = hdrs.get('period', None)
try:
hdrs['pdate'] = dt.datetime(int(tmp[:4]), int(tmp[4:6]), int(tmp[-2:]))
except ValueError:
pass
tmp = hdrs.get('fiscal-year-end', None)
try:
hdrs['fyend'] = dt.datetime(hdrs.get('pdate', file_date-dt.timedelta(90)).year,
int(tmp[:2]), int(tmp[-2:]))
if hdrs['fyend'] > hdrs.get('pdate', file_date-dt.timedelta(90)):
hdrs['fyend'] = dt.datetime(hdrs['fyend'].year - 1,
hdrs['fyend'].month,
hdrs['fyend'].day)
except ValueError:
pass
try:
hdrs['quarter'] = round((hdrs.get('pdate', file_date-dt.timedelta(90)) -
hdrs['fyend']).days/90)
hdrs['fyear'] = hdrs['fyend'].year
except TypeError:
pass
hdrs['cik'] = cik
hdrs['accession'] = forms.get_header(orig_txt, 'accession-number')
return hdrs
|
|
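Besides the missing `import datetime as dt` (added above; `dt.datetime` and `dt.timedelta` are used throughout the module), the three date parses repeat the same slice-and-convert pattern. A small helper keeps it in one place and also survives a `None` header, which the bare `except ValueError` would not:

```python
import datetime as dt

def parse_yyyymmdd(value):
    """Parse an EDGAR 'YYYYMMDD' header field; None for missing or malformed input."""
    try:
        return dt.datetime(int(value[:4]), int(value[4:6]), int(value[-2:]))
    except (TypeError, ValueError):  # None, '', or non-digit text
        return None

assert parse_yyyymmdd("20140331") == dt.datetime(2014, 3, 31)
assert parse_yyyymmdd("") is None
```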
9dc21af406efca98bd68bdb428c7f563790ed1a2
|
scripts/crop_raster.py
|
scripts/crop_raster.py
|
import subprocess
# TODO: Crop from boundaries of raster tiles in a folder
def gdal_crop(input_file, output_file, corners):
proc_name = 'gdal_translate'
# Using 'lanczos' instead of default 'nearest' because of bug (feature?)
    # in gdal 2.1 regarding subpixel misalignment http://www.gdal.org/gdal_translate.html
options = ['-r', 'lanczos',
'-projwin', str(corners[0][0]), str(corners[0][1]), str(corners[1][0]), str(corners[1][1]),
'-of', 'GTiff']
result = subprocess.run([proc_name]+options+[input_file, output_file])
return result
if __name__ == "__main__":
gdcrop = gdal_crop('/home/rbailonr/g100_clc06_V18_5_Lambert_compressed_25.tif',
'/home/rbailonr/g100_clc06_V18_5_Lambert_compressed_25_0525_6250_yeeees.tif',
((524987.5, 6250012.5), (549987.5, 6225012.5)))
if gdcrop.returncode != 0:
raise RuntimeError("Error during execution! gdal_translate returned {}.".format(
gdcrop.returncode))
|
Add script to crop a raster to some boundaries
|
Add script to crop a raster to some boundaries
|
Python
|
bsd-2-clause
|
fire-rs-laas/fire-rs-saop,fire-rs-laas/fire-rs-saop,fire-rs-laas/fire-rs-saop,fire-rs-laas/fire-rs-saop
|
Add script to crop a raster to some boundaries
|
import subprocess
# TODO: Crop from boundaries of raster tiles in a folder
def gdal_crop(input_file, output_file, corners):
proc_name = 'gdal_translate'
# Using 'lanczos' instead of default 'nearest' because of bug (feature?)
    # in gdal 2.1 regarding subpixel misalignment http://www.gdal.org/gdal_translate.html
options = ['-r', 'lanczos',
'-projwin', str(corners[0][0]), str(corners[0][1]), str(corners[1][0]), str(corners[1][1]),
'-of', 'GTiff']
result = subprocess.run([proc_name]+options+[input_file, output_file])
return result
if __name__ == "__main__":
gdcrop = gdal_crop('/home/rbailonr/g100_clc06_V18_5_Lambert_compressed_25.tif',
'/home/rbailonr/g100_clc06_V18_5_Lambert_compressed_25_0525_6250_yeeees.tif',
((524987.5, 6250012.5), (549987.5, 6225012.5)))
if gdcrop.returncode != 0:
raise RuntimeError("Error during execution! gdal_translate returned {}.".format(
gdcrop.returncode))
|
<commit_before><commit_msg>Add script to crop a raster to some boundaries<commit_after>
|
import subprocess
# TODO: Crop from boundaries of raster tiles in a folder
def gdal_crop(input_file, output_file, corners):
proc_name = 'gdal_translate'
# Using 'lanczos' instead of default 'nearest' because of bug (feature?)
    # in gdal 2.1 regarding subpixel misalignment http://www.gdal.org/gdal_translate.html
options = ['-r', 'lanczos',
'-projwin', str(corners[0][0]), str(corners[0][1]), str(corners[1][0]), str(corners[1][1]),
'-of', 'GTiff']
result = subprocess.run([proc_name]+options+[input_file, output_file])
return result
if __name__ == "__main__":
gdcrop = gdal_crop('/home/rbailonr/g100_clc06_V18_5_Lambert_compressed_25.tif',
'/home/rbailonr/g100_clc06_V18_5_Lambert_compressed_25_0525_6250_yeeees.tif',
((524987.5, 6250012.5), (549987.5, 6225012.5)))
if gdcrop.returncode != 0:
raise RuntimeError("Error during execution! gdal_translate returned {}.".format(
gdcrop.returncode))
|
Add script to crop a raster to some boundariesimport subprocess
# TODO: Crop from boundaries of raster tiles in a folder
def gdal_crop(input_file, output_file, corners):
proc_name = 'gdal_translate'
# Using 'lanczos' instead of default 'nearest' because of bug (feature?)
    # in gdal 2.1 regarding subpixel misalignment http://www.gdal.org/gdal_translate.html
options = ['-r', 'lanczos',
'-projwin', str(corners[0][0]), str(corners[0][1]), str(corners[1][0]), str(corners[1][1]),
'-of', 'GTiff']
result = subprocess.run([proc_name]+options+[input_file, output_file])
return result
if __name__ == "__main__":
gdcrop = gdal_crop('/home/rbailonr/g100_clc06_V18_5_Lambert_compressed_25.tif',
'/home/rbailonr/g100_clc06_V18_5_Lambert_compressed_25_0525_6250_yeeees.tif',
((524987.5, 6250012.5), (549987.5, 6225012.5)))
if gdcrop.returncode != 0:
raise RuntimeError("Error during execution! gdal_translate returned {}.".format(
gdcrop.returncode))
|
<commit_before><commit_msg>Add script to crop a raster to some boundaries<commit_after>import subprocess
# TODO: Crop from boundaries of raster tiles in a folder
def gdal_crop(input_file, output_file, corners):
proc_name = 'gdal_translate'
# Using 'lanczos' instead of default 'nearest' because of bug (feature?)
    # in gdal 2.1 regarding subpixel misalignment http://www.gdal.org/gdal_translate.html
options = ['-r', 'lanczos',
'-projwin', str(corners[0][0]), str(corners[0][1]), str(corners[1][0]), str(corners[1][1]),
'-of', 'GTiff']
result = subprocess.run([proc_name]+options+[input_file, output_file])
return result
if __name__ == "__main__":
gdcrop = gdal_crop('/home/rbailonr/g100_clc06_V18_5_Lambert_compressed_25.tif',
'/home/rbailonr/g100_clc06_V18_5_Lambert_compressed_25_0525_6250_yeeees.tif',
((524987.5, 6250012.5), (549987.5, 6225012.5)))
if gdcrop.returncode != 0:
raise RuntimeError("Error during execution! gdal_translate returned {}.".format(
gdcrop.returncode))
|
|
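`subprocess.run` can raise on failure by itself, which removes the manual `returncode` check in `__main__`. A sketch of the same call with `check=True` (`capture_output` needs Python 3.7+; the GDAL flags are unchanged from the record):

```python
import subprocess

def gdal_crop(input_file, output_file, corners):
    (ulx, uly), (lrx, lry) = corners  # upper-left / lower-right in projected units
    cmd = ["gdal_translate",
           "-r", "lanczos",
           "-projwin", str(ulx), str(uly), str(lrx), str(lry),
           "-of", "GTiff",
           input_file, output_file]
    # check=True raises CalledProcessError on a non-zero exit, with stderr attached
    return subprocess.run(cmd, check=True, capture_output=True, text=True)
```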
7be01251c9a98b7756128dd627b5d7be67645387
|
mapnik/scales.py
|
mapnik/scales.py
|
"""Scale helpers functions."""
import math
def any_scale(scale):
"""Scale helper function that allows any scale."""
return scale
def sequence_scale(scale, scale_sequence):
"""Sequence scale helper, this rounds scale to a 'sensible' value."""
factor = math.floor(math.log10(scale))
norm = scale / (10**factor)
for s in scale_sequence:
if norm <= s:
return s * 10**factor
return scale_sequence[0] * 10**(factor + 1)
def default_scale(scale):
"""Default scale helper, this rounds scale to a 'sensible' value."""
return sequence_scale(scale, (1, 1.25, 1.5, 1.75, 2, 2.5, 3, 4, 5, 6, 7.5, 8, 9, 10))
def deg_min_sec_scale(scale):
for x in (1.0 / 3600,
2.0 / 3600,
5.0 / 3600,
10.0 / 3600,
30.0 / 3600,
1.0 / 60,
2.0 / 60,
5.0 / 60,
10.0 / 60,
30.0 / 60,
1,
2,
5,
10,
30,
60
):
if scale < x:
return x
else:
return x
|
Move scale helpers into a separate module
|
Move scale helpers into a separate module
No change in behaviour
|
Python
|
lgpl-2.1
|
mapnik/python-mapnik,tomhughes/python-mapnik,tomhughes/python-mapnik,mapnik/python-mapnik,mapnik/python-mapnik,tomhughes/python-mapnik
|
Move scale helpers into a separate module
No change in behaviour
|
"""Scale helpers functions."""
import math
def any_scale(scale):
"""Scale helper function that allows any scale."""
return scale
def sequence_scale(scale, scale_sequence):
"""Sequence scale helper, this rounds scale to a 'sensible' value."""
factor = math.floor(math.log10(scale))
norm = scale / (10**factor)
for s in scale_sequence:
if norm <= s:
return s * 10**factor
return scale_sequence[0] * 10**(factor + 1)
def default_scale(scale):
"""Default scale helper, this rounds scale to a 'sensible' value."""
return sequence_scale(scale, (1, 1.25, 1.5, 1.75, 2, 2.5, 3, 4, 5, 6, 7.5, 8, 9, 10))
def deg_min_sec_scale(scale):
for x in (1.0 / 3600,
2.0 / 3600,
5.0 / 3600,
10.0 / 3600,
30.0 / 3600,
1.0 / 60,
2.0 / 60,
5.0 / 60,
10.0 / 60,
30.0 / 60,
1,
2,
5,
10,
30,
60
):
if scale < x:
return x
else:
return x
|
<commit_before><commit_msg>Move scale helpers into a separate module
No change in behaviour<commit_after>
|
"""Scale helpers functions."""
import math
def any_scale(scale):
"""Scale helper function that allows any scale."""
return scale
def sequence_scale(scale, scale_sequence):
"""Sequence scale helper, this rounds scale to a 'sensible' value."""
factor = math.floor(math.log10(scale))
norm = scale / (10**factor)
for s in scale_sequence:
if norm <= s:
return s * 10**factor
return scale_sequence[0] * 10**(factor + 1)
def default_scale(scale):
"""Default scale helper, this rounds scale to a 'sensible' value."""
return sequence_scale(scale, (1, 1.25, 1.5, 1.75, 2, 2.5, 3, 4, 5, 6, 7.5, 8, 9, 10))
def deg_min_sec_scale(scale):
for x in (1.0 / 3600,
2.0 / 3600,
5.0 / 3600,
10.0 / 3600,
30.0 / 3600,
1.0 / 60,
2.0 / 60,
5.0 / 60,
10.0 / 60,
30.0 / 60,
1,
2,
5,
10,
30,
60
):
if scale < x:
return x
else:
return x
|
Move scale helpers into a separate module
No change in behaviour"""Scale helpers functions."""
import math
def any_scale(scale):
"""Scale helper function that allows any scale."""
return scale
def sequence_scale(scale, scale_sequence):
"""Sequence scale helper, this rounds scale to a 'sensible' value."""
factor = math.floor(math.log10(scale))
norm = scale / (10**factor)
for s in scale_sequence:
if norm <= s:
return s * 10**factor
return scale_sequence[0] * 10**(factor + 1)
def default_scale(scale):
"""Default scale helper, this rounds scale to a 'sensible' value."""
return sequence_scale(scale, (1, 1.25, 1.5, 1.75, 2, 2.5, 3, 4, 5, 6, 7.5, 8, 9, 10))
def deg_min_sec_scale(scale):
for x in (1.0 / 3600,
2.0 / 3600,
5.0 / 3600,
10.0 / 3600,
30.0 / 3600,
1.0 / 60,
2.0 / 60,
5.0 / 60,
10.0 / 60,
30.0 / 60,
1,
2,
5,
10,
30,
60
):
if scale < x:
return x
else:
return x
|
<commit_before><commit_msg>Move scale helpers into a separate module
No change in behaviour<commit_after>"""Scale helpers functions."""
import math
def any_scale(scale):
"""Scale helper function that allows any scale."""
return scale
def sequence_scale(scale, scale_sequence):
"""Sequence scale helper, this rounds scale to a 'sensible' value."""
factor = math.floor(math.log10(scale))
norm = scale / (10**factor)
for s in scale_sequence:
if norm <= s:
return s * 10**factor
return scale_sequence[0] * 10**(factor + 1)
def default_scale(scale):
"""Default scale helper, this rounds scale to a 'sensible' value."""
return sequence_scale(scale, (1, 1.25, 1.5, 1.75, 2, 2.5, 3, 4, 5, 6, 7.5, 8, 9, 10))
def deg_min_sec_scale(scale):
for x in (1.0 / 3600,
2.0 / 3600,
5.0 / 3600,
10.0 / 3600,
30.0 / 3600,
1.0 / 60,
2.0 / 60,
5.0 / 60,
10.0 / 60,
30.0 / 60,
1,
2,
5,
10,
30,
60
):
if scale < x:
return x
else:
return x
|
|
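Two notes on the module: `sequence_scale` rounds up within a decade (so `default_scale(1300)` yields `1500.0`), and the trailing `else: return x` in `deg_min_sec_scale` is a `for`/`else`, not an `if`/`else`: it only runs when the loop exhausts, clamping the result at 60. An equivalent, flatter sketch:

```python
def deg_min_sec_scale(scale):
    """Snap scale up to the next degree/minute/second step, clamped at 60."""
    steps = (1.0 / 3600, 2.0 / 3600, 5.0 / 3600, 10.0 / 3600, 30.0 / 3600,
             1.0 / 60, 2.0 / 60, 5.0 / 60, 10.0 / 60, 30.0 / 60,
             1, 2, 5, 10, 30, 60)
    for x in steps:
        if scale < x:
            return x
    return steps[-1]  # scale >= 60 degrees: clamp to the largest step

assert deg_min_sec_scale(0.4) == 0.5   # snaps up to 30 minutes
assert deg_min_sec_scale(90) == 60
```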
a8b2930754fd4c62f2393e60826c2e089c6761a4
|
shopify_auth/models.py
|
shopify_auth/models.py
|
from django.conf import settings
from django.db import models
from django.contrib.auth.models import AbstractBaseUser, BaseUserManager
class ShopUserManager(BaseUserManager):
def create_user(self, myshopify_domain, domain, password = None):
"""
Creates and saves a ShopUser with the given domains and password.
"""
if not myshopify_domain:
raise ValueError('ShopUsers must have a myshopify domain')
user = self.model(
myshopify_domain = myshopify_domain,
domain = domain,
)
# Never want to be able to log on externally.
# Authentication will be taken care of by Shopify OAuth.
user.set_unusable_password()
user.save(using = self._db)
return user
def create_superuser(self, myshopify_domain, domain, password):
"""
Creates and saves a ShopUser with the given domains and password.
"""
return self.create_user(myshopify_domain, domain, password)
class ShopUser(AbstractBaseUser):
myshopify_domain = models.CharField(max_length = 255, unique = True)
token = models.CharField(max_length = 32)
objects = ShopUserManager()
USERNAME_FIELD = 'myshopify_domain'
REQUIRED_FIELDS = []
def get_full_name(self):
return self.myshopify_domain
def get_short_name(self):
return self.myshopify_domain
def __unicode__(self):
return self.get_full_name()
class Meta:
abstract = True
class UserOwnedMixin(object):
user = models.ForeignKey(settings.AUTH_USER_MODEL, editable = False)
|
Add an abstract ShopUser base class that other apps can extend for their auth user.
|
Add an abstract ShopUser base class that other apps can extend for their auth user.
|
Python
|
mit
|
discolabs/django-shopify-auth,RafaAguilar/django-shopify-auth,funkybob/django-shopify-auth,funkybob/django-shopify-auth,discolabs/django-shopify-auth,RafaAguilar/django-shopify-auth
|
Add an abstract ShopUser base class that other apps can extend for their auth user.
|
from django.conf import settings
from django.db import models
from django.contrib.auth.models import AbstractBaseUser, BaseUserManager
class ShopUserManager(BaseUserManager):
def create_user(self, myshopify_domain, domain, password = None):
"""
Creates and saves a ShopUser with the given domains and password.
"""
if not myshopify_domain:
raise ValueError('ShopUsers must have a myshopify domain')
user = self.model(
myshopify_domain = myshopify_domain,
domain = domain,
)
# Never want to be able to log on externally.
# Authentication will be taken care of by Shopify OAuth.
user.set_unusable_password()
user.save(using = self._db)
return user
def create_superuser(self, myshopify_domain, domain, password):
"""
Creates and saves a ShopUser with the given domains and password.
"""
return self.create_user(myshopify_domain, domain, password)
class ShopUser(AbstractBaseUser):
myshopify_domain = models.CharField(max_length = 255, unique = True)
token = models.CharField(max_length = 32)
objects = ShopUserManager()
USERNAME_FIELD = 'myshopify_domain'
REQUIRED_FIELDS = []
def get_full_name(self):
return self.myshopify_domain
def get_short_name(self):
return self.myshopify_domain
def __unicode__(self):
return self.get_full_name()
class Meta:
abstract = True
class UserOwnedMixin(object):
user = models.ForeignKey(settings.AUTH_USER_MODEL, editable = False)
|
<commit_before><commit_msg>Add an abstract ShopUser base class that other apps can extend for their auth user.<commit_after>
|
from django.conf import settings
from django.db import models
from django.contrib.auth.models import AbstractBaseUser, BaseUserManager
class ShopUserManager(BaseUserManager):
def create_user(self, myshopify_domain, domain, password = None):
"""
Creates and saves a ShopUser with the given domains and password.
"""
if not myshopify_domain:
raise ValueError('ShopUsers must have a myshopify domain')
user = self.model(
myshopify_domain = myshopify_domain,
domain = domain,
)
# Never want to be able to log on externally.
# Authentication will be taken care of by Shopify OAuth.
user.set_unusable_password()
user.save(using = self._db)
return user
def create_superuser(self, myshopify_domain, domain, password):
"""
Creates and saves a ShopUser with the given domains and password.
"""
return self.create_user(myshopify_domain, domain, password)
class ShopUser(AbstractBaseUser):
myshopify_domain = models.CharField(max_length = 255, unique = True)
token = models.CharField(max_length = 32)
objects = ShopUserManager()
USERNAME_FIELD = 'myshopify_domain'
REQUIRED_FIELDS = []
def get_full_name(self):
return self.myshopify_domain
def get_short_name(self):
return self.myshopify_domain
def __unicode__(self):
return self.get_full_name()
class Meta:
abstract = True
class UserOwnedMixin(object):
user = models.ForeignKey(settings.AUTH_USER_MODEL, editable = False)
|
Add an abstract ShopUser base class that other apps can extend for their auth user.from django.conf import settings
from django.db import models
from django.contrib.auth.models import AbstractBaseUser, BaseUserManager
class ShopUserManager(BaseUserManager):
def create_user(self, myshopify_domain, domain, password = None):
"""
Creates and saves a ShopUser with the given domains and password.
"""
if not myshopify_domain:
raise ValueError('ShopUsers must have a myshopify domain')
user = self.model(
myshopify_domain = myshopify_domain,
domain = domain,
)
# Never want to be able to log on externally.
# Authentication will be taken care of by Shopify OAuth.
user.set_unusable_password()
user.save(using = self._db)
return user
def create_superuser(self, myshopify_domain, domain, password):
"""
Creates and saves a ShopUser with the given domains and password.
"""
return self.create_user(myshopify_domain, domain, password)
class ShopUser(AbstractBaseUser):
myshopify_domain = models.CharField(max_length = 255, unique = True)
token = models.CharField(max_length = 32)
objects = ShopUserManager()
USERNAME_FIELD = 'myshopify_domain'
REQUIRED_FIELDS = []
def get_full_name(self):
return self.myshopify_domain
def get_short_name(self):
return self.myshopify_domain
def __unicode__(self):
return self.get_full_name()
class Meta:
abstract = True
class UserOwnedMixin(object):
user = models.ForeignKey(settings.AUTH_USER_MODEL, editable = False)
|
<commit_before><commit_msg>Add an abstract ShopUser base class that other apps can extend for their auth user.<commit_after>from django.conf import settings
from django.db import models
from django.contrib.auth.models import AbstractBaseUser, BaseUserManager
class ShopUserManager(BaseUserManager):
def create_user(self, myshopify_domain, domain, password = None):
"""
Creates and saves a ShopUser with the given domains and password.
"""
if not myshopify_domain:
raise ValueError('ShopUsers must have a myshopify domain')
user = self.model(
myshopify_domain = myshopify_domain,
domain = domain,
)
# Never want to be able to log on externally.
# Authentication will be taken care of by Shopify OAuth.
user.set_unusable_password()
user.save(using = self._db)
return user
def create_superuser(self, myshopify_domain, domain, password):
"""
Creates and saves a ShopUser with the given domains and password.
"""
return self.create_user(myshopify_domain, domain, password)
class ShopUser(AbstractBaseUser):
myshopify_domain = models.CharField(max_length = 255, unique = True)
token = models.CharField(max_length = 32)
objects = ShopUserManager()
USERNAME_FIELD = 'myshopify_domain'
REQUIRED_FIELDS = []
def get_full_name(self):
return self.myshopify_domain
def get_short_name(self):
return self.myshopify_domain
def __unicode__(self):
return self.get_full_name()
class Meta:
abstract = True
class UserOwnedMixin(object):
user = models.ForeignKey(settings.AUTH_USER_MODEL, editable = False)
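One detail worth flagging: `create_user` forwards a `domain` keyword to `self.model(...)`, but the abstract `ShopUser` only declares `myshopify_domain` and `token`, so a concrete subclass presumably has to supply that field. A minimal sketch of such a subclass (the class name and field definition are assumptions, not part of this commit):

from django.db import models
from shopify_auth.models import ShopUser  # the abstract base defined above

class AuthAppShopUser(ShopUser):
    """Concrete user model for one app; adds the `domain` field the manager expects."""
    domain = models.CharField(max_length=255, blank=True)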
|
|
43d1ad2924ba0cf605628ddbfd17eee0e55c08b6
|
security/migrations/0002_convert_pass_expiry_user_to_one2one.py
|
security/migrations/0002_convert_pass_expiry_user_to_one2one.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('security', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='passwordexpiry',
name='user',
field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
]
|
Add password expiry user migration
|
Add password expiry user migration
|
Python
|
bsd-3-clause
|
MartinPetkov/django-security,MartinPetkov/django-security
|
Add password expiry user migration
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('security', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='passwordexpiry',
name='user',
field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
]
|
<commit_before><commit_msg>Add password expiry user migration<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('security', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='passwordexpiry',
name='user',
field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
]
|
Add password expiry user migration# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('security', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='passwordexpiry',
name='user',
field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
]
|
<commit_before><commit_msg>Add password expiry user migration<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('security', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='passwordexpiry',
name='user',
field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
]
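For context, this is the model-side declaration the migration corresponds to, sketched from the migration fields alone (the rest of the model body is assumed):

from django.conf import settings
from django.db import models

class PasswordExpiry(models.Model):
    # Was a ForeignKey; OneToOneField enforces a single expiry record per user.
    user = models.OneToOneField(settings.AUTH_USER_MODEL,
                                on_delete=models.CASCADE)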
|
|
da59e27fe4176e23ec305f9a7143b288521db6ef
|
src/ggrc/converters/handlers/request.py
|
src/ggrc/converters/handlers/request.py
|
# Copyright (C) 2016 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: miha@reciprocitylabs.com
# Maintained By: miha@reciprocitylabs.com
"""Handlers for request specific columns."""
from ggrc.converters.handlers import handlers
from ggrc.converters import errors
class RequestStatusColumnHandler(handlers.StatusColumnHandler):
"""Handler for request status."""
def parse_item(self):
"""Parse raw_value into a valid request status if possible."""
value = handlers.StatusColumnHandler.parse_item(self)
if value in {"Final", "Verified"}:
value = "In Progress"
self.add_warning(errors.REQUEST_INVALID_STATE)
return value
|
# Copyright (C) 2016 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: miha@reciprocitylabs.com
# Maintained By: miha@reciprocitylabs.com
"""Handlers for request specific columns."""
from ggrc.converters.handlers import handlers
from ggrc.converters import errors
from ggrc import models
class RequestStatusColumnHandler(handlers.StatusColumnHandler):
"""Handler for request status."""
def parse_item(self):
"""Parse raw_value into a valid request status if possible."""
value = handlers.StatusColumnHandler.parse_item(self)
if value in models.Request.END_STATES:
value = models.Request.PROGRESS_STATE
self.add_warning(errors.REQUEST_INVALID_STATE)
return value
|
Update Request object import to use statusable attributes
|
Update Request object import to use statusable attributes
|
Python
|
apache-2.0
|
kr41/ggrc-core,j0gurt/ggrc-core,kr41/ggrc-core,josthkko/ggrc-core,selahssea/ggrc-core,prasannav7/ggrc-core,edofic/ggrc-core,plamut/ggrc-core,josthkko/ggrc-core,selahssea/ggrc-core,prasannav7/ggrc-core,VinnieJohns/ggrc-core,andrei-karalionak/ggrc-core,selahssea/ggrc-core,j0gurt/ggrc-core,edofic/ggrc-core,AleksNeStu/ggrc-core,prasannav7/ggrc-core,andrei-karalionak/ggrc-core,NejcZupec/ggrc-core,edofic/ggrc-core,josthkko/ggrc-core,j0gurt/ggrc-core,josthkko/ggrc-core,NejcZupec/ggrc-core,prasannav7/ggrc-core,andrei-karalionak/ggrc-core,NejcZupec/ggrc-core,selahssea/ggrc-core,andrei-karalionak/ggrc-core,plamut/ggrc-core,AleksNeStu/ggrc-core,VinnieJohns/ggrc-core,j0gurt/ggrc-core,kr41/ggrc-core,AleksNeStu/ggrc-core,VinnieJohns/ggrc-core,NejcZupec/ggrc-core,kr41/ggrc-core,plamut/ggrc-core,AleksNeStu/ggrc-core,plamut/ggrc-core,VinnieJohns/ggrc-core,edofic/ggrc-core
|
# Copyright (C) 2016 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: miha@reciprocitylabs.com
# Maintained By: miha@reciprocitylabs.com
"""Handlers for request specific columns."""
from ggrc.converters.handlers import handlers
from ggrc.converters import errors
class RequestStatusColumnHandler(handlers.StatusColumnHandler):
"""Handler for request status."""
def parse_item(self):
"""Parse raw_value into a valid request status if possible."""
value = handlers.StatusColumnHandler.parse_item(self)
if value in {"Final", "Verified"}:
value = "In Progress"
self.add_warning(errors.REQUEST_INVALID_STATE)
return value
Update Request object import to use statusable attributes
|
# Copyright (C) 2016 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: miha@reciprocitylabs.com
# Maintained By: miha@reciprocitylabs.com
"""Handlers for request specific columns."""
from ggrc.converters.handlers import handlers
from ggrc.converters import errors
from ggrc import models
class RequestStatusColumnHandler(handlers.StatusColumnHandler):
"""Handler for request status."""
def parse_item(self):
"""Parse raw_value into a valid request status if possible."""
value = handlers.StatusColumnHandler.parse_item(self)
if value in models.Request.END_STATES:
value = models.Request.PROGRESS_STATE
self.add_warning(errors.REQUEST_INVALID_STATE)
return value
|
<commit_before># Copyright (C) 2016 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: miha@reciprocitylabs.com
# Maintained By: miha@reciprocitylabs.com
"""Handlers for request specific columns."""
from ggrc.converters.handlers import handlers
from ggrc.converters import errors
class RequestStatusColumnHandler(handlers.StatusColumnHandler):
"""Handler for request status."""
def parse_item(self):
"""Parse raw_value into a valid request status if possible."""
value = handlers.StatusColumnHandler.parse_item(self)
if value in {"Final", "Verified"}:
value = "In Progress"
self.add_warning(errors.REQUEST_INVALID_STATE)
return value
<commit_msg>Update Request object import to use statusable attributes<commit_after>
|
# Copyright (C) 2016 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: miha@reciprocitylabs.com
# Maintained By: miha@reciprocitylabs.com
"""Handlers for request specific columns."""
from ggrc.converters.handlers import handlers
from ggrc.converters import errors
from ggrc import models
class RequestStatusColumnHandler(handlers.StatusColumnHandler):
"""Handler for request status."""
def parse_item(self):
"""Parse raw_value into a valid request status if possible."""
value = handlers.StatusColumnHandler.parse_item(self)
if value in models.Request.END_STATES:
value = models.Request.PROGRESS_STATE
self.add_warning(errors.REQUEST_INVALID_STATE)
return value
|
# Copyright (C) 2016 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: miha@reciprocitylabs.com
# Maintained By: miha@reciprocitylabs.com
"""Handlers for request specific columns."""
from ggrc.converters.handlers import handlers
from ggrc.converters import errors
class RequestStatusColumnHandler(handlers.StatusColumnHandler):
"""Handler for request status."""
def parse_item(self):
"""Parse raw_value into a valid request status if possible."""
value = handlers.StatusColumnHandler.parse_item(self)
if value in {"Final", "Verified"}:
value = "In Progress"
self.add_warning(errors.REQUEST_INVALID_STATE)
return value
Update Request object import to use statusable attributes# Copyright (C) 2016 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: miha@reciprocitylabs.com
# Maintained By: miha@reciprocitylabs.com
"""Handlers for request specific columns."""
from ggrc.converters.handlers import handlers
from ggrc.converters import errors
from ggrc import models
class RequestStatusColumnHandler(handlers.StatusColumnHandler):
"""Handler for request status."""
def parse_item(self):
"""Parse raw_value into a valid request status if possible."""
value = handlers.StatusColumnHandler.parse_item(self)
if value in models.Request.END_STATES:
value = models.Request.PROGRESS_STATE
self.add_warning(errors.REQUEST_INVALID_STATE)
return value
|
<commit_before># Copyright (C) 2016 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: miha@reciprocitylabs.com
# Maintained By: miha@reciprocitylabs.com
"""Handlers for request specific columns."""
from ggrc.converters.handlers import handlers
from ggrc.converters import errors
class RequestStatusColumnHandler(handlers.StatusColumnHandler):
"""Handler for request status."""
def parse_item(self):
"""Parse raw_value into a valid request status if possible."""
value = handlers.StatusColumnHandler.parse_item(self)
if value in {"Final", "Verified"}:
value = "In Progress"
self.add_warning(errors.REQUEST_INVALID_STATE)
return value
<commit_msg>Update Request object import to use statusable attributes<commit_after># Copyright (C) 2016 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: miha@reciprocitylabs.com
# Maintained By: miha@reciprocitylabs.com
"""Handlers for request specific columns."""
from ggrc.converters.handlers import handlers
from ggrc.converters import errors
from ggrc import models
class RequestStatusColumnHandler(handlers.StatusColumnHandler):
"""Handler for request status."""
def parse_item(self):
"""Parse raw_value into a valid request status if possible."""
value = handlers.StatusColumnHandler.parse_item(self)
if value in models.Request.END_STATES:
value = models.Request.PROGRESS_STATE
self.add_warning(errors.REQUEST_INVALID_STATE)
return value
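The behavioural effect of the new lookup is easier to see in isolation. A self-contained sketch, with the state constants stubbed to the literals the removed branch used ({"Final", "Verified"} and "In Progress"); the real values live on `models.Request`:

class StubRequest(object):
    """Stand-in for models.Request; the real constants come from the model."""
    END_STATES = {"Final", "Verified"}
    PROGRESS_STATE = "In Progress"

def normalize_status(value, model=StubRequest):
    # Imported end states are folded back to the in-progress state.
    if value in model.END_STATES:
        return model.PROGRESS_STATE
    return value

assert normalize_status("Verified") == "In Progress"
assert normalize_status("Open") == "Open"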
|
2b2a68191715e37314378b87ccfb30a2cb2a8e13
|
numba/tests/test_profiler.py
|
numba/tests/test_profiler.py
|
import numpy as np
import cProfile as profiler
import pstats
from numba import jit
from numba import unittest_support as unittest
def dot(a, b):
sum=0
for i in range(len(a)):
sum += a[i]*b[i]
return sum
class TestProfiler(unittest.TestCase):
def test_profiler(self):
"""Make sure the jit-compiled function shows up in the profile stats."""
a = np.arange(16, dtype=np.float32)
b = np.arange(16, dtype=np.float32)
p = profiler.Profile()
try:
p.enable()
dot(a, b)
p.disable()
stats = pstats.Stats(p).strip_dirs()
self.assertIn(('test_profiler.py', 7, 'dot'), stats.stats)
finally:
# make sure the profiler is deactivated when this test is done so as not to
# pollute any other tests
p.disable()
del p
if __name__ == '__main__':
unittest.main()
|
Add test to verify that jit-compiled functions show up in the profile stats.
|
Add test to verify that jit-compiled functions show up in the profile stats.
|
Python
|
bsd-2-clause
|
stefanseefeld/numba,jriehl/numba,seibert/numba,cpcloud/numba,numba/numba,gmarkall/numba,IntelLabs/numba,stefanseefeld/numba,sklam/numba,jriehl/numba,stuartarchibald/numba,seibert/numba,stefanseefeld/numba,IntelLabs/numba,numba/numba,IntelLabs/numba,IntelLabs/numba,stuartarchibald/numba,stuartarchibald/numba,stuartarchibald/numba,seibert/numba,seibert/numba,cpcloud/numba,sklam/numba,cpcloud/numba,numba/numba,stuartarchibald/numba,stonebig/numba,gmarkall/numba,sklam/numba,gmarkall/numba,stonebig/numba,seibert/numba,jriehl/numba,cpcloud/numba,stonebig/numba,jriehl/numba,numba/numba,stonebig/numba,jriehl/numba,IntelLabs/numba,gmarkall/numba,stonebig/numba,stefanseefeld/numba,sklam/numba,stefanseefeld/numba,gmarkall/numba,numba/numba,cpcloud/numba,sklam/numba
|
Add test to verify that jit-compiled functions show up in the profile stats.
|
import numpy as np
import cProfile as profiler
import pstats
from numba import jit
from numba import unittest_support as unittest
def dot(a, b):
sum=0
for i in range(len(a)):
sum += a[i]*b[i]
return sum
class TestProfiler(unittest.TestCase):
def test_profiler(self):
"""Make sure the jit-compiled function shows up in the profile stats."""
a = np.arange(16, dtype=np.float32)
b = np.arange(16, dtype=np.float32)
p = profiler.Profile()
try:
p.enable()
dot(a, b)
p.disable()
stats = pstats.Stats(p).strip_dirs()
self.assertIn(('test_profiler.py', 7, 'dot'), stats.stats)
finally:
# make sure the profiler is deactivated when this test is done so as not to
# pollute any other tests
p.disable()
del p
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add test to verify that jit-compiled functions show up in the profile stats.<commit_after>
|
import numpy as np
import cProfile as profiler
import pstats
from numba import jit
from numba import unittest_support as unittest
def dot(a, b):
sum=0
for i in range(len(a)):
sum += a[i]*b[i]
return sum
class TestProfiler(unittest.TestCase):
def test_profiler(self):
"""Make sure the jit-compiled function shows up in the profile stats."""
a = np.arange(16, dtype=np.float32)
b = np.arange(16, dtype=np.float32)
p = profiler.Profile()
try:
p.enable()
dot(a, b)
p.disable()
stats = pstats.Stats(p).strip_dirs()
self.assertIn(('test_profiler.py', 7, 'dot'), stats.stats)
finally:
# make sure the profiler is deactivated when this test is done so as not to
# pollute any other tests
p.disable()
del p
if __name__ == '__main__':
unittest.main()
|
Add test to verify that jit-compiled functions show up in the profile stats.import numpy as np
import cProfile as profiler
import pstats
from numba import jit
from numba import unittest_support as unittest
def dot(a, b):
sum=0
for i in range(len(a)):
sum += a[i]*b[i]
return sum
class TestProfiler(unittest.TestCase):
def test_profiler(self):
"""Make sure the jit-compiled function shows up in the profile stats."""
a = np.arange(16, dtype=np.float32)
b = np.arange(16, dtype=np.float32)
p = profiler.Profile()
try:
p.enable()
dot(a, b)
p.disable()
stats = pstats.Stats(p).strip_dirs()
self.assertIn(('test_profiler.py', 7, 'dot'), stats.stats)
finally:
# make sure the profiler is deactivated when this test is done so as not to
# pollute any other tests
p.disable()
del p
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add test to verify that jit-compiled functions show up in the profile stats.<commit_after>import numpy as np
import cProfile as profiler
import pstats
from numba import jit
from numba import unittest_support as unittest
def dot(a, b):
sum=0
for i in range(len(a)):
sum += a[i]*b[i]
return sum
class TestProfiler(unittest.TestCase):
def test_profiler(self):
"""Make sure the jit-compiled function shows up in the profile stats."""
a = np.arange(16, dtype=np.float32)
b = np.arange(16, dtype=np.float32)
p = profiler.Profile()
try:
p.enable()
dot(a, b)
p.disable()
stats = pstats.Stats(p).strip_dirs()
self.assertIn(('test_profiler.py', 7, 'dot'), stats.stats)
finally:
# make sure the profiler is deactivated when this test is done so as not to
# pollute any other tests
p.disable()
del p
if __name__ == '__main__':
unittest.main()
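As quoted, `jit` is imported but never applied to `dot`, so the test would profile a plain Python function. Presumably the intended version decorates it, along the lines of this sketch (the decorator placement is a guess, and adding it would shift the line number asserted in `stats.stats`):

from numba import jit

@jit
def dot(a, b):
    total = 0
    for i in range(len(a)):
        total += a[i] * b[i]
    return total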
|
|
229cd1c8dec92010074bad6c6081142f3e551026
|
py/guess-number-higher-or-lower-ii.py
|
py/guess-number-higher-or-lower-ii.py
|
class Solution(object):
def getMoneyAmount(self, n, table=dict()):
"""
:type n: int
:rtype: int
"""
def dp(L, U):
if (L, U) not in table:
if L + 1 >= U:
table[L, U] = 0
else:
table[L, U] = min(j + max(dp(L, j), dp(j + 1, U)) for j in xrange(L, U))
return table[L, U]
return dp(1, n + 1)
|
Add py solution for 375. Guess Number Higher or Lower II
|
Add py solution for 375. Guess Number Higher or Lower II
375. Guess Number Higher or Lower II: https://leetcode.com/problems/guess-number-higher-or-lower-ii/
|
Python
|
apache-2.0
|
ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode
|
Add py solution for 375. Guess Number Higher or Lower II
375. Guess Number Higher or Lower II: https://leetcode.com/problems/guess-number-higher-or-lower-ii/
|
class Solution(object):
def getMoneyAmount(self, n, table=dict()):
"""
:type n: int
:rtype: int
"""
def dp(L, U):
if (L, U) not in table:
if L + 1 >= U:
table[L, U] = 0
else:
table[L, U] = min(j + max(dp(L, j), dp(j + 1, U)) for j in xrange(L, U))
return table[L, U]
return dp(1, n + 1)
|
<commit_before><commit_msg>Add py solution for 375. Guess Number Higher or Lower II
375. Guess Number Higher or Lower II: https://leetcode.com/problems/guess-number-higher-or-lower-ii/<commit_after>
|
class Solution(object):
def getMoneyAmount(self, n, table=dict()):
"""
:type n: int
:rtype: int
"""
def dp(L, U):
if (L, U) not in table:
if L + 1 >= U:
table[L, U] = 0
else:
table[L, U] = min(j + max(dp(L, j), dp(j + 1, U)) for j in xrange(L, U))
return table[L, U]
return dp(1, n + 1)
|
Add py solution for 375. Guess Number Higher or Lower II
375. Guess Number Higher or Lower II: https://leetcode.com/problems/guess-number-higher-or-lower-ii/class Solution(object):
def getMoneyAmount(self, n, table=dict()):
"""
:type n: int
:rtype: int
"""
def dp(L, U):
if (L, U) not in table:
if L + 1 >= U:
table[L, U] = 0
else:
table[L, U] = min(j + max(dp(L, j), dp(j + 1, U)) for j in xrange(L, U))
return table[L, U]
return dp(1, n + 1)
|
<commit_before><commit_msg>Add py solution for 375. Guess Number Higher or Lower II
375. Guess Number Higher or Lower II: https://leetcode.com/problems/guess-number-higher-or-lower-ii/<commit_after>class Solution(object):
def getMoneyAmount(self, n, table=dict()):
"""
:type n: int
:rtype: int
"""
def dp(L, U):
if (L, U) not in table:
if L + 1 >= U:
table[L, U] = 0
else:
table[L, U] = min(j + max(dp(L, j), dp(j + 1, U)) for j in xrange(L, U))
return table[L, U]
return dp(1, n + 1)
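A couple of spot checks on the recurrence (the values follow from working the DP by hand; n = 10 gives the well-known answer for this problem, and the snippet above is Python 2 because of `xrange`). Note the mutable default `table=dict()` is deliberate here: it acts as a memo cache shared across calls.

s = Solution()
print(s.getMoneyAmount(1))   # 0  -- a single candidate needs no guess
print(s.getMoneyAmount(3))   # 2  -- guessing 2 first settles it
print(s.getMoneyAmount(10))  # 16 -- minimal guaranteed cost for 1..10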
|
|
609e0b76680156584bb1e06aa10a5425c55f8c01
|
wagtail/utils/widgets.py
|
wagtail/utils/widgets.py
|
from django.forms.widgets import Widget
from django.utils.safestring import mark_safe
class WidgetWithScript(Widget):
def render(self, name, value, attrs=None):
widget = super(WidgetWithScript, self).render(name, value, attrs)
final_attrs = self.build_attrs(attrs, name=name)
id_ = final_attrs.get('id', None)
if 'id_' is None:
return widget
js = self.render_js_init(id_, name, value)
out = '{0}<script>{1}</script>'.format(widget, js)
return mark_safe(out)
def render_js_init(self, id_, name, value):
return ''
|
from __future__ import absolute_import, unicode_literals
from django.forms.widgets import Widget
from django.utils.safestring import mark_safe
class WidgetWithScript(Widget):
def render(self, name, value, attrs=None):
widget = super(WidgetWithScript, self).render(name, value, attrs)
final_attrs = self.build_attrs(attrs, name=name)
id_ = final_attrs.get('id', None)
if 'id_' is None:
return widget
js = self.render_js_init(id_, name, value)
out = '{0}<script>{1}</script>'.format(widget, js)
return mark_safe(out)
def render_js_init(self, id_, name, value):
return ''
|
Fix unicode error caused by ascii format string
|
Fix unicode error caused by ascii format string
|
Python
|
bsd-3-clause
|
JoshBarr/wagtail,zerolab/wagtail,benjaoming/wagtail,takeshineshiro/wagtail,jordij/wagtail,iho/wagtail,chrxr/wagtail,iansprice/wagtail,mayapurmedia/wagtail,janusnic/wagtail,jordij/wagtail,kurtw/wagtail,jordij/wagtail,Klaudit/wagtail,jnns/wagtail,bjesus/wagtail,JoshBarr/wagtail,mikedingjan/wagtail,darith27/wagtail,takeflight/wagtail,Toshakins/wagtail,davecranwell/wagtail,stevenewey/wagtail,rv816/wagtail,JoshBarr/wagtail,gasman/wagtail,tangentlabs/wagtail,KimGlazebrook/wagtail-experiment,stevenewey/wagtail,mayapurmedia/wagtail,nealtodd/wagtail,rv816/wagtail,marctc/wagtail,FlipperPA/wagtail,thenewguy/wagtail,rsalmaso/wagtail,kurtw/wagtail,nutztherookie/wagtail,thenewguy/wagtail,gasman/wagtail,marctc/wagtail,chrxr/wagtail,nilnvoid/wagtail,FlipperPA/wagtail,Pennebaker/wagtail,bjesus/wagtail,hanpama/wagtail,nimasmi/wagtail,quru/wagtail,kurtrwall/wagtail,davecranwell/wagtail,inonit/wagtail,kurtrwall/wagtail,taedori81/wagtail,marctc/wagtail,janusnic/wagtail,mephizzle/wagtail,darith27/wagtail,mjec/wagtail,WQuanfeng/wagtail,jorge-marques/wagtail,mephizzle/wagtail,Pennebaker/wagtail,nimasmi/wagtail,torchbox/wagtail,torchbox/wagtail,nrsimha/wagtail,jorge-marques/wagtail,wagtail/wagtail,rjsproxy/wagtail,kaedroho/wagtail,rsalmaso/wagtail,nutztherookie/wagtail,nutztherookie/wagtail,kaedroho/wagtail,mixxorz/wagtail,mayapurmedia/wagtail,wagtail/wagtail,jordij/wagtail,timorieber/wagtail,jorge-marques/wagtail,iansprice/wagtail,zerolab/wagtail,bjesus/wagtail,jorge-marques/wagtail,mikedingjan/wagtail,iho/wagtail,serzans/wagtail,mikedingjan/wagtail,torchbox/wagtail,benjaoming/wagtail,mixxorz/wagtail,iansprice/wagtail,Pennebaker/wagtail,takeflight/wagtail,mikedingjan/wagtail,chimeno/wagtail,taedori81/wagtail,nimasmi/wagtail,iansprice/wagtail,stevenewey/wagtail,takeshineshiro/wagtail,Toshakins/wagtail,Pennebaker/wagtail,jnns/wagtail,marctc/wagtail,nrsimha/wagtail,hamsterbacke23/wagtail,dresiu/wagtail,takeflight/wagtail,m-sanders/wagtail,wagtail/wagtail,inonit/wagtail,mephizzle/wagtail,inonit/wagtail,jnns/wagtail,Tivix/wagtail,mixxorz/wagtail,dresiu/wagtail,gogobook/wagtail,Klaudit/wagtail,KimGlazebrook/wagtail-experiment,gasman/wagtail,tangentlabs/wagtail,jnns/wagtail,nilnvoid/wagtail,KimGlazebrook/wagtail-experiment,FlipperPA/wagtail,gogobook/wagtail,nutztherookie/wagtail,mjec/wagtail,chimeno/wagtail,mjec/wagtail,hanpama/wagtail,mjec/wagtail,KimGlazebrook/wagtail-experiment,chimeno/wagtail,kurtrwall/wagtail,kurtrwall/wagtail,kaedroho/wagtail,serzans/wagtail,mixxorz/wagtail,rsalmaso/wagtail,rv816/wagtail,m-sanders/wagtail,darith27/wagtail,hanpama/wagtail,takeflight/wagtail,mixxorz/wagtail,kurtw/wagtail,tangentlabs/wagtail,chimeno/wagtail,taedori81/wagtail,Toshakins/wagtail,wagtail/wagtail,rjsproxy/wagtail,gasman/wagtail,thenewguy/wagtail,thenewguy/wagtail,kurtw/wagtail,timorieber/wagtail,taedori81/wagtail,tangentlabs/wagtail,davecranwell/wagtail,quru/wagtail,hamsterbacke23/wagtail,nilnvoid/wagtail,zerolab/wagtail,stevenewey/wagtail,WQuanfeng/wagtail,nealtodd/wagtail,serzans/wagtail,nrsimha/wagtail,gogobook/wagtail,benjaoming/wagtail,gogobook/wagtail,Toshakins/wagtail,serzans/wagtail,bjesus/wagtail,takeshineshiro/wagtail,taedori81/wagtail,chrxr/wagtail,zerolab/wagtail,inonit/wagtail,JoshBarr/wagtail,quru/wagtail,jorge-marques/wagtail,darith27/wagtail,WQuanfeng/wagtail,quru/wagtail,dresiu/wagtail,chrxr/wagtail,hamsterbacke23/wagtail,janusnic/wagtail,mayapurmedia/wagtail,Klaudit/wagtail,FlipperPA/wagtail,mephizzle/wagtail,gasman/wagtail,Tivix/wagtail,kaedroho/wagtail,rv816/wagtail,nilnvoid/wagtail,Klaudit/wagtail,dave
cranwell/wagtail,hanpama/wagtail,iho/wagtail,timorieber/wagtail,benjaoming/wagtail,rsalmaso/wagtail,nimasmi/wagtail,hamsterbacke23/wagtail,zerolab/wagtail,kaedroho/wagtail,rjsproxy/wagtail,nealtodd/wagtail,takeshineshiro/wagtail,thenewguy/wagtail,torchbox/wagtail,iho/wagtail,WQuanfeng/wagtail,janusnic/wagtail,Tivix/wagtail,dresiu/wagtail,rjsproxy/wagtail,m-sanders/wagtail,timorieber/wagtail,m-sanders/wagtail,rsalmaso/wagtail,nrsimha/wagtail,nealtodd/wagtail,dresiu/wagtail,Tivix/wagtail,wagtail/wagtail,chimeno/wagtail
|
from django.forms.widgets import Widget
from django.utils.safestring import mark_safe
class WidgetWithScript(Widget):
def render(self, name, value, attrs=None):
widget = super(WidgetWithScript, self).render(name, value, attrs)
final_attrs = self.build_attrs(attrs, name=name)
id_ = final_attrs.get('id', None)
if 'id_' is None:
return widget
js = self.render_js_init(id_, name, value)
out = '{0}<script>{1}</script>'.format(widget, js)
return mark_safe(out)
def render_js_init(self, id_, name, value):
return ''
Fix unicode error caused by ascii format string
|
from __future__ import absolute_import, unicode_literals
from django.forms.widgets import Widget
from django.utils.safestring import mark_safe
class WidgetWithScript(Widget):
def render(self, name, value, attrs=None):
widget = super(WidgetWithScript, self).render(name, value, attrs)
final_attrs = self.build_attrs(attrs, name=name)
id_ = final_attrs.get('id', None)
if 'id_' is None:
return widget
js = self.render_js_init(id_, name, value)
out = '{0}<script>{1}</script>'.format(widget, js)
return mark_safe(out)
def render_js_init(self, id_, name, value):
return ''
|
<commit_before>from django.forms.widgets import Widget
from django.utils.safestring import mark_safe
class WidgetWithScript(Widget):
def render(self, name, value, attrs=None):
widget = super(WidgetWithScript, self).render(name, value, attrs)
final_attrs = self.build_attrs(attrs, name=name)
id_ = final_attrs.get('id', None)
if 'id_' is None:
return widget
js = self.render_js_init(id_, name, value)
out = '{0}<script>{1}</script>'.format(widget, js)
return mark_safe(out)
def render_js_init(self, id_, name, value):
return ''
<commit_msg>Fix unicode error caused by ascii format string<commit_after>
|
from __future__ import absolute_import, unicode_literals
from django.forms.widgets import Widget
from django.utils.safestring import mark_safe
class WidgetWithScript(Widget):
def render(self, name, value, attrs=None):
widget = super(WidgetWithScript, self).render(name, value, attrs)
final_attrs = self.build_attrs(attrs, name=name)
id_ = final_attrs.get('id', None)
if 'id_' is None:
return widget
js = self.render_js_init(id_, name, value)
out = '{0}<script>{1}</script>'.format(widget, js)
return mark_safe(out)
def render_js_init(self, id_, name, value):
return ''
|
from django.forms.widgets import Widget
from django.utils.safestring import mark_safe
class WidgetWithScript(Widget):
def render(self, name, value, attrs=None):
widget = super(WidgetWithScript, self).render(name, value, attrs)
final_attrs = self.build_attrs(attrs, name=name)
id_ = final_attrs.get('id', None)
if 'id_' is None:
return widget
js = self.render_js_init(id_, name, value)
out = '{0}<script>{1}</script>'.format(widget, js)
return mark_safe(out)
def render_js_init(self, id_, name, value):
return ''
Fix unicode error caused by ascii format stringfrom __future__ import absolute_import, unicode_literals
from django.forms.widgets import Widget
from django.utils.safestring import mark_safe
class WidgetWithScript(Widget):
def render(self, name, value, attrs=None):
widget = super(WidgetWithScript, self).render(name, value, attrs)
final_attrs = self.build_attrs(attrs, name=name)
id_ = final_attrs.get('id', None)
if 'id_' is None:
return widget
js = self.render_js_init(id_, name, value)
out = '{0}<script>{1}</script>'.format(widget, js)
return mark_safe(out)
def render_js_init(self, id_, name, value):
return ''
|
<commit_before>from django.forms.widgets import Widget
from django.utils.safestring import mark_safe
class WidgetWithScript(Widget):
def render(self, name, value, attrs=None):
widget = super(WidgetWithScript, self).render(name, value, attrs)
final_attrs = self.build_attrs(attrs, name=name)
id_ = final_attrs.get('id', None)
if 'id_' is None:
return widget
js = self.render_js_init(id_, name, value)
out = '{0}<script>{1}</script>'.format(widget, js)
return mark_safe(out)
def render_js_init(self, id_, name, value):
return ''
<commit_msg>Fix unicode error caused by ascii format string<commit_after>from __future__ import absolute_import, unicode_literals
from django.forms.widgets import Widget
from django.utils.safestring import mark_safe
class WidgetWithScript(Widget):
def render(self, name, value, attrs=None):
widget = super(WidgetWithScript, self).render(name, value, attrs)
final_attrs = self.build_attrs(attrs, name=name)
id_ = final_attrs.get('id', None)
if 'id_' is None:
return widget
js = self.render_js_init(id_, name, value)
out = '{0}<script>{1}</script>'.format(widget, js)
return mark_safe(out)
def render_js_init(self, id_, name, value):
return ''
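Separate from the unicode fix, the guard `if 'id_' is None:` compares the string literal `'id_'` to `None`, which is always false, so the early return never fires. The check was presumably meant to test the variable itself; a corrected sketch of the class:

from django.forms.widgets import Widget
from django.utils.safestring import mark_safe

class WidgetWithScript(Widget):
    def render(self, name, value, attrs=None):
        widget = super(WidgetWithScript, self).render(name, value, attrs)
        final_attrs = self.build_attrs(attrs, name=name)
        id_ = final_attrs.get('id', None)
        if id_ is None:  # test the variable, not the string literal 'id_'
            return widget
        js = self.render_js_init(id_, name, value)
        return mark_safe('{0}<script>{1}</script>'.format(widget, js))

    def render_js_init(self, id_, name, value):
        return ''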
|
576d4b27d30f3bd827dfc53bf55ab6f7c95866a5
|
specchio/utils.py
|
specchio/utils.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import fnmatch
def get_re_from_single_line(line):
"""Get regular expression from a single line in `.gitignore`
The rules of `.gitignore` follow http://git-scm.com/docs/gitignore
:param line: str -- single line from `.gitignore`
:return: tuple
0, None -- nothing to pattern
1, str -- hash to pattern
2, str -- negate ignore path to pattern
3, str -- ignore path to pattern
"""
_line = line.strip()
# Deal with file name end with ` `
line = _line + " " if _line.endswith("\\") and line.endswith(" ") else _line
line = line.replace("\\ ", " ")
# Deal with `**` in folder path
line = line.replace("**", "*")
if line.endswith("/"):
line += "*"
if line == "":
# A blank line matches no files
return 0, None
elif line.startswith("#"):
# A line starting with `#` serves as a comment
return 0, None
elif line.startswith("\\#"):
# A line starting with `\#` serves as a pattern for hash
return 1, line.split("#")[-1].strip()
else:
if line.startswith("!"):
# A line starting with `!` negates the pattern
re_type = 2
line = line[1:]
else:
re_type = 3
# Deal with escape string
line = line.replace("\\", "")
if line.startswith("/"):
# Dealing with line start with `/`, just remove the head
return re_type, fnmatch.translate(line[1:])
else:
return re_type, fnmatch.translate(line)
|
Add func to get regular expression from a single line in `.gitignore`
|
Add func to get regular expression from a single line in `.gitignore`
The rule of the func followed http://git-scm.com/docs/gitignore
|
Python
|
mit
|
brickgao/specchio
|
Add func to get regular expression from a single line in `.gitignore`
The rule of the func followed http://git-scm.com/docs/gitignore
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import fnmatch
def get_re_from_single_line(line):
"""Get regular expression from a single line in `.gitignore`
The rules of `.gitignore` follow http://git-scm.com/docs/gitignore
:param line: str -- single line from `.gitignore`
:return: tuple
0, None -- nothing to pattern
1, str -- hash to pattern
2, str -- negate ignore path to pattern
3, str -- ignore path to pattern
"""
_line = line.strip()
# Deal with file name end with ` `
line = _line + " " if _line.endswith("\\") and line.endswith(" ") else _line
line = line.replace("\\ ", " ")
# Deal with `**` in folder path
line = line.replace("**", "*")
if line.endswith("/"):
line += "*"
if line == "":
# A blank line matches no files
return 0, None
elif line.startswith("#"):
# A line starting with `#` serves as a comment
return 0, None
elif line.startswith("\\#"):
# A line starting with `\#` serves as a pattern for hash
return 1, line.split("#")[-1].strip()
else:
if line.startswith("!"):
# A line starting with `!` negates the pattern
re_type = 2
line = line[1:]
else:
re_type = 3
# Deal with escape string
line = line.replace("\\", "")
if line.startswith("/"):
# Dealing with line start with `/`, just remove the head
return re_type, fnmatch.translate(line[1:])
else:
return re_type, fnmatch.translate(line)
|
<commit_before><commit_msg>Add func to get regular expression from a single line in `.gitignore`
The rule of the func followed http://git-scm.com/docs/gitignore<commit_after>
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import fnmatch
def get_re_from_single_line(line):
"""Get regular expression from a single line in `.gitignore`
The rules of `.gitignore` follow http://git-scm.com/docs/gitignore
:param line: str -- single line from `.gitignore`
:return: tuple
0, None -- nothing to pattern
1, str -- hash to pattern
2, str -- negate ignore path to pattern
3, str -- ignore path to pattern
"""
_line = line.strip()
# Deal with file name end with ` `
line = _line + " " if _line.endswith("\\") and line.endswith(" ") else _line
line = line.replace("\\ ", " ")
# Deal with `**` in folder path
line = line.replace("**", "*")
if line.endswith("/"):
line += "*"
if line == "":
# A blank line matches no files
return 0, None
elif line.startswith("#"):
# A line starting with `#` serves as a comment
return 0, None
elif line.startswith("\\#"):
# A line starting with `\#` serves as a pattern for hash
return 1, line.split("#")[-1].strip()
else:
if line.startswith("!"):
# A line starting with `!` negates the pattern
re_type = 2
line = line[1:]
else:
re_type = 3
# Deal with escape string
line = line.replace("\\", "")
if line.startswith("/"):
# Dealing with line start with `/`, just remove the head
return re_type, fnmatch.translate(line[1:])
else:
return re_type, fnmatch.translate(line)
|
Add func to get regular expression from a single line in `.gitignore`
The rule of the func followed http://git-scm.com/docs/gitignore#!/usr/bin/env python
# -*- coding: utf-8 -*-
import fnmatch
def get_re_from_single_line(line):
"""Get regular expression from a single line in `.gitignore`
The rules of `.gitignore` follow http://git-scm.com/docs/gitignore
:param line: str -- single line from `.gitignore`
:return: tuple
0, None -- nothing to pattern
1, str -- hash to pattern
2, str -- negate ignore path to pattern
3, str -- ignore path to pattern
"""
_line = line.strip()
# Deal with file name end with ` `
line = _line + " " if _line.endswith("\\") and line.endswith(" ") else _line
line = line.replace("\\ ", " ")
# Deal with `**` in folder path
line = line.replace("**", "*")
if line.endswith("/"):
line += "*"
if line == "":
# A blank line matches no files
return 0, None
elif line.startswith("#"):
# A line starting with `#` serves as a comment
return 0, None
elif line.startswith("\\#"):
# A line starting with `\#` serves as a pattern for hash
return 1, line.split("#")[-1].strip()
else:
if line.startswith("!"):
# A line starting with `!` negates the pattern
re_type = 2
line = line[1:]
else:
re_type = 3
# Deal with escape string
line = line.replace("\\", "")
if line.startswith("/"):
# Dealing with line start with `/`, just remove the head
return re_type, fnmatch.translate(line[1:])
else:
return re_type, fnmatch.translate(line)
|
<commit_before><commit_msg>Add func to get regular expression from a single line in `.gitignore`
The rule of the func followed http://git-scm.com/docs/gitignore<commit_after>#!/usr/bin/env python
# -*- coding: utf-8 -*-
import fnmatch
def get_re_from_single_line(line):
"""Get regular expression from a single line in `.gitignore`
The rules of `.gitignore` follow http://git-scm.com/docs/gitignore
:param line: str -- single line from `.gitignore`
:return: tuple
0, None -- nothing to pattern
1, str -- hash to pattern
2, str -- negate ignore path to pattern
3, str -- ignore path to pattern
"""
_line = line.strip()
# Deal with file name end with ` `
line = _line + " " if _line.endswith("\\") and line.endswith(" ") else _line
line = line.replace("\\ ", " ")
# Deal with `**` in folder path
line = line.replace("**", "*")
if line.endswith("/"):
line += "*"
if line == "":
# A blank line matches no files
return 0, None
elif line.startswith("#"):
# A line starting with `#` serves as a comment
return 0, None
elif line.startswith("\\#"):
# A line starting with `\#` serves as a pattern for hash
return 1, line.split("#")[-1].strip()
else:
if line.startswith("!"):
# A line starting with `!` negates the pattern
re_type = 2
line = line[1:]
else:
re_type = 3
# Deal with escape string
line = line.replace("\\", "")
if line.startswith("/"):
# Dealing with line start with `/`, just remove the head
return re_type, fnmatch.translate(line[1:])
else:
return re_type, fnmatch.translate(line)
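Illustrative calls, assuming the function above is importable; the expected outputs follow directly from the branches:

import fnmatch

# '# comment' is ignored outright.
print(get_re_from_single_line("# comment"))  # (0, None)
# '!' negates the pattern.
print(get_re_from_single_line("!keep.txt"))  # (2, fnmatch.translate('keep.txt'))
# A trailing '/' matches everything under the directory.
print(get_re_from_single_line("build/"))     # (3, fnmatch.translate('build/*'))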
|
|
61e002969e63995a1355f51b58a0ec98d7a12532
|
froide/publicbody/migrations/0022_auto_20180726_1151.py
|
froide/publicbody/migrations/0022_auto_20180726_1151.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.14 on 2018-07-26 09:51
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('publicbody', '0021_proposedpublicbody'),
]
operations = [
migrations.AlterModelOptions(
name='proposedpublicbody',
options={'ordering': ('-created_at',), 'verbose_name': 'Proposed Public Body', 'verbose_name_plural': 'Proposed Public Bodies'},
),
]
|
Add order to proposed public bodies
|
Add order to proposed public bodies
|
Python
|
mit
|
fin/froide,stefanw/froide,fin/froide,fin/froide,stefanw/froide,fin/froide,stefanw/froide,stefanw/froide,stefanw/froide
|
Add order to proposed public bodies
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.14 on 2018-07-26 09:51
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('publicbody', '0021_proposedpublicbody'),
]
operations = [
migrations.AlterModelOptions(
name='proposedpublicbody',
options={'ordering': ('-created_at',), 'verbose_name': 'Proposed Public Body', 'verbose_name_plural': 'Proposed Public Bodies'},
),
]
|
<commit_before><commit_msg>Add order to proposed public bodies<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.14 on 2018-07-26 09:51
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('publicbody', '0021_proposedpublicbody'),
]
operations = [
migrations.AlterModelOptions(
name='proposedpublicbody',
options={'ordering': ('-created_at',), 'verbose_name': 'Proposed Public Body', 'verbose_name_plural': 'Proposed Public Bodies'},
),
]
|
Add order to proposed public bodies# -*- coding: utf-8 -*-
# Generated by Django 1.11.14 on 2018-07-26 09:51
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('publicbody', '0021_proposedpublicbody'),
]
operations = [
migrations.AlterModelOptions(
name='proposedpublicbody',
options={'ordering': ('-created_at',), 'verbose_name': 'Proposed Public Body', 'verbose_name_plural': 'Proposed Public Bodies'},
),
]
|
<commit_before><commit_msg>Add order to proposed public bodies<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.11.14 on 2018-07-26 09:51
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('publicbody', '0021_proposedpublicbody'),
]
operations = [
migrations.AlterModelOptions(
name='proposedpublicbody',
options={'ordering': ('-created_at',), 'verbose_name': 'Proposed Public Body', 'verbose_name_plural': 'Proposed Public Bodies'},
),
]
|
|
5d404ecd05b46697e90e60775541eff33ac2cb22
|
functional/tests/network/v2/test_ip_availability.py
|
functional/tests/network/v2/test_ip_availability.py
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from functional.common import test
class IPAvailabilityTests(test.TestCase):
"""Functional tests for IP availability. """
NAME = uuid.uuid4().hex
NETWORK_NAME = uuid.uuid4().hex
FIELDS = ['network_name']
@classmethod
def setUpClass(cls):
# Create a network for the subnet.
cls.openstack('network create ' + cls.NETWORK_NAME)
opts = cls.get_show_opts(['name'])
raw_output = cls.openstack(
'subnet create --network ' + cls.NETWORK_NAME +
' --subnet-range 10.10.10.0/24 ' +
cls.NAME + opts
)
expected = cls.NAME + '\n'
cls.assertOutput(expected, raw_output)
@classmethod
def tearDownClass(cls):
raw_subnet = cls.openstack('subnet delete ' + cls.NAME)
raw_network = cls.openstack('network delete ' + cls.NETWORK_NAME)
cls.assertOutput('', raw_subnet)
cls.assertOutput('', raw_network)
def test_ip_availability_list(self):
opts = ' -f csv -c "Network Name"'
raw_output = self.openstack('ip availability list' + opts)
self.assertIn(self.NETWORK_NAME, raw_output)
def test_ip_availability_show(self):
opts = self.get_show_opts(self.FIELDS)
raw_output = self.openstack(
'ip availability show ' + self.NETWORK_NAME + opts)
self.assertEqual(self.NETWORK_NAME + "\n", raw_output)
|
Add functional tests for IP availability
|
Add functional tests for IP availability
This patch adds functional tests for IP availability
Partially-Implements: blueprint neutron-ip-capacity
Change-Id: I1c1dc01801707fe8f0dc3c976e5d345d2b0db0ec
|
Python
|
apache-2.0
|
redhat-openstack/python-openstackclient,openstack/python-openstackclient,dtroyer/python-openstackclient,redhat-openstack/python-openstackclient,openstack/python-openstackclient,dtroyer/python-openstackclient
|
Add functional tests for IP availability
This patch adds functional tests for IP availability
Partially-Implements: blueprint neutron-ip-capacity
Change-Id: I1c1dc01801707fe8f0dc3c976e5d345d2b0db0ec
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from functional.common import test
class IPAvailabilityTests(test.TestCase):
"""Functional tests for IP availability. """
NAME = uuid.uuid4().hex
NETWORK_NAME = uuid.uuid4().hex
FIELDS = ['network_name']
@classmethod
def setUpClass(cls):
# Create a network for the subnet.
cls.openstack('network create ' + cls.NETWORK_NAME)
opts = cls.get_show_opts(['name'])
raw_output = cls.openstack(
'subnet create --network ' + cls.NETWORK_NAME +
' --subnet-range 10.10.10.0/24 ' +
cls.NAME + opts
)
expected = cls.NAME + '\n'
cls.assertOutput(expected, raw_output)
@classmethod
def tearDownClass(cls):
raw_subnet = cls.openstack('subnet delete ' + cls.NAME)
raw_network = cls.openstack('network delete ' + cls.NETWORK_NAME)
cls.assertOutput('', raw_subnet)
cls.assertOutput('', raw_network)
def test_ip_availability_list(self):
opts = ' -f csv -c "Network Name"'
raw_output = self.openstack('ip availability list' + opts)
self.assertIn(self.NETWORK_NAME, raw_output)
def test_ip_availability_show(self):
opts = self.get_show_opts(self.FIELDS)
raw_output = self.openstack(
'ip availability show ' + self.NETWORK_NAME + opts)
self.assertEqual(self.NETWORK_NAME + "\n", raw_output)
|
<commit_before><commit_msg>Add functional tests for IP availability
This patch adds functional tests for IP availability
Partially-Implements: blueprint neutron-ip-capacity
Change-Id: I1c1dc01801707fe8f0dc3c976e5d345d2b0db0ec<commit_after>
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from functional.common import test
class IPAvailabilityTests(test.TestCase):
"""Functional tests for IP availability. """
NAME = uuid.uuid4().hex
NETWORK_NAME = uuid.uuid4().hex
FIELDS = ['network_name']
@classmethod
def setUpClass(cls):
# Create a network for the subnet.
cls.openstack('network create ' + cls.NETWORK_NAME)
opts = cls.get_show_opts(['name'])
raw_output = cls.openstack(
'subnet create --network ' + cls.NETWORK_NAME +
' --subnet-range 10.10.10.0/24 ' +
cls.NAME + opts
)
expected = cls.NAME + '\n'
cls.assertOutput(expected, raw_output)
@classmethod
def tearDownClass(cls):
raw_subnet = cls.openstack('subnet delete ' + cls.NAME)
raw_network = cls.openstack('network delete ' + cls.NETWORK_NAME)
cls.assertOutput('', raw_subnet)
cls.assertOutput('', raw_network)
def test_ip_availability_list(self):
opts = ' -f csv -c "Network Name"'
raw_output = self.openstack('ip availability list' + opts)
self.assertIn(self.NETWORK_NAME, raw_output)
def test_ip_availability_show(self):
opts = self.get_show_opts(self.FIELDS)
raw_output = self.openstack(
'ip availability show ' + self.NETWORK_NAME + opts)
self.assertEqual(self.NETWORK_NAME + "\n", raw_output)
|
Add functional tests for IP availability
This patch adds functional tests for IP availability
Partially-Implements: blueprint neutron-ip-capacity
Change-Id: I1c1dc01801707fe8f0dc3c976e5d345d2b0db0ec# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from functional.common import test
class IPAvailabilityTests(test.TestCase):
"""Functional tests for IP availability. """
NAME = uuid.uuid4().hex
NETWORK_NAME = uuid.uuid4().hex
FIELDS = ['network_name']
@classmethod
def setUpClass(cls):
# Create a network for the subnet.
cls.openstack('network create ' + cls.NETWORK_NAME)
opts = cls.get_show_opts(['name'])
raw_output = cls.openstack(
'subnet create --network ' + cls.NETWORK_NAME +
' --subnet-range 10.10.10.0/24 ' +
cls.NAME + opts
)
expected = cls.NAME + '\n'
cls.assertOutput(expected, raw_output)
@classmethod
def tearDownClass(cls):
raw_subnet = cls.openstack('subnet delete ' + cls.NAME)
raw_network = cls.openstack('network delete ' + cls.NETWORK_NAME)
cls.assertOutput('', raw_subnet)
cls.assertOutput('', raw_network)
def test_ip_availability_list(self):
opts = ' -f csv -c "Network Name"'
raw_output = self.openstack('ip availability list' + opts)
self.assertIn(self.NETWORK_NAME, raw_output)
def test_ip_availability_show(self):
opts = self.get_show_opts(self.FIELDS)
raw_output = self.openstack(
'ip availability show ' + self.NETWORK_NAME + opts)
self.assertEqual(self.NETWORK_NAME + "\n", raw_output)
|
<commit_before><commit_msg>Add functional tests for IP availability
This patch adds functional tests for IP availability
Partially-Implements: blueprint neutron-ip-capacity
Change-Id: I1c1dc01801707fe8f0dc3c976e5d345d2b0db0ec<commit_after># Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from functional.common import test
class IPAvailabilityTests(test.TestCase):
"""Functional tests for IP availability. """
NAME = uuid.uuid4().hex
NETWORK_NAME = uuid.uuid4().hex
FIELDS = ['network_name']
@classmethod
def setUpClass(cls):
# Create a network for the subnet.
cls.openstack('network create ' + cls.NETWORK_NAME)
opts = cls.get_show_opts(['name'])
raw_output = cls.openstack(
'subnet create --network ' + cls.NETWORK_NAME +
' --subnet-range 10.10.10.0/24 ' +
cls.NAME + opts
)
expected = cls.NAME + '\n'
cls.assertOutput(expected, raw_output)
@classmethod
def tearDownClass(cls):
raw_subnet = cls.openstack('subnet delete ' + cls.NAME)
raw_network = cls.openstack('network delete ' + cls.NETWORK_NAME)
cls.assertOutput('', raw_subnet)
cls.assertOutput('', raw_network)
def test_ip_availability_list(self):
opts = ' -f csv -c "Network Name"'
raw_output = self.openstack('ip availability list' + opts)
self.assertIn(self.NETWORK_NAME, raw_output)
def test_ip_availability_show(self):
opts = self.get_show_opts(self.FIELDS)
raw_output = self.openstack(
'ip availability show ' + self.NETWORK_NAME + opts)
self.assertEqual(self.NETWORK_NAME + "\n", raw_output)
|
|
8cc5c817115cac86f46d271ba52e1e0af4251bfe
|
analysis_plot_granule_fig.py
|
analysis_plot_granule_fig.py
|
# -*- coding:utf-8 -*-
"""
Select a specific simulation from an HDF5 file and plot its granule figure.
"""
import numpy as np
import matplotlib.pyplot as plt
import tables
import h5manager as h5m
from plotting import granule_pop_figure
from analysis import fftmax
plt.ion()
DB_FILENAME = "db_two_glom_beta_new_ps_interco_strength0_1_interco_rate0_1.h5"
DB = tables.openFile(DB_FILENAME)
# Get all simulation data
ATTRS = (('paramset', '_v_attrs', 'Common', 'inter_conn_rate', 0, 1),
('paramset', '_v_attrs', 'Common', 'inter_conn_strength', 0, 1),
('paramset', 'arrays', 'times'),
('paramset', '_v_attrs', 'Common', 'simu_dt'),
('results', 's_granule'),
('results', 's_syn_self'),
('results', '_v_attrs'))
ALL_SIMU_ATTRS = h5m.get_all_attrs(DB, ATTRS)
# Some useful functions
def deq(a, b, delta):
"""Delta equality"""
return abs(a - b) < delta
class DummyPkg:
def __init__(self, values, times):
self.values = values
self.times = times
# Get the specific simulations
def get_interco(simu, interco_rate, interco_strength):
return deq(simu[0], interco_rate, 0.02) and deq(simu[1], interco_strength, 0.02)
GOOD_SIMUS = filter(lambda x: get_interco(x, 0.793, 0.918), ALL_SIMU_ATTRS)
REDO_FFTMAX = []
for simu in GOOD_SIMUS:
gr_s = simu[4].read()
gr_s_syn_self = simu[5].read()
times = simu[2].read()
dt = float(simu[3])
granule_pop_figure(gr_s, gr_s_syn_self, times, dt)
plt.show()
dummy = DummyPkg(gr_s, times)
REDO_FFTMAX.append(fftmax(dummy, 2, dt))
print(REDO_FFTMAX)
# Finally close the DB
DB.close()
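The `deq` helper above implements an absolute-tolerance float comparison, which is needed because the swept parameter values are floats and exact equality would select nothing. On Python 3.5+ the standard library offers a near-equivalent check; this is a side-by-side sketch, not a change to the script:
import math
def deq_stdlib(a, b, delta):
    """Near-equivalent of deq() above, via math.isclose."""
    return math.isclose(a, b, abs_tol=delta)
assert deq_stdlib(0.793, 0.80, 0.02)      # within tolerance: selected
assert not deq_stdlib(0.793, 0.85, 0.02)  # outside tolerance: filtered out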
|
Add a plot script for std figure of chosen simu
|
Add a plot script for std figure of chosen simu
|
Python
|
mit
|
neuro-lyon/multiglom-model,neuro-lyon/multiglom-model
|
Add a plot script for std figure of chosen simu
|
# -*- coding:utf-8 -*-
"""
Select a specific simulation from an HDF5 file and plot its granule figure.
"""
import numpy as np
import matplotlib.pyplot as plt
import tables
import h5manager as h5m
from plotting import granule_pop_figure
from analysis import fftmax
plt.ion()
DB_FILENAME = "db_two_glom_beta_new_ps_interco_strength0_1_interco_rate0_1.h5"
DB = tables.openFile(DB_FILENAME)
# Get all simulation data
ATTRS = (('paramset', '_v_attrs', 'Common', 'inter_conn_rate', 0, 1),
('paramset', '_v_attrs', 'Common', 'inter_conn_strength', 0, 1),
('paramset', 'arrays', 'times'),
('paramset', '_v_attrs', 'Common', 'simu_dt'),
('results', 's_granule'),
('results', 's_syn_self'),
('results', '_v_attrs'))
ALL_SIMU_ATTRS = h5m.get_all_attrs(DB, ATTRS)
# Some useful functions
def deq(a, b, delta):
"""Delta equality"""
return abs(a - b) < delta
class DummyPkg:
def __init__(self, values, times):
self.values = values
self.times = times
# Get the specific simulations
def get_interco(simu, interco_rate, interco_strength):
return deq(simu[0], interco_rate, 0.02) and deq(simu[1], interco_strength, 0.02)
GOOD_SIMUS = filter(lambda x: get_interco(x, 0.793, 0.918), ALL_SIMU_ATTRS)
REDO_FFTMAX = []
for simu in GOOD_SIMUS:
gr_s = simu[4].read()
gr_s_syn_self = simu[5].read()
times = simu[2].read()
dt = float(simu[3])
granule_pop_figure(gr_s, gr_s_syn_self, times, dt)
plt.show()
dummy = DummyPkg(gr_s, times)
REDO_FFTMAX.append(fftmax(dummy, 2, dt))
print(REDO_FFTMAX)
# Finally close the DB
DB.close()
|
<commit_before><commit_msg>Add a plot script for std figure of chosen simu<commit_after>
|
# -*- coding:utf-8 -*-
"""
Select a specific simulation from an HDF5 file and plot its granule figure.
"""
import numpy as np
import matplotlib.pyplot as plt
import tables
import h5manager as h5m
from plotting import granule_pop_figure
from analysis import fftmax
plt.ion()
DB_FILENAME = "db_two_glom_beta_new_ps_interco_strength0_1_interco_rate0_1.h5"
DB = tables.openFile(DB_FILENAME)
# Get all simulation data
ATTRS = (('paramset', '_v_attrs', 'Common', 'inter_conn_rate', 0, 1),
('paramset', '_v_attrs', 'Common', 'inter_conn_strength', 0, 1),
('paramset', 'arrays', 'times'),
('paramset', '_v_attrs', 'Common', 'simu_dt'),
('results', 's_granule'),
('results', 's_syn_self'),
('results', '_v_attrs'))
ALL_SIMU_ATTRS = h5m.get_all_attrs(DB, ATTRS)
# Some useful functions
def deq(a, b, delta):
"""Delta equality"""
return abs(a - b) < delta
class DummyPkg:
def __init__(self, values, times):
self.values = values
self.times = times
# Get the specific simulations
def get_interco(simu, interco_rate, interco_strength):
return deq(simu[0], interco_rate, 0.02) and deq(simu[1], interco_strength, 0.02)
GOOD_SIMUS = filter(lambda x: get_interco(x, 0.793, 0.918), ALL_SIMU_ATTRS)
REDO_FFTMAX = []
for simu in GOOD_SIMUS:
gr_s = simu[4].read()
gr_s_syn_self = simu[5].read()
times = simu[2].read()
dt = float(simu[3])
granule_pop_figure(gr_s, gr_s_syn_self, times, dt)
plt.show()
dummy = DummyPkg(gr_s, times)
REDO_FFTMAX.append(fftmax(dummy, 2, dt))
print(REDO_FFTMAX)
# Finally close the DB
DB.close()
|
Add a plot script for std figure of chosen simu
# -*- coding:utf-8 -*-
"""
Select a specific simulation from an HDF5 file and plot its granule figure.
"""
import numpy as np
import matplotlib.pyplot as plt
import tables
import h5manager as h5m
from plotting import granule_pop_figure
from analysis import fftmax
plt.ion()
DB_FILENAME = "db_two_glom_beta_new_ps_interco_strength0_1_interco_rate0_1.h5"
DB = tables.openFile(DB_FILENAME)
# Get all simulation data
ATTRS = (('paramset', '_v_attrs', 'Common', 'inter_conn_rate', 0, 1),
('paramset', '_v_attrs', 'Common', 'inter_conn_strength', 0, 1),
('paramset', 'arrays', 'times'),
('paramset', '_v_attrs', 'Common', 'simu_dt'),
('results', 's_granule'),
('results', 's_syn_self'),
('results', '_v_attrs'))
ALL_SIMU_ATTRS = h5m.get_all_attrs(DB, ATTRS)
# Some useful functions
def deq(a, b, delta):
"""Delta equality"""
return abs(a - b) < delta
class DummyPkg:
def __init__(self, values, times):
self.values = values
self.times = times
# Get the specific simulations
def get_interco(simu, interco_rate, interco_strength):
return deq(simu[0], interco_rate, 0.02) and deq(simu[1], interco_strength, 0.02)
GOOD_SIMUS = filter(lambda x: get_interco(x, 0.793, 0.918), ALL_SIMU_ATTRS)
REDO_FFTMAX = []
for simu in GOOD_SIMUS:
gr_s = simu[4].read()
gr_s_syn_self = simu[5].read()
times = simu[2].read()
dt = float(simu[3])
granule_pop_figure(gr_s, gr_s_syn_self, times, dt)
plt.show()
dummy = DummyPkg(gr_s, times)
REDO_FFTMAX.append(fftmax(dummy, 2, dt))
print(REDO_FFTMAX)
# Finally close the DB
DB.close()
|
<commit_before><commit_msg>Add a plot script for std figure of chosen simu<commit_after># -*- coding:utf-8 -*-
"""
Select a specific simulation from an HDF5 file and plot its granule figure.
"""
import numpy as np
import matplotlib.pyplot as plt
import tables
import h5manager as h5m
from plotting import granule_pop_figure
from analysis import fftmax
plt.ion()
DB_FILENAME = "db_two_glom_beta_new_ps_interco_strength0_1_interco_rate0_1.h5"
DB = tables.openFile(DB_FILENAME)
# Get all simulation data
ATTRS = (('paramset', '_v_attrs', 'Common', 'inter_conn_rate', 0, 1),
('paramset', '_v_attrs', 'Common', 'inter_conn_strength', 0, 1),
('paramset', 'arrays', 'times'),
('paramset', '_v_attrs', 'Common', 'simu_dt'),
('results', 's_granule'),
('results', 's_syn_self'),
('results', '_v_attrs'))
ALL_SIMU_ATTRS = h5m.get_all_attrs(DB, ATTRS)
# Some useful functions
def deq(a, b, delta):
"""Delta equality"""
return abs(a - b) < delta
class DummyPkg:
def __init__(self, values, times):
self.values = values
self.times = times
# Get the specific simulations
def get_interco(simu, interco_rate, interco_strength):
return deq(simu[0], interco_rate, 0.02) and deq(simu[1], interco_strength, 0.02)
GOOD_SIMUS = filter(lambda x: get_interco(x, 0.793, 0.918), ALL_SIMU_ATTRS)
REDO_FFTMAX = []
for simu in GOOD_SIMUS:
gr_s = simu[4].read()
gr_s_syn_self = simu[5].read()
times = simu[2].read()
dt = float(simu[3])
granule_pop_figure(gr_s, gr_s_syn_self, times, dt)
plt.show()
dummy = DummyPkg(gr_s, times)
REDO_FFTMAX.append(fftmax(dummy, 2, dt))
print(REDO_FFTMAX)
# Finally close the DB
DB.close()
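A compatibility note: `tables.openFile` and the camelCase calls above belong to the pre-3.0 PyTables API; PyTables 3.0 renamed them to snake_case. A minimal sketch of the modern open/close pair, with a hypothetical file name:
import tables
db = tables.open_file("db_two_glom.h5", mode="r")  # hypothetical file name
try:
    pass  # traverse nodes here, e.g. with db.walk_nodes('/')
finally:
    db.close()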
|
|
d98d834aaadc49f0f4102f8f4acac96b09ea42b3
|
mdtraj/tests/test_load.py
|
mdtraj/tests/test_load.py
|
##############################################################################
# MDTraj: A Python Library for Loading, Saving, and Manipulating
# Molecular Dynamics Trajectories.
# Copyright 2012-2013 Stanford University and the Authors
#
# Authors: Robert McGibbon
# Contributors:
#
# MDTraj is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 2.1
# of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with MDTraj. If not, see <http://www.gnu.org/licenses/>.
##############################################################################
"""
Tests of generic loading functionality.
"""
from mdtraj import load
from mdtraj.testing import get_fn
def test_load_single():
"""
Just check for any raised errors coming from loading a single file.
"""
load(get_fn('frame0.pdb'))
def test_load_single_list():
"""
See if a single-element list of files is successfully loaded.
"""
load([get_fn('frame0.pdb')])
def test_load_many_list():
"""
See if a multi-element list of files is successfully loaded.
"""
traj = load(2 * [get_fn('frame0.pdb')], discard_overlapping_frames=False)
assert traj.n_frames == 2
|
Add tests for improved loading functionality.
|
Add tests for improved loading functionality.
|
Python
|
lgpl-2.1
|
leeping/mdtraj,ctk3b/mdtraj,tcmoore3/mdtraj,mattwthompson/mdtraj,msultan/mdtraj,tcmoore3/mdtraj,msultan/mdtraj,msultan/mdtraj,mdtraj/mdtraj,tcmoore3/mdtraj,ctk3b/mdtraj,tcmoore3/mdtraj,gph82/mdtraj,leeping/mdtraj,rmcgibbo/mdtraj,ctk3b/mdtraj,jchodera/mdtraj,dwhswenson/mdtraj,dwhswenson/mdtraj,rmcgibbo/mdtraj,mattwthompson/mdtraj,mdtraj/mdtraj,mdtraj/mdtraj,rmcgibbo/mdtraj,mattwthompson/mdtraj,jchodera/mdtraj,ctk3b/mdtraj,jchodera/mdtraj,gph82/mdtraj,mattwthompson/mdtraj,leeping/mdtraj,leeping/mdtraj,gph82/mdtraj,jchodera/mdtraj,dwhswenson/mdtraj,ctk3b/mdtraj,msultan/mdtraj
|
Add tests for improved loading functionality.
|
##############################################################################
# MDTraj: A Python Library for Loading, Saving, and Manipulating
# Molecular Dynamics Trajectories.
# Copyright 2012-2013 Stanford University and the Authors
#
# Authors: Robert McGibbon
# Contributors:
#
# MDTraj is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 2.1
# of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with MDTraj. If not, see <http://www.gnu.org/licenses/>.
##############################################################################
"""
Tests of generic loading functionality.
"""
from mdtraj import load
from mdtraj.testing import get_fn
def test_load_single():
"""
Just check for any raised errors coming from loading a single file.
"""
load(get_fn('frame0.pdb'))
def test_load_single_list():
"""
See if a single-element list of files is successfully loaded.
"""
load([get_fn('frame0.pdb')])
def test_load_many_list():
"""
See if a multi-element list of files is successfully loaded.
"""
traj = load(2 * [get_fn('frame0.pdb')], discard_overlapping_frames=False)
assert traj.n_frames == 2
|
<commit_before><commit_msg>Add tests for improved loading functionality.<commit_after>
|
##############################################################################
# MDTraj: A Python Library for Loading, Saving, and Manipulating
# Molecular Dynamics Trajectories.
# Copyright 2012-2013 Stanford University and the Authors
#
# Authors: Robert McGibbon
# Contributors:
#
# MDTraj is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 2.1
# of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with MDTraj. If not, see <http://www.gnu.org/licenses/>.
##############################################################################
"""
Tests of generic loading functionality.
"""
from mdtraj import load
from mdtraj.testing import get_fn
def test_load_single():
"""
Just check for any raised errors coming from loading a single file.
"""
load(get_fn('frame0.pdb'))
def test_load_single_list():
"""
See if a single-element list of files is successfully loaded.
"""
load([get_fn('frame0.pdb')])
def test_load_many_list():
"""
See if a multi-element list of files is successfully loaded.
"""
traj = load(2 * [get_fn('frame0.pdb')], discard_overlapping_frames=False)
assert traj.n_frames == 2
|
Add tests for improved loading functionality.
##############################################################################
# MDTraj: A Python Library for Loading, Saving, and Manipulating
# Molecular Dynamics Trajectories.
# Copyright 2012-2013 Stanford University and the Authors
#
# Authors: Robert McGibbon
# Contributors:
#
# MDTraj is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 2.1
# of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with MDTraj. If not, see <http://www.gnu.org/licenses/>.
##############################################################################
"""
Tests of generic loading functionality.
"""
from mdtraj import load
from mdtraj.testing import get_fn
def test_load_single():
"""
Just check for any raised errors coming from loading a single file.
"""
load(get_fn('frame0.pdb'))
def test_load_single_list():
"""
See if a single-element list of files is successfully loaded.
"""
load([get_fn('frame0.pdb')])
def test_load_many_list():
"""
See if a multi-element list of files is successfully loaded.
"""
traj = load(2 * [get_fn('frame0.pdb')], discard_overlapping_frames=False)
assert traj.n_frames == 2
|
<commit_before><commit_msg>Add tests for improved loading functionality.<commit_after>##############################################################################
# MDTraj: A Python Library for Loading, Saving, and Manipulating
# Molecular Dynamics Trajectories.
# Copyright 2012-2013 Stanford University and the Authors
#
# Authors: Robert McGibbon
# Contributors:
#
# MDTraj is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 2.1
# of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with MDTraj. If not, see <http://www.gnu.org/licenses/>.
##############################################################################
"""
Tests of generic loading functionality.
"""
from mdtraj import load
from mdtraj.testing import get_fn
def test_load_single():
"""
Just check for any raised errors coming from loading a single file.
"""
load(get_fn('frame0.pdb'))
def test_load_single_list():
"""
See if a single-element list of files is successfully loaded.
"""
load([get_fn('frame0.pdb')])
def test_load_many_list():
"""
See if a multi-element list of files is successfully loaded.
"""
traj = load(2 * [get_fn('frame0.pdb')], discard_overlapping_frames=False)
assert traj.n_frames == 2
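For context on the list form exercised by the last test: `mdtraj.load` dispatches on file extension and, given a list, concatenates the trajectories in order, which is how chunked production runs are stitched back together. A hedged sketch with hypothetical file names:
import mdtraj
# The part files and topology below are hypothetical; `top` supplies the
# topology that bare coordinate formats such as .xtc do not carry.
traj = mdtraj.load(["run.part1.xtc", "run.part2.xtc"], top="system.pdb")
print(traj.n_frames)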
|
|
245206b164d1b55aa68a97e88d1e2fe8fec8f7b9
|
exercises/chapter_08/exercise_08_12/exercise_08_12.py
|
exercises/chapter_08/exercise_08_12/exercise_08_12.py
|
# 8-12. Sandwiches
def make_sandwich(*ingredients):
"""Print the ingredients in the sandwich"""
print ("Making a sandwich with the following ingredients:")
for ingredient in ingredients:
print("\t- " + ingredient)
print("\n")
make_sandwich("Flatbread", "American Cheese", "Green Peppers")
make_sandwich("Italian bread", "Swiss cheese", "Avocado", "Mustard")
make_sandwich("Carrots")
|
Add solution to exercise 8.12.
|
Add solution to exercise 8.12.
|
Python
|
mit
|
HenrikSamuelsson/python-crash-course
|
Add solution to exercise 8.12.
|
# 8-12. Sandwiches
def make_sandwich(*ingredients):
"""Print the ingredients in the sandwich"""
print ("Making a sandwich with the following ingredients:")
for ingredient in ingredients:
print("\t- " + ingredient)
print("\n")
make_sandwich("Flatbread", "American Cheese", "Green Peppers")
make_sandwich("Italian bread", "Swiss cheese", "Avocado", "Mustard")
make_sandwich("Carrots")
|
<commit_before><commit_msg>Add solution to exercise 8.12.<commit_after>
|
# 8-12. Sandwiches
def make_sandwich(*ingredients):
"""Print the ingredients in the sandwich"""
print ("Making a sandwich with the following ingredients:")
for ingredient in ingredients:
print("\t- " + ingredient)
print("\n")
make_sandwich("Flatbread", "American Cheese", "Green Peppers")
make_sandwich("Italian bread", "Swiss cheese", "Avocado", "Mustard")
make_sandwich("Carrots")
|
Add solution to exercise 8.12.
# 8-12. Sandwiches
def make_sandwich(*ingredients):
"""Print the ingredients in the sandwich"""
print ("Making a sandwich with the following ingredients:")
for ingredient in ingredients:
print("\t- " + ingredient)
print("\n")
make_sandwich("Flatbread", "American Cheese", "Green Peppers")
make_sandwich("Italian bread", "Swiss cheese", "Avocado", "Mustard")
make_sandwich("Carrots")
|
<commit_before><commit_msg>Add solution to exercise 8.12.<commit_after># 8-12. Sandwiches
def make_sandwich(*ingredients):
"""Print the ingredients in the sandwich"""
print ("Making a sandwich with the following ingredients:")
for ingredient in ingredients:
print("\t- " + ingredient)
print("\n")
make_sandwich("Flatbread", "American Cheese", "Green Peppers")
make_sandwich("Italian bread", "Swiss cheese", "Avocado", "Mustard")
make_sandwich("Carrots")
|
|
12c2e1bfee2a6a8dae3dd4f38c9a6577f4e40f1a
|
lms/tests.py
|
lms/tests.py
|
"""Tests for the lms module itself."""
from django.test import TestCase
from edxmako import add_lookup, LOOKUP
from lms import startup
class TemplateLookupTests(TestCase):
"""
Tests for TemplateLookup.
"""
def test_add_lookup_to_main(self):
"""Test that any template directories added are not cleared when microsites are enabled."""
add_lookup('main', 'external_module', __name__)
directories = LOOKUP['main'].directories
self.assertEqual(len([dir for dir in directories if 'external_module' in dir]), 1)
# This should not clear the directories list
startup.enable_microsites()
directories = LOOKUP['main'].directories
self.assertEqual(len([dir for dir in directories if 'external_module' in dir]), 1)
|
Test TemplateLookup dirs are not cleared during lms startup.
|
Test TemplateLookup dirs are not cleared during lms startup.
LMS-2498
|
Python
|
agpl-3.0
|
ahmadiga/min_edx,mushtaqak/edx-platform,ferabra/edx-platform,hastexo/edx-platform,kursitet/edx-platform,kmoocdev/edx-platform,shubhdev/openedx,y12uc231/edx-platform,cecep-edu/edx-platform,mtlchun/edx,Stanford-Online/edx-platform,nanolearningllc/edx-platform-cypress,chudaol/edx-platform,devs1991/test_edx_docmode,ahmedaljazzar/edx-platform,bitifirefly/edx-platform,JCBarahona/edX,Livit/Livit.Learn.EdX,nttks/jenkins-test,mjirayu/sit_academy,devs1991/test_edx_docmode,etzhou/edx-platform,motion2015/a3,ferabra/edx-platform,unicri/edx-platform,openfun/edx-platform,mitocw/edx-platform,edx-solutions/edx-platform,jruiperezv/ANALYSE,cselis86/edx-platform,torchingloom/edx-platform,naresh21/synergetics-edx-platform,gymnasium/edx-platform,zofuthan/edx-platform,wwj718/ANALYSE,jamiefolsom/edx-platform,DefyVentures/edx-platform,doganov/edx-platform,msegado/edx-platform,jolyonb/edx-platform,mahendra-r/edx-platform,kxliugang/edx-platform,teltek/edx-platform,chauhanhardik/populo_2,Endika/edx-platform,mbareta/edx-platform-ft,cecep-edu/edx-platform,zerobatu/edx-platform,JioEducation/edx-platform,jzoldak/edx-platform,jelugbo/tundex,nanolearning/edx-platform,jonathan-beard/edx-platform,shabab12/edx-platform,eduNEXT/edx-platform,leansoft/edx-platform,tiagochiavericosta/edx-platform,franosincic/edx-platform,WatanabeYasumasa/edx-platform,ahmadiga/min_edx,nttks/edx-platform,dkarakats/edx-platform,olexiim/edx-platform,xingyepei/edx-platform,shashank971/edx-platform,jelugbo/tundex,inares/edx-platform,MakeHer/edx-platform,jazkarta/edx-platform,jamiefolsom/edx-platform,4eek/edx-platform,antoviaque/edx-platform,utecuy/edx-platform,DNFcode/edx-platform,xinjiguaike/edx-platform,nagyistoce/edx-platform,jazkarta/edx-platform,DNFcode/edx-platform,nttks/jenkins-test,Kalyzee/edx-platform,pomegranited/edx-platform,J861449197/edx-platform,4eek/edx-platform,abdoosh00/edraak,abdoosh00/edraak,halvertoluke/edx-platform,cyanna/edx-platform,JCBarahona/edX,antonve/s4-project-mooc,dcosentino/edx-platform,jbassen/edx-platform,ampax/edx-platform-backup,a-parhom/edx-platform,leansoft/edx-platform,zhenzhai/edx-platform,nagyistoce/edx-platform,Edraak/edx-platform,shubhdev/openedx,WatanabeYasumasa/edx-platform,defance/edx-platform,chauhanhardik/populo,longmen21/edx-platform,arbrandes/edx-platform,zubair-arbi/edx-platform,tanmaykm/edx-platform,eemirtekin/edx-platform,doismellburning/edx-platform,DNFcode/edx-platform,DNFcode/edx-platform,nikolas/edx-platform,louyihua/edx-platform,synergeticsedx/deployment-wipro,hastexo/edx-platform,kmoocdev/edx-platform,wwj718/edx-platform,prarthitm/edxplatform,antoviaque/edx-platform,tanmaykm/edx-platform,simbs/edx-platform,xinjiguaike/edx-platform,xuxiao19910803/edx,msegado/edx-platform,xinjiguaike/edx-platform,bitifirefly/edx-platform,pabloborrego93/edx-platform,alexthered/kienhoc-platform,nagyistoce/edx-platform,lduarte1991/edx-platform,edx/edx-platform,motion2015/a3,prarthitm/edxplatform,olexiim/edx-platform,eemirtekin/edx-platform,ZLLab-Mooc/edx-platform,vismartltd/edx-platform,beacloudgenius/edx-platform,kmoocdev2/edx-platform,vismartltd/edx-platform,mbareta/edx-platform-ft,shubhdev/edxOnBaadal,leansoft/edx-platform,edx/edx-platform,jruiperezv/ANALYSE,motion2015/edx-platform,franosincic/edx-platform,proversity-org/edx-platform,EDUlib/edx-platform,LICEF/edx-platform,doganov/edx-platform,chauhanhardik/populo_2,teltek/edx-platform,philanthropy-u/edx-platform,rismalrv/edx-platform,arbrandes/edx-platform,deepsrijit1105/edx-platform,zadgroup/edx-platform,a-parhom/edx-platform,vismartltd/edx-platform,morenopc/edx-p
latform,dsajkl/reqiop,jazkarta/edx-platform-for-isc,alu042/edx-platform,B-MOOC/edx-platform,zubair-arbi/edx-platform,nttks/jenkins-test,teltek/edx-platform,iivic/BoiseStateX,pabloborrego93/edx-platform,IONISx/edx-platform,beni55/edx-platform,kamalx/edx-platform,atsolakid/edx-platform,Edraak/edraak-platform,jazkarta/edx-platform-for-isc,nanolearningllc/edx-platform-cypress-2,jazkarta/edx-platform-for-isc,dcosentino/edx-platform,eestay/edx-platform,benpatterson/edx-platform,IONISx/edx-platform,shurihell/testasia,ak2703/edx-platform,simbs/edx-platform,ferabra/edx-platform,doismellburning/edx-platform,jelugbo/tundex,ESOedX/edx-platform,lduarte1991/edx-platform,Shrhawk/edx-platform,rue89-tech/edx-platform,Kalyzee/edx-platform,chrisndodge/edx-platform,shurihell/testasia,miptliot/edx-platform,prarthitm/edxplatform,nttks/jenkins-test,morenopc/edx-platform,arbrandes/edx-platform,andyzsf/edx,antoviaque/edx-platform,jolyonb/edx-platform,bigdatauniversity/edx-platform,solashirai/edx-platform,ahmadiga/min_edx,edry/edx-platform,romain-li/edx-platform,knehez/edx-platform,ovnicraft/edx-platform,jamesblunt/edx-platform,mahendra-r/edx-platform,nikolas/edx-platform,jswope00/griffinx,AkA84/edx-platform,eduNEXT/edunext-platform,halvertoluke/edx-platform,jazztpt/edx-platform,mbareta/edx-platform-ft,shashank971/edx-platform,raccoongang/edx-platform,nttks/edx-platform,martynovp/edx-platform,amir-qayyum-khan/edx-platform,appsembler/edx-platform,atsolakid/edx-platform,don-github/edx-platform,shubhdev/edxOnBaadal,tanmaykm/edx-platform,atsolakid/edx-platform,Semi-global/edx-platform,IndonesiaX/edx-platform,ahmadio/edx-platform,Stanford-Online/edx-platform,morenopc/edx-platform,a-parhom/edx-platform,DNFcode/edx-platform,eemirtekin/edx-platform,zofuthan/edx-platform,polimediaupv/edx-platform,chauhanhardik/populo_2,jazkarta/edx-platform-for-isc,atsolakid/edx-platform,Edraak/edx-platform,pabloborrego93/edx-platform,tiagochiavericosta/edx-platform,tiagochiavericosta/edx-platform,waheedahmed/edx-platform,tanmaykm/edx-platform,sudheerchintala/LearnEraPlatForm,Edraak/edraak-platform,jazkarta/edx-platform,valtech-mooc/edx-platform,xingyepei/edx-platform,SravanthiSinha/edx-platform,cecep-edu/edx-platform,beacloudgenius/edx-platform,rue89-tech/edx-platform,DefyVentures/edx-platform,doganov/edx-platform,valtech-mooc/edx-platform,hkawasaki/kawasaki-aio8-2,fly19890211/edx-platform,nikolas/edx-platform,louyihua/edx-platform,xinjiguaike/edx-platform,jelugbo/tundex,xuxiao19910803/edx,ahmadio/edx-platform,alexthered/kienhoc-platform,hamzehd/edx-platform,shashank971/edx-platform,Shrhawk/edx-platform,beni55/edx-platform,chand3040/cloud_that,Kalyzee/edx-platform,UXE/local-edx,jazkarta/edx-platform,ferabra/edx-platform,auferack08/edx-platform,procangroup/edx-platform,kamalx/edx-platform,Semi-global/edx-platform,ahmadiga/min_edx,cyanna/edx-platform,Softmotions/edx-platform,inares/edx-platform,andyzsf/edx,B-MOOC/edx-platform,mcgachey/edx-platform,rhndg/openedx,pomegranited/edx-platform,carsongee/edx-platform,edry/edx-platform,jbzdak/edx-platform,abdoosh00/edraak,JioEducation/edx-platform,xuxiao19910803/edx,edx/edx-platform,SravanthiSinha/edx-platform,proversity-org/edx-platform,motion2015/a3,Livit/Livit.Learn.EdX,nttks/edx-platform,yokose-ks/edx-platform,chand3040/cloud_that,RPI-OPENEDX/edx-platform,antonve/s4-project-mooc,ubc/edx-platform,motion2015/edx-platform,TeachAtTUM/edx-platform,kmoocdev/edx-platform,hkawasaki/kawasaki-aio8-0,TeachAtTUM/edx-platform,Softmotions/edx-platform,franosincic/edx-platform,EDUlib/edx-platform,MSOpenTech/edx-
platform,xuxiao19910803/edx-platform,cognitiveclass/edx-platform,xuxiao19910803/edx,IONISx/edx-platform,pepeportela/edx-platform,IONISx/edx-platform,deepsrijit1105/edx-platform,wwj718/ANALYSE,bdero/edx-platform,Unow/edx-platform,rhndg/openedx,fintech-circle/edx-platform,RPI-OPENEDX/edx-platform,adoosii/edx-platform,utecuy/edx-platform,valtech-mooc/edx-platform,peterm-itr/edx-platform,TeachAtTUM/edx-platform,chauhanhardik/populo_2,vikas1885/test1,deepsrijit1105/edx-platform,OmarIthawi/edx-platform,Edraak/circleci-edx-platform,zofuthan/edx-platform,jjmiranda/edx-platform,hkawasaki/kawasaki-aio8-1,cyanna/edx-platform,sameetb-cuelogic/edx-platform-test,B-MOOC/edx-platform,SivilTaram/edx-platform,jswope00/griffinx,eestay/edx-platform,iivic/BoiseStateX,hkawasaki/kawasaki-aio8-0,CourseTalk/edx-platform,nanolearningllc/edx-platform-cypress-2,MakeHer/edx-platform,marcore/edx-platform,xingyepei/edx-platform,hastexo/edx-platform,xuxiao19910803/edx-platform,nanolearning/edx-platform,wwj718/edx-platform,yokose-ks/edx-platform,CredoReference/edx-platform,analyseuc3m/ANALYSE-v1,chudaol/edx-platform,Ayub-Khan/edx-platform,solashirai/edx-platform,msegado/edx-platform,jbassen/edx-platform,sameetb-cuelogic/edx-platform-test,playm2mboy/edx-platform,don-github/edx-platform,analyseuc3m/ANALYSE-v1,marcore/edx-platform,don-github/edx-platform,olexiim/edx-platform,edx-solutions/edx-platform,y12uc231/edx-platform,BehavioralInsightsTeam/edx-platform,xuxiao19910803/edx-platform,LearnEra/LearnEraPlaftform,ampax/edx-platform-backup,don-github/edx-platform,lduarte1991/edx-platform,gsehub/edx-platform,fintech-circle/edx-platform,ampax/edx-platform-backup,OmarIthawi/edx-platform,zofuthan/edx-platform,halvertoluke/edx-platform,cselis86/edx-platform,martynovp/edx-platform,polimediaupv/edx-platform,mtlchun/edx,kursitet/edx-platform,vikas1885/test1,utecuy/edx-platform,raccoongang/edx-platform,devs1991/test_edx_docmode,10clouds/edx-platform,jbassen/edx-platform,sudheerchintala/LearnEraPlatForm,knehez/edx-platform,jswope00/GAI,pomegranited/edx-platform,romain-li/edx-platform,yokose-ks/edx-platform,4eek/edx-platform,ovnicraft/edx-platform,halvertoluke/edx-platform,kmoocdev2/edx-platform,zerobatu/edx-platform,don-github/edx-platform,kxliugang/edx-platform,waheedahmed/edx-platform,hmcmooc/muddx-platform,B-MOOC/edx-platform,cpennington/edx-platform,ovnicraft/edx-platform,chauhanhardik/populo,martynovp/edx-platform,hamzehd/edx-platform,MakeHer/edx-platform,louyihua/edx-platform,Edraak/edraak-platform,beacloudgenius/edx-platform,bitifirefly/edx-platform,devs1991/test_edx_docmode,stvstnfrd/edx-platform,gsehub/edx-platform,kxliugang/edx-platform,amir-qayyum-khan/edx-platform,Semi-global/edx-platform,kursitet/edx-platform,polimediaupv/edx-platform,carsongee/edx-platform,sameetb-cuelogic/edx-platform-test,cognitiveclass/edx-platform,appliedx/edx-platform,cselis86/edx-platform,mushtaqak/edx-platform,hkawasaki/kawasaki-aio8-2,leansoft/edx-platform,jamesblunt/edx-platform,hamzehd/edx-platform,rhndg/openedx,valtech-mooc/edx-platform,hmcmooc/muddx-platform,SravanthiSinha/edx-platform,zadgroup/edx-platform,nagyistoce/edx-platform,cecep-edu/edx-platform,jjmiranda/edx-platform,philanthropy-u/edx-platform,zofuthan/edx-platform,Shrhawk/edx-platform,tiagochiavericosta/edx-platform,Ayub-Khan/edx-platform,zhenzhai/edx-platform,ubc/edx-platform,kxliugang/edx-platform,J861449197/edx-platform,antoviaque/edx-platform,jbzdak/edx-platform,yokose-ks/edx-platform,SravanthiSinha/edx-platform,Livit/Livit.Learn.EdX,mcgachey/edx-platform,msegado/edx-platform,torc
hingloom/edx-platform,appsembler/edx-platform,nikolas/edx-platform,mbareta/edx-platform-ft,vasyarv/edx-platform,waheedahmed/edx-platform,ESOedX/edx-platform,unicri/edx-platform,synergeticsedx/deployment-wipro,jonathan-beard/edx-platform,vikas1885/test1,proversity-org/edx-platform,AkA84/edx-platform,abdoosh00/edraak,adoosii/edx-platform,beni55/edx-platform,kxliugang/edx-platform,mtlchun/edx,jamesblunt/edx-platform,alexthered/kienhoc-platform,etzhou/edx-platform,eduNEXT/edunext-platform,CourseTalk/edx-platform,jjmiranda/edx-platform,franosincic/edx-platform,torchingloom/edx-platform,inares/edx-platform,kamalx/edx-platform,jazkarta/edx-platform,IndonesiaX/edx-platform,eemirtekin/edx-platform,hkawasaki/kawasaki-aio8-2,sameetb-cuelogic/edx-platform-test,gymnasium/edx-platform,fly19890211/edx-platform,waheedahmed/edx-platform,y12uc231/edx-platform,shubhdev/openedx,mtlchun/edx,edry/edx-platform,cpennington/edx-platform,jzoldak/edx-platform,stvstnfrd/edx-platform,lduarte1991/edx-platform,longmen21/edx-platform,dsajkl/reqiop,zadgroup/edx-platform,Edraak/circleci-edx-platform,JCBarahona/edX,4eek/edx-platform,synergeticsedx/deployment-wipro,shashank971/edx-platform,franosincic/edx-platform,utecuy/edx-platform,mitocw/edx-platform,MakeHer/edx-platform,shurihell/testasia,jswope00/griffinx,Lektorium-LLC/edx-platform,deepsrijit1105/edx-platform,rhndg/openedx,dsajkl/reqiop,shubhdev/edx-platform,dkarakats/edx-platform,chudaol/edx-platform,procangroup/edx-platform,LearnEra/LearnEraPlaftform,xuxiao19910803/edx-platform,UXE/local-edx,romain-li/edx-platform,edry/edx-platform,doismellburning/edx-platform,DefyVentures/edx-platform,eduNEXT/edx-platform,jelugbo/tundex,ampax/edx-platform-backup,xingyepei/edx-platform,ZLLab-Mooc/edx-platform,nanolearning/edx-platform,iivic/BoiseStateX,rue89-tech/edx-platform,jzoldak/edx-platform,RPI-OPENEDX/edx-platform,pepeportela/edx-platform,shurihell/testasia,ubc/edx-platform,hmcmooc/muddx-platform,LICEF/edx-platform,mushtaqak/edx-platform,mahendra-r/edx-platform,mcgachey/edx-platform,alu042/edx-platform,pepeportela/edx-platform,fly19890211/edx-platform,CredoReference/edx-platform,cognitiveclass/edx-platform,SivilTaram/edx-platform,chudaol/edx-platform,ak2703/edx-platform,nttks/edx-platform,miptliot/edx-platform,chrisndodge/edx-platform,mjirayu/sit_academy,jbzdak/edx-platform,gymnasium/edx-platform,pomegranited/edx-platform,knehez/edx-platform,Kalyzee/edx-platform,IndonesiaX/edx-platform,fintech-circle/edx-platform,arifsetiawan/edx-platform,doganov/edx-platform,JCBarahona/edX,antonve/s4-project-mooc,naresh21/synergetics-edx-platform,bigdatauniversity/edx-platform,zhenzhai/edx-platform,dkarakats/edx-platform,jbassen/edx-platform,wwj718/ANALYSE,doismellburning/edx-platform,chauhanhardik/populo_2,cselis86/edx-platform,wwj718/ANALYSE,MSOpenTech/edx-platform,nanolearningllc/edx-platform-cypress,chauhanhardik/populo,antonve/s4-project-mooc,doismellburning/edx-platform,procangroup/edx-platform,fly19890211/edx-platform,jswope00/griffinx,doganov/edx-platform,raccoongang/edx-platform,nttks/jenkins-test,mcgachey/edx-platform,kamalx/edx-platform,adoosii/edx-platform,jbzdak/edx-platform,WatanabeYasumasa/edx-platform,playm2mboy/edx-platform,RPI-OPENEDX/edx-platform,AkA84/edx-platform,analyseuc3m/ANALYSE-v1,dkarakats/edx-platform,martynovp/edx-platform,Edraak/edx-platform,angelapper/edx-platform,hamzehd/edx-platform,appliedx/edx-platform,ovnicraft/edx-platform,itsjeyd/edx-platform,MSOpenTech/edx-platform,jswope00/GAI,UXE/local-edx,eestay/edx-platform,AkA84/edx-platform,edx-solutions/edx-platform
,CourseTalk/edx-platform,openfun/edx-platform,itsjeyd/edx-platform,proversity-org/edx-platform,vikas1885/test1,marcore/edx-platform,chauhanhardik/populo,zerobatu/edx-platform,angelapper/edx-platform,CourseTalk/edx-platform,jruiperezv/ANALYSE,devs1991/test_edx_docmode,eemirtekin/edx-platform,jonathan-beard/edx-platform,rismalrv/edx-platform,openfun/edx-platform,vismartltd/edx-platform,zhenzhai/edx-platform,xuxiao19910803/edx-platform,etzhou/edx-platform,ovnicraft/edx-platform,fintech-circle/edx-platform,nanolearning/edx-platform,LearnEra/LearnEraPlaftform,angelapper/edx-platform,kmoocdev2/edx-platform,bitifirefly/edx-platform,mushtaqak/edx-platform,jazkarta/edx-platform-for-isc,nanolearningllc/edx-platform-cypress,carsongee/edx-platform,alu042/edx-platform,beni55/edx-platform,Softmotions/edx-platform,morenopc/edx-platform,auferack08/edx-platform,cognitiveclass/edx-platform,shabab12/edx-platform,etzhou/edx-platform,rismalrv/edx-platform,hkawasaki/kawasaki-aio8-2,chrisndodge/edx-platform,J861449197/edx-platform,jamiefolsom/edx-platform,jazztpt/edx-platform,andyzsf/edx,ampax/edx-platform,appliedx/edx-platform,romain-li/edx-platform,jazztpt/edx-platform,dcosentino/edx-platform,MSOpenTech/edx-platform,ahmadio/edx-platform,andyzsf/edx,IndonesiaX/edx-platform,cyanna/edx-platform,utecuy/edx-platform,UOMx/edx-platform,unicri/edx-platform,benpatterson/edx-platform,torchingloom/edx-platform,Shrhawk/edx-platform,dsajkl/123,rismalrv/edx-platform,B-MOOC/edx-platform,ampax/edx-platform,martynovp/edx-platform,kmoocdev/edx-platform,sudheerchintala/LearnEraPlatForm,Softmotions/edx-platform,peterm-itr/edx-platform,kursitet/edx-platform,SivilTaram/edx-platform,louyihua/edx-platform,ahmadiga/min_edx,appliedx/edx-platform,Endika/edx-platform,vasyarv/edx-platform,valtech-mooc/edx-platform,jonathan-beard/edx-platform,itsjeyd/edx-platform,Ayub-Khan/edx-platform,beni55/edx-platform,mitocw/edx-platform,shashank971/edx-platform,miptliot/edx-platform,Unow/edx-platform,Edraak/edraak-platform,devs1991/test_edx_docmode,stvstnfrd/edx-platform,dcosentino/edx-platform,carsongee/edx-platform,halvertoluke/edx-platform,appsembler/edx-platform,procangroup/edx-platform,Stanford-Online/edx-platform,mjirayu/sit_academy,hkawasaki/kawasaki-aio8-1,jzoldak/edx-platform,bigdatauniversity/edx-platform,eestay/edx-platform,caesar2164/edx-platform,playm2mboy/edx-platform,playm2mboy/edx-platform,hamzehd/edx-platform,Unow/edx-platform,nanolearningllc/edx-platform-cypress,shurihell/testasia,benpatterson/edx-platform,chand3040/cloud_that,MSOpenTech/edx-platform,jamesblunt/edx-platform,nttks/edx-platform,cyanna/edx-platform,ak2703/edx-platform,ampax/edx-platform,polimediaupv/edx-platform,naresh21/synergetics-edx-platform,arifsetiawan/edx-platform,Lektorium-LLC/edx-platform,10clouds/edx-platform,UOMx/edx-platform,benpatterson/edx-platform,ZLLab-Mooc/edx-platform,Endika/edx-platform,BehavioralInsightsTeam/edx-platform,kursitet/edx-platform,shubhdev/edx-platform,cpennington/edx-platform,shubhdev/edxOnBaadal,Edraak/circleci-edx-platform,edry/edx-platform,defance/edx-platform,rismalrv/edx-platform,yokose-ks/edx-platform,Edraak/edx-platform,simbs/edx-platform,nikolas/edx-platform,shubhdev/edx-platform,ahmadio/edx-platform,sudheerchintala/LearnEraPlatForm,jjmiranda/edx-platform,IONISx/edx-platform,SravanthiSinha/edx-platform,chand3040/cloud_that,LICEF/edx-platform,morenopc/edx-platform,jswope00/GAI,zubair-arbi/edx-platform,adoosii/edx-platform,dsajkl/123,chand3040/cloud_that,romain-li/edx-platform,ak2703/edx-platform,shubhdev/edx-platform,longmen21/edx-
platform,tiagochiavericosta/edx-platform,msegado/edx-platform,cecep-edu/edx-platform,devs1991/test_edx_docmode,appsembler/edx-platform,hkawasaki/kawasaki-aio8-1,hkawasaki/kawasaki-aio8-0,LearnEra/LearnEraPlaftform,rue89-tech/edx-platform,caesar2164/edx-platform,Shrhawk/edx-platform,miptliot/edx-platform,mtlchun/edx,jswope00/GAI,alu042/edx-platform,nanolearning/edx-platform,y12uc231/edx-platform,caesar2164/edx-platform,iivic/BoiseStateX,eduNEXT/edunext-platform,vasyarv/edx-platform,Kalyzee/edx-platform,beacloudgenius/edx-platform,caesar2164/edx-platform,xinjiguaike/edx-platform,playm2mboy/edx-platform,xingyepei/edx-platform,pabloborrego93/edx-platform,antonve/s4-project-mooc,Softmotions/edx-platform,amir-qayyum-khan/edx-platform,y12uc231/edx-platform,Livit/Livit.Learn.EdX,Semi-global/edx-platform,EDUlib/edx-platform,bdero/edx-platform,arifsetiawan/edx-platform,Lektorium-LLC/edx-platform,CredoReference/edx-platform,wwj718/edx-platform,shubhdev/edxOnBaadal,alexthered/kienhoc-platform,hmcmooc/muddx-platform,4eek/edx-platform,shabab12/edx-platform,benpatterson/edx-platform,shubhdev/edx-platform,bdero/edx-platform,ahmedaljazzar/edx-platform,arbrandes/edx-platform,nagyistoce/edx-platform,shabab12/edx-platform,ferabra/edx-platform,jolyonb/edx-platform,synergeticsedx/deployment-wipro,Endika/edx-platform,iivic/BoiseStateX,Edraak/circleci-edx-platform,solashirai/edx-platform,ZLLab-Mooc/edx-platform,EDUlib/edx-platform,naresh21/synergetics-edx-platform,mitocw/edx-platform,mcgachey/edx-platform,zhenzhai/edx-platform,jswope00/griffinx,beacloudgenius/edx-platform,pepeportela/edx-platform,Lektorium-LLC/edx-platform,inares/edx-platform,wwj718/ANALYSE,motion2015/edx-platform,defance/edx-platform,gsehub/edx-platform,bdero/edx-platform,mjirayu/sit_academy,chauhanhardik/populo,edx/edx-platform,waheedahmed/edx-platform,hkawasaki/kawasaki-aio8-0,cognitiveclass/edx-platform,stvstnfrd/edx-platform,motion2015/edx-platform,DefyVentures/edx-platform,peterm-itr/edx-platform,gymnasium/edx-platform,rhndg/openedx,bigdatauniversity/edx-platform,JioEducation/edx-platform,eduNEXT/edunext-platform,openfun/edx-platform,olexiim/edx-platform,jamiefolsom/edx-platform,J861449197/edx-platform,LICEF/edx-platform,SivilTaram/edx-platform,atsolakid/edx-platform,knehez/edx-platform,shubhdev/edxOnBaadal,auferack08/edx-platform,mushtaqak/edx-platform,jamiefolsom/edx-platform,raccoongang/edx-platform,dkarakats/edx-platform,polimediaupv/edx-platform,vasyarv/edx-platform,ahmedaljazzar/edx-platform,dsajkl/123,philanthropy-u/edx-platform,hkawasaki/kawasaki-aio8-1,dsajkl/reqiop,chrisndodge/edx-platform,UXE/local-edx,vasyarv/edx-platform,defance/edx-platform,jazztpt/edx-platform,BehavioralInsightsTeam/edx-platform,marcore/edx-platform,hastexo/edx-platform,ZLLab-Mooc/edx-platform,kmoocdev2/edx-platform,CredoReference/edx-platform,fly19890211/edx-platform,pomegranited/edx-platform,ubc/edx-platform,adoosii/edx-platform,shubhdev/openedx,vikas1885/test1,jazztpt/edx-platform,solashirai/edx-platform,Ayub-Khan/edx-platform,eestay/edx-platform,edx-solutions/edx-platform,simbs/edx-platform,longmen21/edx-platform,OmarIthawi/edx-platform,SivilTaram/edx-platform,MakeHer/edx-platform,jbassen/edx-platform,a-parhom/edx-platform,mjirayu/sit_academy,DefyVentures/edx-platform,LICEF/edx-platform,10clouds/edx-platform,Edraak/circleci-edx-platform,BehavioralInsightsTeam/edx-platform,auferack08/edx-platform,cselis86/edx-platform,vismartltd/edx-platform,unicri/edx-platform,bigdatauniversity/edx-platform,xuxiao19910803/edx,chudaol/edx-platform,zadgroup/edx-platform,torc
hingloom/edx-platform,cpennington/edx-platform,unicri/edx-platform,J861449197/edx-platform,ampax/edx-platform,kamalx/edx-platform,kmoocdev/edx-platform,zadgroup/edx-platform,IndonesiaX/edx-platform,JioEducation/edx-platform,shubhdev/openedx,UOMx/edx-platform,jruiperezv/ANALYSE,inares/edx-platform,sameetb-cuelogic/edx-platform-test,simbs/edx-platform,nanolearningllc/edx-platform-cypress,peterm-itr/edx-platform,RPI-OPENEDX/edx-platform,zerobatu/edx-platform,Ayub-Khan/edx-platform,wwj718/edx-platform,arifsetiawan/edx-platform,itsjeyd/edx-platform,jolyonb/edx-platform,openfun/edx-platform,dsajkl/123,ubc/edx-platform,Unow/edx-platform,angelapper/edx-platform,OmarIthawi/edx-platform,longmen21/edx-platform,zerobatu/edx-platform,devs1991/test_edx_docmode,wwj718/edx-platform,ak2703/edx-platform,rue89-tech/edx-platform,etzhou/edx-platform,olexiim/edx-platform,jruiperezv/ANALYSE,ahmadio/edx-platform,kmoocdev2/edx-platform,arifsetiawan/edx-platform,nanolearningllc/edx-platform-cypress-2,prarthitm/edxplatform,jbzdak/edx-platform,ESOedX/edx-platform,jamesblunt/edx-platform,TeachAtTUM/edx-platform,dsajkl/123,Stanford-Online/edx-platform,philanthropy-u/edx-platform,10clouds/edx-platform,zubair-arbi/edx-platform,motion2015/a3,motion2015/a3,bitifirefly/edx-platform,nanolearningllc/edx-platform-cypress-2,eduNEXT/edx-platform,gsehub/edx-platform,zubair-arbi/edx-platform,Edraak/edx-platform,mahendra-r/edx-platform,ahmedaljazzar/edx-platform,solashirai/edx-platform,appliedx/edx-platform,WatanabeYasumasa/edx-platform,motion2015/edx-platform,AkA84/edx-platform,eduNEXT/edx-platform,knehez/edx-platform,analyseuc3m/ANALYSE-v1,mahendra-r/edx-platform,leansoft/edx-platform,alexthered/kienhoc-platform,teltek/edx-platform,UOMx/edx-platform,JCBarahona/edX,dcosentino/edx-platform,nanolearningllc/edx-platform-cypress-2,amir-qayyum-khan/edx-platform,Semi-global/edx-platform,ampax/edx-platform-backup,jonathan-beard/edx-platform,ESOedX/edx-platform
|
Test TemplateLookup dirs are not cleared during lms startup.
LMS-2498
|
"""Tests for the lms module itself."""
from django.test import TestCase
from edxmako import add_lookup, LOOKUP
from lms import startup
class TemplateLookupTests(TestCase):
"""
Tests for TemplateLookup.
"""
def test_add_lookup_to_main(self):
"""Test that any template directories added are not cleared when microsites are enabled."""
add_lookup('main', 'external_module', __name__)
directories = LOOKUP['main'].directories
self.assertEqual(len([dir for dir in directories if 'external_module' in dir]), 1)
# This should not clear the directories list
startup.enable_microsites()
directories = LOOKUP['main'].directories
self.assertEqual(len([dir for dir in directories if 'external_module' in dir]), 1)
|
<commit_before><commit_msg>Test TemplateLookup dirs are not cleared during lms startup.
LMS-2498<commit_after>
|
"""Tests for the lms module itself."""
from django.test import TestCase
from edxmako import add_lookup, LOOKUP
from lms import startup
class TemplateLookupTests(TestCase):
"""
Tests for TemplateLookup.
"""
def test_add_lookup_to_main(self):
"""Test that any template directories added are not cleared when microsites are enabled."""
add_lookup('main', 'external_module', __name__)
directories = LOOKUP['main'].directories
self.assertEqual(len([dir for dir in directories if 'external_module' in dir]), 1)
# This should not clear the directories list
startup.enable_microsites()
directories = LOOKUP['main'].directories
self.assertEqual(len([dir for dir in directories if 'external_module' in dir]), 1)
|
Test TemplateLookup dirs are not cleared during lms startup.
LMS-2498"""Tests for the lms module itself."""
from django.test import TestCase
from edxmako import add_lookup, LOOKUP
from lms import startup
class TemplateLookupTests(TestCase):
"""
Tests for TemplateLookup.
"""
def test_add_lookup_to_main(self):
"""Test that any template directories added are not cleared when microsites are enabled."""
add_lookup('main', 'external_module', __name__)
directories = LOOKUP['main'].directories
self.assertEqual(len([dir for dir in directories if 'external_module' in dir]), 1)
# This should not clear the directories list
startup.enable_microsites()
directories = LOOKUP['main'].directories
self.assertEqual(len([dir for dir in directories if 'external_module' in dir]), 1)
|
<commit_before><commit_msg>Test TemplateLookup dirs are not cleared during lms startup.
LMS-2498<commit_after>"""Tests for the lms module itself."""
from django.test import TestCase
from edxmako import add_lookup, LOOKUP
from lms import startup
class TemplateLookupTests(TestCase):
"""
Tests for TemplateLookup.
"""
def test_add_lookup_to_main(self):
"""Test that any template directories added are not cleared when microsites are enabled."""
add_lookup('main', 'external_module', __name__)
directories = LOOKUP['main'].directories
self.assertEqual(len([dir for dir in directories if 'external_module' in dir]), 1)
# This should not clear the directories list
startup.enable_microsites()
directories = LOOKUP['main'].directories
self.assertEqual(len([dir for dir in directories if 'external_module' in dir]), 1)
|
|
5dc0061d912bf660c7e7885bd2a42f5b92834b7b
|
scripts/update_taxonomies.py
|
scripts/update_taxonomies.py
|
import os
import json
from framework.mongo import set_up_storage
from website import settings
from website.project.taxonomies import Subject
from modularodm import Q, storage
from modularodm.exceptions import NoResultsFound, MultipleResultsFound
def update_taxonomies():
# Flat taxonomy is stored locally, read in here
with open(
os.path.join(
settings.APP_PATH,
'website', 'static', 'plos_taxonomy.json'
)
) as fp:
taxonomy = json.load(fp)
# For now, only PLOS taxonomy is loaded, other types possibly considered in the future
type = 'plos'
for subject_path in taxonomy.get('data'):
subjects = subject_path.split('_')
text = subjects[-1]
# Search for parent subject, get id if it exists
_parent = None
if len(subjects) > 1:
try:
_parent = Subject.find_one(
Q('text', 'eq', subjects[-2]) &
Q('type', 'eq', type)
)
except Exception:
_parent = None
parent_id = None
if _parent:
parent_id = _parent._id
try:
subject = Subject.find_one(
Q('text', 'eq', text) &
Q('type', 'eq', type)
)
except (NoResultsFound, MultipleResultsFound):
# If subject does not yet exist, create it
if parent_id:
subject = Subject(
type=type,
text=text,
parent_ids=[parent_id],
)
else:
subject = Subject(
type=type,
text=text,
parent_ids=[],
)
else:
# If subject does exist, append parent_id if not already added
subject.text = text
subject.type = type
if parent_id not in subject.parent_ids:
subject.parent_ids.append(parent_id)
subject.save()
if __name__ == '__main__':
set_up_storage([Subject], storage.MongoStorage)
update_taxonomies()
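The taxonomy file encodes the hierarchy as underscore-joined paths, so the subject text is the last segment and its immediate parent the second-to-last. A worked example (the path itself is hypothetical, inferred from the split logic above):
subject_path = "biology_genetics_epigenetics"
subjects = subject_path.split('_')
text = subjects[-1]         # "epigenetics": the Subject being upserted
parent_text = subjects[-2]  # "genetics": looked up as the parent
print(text, parent_text)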
|
Add a script that loads the taxonomies as a standalone thing
|
Add a script that loads the taxonomies as a standalone thing
|
Python
|
apache-2.0
|
acshi/osf.io,aaxelb/osf.io,chrisseto/osf.io,cwisecarver/osf.io,mluo613/osf.io,acshi/osf.io,caneruguz/osf.io,sloria/osf.io,CenterForOpenScience/osf.io,crcresearch/osf.io,mattclark/osf.io,Johnetordoff/osf.io,laurenrevere/osf.io,monikagrabowska/osf.io,felliott/osf.io,saradbowman/osf.io,pattisdr/osf.io,crcresearch/osf.io,brianjgeiger/osf.io,rdhyee/osf.io,hmoco/osf.io,chennan47/osf.io,samchrisinger/osf.io,rdhyee/osf.io,Nesiehr/osf.io,leb2dg/osf.io,mluo613/osf.io,chrisseto/osf.io,brianjgeiger/osf.io,adlius/osf.io,hmoco/osf.io,rdhyee/osf.io,monikagrabowska/osf.io,Johnetordoff/osf.io,erinspace/osf.io,mattclark/osf.io,pattisdr/osf.io,emetsger/osf.io,CenterForOpenScience/osf.io,cslzchen/osf.io,CenterForOpenScience/osf.io,TomBaxter/osf.io,felliott/osf.io,cslzchen/osf.io,Johnetordoff/osf.io,pattisdr/osf.io,emetsger/osf.io,adlius/osf.io,acshi/osf.io,caseyrollins/osf.io,mfraezz/osf.io,rdhyee/osf.io,binoculars/osf.io,brianjgeiger/osf.io,binoculars/osf.io,mluo613/osf.io,monikagrabowska/osf.io,crcresearch/osf.io,cslzchen/osf.io,baylee-d/osf.io,cwisecarver/osf.io,felliott/osf.io,TomBaxter/osf.io,emetsger/osf.io,hmoco/osf.io,samchrisinger/osf.io,mfraezz/osf.io,baylee-d/osf.io,icereval/osf.io,acshi/osf.io,caneruguz/osf.io,mattclark/osf.io,cwisecarver/osf.io,samchrisinger/osf.io,alexschiller/osf.io,alexschiller/osf.io,monikagrabowska/osf.io,HalcyonChimera/osf.io,sloria/osf.io,brianjgeiger/osf.io,alexschiller/osf.io,caseyrollins/osf.io,chennan47/osf.io,erinspace/osf.io,Nesiehr/osf.io,aaxelb/osf.io,erinspace/osf.io,mfraezz/osf.io,HalcyonChimera/osf.io,HalcyonChimera/osf.io,mfraezz/osf.io,aaxelb/osf.io,leb2dg/osf.io,HalcyonChimera/osf.io,CenterForOpenScience/osf.io,mluo613/osf.io,monikagrabowska/osf.io,TomBaxter/osf.io,icereval/osf.io,emetsger/osf.io,leb2dg/osf.io,icereval/osf.io,cslzchen/osf.io,caneruguz/osf.io,samchrisinger/osf.io,cwisecarver/osf.io,hmoco/osf.io,laurenrevere/osf.io,chennan47/osf.io,chrisseto/osf.io,binoculars/osf.io,laurenrevere/osf.io,mluo613/osf.io,baylee-d/osf.io,Nesiehr/osf.io,Nesiehr/osf.io,caneruguz/osf.io,saradbowman/osf.io,adlius/osf.io,chrisseto/osf.io,alexschiller/osf.io,caseyrollins/osf.io,aaxelb/osf.io,acshi/osf.io,sloria/osf.io,adlius/osf.io,leb2dg/osf.io,Johnetordoff/osf.io,alexschiller/osf.io,felliott/osf.io
|
Add a script that loads the taxonomies as a standalone thing
|
import os
import json
from framework.mongo import set_up_storage
from website import settings
from website.project.taxonomies import Subject
from modularodm import Q, storage
from modularodm.exceptions import NoResultsFound, MultipleResultsFound
def update_taxonomies():
# Flat taxonomy is stored locally, read in here
with open(
os.path.join(
settings.APP_PATH,
'website', 'static', 'plos_taxonomy.json'
)
) as fp:
taxonomy = json.load(fp)
# For now, only PLOS taxonomy is loaded, other types possibly considered in the future
type = 'plos'
for subject_path in taxonomy.get('data'):
subjects = subject_path.split('_')
text = subjects[-1]
# Search for parent subject, get id if it exists
_parent = None
if len(subjects) > 1:
try:
_parent = Subject.find_one(
Q('text', 'eq', subjects[-2]) &
Q('type', 'eq', type)
)
except Exception:
_parent = None
parent_id = None
if _parent:
parent_id = _parent._id
try:
subject = Subject.find_one(
Q('text', 'eq', text) &
Q('type', 'eq', type)
)
except (NoResultsFound, MultipleResultsFound):
# If subject does not yet exist, create it
if parent_id:
subject = Subject(
type=type,
text=text,
parent_ids=[parent_id],
)
else:
subject = Subject(
type=type,
text=text,
parent_ids=[],
)
else:
# If subject does exist, append parent_id if not already added
subject.text = text
subject.type = type
if parent_id not in subject.parent_ids:
subject.parent_ids.append(parent_id)
subject.save()
if __name__ == '__main__':
set_up_storage([Subject], storage.MongoStorage)
update_taxonomies()
|
<commit_before><commit_msg>Add a script that loads the taxonomies as a standalone thing<commit_after>
|
import os
import json
from framework.mongo import set_up_storage
from website import settings
from website.project.taxonomies import Subject
from modularodm import Q, storage
from modularodm.exceptions import NoResultsFound, MultipleResultsFound
def update_taxonomies():
# Flat taxonomy is stored locally, read in here
with open(
os.path.join(
settings.APP_PATH,
'website', 'static', 'plos_taxonomy.json'
)
) as fp:
taxonomy = json.load(fp)
# For now, only PLOS taxonomy is loaded, other types possibly considered in the future
type = 'plos'
for subject_path in taxonomy.get('data'):
subjects = subject_path.split('_')
text = subjects[-1]
# Search for parent subject, get id if it exists
_parent = None
if len(subjects) > 1:
try:
_parent = Subject.find_one(
Q('text', 'eq', subjects[-2]) &
Q('type', 'eq', type)
)
except Exception:
_parent = None
parent_id = None
if _parent:
parent_id = _parent._id
try:
subject = Subject.find_one(
Q('text', 'eq', text) &
Q('type', 'eq', type)
)
except (NoResultsFound, MultipleResultsFound):
# If subject does not yet exist, create it
if parent_id:
subject = Subject(
type=type,
text=text,
parent_ids=[parent_id],
)
else:
subject = Subject(
type=type,
text=text,
parent_ids=[],
)
else:
# If subject does exist, append parent_id if not already added
subject.text = text
subject.type = type
if parent_id not in subject.parent_ids:
subject.parent_ids.append(parent_id)
subject.save()
if __name__ == '__main__':
set_up_storage([Subject], storage.MongoStorage)
update_taxonomies()
|
Add a script that loads the taxonomies as a standalone thing
import os
import json
from framework.mongo import set_up_storage
from website import settings
from website.project.taxonomies import Subject
from modularodm import Q, storage
from modularodm.exceptions import NoResultsFound, MultipleResultsFound
def update_taxonomies():
# Flat taxonomy is stored locally, read in here
with open(
os.path.join(
settings.APP_PATH,
'website', 'static', 'plos_taxonomy.json'
)
) as fp:
taxonomy = json.load(fp)
# For now, only PLOS taxonomy is loaded, other types possibly considered in the future
type = 'plos'
for subject_path in taxonomy.get('data'):
subjects = subject_path.split('_')
text = subjects[-1]
# Search for parent subject, get id if it exists
_parent = None
if len(subjects) > 1:
try:
_parent = Subject.find_one(
Q('text', 'eq', subjects[-2]) &
Q('type', 'eq', type)
)
except Exception:
_parent = None
parent_id = None
if _parent:
parent_id = _parent._id
try:
subject = Subject.find_one(
Q('text', 'eq', text) &
Q('type', 'eq', type)
)
except (NoResultsFound, MultipleResultsFound):
# If subject does not yet exist, create it
if parent_id:
subject = Subject(
type=type,
text=text,
parent_ids=[parent_id],
)
else:
subject = Subject(
type=type,
text=text,
parent_ids=[],
)
else:
# If subject does exist, append parent_id if not already added
subject.text = text
subject.type = type
if parent_id not in subject.parent_ids:
subject.parent_ids.append(parent_id)
subject.save()
if __name__ == '__main__':
set_up_storage([Subject], storage.MongoStorage)
update_taxonomies()
|
<commit_before><commit_msg>Add a script that loads the taxonomies as a standalone thing<commit_after>import os
import json
from framework.mongo import set_up_storage
from website import settings
from website.project.taxonomies import Subject
from modularodm import Q, storage
from modularodm.exceptions import NoResultsFound, MultipleResultsFound
def update_taxonomies():
# Flat taxonomy is stored locally, read in here
with open(
os.path.join(
settings.APP_PATH,
'website', 'static', 'plos_taxonomy.json'
)
) as fp:
taxonomy = json.load(fp)
# For now, only PLOS taxonomy is loaded, other types possibly considered in the future
type = 'plos'
for subject_path in taxonomy.get('data'):
subjects = subject_path.split('_')
text = subjects[-1]
# Search for parent subject, get id if it exists
_parent = None
if len(subjects) > 1:
try:
_parent = Subject.find_one(
Q('text', 'eq', subjects[-2]) &
Q('type', 'eq', type)
)
except Exception:
_parent = None
parent_id = None
if _parent:
parent_id = _parent._id
try:
subject = Subject.find_one(
Q('text', 'eq', text) &
Q('type', 'eq', type)
)
except (NoResultsFound, MultipleResultsFound):
# If subject does not yet exist, create it
if parent_id:
subject = Subject(
type=type,
text=text,
parent_ids=[parent_id],
)
else:
subject = Subject(
type=type,
text=text,
parent_ids=[],
)
else:
# If subject does exist, append parent_id if not already added
subject.text = text
subject.type = type
if parent_id not in subject.parent_ids:
subject.parent_ids.append(parent_id)
subject.save()
if __name__ == '__main__':
set_up_storage([Subject], storage.MongoStorage)
update_taxonomies()
|
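The parent-linking logic above is easiest to see in isolation. Below is a minimal, storage-free sketch of the same idea: each underscore-delimited path is split, the last component becomes the subject and the second-to-last its parent. Unlike the real script, which stores MongoDB _id values in parent_ids, this sketch keys subjects by text and stores parent texts directly; the sample paths are made up, not taken from plos_taxonomy.json.

def build_taxonomy(paths):
    # text -> {'text': ..., 'parent_ids': [...]}; parents are deduplicated
    subjects = {}
    for path in paths:
        parts = path.split('_')
        text = parts[-1]
        parent = parts[-2] if len(parts) > 1 else None
        node = subjects.setdefault(text, {'text': text, 'parent_ids': []})
        if parent is not None and parent not in node['parent_ids']:
            node['parent_ids'].append(parent)
    return subjects

if __name__ == '__main__':
    demo = ['biology', 'biology_genetics', 'biology_genetics_genomics']
    for text, node in sorted(build_taxonomy(demo).items()):
        print(text, '->', node['parent_ids'])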
|
a3ffef803d3bde1bb771217f3ed5dd4509a2c82c
|
tests/test_03_login.py
|
tests/test_03_login.py
|
"""Test login to an ICAT server.
"""
from __future__ import print_function
import pytest
import icat
import icat.config
# Try out three different users: root, useroffice, and acord. Normal
# users like acord might use a different authentication plugin than
# system users such as root and useroffice. We want to try out both cases.
@pytest.mark.parametrize("user", ["root", "useroffice", "acord"])
def test_login(icatconfigfile, user):
"""Login to the ICAT server.
"""
args = ["-c", icatconfigfile, "-s", user]
conf = icat.config.Config().getconfig(args)
client = icat.Client(conf.url, **conf.client_kwargs)
sessionId = client.login(conf.auth, conf.credentials)
assert sessionId
assert sessionId == client.sessionId
username = client.getUserName()
assert username == user
print("\nLogged in as %s to %s." % (user, conf.url))
client.logout()
assert client.sessionId is None
# Verify that the logout was effective, i.e. that the sessionId is
# invalidated.
client.sessionId = sessionId
with pytest.raises(icat.exception.ICATSessionError):
username = client.getUserName()
# Avoid a spurious SessionError at exit in the implicit logout()
client.sessionId = None
|
Add test to login to the ICAT server.
|
Add test to login to the ICAT server.
|
Python
|
apache-2.0
|
icatproject/python-icat
|
Add test to login to the ICAT server.
|
"""Test login to an ICAT server.
"""
from __future__ import print_function
import pytest
import icat
import icat.config
# Try out three different users: root, useroffice, and acord. Normal
# users like acord might use a different authentication plugin than
# system users such as root and useroffice. We want to try out both cases.
@pytest.mark.parametrize("user", ["root", "useroffice", "acord"])
def test_login(icatconfigfile, user):
"""Login to the ICAT server.
"""
args = ["-c", icatconfigfile, "-s", user]
conf = icat.config.Config().getconfig(args)
client = icat.Client(conf.url, **conf.client_kwargs)
sessionId = client.login(conf.auth, conf.credentials)
assert sessionId
assert sessionId == client.sessionId
username = client.getUserName()
assert username == user
print("\nLogged in as %s to %s." % (user, conf.url))
client.logout()
assert client.sessionId is None
# Verify that the logout was effective, i.e. that the sessionId is
# invalidated.
client.sessionId = sessionId
with pytest.raises(icat.exception.ICATSessionError):
username = client.getUserName()
# Avoid a spurious SessionError at exit in the implicit logout()
client.sessionId = None
|
<commit_before><commit_msg>Add test to login to the ICAT server.<commit_after>
|
"""Test login to an ICAT server.
"""
from __future__ import print_function
import pytest
import icat
import icat.config
# Try out three different users: root, useroffice, and acord. Normal
# users like acord might use a different authentication plugin than
# system users such as root and useroffice. We want to try out both cases.
@pytest.mark.parametrize("user", ["root", "useroffice", "acord"])
def test_login(icatconfigfile, user):
"""Login to the ICAT server.
"""
args = ["-c", icatconfigfile, "-s", user]
conf = icat.config.Config().getconfig(args)
client = icat.Client(conf.url, **conf.client_kwargs)
sessionId = client.login(conf.auth, conf.credentials)
assert sessionId
assert sessionId == client.sessionId
username = client.getUserName()
assert username == user
print("\nLogged in as %s to %s." % (user, conf.url))
client.logout()
assert client.sessionId is None
# Verify that the logout was effective, i.e. that the sessionId is
# invalidated.
client.sessionId = sessionId
with pytest.raises(icat.exception.ICATSessionError):
username = client.getUserName()
# Avoid a spurious SessionError at exit in the implicit logout()
client.sessionId = None
|
Add test to login to the ICAT server.
"""Test login to an ICAT server.
"""
from __future__ import print_function
import pytest
import icat
import icat.config
# Try out three different users: root, useroffice, and acord. Normal
# users like acord might use a different authentication plugin than
# system users such as root and useroffice. We want to try out both cases.
@pytest.mark.parametrize("user", ["root", "useroffice", "acord"])
def test_login(icatconfigfile, user):
"""Login to the ICAT server.
"""
args = ["-c", icatconfigfile, "-s", user]
conf = icat.config.Config().getconfig(args)
client = icat.Client(conf.url, **conf.client_kwargs)
sessionId = client.login(conf.auth, conf.credentials)
assert sessionId
assert sessionId == client.sessionId
username = client.getUserName()
assert username == user
print("\nLogged in as %s to %s." % (user, conf.url))
client.logout()
assert client.sessionId is None
# Verify that the logout was effective, i.e. that the sessionId is
# invalidated.
client.sessionId = sessionId
with pytest.raises(icat.exception.ICATSessionError):
username = client.getUserName()
# Avoid a spurious SessionError at exit in the implicit logout()
client.sessionId = None
|
<commit_before><commit_msg>Add test to login to the ICAT server.<commit_after>"""Test login to an ICAT server.
"""
from __future__ import print_function
import pytest
import icat
import icat.config
# Try out three different users: root, useroffice, and acord. Normal
# users like acord might use a different authentication plugin than
# system users such as root and useroffice. We want to try out both cases.
@pytest.mark.parametrize("user", ["root", "useroffice", "acord"])
def test_login(icatconfigfile, user):
"""Login to the ICAT server.
"""
args = ["-c", icatconfigfile, "-s", user]
conf = icat.config.Config().getconfig(args)
client = icat.Client(conf.url, **conf.client_kwargs)
sessionId = client.login(conf.auth, conf.credentials)
assert sessionId
assert sessionId == client.sessionId
username = client.getUserName()
assert username == user
print("\nLogged in as %s to %s." % (user, conf.url))
client.logout()
assert client.sessionId is None
# Verify that the logout was effective, i.e. that the sessionId is
# invalidated.
client.sessionId = sessionId
with pytest.raises(icat.exception.ICATSessionError):
username = client.getUserName()
# Avoid a spurious SessionError at exit in the implicit logout()
client.sessionId = None
|
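The test above needs a live ICAT server and a populated configuration file. The same assertion pattern can be rehearsed against a stub, which is useful when working on the test itself; FakeClient below is hypothetical and mimics only the handful of calls the real icat.Client receives here.

import pytest

class FakeClient:
    """Stand-in implementing just the login/getUserName/logout contract."""
    def __init__(self):
        self.sessionId = None
    def login(self, auth, credentials):
        self.sessionId = 'session-' + credentials['username']
        return self.sessionId
    def getUserName(self):
        if self.sessionId is None:
            raise RuntimeError('no valid session')
        return self.sessionId.split('-', 1)[1]
    def logout(self):
        self.sessionId = None

@pytest.mark.parametrize("user", ["root", "useroffice", "acord"])
def test_login_contract(user):
    client = FakeClient()
    sessionId = client.login('db', {'username': user, 'password': 'secret'})
    assert sessionId and sessionId == client.sessionId
    assert client.getUserName() == user
    client.logout()
    assert client.sessionId is None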
|
0777f77dc2f8a25da7cb572bf4f9e36c7f88fc26
|
tests/test_validate.py
|
tests/test_validate.py
|
"""tests/test_validate.py.
Tests to ensure hug's custom validation methods work as expected
Copyright (C) 2015 Timothy Edmund Crosley
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and
to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or
substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
import hug
TEST_SCHEMA = {'first': 'Timothy', 'place': 'Seattle'}
def test_all():
'''Test to ensure hug's all validation function works as expected to combine validators'''
assert not hug.validate.all(hug.validate.contains_one_of('first', 'year'),
hug.validate.contains_one_of('last', 'place'))(None, TEST_SCHEMA)
assert hug.validate.all(hug.validate.contains_one_of('last', 'year'),
hug.validate.contains_one_of('first', 'place'))(None, TEST_SCHEMA)
def test_any():
'''Test to ensure hug's any validation function works as expected to combine validators'''
assert not hug.validate.any(hug.validate.contains_one_of('last', 'year'),
hug.validate.contains_one_of('first', 'place'))(None, TEST_SCHEMA)
assert hug.validate.any(hug.validate.contains_one_of('last', 'year'),
hug.validate.contains_one_of('no', 'way'))(None, TEST_SCHEMA)
def test_contains_one_of():
'''Test to ensure hug's contains_one_of validation function works as expected to ensure presence of a field'''
assert hug.validate.contains_one_of('no', 'way')(None, TEST_SCHEMA)
assert not hug.validate.contains_one_of('last', 'place')(None, TEST_SCHEMA)
|
Implement tests for validate module
|
Implement tests for validate module
|
Python
|
mit
|
timothycrosley/hug,MuhammadAlkarouri/hug,timothycrosley/hug,timothycrosley/hug,MuhammadAlkarouri/hug,MuhammadAlkarouri/hug
|
Implement tests for validate module
|
"""tests/test_validate.py.
Tests to ensure hug's custom validation methods work as expected
Copyright (C) 2015 Timothy Edmund Crosley
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and
to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or
substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
import hug
TEST_SCHEMA = {'first': 'Timothy', 'place': 'Seattle'}
def test_all():
'''Test to ensure hug's all validation function works as expected to combine validators'''
assert not hug.validate.all(hug.validate.contains_one_of('first', 'year'),
hug.validate.contains_one_of('last', 'place'))(None, TEST_SCHEMA)
assert hug.validate.all(hug.validate.contains_one_of('last', 'year'),
hug.validate.contains_one_of('first', 'place'))(None, TEST_SCHEMA)
def test_any():
'''Test to ensure hug's any validation function works as expected to combine validators'''
assert not hug.validate.any(hug.validate.contains_one_of('last', 'year'),
hug.validate.contains_one_of('first', 'place'))(None, TEST_SCHEMA)
assert hug.validate.any(hug.validate.contains_one_of('last', 'year'),
hug.validate.contains_one_of('no', 'way'))(None, TEST_SCHEMA)
def test_contains_one_of():
'''Test to ensure hug's contains_one_of validation function works as expected to ensure presence of a field'''
assert hug.validate.contains_one_of('no', 'way')(None, TEST_SCHEMA)
assert not hug.validate.contains_one_of('last', 'place')(None, TEST_SCHEMA)
|
<commit_before><commit_msg>Implement tests for validate module<commit_after>
|
"""tests/test_validate.py.
Tests to ensure hug's custom validation methods work as expected
Copyright (C) 2015 Timothy Edmund Crosley
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and
to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or
substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
import hug
TEST_SCHEMA = {'first': 'Timothy', 'place': 'Seattle'}
def test_all():
'''Test to ensure hug's all validation function works as expected to combine validators'''
assert not hug.validate.all(hug.validate.contains_one_of('first', 'year'),
hug.validate.contains_one_of('last', 'place'))(None, TEST_SCHEMA)
assert hug.validate.all(hug.validate.contains_one_of('last', 'year'),
hug.validate.contains_one_of('first', 'place'))(None, TEST_SCHEMA)
def test_any():
'''Test to ensure hug's any validation function works as expected to combine validators'''
assert not hug.validate.any(hug.validate.contains_one_of('last', 'year'),
hug.validate.contains_one_of('first', 'place'))(None, TEST_SCHEMA)
assert hug.validate.any(hug.validate.contains_one_of('last', 'year'),
hug.validate.contains_one_of('no', 'way'))(None, TEST_SCHEMA)
def test_contains_one_of():
'''Test to ensure hug's contains_one_of validation function works as expected to ensure presence of a field'''
assert hug.validate.contains_one_of('no', 'way')(None, TEST_SCHEMA)
assert not hug.validate.contains_one_of('last', 'place')(None, TEST_SCHEMA)
|
Implement tests for validate module
"""tests/test_validate.py.
Tests to ensure hug's custom validation methods work as expected
Copyright (C) 2015 Timothy Edmund Crosley
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and
to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or
substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
import hug
TEST_SCHEMA = {'first': 'Timothy', 'place': 'Seattle'}
def test_all():
'''Test to ensure hug's all validation function works as expected to combine validators'''
assert not hug.validate.all(hug.validate.contains_one_of('first', 'year'),
hug.validate.contains_one_of('last', 'place'))(None, TEST_SCHEMA)
assert hug.validate.all(hug.validate.contains_one_of('last', 'year'),
hug.validate.contains_one_of('first', 'place'))(None, TEST_SCHEMA)
def test_any():
'''Test to ensure hug's any validation function works as expected to combine validators'''
assert not hug.validate.any(hug.validate.contains_one_of('last', 'year'),
hug.validate.contains_one_of('first', 'place'))(None, TEST_SCHEMA)
assert hug.validate.any(hug.validate.contains_one_of('last', 'year'),
hug.validate.contains_one_of('no', 'way'))(None, TEST_SCHEMA)
def test_contains_one_of():
'''Test to ensure hug's contains_one_of validation function works as expected to ensure presence of a field'''
assert hug.validate.contains_one_of('no', 'way')(None, TEST_SCHEMA)
assert not hug.validate.contains_one_of('last', 'place')(None, TEST_SCHEMA)
|
<commit_before><commit_msg>Implement tests for validate module<commit_after>"""tests/test_validate.py.
Tests to ensure hug's custom validation methods work as expected
Copyright (C) 2015 Timothy Edmund Crosley
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and
to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or
substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
import hug
TEST_SCHEMA = {'first': 'Timothy', 'place': 'Seattle'}
def test_all():
'''Test to ensure hug's all validation function works as expected to combine validators'''
assert not hug.validate.all(hug.validate.contains_one_of('first', 'year'),
hug.validate.contains_one_of('last', 'place'))(None, TEST_SCHEMA)
assert hug.validate.all(hug.validate.contains_one_of('last', 'year'),
hug.validate.contains_one_of('first', 'place'))(None, TEST_SCHEMA)
def test_any():
'''Test to ensure hug's any validation function works as expected to combine validators'''
assert not hug.validate.any(hug.validate.contains_one_of('last', 'year'),
hug.validate.contains_one_of('first', 'place'))(None, TEST_SCHEMA)
assert hug.validate.any(hug.validate.contains_one_of('last', 'year'),
hug.validate.contains_one_of('no', 'way'))(None, TEST_SCHEMA)
def test_contains_one_of():
'''Test to ensure hug's contains_one_of validation function works as expected to ensure presence of a field'''
assert hug.validate.contains_one_of('no', 'way')(None, TEST_SCHEMA)
assert not hug.validate.contains_one_of('last', 'place')(None, TEST_SCHEMA)
|
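The calling convention these tests pin down is: a validator is called as validator(request, data) and returns a dict of errors on failure or a falsy value on success. A reimplementation sketch of the three combinators, inferred from the assertions above; the real module exposes them as hug.validate.all and hug.validate.any, renamed here to all_of and any_of to avoid shadowing the builtins.

def contains_one_of(*fields):
    def validator(request, data):
        if not any(field in data for field in fields):
            # failure: report an error for every field that was searched for
            return {field: 'one of these fields is required' for field in fields}
    return validator

def all_of(*validators):
    # passes only if every validator passes; returns the first errors found
    def validator(request, data):
        for check in validators:
            errors = check(request, data)
            if errors:
                return errors
    return validator

def any_of(*validators):
    # passes if at least one validator passes; otherwise returns merged errors
    def validator(request, data):
        merged = {}
        for check in validators:
            errors = check(request, data)
            if not errors:
                return None
            merged.update(errors)
        return merged
    return validator

TEST_SCHEMA = {'first': 'Timothy', 'place': 'Seattle'}
assert not contains_one_of('last', 'place')(None, TEST_SCHEMA)
assert any_of(contains_one_of('last', 'year'),
              contains_one_of('first', 'place'))(None, TEST_SCHEMA) is None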
|
da1e46d550669970ce0188d417af3764a046a33f
|
sara_flexbe_states/src/sara_flexbe_states/WonderlandAddPerson.py
|
sara_flexbe_states/src/sara_flexbe_states/WonderlandAddPerson.py
|
#!/usr/bin/env python
# encoding=utf8
import json
import requests
from flexbe_core import EventState, Logger
"""
Created on 17/05/2018
@author: Lucas Maurice
"""
class WonderlandAddPerson(EventState):
'''
Add a person.
># entity sara_msgs/Entity
<= done           return when the person was added successfully
<= already_exit   return when the entity already exists
<= bad_request    return when the server rejects the request data (HTTP 400)
<= error          return when a server error or network failure occurs
'''
def __init__(self):
# See example_state.py for basic explanations.
super(WonderlandAddPerson, self).__init__(input_keys=['entity'],
outcomes=['done', 'already_exit', 'bad_request', 'error'])
def execute(self, userdata):
# Generate URL to contact
url = "http://wonderland:8000/api/people/"
entity = userdata.entity
data = {'peopleRecognitionId': entity.face.id}
if entity.color is not None:
data.update({'peopleColor': entity.color})
if entity.pose is not None:
data.update({'peoplePose': entity.pose})
if entity.poseProbability is not None:
data.update({'peoplePoseAccuracy': entity.poseProbability})
if entity.face.gender is not None:
data.update({'peopleGender': entity.face.gender})
if entity.face.genderProbability is not None:
data.update({'peopleGenderAccuracy': entity.face.genderProbability})
if entity.face.emotion is not None:
data.update({'peopleEmotion': entity.face.emotion})
if entity.face.emotionProbability is not None:
data.update({'peopleEmotionAccuracy': entity.face.emotionProbability})
if entity.face.emotionProbability:
data.update({'peopleIsOperator': True})
else:
data.update({'peopleIsOperator': False})
if len(entity.aliases) > 0:
data.update({'peopleName': entity.aliases[0]})
# try the request
try:
response = requests.post(url, data=data)
if response.status_code == 400:
data = json.loads(response.content)
if 'peopleRecognitionId' in data and data['peopleRecognitionId'][
0] == u'people with this peopleRecognitionId already exists.':
return 'already_exit'
else:
return 'bad_request'
elif response.status_code == 500:
return 'error'
Logger.logwarn(response.status_code)
Logger.loginfo(response.content)
except requests.exceptions.RequestException as e:
Logger.logerr(e)
return 'error'
return 'done'
|
Add a step to add a person to the Wonderland database.
|
Add a step to add a person to the Wonderland database.
|
Python
|
bsd-3-clause
|
WalkingMachine/sara_behaviors,WalkingMachine/sara_behaviors
|
Add a step to add a person to the Wonderland database.
|
#!/usr/bin/env python
# encoding=utf8
import json
import requests
from flexbe_core import EventState, Logger
"""
Created on 17/05/2018
@author: Lucas Maurice
"""
class WonderlandAddPerson(EventState):
'''
Add a person.
># entity sara_msgs/Entity
<= done           return when the person was added successfully
<= already_exit   return when the entity already exists
<= bad_request    return when the server rejects the request data (HTTP 400)
<= error          return when a server error or network failure occurs
'''
def __init__(self):
# See example_state.py for basic explanations.
super(WonderlandAddPerson, self).__init__(input_keys=['entity'],
outcomes=['done', 'already_exit', 'bad_request', 'error'])
def execute(self, userdata):
# Generate URL to contact
url = "http://wonderland:8000/api/people/"
entity = userdata.entity
data = {'peopleRecognitionId': entity.face.id}
if entity.color is not None:
data.update({'peopleColor': entity.color})
if entity.pose is not None:
data.update({'peoplePose': entity.pose})
if entity.poseProbability is not None:
data.update({'peoplePoseAccuracy': entity.poseProbability})
if entity.face.gender is not None:
data.update({'peopleGender': entity.face.gender})
if entity.face.genderProbability is not None:
data.update({'peopleGenderAccuracy': entity.face.genderProbability})
if entity.face.emotion is not None:
data.update({'peopleEmotion': entity.face.emotion})
if entity.face.emotionProbability is not None:
data.update({'peopleEmotionAccuracy': entity.face.emotionProbability})
if entity.face.emotionProbability:
data.update({'peopleIsOperator': True})
else:
data.update({'peopleIsOperator': False})
if len(entity.aliases) > 0:
data.update({'peopleName': entity.aliases[0]})
# try the request
try:
response = requests.post(url, data=data)
if response.status_code == 400:
data = json.loads(response.content)
if 'peopleRecognitionId' in data and data['peopleRecognitionId'][
0] == u'people with this peopleRecognitionId already exists.':
return 'already_exit'
else:
return 'bad_request'
elif response.status_code == 500:
return 'error'
Logger.logwarn(response.status_code)
Logger.loginfo(response.content)
except requests.exceptions.RequestException as e:
Logger.logerr(e)
return 'error'
return 'done'
|
<commit_before><commit_msg>Add a step to add a person to the Wonderland database.<commit_after>
|
#!/usr/bin/env python
# encoding=utf8
import json
import requests
from flexbe_core import EventState, Logger
"""
Created on 17/05/2018
@author: Lucas Maurice
"""
class WonderlandAddPerson(EventState):
'''
Add a person.
># entity sara_msgs/Entity
<= done           return when the person was added successfully
<= already_exit   return when the entity already exists
<= bad_request    return when the server rejects the request data (HTTP 400)
<= error          return when a server error or network failure occurs
'''
def __init__(self):
# See example_state.py for basic explanations.
super(WonderlandAddPerson, self).__init__(input_keys=['entity'],
outcomes=['done', 'already_exit', 'bad_request', 'error'])
def execute(self, userdata):
# Generate URL to contact
url = "http://wonderland:8000/api/people/"
entity = userdata.entity
data = {'peopleRecognitionId': entity.face.id}
if entity.color is not None:
data.update({'peopleColor': entity.color})
if entity.pose is not None:
data.update({'peoplePose': entity.pose})
if entity.poseProbability is not None:
data.update({'peoplePoseAccuracy': entity.poseProbability})
if entity.face.gender is not None:
data.update({'peopleGender': entity.face.gender})
if entity.face.genderProbability is not None:
data.update({'peopleGenderAccuracy': entity.face.genderProbability})
if entity.face.emotion is not None:
data.update({'peopleEmotion': entity.face.emotion})
if entity.face.emotionProbability is not None:
data.update({'peopleEmotionAccuracy': entity.face.emotionProbability})
if entity.face.emotionProbability:
data.update({'peopleIsOperator': True})
else:
data.update({'peopleIsOperator': False})
if len(entity.aliases) > 0:
data.update({'peopleName': entity.aliases[0]})
# try the request
try:
response = requests.post(url, data=data)
if response.status_code == 400:
data = json.loads(response.content)
if 'peopleRecognitionId' in data and data['peopleRecognitionId'][
0] == u'people with this peopleRecognitionId already exists.':
return 'already_exit'
else:
return 'bad_request'
elif response.status_code == 500:
return 'error'
Logger.logwarn(response.status_code)
Logger.loginfo(response.content)
except requests.exceptions.RequestException as e:
Logger.logerr(e)
return 'error'
return 'done'
|
Add a step to add a person to the Wonderland database.
#!/usr/bin/env python
# encoding=utf8
import json
import requests
from flexbe_core import EventState, Logger
"""
Created on 17/05/2018
@author: Lucas Maurice
"""
class WonderlandAddPerson(EventState):
'''
Add a person.
># entity sara_msgs/Entity
<= done           return when the person was added successfully
<= already_exit   return when the entity already exists
<= bad_request    return when the server rejects the request data (HTTP 400)
<= error          return when a server error or network failure occurs
'''
def __init__(self):
# See example_state.py for basic explanations.
super(WonderlandAddPerson, self).__init__(input_keys=['entity'],
outcomes=['done', 'already_exit', 'bad_request', 'error'])
def execute(self, userdata):
# Generate URL to contact
url = "http://wonderland:8000/api/people/"
entity = userdata.entity
data = {'peopleRecognitionId': entity.face.id}
if entity.color is not None:
data.update({'peopleColor': entity.color})
if entity.pose is not None:
data.update({'peoplePose': entity.pose})
if entity.poseProbability is not None:
data.update({'peoplePoseAccuracy': entity.poseProbability})
if entity.face.gender is not None:
data.update({'peopleGender': entity.face.gender})
if entity.face.genderProbability is not None:
data.update({'peopleGenderAccuracy': entity.face.genderProbability})
if entity.face.emotion is not None:
data.update({'peopleEmotion': entity.face.emotion})
if entity.face.emotionProbability is not None:
data.update({'peopleEmotionAccuracy': entity.face.emotionProbability})
if entity.face.emotionProbability:
data.update({'peopleIsOperator': True})
else:
data.update({'peopleIsOperator': False})
if len(entity.aliases) > 0:
data.update({'peopleName': entity.aliases[0]})
# try the request
try:
response = requests.post(url, data=data)
if response.status_code == 400:
data = json.loads(response.content)
if 'peopleRecognitionId' in data and data['peopleRecognitionId'][
0] == u'people with this peopleRecognitionId already exists.':
return 'already_exit'
else:
return 'bad_request'
elif response.status_code == 500:
return 'error'
Logger.logwarn(response.status_code)
Logger.loginfo(response.content)
except requests.exceptions.RequestException as e:
Logger.logerr(e)
return 'error'
return 'done'
|
<commit_before><commit_msg>Add a step to add a person to the Wonderland database.<commit_after>#!/usr/bin/env python
# encoding=utf8
import json
import requests
from flexbe_core import EventState, Logger
"""
Created on 17/05/2018
@author: Lucas Maurice
"""
class WonderlandAddPerson(EventState):
'''
Add a person.
># entity sara_msgs/Entity
<= done           return when the person was added successfully
<= already_exit   return when the entity already exists
<= bad_request    return when the server rejects the request data (HTTP 400)
<= error          return when a server error or network failure occurs
'''
def __init__(self):
# See example_state.py for basic explanations.
super(WonderlandAddPerson, self).__init__(input_keys=['entity'],
outcomes=['done', 'already_exit', 'bad_request', 'error'])
def execute(self, userdata):
# Generate URL to contact
url = "http://wonderland:8000/api/people/"
entity = userdata.entity
data = {'peopleRecognitionId': entity.face.id}
if entity.color is not None:
data.update({'peopleColor': entity.color})
if entity.pose is not None:
data.update({'peoplePose': entity.pose})
if entity.poseProbability is not None:
data.update({'peoplePoseAccuracy': entity.poseProbability})
if entity.face.gender is not None:
data.update({'peopleGender': entity.face.gender})
if entity.face.genderProbability is not None:
data.update({'peopleGenderAccuracy': entity.face.genderProbability})
if entity.face.emotion is not None:
data.update({'peopleEmotion': entity.face.emotion})
if entity.face.emotionProbability is not None:
data.update({'peopleEmotionAccuracy': entity.face.emotionProbability})
if entity.face.emotionProbability:
data.update({'peopleIsOperator': True})
else:
data.update({'peopleIsOperator': False})
if len(entity.aliases) > 0:
data.update({'peopleName': entity.aliases[0]})
# try the request
try:
response = requests.post(url, data=data)
if response.status_code == 400:
data = json.loads(response.content)
if 'peopleRecognitionId' in data and data['peopleRecognitionId'][
0] == u'people with this peopleRecognitionId already exists.':
return 'already_exit'
else:
return 'bad_request'
elif response.status_code == 500:
return 'error'
Logger.logwarn(response.status_code)
Logger.loginfo(response.content)
except requests.exceptions.RequestException as e:
Logger.logerr(e)
return 'error'
return 'done'
|
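The None-skipping payload assembly in execute() is the part most worth unit testing, and it needs neither flexbe nor a running server once factored out. A sketch, using a hypothetical namedtuple in place of sara_msgs/Entity (the real message type has more fields):

from collections import namedtuple

Face = namedtuple('Face', 'id gender genderProbability emotion emotionProbability')
Entity = namedtuple('Entity', 'face color pose poseProbability aliases')

def build_people_payload(entity):
    # mandatory key first, then optional keys only when a value is present
    payload = {'peopleRecognitionId': entity.face.id}
    optional = {
        'peopleColor': entity.color,
        'peoplePose': entity.pose,
        'peoplePoseAccuracy': entity.poseProbability,
        'peopleGender': entity.face.gender,
        'peopleGenderAccuracy': entity.face.genderProbability,
        'peopleEmotion': entity.face.emotion,
        'peopleEmotionAccuracy': entity.face.emotionProbability,
    }
    payload.update({k: v for k, v in optional.items() if v is not None})
    if entity.aliases:
        payload['peopleName'] = entity.aliases[0]
    return payload

entity = Entity(face=Face(7, 'female', 0.9, None, None),
                color='red', pose=None, poseProbability=None, aliases=['Alice'])
print(build_people_payload(entity))
# only the recognition id, color, gender, gender accuracy and name are sent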
|
269dbda9c702ddd4809632558a73f4e4eae31d89
|
scripts/python/light_rpi.py
|
scripts/python/light_rpi.py
|
#
# IAS Basic device framework.
#
# Author: Joeri Hermans
#
import sys
import socket
import struct
import os
import RPIO
# Global members, which are required for the communication
# with the remote IAS controller.
gDeviceIdentifier = sys.argv[1]
gControllerAddress = sys.argv[2]
gControllerPort = int(sys.argv[3])
gPin = int(sys.argv[4])
RPIO.setup(gPin, RPIO.OUT)  # the pin must be configured as an output before RPIO.output() is called
gSocket = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
gSocket.connect((gControllerAddress,gControllerPort))
gRunning = True
# Light state members.
gState = False
def updateState( stateIdentifier , newValue ):
global gSocket
stateIdentifierLength = len(stateIdentifier)
newValueLength = len(newValue)
data = struct.pack("!BBB",0x01,stateIdentifierLength,newValueLength);
data += str.encode(stateIdentifier)
data += str.encode(newValue)
gSocket.sendall(data)
def authenticate():
global gDeviceIdentifier
global gSocket;
identifierLength = len(gDeviceIdentifier)
message = struct.pack("!BB",0x00,identifierLength) + bytes(gDeviceIdentifier.encode("ascii"));
gSocket.sendall(message);
def toggle():
global gState
if( gState == True ):
off()
else:
on()
def on():
    global gState
    gState = True
    RPIO.output(gPin,True)    # drive the pin high to switch the light on
    updateState("state","1")  # report the new state to the controller
def off():
    global gState
    gState = False
    RPIO.output(gPin,False)   # drive the pin low to switch the light off
    updateState("state","0")
def processFeature(featureIdentifier,parameter):
if( featureIdentifier == "toggle" ):
toggle()
elif( featureIdentifier == "on" ):
on()
elif( featureIdentifier == "off" ):
off()
def processCommand():
global gSocket
global gRunning
data = gSocket.recv(3);
data = struct.unpack("!BBB",data)
if( data[0] != 0x01 ):
gRunning = False
return
featureIdentifierLength = data[1]
parameterLength = data[2]
featureIdentifier = gSocket.recv(featureIdentifierLength)
featureIdentifier = featureIdentifier.decode("ascii")
if( parameterLength > 0 ):
parameter = gSocket.recv(parameterLength)
parameter = parameter.decode("ascii")
else:
parameter = ""
processFeature(featureIdentifier,parameter)
def processCommands():
global gRunning
while( gRunning ):
try:
processCommand()
except Exception as e:
print(e)
gRunning = False
def main():
authenticate()
processCommands()
if( __name__ == "__main__" ):
main()
|
Add script to control lights from Raspberry Pi.
|
Add script to control lights from Raspberry Pi.
|
Python
|
apache-2.0
|
JoeriHermans/Intelligent-Automation-System,JoeriHermans/Intelligent-Automation-System,JoeriHermans/Intelligent-Automation-System,JoeriHermans/Intelligent-Automation-System
|
Add script to control lights from Raspberry Pi.
|
#
# IAS Basic device framework.
#
# Author: Joeri Hermans
#
import sys
import socket
import struct
import os
import RPIO
# Global members, which are required for the communication
# with the remote IAS controller.
gDeviceIdentifier = sys.argv[1]
gControllerAddress = sys.argv[2]
gControllerPort = int(sys.argv[3])
gPin = int(sys.argv[4])
RPIO.setup(gPin, RPIO.OUT)  # the pin must be configured as an output before RPIO.output() is called
gSocket = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
gSocket.connect((gControllerAddress,gControllerPort))
gRunning = True
# Light state members.
gState = False
def updateState( stateIdentifier , newValue ):
global gSocket
stateIdentifierLength = len(stateIdentifier)
newValueLength = len(newValue)
data = struct.pack("!BBB",0x01,stateIdentifierLength,newValueLength);
data += str.encode(stateIdentifier)
data += str.encode(newValue)
gSocket.sendall(data)
def authenticate():
global gDeviceIdentifier
global gSocket;
identifierLength = len(gDeviceIdentifier)
message = struct.pack("!BB",0x00,identifierLength) + bytes(gDeviceIdentifier.encode("ascii"));
gSocket.sendall(message);
def toggle():
global gState
if( gState == True ):
off()
else:
on()
def on():
    global gState
    gState = True
    RPIO.output(gPin,True)    # drive the pin high to switch the light on
    updateState("state","1")  # report the new state to the controller
def off():
    global gState
    gState = False
    RPIO.output(gPin,False)   # drive the pin low to switch the light off
    updateState("state","0")
def processFeature(featureIdentifier,parameter):
if( featureIdentifier == "toggle" ):
toggle()
elif( featureIdentifier == "on" ):
on()
elif( featureIdentifier == "off" ):
off()
def processCommand():
global gSocket
global gRunning
data = gSocket.recv(3);
data = struct.unpack("!BBB",data)
if( data[0] != 0x01 ):
gRunning = False
return
featureIdentifierLength = data[1]
parameterLength = data[2]
featureIdentifier = gSocket.recv(featureIdentifierLength)
featureIdentifier = featureIdentifier.decode("ascii")
if( parameterLength > 0 ):
parameter = gSocket.recv(parameterLength)
parameter = parameter.decode("ascii")
else:
parameter = ""
processFeature(featureIdentifier,parameter)
def processCommands():
global gRunning
while( gRunning ):
try:
processCommand()
except Exception as e:
print(e)
gRunning = False
def main():
authenticate()
processCommands()
if( __name__ == "__main__" ):
main()
|
<commit_before><commit_msg>Add script to control lights from Raspberry Pi.<commit_after>
|
#
# IAS Basic device framework.
#
# Author: Joeri Hermans
#
import sys
import socket
import struct
import os
import RPIO
# Global members, which are required for the communication
# with the remote IAS controller.
gDeviceIdentifier = sys.argv[1]
gControllerAddress = sys.argv[2]
gControllerPort = int(sys.argv[3])
gPin = int(sys.argv[4])
RPIO.setup(gPin, RPIO.OUT)  # the pin must be configured as an output before RPIO.output() is called
gSocket = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
gSocket.connect((gControllerAddress,gControllerPort))
gRunning = True
# Light state members.
gState = False
def updateState( stateIdentifier , newValue ):
global gSocket
stateIdentifierLength = len(stateIdentifier)
newValueLength = len(newValue)
data = struct.pack("!BBB",0x01,stateIdentifierLength,newValueLength);
data += str.encode(stateIdentifier)
data += str.encode(newValue)
gSocket.sendall(data)
def authenticate():
global gDeviceIdentifier
global gSocket;
identifierLength = len(gDeviceIdentifier)
message = struct.pack("!BB",0x00,identifierLength) + bytes(gDeviceIdentifier.encode("ascii"));
gSocket.sendall(message);
def toggle():
global gState
if( gState == True ):
off()
else:
on()
def on():
    global gState
    gState = True
    RPIO.output(gPin,True)    # drive the pin high to switch the light on
    updateState("state","1")  # report the new state to the controller
def off():
    global gState
    gState = False
    RPIO.output(gPin,False)   # drive the pin low to switch the light off
    updateState("state","0")
def processFeature(featureIdentifier,parameter):
if( featureIdentifier == "toggle" ):
toggle()
elif( featureIdentifier == "on" ):
on()
elif( featureIdentifier == "off" ):
off()
def processCommand():
global gSocket
global gRunning
data = gSocket.recv(3);
data = struct.unpack("!BBB",data)
if( data[0] != 0x01 ):
gRunning = False
return
featureIdentifierLength = data[1]
parameterLength = data[2]
featureIdentifier = gSocket.recv(featureIdentifierLength)
featureIdentifier = featureIdentifier.decode("ascii")
if( parameterLength > 0 ):
parameter = gSocket.recv(parameterLength)
parameter = parameter.decode("ascii")
else:
parameter = ""
processFeature(featureIdentifier,parameter)
def processCommands():
global gRunning
while( gRunning ):
try:
processCommand()
except Exception as e:
print(e)
gRunning = False
def main():
authenticate()
processCommands()
if( __name__ == "__main__" ):
main()
|
Add script to control lights from Raspberry Pi.
#
# IAS Basic device framework.
#
# Author: Joeri Hermans
#
import sys
import socket
import struct
import os
import RPIO
# Global members, which are required for the communication
# with the remote IAS controller.
gDeviceIdentifier = sys.argv[1]
gControllerAddress = sys.argv[2]
gControllerPort = int(sys.argv[3])
gPin = int(sys.argv[4])
RPIO.setup(gPin, RPIO.OUT)  # the pin must be configured as an output before RPIO.output() is called
gSocket = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
gSocket.connect((gControllerAddress,gControllerPort))
gRunning = True
# Light state members.
gState = False
def updateState( stateIdentifier , newValue ):
global gSocket
stateIdentifierLength = len(stateIdentifier)
newValueLength = len(newValue)
data = struct.pack("!BBB",0x01,stateIdentifierLength,newValueLength);
data += str.encode(stateIdentifier)
data += str.encode(newValue)
gSocket.sendall(data)
def authenticate():
global gDeviceIdentifier
global gSocket;
identifierLength = len(gDeviceIdentifier)
message = struct.pack("!BB",0x00,identifierLength) + bytes(gDeviceIdentifier.encode("ascii"));
gSocket.sendall(message);
def toggle():
global gState
if( gState == True ):
off()
else:
on()
def on():
    global gState
    gState = True
    RPIO.output(gPin,True)    # drive the pin high to switch the light on
    updateState("state","1")  # report the new state to the controller
def off():
    global gState
    gState = False
    RPIO.output(gPin,False)   # drive the pin low to switch the light off
    updateState("state","0")
def processFeature(featureIdentifier,parameter):
if( featureIdentifier == "toggle" ):
toggle()
elif( featureIdentifier == "on" ):
on()
elif( featureIdentifier == "off" ):
off()
def processCommand():
global gSocket
global gRunning
data = gSocket.recv(3);
data = struct.unpack("!BBB",data)
if( data[0] != 0x01 ):
gRunning = False
return
featureIdentifierLength = data[1]
parameterLength = data[2]
featureIdentifier = gSocket.recv(featureIdentifierLength)
featureIdentifier = featureIdentifier.decode("ascii")
if( parameterLength > 0 ):
parameter = gSocket.recv(parameterLength)
parameter = parameter.decode("ascii")
else:
parameter = ""
processFeature(featureIdentifier,parameter)
def processCommands():
global gRunning
while( gRunning ):
try:
processCommand()
except Exception as e:
print(e)
gRunning = False
def main():
authenticate()
processCommands()
if( __name__ == "__main__" ):
main()
|
<commit_before><commit_msg>Add script to control lights from Raspberry Pi.<commit_after>#
# IAS Basic device framework.
#
# Author: Joeri Hermans
#
import sys
import socket
import struct
import os
import RPIO
# Global members, which are required for the communication
# with the remote IAS controller.
gDeviceIdentifier = sys.argv[1]
gControllerAddress = sys.argv[2]
gControllerPort = int(sys.argv[3])
gPin = int(sys.argv[4])
RPIO.setup(gPin, RPIO.OUT)  # the pin must be configured as an output before RPIO.output() is called
gSocket = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
gSocket.connect((gControllerAddress,gControllerPort))
gRunning = True
# Light state members.
gState = False
def updateState( stateIdentifier , newValue ):
global gSocket
stateIdentifierLength = len(stateIdentifier)
newValueLength = len(newValue)
data = struct.pack("!BBB",0x01,stateIdentifierLength,newValueLength);
data += str.encode(stateIdentifier)
data += str.encode(newValue)
gSocket.sendall(data)
def authenticate():
global gDeviceIdentifier
global gSocket;
identifierLength = len(gDeviceIdentifier)
message = struct.pack("!BB",0x00,identifierLength) + bytes(gDeviceIdentifier.encode("ascii"));
gSocket.sendall(message);
def toggle():
global gState
if( gState == True ):
off()
else:
on()
def on():
    global gState
    gState = True
    RPIO.output(gPin,True)    # drive the pin high to switch the light on
    updateState("state","1")  # report the new state to the controller
def off():
    global gState
    gState = False
    RPIO.output(gPin,False)   # drive the pin low to switch the light off
    updateState("state","0")
def processFeature(featureIdentifier,parameter):
if( featureIdentifier == "toggle" ):
toggle()
elif( featureIdentifier == "on" ):
on()
elif( featureIdentifier == "off" ):
off()
def processCommand():
global gSocket
global gRunning
data = gSocket.recv(3);
data = struct.unpack("!BBB",data)
if( data[0] != 0x01 ):
gRunning = False
return
featureIdentifierLength = data[1]
parameterLength = data[2]
featureIdentifier = gSocket.recv(featureIdentifierLength)
featureIdentifier = featureIdentifier.decode("ascii")
if( parameterLength > 0 ):
parameter = gSocket.recv(parameterLength)
parameter = parameter.decode("ascii")
else:
parameter = ""
processFeature(featureIdentifier,parameter)
def processCommands():
global gRunning
while( gRunning ):
try:
processCommand()
except Exception as e:
print(e)
gRunning = False
def main():
authenticate()
processCommands()
if( __name__ == "__main__" ):
main()
|
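The 3-byte "!BBB" header (opcode, identifier length, value length) used by updateState() and processCommand() can be round-tripped without a socket or GPIO hardware, which is a quick way to sanity-check the framing:

import struct

def encode_update(state_id, value):
    # opcode 0x01 followed by two length-prefixed ASCII payloads
    header = struct.pack("!BBB", 0x01, len(state_id), len(value))
    return header + state_id.encode("ascii") + value.encode("ascii")

def decode_update(frame):
    opcode, id_len, val_len = struct.unpack("!BBB", frame[:3])
    assert opcode == 0x01, "unexpected opcode"
    state_id = frame[3:3 + id_len].decode("ascii")
    value = frame[3 + id_len:3 + id_len + val_len].decode("ascii")
    return state_id, value

frame = encode_update("state", "1")
assert decode_update(frame) == ("state", "1")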
|
332015ed4eceaa4eac9869c6e0553ad3beacd30a
|
test/os_win7.py
|
test/os_win7.py
|
#!/usr/bin/env python
"""
mbed SDK
Copyright (c) 2011-2015 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
import sys
from mock import MagicMock
_winreg = MagicMock()  # instantiate so attribute lookups on the fake module resolve to child mocks
if sys.version_info[0] < 3:
sys.modules['_winreg'] = _winreg
else:
sys.modules['winreg'] = _winreg
from mbed_lstools.lstools_win7 import MbedLsToolsWin7
class Win7TestCase(unittest.TestCase):
""" Basic test cases checking trivial asserts
"""
def setUp(self):
pass
def test_os_supported(self):
pass
if __name__ == '__main__':
unittest.main()
|
Test win 7 with winreg module mock
|
Test win 7 with winreg module mock
|
Python
|
apache-2.0
|
jupe/mbed-ls,jupe/mbed-ls
|
Test win 7 with winreg module mock
|
#!/usr/bin/env python
"""
mbed SDK
Copyright (c) 2011-2015 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
import sys
from mock import MagicMock
_winreg = MagicMock()  # instantiate so attribute lookups on the fake module resolve to child mocks
if sys.version_info[0] < 3:
sys.modules['_winreg'] = _winreg
else:
sys.modules['winreg'] = _winreg
from mbed_lstools.lstools_win7 import MbedLsToolsWin7
class Win7TestCase(unittest.TestCase):
""" Basic test cases checking trivial asserts
"""
def setUp(self):
pass
def test_os_supported(self):
pass
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Test win 7 with winreg module mock<commit_after>
|
#!/usr/bin/env python
"""
mbed SDK
Copyright (c) 2011-2015 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
import sys
from mock import MagicMock
_winreg = MagicMock()  # instantiate so attribute lookups on the fake module resolve to child mocks
if sys.version_info[0] < 3:
sys.modules['_winreg'] = _winreg
else:
sys.modules['winreg'] = _winreg
from mbed_lstools.lstools_win7 import MbedLsToolsWin7
class Win7TestCase(unittest.TestCase):
""" Basic test cases checking trivial asserts
"""
def setUp(self):
pass
def test_os_supported(self):
pass
if __name__ == '__main__':
unittest.main()
|
Test win 7 with winreg module mock
#!/usr/bin/env python
"""
mbed SDK
Copyright (c) 2011-2015 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
import sys
from mock import MagicMock
_winreg = MagicMock()  # instantiate so attribute lookups on the fake module resolve to child mocks
if sys.version_info[0] < 3:
sys.modules['_winreg'] = _winreg
else:
sys.modules['winreg'] = _winreg
from mbed_lstools.lstools_win7 import MbedLsToolsWin7
class Win7TestCase(unittest.TestCase):
""" Basic test cases checking trivial asserts
"""
def setUp(self):
pass
def test_os_supported(self):
pass
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Test win 7 with winreg module mock<commit_after>#!/usr/bin/env python
"""
mbed SDK
Copyright (c) 2011-2015 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
import sys
from mock import MagicMock
_winreg = MagicMock()  # instantiate so attribute lookups on the fake module resolve to child mocks
if sys.version_info[0] < 3:
sys.modules['_winreg'] = _winreg
else:
sys.modules['winreg'] = _winreg
from mbed_lstools.lstools_win7 import MbedLsToolsWin7
class Win7TestCase(unittest.TestCase):
""" Basic test cases checking trivial asserts
"""
def setUp(self):
pass
def test_os_supported(self):
pass
if __name__ == '__main__':
unittest.main()
|
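The trick above generalizes: any platform-only dependency can be replaced in sys.modules before the code under test is imported. A minimal sketch; 'fakedep' is a hypothetical module name and the stdlib unittest.mock is used in place of the standalone mock package:

import sys
from unittest.mock import MagicMock

sys.modules['fakedep'] = MagicMock()  # must run before anything imports fakedep
import fakedep                        # the import machinery finds the mock in sys.modules

fakedep.some_call(42)                 # attribute access is auto-created on the mock
fakedep.some_call.assert_called_once_with(42)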
|
d2dbe3b863418c10739e94d16e7f2a6f12c52524
|
problem_builder/migrations/0002_auto_20160121_1525.py
|
problem_builder/migrations/0002_auto_20160121_1525.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('problem_builder', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Share',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('submission_uid', models.CharField(max_length=32)),
('block_id', models.CharField(max_length=255, db_index=True)),
('notified', models.BooleanField(default=False, db_index=True)),
('shared_by', models.ForeignKey(related_name='problem_builder_shared_by', to=settings.AUTH_USER_MODEL)),
('shared_with', models.ForeignKey(related_name='problem_builder_shared_with', to=settings.AUTH_USER_MODEL)),
],
),
migrations.AlterUniqueTogether(
name='share',
unique_together=set([('shared_by', 'shared_with', 'block_id')]),
),
]
|
Add in Django migration for shares.
|
Add in Django migration for shares.
|
Python
|
agpl-3.0
|
proversity-org/problem-builder,proversity-org/problem-builder,proversity-org/problem-builder
|
Add in Django migration for shares.
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('problem_builder', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Share',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('submission_uid', models.CharField(max_length=32)),
('block_id', models.CharField(max_length=255, db_index=True)),
('notified', models.BooleanField(default=False, db_index=True)),
('shared_by', models.ForeignKey(related_name='problem_builder_shared_by', to=settings.AUTH_USER_MODEL)),
('shared_with', models.ForeignKey(related_name='problem_builder_shared_with', to=settings.AUTH_USER_MODEL)),
],
),
migrations.AlterUniqueTogether(
name='share',
unique_together=set([('shared_by', 'shared_with', 'block_id')]),
),
]
|
<commit_before><commit_msg>Add in Django migration for shares.<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('problem_builder', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Share',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('submission_uid', models.CharField(max_length=32)),
('block_id', models.CharField(max_length=255, db_index=True)),
('notified', models.BooleanField(default=False, db_index=True)),
('shared_by', models.ForeignKey(related_name='problem_builder_shared_by', to=settings.AUTH_USER_MODEL)),
('shared_with', models.ForeignKey(related_name='problem_builder_shared_with', to=settings.AUTH_USER_MODEL)),
],
),
migrations.AlterUniqueTogether(
name='share',
unique_together=set([('shared_by', 'shared_with', 'block_id')]),
),
]
|
Add in Django migration for shares.
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('problem_builder', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Share',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('submission_uid', models.CharField(max_length=32)),
('block_id', models.CharField(max_length=255, db_index=True)),
('notified', models.BooleanField(default=False, db_index=True)),
('shared_by', models.ForeignKey(related_name='problem_builder_shared_by', to=settings.AUTH_USER_MODEL)),
('shared_with', models.ForeignKey(related_name='problem_builder_shared_with', to=settings.AUTH_USER_MODEL)),
],
),
migrations.AlterUniqueTogether(
name='share',
unique_together=set([('shared_by', 'shared_with', 'block_id')]),
),
]
|
<commit_before><commit_msg>Add in Django migration for shares.<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('problem_builder', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Share',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('submission_uid', models.CharField(max_length=32)),
('block_id', models.CharField(max_length=255, db_index=True)),
('notified', models.BooleanField(default=False, db_index=True)),
('shared_by', models.ForeignKey(related_name='problem_builder_shared_by', to=settings.AUTH_USER_MODEL)),
('shared_with', models.ForeignKey(related_name='problem_builder_shared_with', to=settings.AUTH_USER_MODEL)),
],
),
migrations.AlterUniqueTogether(
name='share',
unique_together=set([('shared_by', 'shared_with', 'block_id')]),
),
]
|
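For reference, a sketch of the model definition that would generate this migration; the field list is read straight off the CreateModel and AlterUniqueTogether operations above, but the project's actual models.py is not shown here. Note the ForeignKeys carry no on_delete argument, which pins this to Django versions before 2.0.

from django.conf import settings
from django.db import models

class Share(models.Model):
    submission_uid = models.CharField(max_length=32)
    block_id = models.CharField(max_length=255, db_index=True)
    notified = models.BooleanField(default=False, db_index=True)
    shared_by = models.ForeignKey(settings.AUTH_USER_MODEL,
                                  related_name='problem_builder_shared_by')
    shared_with = models.ForeignKey(settings.AUTH_USER_MODEL,
                                    related_name='problem_builder_shared_with')

    class Meta:
        app_label = 'problem_builder'
        unique_together = (('shared_by', 'shared_with', 'block_id'),)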
|
6536f808ad7cea2970ba5db46c9178dfc8739a1b
|
tests/unknownlength.py
|
tests/unknownlength.py
|
import progressbar
def test_unknown_length():
pb = progressbar.ProgressBar(widgets=[progressbar.AnimatedMarker()],
max_value=progressbar.UnknownLength)
assert pb.max_value is progressbar.UnknownLength
|
Add failing test on Py3: initialising ProgressBar with UnknownLength
|
Add failing test on Py3: initialising ProgressBar with UnknownLength
|
Python
|
bsd-3-clause
|
WoLpH/python-progressbar
|
Add failing test on Py3: initialising ProgressBar with UnknownLength
|
import progressbar
def test_unknown_length():
pb = progressbar.ProgressBar(widgets=[progressbar.AnimatedMarker()],
max_value=progressbar.UnknownLength)
assert pb.max_value is progressbar.UnknownLength
|
<commit_before><commit_msg>Add failing test on Py3: initialising ProgressBar with UnknownLength<commit_after>
|
import progressbar
def test_unknown_length():
pb = progressbar.ProgressBar(widgets=[progressbar.AnimatedMarker()],
max_value=progressbar.UnknownLength)
assert pb.max_value is progressbar.UnknownLength
|
Add failing test on Py3: initialising ProgressBar with UnknownLength
import progressbar
def test_unknown_length():
pb = progressbar.ProgressBar(widgets=[progressbar.AnimatedMarker()],
max_value=progressbar.UnknownLength)
assert pb.max_value is progressbar.UnknownLength
|
<commit_before><commit_msg>Add failing test on Py3: initialising ProgressBar with UnknownLength<commit_after>import progressbar
def test_unknown_length():
pb = progressbar.ProgressBar(widgets=[progressbar.AnimatedMarker()],
max_value=progressbar.UnknownLength)
assert pb.max_value is progressbar.UnknownLength
|
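A usage sketch for the case this test pins down: with max_value set to UnknownLength no percentage or ETA is possible, so the bar is paired with relative widgets and driven manually. The widget names come from the same python-progressbar package the test imports:

import time
import progressbar

bar = progressbar.ProgressBar(
    widgets=[progressbar.AnimatedMarker(), ' items: ', progressbar.Counter()],
    max_value=progressbar.UnknownLength)
for i in range(1, 21):
    bar.update(i)      # only a spinner and a running count can be rendered
    time.sleep(0.05)
bar.finish()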
|
7ee4bacff94874a0dd3a82360e4dcbd1ea73bc01
|
python--learnings/lambda_map_filter_reduce.py
|
python--learnings/lambda_map_filter_reduce.py
|
#!/usr/bin/env python
#
# Topics: Lambdas, Maps, Filters, Reduce
#
# Background: Lambda examples and functionality, including maps, filters, and reduces.
#
# Sources:
# - https://www.python-course.eu/lambda.php
import unittest
from functools import reduce
# test functionality
class TestMethods(unittest.TestCase):
def test_lambda(self):
'''Anonymous function creation'''
l_function = lambda x: "name: {}".format(x)
self.assertEqual(l_function('Joe'), 'name: Joe')
def test_map(self):
'''Injects all values in list into function provided'''
names = ['Joe', 'Sally']
adjusted_names = map(lambda x: "name: {}".format(x), names)
self.assertEqual(list(adjusted_names), ['name: Joe', 'name: Sally'])
def test_filter(self):
'''Captures values in provided list based on function provided'''
vals = ['a', 'b', 'a', 'c']
filtered_vals = filter(lambda x: x == 'a', vals)
self.assertEqual(list(filtered_vals), ['a', 'a'])
def test_reduce(self):
'''Continuously runs function over all values provided.'''
vals = [1, 2, 3]
sum_of_vals = reduce(lambda x, y: x + y, vals)
self.assertEqual(sum_of_vals, 6)
# main execution
if __name__ == '__main__':
unittest.main()
|
Add Python Lambda, Map, Filter, Reduce Test
|
Add Python Lambda, Map, Filter, Reduce Test
Add simple functionality for Lambda, Map, Filter, and Reduce.
|
Python
|
mit
|
jekhokie/scriptbox,jekhokie/scriptbox,jekhokie/scriptbox,jekhokie/scriptbox,jekhokie/scriptbox,jekhokie/scriptbox,jekhokie/scriptbox,jekhokie/scriptbox
|
Add Python Lambda, Map, Filter, Reduce Test
Add simple functionality for Lambda, Map, Filter, and Reduce.
|
#!/usr/bin/env python
#
# Topics: Lambdas, Maps, Filters, Reduce
#
# Background: Lambda examples and functionality, including maps, filters, and reduces.
#
# Sources:
# - https://www.python-course.eu/lambda.php
import unittest
from functools import reduce
# test functionality
class TestMethods(unittest.TestCase):
def test_lambda(self):
'''Anonymous function creation'''
l_function = lambda x: "name: {}".format(x)
self.assertEqual(l_function('Joe'), 'name: Joe')
def test_map(self):
'''Injects all values in list into function provided'''
names = ['Joe', 'Sally']
adjusted_names = map(lambda x: "name: {}".format(x), names)
self.assertEqual(list(adjusted_names), ['name: Joe', 'name: Sally'])
def test_filter(self):
'''Captures values in provided list based on function provided'''
vals = ['a', 'b', 'a', 'c']
filtered_vals = filter(lambda x: x == 'a', vals)
self.assertEqual(list(filtered_vals), ['a', 'a'])
def test_reduce(self):
'''Continuously runs function over all values provided.'''
vals = [1, 2, 3]
sum_of_vals = reduce(lambda x, y: x + y, vals)
self.assertEqual(sum_of_vals, 6)
# main execution
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add Python Lambda, Map, Filter, Reduce Test
Add simple functionality for Lambda, Map, Filter, and Reduce.<commit_after>
|
#!/usr/bin/env python
#
# Topics: Lambdas, Maps, Filters, Reduce
#
# Background: Lambda examples and functionality, including maps, filters, and reduces.
#
# Sources:
# - https://www.python-course.eu/lambda.php
import unittest
from functools import reduce
# test functionality
class TestMethods(unittest.TestCase):
def test_lambda(self):
'''Anonymous function creation'''
l_function = lambda x: "name: {}".format(x)
self.assertEqual(l_function('Joe'), 'name: Joe')
def test_map(self):
'''Injects all values in list into function provided'''
names = ['Joe', 'Sally']
adjusted_names = map(lambda x: "name: {}".format(x), names)
self.assertEqual(list(adjusted_names), ['name: Joe', 'name: Sally'])
def test_filter(self):
'''Captures values in provided list based on function provided'''
vals = ['a', 'b', 'a', 'c']
filtered_vals = filter(lambda x: x == 'a', vals)
self.assertEqual(list(filtered_vals), ['a', 'a'])
def test_reduce(self):
'''Continuously runs function over all values provided.'''
vals = [1, 2, 3]
sum_of_vals = reduce(lambda x, y: x + y, vals)
self.assertEqual(sum_of_vals, 6)
# main execution
if __name__ == '__main__':
unittest.main()
|
Add Python Lambda, Map, Filter, Reduce Test
Add simple functionality for Lambda, Map, Filter, and Reduce.
#!/usr/bin/env python
#
# Topics: Lambdas, Maps, Filters, Reduce
#
# Background: Lambda examples and functionality, including maps, filters, and reduces.
#
# Sources:
# - https://www.python-course.eu/lambda.php
import unittest
from functools import reduce
# test functionality
class TestMethods(unittest.TestCase):
def test_lambda(self):
'''Anonymous function creation'''
l_function = lambda x: "name: {}".format(x)
self.assertEqual(l_function('Joe'), 'name: Joe')
def test_map(self):
'''Injects all values in list into function provided'''
names = ['Joe', 'Sally']
adjusted_names = map(lambda x: "name: {}".format(x), names)
self.assertEqual(list(adjusted_names), ['name: Joe', 'name: Sally'])
def test_filter(self):
'''Captures values in provided list based on function provided'''
vals = ['a', 'b', 'a', 'c']
filtered_vals = filter(lambda x: x == 'a', vals)
self.assertEqual(list(filtered_vals), ['a', 'a'])
def test_reduce(self):
'''Continuously runs function over all values provided.'''
vals = [1, 2, 3]
sum_of_vals = reduce(lambda x, y: x + y, vals)
self.assertEqual(sum_of_vals, 6)
# main execution
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add Python Lambda, Map, Filter, Reduce Test
Add simple functionality for Lambda, Map, Filter, and Reduce.<commit_after>#!/usr/bin/env python
#
# Topics: Lambdas, Maps, Filters, Reduce
#
# Background: Lambda examples and functionality, including maps, filters, and reduces.
#
# Sources:
# - https://www.python-course.eu/lambda.php
import unittest
from functools import reduce
# test functionality
class TestMethods(unittest.TestCase):
def test_lambda(self):
'''Anonymous function creation'''
l_function = lambda x: "name: {}".format(x)
self.assertEqual(l_function('Joe'), 'name: Joe')
def test_map(self):
'''Injects all values in list into function provided'''
names = ['Joe', 'Sally']
adjusted_names = map(lambda x: "name: {}".format(x), names)
self.assertEqual(list(adjusted_names), ['name: Joe', 'name: Sally'])
def test_filter(self):
'''Captures values in provided list based on function provided'''
vals = ['a', 'b', 'a', 'c']
filtered_vals = filter(lambda x: x == 'a', vals)
self.assertEqual(list(filtered_vals), ['a', 'a'])
def test_reduce(self):
'''Continuously runs function over all values provided.'''
vals = [1, 2, 3]
sum_of_vals = reduce(lambda x, y: x + y, vals)
self.assertEqual(sum_of_vals, 6)
# main execution
if __name__ == '__main__':
unittest.main()
|
|
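One behaviour the tests above quietly rely on: in Python 3, map() and filter() return lazy, single-use iterators, which is why each result is wrapped in list() before comparison. A short illustration (not part of the commit):

# map()/filter() iterators are exhausted after one pass in Python 3.
squares = map(lambda x: x * x, [1, 2, 3])
assert list(squares) == [1, 4, 9]
assert list(squares) == []  # the iterator is already spent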
851631bd98139d4dd45c8fce6a080615d45283f4
|
thinc/tests/unit/test_sparse_embed.py
|
thinc/tests/unit/test_sparse_embed.py
|
from ...neural._classes.sparse_embed import SparseEmbed
import numpy
def test_create_sparse_embed():
embed = SparseEmbed(300)
def test_predict_sparse_embed():
embed = SparseEmbed(300)
ids = numpy.ones((10,), dtype='uint64')
vectors = embed.predict(ids)
assert vectors.shape == (10, 300)
assert vectors.sum() != 0.
def test_sparse_embed_begin_update():
embed = SparseEmbed(300)
ids = numpy.ones((10,), dtype='uint64')
vectors, finish_update = embed.begin_update(ids)
assert vectors.shape == (10, 300)
assert vectors.sum() != 0.
gradient = embed.ops.allocate(vectors.shape)
finish_update(gradient)
|
Add test for deprecated sparse_embed module
|
Add test for deprecated sparse_embed module
|
Python
|
mit
|
spacy-io/thinc,spacy-io/thinc,explosion/thinc,explosion/thinc,explosion/thinc,explosion/thinc,spacy-io/thinc
|
Add test for deprecated sparse_embed module
|
from ...neural._classes.sparse_embed import SparseEmbed
import numpy
def test_create_sparse_embed():
embed = SparseEmbed(300)
def test_predict_sparse_embed():
embed = SparseEmbed(300)
ids = numpy.ones((10,), dtype='uint64')
vectors = embed.predict(ids)
assert vectors.shape == (10, 300)
assert vectors.sum() != 0.
def test_sparse_embed_begin_update():
embed = SparseEmbed(300)
ids = numpy.ones((10,), dtype='uint64')
vectors, finish_update = embed.begin_update(ids)
assert vectors.shape == (10, 300)
assert vectors.sum() != 0.
gradient = embed.ops.allocate(vectors.shape)
finish_update(gradient)
|
<commit_before><commit_msg>Add test for deprecated sparse_embed module<commit_after>
|
from ...neural._classes.sparse_embed import SparseEmbed
import numpy
def test_create_sparse_embed():
embed = SparseEmbed(300)
def test_predict_sparse_embed():
embed = SparseEmbed(300)
ids = numpy.ones((10,), dtype='uint64')
vectors = embed.predict(ids)
assert vectors.shape == (10, 300)
assert vectors.sum() != 0.
def test_sparse_embed_begin_update():
embed = SparseEmbed(300)
ids = numpy.ones((10,), dtype='uint64')
vectors, finish_update = embed.begin_update(ids)
assert vectors.shape == (10, 300)
assert vectors.sum() != 0.
gradient = embed.ops.allocate(vectors.shape)
finish_update(gradient)
|
Add test for deprecated sparse_embed module
from ...neural._classes.sparse_embed import SparseEmbed
import numpy
def test_create_sparse_embed():
embed = SparseEmbed(300)
def test_predict_sparse_embed():
embed = SparseEmbed(300)
ids = numpy.ones((10,), dtype='uint64')
vectors = embed.predict(ids)
assert vectors.shape == (10, 300)
assert vectors.sum() != 0.
def test_sparse_embed_begin_update():
embed = SparseEmbed(300)
ids = numpy.ones((10,), dtype='uint64')
vectors, finish_update = embed.begin_update(ids)
assert vectors.shape == (10, 300)
assert vectors.sum() != 0.
gradient = embed.ops.allocate(vectors.shape)
finish_update(gradient)
|
<commit_before><commit_msg>Add test for deprecated sparse_embed module<commit_after>from ...neural._classes.sparse_embed import SparseEmbed
import numpy
def test_create_sparse_embed():
embed = SparseEmbed(300)
def test_predict_sparse_embed():
embed = SparseEmbed(300)
ids = numpy.ones((10,), dtype='uint64')
vectors = embed.predict(ids)
assert vectors.shape == (10, 300)
assert vectors.sum() != 0.
def test_sparse_embed_begin_update():
embed = SparseEmbed(300)
ids = numpy.ones((10,), dtype='uint64')
vectors, finish_update = embed.begin_update(ids)
assert vectors.shape == (10, 300)
assert vectors.sum() != 0.
gradient = embed.ops.allocate(vectors.shape)
finish_update(gradient)
|
|
d9351b85301e701b7054acf57fb3be6b1a6cac01
|
build_recipes.py
|
build_recipes.py
|
import argparse
import subprocess
parser = argparse.ArgumentParser()
parser.add_argument("package_list", help="List of packages for which" +
                    " recipes will be created")
args = parser.parse_args()
package_names = [package.strip() for package in
open(args.package_list, 'r').readlines()]
log_dir = "./logs/"
recipes_dir = "./recipes/"
build_log_file = open(log_dir + 'build_log', 'w')
successes = []
failures = []
for package in package_names:
msg = "Creating Conda recipe for %s\n" % (package)
print(msg)
err = subprocess.call(['conda', 'build', recipes_dir + package],
stdout=build_log_file, stderr=build_log_file)
    if err == 0:
        msg = "Successfully built conda package for %s\n" % (package)
        successes.append(package)
    else:
        msg = "Failed to build conda package for %s\n" % (package)
failures.append(package)
print(msg)
build_log_file.close()
successful_recipes_file = open(log_dir + 'successful_builds', 'w')
failed_recipes_file = open(log_dir + 'failed_builds', 'w')
successful_recipes_file.write('\n'.join(successes))
failed_recipes_file.write('\n'.join(failures))
successful_recipes_file.close()
failed_recipes_file.close()
|
Add script to build conda recipes
|
Add script to build conda recipes
Signed-off-by: Harsh Gupta <c4bd8559369e527b4bb1785ff84e8ff50fde87c0@gmail.com>
|
Python
|
bsd-3-clause
|
ContinuumIO/pypi-conda-builds
|
Add script to build conda recipes
Signed-off-by: Harsh Gupta <c4bd8559369e527b4bb1785ff84e8ff50fde87c0@gmail.com>
|
import argparse
import subprocess
parser = argparse.ArgumentParser()
parser.add_argument("package_list", help="List of packages for which" +
                    " recipes will be created")
args = parser.parse_args()
package_names = [package.strip() for package in
open(args.package_list, 'r').readlines()]
log_dir = "./logs/"
recipes_dir = "./recipes/"
build_log_file = open(log_dir + 'build_log', 'w')
successes = []
failures = []
for package in package_names:
msg = "Creating Conda recipe for %s\n" % (package)
print(msg)
err = subprocess.call(['conda', 'build', recipes_dir + package],
stdout=build_log_file, stderr=build_log_file)
    if err == 0:
        msg = "Successfully built conda package for %s\n" % (package)
        successes.append(package)
    else:
        msg = "Failed to build conda package for %s\n" % (package)
failures.append(package)
print(msg)
build_log_file.close()
successful_recipes_file = open(log_dir + 'successful_builds', 'w')
failed_recipes_file = open(log_dir + 'failed_builds', 'w')
successful_recipes_file.write('\n'.join(successes))
failed_recipes_file.write('\n'.join(failures))
successful_recipes_file.close()
failed_recipes_file.close()
|
<commit_before><commit_msg>Add script to build conda recipes
Signed-off-by: Harsh Gupta <c4bd8559369e527b4bb1785ff84e8ff50fde87c0@gmail.com><commit_after>
|
import argparse
import subprocess
parser = argparse.ArgumentParser()
parser.add_argument("package_list", help="List of packages for which" +
                    " recipes will be created")
args = parser.parse_args()
package_names = [package.strip() for package in
open(args.package_list, 'r').readlines()]
log_dir = "./logs/"
recipes_dir = "./recipes/"
build_log_file = open(log_dir + 'build_log', 'w')
successes = []
failures = []
for package in package_names:
msg = "Creating Conda recipe for %s\n" % (package)
print(msg)
err = subprocess.call(['conda', 'build', recipes_dir + package],
stdout=build_log_file, stderr=build_log_file)
    if err == 0:
        msg = "Successfully built conda package for %s\n" % (package)
        successes.append(package)
    else:
        msg = "Failed to build conda package for %s\n" % (package)
failures.append(package)
print(msg)
build_log_file.close()
successful_recipes_file = open(log_dir + 'successful_builds', 'w')
failed_recipes_file = open(log_dir + 'failed_builds', 'w')
successful_recipes_file.write('\n'.join(successes))
failed_recipes_file.write('\n'.join(failures))
successful_recipes_file.close()
failed_recipes_file.close()
|
Add script to build conda recipes
Signed-off-by: Harsh Gupta <c4bd8559369e527b4bb1785ff84e8ff50fde87c0@gmail.com>
import argparse
import subprocess
parser = argparse.ArgumentParser()
parser.add_argument("package_list", help="List of packages for which" +
                    " recipes will be created")
args = parser.parse_args()
package_names = [package.strip() for package in
open(args.package_list, 'r').readlines()]
log_dir = "./logs/"
recipes_dir = "./recipes/"
build_log_file = open(log_dir + 'build_log', 'w')
successes = []
failures = []
for package in package_names:
msg = "Creating Conda recipe for %s\n" % (package)
print(msg)
err = subprocess.call(['conda', 'build', recipes_dir + package],
stdout=build_log_file, stderr=build_log_file)
    if err == 0:
        msg = "Successfully built conda package for %s\n" % (package)
        successes.append(package)
    else:
        msg = "Failed to build conda package for %s\n" % (package)
failures.append(package)
print(msg)
build_log_file.close()
successful_recipes_file = open(log_dir + 'successful_builds', 'w')
failed_recipes_file = open(log_dir + 'failed_builds', 'w')
successful_recipes_file.write('\n'.join(successes))
failed_recipes_file.write('\n'.join(failures))
successful_recipes_file.close()
failed_recipes_file.close()
|
<commit_before><commit_msg>Add script to build conda recipes
Signed-off-by: Harsh Gupta <c4bd8559369e527b4bb1785ff84e8ff50fde87c0@gmail.com><commit_after>import argparse
import subprocess
parser = argparse.ArgumentParser()
parser.add_argument("package_list", help="List of packages for which" +
                    " recipes will be created")
args = parser.parse_args()
package_names = [package.strip() for package in
open(args.package_list, 'r').readlines()]
log_dir = "./logs/"
recipes_dir = "./recipes/"
build_log_file = open(log_dir + 'build_log', 'w')
successes = []
failures = []
for package in package_names:
msg = "Creating Conda recipe for %s\n" % (package)
print(msg)
err = subprocess.call(['conda', 'build', recipes_dir + package],
stdout=build_log_file, stderr=build_log_file)
    if err == 0:
        msg = "Successfully built conda package for %s\n" % (package)
        successes.append(package)
    else:
        msg = "Failed to build conda package for %s\n" % (package)
failures.append(package)
print(msg)
build_log_file.close()
successful_recipes_file = open(log_dir + 'successful_builds', 'w')
failed_recipes_file = open(log_dir + 'failed_builds', 'w')
successful_recipes_file.write('\n'.join(successes))
failed_recipes_file.write('\n'.join(failures))
successful_recipes_file.close()
failed_recipes_file.close()
|
|
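A note on the return-code check in the script above: it compares with ==, because testing integers with `is` only appears to work thanks to CPython's small-int cache (newer interpreters emit a SyntaxWarning for it). On Python 3.5+ the same call could also go through subprocess.run; a sketch, where the package path is a placeholder:

import subprocess

# CompletedProcess exposes the exit status as .returncode.
result = subprocess.run(['conda', 'build', './recipes/some-package'])  # placeholder path
if result.returncode == 0:  # compare exit codes with ==, never with 'is'
    print('build succeeded')
else:
    print('build failed')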
eec5e9e5bf467384b77aadf2aaf2a0176d7b3d7b
|
test.py
|
test.py
|
import argcommand
import argparse
##
class Say( argcommand.Command ):
what = argcommand.Argument( "WORD", default = "Something", help = "the text you want to print" )
times = argcommand.Argument( "--times", "-t", type = int, default = 1, metavar = "T", help = "how many times you want to repeat the text" )
#
def run( self ):
print self.times * self.what
class FirstLine( argcommand.Command ):
files = argcommand.Argument( "FILE", type = argparse.FileType( "r" ), nargs = "+",
help = "The files to read" )
def run( self ):
for file_ in self.files:
print file_.readline()
class TestCommand( argcommand.Command ):
subcommands = [ Say, FirstLine ]
if "__main__" == __name__:
TestCommand.execute()
|
Test classes from the README.md file
|
Test classes from the README.md file
|
Python
|
mit
|
avinoamr/argcommand
|
Test classes from the README.md file
|
import argcommand
import argparse
##
class Say( argcommand.Command ):
what = argcommand.Argument( "WORD", default = "Something", help = "the text you want to print" )
times = argcommand.Argument( "--times", "-t", type = int, default = 1, metavar = "T", help = "how many times you want to repeat the text" )
#
def run( self ):
print self.times * self.what
class FirstLine( argcommand.Command ):
files = argcommand.Argument( "FILE", type = argparse.FileType( "r" ), nargs = "+",
help = "The files to read" )
def run( self ):
for file_ in self.files:
print file_.readline()
class TestCommand( argcommand.Command ):
subcommands = [ Say, FirstLine ]
if "__main__" == __name__:
TestCommand.execute()
|
<commit_before><commit_msg>Test classes from the README.md file<commit_after>
|
import argcommand
import argparse
##
class Say( argcommand.Command ):
what = argcommand.Argument( "WORD", default = "Something", help = "the text you want to print" )
times = argcommand.Argument( "--times", "-t", type = int, default = 1, metavar = "T", help = "how many times you want to repeat the text" )
#
def run( self ):
print self.times * self.what
class FirstLine( argcommand.Command ):
files = argcommand.Argument( "FILE", type = argparse.FileType( "r" ), nargs = "+",
help = "The files to read" )
def run( self ):
for file_ in self.files:
print file_.readline()
class TestCommand( argcommand.Command ):
subcommands = [ Say, FirstLine ]
if "__main__" == __name__:
TestCommand.execute()
|
Test classes from the README.md file
import argcommand
import argparse
##
class Say( argcommand.Command ):
what = argcommand.Argument( "WORD", default = "Something", help = "the text you want to print" )
times = argcommand.Argument( "--times", "-t", type = int, default = 1, metavar = "T", help = "how many times you want to repeat the text" )
#
def run( self ):
print self.times * self.what
class FirstLine( argcommand.Command ):
files = argcommand.Argument( "FILE", type = argparse.FileType( "r" ), nargs = "+",
help = "The files to read" )
def run( self ):
for file_ in self.files:
print file_.readline()
class TestCommand( argcommand.Command ):
subcommands = [ Say, FirstLine ]
if "__main__" == __name__:
TestCommand.execute()
|
<commit_before><commit_msg>Test classes from the README.md file<commit_after>import argcommand
import argparse
##
class Say( argcommand.Command ):
what = argcommand.Argument( "WORD", default = "Something", help = "the text you want to print" )
times = argcommand.Argument( "--times", "-t", type = int, default = 1, metavar = "T", help = "how many times you want to repeat the text" )
#
def run( self ):
print self.times * self.what
class FirstLine( argcommand.Command ):
files = argcommand.Argument( "FILE", type = argparse.FileType( "r" ), nargs = "+",
help = "The files to read" )
def run( self ):
for file_ in self.files:
print file_.readline()
class TestCommand( argcommand.Command ):
subcommands = [ Say, FirstLine ]
if "__main__" == __name__:
TestCommand.execute()
|
|
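argcommand itself is not shown here, but from the README classes above it is evidently sugar over argparse subparsers. A rough plain-argparse equivalent of the Say command — a sketch under that assumption, not argcommand's actual internals:

import argparse

parser = argparse.ArgumentParser()
sub = parser.add_subparsers(dest='command')
say = sub.add_parser('say')
say.add_argument('WORD', nargs='?', default='Something',
                 help='the text you want to print')
say.add_argument('--times', '-t', type=int, default=1, metavar='T')

args = parser.parse_args(['say', 'hi', '-t', '3'])
print(args.times * args.WORD)  # -> 'hihihi'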
756b5fa59d12e67fe2ed4f223ddc77b00f7686b9
|
www/tests/test_file.py
|
www/tests/test_file.py
|
from browser import document, html, window
with open('test.html') as f:
f.read()
with open('files/text-utf8.txt') as f:
f.read()
with open('compression/du cote de chez swann.txt', 'rb') as f:
assert len(f.read()) == 1_056_294
with open('compression/du cote de chez swann.txt', 'r') as f:
assert len(f.readlines()) == 2118
with open('compression/du cote de chez swann.txt', 'r') as f:
counter = 0
for line in f:
counter += 1
assert counter == 2118
try:
with open('files/text-latin1.txt') as f:
f.read()
raise Exception('should have raised UnicodeDecodeError')
except UnicodeDecodeError:
pass
with open('files/text-latin1.txt', encoding='latin1') as f:
assert f.read() == "bébé"
with open('files/text-latin9.txt', encoding='iso-8859-15') as f:
assert f.read() == "sœur"
# image file
with open('../brython.png', 'rb') as f:
content = f.read()
assert isinstance(content, bytes)
assert len(content) == 2011
print("passed all tests...")
|
Add test for file objects created by open()
|
Add test for file objects created by open()
|
Python
|
bsd-3-clause
|
brython-dev/brython,brython-dev/brython,brython-dev/brython
|
Add test for file objects created by open()
|
from browser import document, html, window
with open('test.html') as f:
f.read()
with open('files/text-utf8.txt') as f:
f.read()
with open('compression/du cote de chez swann.txt', 'rb') as f:
assert len(f.read()) == 1_056_294
with open('compression/du cote de chez swann.txt', 'r') as f:
assert len(f.readlines()) == 2118
with open('compression/du cote de chez swann.txt', 'r') as f:
counter = 0
for line in f:
counter += 1
assert counter == 2118
try:
with open('files/text-latin1.txt') as f:
f.read()
raise Exception('should have raised UnicodeDecodeError')
except UnicodeDecodeError:
pass
with open('files/text-latin1.txt', encoding='latin1') as f:
assert f.read() == "bébé"
with open('files/text-latin9.txt', encoding='iso-8859-15') as f:
assert f.read() == "sœur"
# image file
with open('../brython.png', 'rb') as f:
content = f.read()
assert isinstance(content, bytes)
assert len(content) == 2011
print("passed all tests...")
|
<commit_before><commit_msg>Add test for file objects created by open()<commit_after>
|
from browser import document, html, window
with open('test.html') as f:
f.read()
with open('files/text-utf8.txt') as f:
f.read()
with open('compression/du cote de chez swann.txt', 'rb') as f:
assert len(f.read()) == 1_056_294
with open('compression/du cote de chez swann.txt', 'r') as f:
assert len(f.readlines()) == 2118
with open('compression/du cote de chez swann.txt', 'r') as f:
counter = 0
for line in f:
counter += 1
assert counter == 2118
try:
with open('files/text-latin1.txt') as f:
f.read()
raise Exception('should have raised UnicodeDecodeError')
except UnicodeDecodeError:
pass
with open('files/text-latin1.txt', encoding='latin1') as f:
assert f.read() == "bébé"
with open('files/text-latin9.txt', encoding='iso-8859-15') as f:
assert f.read() == "sœur"
# image file
with open('../brython.png', 'rb') as f:
content = f.read()
assert isinstance(content, bytes)
assert len(content) == 2011
print("passed all tests...")
|
Add test for file objects created by open()
from browser import document, html, window
with open('test.html') as f:
f.read()
with open('files/text-utf8.txt') as f:
f.read()
with open('compression/du cote de chez swann.txt', 'rb') as f:
assert len(f.read()) == 1_056_294
with open('compression/du cote de chez swann.txt', 'r') as f:
assert len(f.readlines()) == 2118
with open('compression/du cote de chez swann.txt', 'r') as f:
counter = 0
for line in f:
counter += 1
assert counter == 2118
try:
with open('files/text-latin1.txt') as f:
f.read()
raise Exception('should have raised UnicodeDecodeError')
except UnicodeDecodeError:
pass
with open('files/text-latin1.txt', encoding='latin1') as f:
assert f.read() == "bébé"
with open('files/text-latin9.txt', encoding='iso-8859-15') as f:
assert f.read() == "sœur"
# image file
with open('../brython.png', 'rb') as f:
content = f.read()
assert isinstance(content, bytes)
assert len(content) == 2011
print("passed all tests...")
|
<commit_before><commit_msg>Add test for file objects created by open()<commit_after>from browser import document, html, window
with open('test.html') as f:
f.read()
with open('files/text-utf8.txt') as f:
f.read()
with open('compression/du cote de chez swann.txt', 'rb') as f:
assert len(f.read()) == 1_056_294
with open('compression/du cote de chez swann.txt', 'r') as f:
assert len(f.readlines()) == 2118
with open('compression/du cote de chez swann.txt', 'r') as f:
counter = 0
for line in f:
counter += 1
assert counter == 2118
try:
with open('files/text-latin1.txt') as f:
f.read()
raise Exception('should have raised UnicodeDecodeError')
except UnicodeDecodeError:
pass
with open('files/text-latin1.txt', encoding='latin1') as f:
assert f.read() == "bébé"
with open('files/text-latin9.txt', encoding='iso-8859-15') as f:
assert f.read() == "sœur"
# image file
with open('../brython.png', 'rb') as f:
content = f.read()
assert isinstance(content, bytes)
assert len(content) == 2011
print("passed all tests...")
|
|
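The latin1/UTF-8 assertions above lean on standard codec behaviour: the byte 0xE9 ('é' in latin-1) is not a valid UTF-8 sequence on its own, so decoding it as UTF-8 must fail. In isolation:

data = 'bébé'.encode('latin-1')   # b'b\xe9b\xe9'
try:
    data.decode('utf-8')
except UnicodeDecodeError:
    pass                          # 0xe9 alone is an invalid UTF-8 byte
print(data.decode('latin-1'))     # -> bébé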
034719380206d42a1bc065cb5805d700cd5f2366
|
pentagon/filters.py
|
pentagon/filters.py
|
import re
def get_filters():
"""Register a function with decorator"""
registry = {}
def registrar(func):
registry[func.__name__] = func
return func
registrar.all = registry
return registrar
filter = get_filters()
@filter
def regex_trim(input, regex, replace=''):
"""
Trims or replaces the regex match in an input string.
input (string): the input string to search for matches
regex (string): regex to match
replace (string - optional): a string to replace any matches with. Defaults to trimming the match.
"""
return re.sub(regex, replace, input)
if __name__ == '__main__':
for k,v in filter.all.items():
print(v)
|
Add a jinja filter and factory
|
Add a jinja filter and factory
|
Python
|
apache-2.0
|
reactiveops/pentagon,reactiveops/pentagon,reactiveops/pentagon
|
Add a jinja filter and factory
|
import re
def get_filters():
"""Register a function with decorator"""
registry = {}
def registrar(func):
registry[func.__name__] = func
return func
registrar.all = registry
return registrar
filter = get_filters()
@filter
def regex_trim(input, regex, replace=''):
"""
Trims or replaces the regex match in an input string.
input (string): the input string to search for matches
regex (string): regex to match
replace (string - optional): a string to replace any matches with. Defaults to trimming the match.
"""
return re.sub(regex, replace, input)
if __name__ == '__main__':
for k,v in filter.all.items():
print(v)
|
<commit_before><commit_msg>Add a jinja filter and factory<commit_after>
|
import re
def get_filters():
"""Register a function with decorator"""
registry = {}
def registrar(func):
registry[func.__name__] = func
return func
registrar.all = registry
return registrar
filter = get_filters()
@filter
def regex_trim(input, regex, replace=''):
"""
Trims or replaces the regex match in an input string.
input (string): the input string to search for matches
regex (string): regex to match
replace (string - optional): a string to replace any matches with. Defaults to trimming the match.
"""
return re.sub(regex, replace, input)
if __name__ == '__main__':
for k,v in filter.all.items():
print(v)
|
Add a jinja filter and factory
import re
def get_filters():
"""Register a function with decorator"""
registry = {}
def registrar(func):
registry[func.__name__] = func
return func
registrar.all = registry
return registrar
filter = get_filters()
@filter
def regex_trim(input, regex, replace=''):
"""
Trims or replaces the regex match in an input string.
input (string): the input string to search for matches
regex (string): regex to match
replace (string - optional): a string to replace any matches with. Defaults to trimming the match.
"""
return re.sub(regex, replace, input)
if __name__ == '__main__':
for k,v in filter.all.items():
print(v)
|
<commit_before><commit_msg>Add a jinja filter and factory<commit_after>import re
def get_filters():
"""Register a function with decorator"""
registry = {}
def registrar(func):
registry[func.__name__] = func
return func
registrar.all = registry
return registrar
filter = get_filters()
@filter
def regex_trim(input, regex, replace=''):
"""
Trims or replaces the regex match in an input string.
input (string): the input string to search for matches
regex (string): regex to match
replace (string - optional): a string to replace any matches with. Defaults to trimming the match.
"""
return re.sub(regex, replace, input)
if __name__ == '__main__':
for k,v in filter.all.items():
print(v)
|
|
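Nothing in the module above attaches the registry to a template engine, but since the commit subject says 'jinja filter', the wiring would presumably look like the sketch below. The Environment API is standard Jinja2; the import path of the module is an assumption:

from jinja2 import Environment

import filters  # hypothetical import path for the module above

env = Environment()
env.filters.update(filters.filter.all)  # .all maps filter names to functions

# regex_trim strips the digits from the input string
print(env.from_string("{{ 'abc123' | regex_trim('[0-9]+') }}").render())  # -> abc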
20cf65dfc76b097a63b21b1661bf40e01764af71
|
pybb/management/commands/pybb_delete_invalid_topics.py
|
pybb/management/commands/pybb_delete_invalid_topics.py
|
from optparse import make_option
from datetime import datetime, timedelta
from django.core.management.base import BaseCommand, CommandError
from django.db.models import Count
from pybb.models import Topic
class Command(BaseCommand):
    help = 'Delete invalid topics.'
def handle(self, *args, **kwargs):
check_time = datetime.now() - timedelta(seconds=10)
topics = Topic.objects.filter(created__lt=check_time)\
.annotate(counter=Count('posts'))\
.filter(counter=0)
count = topics.count()
print 'Found %d invalid topics' % count
if count:
            answer = raw_input('Are you sure you want to delete them? [y/n]:')
if answer.lower() == 'y':
print 'Deleting topics'
topics.delete()
print 'Deletion completed'
else:
print 'Aborting'
|
Add management command to delete invalid topics
|
Add management command to delete invalid topics
|
Python
|
bsd-3-clause
|
gpetukhov/pybb,gpetukhov/pybb,gpetukhov/pybb
|
Add management command to delete invalid topics
|
from optparse import make_option
from datetime import datetime, timedelta
from django.core.management.base import BaseCommand, CommandError
from django.db.models import Count
from pybb.models import Topic
class Command(BaseCommand):
    help = 'Delete invalid topics.'
def handle(self, *args, **kwargs):
check_time = datetime.now() - timedelta(seconds=10)
topics = Topic.objects.filter(created__lt=check_time)\
.annotate(counter=Count('posts'))\
.filter(counter=0)
count = topics.count()
print 'Found %d invalid topics' % count
if count:
            answer = raw_input('Are you sure you want to delete them? [y/n]:')
if answer.lower() == 'y':
print 'Deleting topics'
topics.delete()
print 'Deletion completed'
else:
print 'Aborting'
|
<commit_before><commit_msg>Add management command to delete invalid topics<commit_after>
|
from optparse import make_option
from datetime import datetime, timedelta
from django.core.management.base import BaseCommand, CommandError
from django.db.models import Count
from pybb.models import Topic
class Command(BaseCommand):
    help = 'Delete invalid topics.'
def handle(self, *args, **kwargs):
check_time = datetime.now() - timedelta(seconds=10)
topics = Topic.objects.filter(created__lt=check_time)\
.annotate(counter=Count('posts'))\
.filter(counter=0)
count = topics.count()
print 'Found %d invalid topics' % count
if count:
            answer = raw_input('Are you sure you want to delete them? [y/n]:')
if answer.lower() == 'y':
print 'Deleting topics'
topics.delete()
print 'Deletion completed'
else:
print 'Aborting'
|
Add management command to delete invalid topics
from optparse import make_option
from datetime import datetime, timedelta
from django.core.management.base import BaseCommand, CommandError
from django.db.models import Count
from pybb.models import Topic
class Command(BaseCommand):
    help = 'Delete invalid topics.'
def handle(self, *args, **kwargs):
check_time = datetime.now() - timedelta(seconds=10)
topics = Topic.objects.filter(created__lt=check_time)\
.annotate(counter=Count('posts'))\
.filter(counter=0)
count = topics.count()
print 'Found %d invalid topics' % count
if count:
            answer = raw_input('Are you sure you want to delete them? [y/n]:')
if answer.lower() == 'y':
print 'Deleting topics'
topics.delete()
print 'Deletion completed'
else:
print 'Aborting'
|
<commit_before><commit_msg>Add management command to delete invalid topics<commit_after>from optparse import make_option
from datetime import datetime, timedelta
from django.core.management.base import BaseCommand, CommandError
from django.db.models import Count
from pybb.models import Topic
class Command(BaseCommand):
    help = 'Delete invalid topics.'
def handle(self, *args, **kwargs):
check_time = datetime.now() - timedelta(seconds=10)
topics = Topic.objects.filter(created__lt=check_time)\
.annotate(counter=Count('posts'))\
.filter(counter=0)
count = topics.count()
print 'Found %d invalid topics' % count
if count:
            answer = raw_input('Are you sure you want to delete them? [y/n]:')
if answer.lower() == 'y':
print 'Deleting topics'
topics.delete()
print 'Deletion completed'
else:
print 'Aborting'
|
|
d80c5c93d993905a4950bc00dea29096da1de459
|
src/mmw/apps/modeling/migrations/0021_old_scenarios.py
|
src/mmw/apps/modeling/migrations/0021_old_scenarios.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
def clear_old_scenario_results(apps, schema_editor):
Scenario = apps.get_model('modeling', 'Scenario')
old_scenarios = Scenario.objects.filter(
project__model_package='tr-55'
)
for scenario in old_scenarios:
scenario.results = '[]'
scenario.modification_hash = ''
scenario.save()
class Migration(migrations.Migration):
dependencies = [
('modeling', '0020_old_scenarios'),
]
operations = [
migrations.RunPython(clear_old_scenario_results)
]
|
Add migration to flush saved tr-55 results
|
Add migration to flush saved tr-55 results
|
Python
|
apache-2.0
|
WikiWatershed/model-my-watershed,WikiWatershed/model-my-watershed,WikiWatershed/model-my-watershed,WikiWatershed/model-my-watershed,WikiWatershed/model-my-watershed
|
Add migration to flush saved tr-55 results
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
def clear_old_scenario_results(apps, schema_editor):
Scenario = apps.get_model('modeling', 'Scenario')
old_scenarios = Scenario.objects.filter(
project__model_package='tr-55'
)
for scenario in old_scenarios:
scenario.results = '[]'
scenario.modification_hash = ''
scenario.save()
class Migration(migrations.Migration):
dependencies = [
('modeling', '0020_old_scenarios'),
]
operations = [
migrations.RunPython(clear_old_scenario_results)
]
|
<commit_before><commit_msg>Add migration to flush saved tr-55 results<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
def clear_old_scenario_results(apps, schema_editor):
Scenario = apps.get_model('modeling', 'Scenario')
old_scenarios = Scenario.objects.filter(
project__model_package='tr-55'
)
for scenario in old_scenarios:
scenario.results = '[]'
scenario.modification_hash = ''
scenario.save()
class Migration(migrations.Migration):
dependencies = [
('modeling', '0020_old_scenarios'),
]
operations = [
migrations.RunPython(clear_old_scenario_results)
]
|
Add migration to flush saved tr-55 results
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
def clear_old_scenario_results(apps, schema_editor):
Scenario = apps.get_model('modeling', 'Scenario')
old_scenarios = Scenario.objects.filter(
project__model_package='tr-55'
)
for scenario in old_scenarios:
scenario.results = '[]'
scenario.modification_hash = ''
scenario.save()
class Migration(migrations.Migration):
dependencies = [
('modeling', '0020_old_scenarios'),
]
operations = [
migrations.RunPython(clear_old_scenario_results)
]
|
<commit_before><commit_msg>Add migration to flush saved tr-55 results<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
def clear_old_scenario_results(apps, schema_editor):
Scenario = apps.get_model('modeling', 'Scenario')
old_scenarios = Scenario.objects.filter(
project__model_package='tr-55'
)
for scenario in old_scenarios:
scenario.results = '[]'
scenario.modification_hash = ''
scenario.save()
class Migration(migrations.Migration):
dependencies = [
('modeling', '0020_old_scenarios'),
]
operations = [
migrations.RunPython(clear_old_scenario_results)
]
|
|
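One consequence of the migration above: RunPython with a single callable has no reverse step, so the migration cannot be unapplied. If rollbacks mattered, Django's documented RunPython.noop (available since 1.8) makes that explicit — a sketch reusing the function defined above:

from django.db import migrations

# Passing RunPython.noop as reverse_code lets `migrate modeling 0020` succeed
# without trying to restore the flushed results.
operations = [
    migrations.RunPython(clear_old_scenario_results, migrations.RunPython.noop),
]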
0acecb5dd4ee96136b9929a9b1adef953a1a5a63
|
unittest-build.py
|
unittest-build.py
|
#!/usr/bin/python
import sys
import os
if sys.platform == 'win32':
os.system ( 'devenv /build libSequantoAutomation.sln' )
else:
os.system ( 'make' )
|
Add python script to build the project using the platform's standard builder (used when we do automatic unit-testing using Bitten at Sequanto)
|
Add python script to build the project using the platform's standard builder (used when we do automatic unit-testing using Bitten at Sequanto)
|
Python
|
apache-2.0
|
seqzap/sequanto-automation,micronpn/sequanto-automation,seqzap/sequanto-automation,seqzap/sequanto-automation,seqzap/sequanto-automation,micronpn/sequanto-automation,rasmus-toftdahl-olesen/sequanto-automation,micronpn/sequanto-automation,rasmus-toftdahl-olesen/sequanto-automation,rasmus-toftdahl-olesen/sequanto-automation,rasmus-toftdahl-olesen/sequanto-automation,micronpn/sequanto-automation,micronpn/sequanto-automation,seqzap/sequanto-automation,rasmus-toftdahl-olesen/sequanto-automation
|
Add python script to build the project using the platform's standard builder (used when we do automatic unit-testing using Bitten at Sequanto)
|
#!/usr/bin/python
import sys
import os
if sys.platform == 'win32':
os.system ( 'devenv /build libSequantoAutomation.sln' )
else:
os.system ( 'make' )
|
<commit_before><commit_msg>Add python script to build the project using the platform's standard builder (used when we do automatic unit-testing using Bitten at Sequanto)<commit_after>
|
#!/usr/bin/python
import sys
import os
if sys.platform == 'win32':
os.system ( 'devenv /build libSequantoAutomation.sln' )
else:
os.system ( 'make' )
|
Add python script to build the project using the platform's standard builder (used when we do automatic unit-testing using Bitten at Sequanto)
#!/usr/bin/python
import sys
import os
if sys.platform == 'win32':
os.system ( 'devenv /build libSequantoAutomation.sln' )
else:
os.system ( 'make' )
|
<commit_before><commit_msg>Add python script to build the project using the platform's standard builder (used when we do automatic unit-testing using Bitten at Sequanto)<commit_after>#!/usr/bin/python
import sys
import os
if sys.platform == 'win32':
os.system ( 'devenv /build libSequantoAutomation.sln' )
else:
os.system ( 'make' )
|
|
9b05d4fab047be287dcd7c872e6a2a706a06e2f6
|
libs/build_client.py
|
libs/build_client.py
|
#
# Build Script by Moloch
# Requires Py2Exe for Python 2.7
#
from distutils.core import setup
import py2exe, sys, os
sys.argv.append('py2exe')
setup(
options = {'py2exe': {'bundle_files': 1,'compressed': 1, 'optimize': 2}},
windows = [{'script': 'RtbClient.py', 'uac_info': 'requireAdministrator', 'icon_resources': [(1, 'rtb.ico')] }],
zipfile = None,
)
|
Add build script for client
|
Add build script for client
|
Python
|
apache-2.0
|
Richard-West/RootTheBox,Und3rf10w/RootTheBox,Richard-West/RootTheBox,Richard-West/RootTheBox,sigma-random/RootTheBox,shanshar06/RootTheBox,Und3rf10w/RootTheBox,sigma-random/RootTheBox,shanshar06/RootTheBox,moloch--/RootTheBox,Richard-West/RootTheBox,moloch--/RootTheBox,Und3rf10w/RootTheBox,sigma-random/RootTheBox,shanshar06/RootTheBox,Und3rf10w/RootTheBox,remi1411/RootTheBox,moloch--/RootTheBox,remi1411/RootTheBox,remi1411/RootTheBox,moloch--/RootTheBox,shanshar06/RootTheBox,sigma-random/RootTheBox,remi1411/RootTheBox
|
Add build script for client
|
#
# Build Script by Moloch
# Requires Py2Exe for Python 2.7
#
from distutils.core import setup
import py2exe, sys, os
sys.argv.append('py2exe')
setup(
options = {'py2exe': {'bundle_files': 1,'compressed': 1, 'optimize': 2}},
windows = [{'script': 'RtbClient.py', 'uac_info': 'requireAdministrator', 'icon_resources': [(1, 'rtb.ico')] }],
zipfile = None,
)
|
<commit_before><commit_msg>Add build script for client<commit_after>
|
#
# Build Script by Moloch
# Requires Py2Exe for Python 2.7
#
from distutils.core import setup
import py2exe, sys, os
sys.argv.append('py2exe')
setup(
options = {'py2exe': {'bundle_files': 1,'compressed': 1, 'optimize': 2}},
windows = [{'script': 'RtbClient.py', 'uac_info': 'requireAdministrator', 'icon_resources': [(1, 'rtb.ico')] }],
zipfile = None,
)
|
Add build script for client
#
# Build Script by Moloch
# Requires Py2Exe for Python 2.7
#
from distutils.core import setup
import py2exe, sys, os
sys.argv.append('py2exe')
setup(
options = {'py2exe': {'bundle_files': 1,'compressed': 1, 'optimize': 2}},
windows = [{'script': 'RtbClient.py', 'uac_info': 'requireAdministrator', 'icon_resources': [(1, 'rtb.ico')] }],
zipfile = None,
)
|
<commit_before><commit_msg>Add build script for client<commit_after>#
# Build Script by Moloch
# Requires Py2Exe for Python 2.7
#
from distutils.core import setup
import py2exe, sys, os
sys.argv.append('py2exe')
setup(
options = {'py2exe': {'bundle_files': 1,'compressed': 1, 'optimize': 2}},
windows = [{'script': 'RtbClient.py', 'uac_info': 'requireAdministrator', 'icon_resources': [(1, 'rtb.ico')] }],
zipfile = None,
)
|
|
fd059a3f1a971706a45d9ac1f2cc20f73825a520
|
website/management/commands/jd_pre.py
|
website/management/commands/jd_pre.py
|
from django.core.management.base import BaseCommand
from django.contrib.sites.models import Site
class Command(BaseCommand):
def handle(self, *args, **options):
site = Site.objects.get()
site.domain = 'website.jongedemocraten.nl'
site.name = 'Landelijk'
site.save()
|
Add management script to rename the default site
|
Add management script to rename the default site
Django 1.8 automatically names the default website instead of asking
for the domain name it should use. Our import script depends on
setting the domain name of the default site before importing content.
This script can be run to set the domain and name of the default site
appropriately.
|
Python
|
mit
|
jonge-democraten/website,jonge-democraten/website,jonge-democraten/website,jonge-democraten/website
|
Add management script to rename the default site
Django 1.8 automatically names the default website instead of asking
for the domain name it should use. Our import script depends on
setting the domain name of the default site before importing content.
This script can be run to set the domain and name of the default site
appropriately.
|
from django.core.management.base import BaseCommand
from django.contrib.sites.models import Site
class Command(BaseCommand):
def handle(self, *args, **options):
site = Site.objects.get()
site.domain = 'website.jongedemocraten.nl'
site.name = 'Landelijk'
site.save()
|
<commit_before><commit_msg>Add management script to rename the default site
Django 1.8 automatically names the default website instead of asking
for the domain name it should use. Our import script depends on
setting the domain name of the default site before importing content.
This script can be run to set the domain and name of the default site
appropriately.<commit_after>
|
from django.core.management.base import BaseCommand
from django.contrib.sites.models import Site
class Command(BaseCommand):
def handle(self, *args, **options):
site = Site.objects.get()
site.domain = 'website.jongedemocraten.nl'
site.name = 'Landelijk'
site.save()
|
Add management script to rename the default site
Django 1.8 automatically names the default website instead of asking
for the domain name it should use. Our import script depends on
setting the domain name of the default site before importing content.
This script can be run to set the domain and name of the default site
appropriately.
from django.core.management.base import BaseCommand
from django.contrib.sites.models import Site
class Command(BaseCommand):
def handle(self, *args, **options):
site = Site.objects.get()
site.domain = 'website.jongedemocraten.nl'
site.name = 'Landelijk'
site.save()
|
<commit_before><commit_msg>Add management script to rename the default site
Django 1.8 automatically names the default website instead of asking
for the domain name it should use. Our import script depends on
setting the domain name of the default site before importing content.
This script can be run to set the domain and name of the default site
appropriately.<commit_after>from django.core.management.base import BaseCommand
from django.contrib.sites.models import Site
class Command(BaseCommand):
def handle(self, *args, **options):
site = Site.objects.get()
site.domain = 'website.jongedemocraten.nl'
site.name = 'Landelijk'
site.save()
|
|
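Django discovers management commands by filename, so the module above registers as 'jd_pre'. In the import workflow the message describes, it would run first, either via manage.py or programmatically — a sketch:

from django.core.management import call_command

# Equivalent to `python manage.py jd_pre`; run before importing content so the
# imported objects resolve against the renamed default Site.
call_command('jd_pre')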
44b10d1f941aeed155b869588d4d732fe0881ede
|
pinger/ext/plugins/stdout.py
|
pinger/ext/plugins/stdout.py
|
from pinger.ext import ActionProvider
class StdOut(ActionProvider):
"""
Receives a response and prints it
"""
title = 'StdOut'
def receive(self, name, url, status, errors, elapsed):
print 'name={name} url={url} elapsed={elapsed} status={status}'.format(name=name,
url=url,
status=status,
elapsed=elapsed.total_seconds() if elapsed else 'TIMEOUT')
for error in errors:
print 'error={name} message={message} ' \
'expected_result={expected_result} actual_result={actual_result}'.format(**error)
|
Add STDout plugin, fix status representation
|
Add STDout plugin, fix status representation
|
Python
|
mit
|
pedrospdc/pinger
|
Add STDout plugin, fix status representation
|
from pinger.ext import ActionProvider
class StdOut(ActionProvider):
"""
Receives a response and prints it
"""
title = 'StdOut'
def receive(self, name, url, status, errors, elapsed):
print 'name={name} url={url} elapsed={elapsed} status={status}'.format(name=name,
url=url,
status=status,
elapsed=elapsed.total_seconds() if elapsed else 'TIMEOUT')
for error in errors:
print 'error={name} message={message} ' \
'expected_result={expected_result} actual_result={actual_result}'.format(**error)
|
<commit_before><commit_msg>Add STDout plugin, fix status representation<commit_after>
|
from pinger.ext import ActionProvider
class StdOut(ActionProvider):
"""
Receives a response and prints it
"""
title = 'StdOut'
def receive(self, name, url, status, errors, elapsed):
print 'name={name} url={url} elapsed={elapsed} status={status}'.format(name=name,
url=url,
status=status,
elapsed=elapsed.total_seconds() if elapsed else 'TIMEOUT')
for error in errors:
print 'error={name} message={message} ' \
'expected_result={expected_result} actual_result={actual_result}'.format(**error)
|
Add STDout plugin, fix status representation
from pinger.ext import ActionProvider
class StdOut(ActionProvider):
"""
Receives a response and prints it
"""
title = 'StdOut'
def receive(self, name, url, status, errors, elapsed):
print 'name={name} url={url} elapsed={elapsed} status={status}'.format(name=name,
url=url,
status=status,
elapsed=elapsed.total_seconds() if elapsed else 'TIMEOUT')
for error in errors:
print 'error={name} message={message} ' \
'expected_result={expected_result} actual_result={actual_result}'.format(**error)
|
<commit_before><commit_msg>Add STDout plugin, fix status representation<commit_after>from pinger.ext import ActionProvider
class StdOut(ActionProvider):
"""
Receives a response and prints it
"""
title = 'StdOut'
def receive(self, name, url, status, errors, elapsed):
print 'name={name} url={url} elapsed={elapsed} status={status}'.format(name=name,
url=url,
status=status,
elapsed=elapsed.total_seconds() if elapsed else 'TIMEOUT')
for error in errors:
print 'error={name} message={message} ' \
'expected_result={expected_result} actual_result={actual_result}'.format(**error)
|
|
2cbfce0e8620792a7fb8c93711ab6163544213ec
|
apps/curia_vista/management/commands/update_cantons.py
|
apps/curia_vista/management/commands/update_cantons.py
|
from xml.etree import ElementTree
from django.core.management.base import BaseCommand, CommandError
from django.db import transaction
import requests
from apps.curia_vista.models import *
class Command(BaseCommand):
help = 'Import cantons from parlament.ch'
@transaction.atomic
def handle(self, *args, **options):
source = 'http://ws.parlament.ch/cantons?format=xml&lang=de'
headers = {'User-Agent': 'Mozilla'}
self.stdout.write("Importing: {}".format(source))
try:
response = requests.get(source, headers=headers)
except Exception as e:
raise CommandError("Could not fetch file from {}".format(source))
cantons = ElementTree.fromstring(response.content)
if not cantons:
raise CommandError("Not a valid XML file: {}".format(source))
for canton in cantons:
canton_id = canton.find('id').text
canton_updated = canton.find('updated').text
canton_code = canton.find('code').text
canton_model, created = Canton.objects.update_or_create(id=canton_id, defaults={'updated': canton_updated,
'code': canton_code})
canton_model.full_clean()
canton_model.save()
print(canton_model)
|
Add import script for cantons data
|
Add import script for cantons data
|
Python
|
agpl-3.0
|
rettichschnidi/politkarma,rettichschnidi/politkarma,rettichschnidi/politkarma,rettichschnidi/politkarma
|
Add import script for cantons data
|
from xml.etree import ElementTree
from django.core.management.base import BaseCommand, CommandError
from django.db import transaction
import requests
from apps.curia_vista.models import *
class Command(BaseCommand):
help = 'Import cantons from parlament.ch'
@transaction.atomic
def handle(self, *args, **options):
source = 'http://ws.parlament.ch/cantons?format=xml&lang=de'
headers = {'User-Agent': 'Mozilla'}
self.stdout.write("Importing: {}".format(source))
try:
response = requests.get(source, headers=headers)
except Exception as e:
raise CommandError("Could not fetch file from {}".format(source))
cantons = ElementTree.fromstring(response.content)
if not cantons:
raise CommandError("Not a valid XML file: {}".format(source))
for canton in cantons:
canton_id = canton.find('id').text
canton_updated = canton.find('updated').text
canton_code = canton.find('code').text
canton_model, created = Canton.objects.update_or_create(id=canton_id, defaults={'updated': canton_updated,
'code': canton_code})
canton_model.full_clean()
canton_model.save()
print(canton_model)
|
<commit_before><commit_msg>Add import script for cantons data<commit_after>
|
from xml.etree import ElementTree
from django.core.management.base import BaseCommand, CommandError
from django.db import transaction
import requests
from apps.curia_vista.models import *
class Command(BaseCommand):
help = 'Import cantons from parlament.ch'
@transaction.atomic
def handle(self, *args, **options):
source = 'http://ws.parlament.ch/cantons?format=xml&lang=de'
headers = {'User-Agent': 'Mozilla'}
self.stdout.write("Importing: {}".format(source))
try:
response = requests.get(source, headers=headers)
except Exception as e:
raise CommandError("Could not fetch file from {}".format(source))
cantons = ElementTree.fromstring(response.content)
if not cantons:
raise CommandError("Not a valid XML file: {}".format(source))
for canton in cantons:
canton_id = canton.find('id').text
canton_updated = canton.find('updated').text
canton_code = canton.find('code').text
canton_model, created = Canton.objects.update_or_create(id=canton_id, defaults={'updated': canton_updated,
'code': canton_code})
canton_model.full_clean()
canton_model.save()
print(canton_model)
|
Add import script for cantons data
from xml.etree import ElementTree
from django.core.management.base import BaseCommand, CommandError
from django.db import transaction
import requests
from apps.curia_vista.models import *
class Command(BaseCommand):
help = 'Import cantons from parlament.ch'
@transaction.atomic
def handle(self, *args, **options):
source = 'http://ws.parlament.ch/cantons?format=xml&lang=de'
headers = {'User-Agent': 'Mozilla'}
self.stdout.write("Importing: {}".format(source))
try:
response = requests.get(source, headers=headers)
except Exception as e:
raise CommandError("Could not fetch file from {}".format(source))
cantons = ElementTree.fromstring(response.content)
if not cantons:
raise CommandError("Not a valid XML file: {}".format(source))
for canton in cantons:
canton_id = canton.find('id').text
canton_updated = canton.find('updated').text
canton_code = canton.find('code').text
canton_model, created = Canton.objects.update_or_create(id=canton_id, defaults={'updated': canton_updated,
'code': canton_code})
canton_model.full_clean()
canton_model.save()
print(canton_model)
|
<commit_before><commit_msg>Add import script for cantons data<commit_after>from xml.etree import ElementTree
from django.core.management.base import BaseCommand, CommandError
from django.db import transaction
import requests
from apps.curia_vista.models import *
class Command(BaseCommand):
help = 'Import cantons from parlament.ch'
@transaction.atomic
def handle(self, *args, **options):
source = 'http://ws.parlament.ch/cantons?format=xml&lang=de'
headers = {'User-Agent': 'Mozilla'}
self.stdout.write("Importing: {}".format(source))
try:
response = requests.get(source, headers=headers)
except Exception as e:
raise CommandError("Could not fetch file from {}".format(source))
cantons = ElementTree.fromstring(response.content)
if not cantons:
raise CommandError("Not a valid XML file: {}".format(source))
for canton in cantons:
canton_id = canton.find('id').text
canton_updated = canton.find('updated').text
canton_code = canton.find('code').text
canton_model, created = Canton.objects.update_or_create(id=canton_id, defaults={'updated': canton_updated,
'code': canton_code})
canton_model.full_clean()
canton_model.save()
print(canton_model)
|
|
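A subtlety in the command above: `if not cantons:` never catches invalid XML, because ElementTree.fromstring raises ParseError rather than returning a falsy value — and a perfectly valid Element is falsy whenever it has no children. The standard-library behaviour in isolation:

from xml.etree import ElementTree

empty_root = ElementTree.fromstring('<cantons/>')
assert not empty_root       # valid XML, but falsy: the element has no children

try:
    ElementTree.fromstring('not xml at all')
except ElementTree.ParseError:
    pass                    # malformed input raises instead of returning falsy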
3fcb449325713fa9bfffe737b09dab333675766b
|
gnowsys-ndf/gnowsys_ndf/ndf/management/commands/update_attribute_type.py
|
gnowsys-ndf/gnowsys_ndf/ndf/management/commands/update_attribute_type.py
|
''' imports from installed packages '''
from django.core.management.base import BaseCommand, CommandError
from django_mongokit import get_database
try:
from bson import ObjectId
except ImportError: # old pymongo
from pymongo.objectid import ObjectId
''' imports from application folders/files '''
from gnowsys_ndf.ndf.models import Node, Triple, HistoryManager
from gnowsys_ndf.ndf.rcslib import RCS
####################################################################################################################
class Command(BaseCommand):
    """This update-script updates attribute_type field's value (of all GAttribute instances) from ObjectId to its corresponding DBRef; and also creates its version-file (history) in rcs-repo.
    """
    help = "\tThis update-script updates attribute_type field's value (of all GAttribute instances) from ObjectId to its corresponding DBRef; and also creates its version-file (history) in rcs-repo."
def handle(self, *args, **options):
history_manager = HistoryManager()
rcs_obj = RCS()
collection = get_database()[Triple.collection_name]
cur = collection.Triple.find( {'_type': 'GAttribute'} )
for n in cur:
if type(n.attribute_type) == ObjectId:
                # Retrieving the attribute-type document (or object) from its ObjectId stored in GAttribute's attribute_type field
attr_type = collection.Node.one( {'_id': n.attribute_type} )
# Replacing GAttribute's attribute_type field value from ObjectId to DBRef
n.attribute_type = attr_type
subject_doc = collection.Node.one({'_id': n.subject})
n.name = subject_doc.name + " -- " + n.attribute_type['name'] + " -- " + n.object_value
if history_manager.create_or_replace_json_file(n):
fp = history_manager.get_file_path(n)
message = "This document (" + n.name + ") is created on " + subject_doc.created_at.strftime("%d %B %Y")
rcs_obj.checkin(fp, 1, message.encode('utf-8'), "-i")
n.save()
# --- End of handle() ---
|
Update script for updating attribute_type field of GAttribute instances from ObjectId to DBRef (AttributeType)
|
Update script for updating attribute_type field of GAttribute instances from ObjectId to DBRef (AttributeType)
|
Python
|
agpl-3.0
|
AvadootNachankar/gstudio,supriyasawant/gstudio,gnowledge/gstudio,gnowledge/gstudio,olympian94/gstudio,sunnychaudhari/gstudio,makfire/gstudio,olympian94/gstudio,AvadootNachankar/gstudio,gnowledge/gstudio,makfire/gstudio,AvadootNachankar/gstudio,supriyasawant/gstudio,sunnychaudhari/gstudio,Dhiru/gstudio,makfire/gstudio,supriyasawant/gstudio,gnowledge/gstudio,gnowledge/gstudio,supriyasawant/gstudio,olympian94/gstudio,Dhiru/gstudio,Dhiru/gstudio,Dhiru/gstudio,olympian94/gstudio,olympian94/gstudio,AvadootNachankar/gstudio,sunnychaudhari/gstudio,makfire/gstudio,olympian94/gstudio,sunnychaudhari/gstudio
|
Update script for updating attribute_type field of GAttribute instances from ObjectId to DBRef (AttributeType)
|
''' imports from installed packages '''
from django.core.management.base import BaseCommand, CommandError
from django_mongokit import get_database
try:
from bson import ObjectId
except ImportError: # old pymongo
from pymongo.objectid import ObjectId
''' imports from application folders/files '''
from gnowsys_ndf.ndf.models import Node, Triple, HistoryManager
from gnowsys_ndf.ndf.rcslib import RCS
####################################################################################################################
class Command(BaseCommand):
    """This update-script updates attribute_type field's value (of all GAttribute instances) from ObjectId to its corresponding DBRef; and also creates its version-file (history) in rcs-repo.
    """
    help = "\tThis update-script updates attribute_type field's value (of all GAttribute instances) from ObjectId to its corresponding DBRef; and also creates its version-file (history) in rcs-repo."
def handle(self, *args, **options):
history_manager = HistoryManager()
rcs_obj = RCS()
collection = get_database()[Triple.collection_name]
cur = collection.Triple.find( {'_type': 'GAttribute'} )
for n in cur:
if type(n.attribute_type) == ObjectId:
                # Retrieving the attribute-type document (or object) from its ObjectId stored in GAttribute's attribute_type field
attr_type = collection.Node.one( {'_id': n.attribute_type} )
# Replacing GAttribute's attribute_type field value from ObjectId to DBRef
n.attribute_type = attr_type
subject_doc = collection.Node.one({'_id': n.subject})
n.name = subject_doc.name + " -- " + n.attribute_type['name'] + " -- " + n.object_value
if history_manager.create_or_replace_json_file(n):
fp = history_manager.get_file_path(n)
message = "This document (" + n.name + ") is created on " + subject_doc.created_at.strftime("%d %B %Y")
rcs_obj.checkin(fp, 1, message.encode('utf-8'), "-i")
n.save()
# --- End of handle() ---
|
<commit_before><commit_msg>Update script for updating attribute_type field of GAttribute instances from ObjectId to DBRef (AttributeType)<commit_after>
|
''' imports from installed packages '''
from django.core.management.base import BaseCommand, CommandError
from django_mongokit import get_database
try:
from bson import ObjectId
except ImportError: # old pymongo
from pymongo.objectid import ObjectId
''' imports from application folders/files '''
from gnowsys_ndf.ndf.models import Node, Triple, HistoryManager
from gnowsys_ndf.ndf.rcslib import RCS
####################################################################################################################
class Command(BaseCommand):
"""This update-script updates attribute_type field's value (of all GAttribute instances) from ObjectId to it's corresponding DBRef; and also creates it's version-file(history) in rcs-repo.
"""
help = "\tThis update-script updates attribute_type field's value (of all GAttribute instances) from ObjectId to it's corresponding DBRef; and also creates it's version-file(history) in rcs-repo."
def handle(self, *args, **options):
history_manager = HistoryManager()
rcs_obj = RCS()
collection = get_database()[Triple.collection_name]
cur = collection.Triple.find( {'_type': 'GAttribute'} )
for n in cur:
if type(n.attribute_type) == ObjectId:
# Retrieving attribute-type document(or object) from it's ObjectId stored in GAttribute's attribute_type field
attr_type = collection.Node.one( {'_id': n.attribute_type} )
# Replacing GAttribute's attribute_type field value from ObjectId to DBRef
n.attribute_type = attr_type
subject_doc = collection.Node.one({'_id': n.subject})
n.name = subject_doc.name + " -- " + n.attribute_type['name'] + " -- " + n.object_value
if history_manager.create_or_replace_json_file(n):
fp = history_manager.get_file_path(n)
message = "This document (" + n.name + ") is created on " + subject_doc.created_at.strftime("%d %B %Y")
rcs_obj.checkin(fp, 1, message.encode('utf-8'), "-i")
n.save()
# --- End of handle() ---
|
Update script for updating attribute_type field of GAttribute instances from ObjectId to DBRef (AttributeType)''' imports from installed packages '''
from django.core.management.base import BaseCommand, CommandError
from django_mongokit import get_database
try:
from bson import ObjectId
except ImportError: # old pymongo
from pymongo.objectid import ObjectId
''' imports from application folders/files '''
from gnowsys_ndf.ndf.models import Node, Triple, HistoryManager
from gnowsys_ndf.ndf.rcslib import RCS
####################################################################################################################
class Command(BaseCommand):
"""This update-script updates attribute_type field's value (of all GAttribute instances) from ObjectId to it's corresponding DBRef; and also creates it's version-file(history) in rcs-repo.
"""
help = "\tThis update-script updates attribute_type field's value (of all GAttribute instances) from ObjectId to it's corresponding DBRef; and also creates it's version-file(history) in rcs-repo."
def handle(self, *args, **options):
history_manager = HistoryManager()
rcs_obj = RCS()
collection = get_database()[Triple.collection_name]
cur = collection.Triple.find( {'_type': 'GAttribute'} )
for n in cur:
if type(n.attribute_type) == ObjectId:
# Retrieving attribute-type document(or object) from it's ObjectId stored in GAttribute's attribute_type field
attr_type = collection.Node.one( {'_id': n.attribute_type} )
# Replacing GAttribute's attribute_type field value from ObjectId to DBRef
n.attribute_type = attr_type
subject_doc = collection.Node.one({'_id': n.subject})
n.name = subject_doc.name + " -- " + n.attribute_type['name'] + " -- " + n.object_value
if history_manager.create_or_replace_json_file(n):
fp = history_manager.get_file_path(n)
message = "This document (" + n.name + ") is created on " + subject_doc.created_at.strftime("%d %B %Y")
rcs_obj.checkin(fp, 1, message.encode('utf-8'), "-i")
n.save()
# --- End of handle() ---
|
<commit_before><commit_msg>Update script for updating attribute_type field of GAttribute instances from ObjectId to DBRef (AttributeType)<commit_after>''' imports from installed packages '''
from django.core.management.base import BaseCommand, CommandError
from django_mongokit import get_database
try:
from bson import ObjectId
except ImportError: # old pymongo
from pymongo.objectid import ObjectId
''' imports from application folders/files '''
from gnowsys_ndf.ndf.models import Node, Triple, HistoryManager
from gnowsys_ndf.ndf.rcslib import RCS
####################################################################################################################
class Command(BaseCommand):
"""This update-script updates attribute_type field's value (of all GAttribute instances) from ObjectId to it's corresponding DBRef; and also creates it's version-file(history) in rcs-repo.
"""
help = "\tThis update-script updates attribute_type field's value (of all GAttribute instances) from ObjectId to it's corresponding DBRef; and also creates it's version-file(history) in rcs-repo."
def handle(self, *args, **options):
history_manager = HistoryManager()
rcs_obj = RCS()
collection = get_database()[Triple.collection_name]
cur = collection.Triple.find( {'_type': 'GAttribute'} )
for n in cur:
if type(n.attribute_type) == ObjectId:
# Retrieving attribute-type document(or object) from it's ObjectId stored in GAttribute's attribute_type field
attr_type = collection.Node.one( {'_id': n.attribute_type} )
# Replacing GAttribute's attribute_type field value from ObjectId to DBRef
n.attribute_type = attr_type
subject_doc = collection.Node.one({'_id': n.subject})
n.name = subject_doc.name + " -- " + n.attribute_type['name'] + " -- " + n.object_value
if history_manager.create_or_replace_json_file(n):
fp = history_manager.get_file_path(n)
message = "This document (" + n.name + ") is created on " + subject_doc.created_at.strftime("%d %B %Y")
rcs_obj.checkin(fp, 1, message.encode('utf-8'), "-i")
n.save()
# --- End of handle() ---
|
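The heart of this migration is swapping a raw identifier for the full referenced document. Below is a dependency-free sketch of that swap, using plain dicts and strings in place of MongoDB documents and ObjectIds; all names here are illustrative, not gstudio's real schema.

attribute_types = {"at1": {"_id": "at1", "name": "colour"}}
gattributes = [{"_type": "GAttribute", "attribute_type": "at1", "object_value": "red"}]

for ga in gattributes:
    ref = ga["attribute_type"]
    if isinstance(ref, str):  # stands in for the `type(...) == ObjectId` check
        # embed the referenced document, analogous to replacing the ObjectId with a DBRef
        ga["attribute_type"] = attribute_types[ref]

print(gattributes[0]["attribute_type"]["name"])  # -> colour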
|
7ef782ff8ff684777516a1e76e3a6c2c937e70cd
|
geoip/management/commands/save_ballot_coordinates.py
|
geoip/management/commands/save_ballot_coordinates.py
|
from django.core.management.base import BaseCommand
from geopy.geocoders import get_geocoder_for_service
from geopy.exc import GeocoderQuotaExceeded
from ballot.models import BallotReturned
class Command(BaseCommand):
help = 'Populates the latitude and longitude fields of BallotReturned'
def populate_latitude_for_ballots(self):
for b in BallotReturned.objects.filter(latitude=None).order_by('id'):
full_ballot_address = '{}, {}, {} {}'.format(
b.normalized_line1, b.normalized_city, b.normalized_state, b.normalized_zip)
location = self.google_client.geocode(full_ballot_address)
if location is None:
raise Exception('Could not find a location for ballot {}'.format(b.id))
b.latitude, b.longitude = location.latitude, location.longitude
print('ballot {}, found latitude {}, longitude {}'.format(b.id, b.latitude, b.longitude))
b.save()
def handle(self, *args, **options):
self.google_client = get_geocoder_for_service('google')()
while BallotReturned.objects.filter(latitude=None).exists():
try:
self.populate_latitude_for_ballots()
except GeocoderQuotaExceeded:
self.google_client = get_geocoder_for_service('google')()
print('Success! All BallotReturned objects now have latitude and longitude populated.')
|
Add command for populating latitude and longitude
|
Add command for populating latitude and longitude
|
Python
|
mit
|
wevote/WeVoteServer,wevote/WebAppPublic,jainanisha90/WeVoteServer,wevote/WebAppPublic,jainanisha90/WeVoteServer,wevote/WeVoteServer,wevote/WeVoteServer,jainanisha90/WeVoteServer,wevote/WeVoteServer,wevote/WebAppPublic,wevote/WebAppPublic
|
Add command for populating latitude and longitude
|
from django.core.management.base import BaseCommand
from geopy.geocoders import get_geocoder_for_service
from geopy.exc import GeocoderQuotaExceeded
from ballot.models import BallotReturned
class Command(BaseCommand):
help = 'Populates the latitude and longitude fields of BallotReturned'
def populate_latitude_for_ballots(self):
for b in BallotReturned.objects.filter(latitude=None).order_by('id'):
full_ballot_address = '{}, {}, {} {}'.format(
b.normalized_line1, b.normalized_city, b.normalized_state, b.normalized_zip)
location = self.google_client.geocode(full_ballot_address)
if location is None:
raise Exception('Could not find a location for ballot {}'.format(b.id))
b.latitude, b.longitude = location.latitude, location.longitude
print('ballot {}, found latitude {}, longitude {}'.format(b.id, b.latitude, b.longitude))
b.save()
def handle(self, *args, **options):
self.google_client = get_geocoder_for_service('google')()
while BallotReturned.objects.filter(latitude=None).exists():
try:
self.populate_latitude_for_ballots()
except GeocoderQuotaExceeded:
self.google_client = get_geocoder_for_service('google')()
print('Success! All BallotReturned objects now have latitude and longitude populated.')
|
<commit_before><commit_msg>Add command for populating latitude and longitude<commit_after>
|
from django.core.management.base import BaseCommand
from geopy.geocoders import get_geocoder_for_service
from geopy.exc import GeocoderQuotaExceeded
from ballot.models import BallotReturned
class Command(BaseCommand):
help = 'Populates the latitude and longitude fields of BallotReturned'
def populate_latitude_for_ballots(self):
for b in BallotReturned.objects.filter(latitude=None).order_by('id'):
full_ballot_address = '{}, {}, {} {}'.format(
b.normalized_line1, b.normalized_city, b.normalized_state, b.normalized_zip)
location = self.google_client.geocode(full_ballot_address)
if location is None:
raise Exception('Could not find a location for ballot {}'.format(b.id))
b.latitude, b.longitude = location.latitude, location.longitude
print('ballot {}, found latitude {}, longitude {}'.format(b.id, b.latitude, b.longitude))
b.save()
def handle(self, *args, **options):
self.google_client = get_geocoder_for_service('google')()
while BallotReturned.objects.filter(latitude=None).exists():
try:
self.populate_latitude_for_ballots()
except GeocoderQuotaExceeded:
self.google_client = get_geocoder_for_service('google')()
print('Success! All BallotReturned objects now have latitude and longitude populated.')
|
Add command for populating latitude and longitudefrom django.core.management.base import BaseCommand
from geopy.geocoders import get_geocoder_for_service
from geopy.exc import GeocoderQuotaExceeded
from ballot.models import BallotReturned
class Command(BaseCommand):
help = 'Populates the latitude and longitude fields of BallotReturned'
def populate_latitude_for_ballots(self):
for b in BallotReturned.objects.filter(latitude=None).order_by('id'):
full_ballot_address = '{}, {}, {} {}'.format(
b.normalized_line1, b.normalized_city, b.normalized_state, b.normalized_zip)
location = self.google_client.geocode(full_ballot_address)
if location is None:
raise Exception('Could not find a location for ballot {}'.format(b.id))
b.latitude, b.longitude = location.latitude, location.longitude
print('ballot {}, found latitude {}, longitude {}'.format(b.id, b.latitude, b.longitude))
b.save()
def handle(self, *args, **options):
self.google_client = get_geocoder_for_service('google')()
while BallotReturned.objects.filter(latitude=None).exists():
try:
self.populate_latitude_for_ballots()
except GeocoderQuotaExceeded:
self.google_client = get_geocoder_for_service('google')()
print('Success! All BallotReturned objects now have latitude and longitude populated.')
|
<commit_before><commit_msg>Add command for populating latitude and longitude<commit_after>from django.core.management.base import BaseCommand
from geopy.geocoders import get_geocoder_for_service
from geopy.exc import GeocoderQuotaExceeded
from ballot.models import BallotReturned
class Command(BaseCommand):
help = 'Populates the latitude and longitude fields of BallotReturned'
def populate_latitude_for_ballots(self):
for b in BallotReturned.objects.filter(latitude=None).order_by('id'):
full_ballot_address = '{}, {}, {} {}'.format(
b.normalized_line1, b.normalized_city, b.normalized_state, b.normalized_zip)
location = self.google_client.geocode(full_ballot_address)
if location is None:
raise Exception('Could not find a location for ballot {}'.format(b.id))
b.latitude, b.longitude = location.latitude, location.longitude
print('ballot {}, found latitude {}, longitude {}'.format(b.id, b.latitude, b.longitude))
b.save()
def handle(self, *args, **options):
self.google_client = get_geocoder_for_service('google')()
while BallotReturned.objects.filter(latitude=None).exists():
try:
self.populate_latitude_for_ballots()
except GeocoderQuotaExceeded:
self.google_client = get_geocoder_for_service('google')()
print('Success! All BallotReturned objects now have latitude and longitude populated.')
|
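The while/try loop above is a retry-on-quota pattern: when the geocoder's quota is exhausted, the client is recreated and the loop resumes where the database query left off. A self-contained sketch of the same pattern with a stub geocoder (the stub class and its exception are illustrative, not geopy's API):

class QuotaExceeded(Exception):
    pass

class StubGeocoder:
    def __init__(self):
        self.calls = 0
    def geocode(self, address):
        self.calls += 1
        if self.calls % 3 == 0:  # pretend every third call exhausts the quota
            raise QuotaExceeded(address)
        return 12.34, 56.78  # fixed latitude/longitude for the sketch

pending = ["address %d" % i for i in range(5)]
client = StubGeocoder()
while pending:
    try:
        for address in list(pending):
            lat, lng = client.geocode(address)
            pending.remove(address)
    except QuotaExceeded:
        client = StubGeocoder()  # recreate the client, as the command does
print("all addresses geocoded")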
|
eabbdee7c85c62c93f8f8993041b7f763c36c0dd
|
count_lines.py
|
count_lines.py
|
"""Script to count the lines in a corpus of texts.
This information can be used to normalize the results.
Input: directory containing text files
Output: csv containing <text_id>, <# lines>
Usage: python count_lines.py <dir in> <file out>
"""
import argparse
import glob
import os
import codecs
import pandas as pd
if __name__ == '__main__':
parser = argparse.ArgumentParser()
    parser.add_argument('in_dir', help='the directory containing the'
                        ' text files to count.')
    parser.add_argument('out_file', help='the csv file the line counts'
                        ' should be written to.')
args = parser.parse_args()
in_dir = args.in_dir
out_file = args.out_file
data = {'#lines': []}
index = []
text_files = glob.glob('{}/*.txt'.format(in_dir))
for t in text_files:
text_id = os.path.basename(t).replace('.txt', '')
with codecs.open(t, 'rb', 'utf8') as f:
data['#lines'].append(len(f.readlines()))
index.append(text_id)
df = pd.DataFrame(data=data, index=index)
df.to_csv(out_file)
|
Add script to count the lines in a corpus
|
Add script to count the lines in a corpus
The results are saved to a csv file.
|
Python
|
apache-2.0
|
NLeSC/embodied-emotions-scripts,NLeSC/embodied-emotions-scripts
|
Add script to count the lines in a corpus
The results are saved to a csv file.
|
"""Script to count the lines in a corpus of texts.
This information can be used to normalize the results.
Input: directory containing text files
Output: csv containing <text_id>, <# lines>
Usage: python count_lines.py <dir in> <file out>
"""
import argparse
import glob
import os
import codecs
import pandas as pd
if __name__ == '__main__':
parser = argparse.ArgumentParser()
    parser.add_argument('in_dir', help='the directory containing the'
                        ' text files to count.')
    parser.add_argument('out_file', help='the csv file the line counts'
                        ' should be written to.')
args = parser.parse_args()
in_dir = args.in_dir
out_file = args.out_file
data = {'#lines': []}
index = []
text_files = glob.glob('{}/*.txt'.format(in_dir))
for t in text_files:
text_id = os.path.basename(t).replace('.txt', '')
with codecs.open(t, 'rb', 'utf8') as f:
data['#lines'].append(len(f.readlines()))
index.append(text_id)
df = pd.DataFrame(data=data, index=index)
df.to_csv(out_file)
|
<commit_before><commit_msg>Add script to count the lines in a corpus
The results are saved to a csv file.<commit_after>
|
"""Script to count the lines in a corpus of texts.
This information can be used to normalize the results.
Input: directory containing text files
Output: csv containing <text_id>, <# lines>
Usage: python count_lines.py <dir in> <file out>
"""
import argparse
import glob
import os
import codecs
import pandas as pd
if __name__ == '__main__':
parser = argparse.ArgumentParser()
    parser.add_argument('in_dir', help='the directory containing the'
                        ' text files to count.')
    parser.add_argument('out_file', help='the csv file the line counts'
                        ' should be written to.')
args = parser.parse_args()
in_dir = args.in_dir
out_file = args.out_file
data = {'#lines': []}
index = []
text_files = glob.glob('{}/*.txt'.format(in_dir))
for t in text_files:
text_id = os.path.basename(t).replace('.txt', '')
with codecs.open(t, 'rb', 'utf8') as f:
data['#lines'].append(len(f.readlines()))
index.append(text_id)
df = pd.DataFrame(data=data, index=index)
df.to_csv(out_file)
|
Add script to count the lines in a corpus
The results are saved to a csv file."""Script to count the lines in a corpus of texts.
This information can be used to normalize the results.
Input: directory containing text files
Output: csv containing <text_id>, <# lines>
Usage: python count_lines.py <dir in> <file out>
"""
import argparse
import glob
import os
import codecs
import pandas as pd
if __name__ == '__main__':
parser = argparse.ArgumentParser()
    parser.add_argument('in_dir', help='the directory containing the'
                        ' text files to count.')
    parser.add_argument('out_file', help='the csv file the line counts'
                        ' should be written to.')
args = parser.parse_args()
in_dir = args.in_dir
out_file = args.out_file
data = {'#lines': []}
index = []
text_files = glob.glob('{}/*.txt'.format(in_dir))
for t in text_files:
text_id = os.path.basename(t).replace('.txt', '')
with codecs.open(t, 'rb', 'utf8') as f:
data['#lines'].append(len(f.readlines()))
index.append(text_id)
df = pd.DataFrame(data=data, index=index)
df.to_csv(out_file)
|
<commit_before><commit_msg>Add script to count the lines in a corpus
The results are saved to a csv file.<commit_after>"""Script to count the lines in a corpus of texts.
This information can be used to normalize the results.
Input: directory containing text files
Output: csv containing <text_id>, <# lines>
Usage: python count_lines.py <dir in> <file out>
"""
import argparse
import glob
import os
import codecs
import pandas as pd
if __name__ == '__main__':
parser = argparse.ArgumentParser()
    parser.add_argument('in_dir', help='the directory containing the'
                        ' text files to count.')
    parser.add_argument('out_file', help='the csv file the line counts'
                        ' should be written to.')
args = parser.parse_args()
in_dir = args.in_dir
out_file = args.out_file
data = {'#lines': []}
index = []
text_files = glob.glob('{}/*.txt'.format(in_dir))
for t in text_files:
text_id = os.path.basename(t).replace('.txt', '')
with codecs.open(t, 'rb', 'utf8') as f:
data['#lines'].append(len(f.readlines()))
index.append(text_id)
df = pd.DataFrame(data=data, index=index)
df.to_csv(out_file)
|
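A hypothetical invocation and a quick sanity check of the resulting csv; the paths and file names here are examples and assume the script has already been run on a corpus/ directory:

#   python count_lines.py corpus/ line_counts.csv
import pandas as pd

df = pd.read_csv("line_counts.csv", index_col=0)
print(df["#lines"].sum())     # total lines across the corpus
print(df["#lines"].idxmax())  # text_id of the longest text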
|
034d0871a87b6929656b163c0c2d65ed09f97520
|
bin/convert_file.py
|
bin/convert_file.py
|
import argparse
import os
import sys
import csv
file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(os.path.abspath(os.path.join(file_path, os.pardir)))
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Convert csv file into correct format')
parser.add_argument('input', help='Path of file to fix')
parser.add_argument('log', help='Path of file to write correct format to')
args = parser.parse_args()
with open(args.log, 'wb') as outputfile:
with open(args.input) as inputfile:
writer = csv.writer(outputfile)
reader = csv.DictReader(inputfile)
# for field in reader.fieldnames:
# field = field.strip()
writer.writerow([
'Column', 'AET', 'AST', 'Date',
'House', 'ISSID', 'Name Committee',
'OST', 'PMG Name',
'alt', 'attendance', 'chairperson',
'first_name', 'party_affiliation',
'province', 'surname', 'title'])
for row in reader:
# Clean field names
for key in row.iterkeys():
new_key = key.strip(' \t\n\r')
row[new_key] = row.pop(key)
col = reader.line_num
aet = row['AET']
ast = row['AST']
date = row['Date']
house = row['House']
issid = row['ISSID']
committee = row['Name Committee']
ost = row['OST']
pmg = row['PMG Name']
alt = None
chair = 'TRUE'
province = None
# Write chairperson
writer.writerow([
col, aet, ast, date, house, issid, committee, ost, pmg,
alt, row['Attendance Chair'], chair, row['First Name Chairperson'],
row['Party Affiliation Chairperson'], province,
row['Surname Chairperson'], row['Title Chairperson']
])
chair = 'FALSE'
for i in range(1, 35):
i = str(i)
if row['Attendance Member ' + i]:
try:
writer.writerow([
col, aet, ast, date, house, issid, committee, ost, pmg,
row['Alt (Y or N) Member ' + i], row['Attendance Member ' + i], chair, row['First Name Member ' + i],
row['Party Affiliation Member ' + i], province,
row['Surname Member ' + i], row['Title Member ' + i]
])
except KeyError as e:
import ipdb; ipdb.set_trace()
print e
|
Add script to reshape attendance data
|
Add script to reshape attendance data
|
Python
|
apache-2.0
|
Code4SA/pmg-cms-2,Code4SA/pmg-cms-2,Code4SA/pmg-cms-2
|
Add script to reshape attendance data
|
import argparse
import os
import sys
import csv
file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(os.path.abspath(os.path.join(file_path, os.pardir)))
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Convert csv file into correct format')
parser.add_argument('input', help='Path of file to fix')
parser.add_argument('log', help='Path of file to write correct format to')
args = parser.parse_args()
with open(args.log, 'wb') as outputfile:
with open(args.input) as inputfile:
writer = csv.writer(outputfile)
reader = csv.DictReader(inputfile)
# for field in reader.fieldnames:
# field = field.strip()
writer.writerow([
'Column', 'AET', 'AST', 'Date',
'House', 'ISSID', 'Name Committee',
'OST', 'PMG Name',
'alt', 'attendance', 'chairperson',
'first_name', 'party_affiliation',
'province', 'surname', 'title'])
for row in reader:
# Clean field names
for key in row.iterkeys():
new_key = key.strip(' \t\n\r')
row[new_key] = row.pop(key)
col = reader.line_num
aet = row['AET']
ast = row['AST']
date = row['Date']
house = row['House']
issid = row['ISSID']
committee = row['Name Committee']
ost = row['OST']
pmg = row['PMG Name']
alt = None
chair = 'TRUE'
province = None
# Write chairperson
writer.writerow([
col, aet, ast, date, house, issid, committee, ost, pmg,
alt, row['Attendance Chair'], chair, row['First Name Chairperson'],
row['Party Affiliation Chairperson'], province,
row['Surname Chairperson'], row['Title Chairperson']
])
chair = 'FALSE'
for i in range(1, 35):
i = str(i)
if row['Attendance Member ' + i]:
try:
writer.writerow([
col, aet, ast, date, house, issid, committee, ost, pmg,
row['Alt (Y or N) Member ' + i], row['Attendance Member ' + i], chair, row['First Name Member ' + i],
row['Party Affiliation Member ' + i], province,
row['Surname Member ' + i], row['Title Member ' + i]
])
except KeyError as e:
import ipdb; ipdb.set_trace()
print e
|
<commit_before><commit_msg>Add script to reshape attendance data<commit_after>
|
import argparse
import os
import sys
import csv
file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(os.path.abspath(os.path.join(file_path, os.pardir)))
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Convert csv file into correct format')
parser.add_argument('input', help='Path of file to fix')
parser.add_argument('log', help='Path of file to write correct format to')
args = parser.parse_args()
with open(args.log, 'wb') as outputfile:
with open(args.input) as inputfile:
writer = csv.writer(outputfile)
reader = csv.DictReader(inputfile)
# for field in reader.fieldnames:
# field = field.strip()
writer.writerow([
'Column', 'AET', 'AST', 'Date',
'House', 'ISSID', 'Name Committee',
'OST', 'PMG Name',
'alt', 'attendance', 'chairperson',
'first_name', 'party_affiliation',
'province', 'surname', 'title'])
for row in reader:
# Clean field names
for key in row.iterkeys():
new_key = key.strip(' \t\n\r')
row[new_key] = row.pop(key)
col = reader.line_num
aet = row['AET']
ast = row['AST']
date = row['Date']
house = row['House']
issid = row['ISSID']
committee = row['Name Committee']
ost = row['OST']
pmg = row['PMG Name']
alt = None
chair = 'TRUE'
province = None
# Write chairperson
writer.writerow([
col, aet, ast, date, house, issid, committee, ost, pmg,
alt, row['Attendance Chair'], chair, row['First Name Chairperson'],
row['Party Affiliation Chairperson'], province,
row['Surname Chairperson'], row['Title Chairperson']
])
chair = 'FALSE'
for i in range(1, 35):
i = str(i)
if row['Attendance Member ' + i]:
try:
writer.writerow([
col, aet, ast, date, house, issid, committee, ost, pmg,
row['Alt (Y or N) Member ' + i], row['Attendance Member ' + i], chair, row['First Name Member ' + i],
row['Party Affiliation Member ' + i], province,
row['Surname Member ' + i], row['Title Member ' + i]
])
except KeyError as e:
import ipdb; ipdb.set_trace()
print e
|
Add script to reshape attendance dataimport argparse
import os
import sys
import csv
file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(os.path.abspath(os.path.join(file_path, os.pardir)))
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Convert csv file into correct format')
parser.add_argument('input', help='Path of file to fix')
parser.add_argument('log', help='Path of file to write correct format to')
args = parser.parse_args()
with open(args.log, 'wb') as outputfile:
with open(args.input) as inputfile:
writer = csv.writer(outputfile)
reader = csv.DictReader(inputfile)
# for field in reader.fieldnames:
# field = field.strip()
writer.writerow([
'Column', 'AET', 'AST', 'Date',
'House', 'ISSID', 'Name Committee',
'OST', 'PMG Name',
'alt', 'attendance', 'chairperson',
'first_name', 'party_affiliation',
'province', 'surname', 'title'])
for row in reader:
# Clean field names
for key in row.iterkeys():
new_key = key.strip(' \t\n\r')
row[new_key] = row.pop(key)
col = reader.line_num
aet = row['AET']
ast = row['AST']
date = row['Date']
house = row['House']
issid = row['ISSID']
committee = row['Name Committee']
ost = row['OST']
pmg = row['PMG Name']
alt = None
chair = 'TRUE'
province = None
# Write chairperson
writer.writerow([
col, aet, ast, date, house, issid, committee, ost, pmg,
alt, row['Attendance Chair'], chair, row['First Name Chairperson'],
row['Party Affiliation Chairperson'], province,
row['Surname Chairperson'], row['Title Chairperson']
])
chair = 'FALSE'
for i in range(1, 35):
i = str(i)
if row['Attendance Member ' + i]:
try:
writer.writerow([
col, aet, ast, date, house, issid, committee, ost, pmg,
row['Alt (Y or N) Member ' + i], row['Attendance Member ' + i], chair, row['First Name Member ' + i],
row['Party Affiliation Member ' + i], province,
row['Surname Member ' + i], row['Title Member ' + i]
])
except KeyError as e:
import ipdb; ipdb.set_trace()
print e
|
<commit_before><commit_msg>Add script to reshape attendance data<commit_after>import argparse
import os
import sys
import csv
file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(os.path.abspath(os.path.join(file_path, os.pardir)))
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Convert csv file into correct format')
parser.add_argument('input', help='Path of file to fix')
parser.add_argument('log', help='Path of file to write correct format to')
args = parser.parse_args()
with open(args.log, 'wb') as outputfile:
with open(args.input) as inputfile:
writer = csv.writer(outputfile)
reader = csv.DictReader(inputfile)
# for field in reader.fieldnames:
# field = field.strip()
writer.writerow([
'Column', 'AET', 'AST', 'Date',
'House', 'ISSID', 'Name Committee',
'OST', 'PMG Name',
'alt', 'attendance', 'chairperson',
'first_name', 'party_affiliation',
'province', 'surname', 'title'])
for row in reader:
# Clean field names
for key in row.iterkeys():
new_key = key.strip(' \t\n\r')
row[new_key] = row.pop(key)
col = reader.line_num
aet = row['AET']
ast = row['AST']
date = row['Date']
house = row['House']
issid = row['ISSID']
committee = row['Name Committee']
ost = row['OST']
pmg = row['PMG Name']
alt = None
chair = 'TRUE'
province = None
# Write chairperson
writer.writerow([
col, aet, ast, date, house, issid, committee, ost, pmg,
alt, row['Attendance Chair'], chair, row['First Name Chairperson'],
row['Party Affiliation Chairperson'], province,
row['Surname Chairperson'], row['Title Chairperson']
])
chair = 'FALSE'
for i in range(1, 35):
i = str(i)
if row['Attendance Member ' + i]:
try:
writer.writerow([
col, aet, ast, date, house, issid, committee, ost, pmg,
row['Alt (Y or N) Member ' + i], row['Attendance Member ' + i], chair, row['First Name Member ' + i],
row['Party Affiliation Member ' + i], province,
row['Surname Member ' + i], row['Title Member ' + i]
])
except KeyError as e:
import ipdb; ipdb.set_trace()
print e
|
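The script is a wide-to-long reshape: one input row per meeting becomes one output row per attendee, with the chairperson written first. A miniature version of the same transformation on in-memory csv data, with invented column names for illustration:

import csv
import io

wide = io.StringIO("meeting,chair_name,member_1,member_2\nM1,Alice,Bob,Carol\n")
out = io.StringIO()
writer = csv.writer(out)
writer.writerow(["meeting", "name", "chairperson"])
for row in csv.DictReader(wide):
    writer.writerow([row["meeting"], row["chair_name"], "TRUE"])   # chair row first
    for i in range(1, 3):
        name = row.get("member_%d" % i)
        if name:                                                   # skip empty member slots
            writer.writerow([row["meeting"], name, "FALSE"])
print(out.getvalue())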
|
786fbfe7548b887141d6a3961cc85d8d770c02d6
|
Sample/Python/Sample.py
|
Sample/Python/Sample.py
|
import ctypes
import os
# Error
T_ERROR_SUCCESS = 0 # Success
T_ERROR_EMPTY = 1 # Empty expression
T_ERROR_DIVZERO = 2 # Division by zero
T_ERROR_TOKEN = 3 # Unknown token type
T_ERROR_FUNC = 4 # Math function not found
T_ERROR_FUNC_FACTORIAL = 5 # Argument to factorial must be positive
T_ERROR_BRACKETS = 6 # Unbalanced round brackets
hDll = ctypes.windll.LoadLibrary("..\\..\\ExprSolver.dll")
lpszExpr = "(1*2)*(6^5)+sin(30)+cos(2)*int(4.6)"
ExprTestFunc = hDll["ExprSolver_Test"]
ExprTestFunc.restype = ctypes.c_double
dResult = ExprTestFunc(ctypes.c_char_p(lpszExpr))
if hDll.ExprSolver_GetLastError() == 0:
print "Test: " + str(dResult)
else:
print "Error"
os.system("pause")
|
Create sample file for Python
|
Create sample file for Python
|
Python
|
mit
|
rchockxm/ExprSolver,rchockxm/ExprSolver,rchockxm/ExprSolver
|
Create sample file for Python
|
import ctypes
import os
# Error
T_ERROR_SUCCESS = 0 # Success
T_ERROR_EMPTY = 1 # Empty expression
T_ERROR_DIVZERO = 2 # Division by zero
T_ERROR_TOKEN = 3 # Unknown token type
T_ERROR_FUNC = 4 # Math function not found
T_ERROR_FUNC_FACTORIAL = 5 # Argument to factorial must be positive
T_ERROR_BRACKETS = 6 # Unbalanced round brackets
hDll = ctypes.windll.LoadLibrary("..\\..\\ExprSolver.dll")
lpszExpr = "(1*2)*(6^5)+sin(30)+cos(2)*int(4.6)"
ExprTestFunc = hDll["ExprSolver_Test"]
ExprTestFunc.restype = ctypes.c_double
dResult = ExprTestFunc(ctypes.c_char_p(lpszExpr))
if hDll.ExprSolver_GetLastError() == 0:
print "Test: " + str(dResult)
else:
print "Error"
os.system("pause")
|
<commit_before><commit_msg>Create sample file for Python<commit_after>
|
import ctypes
import os
# Error
T_ERROR_SUCCESS = 0 # Success
T_ERROR_EMPTY = 1 # Empty expression
T_ERROR_DIVZERO = 2 # Division by zero
T_ERROR_TOKEN = 3 # Unknown token type
T_ERROR_FUNC = 4 # Math function not found
T_ERROR_FUNC_FACTORIAL = 5 # Argument to factorial must be positive
T_ERROR_BRACKETS = 6 # Unbalanced round brackets
hDll = ctypes.windll.LoadLibrary("..\\..\\ExprSolver.dll")
lpszExpr = "(1*2)*(6^5)+sin(30)+cos(2)*int(4.6)"
ExprTestFunc = hDll["ExprSolver_Test"]
ExprTestFunc.restype = ctypes.c_double
dResult = ExprTestFunc(ctypes.c_char_p(lpszExpr))
if hDll.ExprSolver_GetLastError() == 0:
print "Test: " + str(dResult)
else:
print "Error"
os.system("pause")
|
Create sample file for Pythonimport ctypes
import os
# Error
T_ERROR_SUCCESS = 0 # Success
T_ERROR_EMPTY = 1 # Empty expression
T_ERROR_DIVZERO = 2 # Division by zero
T_ERROR_TOKEN = 3 # Unknown token type
T_ERROR_FUNC = 4 # Math function not found
T_ERROR_FUNC_FACTORIAL = 5 # Argument to factorial must be positive
T_ERROR_BRACKETS = 6 # Unbalanced round brackets
hDll = ctypes.windll.LoadLibrary("..\\..\\ExprSolver.dll")
lpszExpr = "(1*2)*(6^5)+sin(30)+cos(2)*int(4.6)"
ExprTestFunc = hDll["ExprSolver_Test"]
ExprTestFunc.restype = ctypes.c_double
dResult = ExprTestFunc(ctypes.c_char_p(lpszExpr))
if hDll.ExprSolver_GetLastError() == 0:
print "Test: " + str(dResult)
else:
print "Error"
os.system("pause")
|
<commit_before><commit_msg>Create sample file for Python<commit_after>import ctypes
import os
# Error
T_ERROR_SUCCESS = 0 # Success
T_ERROR_EMPTY = 1 # Empty expression
T_ERROR_DIVZERO = 2 # Division by zero
T_ERROR_TOKEN = 3 # Unknown token type
T_ERROR_FUNC = 4 # Math function not found
T_ERROR_FUNC_FACTORIAL = 5 # Argument to factorial must be positive
T_ERROR_BRACKETS = 6 # Unbalanced round brackets
hDll = ctypes.windll.LoadLibrary("..\\..\\ExprSolver.dll")
lpszExpr = "(1*2)*(6^5)+sin(30)+cos(2)*int(4.6)"
ExprTestFunc = hDll["ExprSolver_Test"]
ExprTestFunc.restype = ctypes.c_double
dResult = ExprTestFunc(ctypes.c_char_p(lpszExpr))
if hDll.ExprSolver_GetLastError() == 0:
print "Test: " + str(dResult)
else:
print "Error"
os.system("pause")
|
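The same ctypes recipe — load a shared library, declare the return type, call — works against any C library. A portable variant that runs without ExprSolver.dll by calling the C math library instead:

import ctypes
import ctypes.util

# find_library("m") locates libm on POSIX; msvcrt exposes cos on Windows
libm = ctypes.CDLL(ctypes.util.find_library("m") or "msvcrt")
libm.cos.restype = ctypes.c_double
libm.cos.argtypes = [ctypes.c_double]
print(libm.cos(0.0))  # 1.0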
|
3b0d4279aa00dd1c34a617ba40ba1c3700c9b0c7
|
Cura/Event.py
|
Cura/Event.py
|
class Event:
MousePressEvent = 1
MouseMoveEvent = 2
MouseReleaseEvent = 3
KeyPressEvent = 4
KeyReleaseEvent = 5
SceneChangeEvent = 6
ToolActivateEvent = 7
ToolDeactivateEvent = 8
def __init__(self, type):
super().__init__()
self._type = type
@property
def type(self):
return self._type
class MouseEvent(Event):
LeftButton = "left"
RightButton = "right"
MiddleButton = "middle"
def __init__(self, type, x = 0, y = 0, lastX = None, lastY = None, buttons = []):
super().__init__(type)
self._x = x
self._y = y
self._lastX = lastX
self._lastY = lastY
self._buttons = buttons
@property
def x(self):
return self._x
@property
def y(self):
return self._y
@property
def lastX(self):
return self._lastX
@property
def lastY(self):
return self._lastY
@property
def deltaX(self):
if self._lastX != None:
return self._x - self._lastX
return 0
@property
def deltaY(self):
if self._lastY != None:
return self._y - self._lastY
return 0
@property
def buttons(self):
return self._buttons
class KeyEvent(Event):
def __init__(self, type, key):
super().__init__(type)
self._key = key
@property
def key(self):
return self._key
class SceneChangeEvent(Event):
def __init__(self):
super().__init__(self.SceneChangeEvent)
class ToolEvent(Event):
def __init__(self, type):
super().__init__(type)
|
Add an event class and several standard subclasses
|
Add an event class and several standard subclasses
|
Python
|
agpl-3.0
|
onitake/Uranium,onitake/Uranium
|
Add an event class and several standard subclasses
|
class Event:
MousePressEvent = 1
MouseMoveEvent = 2
MouseReleaseEvent = 3
KeyPressEvent = 4
KeyReleaseEvent = 5
SceneChangeEvent = 6
ToolActivateEvent = 7
ToolDeactivateEvent = 8
def __init__(self, type):
super().__init__()
self._type = type
@property
def type(self):
return self._type
class MouseEvent(Event):
LeftButton = "left"
RightButton = "right"
MiddleButton = "middle"
def __init__(self, type, x = 0, y = 0, lastX = None, lastY = None, buttons = []):
super().__init__(type)
self._x = x
self._y = y
self._lastX = lastX
self._lastY = lastY
self._buttons = buttons
@property
def x(self):
return self._x
@property
def y(self):
return self._y
@property
def lastX(self):
return self._lastX
@property
def lastY(self):
return self._lastY
@property
def deltaX(self):
if self._lastX != None:
return self._x - self._lastX
return 0
@property
def deltaY(self):
if self._lastY != None:
return self._y - self._lastY
return 0
@property
def buttons(self):
return self._buttons
class KeyEvent(Event):
def __init__(self, type, key):
super().__init__(type)
self._key = key
@property
def key(self):
return self._key
class SceneChangeEvent(Event):
def __init__(self):
super().__init__(self.SceneChangeEvent)
class ToolEvent(Event):
def __init__(self, type):
super().__init__(type)
|
<commit_before><commit_msg>Add an event class and several standard subclasses<commit_after>
|
class Event:
MousePressEvent = 1
MouseMoveEvent = 2
MouseReleaseEvent = 3
KeyPressEvent = 4
KeyReleaseEvent = 5
SceneChangeEvent = 6
ToolActivateEvent = 7
ToolDeactivateEvent = 8
def __init__(self, type):
super().__init__()
self._type = type
@property
def type(self):
return self._type
class MouseEvent(Event):
LeftButton = "left"
RightButton = "right"
MiddleButton = "middle"
def __init__(self, type, x = 0, y = 0, lastX = None, lastY = None, buttons = []):
super().__init__(type)
self._x = x
self._y = y
self._lastX = lastX
self._lastY = lastY
self._buttons = buttons
@property
def x(self):
return self._x
@property
def y(self):
return self._y
@property
def lastX(self):
return self._lastX
@property
def lastY(self):
return self._lastY
@property
def deltaX(self):
if self._lastX != None:
return self._x - self._lastX
return 0
@property
def deltaY(self):
if self._lastY != None:
return self._y - self._lastY
return 0
@property
def buttons(self):
return self._buttons
class KeyEvent(Event):
def __init__(self, type, key):
super().__init__(type)
self._key = key
@property
def key(self):
return self._key
class SceneChangeEvent(Event):
def __init__(self):
super().__init__(self.SceneChangeEvent)
class ToolEvent(Event):
def __init__(self, type):
super().__init__(type)
|
Add an event class and several standard subclassesclass Event:
MousePressEvent = 1
MouseMoveEvent = 2
MouseReleaseEvent = 3
KeyPressEvent = 4
KeyReleaseEvent = 5
SceneChangeEvent = 6
ToolActivateEvent = 7
ToolDeactivateEvent = 8
def __init__(self, type):
super().__init__()
self._type = type
@property
def type(self):
return self._type
class MouseEvent(Event):
LeftButton = "left"
RightButton = "right"
MiddleButton = "middle"
def __init__(self, type, x = 0, y = 0, lastX = None, lastY = None, buttons = []):
super().__init__(type)
self._x = x
self._y = y
self._lastX = lastX
self._lastY = lastY
self._buttons = buttons
@property
def x(self):
return self._x
@property
def y(self):
return self._y
@property
def lastX(self):
return self._lastX
@property
def lastY(self):
return self._lastY
@property
def deltaX(self):
if self._lastX != None:
return self._x - self._lastX
return 0
@property
def deltaY(self):
if self._lastY != None:
return self._y - self._lastY
return 0
@property
def buttons(self):
return self._buttons
class KeyEvent(Event):
def __init__(self, type, key):
super().__init__(type)
self._key = key
@property
def key(self):
return self._key
class SceneChangeEvent(Event):
def __init__(self):
super().__init__(self.SceneChangeEvent)
class ToolEvent(Event):
def __init__(self, type):
super().__init__(type)
|
<commit_before><commit_msg>Add an event class and several standard subclasses<commit_after>class Event:
MousePressEvent = 1
MouseMoveEvent = 2
MouseReleaseEvent = 3
KeyPressEvent = 4
KeyReleaseEvent = 5
SceneChangeEvent = 6
ToolActivateEvent = 7
ToolDeactivateEvent = 8
def __init__(self, type):
super().__init__()
self._type = type
@property
def type(self):
return self._type
class MouseEvent(Event):
LeftButton = "left"
RightButton = "right"
MiddleButton = "middle"
def __init__(self, type, x = 0, y = 0, lastX = None, lastY = None, buttons = []):
super().__init__(type)
self._x = x
self._y = y
self._lastX = lastX
self._lastY = lastY
self._buttons = buttons
@property
def x(self):
return self._x
@property
def y(self):
return self._y
@property
def lastX(self):
return self._lastX
@property
def lastY(self):
return self._lastY
@property
def deltaX(self):
if self._lastX != None:
return self._x - self._lastX
return 0
@property
def deltaY(self):
if self._lastY != None:
return self._y - self._lastY
return 0
@property
def buttons(self):
return self._buttons
class KeyEvent(Event):
def __init__(self, type, key):
super().__init__(type)
self._key = key
@property
def key(self):
return self._key
class SceneChangeEvent(Event):
def __init__(self):
super().__init__(self.SceneChangeEvent)
class ToolEvent(Event):
def __init__(self, type):
super().__init__(type)
|
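A short usage sketch of the classes above; paste it after the class definitions, the values are arbitrary:

e = MouseEvent(Event.MouseMoveEvent, x=10, y=20, lastX=4, lastY=5,
               buttons=[MouseEvent.LeftButton])
assert e.type == Event.MouseMoveEvent
assert (e.deltaX, e.deltaY) == (6, 15)  # deltas fall back to 0 when last* is None
print(e.buttons)                        # ['left']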
|
b854548b0eca1b90395e257e2878b935846f6d0e
|
components/lie_graph/lie_graph/graph_io/io_json_format.py
|
components/lie_graph/lie_graph/graph_io/io_json_format.py
|
# -*- coding: utf-8 -*-
"""
file: io_json_format.py
Functions for exporting and importing graphs in structured JSON format
"""
import logging
import json
from lie_graph.graph_io.io_helpers import open_anything
from lie_graph.graph_io.io_dict_format import read_dict, write_dict
def read_json(json_file, graph=None, node_key_tag=None, edge_key_tag=None, valuestring='value'):
"""
Parse (hierarchical) JSON data structure to a graph
:param json_file: json data to parse
:type json_file: File, string, stream or URL
:param graph: Graph object to import dictionary data in
:type graph: :lie_graph:Graph
:param node_key_tag: Data key to use for parsed node labels.
:type node_key_tag: :py:str
:param edge_key_tag: Data key to use for parsed edge labels.
:type edge_key_tag: :py:str
:param valuestring: Data key to use for dictionary values.
:type valuestring: :py:str
:return: GraphAxis object
:rtype: :lie_graph:GraphAxis
"""
# Try parsing the string using default Python json parser
json_file = open_anything(json_file)
try:
json_file = json.load(json_file)
except IOError:
logging.error('Unable to decode JSON string')
return
return read_dict(json_file, graph=graph, node_key_tag=node_key_tag, edge_key_tag=edge_key_tag,
valuestring=valuestring)
def write_json(graph, keystring=None, valuestring=None, default=None, root_nid=None, include_root=False):
"""
Export a graph to a (nested) JSON structure
Convert graph representation of the dictionary tree into JSON
using a nested or flattened representation of the dictionary hierarchy.
Dictionary keys and values are obtained from the node attributes using
`keystring` and `valuestring`. The keystring is set to graph node_key_tag
by default.
:param graph: Graph object to export
:type graph: :lie_graph:GraphAxis
:param keystring: key used to identify dictionary 'key' in node
attributes
:type keystring: :py:str
:param valuestring: key used to identify dictionary 'value' in node
attributes
:type valuestring: :py:str
:param default: value to use when node value was not found using
valuestring.
:type default: mixed
:param include_root: Include the root node in the hierarchy
:type include_root: :py:bool
:param root_nid: Root node ID in graph hierarchy
:rtype: :py:json
"""
return json.dumps(write_dict(graph, keystring=keystring, valuestring=valuestring, default=default,
root_nid=root_nid, include_root=include_root))
|
Add importer and exporter for JSON files
|
Add importer and exporter for JSON files
Wrapper around read_dict and write_dict
|
Python
|
apache-2.0
|
MD-Studio/MDStudio,MD-Studio/MDStudio,MD-Studio/MDStudio,MD-Studio/MDStudio,MD-Studio/MDStudio
|
Add importer and exporter for JSON files
Wrapper around read_dict and write_dict
|
# -*- coding: utf-8 -*-
"""
file: io_json_format.py
Functions for exporting and importing graphs in structured JSON format
"""
import logging
import json
from lie_graph.graph_io.io_helpers import open_anything
from lie_graph.graph_io.io_dict_format import read_dict, write_dict
def read_json(json_file, graph=None, node_key_tag=None, edge_key_tag=None, valuestring='value'):
"""
Parse (hierarchical) JSON data structure to a graph
:param json_file: json data to parse
:type json_file: File, string, stream or URL
:param graph: Graph object to import dictionary data in
:type graph: :lie_graph:Graph
:param node_key_tag: Data key to use for parsed node labels.
:type node_key_tag: :py:str
:param edge_key_tag: Data key to use for parsed edge labels.
:type edge_key_tag: :py:str
:param valuestring: Data key to use for dictionary values.
:type valuestring: :py:str
:return: GraphAxis object
:rtype: :lie_graph:GraphAxis
"""
# Try parsing the string using default Python json parser
json_file = open_anything(json_file)
try:
json_file = json.load(json_file)
except IOError:
logging.error('Unable to decode JSON string')
return
return read_dict(json_file, graph=graph, node_key_tag=node_key_tag, edge_key_tag=edge_key_tag,
valuestring=valuestring)
def write_json(graph, keystring=None, valuestring=None, default=None, root_nid=None, include_root=False):
"""
Export a graph to a (nested) JSON structure
Convert graph representation of the dictionary tree into JSON
using a nested or flattened representation of the dictionary hierarchy.
Dictionary keys and values are obtained from the node attributes using
`keystring` and `valuestring`. The keystring is set to graph node_key_tag
by default.
:param graph: Graph object to export
:type graph: :lie_graph:GraphAxis
:param keystring: key used to identify dictionary 'key' in node
attributes
:type keystring: :py:str
:param valuestring: key used to identify dictionary 'value' in node
attributes
:type valuestring: :py:str
:param default: value to use when node value was not found using
valuestring.
:type default: mixed
:param include_root: Include the root node in the hierarchy
:type include_root: :py:bool
:param root_nid: Root node ID in graph hierarchy
:rtype: :py:json
"""
return json.dumps(write_dict(graph, keystring=keystring, valuestring=valuestring, default=default,
root_nid=root_nid, include_root=include_root))
|
<commit_before><commit_msg>Add importer and exporter for JSON files
Wrapper around read_dict and write_dict<commit_after>
|
# -*- coding: utf-8 -*-
"""
file: io_json_format.py
Functions for exporting and importing graphs in structured JSON format
"""
import logging
import json
from lie_graph.graph_io.io_helpers import open_anything
from lie_graph.graph_io.io_dict_format import read_dict, write_dict
def read_json(json_file, graph=None, node_key_tag=None, edge_key_tag=None, valuestring='value'):
"""
Parse (hierarchical) JSON data structure to a graph
:param json_file: json data to parse
:type json_file: File, string, stream or URL
:param graph: Graph object to import dictionary data in
:type graph: :lie_graph:Graph
:param node_key_tag: Data key to use for parsed node labels.
:type node_key_tag: :py:str
:param edge_key_tag: Data key to use for parsed edge labels.
:type edge_key_tag: :py:str
:param valuestring: Data key to use for dictionary values.
:type valuestring: :py:str
:return: GraphAxis object
:rtype: :lie_graph:GraphAxis
"""
# Try parsing the string using default Python json parser
json_file = open_anything(json_file)
try:
json_file = json.load(json_file)
except IOError:
logging.error('Unable to decode JSON string')
return
return read_dict(json_file, graph=graph, node_key_tag=node_key_tag, edge_key_tag=edge_key_tag,
valuestring=valuestring)
def write_json(graph, keystring=None, valuestring=None, default=None, root_nid=None, include_root=False):
"""
Export a graph to a (nested) JSON structure
Convert graph representation of the dictionary tree into JSON
using a nested or flattened representation of the dictionary hierarchy.
Dictionary keys and values are obtained from the node attributes using
`keystring` and `valuestring`. The keystring is set to graph node_key_tag
by default.
:param graph: Graph object to export
:type graph: :lie_graph:GraphAxis
:param keystring: key used to identify dictionary 'key' in node
attributes
:type keystring: :py:str
:param valuestring: key used to identify dictionary 'value' in node
attributes
:type valuestring: :py:str
:param default: value to use when node value was not found using
valuestring.
:type default: mixed
:param include_root: Include the root node in the hierarchy
:type include_root: :py:bool
:param root_nid: Root node ID in graph hierarchy
:rtype: :py:json
"""
return json.dumps(write_dict(graph, keystring=keystring, valuestring=valuestring, default=default,
root_nid=root_nid, include_root=include_root))
|
Add importer and exporter for JSON files
Wrapper around read_dict and write_dict# -*- coding: utf-8 -*-
"""
file: io_json_format.py
Functions for exporting and importing graphs in structured JSON format
"""
import logging
import json
from lie_graph.graph_io.io_helpers import open_anything
from lie_graph.graph_io.io_dict_format import read_dict, write_dict
def read_json(json_file, graph=None, node_key_tag=None, edge_key_tag=None, valuestring='value'):
"""
Parse (hierarchical) JSON data structure to a graph
:param json_file: json data to parse
:type json_file: File, string, stream or URL
:param graph: Graph object to import dictionary data in
:type graph: :lie_graph:Graph
:param node_key_tag: Data key to use for parsed node labels.
:type node_key_tag: :py:str
:param edge_key_tag: Data key to use for parsed edge labels.
:type edge_key_tag: :py:str
:param valuestring: Data key to use for dictionary values.
:type valuestring: :py:str
:return: GraphAxis object
:rtype: :lie_graph:GraphAxis
"""
# Try parsing the string using default Python json parser
json_file = open_anything(json_file)
try:
json_file = json.load(json_file)
except IOError:
logging.error('Unable to decode JSON string')
return
return read_dict(json_file, graph=graph, node_key_tag=node_key_tag, edge_key_tag=edge_key_tag,
valuestring=valuestring)
def write_json(graph, keystring=None, valuestring=None, default=None, root_nid=None, include_root=False):
"""
Export a graph to a (nested) JSON structure
Convert graph representation of the dictionary tree into JSON
using a nested or flattened representation of the dictionary hierarchy.
Dictionary keys and values are obtained from the node attributes using
`keystring` and `valuestring`. The keystring is set to graph node_key_tag
by default.
:param graph: Graph object to export
:type graph: :lie_graph:GraphAxis
:param keystring: key used to identify dictionary 'key' in node
attributes
:type keystring: :py:str
:param valuestring: key used to identify dictionary 'value' in node
attributes
:type valuestring: :py:str
:param default: value to use when node value was not found using
valuestring.
:type default: mixed
:param include_root: Include the root node in the hierarchy
:type include_root: :py:bool
:param root_nid: Root node ID in graph hierarchy
:rtype: :py:json
"""
return json.dumps(write_dict(graph, keystring=keystring, valuestring=valuestring, default=default,
root_nid=root_nid, include_root=include_root))
|
<commit_before><commit_msg>Add importer and exporter for JSON files
Wrapper around read_dict and write_dict<commit_after># -*- coding: utf-8 -*-
"""
file: io_json_format.py
Functions for exporting and importing graphs in structured JSON format
"""
import logging
import json
from lie_graph.graph_io.io_helpers import open_anything
from lie_graph.graph_io.io_dict_format import read_dict, write_dict
def read_json(json_file, graph=None, node_key_tag=None, edge_key_tag=None, valuestring='value'):
"""
Parse (hierarchical) JSON data structure to a graph
:param json_file: json data to parse
:type json_file: File, string, stream or URL
:param graph: Graph object to import dictionary data in
:type graph: :lie_graph:Graph
:param node_key_tag: Data key to use for parsed node labels.
:type node_key_tag: :py:str
:param edge_key_tag: Data key to use for parsed edge labels.
:type edge_key_tag: :py:str
:param valuestring: Data key to use for dictionary values.
:type valuestring: :py:str
:return: GraphAxis object
:rtype: :lie_graph:GraphAxis
"""
# Try parsing the string using default Python json parser
json_file = open_anything(json_file)
try:
json_file = json.load(json_file)
except IOError:
logging.error('Unable to decode JSON string')
return
return read_dict(json_file, graph=graph, node_key_tag=node_key_tag, edge_key_tag=edge_key_tag,
valuestring=valuestring)
def write_json(graph, keystring=None, valuestring=None, default=None, root_nid=None, include_root=False):
"""
Export a graph to a (nested) JSON structure
Convert graph representation of the dictionary tree into JSON
using a nested or flattened representation of the dictionary hierarchy.
Dictionary keys and values are obtained from the node attributes using
`keystring` and `valuestring`. The keystring is set to graph node_key_tag
by default.
:param graph: Graph object to export
:type graph: :lie_graph:GraphAxis
:param keystring: key used to identify dictionary 'key' in node
attributes
:type keystring: :py:str
:param valuestring: key used to identify dictionary 'value' in node
attributes
:type valuestring: :py:str
:param default: value to use when node value was not found using
valuestring.
:type default: mixed
:param include_root: Include the root node in the hierarchy
:type include_root: :py:bool
:param root_nid: Root node ID in graph hierarchy
:rtype: :py:json
"""
return json.dumps(write_dict(graph, keystring=keystring, valuestring=valuestring, default=default,
root_nid=root_nid, include_root=include_root))
|
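A hedged round-trip sketch: it assumes lie_graph is installed and that, as the docstring states, read_json accepts a raw JSON string as well as a file or stream.

graph = read_json('{"a": {"b": 1, "c": 2}}')
print(write_json(graph))  # should reproduce an equivalent nested structure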
|
eab9ca4d17d67d7c2977d2753c4a2d6be768d1f9
|
kobo/django/auth/migrations/0002_auto_20220203_1511.py
|
kobo/django/auth/migrations/0002_auto_20220203_1511.py
|
# Generated by Django 2.2.24 on 2022-02-03 15:11
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('kobo_auth', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='user',
name='username',
field=models.CharField(error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 255 characters or fewer. Letters, digits and @/./+/-/_ only.', max_length=255, unique=True, validators=[django.core.validators.RegexValidator('^[\\w.@+-]+$', 'Enter a valid username. This value may contain only letters, numbers and @/./+/-/_ characters.', 'invalid')], verbose_name='username'),
),
]
|
Add migration for User model generated by Django 2
|
Add migration for User model generated by Django 2
|
Python
|
lgpl-2.1
|
release-engineering/kobo,release-engineering/kobo,release-engineering/kobo,release-engineering/kobo
|
Add migration for User model generated by Django 2
|
# Generated by Django 2.2.24 on 2022-02-03 15:11
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('kobo_auth', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='user',
name='username',
field=models.CharField(error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 255 characters or fewer. Letters, digits and @/./+/-/_ only.', max_length=255, unique=True, validators=[django.core.validators.RegexValidator('^[\\w.@+-]+$', 'Enter a valid username. This value may contain only letters, numbers and @/./+/-/_ characters.', 'invalid')], verbose_name='username'),
),
]
|
<commit_before><commit_msg>Add migration for User model generated by Django 2<commit_after>
|
# Generated by Django 2.2.24 on 2022-02-03 15:11
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('kobo_auth', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='user',
name='username',
field=models.CharField(error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 255 characters or fewer. Letters, digits and @/./+/-/_ only.', max_length=255, unique=True, validators=[django.core.validators.RegexValidator('^[\\w.@+-]+$', 'Enter a valid username. This value may contain only letters, numbers and @/./+/-/_ characters.', 'invalid')], verbose_name='username'),
),
]
|
Add migration for User model generated by Django 2# Generated by Django 2.2.24 on 2022-02-03 15:11
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('kobo_auth', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='user',
name='username',
field=models.CharField(error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 255 characters or fewer. Letters, digits and @/./+/-/_ only.', max_length=255, unique=True, validators=[django.core.validators.RegexValidator('^[\\w.@+-]+$', 'Enter a valid username. This value may contain only letters, numbers and @/./+/-/_ characters.', 'invalid')], verbose_name='username'),
),
]
|
<commit_before><commit_msg>Add migration for User model generated by Django 2<commit_after># Generated by Django 2.2.24 on 2022-02-03 15:11
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('kobo_auth', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='user',
name='username',
field=models.CharField(error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 255 characters or fewer. Letters, digits and @/./+/-/_ only.', max_length=255, unique=True, validators=[django.core.validators.RegexValidator('^[\\w.@+-]+$', 'Enter a valid username. This value may contain only letters, numbers and @/./+/-/_ characters.', 'invalid')], verbose_name='username'),
),
]
|
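The regex in that migration can be exercised on its own. A small check of the validator, requiring only that Django is installed (no configured settings needed):

from django.core.validators import RegexValidator
from django.core.exceptions import ValidationError

v = RegexValidator(r'^[\w.@+-]+$', 'Enter a valid username.', 'invalid')
v('alice.smith@example.org')   # valid: returns None silently
try:
    v('no spaces allowed')
except ValidationError as err:
    print(err.code)            # -> invalid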
|
32bbe90127c573cf69c7c9128e6f0e95756974a9
|
test/integration/test_api.py
|
test/integration/test_api.py
|
import unittest
import changeling.api
import changeling.fakes
class TestChangeAPI(unittest.TestCase):
def setUp(self):
self.fake_storage = changeling.fakes.FakeStorage([])
self.api = changeling.api.ChangeAPI(self.fake_storage)
def test_list_zero_changes(self):
self.assertEqual([], list(self.api.list()))
|
Add integration tests for ChangeAPI
|
Add integration tests for ChangeAPI
|
Python
|
apache-2.0
|
bcwaldon/changeling,bcwaldon/changeling
|
Add integration tests for ChangeAPI
|
import unittest
import changeling.api
import changeling.fakes
class TestChangeAPI(unittest.TestCase):
def setUp(self):
self.fake_storage = changeling.fakes.FakeStorage([])
self.api = changeling.api.ChangeAPI(self.fake_storage)
def test_list_zero_changes(self):
self.assertEqual([], list(self.api.list()))
|
<commit_before><commit_msg>Add integration tests for ChangeAPI<commit_after>
|
import unittest
import changeling.api
import changeling.fakes
class TestChangeAPI(unittest.TestCase):
def setUp(self):
self.fake_storage = changeling.fakes.FakeStorage([])
self.api = changeling.api.ChangeAPI(self.fake_storage)
def test_list_zero_changes(self):
self.assertEqual([], list(self.api.list()))
|
Add integration tests for ChangeAPI
import unittest
import changeling.api
import changeling.fakes
class TestChangeAPI(unittest.TestCase):
def setUp(self):
self.fake_storage = changeling.fakes.FakeStorage([])
self.api = changeling.api.ChangeAPI(self.fake_storage)
def test_list_zero_changes(self):
self.assertEqual([], list(self.api.list()))
|
<commit_before><commit_msg>Add integration tests for ChangeAPI<commit_after>
import unittest
import changeling.api
import changeling.fakes
class TestChangeAPI(unittest.TestCase):
def setUp(self):
self.fake_storage = changeling.fakes.FakeStorage([])
self.api = changeling.api.ChangeAPI(self.fake_storage)
def test_list_zero_changes(self):
self.assertEqual([], list(self.api.list()))
|
|
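The test above exercises ChangeAPI against an in-memory fake rather than a real backend. Since FakeStorage's real interface lives elsewhere in the changeling repo, the self-contained sketch below uses toy stand-ins purely to show the fake-storage testing pattern, including the non-empty case:

import unittest

class FakeStorage(object):
    """Toy stand-in: holds change records in memory."""
    def __init__(self, records):
        self._records = list(records)

    def all(self):
        return iter(self._records)

class ChangeAPI(object):
    """Toy stand-in that lists whatever the storage holds."""
    def __init__(self, storage):
        self._storage = storage

    def list(self):
        return self._storage.all()

class TestListWithData(unittest.TestCase):
    def test_list_returns_stored_changes(self):
        api = ChangeAPI(FakeStorage([{"id": 1}]))
        self.assertEqual([{"id": 1}], list(api.list()))

if __name__ == "__main__":
    unittest.main()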
358d97dd3f2628444899fdd0e5021420014b27a3
|
scripts/add_kms_secret.py
|
scripts/add_kms_secret.py
|
#!/usr/bin/env python
# -*- encoding: utf-8
"""
Store a config secret in AWS Secrets Manager under the name structure
{project_id}/secrets/{label}/{config_key}
The script creates the secret if it does not already exist, or updates the value of an existing one.
"""
import getpass
import sys
import boto3
from botocore.exceptions import ClientError
import click
secrets_client = boto3.client("secretsmanager")
@click.command()
@click.option("--project_id", prompt="What is the project ID?", required=True)
@click.option("--label", default="prod", required=True)
@click.option("--config_key", prompt="What is the config key?", required=True)
def store_config_key(project_id, label, config_key):
name = f"{project_id}/secrets/{label}/{config_key}"
config_value = getpass.getpass()
try:
resp = secrets_client.create_secret(
Name=name,
Description=f"Config secret populated by {__file__}",
SecretString=config_value,
)
except ClientError as err:
if err.response["Error"]["Code"] == "ResourceExistsException":
resp = secrets_client.put_secret_value(
SecretId=name, SecretString=config_value
)
if resp["ResponseMetadata"]["HTTPStatusCode"] == 200:
print(f"Updated secret {name} -> [secret]")
else:
print(f"Unexpected error from PutSecretValue: {resp}")
sys.exit(1)
else:
raise
else:
if resp["ResponseMetadata"]["HTTPStatusCode"] == 200:
print(f"Created secret {name} -> [secret]")
else:
print(f"Unexpected error from CreateSecret: {resp}")
sys.exit(1)
if __name__ == "__main__":
store_config_key()
|
Add a script for storing secrets
|
Add a script for storing secrets
|
Python
|
mit
|
wellcometrust/platform-api,wellcometrust/platform-api,wellcometrust/platform-api,wellcometrust/platform-api
|
Add a script for storing secrets
|
#!/usr/bin/env python
# -*- encoding: utf-8
"""
Store a config secret in AWS Secrets Manager under the name structure
{project_id}/secrets/{label}/{config_key}
The script creates the secret if it does not already exist, or updates the value of an existing one.
"""
import getpass
import sys
import boto3
from botocore.exceptions import ClientError
import click
secrets_client = boto3.client("secretsmanager")
@click.command()
@click.option("--project_id", prompt="What is the project ID?", required=True)
@click.option("--label", default="prod", required=True)
@click.option("--config_key", prompt="What is the config key?", required=True)
def store_config_key(project_id, label, config_key):
name = f"{project_id}/secrets/{label}/{config_key}"
config_value = getpass.getpass()
try:
resp = secrets_client.create_secret(
Name=name,
Description=f"Config secret populated by {__file__}",
SecretString=config_value,
)
except ClientError as err:
if err.response["Error"]["Code"] == "ResourceExistsException":
resp = secrets_client.put_secret_value(
SecretId=name, SecretString=config_value
)
if resp["ResponseMetadata"]["HTTPStatusCode"] == 200:
print(f"Updated secret {name} -> [secret]")
else:
print(f"Unexpected error from PutSecretValue: {resp}")
sys.exit(1)
else:
raise
else:
if resp["ResponseMetadata"]["HTTPStatusCode"] == 200:
print(f"Created secret {name} -> [secret]")
else:
print(f"Unexpected error from CreateSecret: {resp}")
sys.exit(1)
if __name__ == "__main__":
store_config_key()
|
<commit_before><commit_msg>Add a script for storing secrets<commit_after>
|
#!/usr/bin/env python
# -*- encoding: utf-8
"""
Store a config secret in AWS Secrets Manager under the name structure
{project_id}/secrets/{label}/{config_key}
The script creates the secret if it does not already exist, or updates the value of an existing one.
"""
import getpass
import sys
import boto3
from botocore.exceptions import ClientError
import click
secrets_client = boto3.client("secretsmanager")
@click.command()
@click.option("--project_id", prompt="What is the project ID?", required=True)
@click.option("--label", default="prod", required=True)
@click.option("--config_key", prompt="What is the config key?", required=True)
def store_config_key(project_id, label, config_key):
name = f"{project_id}/secrets/{label}/{config_key}"
config_value = getpass.getpass()
try:
resp = secrets_client.create_secret(
Name=name,
Description=f"Config secret populated by {__file__}",
SecretString=config_value,
)
except ClientError as err:
if err.response["Error"]["Code"] == "ResourceExistsException":
resp = secrets_client.put_secret_value(
SecretId=name, SecretString=config_value
)
if resp["ResponseMetadata"]["HTTPStatusCode"] == 200:
print(f"Updated secret {name} -> [secret]")
else:
print(f"Unexpected error from PutSecretValue: {resp}")
sys.exit(1)
else:
raise
else:
if resp["ResponseMetadata"]["HTTPStatusCode"] == 200:
print(f"Created secret {name} -> [secret]")
else:
print(f"Unexpected error from CreateSecret: {resp}")
sys.exit(1)
if __name__ == "__main__":
store_config_key()
|
Add a script for storing secrets#!/usr/bin/env python
# -*- encoding: utf-8
"""
Store a config secret in AWS Secrets Manager under the name structure
{project_id}/secrets/{label}/{config_key}
The script creates the secret if it does not already exist, or updates the value of an existing one.
"""
import getpass
import sys
import boto3
from botocore.exceptions import ClientError
import click
secrets_client = boto3.client("secretsmanager")
@click.command()
@click.option("--project_id", prompt="What is the project ID?", required=True)
@click.option("--label", default="prod", required=True)
@click.option("--config_key", prompt="What is the config key?", required=True)
def store_config_key(project_id, label, config_key):
name = f"{project_id}/secrets/{label}/{config_key}"
config_value = getpass.getpass()
try:
resp = secrets_client.create_secret(
Name=name,
Description=f"Config secret populated by {__file__}",
SecretString=config_value,
)
except ClientError as err:
if err.response["Error"]["Code"] == "ResourceExistsException":
resp = secrets_client.put_secret_value(
SecretId=name, SecretString=config_value
)
if resp["ResponseMetadata"]["HTTPStatusCode"] == 200:
print(f"Updated secret {name} -> [secret]")
else:
print(f"Unexpected error from PutSecretValue: {resp}")
sys.exit(1)
else:
raise
else:
if resp["ResponseMetadata"]["HTTPStatusCode"] == 200:
print(f"Created secret {name} -> [secret]")
else:
print(f"Unexpected error from CreateSecret: {resp}")
sys.exit(1)
if __name__ == "__main__":
store_config_key()
|
<commit_before><commit_msg>Add a script for storing secrets<commit_after>#!/usr/bin/env python
# -*- encoding: utf-8
"""
Store a config secret in AWS Secrets Manager under the name structure
{project_id}/secrets/{label}/{config_key}
The script creates the secret if it does not already exist, or updates the value of an existing one.
"""
import getpass
import sys
import boto3
from botocore.exceptions import ClientError
import click
secrets_client = boto3.client("secretsmanager")
@click.command()
@click.option("--project_id", prompt="What is the project ID?", required=True)
@click.option("--label", default="prod", required=True)
@click.option("--config_key", prompt="What is the config key?", required=True)
def store_config_key(project_id, label, config_key):
name = f"{project_id}/secrets/{label}/{config_key}"
config_value = getpass.getpass()
try:
resp = secrets_client.create_secret(
Name=name,
Description=f"Config secret populated by {__file__}",
SecretString=config_value,
)
except ClientError as err:
if err.response["Error"]["Code"] == "ResourceExistsException":
resp = secrets_client.put_secret_value(
SecretId=name, SecretString=config_value
)
if resp["ResponseMetadata"]["HTTPStatusCode"] == 200:
print(f"Updated secret {name} -> [secret]")
else:
print(f"Unexpected error from PutSecretValue: {resp}")
sys.exit(1)
else:
raise
else:
if resp["ResponseMetadata"]["HTTPStatusCode"] == 200:
print(f"Created secret {name} -> [secret]")
else:
print(f"Unexpected error from CreateSecret: {resp}")
sys.exit(1)
if __name__ == "__main__":
store_config_key()
|
|
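A value written by the script above can be read back with Secrets Manager's GetSecretValue call. The sketch below is a minimal companion, not part of the original commit; it assumes AWS credentials are configured, and the secret name is a made-up example following the script's naming layout:

import boto3

secrets_client = boto3.client("secretsmanager")

# Example name following the {project_id}/secrets/{label}/{config_key} layout.
name = "myproject/secrets/prod/api_key"  # placeholder, not a real secret

resp = secrets_client.get_secret_value(SecretId=name)
print("Fetched secret version:", resp["VersionId"])
# The value itself is in resp["SecretString"]; avoid printing it.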
94e60ae8057795363c19f69d46c5890372cfa7dd
|
singleSimulation.py
|
singleSimulation.py
|
import os
import sys
import numpy as np
from scripts.Dqn import Learner
import time
if 'SUMO_HOME' in os.environ:
tools = os.path.join(os.environ['SUMO_HOME'], 'tools')
sys.path.append(tools)
else:
sys.exit("Please declare the environment variable 'SUMO_HOME'")
sumoBinary = "/usr/bin/sumo-gui"
sumoConfig = "data/bangalore.sumo.cfg"
import traci
from scripts.auxilliary import makemap
def get_state(detectorIDs):
state = []
for detector in detectorIDs:
speed = traci.inductionloop.getLastStepMeanSpeed(detector)
state.append(speed)
for detector in detectorIDs:
veh_num = traci.inductionloop.getLastStepVehicleNumber(detector)
state.append(veh_num)
state = np.array(state)
state = state.reshape((1, state.shape[0]))
return state
def calc_reward(state, next_state):
rew = 0
lstate = list(state)[0]
lnext_state = list(next_state)[0]
for ind, (det_old, det_new) in enumerate(zip(lstate, lnext_state)):
if ind < len(lstate)/2:
rew += 1000*(det_new - det_old)
else:
rew += 1000*(det_old - det_new)
return rew
def main():
# Control code here
sumoCmd = [sumoBinary, "-c", sumoConfig, "--start"]
traci.start(sumoCmd)
TLIds = traci.trafficlights.getIDList()
actionsMap = makemap(TLIds)
detectorIDs = traci.inductionloop.getIDList()
state_space_size = traci.inductionloop.getIDCount()*2
action_space_size = len(actionsMap)
agent = Learner(state_space_size, action_space_size, 0.0)
agent.load("./save/traffic.h5")
# Get number of induction loops
state = get_state(detectorIDs)
total_reward = 0
simulationSteps = 0
while simulationSteps < 1000:
action = agent.act(state)
lightsPhase = actionsMap[action]
for light, index in zip(TLIds, range(len(TLIds))):
traci.trafficlights.setPhase(light, lightsPhase[index])
for i in range(2):
traci.simulationStep()
time.sleep(0.4)
simulationSteps += 2
next_state = get_state(detectorIDs)
reward = calc_reward(state, next_state)
total_reward += reward
agent.remember(state, action, reward, next_state)
state = next_state
traci.close()
print "Simulation Reward: {}".format(total_reward)
if __name__ == '__main__':
main()
|
Add program to run single simulation
|
Add program to run single simulation
|
Python
|
mit
|
Wert1996/Traffic-Optimisation
|
Add program to run single simulation
|
import os
import sys
import numpy as np
from scripts.Dqn import Learner
import time
if 'SUMO_HOME' in os.environ:
tools = os.path.join(os.environ['SUMO_HOME'], 'tools')
sys.path.append(tools)
else:
sys.exit("Please declare the environment variable 'SUMO_HOME'")
sumoBinary = "/usr/bin/sumo-gui"
sumoConfig = "data/bangalore.sumo.cfg"
import traci
from scripts.auxilliary import makemap
def get_state(detectorIDs):
state = []
for detector in detectorIDs:
speed = traci.inductionloop.getLastStepMeanSpeed(detector)
state.append(speed)
for detector in detectorIDs:
veh_num = traci.inductionloop.getLastStepVehicleNumber(detector)
state.append(veh_num)
state = np.array(state)
state = state.reshape((1, state.shape[0]))
return state
def calc_reward(state, next_state):
rew = 0
lstate = list(state)[0]
lnext_state = list(next_state)[0]
for ind, (det_old, det_new) in enumerate(zip(lstate, lnext_state)):
if ind < len(lstate)/2:
rew += 1000*(det_new - det_old)
else:
rew += 1000*(det_old - det_new)
return rew
def main():
# Control code here
sumoCmd = [sumoBinary, "-c", sumoConfig, "--start"]
traci.start(sumoCmd)
TLIds = traci.trafficlights.getIDList()
actionsMap = makemap(TLIds)
detectorIDs = traci.inductionloop.getIDList()
state_space_size = traci.inductionloop.getIDCount()*2
action_space_size = len(actionsMap)
agent = Learner(state_space_size, action_space_size, 0.0)
agent.load("./save/traffic.h5")
# Get number of induction loops
state = get_state(detectorIDs)
total_reward = 0
simulationSteps = 0
while simulationSteps < 1000:
action = agent.act(state)
lightsPhase = actionsMap[action]
for light, index in zip(TLIds, range(len(TLIds))):
traci.trafficlights.setPhase(light, lightsPhase[index])
for i in range(2):
traci.simulationStep()
time.sleep(0.4)
simulationSteps += 2
next_state = get_state(detectorIDs)
reward = calc_reward(state, next_state)
total_reward += reward
agent.remember(state, action, reward, next_state)
state = next_state
traci.close()
print "Simulation Reward: {}".format(total_reward)
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add program to run single simulation<commit_after>
|
import os
import sys
import numpy as np
from scripts.Dqn import Learner
import time
if 'SUMO_HOME' in os.environ:
tools = os.path.join(os.environ['SUMO_HOME'], 'tools')
sys.path.append(tools)
else:
sys.exit("Please declare the environment variable 'SUMO_HOME'")
sumoBinary = "/usr/bin/sumo-gui"
sumoConfig = "data/bangalore.sumo.cfg"
import traci
from scripts.auxilliary import makemap
def get_state(detectorIDs):
state = []
for detector in detectorIDs:
speed = traci.inductionloop.getLastStepMeanSpeed(detector)
state.append(speed)
for detector in detectorIDs:
veh_num = traci.inductionloop.getLastStepVehicleNumber(detector)
state.append(veh_num)
state = np.array(state)
state = state.reshape((1, state.shape[0]))
return state
def calc_reward(state, next_state):
rew = 0
lstate = list(state)[0]
lnext_state = list(next_state)[0]
for ind, (det_old, det_new) in enumerate(zip(lstate, lnext_state)):
if ind < len(lstate)/2:
rew += 1000*(det_new - det_old)
else:
rew += 1000*(det_old - det_new)
return rew
def main():
# Control code here
sumoCmd = [sumoBinary, "-c", sumoConfig, "--start"]
traci.start(sumoCmd)
TLIds = traci.trafficlights.getIDList()
actionsMap = makemap(TLIds)
detectorIDs = traci.inductionloop.getIDList()
state_space_size = traci.inductionloop.getIDCount()*2
action_space_size = len(actionsMap)
agent = Learner(state_space_size, action_space_size, 0.0)
agent.load("./save/traffic.h5")
# Get number of induction loops
state = get_state(detectorIDs)
total_reward = 0
simulationSteps = 0
while simulationSteps < 1000:
action = agent.act(state)
lightsPhase = actionsMap[action]
for light, index in zip(TLIds, range(len(TLIds))):
traci.trafficlights.setPhase(light, lightsPhase[index])
for i in range(2):
traci.simulationStep()
time.sleep(0.4)
simulationSteps += 2
next_state = get_state(detectorIDs)
reward = calc_reward(state, next_state)
total_reward += reward
agent.remember(state, action, reward, next_state)
state = next_state
traci.close()
print "Simulation Reward: {}".format(total_reward)
if __name__ == '__main__':
main()
|
Add program to run single simulationimport os
import sys
import numpy as np
from scripts.Dqn import Learner
import time
if 'SUMO_HOME' in os.environ:
tools = os.path.join(os.environ['SUMO_HOME'], 'tools')
sys.path.append(tools)
else:
sys.exit("Please declare the environment variable 'SUMO_HOME'")
sumoBinary = "/usr/bin/sumo-gui"
sumoConfig = "data/bangalore.sumo.cfg"
import traci
from scripts.auxilliary import makemap
def get_state(detectorIDs):
state = []
for detector in detectorIDs:
speed = traci.inductionloop.getLastStepMeanSpeed(detector)
state.append(speed)
for detector in detectorIDs:
veh_num = traci.inductionloop.getLastStepVehicleNumber(detector)
state.append(veh_num)
state = np.array(state)
state = state.reshape((1, state.shape[0]))
return state
def calc_reward(state, next_state):
rew = 0
lstate = list(state)[0]
lnext_state = list(next_state)[0]
for ind, (det_old, det_new) in enumerate(zip(lstate, lnext_state)):
if ind < len(lstate)/2:
rew += 1000*(det_new - det_old)
else:
rew += 1000*(det_old - det_new)
return rew
def main():
# Control code here
sumoCmd = [sumoBinary, "-c", sumoConfig, "--start"]
traci.start(sumoCmd)
TLIds = traci.trafficlights.getIDList()
actionsMap = makemap(TLIds)
detectorIDs = traci.inductionloop.getIDList()
state_space_size = traci.inductionloop.getIDCount()*2
action_space_size = len(actionsMap)
agent = Learner(state_space_size, action_space_size, 0.0)
agent.load("./save/traffic.h5")
# Get number of induction loops
state = get_state(detectorIDs)
total_reward = 0
simulationSteps = 0
while simulationSteps < 1000:
action = agent.act(state)
lightsPhase = actionsMap[action]
for light, index in zip(TLIds, range(len(TLIds))):
traci.trafficlights.setPhase(light, lightsPhase[index])
for i in range(2):
traci.simulationStep()
time.sleep(0.4)
simulationSteps += 2
next_state = get_state(detectorIDs)
reward = calc_reward(state, next_state)
total_reward += reward
agent.remember(state, action, reward, next_state)
state = next_state
traci.close()
print "Simulation Reward: {}".format(total_reward)
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add program to run single simulation<commit_after>import os
import sys
import numpy as np
from scripts.Dqn import Learner
import time
if 'SUMO_HOME' in os.environ:
tools = os.path.join(os.environ['SUMO_HOME'], 'tools')
sys.path.append(tools)
else:
sys.exit("Please declare the environment variable 'SUMO_HOME'")
sumoBinary = "/usr/bin/sumo-gui"
sumoConfig = "data/bangalore.sumo.cfg"
import traci
from scripts.auxilliary import makemap
def get_state(detectorIDs):
state = []
for detector in detectorIDs:
speed = traci.inductionloop.getLastStepMeanSpeed(detector)
state.append(speed)
for detector in detectorIDs:
veh_num = traci.inductionloop.getLastStepVehicleNumber(detector)
state.append(veh_num)
state = np.array(state)
state = state.reshape((1, state.shape[0]))
return state
def calc_reward(state, next_state):
rew = 0
lstate = list(state)[0]
lnext_state = list(next_state)[0]
for ind, (det_old, det_new) in enumerate(zip(lstate, lnext_state)):
if ind < len(lstate)/2:
rew += 1000*(det_new - det_old)
else:
rew += 1000*(det_old - det_new)
return rew
def main():
# Control code here
sumoCmd = [sumoBinary, "-c", sumoConfig, "--start"]
traci.start(sumoCmd)
TLIds = traci.trafficlights.getIDList()
actionsMap = makemap(TLIds)
detectorIDs = traci.inductionloop.getIDList()
state_space_size = traci.inductionloop.getIDCount()*2
action_space_size = len(actionsMap)
agent = Learner(state_space_size, action_space_size, 0.0)
agent.load("./save/traffic.h5")
# Get number of induction loops
state = get_state(detectorIDs)
total_reward = 0
simulationSteps = 0
while simulationSteps < 1000:
action = agent.act(state)
lightsPhase = actionsMap[action]
for light, index in zip(TLIds, range(len(TLIds))):
traci.trafficlights.setPhase(light, lightsPhase[index])
for i in range(2):
traci.simulationStep()
time.sleep(0.4)
simulationSteps += 2
next_state = get_state(detectorIDs)
reward = calc_reward(state, next_state)
total_reward += reward
agent.remember(state, action, reward, next_state)
state = next_state
traci.close()
print "Simulation Reward: {}".format(total_reward)
if __name__ == '__main__':
main()
|
|
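The reward shaping in calc_reward above rewards rising speeds (first half of the state vector) and falling vehicle counts (second half). A toy check of that arithmetic with invented detector readings, runnable without SUMO:

import numpy as np

def calc_reward(state, next_state):
    # Same arithmetic as the script: speeds should rise, counts should fall.
    rew = 0
    lstate = list(state)[0]
    lnext_state = list(next_state)[0]
    for ind, (det_old, det_new) in enumerate(zip(lstate, lnext_state)):
        if ind < len(lstate) / 2:
            rew += 1000 * (det_new - det_old)
        else:
            rew += 1000 * (det_old - det_new)
    return rew

state = np.array([[10.0, 12.0, 3.0, 4.0]])       # two speeds, two vehicle counts
next_state = np.array([[11.0, 12.5, 2.0, 4.0]])  # speeds up, counts down
print(calc_reward(state, next_state))            # 1500 + 1000 = 2500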
2b68847ceb2a440aff89d23479b1b69a74cd6e6b
|
tools/visual/show_video_matfile_detection.py
|
tools/visual/show_video_matfile_detection.py
|
#!/usr/bin/env python
import argparse
import os
import sys
import cv2
from vdetlib.vdet.dataset import imagenet_vdet_class_idx
from vdetlib.utils.common import imread, imwrite
from vdetlib.utils.protocol import proto_load, proto_dump, frame_path_at, frame_top_detections
from vdetlib.utils.visual import add_bbox
from vdetlib.utils.cython_nms import nms
import scipy.io as sio
import glob
import numpy as np
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('vid_file')
parser.add_argument('det_root')
parser.add_argument('--cls', choices=imagenet_vdet_class_idx.keys())
parser.add_argument('--save_dir', default=None)
parser.add_argument('--top_k', default=10)
args = parser.parse_args()
vid_proto = proto_load(args.vid_file)
det_files = glob.glob(os.path.join(args.det_root, '*.mat'))
if args.save_dir and not os.path.isdir(args.save_dir):
os.mkdir(args.save_dir)
cls_index = imagenet_vdet_class_idx[args.cls]
for frame, det_file in zip(vid_proto['frames'], det_files):
det = sio.loadmat(det_file)
frame_idx = frame['frame']
img = imread(frame_path_at(vid_proto, frame_idx))
boxes = det['boxes'][:,cls_index,:]
scores = det['zs'][:,cls_index]
keep = nms(np.hstack((boxes,scores[:,np.newaxis])), 0.3)
det_img = add_bbox(img, [boxes[i,:] for i in keep])
cv2.imshow('detection', det_img)
if cv2.waitKey(0) == ord('q'):
cv2.destroyAllWindows()
sys.exit(0)
cv2.destroyAllWindows()
if args.save_dir:
imwrite(os.path.join(args.save_dir, "{:04d}.jpg".format(frame_idx)), det_img)
|
Add a script to visualize detection matfiles.
|
Add a script to visualize detection matfiles.
|
Python
|
mit
|
myfavouritekk/TPN
|
Add a script to visualize detection matfiles.
|
#!/usr/bin/env python
import argparse
import os
import sys
import cv2
from vdetlib.vdet.dataset import imagenet_vdet_class_idx
from vdetlib.utils.common import imread, imwrite
from vdetlib.utils.protocol import proto_load, proto_dump, frame_path_at, frame_top_detections
from vdetlib.utils.visual import add_bbox
from vdetlib.utils.cython_nms import nms
import scipy.io as sio
import glob
import numpy as np
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('vid_file')
parser.add_argument('det_root')
parser.add_argument('--cls', choices=imagenet_vdet_class_idx.keys())
parser.add_argument('--save_dir', default=None)
parser.add_argument('--top_k', default=10)
args = parser.parse_args()
vid_proto = proto_load(args.vid_file)
det_files = glob.glob(os.path.join(args.det_root, '*.mat'))
if args.save_dir and not os.path.isdir(args.save_dir):
os.mkdir(args.save_dir)
cls_index = imagenet_vdet_class_idx[args.cls]
for frame, det_file in zip(vid_proto['frames'], det_files):
det = sio.loadmat(det_file)
frame_idx = frame['frame']
img = imread(frame_path_at(vid_proto, frame_idx))
boxes = det['boxes'][:,cls_index,:]
scores = det['zs'][:,cls_index]
keep = nms(np.hstack((boxes,scores[:,np.newaxis])), 0.3)
det_img = add_bbox(img, [boxes[i,:] for i in keep])
cv2.imshow('detection', det_img)
if cv2.waitKey(0) == ord('q'):
cv2.destroyAllWindows()
sys.exit(0)
cv2.destroyAllWindows()
if args.save_dir:
imwrite(os.path.join(args.save_dir, "{:04d}.jpg".format(frame_idx)), det_img)
|
<commit_before><commit_msg>Add a script to visualize detection matfiles.<commit_after>
|
#!/usr/bin/env python
import argparse
import os
import sys
import cv2
from vdetlib.vdet.dataset import imagenet_vdet_class_idx
from vdetlib.utils.common import imread, imwrite
from vdetlib.utils.protocol import proto_load, proto_dump, frame_path_at, frame_top_detections
from vdetlib.utils.visual import add_bbox
from vdetlib.utils.cython_nms import nms
import scipy.io as sio
import glob
import numpy as np
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('vid_file')
parser.add_argument('det_root')
parser.add_argument('--cls', choices=imagenet_vdet_class_idx.keys())
parser.add_argument('--save_dir', default=None)
parser.add_argument('--top_k', default=10)
args = parser.parse_args()
vid_proto = proto_load(args.vid_file)
det_files = glob.glob(os.path.join(args.det_root, '*.mat'))
if args.save_dir and not os.path.isdir(args.save_dir):
os.mkdir(args.save_dir)
cls_index = imagenet_vdet_class_idx[args.cls]
for frame, det_file in zip(vid_proto['frames'], det_files):
det = sio.loadmat(det_file)
frame_idx = frame['frame']
img = imread(frame_path_at(vid_proto, frame_idx))
boxes = det['boxes'][:,cls_index,:]
scores = det['zs'][:,cls_index]
keep = nms(np.hstack((boxes,scores[:,np.newaxis])), 0.3)
det_img = add_bbox(img, [boxes[i,:] for i in keep])
cv2.imshow('detection', det_img)
if cv2.waitKey(0) == ord('q'):
cv2.destroyAllWindows()
sys.exit(0)
cv2.destroyAllWindows()
if args.save_dir:
imwrite(os.path.join(args.save_dir, "{:04d}.jpg".format(frame_idx)), det_img)
|
Add a script to visualize detection matfiles.#!/usr/bin/env python
import argparse
import os
import sys
import cv2
from vdetlib.vdet.dataset import imagenet_vdet_class_idx
from vdetlib.utils.common import imread, imwrite
from vdetlib.utils.protocol import proto_load, proto_dump, frame_path_at, frame_top_detections
from vdetlib.utils.visual import add_bbox
from vdetlib.utils.cython_nms import nms
import scipy.io as sio
import glob
import numpy as np
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('vid_file')
parser.add_argument('det_root')
parser.add_argument('--cls', choices=imagenet_vdet_class_idx.keys())
parser.add_argument('--save_dir', default=None)
parser.add_argument('--top_k', default=10)
args = parser.parse_args()
vid_proto = proto_load(args.vid_file)
det_files = glob.glob(os.path.join(args.det_root, '*.mat'))
if args.save_dir and not os.path.isdir(args.save_dir):
os.mkdir(args.save_dir)
cls_index = imagenet_vdet_class_idx[args.cls]
for frame, det_file in zip(vid_proto['frames'], det_files):
det = sio.loadmat(det_file)
frame_idx = frame['frame']
img = imread(frame_path_at(vid_proto, frame_idx))
boxes = det['boxes'][:,cls_index,:]
scores = det['zs'][:,cls_index]
keep = nms(np.hstack((boxes,scores[:,np.newaxis])), 0.3)
det_img = add_bbox(img, [boxes[i,:] for i in keep])
cv2.imshow('detection', det_img)
if cv2.waitKey(0) == ord('q'):
cv2.destroyAllWindows()
sys.exit(0)
cv2.destroyAllWindows()
if args.save_dir:
imwrite(os.path.join(args.save_dir, "{:04d}.jpg".format(frame_idx)), det_img)
|
<commit_before><commit_msg>Add a script to visualize detection matfiles.<commit_after>#!/usr/bin/env python
import argparse
import os
import sys
import cv2
from vdetlib.vdet.dataset import imagenet_vdet_class_idx
from vdetlib.utils.common import imread, imwrite
from vdetlib.utils.protocol import proto_load, proto_dump, frame_path_at, frame_top_detections
from vdetlib.utils.visual import add_bbox
from vdetlib.utils.cython_nms import nms
import scipy.io as sio
import glob
import numpy as np
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('vid_file')
parser.add_argument('det_root')
parser.add_argument('--cls', choices=imagenet_vdet_class_idx.keys())
parser.add_argument('--save_dir', default=None)
parser.add_argument('--top_k', default=10)
args = parser.parse_args()
vid_proto = proto_load(args.vid_file)
det_files = glob.glob(os.path.join(args.det_root, '*.mat'))
if args.save_dir and not os.path.isdir(args.save_dir):
os.mkdir(args.save_dir)
cls_index = imagenet_vdet_class_idx[args.cls]
for frame, det_file in zip(vid_proto['frames'], det_files):
det = sio.loadmat(det_file)
frame_idx = frame['frame']
img = imread(frame_path_at(vid_proto, frame_idx))
boxes = det['boxes'][:,cls_index,:]
scores = det['zs'][:,cls_index]
keep = nms(np.hstack((boxes,scores[:,np.newaxis])), 0.3)
det_img = add_bbox(img, [boxes[i,:] for i in keep])
cv2.imshow('detection', det_img)
if cv2.waitKey(0) == ord('q'):
cv2.destroyAllWindows()
sys.exit(0)
cv2.destroyAllWindows()
if args.save_dir:
imwrite(os.path.join(args.save_dir, "{:04d}.jpg".format(frame_idx)), det_img)
|
|
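The visualization script builds the (N, 5) array that vdetlib's nms() expects by stacking boxes with their scores. The toy sketch below checks only that shape handling with random detections; it does not reimplement NMS itself:

import numpy as np

rng = np.random.default_rng(0)
boxes = rng.uniform(0, 100, size=(6, 4))  # invented x1, y1, x2, y2 per proposal
scores = rng.uniform(0, 1, size=6)        # invented confidence scores

dets = np.hstack((boxes, scores[:, np.newaxis]))
print(dets.shape)  # (6, 5), one row per proposal with its score appended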
a3638f641098b1e713492d1a5fd832c8f9c3da5d
|
resolwe/flow/migrations/0005_duplicate_data_dependency.py
|
resolwe/flow/migrations/0005_duplicate_data_dependency.py
|
# Generated by Django 3.1.7 on 2021-10-12 10:39
from django.db import migrations, models
def create_duplicate_dependencies(apps, schema_editor):
Data = apps.get_model("flow", "Data")
DataDependency = apps.get_model("flow", "DataDependency")
duplicates = Data.objects.filter(duplicated__isnull=False)
duplicates_without_relation = duplicates.exclude(
parents_dependency__kind="duplicate"
).annotate(
parent_id=models.Subquery(
Data.objects.filter(
location_id=models.OuterRef("location_id"), duplicated__isnull=True
).values("id")
)
)
DataDependency.objects.bulk_create(
DataDependency(kind="duplicate", parent_id=duplicate.parent_id, child=duplicate)
for duplicate in duplicates_without_relation
)
class Migration(migrations.Migration):
dependencies = [
("flow", "0004_data_process_resources"),
]
operations = [
migrations.RunPython(create_duplicate_dependencies),
]
|
Add missing DataDependency objects for duplicates
|
Add missing DataDependency objects for duplicates
|
Python
|
apache-2.0
|
genialis/resolwe,genialis/resolwe
|
Add missing DataDependency objects for duplicates
|
# Generated by Django 3.1.7 on 2021-10-12 10:39
from django.db import migrations, models
def create_duplicate_dependencies(apps, schema_editor):
Data = apps.get_model("flow", "Data")
DataDependency = apps.get_model("flow", "DataDependency")
duplicates = Data.objects.filter(duplicated__isnull=False)
duplicates_without_relation = duplicates.exclude(
parents_dependency__kind="duplicate"
).annotate(
parent_id=models.Subquery(
Data.objects.filter(
location_id=models.OuterRef("location_id"), duplicated__isnull=True
).values("id")
)
)
DataDependency.objects.bulk_create(
DataDependency(kind="duplicate", parent_id=duplicate.parent_id, child=duplicate)
for duplicate in duplicates_without_relation
)
class Migration(migrations.Migration):
dependencies = [
("flow", "0004_data_process_resources"),
]
operations = [
migrations.RunPython(create_duplicate_dependencies),
]
|
<commit_before><commit_msg>Add missing DataDependency objects for duplicates<commit_after>
|
# Generated by Django 3.1.7 on 2021-10-12 10:39
from django.db import migrations, models
def create_duplicate_dependencies(apps, schema_editor):
Data = apps.get_model("flow", "Data")
DataDependency = apps.get_model("flow", "DataDependency")
duplicates = Data.objects.filter(duplicated__isnull=False)
duplicates_without_relation = duplicates.exclude(
parents_dependency__kind="duplicate"
).annotate(
parent_id=models.Subquery(
Data.objects.filter(
location_id=models.OuterRef("location_id"), duplicated__isnull=True
).values("id")
)
)
DataDependency.objects.bulk_create(
DataDependency(kind="duplicate", parent_id=duplicate.parent_id, child=duplicate)
for duplicate in duplicates_without_relation
)
class Migration(migrations.Migration):
dependencies = [
("flow", "0004_data_process_resources"),
]
operations = [
migrations.RunPython(create_duplicate_dependencies),
]
|
Add missing DataDependency objects for duplicates# Generated by Django 3.1.7 on 2021-10-12 10:39
from django.db import migrations, models
def create_duplicate_dependencies(apps, schema_editor):
Data = apps.get_model("flow", "Data")
DataDependency = apps.get_model("flow", "DataDependency")
duplicates = Data.objects.filter(duplicated__isnull=False)
duplicates_without_relation = duplicates.exclude(
parents_dependency__kind="duplicate"
).annotate(
parent_id=models.Subquery(
Data.objects.filter(
location_id=models.OuterRef("location_id"), duplicated__isnull=True
).values("id")
)
)
DataDependency.objects.bulk_create(
DataDependency(kind="duplicate", parent_id=duplicate.parent_id, child=duplicate)
for duplicate in duplicates_without_relation
)
class Migration(migrations.Migration):
dependencies = [
("flow", "0004_data_process_resources"),
]
operations = [
migrations.RunPython(create_duplicate_dependencies),
]
|
<commit_before><commit_msg>Add missing DataDependency objects for duplicates<commit_after># Generated by Django 3.1.7 on 2021-10-12 10:39
from django.db import migrations, models
def create_duplicate_dependencies(apps, schema_editor):
Data = apps.get_model("flow", "Data")
DataDependency = apps.get_model("flow", "DataDependency")
duplicates = Data.objects.filter(duplicated__isnull=False)
duplicates_without_relation = duplicates.exclude(
parents_dependency__kind="duplicate"
).annotate(
parent_id=models.Subquery(
Data.objects.filter(
location_id=models.OuterRef("location_id"), duplicated__isnull=True
).values("id")
)
)
DataDependency.objects.bulk_create(
DataDependency(kind="duplicate", parent_id=duplicate.parent_id, child=duplicate)
for duplicate in duplicates_without_relation
)
class Migration(migrations.Migration):
dependencies = [
("flow", "0004_data_process_resources"),
]
operations = [
migrations.RunPython(create_duplicate_dependencies),
]
|
|
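Stripped of the ORM, the pairing this migration performs (match each duplicate to the original Data object sharing its location) reduces to a dictionary lookup. A plain-Python sketch with invented rows, shown here only to make the Subquery/OuterRef logic concrete:

# Invented rows standing in for Data objects: (id, location_id, duplicated)
rows = [
    (1, 100, None),  # original at location 100
    (2, 100, True),  # duplicate of it
    (3, 200, None),
    (4, 200, True),
]

# Map each location to its original (duplicated is NULL), like the Subquery.
original_by_location = {loc: pk for pk, loc, dup in rows if dup is None}

dependencies = [
    {"kind": "duplicate", "parent_id": original_by_location[loc], "child_id": pk}
    for pk, loc, dup in rows
    if dup is not None
]
print(dependencies)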
0a28e4239a7393a9e8e4fa2b57126c7d28ed4bea
|
astrodb/models.py
|
astrodb/models.py
|
"""
An attempt to model the data with the "peewee" ORM
You have to install "peewee" from its repository (this code uses ManyToManyField, which has been documented but omitted from the latest release, so until it ships we will develop against the unstable version): ::
pip install -e git+https://github.com/coleifer/peewee#egg=peewee
For now this is a one-shot script, meaning you have to remove the database file before re-running it.
Also, the code should be split into separate modules rather than residing in the models.
"""
import os
from peewee import *
from playhouse.shortcuts import ManyToManyField
# Dummy connector using sqlite3 for the demo; user settings will be needed
# later to select another database type
database_filepath = 'my_app.db'
db = SqliteDatabase(database_filepath)
class BaseModel(Model):
class Meta:
database = db
class Name(BaseModel):
"""
Name entry for stars
"""
name = CharField(max_length=50, unique=True, null=False)
class Star(BaseModel):
"""
Star model
Can have multiple names that are not unique (Many stars can share the same names), so we use a ManyToMany relation
"""
names = ManyToManyField(Name, related_name='stars')
right_ascension = DoubleField(default=0.0, null=False)
declination = DoubleField(default=0.0, null=False)
# Automatic through model for m2m (Many To Many) "name" relation
StarName = Star.names.get_through_model()
class Temperature(BaseModel):
"""
Temperature model
"""
value = IntegerField(null=False)
class RadialVelocity(BaseModel):
"""
Radial velocity model
"""
value = DoubleField(default=0.0, null=False)
observationdate = DoubleField(default=0.0, null=False)
class Abundance(BaseModel):
"""
Abundance model
"""
value = DoubleField(default=0.0, null=False)
carbon = DoubleField(default=0.0, null=False)
oxygen = DoubleField(default=0.0, null=False)
# Connect to the database and create the tables if they do not already exist
if not os.path.exists(database_filepath):
print "=== Creating tables ==="
db.connect()
db.create_tables([Name, Star, StarName, Temperature, RadialVelocity, Abundance])
print "Finished!"
|
Add demo for modeling data using the peewee ORM
|
Add demo for modeling data using the peewee ORM
|
Python
|
mit
|
EricDepagne/Astrodb
|
Add demo for modeling data using the peewee ORM
|
"""
An attempt to model the data with the "peewee" ORM
You have to install "peewee" from its repository (this code uses ManyToManyField, which has been documented but omitted from the latest release, so until it ships we will develop against the unstable version): ::
pip install -e git+https://github.com/coleifer/peewee#egg=peewee
For now this is a one-shot script, meaning you have to remove the database file before re-running it.
Also, the code should be split into separate modules rather than residing in the models.
"""
import os
from peewee import *
from playhouse.shortcuts import ManyToManyField
# Dummy connector using sqlite3 for the demo; user settings will be needed
# later to select another database type
database_filepath = 'my_app.db'
db = SqliteDatabase(database_filepath)
class BaseModel(Model):
class Meta:
database = db
class Name(BaseModel):
"""
Name entry for stars
"""
name = CharField(max_length=50, unique=True, null=False)
class Star(BaseModel):
"""
Star model
Can have multiple names that are not unique (Many stars can share the same names), so we use a ManyToMany relation
"""
names = ManyToManyField(Name, related_name='stars')
right_ascension = DoubleField(default=0.0, null=False)
declination = DoubleField(default=0.0, null=False)
# Automatic through model for m2m (Many To Many) "name" relation
StarName = Star.names.get_through_model()
class Temperature(BaseModel):
"""
Temperature model
"""
value = IntegerField(null=False)
class RadialVelocity(BaseModel):
"""
Radial velocity model
"""
value = DoubleField(default=0.0, null=False)
observationdate = DoubleField(default=0.0, null=False)
class Abundance(BaseModel):
"""
Abundance model
"""
value = DoubleField(default=0.0, null=False)
carbon = DoubleField(default=0.0, null=False)
oxygen = DoubleField(default=0.0, null=False)
# Connect to the database and create the tables if they do not already exist
if not os.path.exists(database_filepath):
print "=== Creating tables ==="
db.connect()
db.create_tables([Name, Star, StarName, Temperature, RadialVelocity, Abundance])
print "Finished!"
|
<commit_before><commit_msg>Add demo for modeling data using the peewee ORM<commit_after>
|
"""
An attempt to model the data with the "peewee" ORM
You have to install "peewee" from its repository (this code uses ManyToManyField, which has been documented but omitted from the latest release, so until it ships we will develop against the unstable version): ::
pip install -e git+https://github.com/coleifer/peewee#egg=peewee
For now this is a one-shot script, meaning you have to remove the database file before re-running it.
Also, the code should be split into separate modules rather than residing in the models.
"""
import os
from peewee import *
from playhouse.shortcuts import ManyToManyField
# Dummy connector using sqlite3 for the demo; user settings will be needed
# later to select another database type
database_filepath = 'my_app.db'
db = SqliteDatabase(database_filepath)
class BaseModel(Model):
class Meta:
database = db
class Name(BaseModel):
"""
Name entry for stars
"""
name = CharField(max_length=50, unique=True, null=False)
class Star(BaseModel):
"""
Star model
Can have multiple names that are not unique (Many stars can share the same names), so we use a ManyToMany relation
"""
names = ManyToManyField(Name, related_name='stars')
right_ascension = DoubleField(default=0.0, null=False)
declination = DoubleField(default=0.0, null=False)
# Automatic through model for m2m (Many To Many) "name" relation
StarName = Star.names.get_through_model()
class Temperature(BaseModel):
"""
Temperature model
"""
value = IntegerField(null=False)
class RadialVelocity(BaseModel):
"""
Radial velocity model
"""
value = DoubleField(default=0.0, null=False)
observationdate = DoubleField(default=0.0, null=False)
class Abundance(BaseModel):
"""
Abundance model
"""
value = DoubleField(default=0.0, null=False)
carbon = DoubleField(default=0.0, null=False)
oxygen = DoubleField(default=0.0, null=False)
# Connect to the database and create the tables if they do not already exist
if not os.path.exists(database_filepath):
print "=== Creating tables ==="
db.connect()
db.create_tables([Name, Star, StarName, Temperature, RadialVelocity, Abundance])
print "Finished!"
|
Add demo for modeling data using the peewee ORM"""
An attempt to model the data with the "peewee" ORM
You have to install "peewee" from its repository (this code uses ManyToManyField, which has been documented but omitted from the latest release, so until it ships we will develop against the unstable version): ::
pip install -e git+https://github.com/coleifer/peewee#egg=peewee
For now this is a one-shot script, meaning you have to remove the database file before re-running it.
Also, the code should be split into separate modules rather than residing in the models.
"""
import os
from peewee import *
from playhouse.shortcuts import ManyToManyField
# Dummy connector using sqlite3 for the demo; user settings will be needed
# later to select another database type
database_filepath = 'my_app.db'
db = SqliteDatabase(database_filepath)
class BaseModel(Model):
class Meta:
database = db
class Name(BaseModel):
"""
Name entry for stars
"""
name = CharField(max_length=50, unique=True, null=False)
class Star(BaseModel):
"""
Star model
Can have multiple names that are not unique (Many stars can share the same names), so we use a ManyToMany relation
"""
names = ManyToManyField(Name, related_name='stars')
right_ascension = DoubleField(default=0.0, null=False)
declination = DoubleField(default=0.0, null=False)
# Automatic through model for m2m (Many To Many) "name" relation
StarName = Star.names.get_through_model()
class Temperature(BaseModel):
"""
Temperature model
"""
value = IntegerField(null=False)
class RadialVelocity(BaseModel):
"""
Radial velocity model
"""
value = DoubleField(default=0.0, null=False)
observationdate = DoubleField(default=0.0, null=False)
class Abundance(BaseModel):
"""
Abundance model
"""
value = DoubleField(default=0.0, null=False)
carbon = DoubleField(default=0.0, null=False)
oxygen = DoubleField(default=0.0, null=False)
# Connect to the database and create the tables if they do not already exist
if not os.path.exists(database_filepath):
print "=== Creating tables ==="
db.connect()
db.create_tables([Name, Star, StarName, Temperature, RadialVelocity, Abundance])
print "Finished!"
|
<commit_before><commit_msg>Add demo for modeling data using the peewee ORM<commit_after>"""
An attempt to model the data with the "peewee" ORM
You have to install "peewee" from its repository (this code uses ManyToManyField, which has been documented but omitted from the latest release, so until it ships we will develop against the unstable version): ::
pip install -e git+https://github.com/coleifer/peewee#egg=peewee
For now this is a one-shot script, meaning you have to remove the database file before re-running it.
Also, the code should be split into separate modules rather than residing in the models.
"""
import os
from peewee import *
from playhouse.shortcuts import ManyToManyField
# Dummy connector using sqlite3 for the demo; user settings will be needed
# later to select another database type
database_filepath = 'my_app.db'
db = SqliteDatabase(database_filepath)
class BaseModel(Model):
class Meta:
database = db
class Name(BaseModel):
"""
Name entry for stars
"""
name = CharField(max_length=50, unique=True, null=False)
class Star(BaseModel):
"""
Star model
Can have multiple names that are not unique (Many stars can share the same names), so we use a ManyToMany relation
"""
names = ManyToManyField(Name, related_name='stars')
right_ascension = DoubleField(default=0.0, null=False)
declination = DoubleField(default=0.0, null=False)
# Automatic through model for m2m (Many To Many) "name" relation
StarName = Star.names.get_through_model()
class Temperature(BaseModel):
"""
Temperature model
"""
value = IntegerField(null=False)
class RadialVelocity(BaseModel):
"""
Radial velocity model
"""
value = DoubleField(default=0.0, null=False)
observationdate = DoubleField(default=0.0, null=False)
class Abundance(BaseModel):
"""
Abundance model
"""
value = DoubleField(default=0.0, null=False)
carbon = DoubleField(default=0.0, null=False)
oxygen = DoubleField(default=0.0, null=False)
# Connect to the database and create the tables if they do not already exist
if not os.path.exists(database_filepath):
print "=== Creating tables ==="
db.connect()
db.create_tables([Name, Star, StarName, Temperature, RadialVelocity, Abundance])
print "Finished!"
|
|
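Once the tables exist, wiring stars to names goes through the many-to-many accessor. The sketch below is a hedged usage example against an in-memory database; it assumes the unstable-branch ManyToManyField API the module imports, where .add() and get_through_model() follow peewee's documented m2m pattern, and the star data is purely illustrative:

from peewee import SqliteDatabase, Model, CharField, DoubleField
from playhouse.shortcuts import ManyToManyField  # unstable-branch import, as above

db = SqliteDatabase(":memory:")

class BaseModel(Model):
    class Meta:
        database = db

class Name(BaseModel):
    name = CharField(max_length=50, unique=True)

class Star(BaseModel):
    names = ManyToManyField(Name, related_name="stars")
    right_ascension = DoubleField(default=0.0)
    declination = DoubleField(default=0.0)

StarName = Star.names.get_through_model()

db.connect()
db.create_tables([Name, Star, StarName])

sirius = Star.create(right_ascension=101.287, declination=-16.716)
sirius.names.add(Name.create(name="Sirius"))
sirius.names.add(Name.create(name="Alpha CMa"))
print([n.name for n in sirius.names])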
6a36df252e5f7c17cb2dfc63062472c65645ad2f
|
unleashed/api.py
|
unleashed/api.py
|
import requests
from auth import UnleashedAuth
class UnleashedApi(object):
"""
Unleashed API client library.
"""
def __init__(self, api_url, api_id, api_key):
self.api_url = api_url
self.auth = UnleashedAuth(api_id, api_key)
def _get_request(self, method, params=None):
params = params or {}
headers = {
'content-type': 'application/json',
'accept': 'application/json',
}
resp = requests.get(
self.api_url + '/' + method,
headers=headers,
params=params,
auth=self.auth
)
return resp
def _post_request(self, method, body):
headers = {
'content-type': 'application/json',
'accept': 'application/json',
}
resp = requests.post(
self.api_url + '/' + method,
body,
headers=headers,
auth=self.auth
)
return resp
|
Add basic API which can make GET and POST requests
|
Add basic API which can make GET and POST requests
|
Python
|
mit
|
jsok/unleashed
|
Add basic API which can make GET and POST requests
|
import requests
from auth import UnleashedAuth
class UnleashedApi(object):
"""
Unleashed API client library.
"""
def __init__(self, api_url, api_id, api_key):
self.api_url = api_url
self.auth = UnleashedAuth(api_id, api_key)
def _get_request(self, method, params=None):
params = params or {}
headers = {
'content-type': 'application/json',
'accept': 'application/json',
}
resp = requests.get(
self.api_url + '/' + method,
headers=headers,
params=params,
auth=self.auth
)
return resp
def _post_request(self, method, body):
headers = {
'content-type': 'application/json',
'accept': 'application/json',
}
resp = requests.post(
self.api_url + '/' + method,
body,
headers=headers,
auth=self.auth
)
return resp
|
<commit_before><commit_msg>Add basic API which can make GET and POST requests<commit_after>
|
import requests
from auth import UnleashedAuth
class UnleashedApi(object):
"""
Unleashed API client library.
"""
def __init__(self, api_url, api_id, api_key):
self.api_url = api_url
self.auth = UnleashedAuth(api_id, api_key)
def _get_request(self, method, params=None):
params = params or {}
headers = {
'content-type': 'application/json',
'accept': 'application/json',
}
resp = requests.get(
self.api_url + '/' + method,
headers=headers,
params=params,
auth=self.auth
)
return resp
def _post_request(self, method, body):
headers = {
'content-type': 'application/json',
'accept': 'application/json',
}
resp = requests.post(
self.api_url + '/' + method,
body,
headers=headers,
auth=self.auth
)
return resp
|
Add basic API which can make GET and POST requestsimport requests
from auth import UnleashedAuth
class UnleashedApi(object):
"""
Unleashed API client library.
"""
def __init__(self, api_url, api_id, api_key):
self.api_url = api_url
self.auth = UnleashedAuth(api_id, api_key)
def _get_request(self, method, params=None):
params = params or {}
headers = {
'content-type': 'application/json',
'accept': 'application/json',
}
resp = requests.get(
self.api_url + '/' + method,
headers=headers,
params=params,
auth=self.auth
)
return resp
def _post_request(self, method, body):
headers = {
'content-type': 'application/json',
'accept': 'application/json',
}
resp = requests.post(
self.api_url + '/' + method,
body,
headers=headers,
auth=self.auth
)
return resp
|
<commit_before><commit_msg>Add basic API which can make GET and POST requests<commit_after>import requests
from auth import UnleashedAuth
class UnleashedApi(object):
"""
Unleashed API client library.
"""
def __init__(self, api_url, api_id, api_key):
self.api_url = api_url
self.auth = UnleashedAuth(api_id, api_key)
def _get_request(self, method, params=None):
params = params or {}
headers = {
'content-type': 'application/json',
'accept': 'application/json',
}
resp = requests.get(
self.api_url + '/' + method,
headers=headers,
params=params,
auth=self.auth
)
return resp
def _post_request(self, method, body):
headers = {
'content-type': 'application/json',
'accept': 'application/json',
}
resp = requests.post(
self.api_url + '/' + method,
body,
headers=headers,
auth=self.auth
)
return resp
|
|
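Typical use of the client above is to construct it and issue a GET; the endpoint and credentials below are placeholders, and since the request helpers are underscore-prefixed, public per-resource wrappers would normally sit on top of them:

from unleashed.api import UnleashedApi

api = UnleashedApi(
    api_url="https://api.unleashedsoftware.com",  # example endpoint, verify against the docs
    api_id="YOUR_API_ID",                         # placeholder credentials
    api_key="YOUR_API_KEY",
)

# _get_request signs the call via UnleashedAuth and returns the raw
# requests.Response object.
resp = api._get_request("Products", params={"pageSize": 10})
print(resp.status_code)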
fbc088bbe3f7a8493c359618b35e6c31d9b7afce
|
district-court-scraper.py
|
district-court-scraper.py
|
import cookielib
import os
import re
import sys
import urllib
import urllib2
import webbrowser
from bs4 import BeautifulSoup
from time import sleep
user_agent = u"Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.6; en-US; " + \
u"rv:1.9.2.11) Gecko/20101012 Firefox/3.6.11"
# Get cookie and list of courts
cookieJar = cookielib.CookieJar()
opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cookieJar))
opener.addheaders = [('User-Agent', user_agent)]
home = opener.open('https://eapps.courts.state.va.us/gdcourts/captchaVerification.do?landing=landing')
html = BeautifulSoup(home.read())
captcha_url = None
for script_tag in html.find_all('script', {'src':True}):
if script_tag['src'].startswith('https://www.google.com/recaptcha/api/challenge'):
captcha_url = script_tag['src']
captcha_image_url = 'https://www.google.com/recaptcha/api/image?c='
captcha_challenge = None
for line in urllib2.urlopen(captcha_url):
if line.strip().startswith('challenge'):
captcha_challenge = line.split(':')[1].strip()[1:-2]
webbrowser.open(captcha_image_url + captcha_challenge)
captcha_response = raw_input('Enter CAPTCHA: ')
data = urllib.urlencode({
'recaptcha_challenge_field': captcha_challenge,
'recaptcha_response_field': captcha_response,
'accept': 'Accept',
'pageName': 'landingCaptchaVerificationPage',
'showCaptcha': True})
captcha_verification_url = u"https://eapps.courts.state.va.us/gdcourts/captchaVerification.do"
html = opener.open(captcha_verification_url, data)
print html.read()
raw_input('PAUSE')
case_search_url = u"https://eapps.courts.state.va.us/gdcourts/caseSearch.do?welcomePage=welcomePage"
html = opener.open(case_search_url)
print html.read()
#html = BeautifulSoup(home.read())
|
Allow user to solve district court captcha
|
Allow user to solve district court captcha
|
Python
|
mit
|
CodeforNRV/attorney-analysis
|
Allow user to solve district court captcha
|
import cookielib
import os
import re
import sys
import urllib
import urllib2
import webbrowser
from bs4 import BeautifulSoup
from time import sleep
user_agent = u"Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.6; en-US; " + \
u"rv:1.9.2.11) Gecko/20101012 Firefox/3.6.11"
# Get cookie and list of courts
cookieJar = cookielib.CookieJar()
opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cookieJar))
opener.addheaders = [('User-Agent', user_agent)]
home = opener.open('https://eapps.courts.state.va.us/gdcourts/captchaVerification.do?landing=landing')
html = BeautifulSoup(home.read())
captcha_url = None
for script_tag in html.find_all('script', {'src':True}):
if script_tag['src'].startswith('https://www.google.com/recaptcha/api/challenge'):
captcha_url = script_tag['src']
captcha_image_url = 'https://www.google.com/recaptcha/api/image?c='
captcha_challenge = None
for line in urllib2.urlopen(captcha_url):
if line.strip().startswith('challenge'):
captcha_challenge = line.split(':')[1].strip()[1:-2]
webbrowser.open(captcha_image_url + captcha_challenge)
captcha_response = raw_input('Enter CAPTCHA: ')
data = urllib.urlencode({
'recaptcha_challenge_field': captcha_challenge,
'recaptcha_response_field': captcha_response,
'accept': 'Accept',
'pageName': 'landingCaptchaVerificationPage',
'showCaptcha': True})
captcha_verification_url = u"https://eapps.courts.state.va.us/gdcourts/captchaVerification.do"
html = opener.open(captcha_verification_url, data)
print html.read()
raw_input('PAUSE')
case_search_url = u"https://eapps.courts.state.va.us/gdcourts/caseSearch.do?welcomePage=welcomePage"
html = opener.open(case_search_url)
print html.read()
#html = BeautifulSoup(home.read())
|
<commit_before><commit_msg>Allow user to solve district court captcha<commit_after>
|
import cookielib
import os
import re
import sys
import urllib
import urllib2
import webbrowser
from bs4 import BeautifulSoup
from time import sleep
user_agent = u"Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.6; en-US; " + \
u"rv:1.9.2.11) Gecko/20101012 Firefox/3.6.11"
# Get cookie and list of courts
cookieJar = cookielib.CookieJar()
opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cookieJar))
opener.addheaders = [('User-Agent', user_agent)]
home = opener.open('https://eapps.courts.state.va.us/gdcourts/captchaVerification.do?landing=landing')
html = BeautifulSoup(home.read())
captcha_url = None
for script_tag in html.find_all('script', {'src':True}):
if script_tag['src'].startswith('https://www.google.com/recaptcha/api/challenge'):
captcha_url = script_tag['src']
captcha_image_url = 'https://www.google.com/recaptcha/api/image?c='
captcha_challenge = None
for line in urllib2.urlopen(captcha_url):
if line.strip().startswith('challenge'):
captcha_challenge = line.split(':')[1].strip()[1:-2]
webbrowser.open(captcha_image_url + captcha_challenge)
captcha_response = raw_input('Enter CAPTCHA: ')
data = urllib.urlencode({
'recaptcha_challenge_field': captcha_challenge,
'recaptcha_response_field': captcha_response,
'accept': 'Accept',
'pageName': 'landingCaptchaVerificationPage',
'showCaptcha': True})
captcha_verification_url = u"https://eapps.courts.state.va.us/gdcourts/captchaVerification.do"
html = opener.open(captcha_verification_url, data)
print html.read()
raw_input('PAUSE')
case_search_url = u"https://eapps.courts.state.va.us/gdcourts/caseSearch.do?welcomePage=welcomePage"
html = opener.open(case_search_url)
print html.read()
#html = BeautifulSoup(home.read())
|
Allow user to solve district court captchaimport cookielib
import os
import re
import sys
import urllib
import urllib2
import webbrowser
from bs4 import BeautifulSoup
from time import sleep
user_agent = u"Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.6; en-US; " + \
u"rv:1.9.2.11) Gecko/20101012 Firefox/3.6.11"
# Get cookie and list of courts
cookieJar = cookielib.CookieJar()
opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cookieJar))
opener.addheaders = [('User-Agent', user_agent)]
home = opener.open('https://eapps.courts.state.va.us/gdcourts/captchaVerification.do?landing=landing')
html = BeautifulSoup(home.read())
captcha_url = None
for script_tag in html.find_all('script', {'src':True}):
if script_tag['src'].startswith('https://www.google.com/recaptcha/api/challenge'):
captcha_url = script_tag['src']
captcha_image_url = 'https://www.google.com/recaptcha/api/image?c='
captcha_challenge = None
for line in urllib2.urlopen(captcha_url):
if line.strip().startswith('challenge'):
captcha_challenge = line.split(':')[1].strip()[1:-2]
webbrowser.open(captcha_image_url + captcha_challenge)
captcha_response = raw_input('Enter CAPTCHA: ')
data = urllib.urlencode({
'recaptcha_challenge_field': captcha_challenge,
'recaptcha_response_field': captcha_response,
'accept': 'Accept',
'pageName': 'landingCaptchaVerificationPage',
'showCaptcha': True})
captcha_verification_url = u"https://eapps.courts.state.va.us/gdcourts/captchaVerification.do"
html = opener.open(captcha_verification_url, data)
print html.read()
raw_input('PAUSE')
case_search_url = u"https://eapps.courts.state.va.us/gdcourts/caseSearch.do?welcomePage=welcomePage"
html = opener.open(case_search_url)
print html.read()
#html = BeautifulSoup(home.read())
|
<commit_before><commit_msg>Allow user to solve district court captcha<commit_after>import cookielib
import os
import re
import sys
import urllib
import urllib2
import webbrowser
from bs4 import BeautifulSoup
from time import sleep
user_agent = u"Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.6; en-US; " + \
u"rv:1.9.2.11) Gecko/20101012 Firefox/3.6.11"
# Get cookie and list of courts
cookieJar = cookielib.CookieJar()
opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cookieJar))
opener.addheaders = [('User-Agent', user_agent)]
home = opener.open('https://eapps.courts.state.va.us/gdcourts/captchaVerification.do?landing=landing')
html = BeautifulSoup(home.read())
captcha_url = None
for script_tag in html.find_all('script', {'src':True}):
if script_tag['src'].startswith('https://www.google.com/recaptcha/api/challenge'):
captcha_url = script_tag['src']
captcha_image_url = 'https://www.google.com/recaptcha/api/image?c='
captcha_challenge = None
for line in urllib2.urlopen(captcha_url):
if line.strip().startswith('challenge'):
captcha_challenge = line.split(':')[1].strip()[1:-2]
webbrowser.open(captcha_image_url + captcha_challenge)
captcha_response = raw_input('Enter CAPTCHA: ')
data = urllib.urlencode({
'recaptcha_challenge_field': captcha_challenge,
'recaptcha_response_field': captcha_response,
'accept': 'Accept',
'pageName': 'landingCaptchaVerificationPage',
'showCaptcha': True})
captcha_verification_url = u"https://eapps.courts.state.va.us/gdcourts/captchaVerification.do"
html = opener.open(captcha_verification_url, data)
print html.read()
raw_input('PAUSE')
case_search_url = u"https://eapps.courts.state.va.us/gdcourts/caseSearch.do?welcomePage=welcomePage"
html = opener.open(case_search_url)
print html.read()
#html = BeautifulSoup(home.read())
|
|
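The most fragile step in the scraper above is pulling the challenge token out of the reCAPTCHA JavaScript by string slicing. That parsing can be exercised on a canned sample without touching the network; the token below is invented:

# Sample of the JavaScript body the scraper scans (token is made up).
sample_js = """
var RecaptchaState = {
    challenge : '03AHJ_VuExampleToken123',
    timeout : 1800
};
"""

captcha_challenge = None
for line in sample_js.splitlines():
    if line.strip().startswith('challenge'):
        # Same slicing as the scraper: split on ':', strip quotes and comma.
        captcha_challenge = line.split(':')[1].strip()[1:-2]

print(captcha_challenge)  # 03AHJ_VuExampleToken123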
aa55e73c8757772486eec3d4b2f1bf980f7445e0
|
tools/convert-url-history.py
|
tools/convert-url-history.py
|
#!/usr/bin/env python
# Copyright 2009 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generate a data file based on an input file containing URL's.
This outputs a "weighted" (top hits first) data file for namebench
to use. Ignores subsequent hits for the same site.
"""
import operator
import sys
import re
if len(sys.argv) < 2:
  print "You must provide a filename."
  sys.exit(1)
filename = sys.argv[1]
parse_re = re.compile(' \w+://([\-\w\.]+)')
hits = {}
last_host = None
for line in open(filename).readlines():
match = parse_re.search(line)
if match:
host = match.groups()[0] + '.'
if host != last_host:
hits[host] = hits.get(host, 0) + 1
last_host = host
top_hits = sorted(hits.items(), key=operator.itemgetter(1),reverse=True)
for (hit, count) in top_hits:
print 'A %s\t# %s hits' % (hit, count)
|
Add initial URL history importer
|
Add initial URL history importer
|
Python
|
apache-2.0
|
mirek2580/namebench
|
Add initial URL history importer
|
#!/usr/bin/env python
# Copyright 2009 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generate a data file based on an input file containing URL's.
This outputs a "weighted" (top hits first) data file for namebench
to use. Ignores subsequent hits for the same site.
"""
import operator
import sys
import re
if len(sys.argv) < 2:
  print "You must provide a filename."
  sys.exit(1)
filename = sys.argv[1]
parse_re = re.compile(' \w+://([\-\w\.]+)')
hits = {}
last_host = None
for line in open(filename).readlines():
match = parse_re.search(line)
if match:
host = match.groups()[0] + '.'
if host != last_host:
hits[host] = hits.get(host, 0) + 1
last_host = host
top_hits = sorted(hits.items(), key=operator.itemgetter(1),reverse=True)
for (hit, count) in top_hits:
print 'A %s\t# %s hits' % (hit, count)
|
<commit_before><commit_msg>Add initial URL history importer<commit_after>
|
#!/usr/bin/env python
# Copyright 2009 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generate a data file based on an input file containing URL's.
This outputs a "weighted" (top hits first) data file for namebench
to use. Ignores subsequent hits for the same site.
"""
import operator
import sys
import re
if len(sys.argv) < 2:
  print "You must provide a filename."
  sys.exit(1)
filename = sys.argv[1]
parse_re = re.compile(' \w+://([\-\w\.]+)')
hits = {}
last_host = None
for line in open(filename).readlines():
match = parse_re.search(line)
if match:
host = match.groups()[0] + '.'
if host != last_host:
hits[host] = hits.get(host, 0) + 1
last_host = host
top_hits = sorted(hits.items(), key=operator.itemgetter(1),reverse=True)
for (hit, count) in top_hits:
print 'A %s\t# %s hits' % (hit, count)
|
Add initial URL history importer#!/usr/bin/env python
# Copyright 2009 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generate a data file based on an input file containing URL's.
This outputs a "weighted" (top hits first) data file for namebench
to use. Ignores subsequent hits for the same site.
"""
import operator
import sys
import re
if len(sys.argv) < 2:
  print "You must provide a filename."
  sys.exit(1)
filename = sys.argv[1]
parse_re = re.compile(' \w+://([\-\w\.]+)')
hits = {}
last_host = None
for line in open(filename).readlines():
match = parse_re.search(line)
if match:
host = match.groups()[0] + '.'
if host != last_host:
hits[host] = hits.get(host, 0) + 1
last_host = host
top_hits = sorted(hits.items(), key=operator.itemgetter(1),reverse=True)
for (hit, count) in top_hits:
print 'A %s\t# %s hits' % (hit, count)
|
<commit_before><commit_msg>Add initial URL history importer<commit_after>#!/usr/bin/env python
# Copyright 2009 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generate a data file based on an input file containing URL's.
This outputs a "weighted" (top hits first) data file for namebench
to use. Ignores subsequent hits for the same site.
"""
import operator
import sys
import re
if len(sys.argv) < 2:
  print "You must provide a filename."
  sys.exit(1)
filename = sys.argv[1]
parse_re = re.compile(' \w+://([\-\w\.]+)')
hits = {}
last_host = None
for line in open(filename).readlines():
match = parse_re.search(line)
if match:
host = match.groups()[0] + '.'
if host != last_host:
hits[host] = hits.get(host, 0) + 1
last_host = host
top_hits = sorted(hits.items(), key=operator.itemgetter(1),reverse=True)
for (hit, count) in top_hits:
print 'A %s\t# %s hits' % (hit, count)
|
|
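To make the weighting and consecutive-hit deduplication above concrete, here is a self-contained run of the same loop over fabricated history lines (the sample URLs are invented for illustration; real browser history exports vary in format):
import operator
import re
parse_re = re.compile(r' \w+://([\-\w\.]+)')
# Invented history lines; note the back-to-back example.com entries.
sample_lines = [
    "1299 visit https://example.com/a",
    "1300 visit https://example.com/b",   # same host consecutively: counted once
    "1301 visit https://news.test/x",
    "1302 visit https://example.com/c",   # host seen again later: counted again
]
hits = {}
last_host = None
for line in sample_lines:
    match = parse_re.search(line)
    if match:
        host = match.groups()[0] + '.'
        if host != last_host:
            hits[host] = hits.get(host, 0) + 1
        last_host = host
for (hit, count) in sorted(hits.items(), key=operator.itemgetter(1), reverse=True):
    print('A %s\t# %s hits' % (hit, count))
# prints: A example.com.  (2 hits), then A news.test.  (1 hit)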
3256d970f1dd8f34a91b6f5161ddcb54da83fb9f
|
checks/check_polygons_stay_valid_during_augmentation.py
|
checks/check_polygons_stay_valid_during_augmentation.py
|
from __future__ import print_function, division
import numpy as np
import imgaug as ia
import imgaug.augmenters as iaa
from imgaug.augmentables.polys import Polygon, PolygonsOnImage
def main():
nb_checked = 0
augs = iaa.SomeOf((1, None), [
iaa.Resize({"height": (1, 100), "width": (1, 100)}),
iaa.Affine(
scale=(0.01, 2.0),
rotate=(-360, 360),
shear=(-360, 360),
translate_px={"x": (-50, 50), "y": (-50, 50)}
),
iaa.PerspectiveTransform((0.01, 0.2))
])
height, width = 100, 200
while True:
poly = create_random_polygon(height, width, nb_checked)
psoi = PolygonsOnImage([poly], shape=(height, width, 3))
psoi_aug = augs.augment_polygons(psoi)
if not poly.is_valid or not psoi_aug.polygons[0].is_valid:
print("poly: ", poly, poly.is_valid)
print("poly_aug: ", psoi_aug.polygons[0], psoi_aug.polygons[0].is_valid)
assert poly.is_valid
assert psoi_aug.polygons[0].is_valid
nb_checked += 1
if nb_checked % 100 == 0:
print("Checked %d..." % (nb_checked,))
if nb_checked > 100000:
break
def create_random_polygon(height, width, seed):
rs = np.random.RandomState(seed)
nb_points = rs.randint(3, 50)
coords = rs.rand(nb_points, 2)
coords = (coords * 2 - 0.5) # allow coords outside of the image plane
coords[:, 0] *= width
coords[:, 1] *= height
poly = Polygon(coords)
if poly.is_valid:
return poly
new_seed = rs.randint(ia.SEED_MIN_VALUE, ia.SEED_MAX_VALUE)
return create_random_polygon(height, width, new_seed)
if __name__ == "__main__":
main()
|
Add check script for polygon validity during aug
|
Add check script for polygon validity during aug
|
Python
|
mit
|
aleju/imgaug,aleju/imgaug,aleju/ImageAugmenter
|
Add check script for polygon validity during aug
|
from __future__ import print_function, division
import numpy as np
import imgaug as ia
import imgaug.augmenters as iaa
from imgaug.augmentables.polys import Polygon, PolygonsOnImage
def main():
nb_checked = 0
augs = iaa.SomeOf((1, None), [
iaa.Resize({"height": (1, 100), "width": (1, 100)}),
iaa.Affine(
scale=(0.01, 2.0),
rotate=(-360, 360),
shear=(-360, 360),
translate_px={"x": (-50, 50), "y": (-50, 50)}
),
iaa.PerspectiveTransform((0.01, 0.2))
])
height, width = 100, 200
while True:
poly = create_random_polygon(height, width, nb_checked)
psoi = PolygonsOnImage([poly], shape=(height, width, 3))
psoi_aug = augs.augment_polygons(psoi)
if not poly.is_valid or not psoi_aug.polygons[0].is_valid:
print("poly: ", poly, poly.is_valid)
print("poly_aug: ", psoi_aug.polygons[0], psoi_aug.polygons[0].is_valid)
assert poly.is_valid
assert psoi_aug.polygons[0].is_valid
nb_checked += 1
if nb_checked % 100 == 0:
print("Checked %d..." % (nb_checked,))
if nb_checked > 100000:
break
def create_random_polygon(height, width, seed):
rs = np.random.RandomState(seed)
nb_points = rs.randint(3, 50)
coords = rs.rand(nb_points, 2)
coords = (coords * 2 - 0.5) # allow coords outside of the image plane
coords[:, 0] *= width
coords[:, 1] *= height
poly = Polygon(coords)
if poly.is_valid:
return poly
new_seed = rs.randint(ia.SEED_MIN_VALUE, ia.SEED_MAX_VALUE)
return create_random_polygon(height, width, new_seed)
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Add check script for polygon validity during aug<commit_after>
|
from __future__ import print_function, division
import numpy as np
import imgaug as ia
import imgaug.augmenters as iaa
from imgaug.augmentables.polys import Polygon, PolygonsOnImage
def main():
nb_checked = 0
augs = iaa.SomeOf((1, None), [
iaa.Resize({"height": (1, 100), "width": (1, 100)}),
iaa.Affine(
scale=(0.01, 2.0),
rotate=(-360, 360),
shear=(-360, 360),
translate_px={"x": (-50, 50), "y": (-50, 50)}
),
iaa.PerspectiveTransform((0.01, 0.2))
])
height, width = 100, 200
while True:
poly = create_random_polygon(height, width, nb_checked)
psoi = PolygonsOnImage([poly], shape=(height, width, 3))
psoi_aug = augs.augment_polygons(psoi)
if not poly.is_valid or not psoi_aug.polygons[0].is_valid:
print("poly: ", poly, poly.is_valid)
print("poly_aug: ", psoi_aug.polygons[0], psoi_aug.polygons[0].is_valid)
assert poly.is_valid
assert psoi_aug.polygons[0].is_valid
nb_checked += 1
if nb_checked % 100 == 0:
print("Checked %d..." % (nb_checked,))
if nb_checked > 100000:
break
def create_random_polygon(height, width, seed):
rs = np.random.RandomState(seed)
nb_points = rs.randint(3, 50)
coords = rs.rand(nb_points, 2)
coords = (coords * 2 - 0.5) # allow coords outside of the image plane
coords[:, 0] *= width
coords[:, 1] *= height
poly = Polygon(coords)
if poly.is_valid:
return poly
new_seed = rs.randint(ia.SEED_MIN_VALUE, ia.SEED_MAX_VALUE)
return create_random_polygon(height, width, new_seed)
if __name__ == "__main__":
main()
|
Add check script for polygon validity during augfrom __future__ import print_function, division
import numpy as np
import imgaug as ia
import imgaug.augmenters as iaa
from imgaug.augmentables.polys import Polygon, PolygonsOnImage
def main():
nb_checked = 0
augs = iaa.SomeOf((1, None), [
iaa.Resize({"height": (1, 100), "width": (1, 100)}),
iaa.Affine(
scale=(0.01, 2.0),
rotate=(-360, 360),
shear=(-360, 360),
translate_px={"x": (-50, 50), "y": (-50, 50)}
),
iaa.PerspectiveTransform((0.01, 0.2))
])
height, width = 100, 200
while True:
poly = create_random_polygon(height, width, nb_checked)
psoi = PolygonsOnImage([poly], shape=(height, width, 3))
psoi_aug = augs.augment_polygons(psoi)
if not poly.is_valid or not psoi_aug.polygons[0].is_valid:
print("poly: ", poly, poly.is_valid)
print("poly_aug: ", psoi_aug.polygons[0], psoi_aug.polygons[0].is_valid)
assert poly.is_valid
assert psoi_aug.polygons[0].is_valid
nb_checked += 1
if nb_checked % 100 == 0:
print("Checked %d..." % (nb_checked,))
if nb_checked > 100000:
break
def create_random_polygon(height, width, seed):
rs = np.random.RandomState(seed)
nb_points = rs.randint(3, 50)
coords = rs.rand(nb_points, 2)
coords = (coords * 2 - 0.5) # allow coords outside of the image plane
coords[:, 0] *= width
coords[:, 1] *= height
poly = Polygon(coords)
if poly.is_valid:
return poly
new_seed = rs.randint(ia.SEED_MIN_VALUE, ia.SEED_MAX_VALUE)
return create_random_polygon(height, width, new_seed)
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Add check script for polygon validity during aug<commit_after>from __future__ import print_function, division
import numpy as np
import imgaug as ia
import imgaug.augmenters as iaa
from imgaug.augmentables.polys import Polygon, PolygonsOnImage
def main():
nb_checked = 0
augs = iaa.SomeOf((1, None), [
iaa.Resize({"height": (1, 100), "width": (1, 100)}),
iaa.Affine(
scale=(0.01, 2.0),
rotate=(-360, 360),
shear=(-360, 360),
translate_px={"x": (-50, 50), "y": (-50, 50)}
),
iaa.PerspectiveTransform((0.01, 0.2))
])
height, width = 100, 200
while True:
poly = create_random_polygon(height, width, nb_checked)
psoi = PolygonsOnImage([poly], shape=(height, width, 3))
psoi_aug = augs.augment_polygons(psoi)
if not poly.is_valid or not psoi_aug.polygons[0].is_valid:
print("poly: ", poly, poly.is_valid)
print("poly_aug: ", psoi_aug.polygons[0], psoi_aug.polygons[0].is_valid)
assert poly.is_valid
assert psoi_aug.polygons[0].is_valid
nb_checked += 1
if nb_checked % 100 == 0:
print("Checked %d..." % (nb_checked,))
if nb_checked > 100000:
break
def create_random_polygon(height, width, seed):
rs = np.random.RandomState(seed)
nb_points = rs.randint(3, 50)
coords = rs.rand(nb_points, 2)
coords = (coords * 2 - 0.5) # allow coords outside of the image plane
coords[:, 0] *= width
coords[:, 1] *= height
poly = Polygon(coords)
if poly.is_valid:
return poly
new_seed = rs.randint(ia.SEED_MIN_VALUE, ia.SEED_MAX_VALUE)
return create_random_polygon(height, width, new_seed)
if __name__ == "__main__":
main()
|
|
dc2074c5759f7ee8b00a9cc48b8a7b49ccdc067e
|
scripts/add_noise_to_datasets.py
|
scripts/add_noise_to_datasets.py
|
#!/usr/bin/env python3
"""
Usage: {script_name} INPUT_FILE OUTPUT_FILE
Adds noise to the csv file given as input.
"""
import random
import sys
from collections import deque
def usage():
print(__doc__.format(script_name=sys.argv[0].split("/")[-1]))
sys.exit(0)
def empty_lines(line_num):
empty_count = 500*random.randint(1, 9)
lines = list(random.randint(0, line_num - 1) for _ in range(empty_count))
lines.sort()
return deque(lines)
def process_line(index, line, output, empty_lines):
# Add random number in the beginning
add_random_num = random.random() < .05
# Add extra cell in the beginning
extra_cell = random.random() < .05
# Add empty lines
if empty_lines:
while empty_lines and empty_lines[0] == index:
empty_lines.popleft()
output.write("\n")
if add_random_num:
        output.write(str(random.randint(0, 9)))
if extra_cell:
output.write("{},".format(random.randint(0, 1000)))
output.write(line)
def main():
if len(sys.argv) != 3:
usage()
input_file = open(sys.argv[1], "r")
output_file = open(sys.argv[2], "w")
line_num = sum(1 for _ in input_file)
input_file.seek(0)
empty = empty_lines(line_num)
for index, line in enumerate(input_file):
process_line(index, line, output_file, empty)
input_file.close()
output_file.close()
main()
|
Add script that adds noise to datasets
|
Add script that adds noise to datasets
|
Python
|
mit
|
CSWR/annotatingCSV,CSWR/annotatingCSV,CSWR/annotatingCSV
|
Add script that adds noise to datasets
|
#!/usr/bin/env python3
"""
Usage: {script_name} INPUT_FILE OUTPUT_FILE
Adds noise to the csv file given as input.
"""
import random
import sys
from collections import deque
def usage():
print(__doc__.format(script_name=sys.argv[0].split("/")[-1]))
sys.exit(0)
def empty_lines(line_num):
empty_count = 500*random.randint(1, 9)
lines = list(random.randint(0, line_num - 1) for _ in range(empty_count))
lines.sort()
return deque(lines)
def process_line(index, line, output, empty_lines):
# Add random number in the beginning
add_random_num = random.random() < .05
# Add extra cell in the beginning
extra_cell = random.random() < .05
# Add empty lines
if empty_lines:
while empty_lines and empty_lines[0] == index:
empty_lines.popleft()
output.write("\n")
if add_random_num:
        output.write(str(random.randint(0, 9)))
if extra_cell:
output.write("{},".format(random.randint(0, 1000)))
output.write(line)
def main():
if len(sys.argv) != 3:
usage()
input_file = open(sys.argv[1], "r")
output_file = open(sys.argv[2], "w")
line_num = sum(1 for _ in input_file)
input_file.seek(0)
empty = empty_lines(line_num)
for index, line in enumerate(input_file):
process_line(index, line, output_file, empty)
input_file.close()
output_file.close()
main()
|
<commit_before><commit_msg>Add script that adds noise to datasets<commit_after>
|
#!/usr/bin/env python3
"""
Usage: {script_name} INPUT_FILE OUTPUT_FILE
Adds noise to the csv file given as input.
"""
import random
import sys
from collections import deque
def usage():
print(__doc__.format(script_name=sys.argv[0].split("/")[-1]))
sys.exit(0)
def empty_lines(line_num):
empty_count = 500*random.randint(1, 9)
lines = list(random.randint(0, line_num - 1) for _ in range(empty_count))
lines.sort()
return deque(lines)
def process_line(index, line, output, empty_lines):
# Add random number in the beginning
add_random_num = random.random() < .05
# Add extra cell in the beginning
extra_cell = random.random() < .05
# Add empty lines
if empty_lines:
while empty_lines and empty_lines[0] == index:
empty_lines.popleft()
output.write("\n")
if add_random_num:
        output.write(str(random.randint(0, 9)))
if extra_cell:
output.write("{},".format(random.randint(0, 1000)))
output.write(line)
def main():
if len(sys.argv) != 3:
usage()
input_file = open(sys.argv[1], "r")
output_file = open(sys.argv[2], "w")
line_num = sum(1 for _ in input_file)
input_file.seek(0)
empty = empty_lines(line_num)
for index, line in enumerate(input_file):
process_line(index, line, output_file, empty)
input_file.close()
output_file.close()
main()
|
Add script that adds noise to datasets#!/usr/bin/env python3
"""
Usage: {script_name} INPUT_FILE OUTPUT_FILE
Adds noise to the csv file given as input.
"""
import random
import sys
from collections import deque
def usage():
print(__doc__.format(script_name=sys.argv[0].split("/")[-1]))
sys.exit(0)
def empty_lines(line_num):
empty_count = 500*random.randint(1, 9)
lines = list(random.randint(0, line_num - 1) for _ in range(empty_count))
lines.sort()
return deque(lines)
def process_line(index, line, output, empty_lines):
# Add random number in the beginning
add_random_num = random.random() < .05
# Add extra cell in the beginning
extra_cell = random.random() < .05
# Add empty lines
if empty_lines:
while empty_lines and empty_lines[0] == index:
empty_lines.popleft()
output.write("\n")
if add_random_num:
        output.write(str(random.randint(0, 9)))
if extra_cell:
output.write("{},".format(random.randint(0, 1000)))
output.write(line)
def main():
if len(sys.argv) != 3:
usage()
input_file = open(sys.argv[1], "r")
output_file = open(sys.argv[2], "w")
line_num = sum(1 for _ in input_file)
input_file.seek(0)
empty = empty_lines(line_num)
for index, line in enumerate(input_file):
process_line(index, line, output_file, empty)
input_file.close()
output_file.close()
main()
|
<commit_before><commit_msg>Add script that adds noise to datasets<commit_after>#!/usr/bin/env python3
"""
Usage: {script_name} INPUT_FILE OUTPUT_FILE
Adds noise to the csv file given as input.
"""
import random
import sys
from collections import deque
def usage():
print(__doc__.format(script_name=sys.argv[0].split("/")[-1]))
sys.exit(0)
def empty_lines(line_num):
empty_count = 500*random.randint(1, 9)
lines = list(random.randint(0, line_num - 1) for _ in range(empty_count))
lines.sort()
return deque(lines)
def process_line(index, line, output, empty_lines):
# Add random number in the beginning
add_random_num = random.random() < .05
# Add extra cell in the beginning
extra_cell = random.random() < .05
# Add empty lines
if empty_lines:
while empty_lines and empty_lines[0] == index:
empty_lines.popleft()
output.write("\n")
if add_random_num:
        output.write(str(random.randint(0, 9)))
if extra_cell:
output.write("{},".format(random.randint(0, 1000)))
output.write(line)
def main():
if len(sys.argv) != 3:
usage()
input_file = open(sys.argv[1], "r")
output_file = open(sys.argv[2], "w")
line_num = sum(1 for _ in input_file)
input_file.seek(0)
empty = empty_lines(line_num)
for index, line in enumerate(input_file):
process_line(index, line, output_file, empty)
input_file.close()
output_file.close()
main()
|
|
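One behaviour of the noise script worth noting: nothing seeds `random`, so two runs over the same input corrupt different lines. Below is a minimal sketch of pinning the RNG for reproducibility, plus an empirical check that `random.random() < .05` really fires about 5% of the time (the seed value is arbitrary):
import random
random.seed(1234)  # arbitrary fixed seed: reruns now inject identical noise
trials = 100000
fired = sum(1 for _ in range(trials) if random.random() < .05)
print("observed corruption rate: %.4f" % (fired / float(trials)))  # roughly 0.0500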
97f70e4d285a2ce231442f6544927671ca959c38
|
Graphs/nodes_at_same_level.py
|
Graphs/nodes_at_same_level.py
|
import unittest
"""
Write a function to connect all adjacent nodes at same level in binary tree.
"""
class Node:
def __init__(self, key, left=None, right=None):
self.key = key
self.left = left
self.right = right
self.next_right = None
def connect_level(root):
if root is None:
return None
left_answer = connect_level(root.left)
right_answer = connect_level(root.right)
if left_answer is not None and right_answer is not None:
left_answer.next_right = right_answer
a = None
b = None
c = None
d = None
if left_answer is not None:
a = left_answer.left
b = left_answer.right
if right_answer is not None:
c = right_answer.left
d = right_answer.right
if a is not None and b is None:
if c is not None:
a.next_right = c
else:
a.next_right = d
elif b is not None:
if c is not None:
b.next_right = c
else:
b.next_right = d
return root
class TestLevelConnection(unittest.TestCase):
def test_level(self):
root = Node(10)
root.left = Node(3)
root.right = Node(5)
root.left.left = Node(4)
root.left.right = Node(1)
root.right.right = Node(2)
root = connect_level(root)
self.assertEqual(root.left.next_right, root.right)
self.assertEqual(root.left.left.next_right, root.left.right)
self.assertEqual(root.left.right.next_right, root.right.right)
|
Connect nodes at same level in binary tree
|
Connect nodes at same level in binary tree
|
Python
|
mit
|
prathamtandon/g4gproblems
|
Connect nodes at same level in binary tree
|
import unittest
"""
Write a function to connect all adjacent nodes at same level in binary tree.
"""
class Node:
def __init__(self, key, left=None, right=None):
self.key = key
self.left = left
self.right = right
self.next_right = None
def connect_level(root):
if root is None:
return None
left_answer = connect_level(root.left)
right_answer = connect_level(root.right)
if left_answer is not None and right_answer is not None:
left_answer.next_right = right_answer
a = None
b = None
c = None
d = None
if left_answer is not None:
a = left_answer.left
b = left_answer.right
if right_answer is not None:
c = right_answer.left
d = right_answer.right
if a is not None and b is None:
if c is not None:
a.next_right = c
else:
a.next_right = d
elif b is not None:
if c is not None:
b.next_right = c
else:
b.next_right = d
return root
class TestLevelConnection(unittest.TestCase):
def test_level(self):
root = Node(10)
root.left = Node(3)
root.right = Node(5)
root.left.left = Node(4)
root.left.right = Node(1)
root.right.right = Node(2)
root = connect_level(root)
self.assertEqual(root.left.next_right, root.right)
self.assertEqual(root.left.left.next_right, root.left.right)
self.assertEqual(root.left.right.next_right, root.right.right)
|
<commit_before><commit_msg>Connect nodes at same level in binary tree<commit_after>
|
import unittest
"""
Write a function to connect all adjacent nodes at same level in binary tree.
"""
class Node:
def __init__(self, key, left=None, right=None):
self.key = key
self.left = left
self.right = right
self.next_right = None
def connect_level(root):
if root is None:
return None
left_answer = connect_level(root.left)
right_answer = connect_level(root.right)
if left_answer is not None and right_answer is not None:
left_answer.next_right = right_answer
a = None
b = None
c = None
d = None
if left_answer is not None:
a = left_answer.left
b = left_answer.right
if right_answer is not None:
c = right_answer.left
d = right_answer.right
if a is not None and b is None:
if c is not None:
a.next_right = c
else:
a.next_right = d
elif b is not None:
if c is not None:
b.next_right = c
else:
b.next_right = d
return root
class TestLevelConnection(unittest.TestCase):
def test_level(self):
root = Node(10)
root.left = Node(3)
root.right = Node(5)
root.left.left = Node(4)
root.left.right = Node(1)
root.right.right = Node(2)
root = connect_level(root)
self.assertEqual(root.left.next_right, root.right)
self.assertEqual(root.left.left.next_right, root.left.right)
self.assertEqual(root.left.right.next_right, root.right.right)
|
Connect nodes at same level in binary treeimport unittest
"""
Write a function to connect all adjacent nodes at same level in binary tree.
"""
class Node:
def __init__(self, key, left=None, right=None):
self.key = key
self.left = left
self.right = right
self.next_right = None
def connect_level(root):
if root is None:
return None
left_answer = connect_level(root.left)
right_answer = connect_level(root.right)
if left_answer is not None and right_answer is not None:
left_answer.next_right = right_answer
a = None
b = None
c = None
d = None
if left_answer is not None:
a = left_answer.left
b = left_answer.right
if right_answer is not None:
c = right_answer.left
d = right_answer.right
if a is not None and b is None:
if c is not None:
a.next_right = c
else:
a.next_right = d
elif b is not None:
if c is not None:
b.next_right = c
else:
b.next_right = d
return root
class TestLevelConnection(unittest.TestCase):
def test_level(self):
root = Node(10)
root.left = Node(3)
root.right = Node(5)
root.left.left = Node(4)
root.left.right = Node(1)
root.right.right = Node(2)
root = connect_level(root)
self.assertEqual(root.left.next_right, root.right)
self.assertEqual(root.left.left.next_right, root.left.right)
self.assertEqual(root.left.right.next_right, root.right.right)
|
<commit_before><commit_msg>Connect nodes at same level in binary tree<commit_after>import unittest
"""
Write a function to connect all adjacent nodes at same level in binary tree.
"""
class Node:
def __init__(self, key, left=None, right=None):
self.key = key
self.left = left
self.right = right
self.next_right = None
def connect_level(root):
if root is None:
return None
left_answer = connect_level(root.left)
right_answer = connect_level(root.right)
if left_answer is not None and right_answer is not None:
left_answer.next_right = right_answer
a = None
b = None
c = None
d = None
if left_answer is not None:
a = left_answer.left
b = left_answer.right
if right_answer is not None:
c = right_answer.left
d = right_answer.right
if a is not None and b is None:
if c is not None:
a.next_right = c
else:
a.next_right = d
elif b is not None:
if c is not None:
b.next_right = c
else:
b.next_right = d
return root
class TestLevelConnection(unittest.TestCase):
def test_level(self):
root = Node(10)
root.left = Node(3)
root.right = Node(5)
root.left.left = Node(4)
root.left.right = Node(1)
root.right.right = Node(2)
root = connect_level(root)
self.assertEqual(root.left.next_right, root.right)
self.assertEqual(root.left.left.next_right, root.left.right)
self.assertEqual(root.left.right.next_right, root.right.right)
|
|
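As a quick way to inspect what `connect_level` builds beyond the three assertions in the test, the helper below (not part of the original commit) walks only the `next_right` links level by level; it assumes `connect_level` has already been run on the tree:
def level_keys(root):
    # Collect keys level by level using only the next_right pointers.
    levels = []
    leftmost = root
    while leftmost is not None:
        node, keys, next_leftmost = leftmost, [], None
        while node is not None:
            keys.append(node.key)
            if next_leftmost is None:
                next_leftmost = node.left or node.right
            node = node.next_right
        levels.append(keys)
        leftmost = next_leftmost
    return levels
# For the tree in TestLevelConnection this yields [[10], [3, 5], [4, 1, 2]].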
3e97849ce926fa375335227467064b55c4ff594f
|
scripts/find_existing_results.py
|
scripts/find_existing_results.py
|
#!/usr/bin/env python
import os
import sys
import argparse
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('-s', '--samples_file', help="Input list of sample/library names to search for")
args = parser.parse_args()
samples = list()
with open(args.samples_file, 'r') as samples_file:
samples = samples_file.read().splitlines()
root_dir = os.getcwd()
# Check FinalBams
os.chdir("./FinalBAMs")
sys.stdout.write("Missing final aligned BAM files:\n")
for sample in samples:
if os.path.isfile("{}.recalibrated.sorted.bam".format(sample)):
sys.stdout.write("{}\n".format(sample))
os.chdir(root_dir)
# # Check MuTect
# os.chdir("./MuTect")
# sys.stdout.write("Missing MuTect VCFs:\n")
# for sample in samples:
# if not os.path.isfile("{}.mutect.vcf".format(sample)):
# sys.stdout.write("{}\n".format(sample))
# os.chdir(root_dir)
#
# # Check VarDict
# os.chdir("./VarDict")
# sys.stdout.write("Missing VarDict VCFs:\n")
# for sample in samples:
# if not os.path.isfile("{}.vardict.vcf".format(sample)):
# sys.stdout.write("{}\n".format(sample))
# os.chdir(root_dir)
#
# # Check Scalpel
# os.chdir("./Scalpel")
# sys.stdout.write("Missing Scalpel VCFs:\n")
# for sample in samples:
# if not os.path.isdir("{}-scalpel-output".format(sample)):
# sys.stdout.write("No Folder: {}\n".format(sample))
# else:
# os.chdir("{}-scalpel-output/".format(sample))
# if not os.path.isfile("variants.indel.vcf"):
# sys.stdout.write("No Variants file: {}\n".format(sample))
# os.chdir("..")
# os.chdir(root_dir)
#
# # Check FreeBayes
# os.chdir("./FreeBayes")
# sys.stdout.write("Missing FreeBayes VCFs:\n")
# for sample in samples:
# if not os.path.isfile("{}.freebayes.vcf".format(sample)):
# sys.stdout.write("{}\n".format(sample))
# os.chdir(root_dir)
# Check GEMINI Databases
# os.chdir("./GEMINI")
# sys.stdout.write("Missing GEMINI databases:\n")
# for sample in samples:
# if not os.path.isfile("{}.snpEff.GRCh37.75.db".format(sample)):
# sys.stdout.write("{}\n".format(sample))
# os.chdir(root_dir)
|
Add script for finding duplicate results
|
Add script for finding duplicate results
|
Python
|
mit
|
dgaston/ddbio-ngsflow,dgaston/ddb-ngsflow
|
Add script for finding duplicate results
|
#!/usr/bin/env python
import os
import sys
import argparse
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('-s', '--samples_file', help="Input list of sample/library names to search for")
args = parser.parse_args()
samples = list()
with open(args.samples_file, 'r') as samples_file:
samples = samples_file.read().splitlines()
root_dir = os.getcwd()
# Check FinalBams
os.chdir("./FinalBAMs")
sys.stdout.write("Missing final aligned BAM files:\n")
for sample in samples:
if os.path.isfile("{}.recalibrated.sorted.bam".format(sample)):
sys.stdout.write("{}\n".format(sample))
os.chdir(root_dir)
# # Check MuTect
# os.chdir("./MuTect")
# sys.stdout.write("Missing MuTect VCFs:\n")
# for sample in samples:
# if not os.path.isfile("{}.mutect.vcf".format(sample)):
# sys.stdout.write("{}\n".format(sample))
# os.chdir(root_dir)
#
# # Check VarDict
# os.chdir("./VarDict")
# sys.stdout.write("Missing VarDict VCFs:\n")
# for sample in samples:
# if not os.path.isfile("{}.vardict.vcf".format(sample)):
# sys.stdout.write("{}\n".format(sample))
# os.chdir(root_dir)
#
# # Check Scalpel
# os.chdir("./Scalpel")
# sys.stdout.write("Missing Scalpel VCFs:\n")
# for sample in samples:
# if not os.path.isdir("{}-scalpel-output".format(sample)):
# sys.stdout.write("No Folder: {}\n".format(sample))
# else:
# os.chdir("{}-scalpel-output/".format(sample))
# if not os.path.isfile("variants.indel.vcf"):
# sys.stdout.write("No Variants file: {}\n".format(sample))
# os.chdir("..")
# os.chdir(root_dir)
#
# # Check FreeBayes
# os.chdir("./FreeBayes")
# sys.stdout.write("Missing FreeBayes VCFs:\n")
# for sample in samples:
# if not os.path.isfile("{}.freebayes.vcf".format(sample)):
# sys.stdout.write("{}\n".format(sample))
# os.chdir(root_dir)
# Check GEMINI Databases
# os.chdir("./GEMINI")
# sys.stdout.write("Missing GEMINI databases:\n")
# for sample in samples:
# if not os.path.isfile("{}.snpEff.GRCh37.75.db".format(sample)):
# sys.stdout.write("{}\n".format(sample))
# os.chdir(root_dir)
|
<commit_before><commit_msg>Add script for finding duplicate results<commit_after>
|
#!/usr/bin/env python
import os
import sys
import argparse
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('-s', '--samples_file', help="Input list of sample/library names to search for")
args = parser.parse_args()
samples = list()
with open(args.samples_file, 'r') as samples_file:
samples = samples_file.read().splitlines()
root_dir = os.getcwd()
# Check FinalBams
os.chdir("./FinalBAMs")
sys.stdout.write("Missing final aligned BAM files:\n")
for sample in samples:
if os.path.isfile("{}.recalibrated.sorted.bam".format(sample)):
sys.stdout.write("{}\n".format(sample))
os.chdir(root_dir)
# # Check MuTect
# os.chdir("./MuTect")
# sys.stdout.write("Missing MuTect VCFs:\n")
# for sample in samples:
# if not os.path.isfile("{}.mutect.vcf".format(sample)):
# sys.stdout.write("{}\n".format(sample))
# os.chdir(root_dir)
#
# # Check VarDict
# os.chdir("./VarDict")
# sys.stdout.write("Missing VarDict VCFs:\n")
# for sample in samples:
# if not os.path.isfile("{}.vardict.vcf".format(sample)):
# sys.stdout.write("{}\n".format(sample))
# os.chdir(root_dir)
#
# # Check Scalpel
# os.chdir("./Scalpel")
# sys.stdout.write("Missing Scalpel VCFs:\n")
# for sample in samples:
# if not os.path.isdir("{}-scalpel-output".format(sample)):
# sys.stdout.write("No Folder: {}\n".format(sample))
# else:
# os.chdir("{}-scalpel-output/".format(sample))
# if not os.path.isfile("variants.indel.vcf"):
# sys.stdout.write("No Variants file: {}\n".format(sample))
# os.chdir("..")
# os.chdir(root_dir)
#
# # Check FreeBayes
# os.chdir("./FreeBayes")
# sys.stdout.write("Missing FreeBayes VCFs:\n")
# for sample in samples:
# if not os.path.isfile("{}.freebayes.vcf".format(sample)):
# sys.stdout.write("{}\n".format(sample))
# os.chdir(root_dir)
# Check GEMINI Databases
# os.chdir("./GEMINI")
# sys.stdout.write("Missing GEMINI databases:\n")
# for sample in samples:
# if not os.path.isfile("{}.snpEff.GRCh37.75.db".format(sample)):
# sys.stdout.write("{}\n".format(sample))
# os.chdir(root_dir)
|
Add script for finding duplicate results#!/usr/bin/env python
import os
import sys
import argparse
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('-s', '--samples_file', help="Input list of sample/library names to search for")
args = parser.parse_args()
samples = list()
with open(args.samples_file, 'r') as samples_file:
samples = samples_file.read().splitlines()
root_dir = os.getcwd()
# Check FinalBams
os.chdir("./FinalBAMs")
sys.stdout.write("Missing final aligned BAM files:\n")
for sample in samples:
if os.path.isfile("{}.recalibrated.sorted.bam".format(sample)):
sys.stdout.write("{}\n".format(sample))
os.chdir(root_dir)
# # Check MuTect
# os.chdir("./MuTect")
# sys.stdout.write("Missing MuTect VCFs:\n")
# for sample in samples:
# if not os.path.isfile("{}.mutect.vcf".format(sample)):
# sys.stdout.write("{}\n".format(sample))
# os.chdir(root_dir)
#
# # Check VarDict
# os.chdir("./VarDict")
# sys.stdout.write("Missing VarDict VCFs:\n")
# for sample in samples:
# if not os.path.isfile("{}.vardict.vcf".format(sample)):
# sys.stdout.write("{}\n".format(sample))
# os.chdir(root_dir)
#
# # Check Scalpel
# os.chdir("./Scalpel")
# sys.stdout.write("Missing Scalpel VCFs:\n")
# for sample in samples:
# if not os.path.isdir("{}-scalpel-output".format(sample)):
# sys.stdout.write("No Folder: {}\n".format(sample))
# else:
# os.chdir("{}-scalpel-output/".format(sample))
# if not os.path.isfile("variants.indel.vcf"):
# sys.stdout.write("No Variants file: {}\n".format(sample))
# os.chdir("..")
# os.chdir(root_dir)
#
# # Check FreeBayes
# os.chdir("./FreeBayes")
# sys.stdout.write("Missing FreeBayes VCFs:\n")
# for sample in samples:
# if not os.path.isfile("{}.freebayes.vcf".format(sample)):
# sys.stdout.write("{}\n".format(sample))
# os.chdir(root_dir)
# Check GEMINI Databases
# os.chdir("./GEMINI")
# sys.stdout.write("Missing GEMINI databases:\n")
# for sample in samples:
# if not os.path.isfile("{}.snpEff.GRCh37.75.db".format(sample)):
# sys.stdout.write("{}\n".format(sample))
# os.chdir(root_dir)
|
<commit_before><commit_msg>Add script for finding duplicate results<commit_after>#!/usr/bin/env python
import os
import sys
import argparse
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('-s', '--samples_file', help="Input list of sample/library names to search for")
args = parser.parse_args()
samples = list()
with open(args.samples_file, 'r') as samples_file:
samples = samples_file.read().splitlines()
root_dir = os.getcwd()
# Check FinalBams
os.chdir("./FinalBAMs")
sys.stdout.write("Missing final aligned BAM files:\n")
for sample in samples:
if os.path.isfile("{}.recalibrated.sorted.bam".format(sample)):
sys.stdout.write("{}\n".format(sample))
os.chdir(root_dir)
# # Check MuTect
# os.chdir("./MuTect")
# sys.stdout.write("Missing MuTect VCFs:\n")
# for sample in samples:
# if not os.path.isfile("{}.mutect.vcf".format(sample)):
# sys.stdout.write("{}\n".format(sample))
# os.chdir(root_dir)
#
# # Check VarDict
# os.chdir("./VarDict")
# sys.stdout.write("Missing VarDict VCFs:\n")
# for sample in samples:
# if not os.path.isfile("{}.vardict.vcf".format(sample)):
# sys.stdout.write("{}\n".format(sample))
# os.chdir(root_dir)
#
# # Check Scalpel
# os.chdir("./Scalpel")
# sys.stdout.write("Missing Scalpel VCFs:\n")
# for sample in samples:
# if not os.path.isdir("{}-scalpel-output".format(sample)):
# sys.stdout.write("No Folder: {}\n".format(sample))
# else:
# os.chdir("{}-scalpel-output/".format(sample))
# if not os.path.isfile("variants.indel.vcf"):
# sys.stdout.write("No Variants file: {}\n".format(sample))
# os.chdir("..")
# os.chdir(root_dir)
#
# # Check FreeBayes
# os.chdir("./FreeBayes")
# sys.stdout.write("Missing FreeBayes VCFs:\n")
# for sample in samples:
# if not os.path.isfile("{}.freebayes.vcf".format(sample)):
# sys.stdout.write("{}\n".format(sample))
# os.chdir(root_dir)
# Check GEMINI Databases
# os.chdir("./GEMINI")
# sys.stdout.write("Missing GEMINI databases:\n")
# for sample in samples:
# if not os.path.isfile("{}.snpEff.GRCh37.75.db".format(sample)):
# sys.stdout.write("{}\n".format(sample))
# os.chdir(root_dir)
|
|
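The commented-out sections above are near-copies of one another; below is a sketch of how they could collapse into one helper. Directory names and filename patterns are lifted from the script itself, but the helper is not part of the original commit:
import os
import sys
def report_existing(directory, pattern, samples, label):
    # Print each sample whose expected output file already exists.
    sys.stdout.write(label + "\n")
    for sample in samples:
        if os.path.isfile(os.path.join(directory, pattern.format(sample))):
            sys.stdout.write("{}\n".format(sample))
# Example calls mirroring the script's sections:
# report_existing("FinalBAMs", "{}.recalibrated.sorted.bam", samples, "Existing final aligned BAM files:")
# report_existing("MuTect", "{}.mutect.vcf", samples, "Existing MuTect VCFs:")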
4ffab859f2798db504d47a98b525aba200e03c7a
|
imager/ImagerProfile/migrations/0003_auto_20150303_1301.py
|
imager/ImagerProfile/migrations/0003_auto_20150303_1301.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
('ImagerProfile', '0002_auto_20150303_0721'),
]
operations = [
migrations.AlterField(
model_name='imagerprofile',
name='user',
field=models.OneToOneField(related_name='profile', to=settings.AUTH_USER_MODEL),
preserve_default=True,
),
]
|
Add migration for changes to ImagerProfile user related name
|
Add migration for changes to ImagerProfile user related name
|
Python
|
mit
|
nbeck90/django-imager,nbeck90/django-imager
|
Add migration for changes to ImagerProfile user related name
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
('ImagerProfile', '0002_auto_20150303_0721'),
]
operations = [
migrations.AlterField(
model_name='imagerprofile',
name='user',
field=models.OneToOneField(related_name='profile', to=settings.AUTH_USER_MODEL),
preserve_default=True,
),
]
|
<commit_before><commit_msg>Add migration for changes to ImagerProfile user related name<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
('ImagerProfile', '0002_auto_20150303_0721'),
]
operations = [
migrations.AlterField(
model_name='imagerprofile',
name='user',
field=models.OneToOneField(related_name='profile', to=settings.AUTH_USER_MODEL),
preserve_default=True,
),
]
|
Add migration for changes to ImagerProfile user related name# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
('ImagerProfile', '0002_auto_20150303_0721'),
]
operations = [
migrations.AlterField(
model_name='imagerprofile',
name='user',
field=models.OneToOneField(related_name='profile', to=settings.AUTH_USER_MODEL),
preserve_default=True,
),
]
|
<commit_before><commit_msg>Add migration for changes to ImagerProfile user related name<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
('ImagerProfile', '0002_auto_20150303_0721'),
]
operations = [
migrations.AlterField(
model_name='imagerprofile',
name='user',
field=models.OneToOneField(related_name='profile', to=settings.AUTH_USER_MODEL),
preserve_default=True,
),
]
|
|
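For context on what this migration changes in practice: with `related_name='profile'`, the reverse accessor on the user model becomes `user.profile` instead of Django's default lowercase class name. A hedged sketch, assuming a configured Django project and a made-up username:
from django.contrib.auth import get_user_model
def profile_for(username):
    # After this migration the ImagerProfile hangs off `user.profile`.
    user = get_user_model().objects.get(username=username)
    return user.profile
# profile_for("demo_user")  # hypothetical username; requires Django settings to be configured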
ea27d19cb30b05d66256115753e82156d5a6d69d
|
openedx/tests/xblock_integration/test_external_xblocks.py
|
openedx/tests/xblock_integration/test_external_xblocks.py
|
"""
This will run tests on all XBlocks in the `xblock.test.v0`
entrypoint. Did you notice something about that entry point? It ends
with a v0. That means this is not finished. At some point, we might
stop running v0 tests, replacing them with test case failures, and
run v1 tests only.
That be the dragon here.
"""
import pkg_resources
class DuplicateXBlockTest(Exception):
'''
This exception is shown if there are multiple entry points with the same
class name for a test. In most cases, this means you have two versions
of the same XBlock installed, or two XBlocks with namespace collisions. In
either case, it'd be nice to resolve (likely by renaming tests as they
come in, hopefully still being careful to catch collisions which might
effect deployed XBlocks. See discussion at:
https://github.com/edx/edx-platform/pull/11032#discussion_r48097392).
'''
pass
class InvalidTestName(Exception):
'''
This means you have an entry point for a test that does not correspond
to a properly named test class. For example, if you cut-and-paste entry
points in `setup.py`, and forgot to repoint the class (so it points to
`DoneXBlock` instead of `TestDone`), or otherwise made an error, you
will see this exception.
'''
pass
xblock_loaded = False # pylint: disable=invalid-name
for entrypoint in pkg_resources.iter_entry_points(group="xblock.test.v0"): # pylint: disable=no-member
plugin = entrypoint.load()
classname = plugin.__name__
if classname in globals():
raise DuplicateXBlockTest(classname)
if not classname.startswith("Test"):
raise InvalidTestName("Test class should start with 'Test': " + classname)
# This should never happen, but while we're testing for class name
# validity, we figured it was okay to be a little overly defensive.
# See discussion at:
# https://github.com/edx/edx-platform/pull/11032#discussion_r48097392
if not classname.replace("_", "").isalnum():
raise InvalidTestName("Python variables should be letters, numbers, and underscores: " + classname)
globals()[classname] = plugin
print "Loading XBlock test: " + classname
xblock_loaded = True
|
Allow us to run tests from external XBlock repositories
|
Allow us to run tests from external XBlock repositories
|
Python
|
agpl-3.0
|
ampax/edx-platform,teltek/edx-platform,chrisndodge/edx-platform,antoviaque/edx-platform,deepsrijit1105/edx-platform,proversity-org/edx-platform,doganov/edx-platform,hastexo/edx-platform,angelapper/edx-platform,naresh21/synergetics-edx-platform,Stanford-Online/edx-platform,msegado/edx-platform,ovnicraft/edx-platform,appsembler/edx-platform,pabloborrego93/edx-platform,Ayub-Khan/edx-platform,devs1991/test_edx_docmode,defance/edx-platform,longmen21/edx-platform,miptliot/edx-platform,CredoReference/edx-platform,amir-qayyum-khan/edx-platform,IndonesiaX/edx-platform,kmoocdev2/edx-platform,Edraak/edraak-platform,Edraak/edraak-platform,devs1991/test_edx_docmode,arbrandes/edx-platform,jjmiranda/edx-platform,longmen21/edx-platform,amir-qayyum-khan/edx-platform,ahmedaljazzar/edx-platform,MakeHer/edx-platform,JioEducation/edx-platform,TeachAtTUM/edx-platform,BehavioralInsightsTeam/edx-platform,stvstnfrd/edx-platform,Endika/edx-platform,cpennington/edx-platform,TeachAtTUM/edx-platform,CredoReference/edx-platform,tanmaykm/edx-platform,hastexo/edx-platform,miptliot/edx-platform,marcore/edx-platform,wwj718/edx-platform,a-parhom/edx-platform,solashirai/edx-platform,alu042/edx-platform,analyseuc3m/ANALYSE-v1,ovnicraft/edx-platform,wwj718/edx-platform,amir-qayyum-khan/edx-platform,stvstnfrd/edx-platform,franosincic/edx-platform,UOMx/edx-platform,angelapper/edx-platform,RPI-OPENEDX/edx-platform,ampax/edx-platform,romain-li/edx-platform,gsehub/edx-platform,caesar2164/edx-platform,louyihua/edx-platform,Ayub-Khan/edx-platform,kmoocdev2/edx-platform,naresh21/synergetics-edx-platform,ahmedaljazzar/edx-platform,pabloborrego93/edx-platform,raccoongang/edx-platform,philanthropy-u/edx-platform,Lektorium-LLC/edx-platform,CourseTalk/edx-platform,jjmiranda/edx-platform,CredoReference/edx-platform,ahmedaljazzar/edx-platform,caesar2164/edx-platform,10clouds/edx-platform,wwj718/edx-platform,tanmaykm/edx-platform,edx/edx-platform,doganov/edx-platform,pepeportela/edx-platform,cecep-edu/edx-platform,EDUlib/edx-platform,miptliot/edx-platform,alu042/edx-platform,marcore/edx-platform,RPI-OPENEDX/edx-platform,mitocw/edx-platform,TeachAtTUM/edx-platform,romain-li/edx-platform,arbrandes/edx-platform,mbareta/edx-platform-ft,angelapper/edx-platform,IndonesiaX/edx-platform,appsembler/edx-platform,longmen21/edx-platform,a-parhom/edx-platform,chrisndodge/edx-platform,fintech-circle/edx-platform,devs1991/test_edx_docmode,franosincic/edx-platform,MakeHer/edx-platform,marcore/edx-platform,msegado/edx-platform,cecep-edu/edx-platform,teltek/edx-platform,analyseuc3m/ANALYSE-v1,BehavioralInsightsTeam/edx-platform,synergeticsedx/deployment-wipro,devs1991/test_edx_docmode,ESOedX/edx-platform,antoviaque/edx-platform,cpennington/edx-platform,ESOedX/edx-platform,synergeticsedx/deployment-wipro,MakeHer/edx-platform,appsembler/edx-platform,Ayub-Khan/edx-platform,chrisndodge/edx-platform,Stanford-Online/edx-platform,shabab12/edx-platform,solashirai/edx-platform,eduNEXT/edunext-platform,louyihua/edx-platform,alu042/edx-platform,cpennington/edx-platform,shabab12/edx-platform,jjmiranda/edx-platform,gsehub/edx-platform,eduNEXT/edunext-platform,pepeportela/edx-platform,devs1991/test_edx_docmode,mitocw/edx-platform,CredoReference/edx-platform,CourseTalk/edx-platform,edx-solutions/edx-platform,edx/edx-platform,analyseuc3m/ANALYSE-v1,kmoocdev2/edx-platform,philanthropy-u/edx-platform,caesar2164/edx-platform,arbrandes/edx-platform,fintech-circle/edx-platform,eduNEXT/edx-platform,analyseuc3m/ANALYSE-v1,Livit/Livit.Learn.EdX,Edraak/edraak-platform,Endika/edx-platfo
rm,RPI-OPENEDX/edx-platform,MakeHer/edx-platform,amir-qayyum-khan/edx-platform,raccoongang/edx-platform,antoviaque/edx-platform,synergeticsedx/deployment-wipro,solashirai/edx-platform,mitocw/edx-platform,ampax/edx-platform,wwj718/edx-platform,romain-li/edx-platform,eduNEXT/edunext-platform,cecep-edu/edx-platform,ESOedX/edx-platform,lduarte1991/edx-platform,cpennington/edx-platform,gsehub/edx-platform,Ayub-Khan/edx-platform,arbrandes/edx-platform,hastexo/edx-platform,JioEducation/edx-platform,prarthitm/edxplatform,cecep-edu/edx-platform,prarthitm/edxplatform,Livit/Livit.Learn.EdX,teltek/edx-platform,naresh21/synergetics-edx-platform,caesar2164/edx-platform,edx-solutions/edx-platform,CourseTalk/edx-platform,raccoongang/edx-platform,stvstnfrd/edx-platform,stvstnfrd/edx-platform,edx-solutions/edx-platform,itsjeyd/edx-platform,shabab12/edx-platform,naresh21/synergetics-edx-platform,mbareta/edx-platform-ft,franosincic/edx-platform,deepsrijit1105/edx-platform,appsembler/edx-platform,hastexo/edx-platform,mbareta/edx-platform-ft,msegado/edx-platform,lduarte1991/edx-platform,pabloborrego93/edx-platform,romain-li/edx-platform,jzoldak/edx-platform,louyihua/edx-platform,Lektorium-LLC/edx-platform,defance/edx-platform,devs1991/test_edx_docmode,ovnicraft/edx-platform,jzoldak/edx-platform,BehavioralInsightsTeam/edx-platform,fintech-circle/edx-platform,proversity-org/edx-platform,philanthropy-u/edx-platform,IndonesiaX/edx-platform,angelapper/edx-platform,pepeportela/edx-platform,pabloborrego93/edx-platform,Livit/Livit.Learn.EdX,raccoongang/edx-platform,a-parhom/edx-platform,marcore/edx-platform,BehavioralInsightsTeam/edx-platform,jzoldak/edx-platform,Endika/edx-platform,jjmiranda/edx-platform,RPI-OPENEDX/edx-platform,jzoldak/edx-platform,waheedahmed/edx-platform,doganov/edx-platform,UOMx/edx-platform,JioEducation/edx-platform,louyihua/edx-platform,gsehub/edx-platform,edx-solutions/edx-platform,Lektorium-LLC/edx-platform,UOMx/edx-platform,msegado/edx-platform,JioEducation/edx-platform,itsjeyd/edx-platform,tanmaykm/edx-platform,ampax/edx-platform,UOMx/edx-platform,waheedahmed/edx-platform,eduNEXT/edx-platform,a-parhom/edx-platform,philanthropy-u/edx-platform,proversity-org/edx-platform,MakeHer/edx-platform,waheedahmed/edx-platform,edx/edx-platform,procangroup/edx-platform,longmen21/edx-platform,proversity-org/edx-platform,Endika/edx-platform,Stanford-Online/edx-platform,fintech-circle/edx-platform,lduarte1991/edx-platform,kmoocdev2/edx-platform,procangroup/edx-platform,chrisndodge/edx-platform,itsjeyd/edx-platform,eduNEXT/edx-platform,edx/edx-platform,doganov/edx-platform,wwj718/edx-platform,mitocw/edx-platform,jolyonb/edx-platform,10clouds/edx-platform,franosincic/edx-platform,teltek/edx-platform,deepsrijit1105/edx-platform,defance/edx-platform,ovnicraft/edx-platform,tanmaykm/edx-platform,RPI-OPENEDX/edx-platform,solashirai/edx-platform,shabab12/edx-platform,synergeticsedx/deployment-wipro,ESOedX/edx-platform,devs1991/test_edx_docmode,waheedahmed/edx-platform,10clouds/edx-platform,solashirai/edx-platform,IndonesiaX/edx-platform,10clouds/edx-platform,gymnasium/edx-platform,Ayub-Khan/edx-platform,eduNEXT/edx-platform,EDUlib/edx-platform,jolyonb/edx-platform,waheedahmed/edx-platform,CourseTalk/edx-platform,EDUlib/edx-platform,ahmedaljazzar/edx-platform,jolyonb/edx-platform,mbareta/edx-platform-ft,gymnasium/edx-platform,Stanford-Online/edx-platform,prarthitm/edxplatform,miptliot/edx-platform,Lektorium-LLC/edx-platform,gymnasium/edx-platform,eduNEXT/edunext-platform,cecep-edu/edx-platform,jolyonb/edx-platform,Liv
it/Livit.Learn.EdX,lduarte1991/edx-platform,alu042/edx-platform,TeachAtTUM/edx-platform,romain-li/edx-platform,antoviaque/edx-platform,Edraak/edraak-platform,itsjeyd/edx-platform,deepsrijit1105/edx-platform,ovnicraft/edx-platform,devs1991/test_edx_docmode,franosincic/edx-platform,msegado/edx-platform,gymnasium/edx-platform,procangroup/edx-platform,doganov/edx-platform,procangroup/edx-platform,EDUlib/edx-platform,kmoocdev2/edx-platform,pepeportela/edx-platform,longmen21/edx-platform,IndonesiaX/edx-platform,defance/edx-platform,prarthitm/edxplatform
|
Allow us to run tests from external XBlock repositories
|
"""
This will run tests on all XBlocks in the `xblock.test.v0`
entrypoint. Did you notice something about that entry point? It ends
with a v0. That means this is not finished. At some point, we might
stop running v0 tests, replacing them with test case failures, and
run v1 tests only.
That be the dragon here.
"""
import pkg_resources
class DuplicateXBlockTest(Exception):
'''
This exception is shown if there are multiple entry points with the same
class name for a test. In most cases, this means you have two versions
of the same XBlock installed, or two XBlocks with namespace collisions. In
either case, it'd be nice to resolve (likely by renaming tests as they
come in, hopefully still being careful to catch collisions which might
    affect deployed XBlocks. See discussion at:
https://github.com/edx/edx-platform/pull/11032#discussion_r48097392).
'''
pass
class InvalidTestName(Exception):
'''
This means you have an entry point for a test that does not correspond
to a properly named test class. For example, if you cut-and-paste entry
points in `setup.py`, and forgot to repoint the class (so it points to
`DoneXBlock` instead of `TestDone`), or otherwise made an error, you
will see this exception.
'''
pass
xblock_loaded = False # pylint: disable=invalid-name
for entrypoint in pkg_resources.iter_entry_points(group="xblock.test.v0"): # pylint: disable=no-member
plugin = entrypoint.load()
classname = plugin.__name__
if classname in globals():
raise DuplicateXBlockTest(classname)
if not classname.startswith("Test"):
raise InvalidTestName("Test class should start with 'Test': " + classname)
# This should never happen, but while we're testing for class name
# validity, we figured it was okay to be a little overly defensive.
# See discussion at:
# https://github.com/edx/edx-platform/pull/11032#discussion_r48097392
if not classname.replace("_", "").isalnum():
raise InvalidTestName("Python variables should be letters, numbers, and underscores: " + classname)
globals()[classname] = plugin
print "Loading XBlock test: " + classname
xblock_loaded = True
|
<commit_before><commit_msg>Allow us to run tests from external XBlock repositories<commit_after>
|
"""
This will run tests on all XBlocks in the `xblock.test.v0`
entrypoint. Did you notice something about that entry point? It ends
with a v0. That means this is not finished. At some point, we might
stop running v0 tests, replacing them with test case failures, and
run v1 tests only.
That be the dragon here.
"""
import pkg_resources
class DuplicateXBlockTest(Exception):
'''
This exception is shown if there are multiple entry points with the same
class name for a test. In most cases, this means you have two versions
of the same XBlock installed, or two XBlocks with namespace collisions. In
either case, it'd be nice to resolve (likely by renaming tests as they
come in, hopefully still being careful to catch collisions which might
    affect deployed XBlocks. See discussion at:
https://github.com/edx/edx-platform/pull/11032#discussion_r48097392).
'''
pass
class InvalidTestName(Exception):
'''
This means you have an entry point for a test that does not correspond
to a properly named test class. For example, if you cut-and-paste entry
points in `setup.py`, and forgot to repoint the class (so it points to
`DoneXBlock` instead of `TestDone`), or otherwise made an error, you
will see this exception.
'''
pass
xblock_loaded = False # pylint: disable=invalid-name
for entrypoint in pkg_resources.iter_entry_points(group="xblock.test.v0"): # pylint: disable=no-member
plugin = entrypoint.load()
classname = plugin.__name__
if classname in globals():
raise DuplicateXBlockTest(classname)
if not classname.startswith("Test"):
raise InvalidTestName("Test class should start with 'Test': " + classname)
# This should never happen, but while we're testing for class name
# validity, we figured it was okay to be a little overly defensive.
# See discussion at:
# https://github.com/edx/edx-platform/pull/11032#discussion_r48097392
if not classname.replace("_", "").isalnum():
raise InvalidTestName("Python variables should be letters, numbers, and underscores: " + classname)
globals()[classname] = plugin
print "Loading XBlock test: " + classname
xblock_loaded = True
|
Allow us to run tests from external XBlock repositories"""
This will run tests on all XBlocks in the `xblock.test.v0`
entrypoint. Did you notice something about that entry point? It ends
with a v0. That means this is not finished. At some point, we might
stop running v0 tests, replacing them with test case failures, and
run v1 tests only.
That be the dragon here.
"""
import pkg_resources
class DuplicateXBlockTest(Exception):
'''
This exception is shown if there are multiple entry points with the same
class name for a test. In most cases, this means you have two versions
of the same XBlock installed, or two XBlocks with namespace collisions. In
either case, it'd be nice to resolve (likely by renaming tests as they
come in, hopefully still being careful to catch collisions which might
    affect deployed XBlocks. See discussion at:
https://github.com/edx/edx-platform/pull/11032#discussion_r48097392).
'''
pass
class InvalidTestName(Exception):
'''
This means you have an entry point for a test that does not correspond
to a properly named test class. For example, if you cut-and-paste entry
points in `setup.py`, and forgot to repoint the class (so it points to
`DoneXBlock` instead of `TestDone`), or otherwise made an error, you
will see this exception.
'''
pass
xblock_loaded = False # pylint: disable=invalid-name
for entrypoint in pkg_resources.iter_entry_points(group="xblock.test.v0"): # pylint: disable=no-member
plugin = entrypoint.load()
classname = plugin.__name__
if classname in globals():
raise DuplicateXBlockTest(classname)
if not classname.startswith("Test"):
raise InvalidTestName("Test class should start with 'Test': " + classname)
# This should never happen, but while we're testing for class name
# validity, we figured it was okay to be a little overly defensive.
# See discussion at:
# https://github.com/edx/edx-platform/pull/11032#discussion_r48097392
if not classname.replace("_", "").isalnum():
raise InvalidTestName("Python variables should be letters, numbers, and underscores: " + classname)
globals()[classname] = plugin
print "Loading XBlock test: " + classname
xblock_loaded = True
|
<commit_before><commit_msg>Allow us to run tests from external XBlock repositories<commit_after>"""
This will run tests on all XBlocks in the `xblock.test.v0`
entrypoint. Did you notice something about that entry point? It ends
with a v0. That means this is not finished. At some point, we might
stop running v0 tests, replacing them with test case failures, and
run v1 tests only.
That be the dragon here.
"""
import pkg_resources
class DuplicateXBlockTest(Exception):
'''
This exception is shown if there are multiple entry points with the same
class name for a test. In most cases, this means you have two versions
of the same XBlock installed, or two XBlocks with namespace collisions. In
either case, it'd be nice to resolve (likely by renaming tests as they
come in, hopefully still being careful to catch collisions which might
affect deployed XBlocks. See discussion at:
https://github.com/edx/edx-platform/pull/11032#discussion_r48097392).
'''
pass
class InvalidTestName(Exception):
'''
This means you have an entry point for a test that does not correspond
to a properly named test class. For example, if you cut-and-paste entry
points in `setup.py`, and forgot to repoint the class (so it points to
`DoneXBlock` instead of `TestDone`), or otherwise made an error, you
will see this exception.
'''
pass
xblock_loaded = False # pylint: disable=invalid-name
for entrypoint in pkg_resources.iter_entry_points(group="xblock.test.v0"): # pylint: disable=no-member
plugin = entrypoint.load()
classname = plugin.__name__
if classname in globals():
raise DuplicateXBlockTest(classname)
if not classname.startswith("Test"):
raise InvalidTestName("Test class should start with 'Test': " + classname)
# This should never happen, but while we're testing for class name
# validity, we figured it was okay to be a little overly defensive.
# See discussion at:
# https://github.com/edx/edx-platform/pull/11032#discussion_r48097392
if not classname.replace("_", "").isalnum():
raise InvalidTestName("Python variables should be letters, numbers, and underscores: " + classname)
globals()[classname] = plugin
print "Loading XBlock test: " + classname
xblock_loaded = True
|
|
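For reference, the entry points the loader above iterates over are declared in a plugin's setup.py. A minimal sketch, where the distribution name and module path are placeholders (only the group name xblock.test.v0 and the TestDone class name come from the record above):

# hypothetical setup.py fragment registering a test class in the
# xblock.test.v0 group scanned by the loader
from setuptools import setup

setup(
    name='done-xblock',  # placeholder distribution name
    entry_points={
        'xblock.test.v0': [
            'TestDone = done.tests:TestDone',  # illustrative module path
        ],
    },
)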
3559063b9110aa04b773dfc56db05297e4ce472f
|
migrations/versions/0249_another_letter_org.py
|
migrations/versions/0249_another_letter_org.py
|
"""empty message
Revision ID: 0249_another_letter_org
Revises: 0248_enable_choose_postage
"""
# revision identifiers, used by Alembic.
revision = '0249_another_letter_org'
down_revision = '0248_enable_choose_postage'
from alembic import op
NEW_ORGANISATIONS = [
('521', 'North Somerset Council', 'north-somerset'),
]
def upgrade():
for numeric_id, name, filename in NEW_ORGANISATIONS:
op.execute("""
INSERT
INTO dvla_organisation
VALUES ('{}', '{}', '{}')
""".format(numeric_id, name, filename))
def downgrade():
for numeric_id, _, _ in NEW_ORGANISATIONS:
op.execute("""
DELETE
FROM dvla_organisation
WHERE id = '{}'
""".format(numeric_id))
|
Add letter logo for North Somerset council
|
Add letter logo for North Somerset council
|
Python
|
mit
|
alphagov/notifications-api,alphagov/notifications-api
|
Add letter logo for North Somerset council
|
"""empty message
Revision ID: 0249_another_letter_org
Revises: 0248_enable_choose_postage
"""
# revision identifiers, used by Alembic.
revision = '0249_another_letter_org'
down_revision = '0248_enable_choose_postage'
from alembic import op
NEW_ORGANISATIONS = [
('521', 'North Somerset Council', 'north-somerset'),
]
def upgrade():
for numeric_id, name, filename in NEW_ORGANISATIONS:
op.execute("""
INSERT
INTO dvla_organisation
VALUES ('{}', '{}', '{}')
""".format(numeric_id, name, filename))
def downgrade():
for numeric_id, _, _ in NEW_ORGANISATIONS:
op.execute("""
DELETE
FROM dvla_organisation
WHERE id = '{}'
""".format(numeric_id))
|
<commit_before><commit_msg>Add letter logo for North Somerset council<commit_after>
|
"""empty message
Revision ID: 0249_another_letter_org
Revises: 0248_enable_choose_postage
"""
# revision identifiers, used by Alembic.
revision = '0249_another_letter_org'
down_revision = '0248_enable_choose_postage'
from alembic import op
NEW_ORGANISATIONS = [
('521', 'North Somerset Council', 'north-somerset'),
]
def upgrade():
for numeric_id, name, filename in NEW_ORGANISATIONS:
op.execute("""
INSERT
INTO dvla_organisation
VALUES ('{}', '{}', '{}')
""".format(numeric_id, name, filename))
def downgrade():
for numeric_id, _, _ in NEW_ORGANISATIONS:
op.execute("""
DELETE
FROM dvla_organisation
WHERE id = '{}'
""".format(numeric_id))
|
Add letter logo for North Somerset council"""empty message
Revision ID: 0249_another_letter_org
Revises: 0248_enable_choose_postage
"""
# revision identifiers, used by Alembic.
revision = '0249_another_letter_org'
down_revision = '0248_enable_choose_postage'
from alembic import op
NEW_ORGANISATIONS = [
('521', 'North Somerset Council', 'north-somerset'),
]
def upgrade():
for numeric_id, name, filename in NEW_ORGANISATIONS:
op.execute("""
INSERT
INTO dvla_organisation
VALUES ('{}', '{}', '{}')
""".format(numeric_id, name, filename))
def downgrade():
for numeric_id, _, _ in NEW_ORGANISATIONS:
op.execute("""
DELETE
FROM dvla_organisation
WHERE id = '{}'
""".format(numeric_id))
|
<commit_before><commit_msg>Add letter logo for North Somerset council<commit_after>"""empty message
Revision ID: 0249_another_letter_org
Revises: 0248_enable_choose_postage
"""
# revision identifiers, used by Alembic.
revision = '0249_another_letter_org'
down_revision = '0248_enable_choose_postage'
from alembic import op
NEW_ORGANISATIONS = [
('521', 'North Somerset Council', 'north-somerset'),
]
def upgrade():
for numeric_id, name, filename in NEW_ORGANISATIONS:
op.execute("""
INSERT
INTO dvla_organisation
VALUES ('{}', '{}', '{}')
""".format(numeric_id, name, filename))
def downgrade():
for numeric_id, _, _ in NEW_ORGANISATIONS:
op.execute("""
DELETE
FROM dvla_organisation
WHERE id = '{}'
""".format(numeric_id))
|
|
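A hedged aside on the migration above: Alembic can express the same inserts with bound parameters instead of string formatting. The column names below are assumptions, since the original inserts by position only; a minimal sketch reusing the NEW_ORGANISATIONS list from the migration:

# sketch using op.bulk_insert with a lightweight table object; the column
# names 'id', 'name' and 'filename' are guesses based on the tuple order
import sqlalchemy as sa
from alembic import op

dvla = sa.table(
    'dvla_organisation',
    sa.column('id'), sa.column('name'), sa.column('filename'),
)

def upgrade():
    op.bulk_insert(dvla, [
        {'id': i, 'name': n, 'filename': f}
        for i, n, f in NEW_ORGANISATIONS
    ])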
85f179a4d515801c4cfcb35e198efe268959d0ba
|
test/trainer_test.py
|
test/trainer_test.py
|
import theanets
import util
class TestTrainer(util.MNIST):
def setUp(self):
super(TestTrainer, self).setUp()
self.exp = theanets.Experiment(
theanets.Autoencoder,
layers=(self.DIGIT_SIZE, 10, self.DIGIT_SIZE))
def assert_progress(self, algo, **kwargs):
trainer = self.exp.itertrain(self.images, optimize=algo, **kwargs)
t0, v0 = next(trainer)
t1, v1 = next(trainer)
t2, v2 = next(trainer)
assert t2['loss'] < t0['loss']
def test_sgd(self):
self.assert_progress('sgd', learning_rate=1e-4)
def test_nag(self):
self.assert_progress('nag', learning_rate=1e-4)
def test_rprop(self):
self.assert_progress('rprop', learning_rate=1e-4)
def test_rmsprop(self):
self.assert_progress('rmsprop', learning_rate=1e-4)
def test_adadelta(self):
self.assert_progress('adadelta', learning_rate=1e-4)
def test_cg(self):
self.assert_progress('cg')
def test_layerwise(self):
self.exp = theanets.Experiment(
theanets.Autoencoder,
layers=(self.DIGIT_SIZE, 10, 10, self.DIGIT_SIZE))
self.assert_progress('layerwise')
|
import theanets
import util
class TestTrainer(util.MNIST):
def setUp(self):
super(TestTrainer, self).setUp()
self.exp = theanets.Experiment(
theanets.Autoencoder,
layers=(self.DIGIT_SIZE, 10, self.DIGIT_SIZE))
def assert_progress(self, algo, **kwargs):
trainer = self.exp.itertrain(self.images, optimize=algo, **kwargs)
train, valid = next(trainer)
assert train['loss'] < valid['loss']
def test_sgd(self):
self.assert_progress('sgd', learning_rate=1e-4)
def test_nag(self):
self.assert_progress('nag', learning_rate=1e-4)
def test_rprop(self):
self.assert_progress('rprop', learning_rate=1e-4)
def test_rmsprop(self):
self.assert_progress('rmsprop', learning_rate=1e-4)
def test_adadelta(self):
self.assert_progress('adadelta', learning_rate=1e-4)
def test_cg(self):
self.assert_progress('cg')
def test_layerwise(self):
self.exp = theanets.Experiment(
theanets.Autoencoder,
layers=(self.DIGIT_SIZE, 10, 10, self.DIGIT_SIZE))
self.assert_progress('layerwise')
|
Test 1st training loss improves over 0th validation.
|
Test 1st training loss improves over 0th validation.
|
Python
|
mit
|
devdoer/theanets,chrinide/theanets,lmjohns3/theanets
|
import theanets
import util
class TestTrainer(util.MNIST):
def setUp(self):
super(TestTrainer, self).setUp()
self.exp = theanets.Experiment(
theanets.Autoencoder,
layers=(self.DIGIT_SIZE, 10, self.DIGIT_SIZE))
def assert_progress(self, algo, **kwargs):
trainer = self.exp.itertrain(self.images, optimize=algo, **kwargs)
t0, v0 = next(trainer)
t1, v1 = next(trainer)
t2, v2 = next(trainer)
assert t2['loss'] < t0['loss']
def test_sgd(self):
self.assert_progress('sgd', learning_rate=1e-4)
def test_nag(self):
self.assert_progress('nag', learning_rate=1e-4)
def test_rprop(self):
self.assert_progress('rprop', learning_rate=1e-4)
def test_rmsprop(self):
self.assert_progress('rmsprop', learning_rate=1e-4)
def test_adadelta(self):
self.assert_progress('adadelta', learning_rate=1e-4)
def test_cg(self):
self.assert_progress('cg')
def test_layerwise(self):
self.exp = theanets.Experiment(
theanets.Autoencoder,
layers=(self.DIGIT_SIZE, 10, 10, self.DIGIT_SIZE))
self.assert_progress('layerwise')
Test 1st training loss improves over 0th validation.
|
import theanets
import util
class TestTrainer(util.MNIST):
def setUp(self):
super(TestTrainer, self).setUp()
self.exp = theanets.Experiment(
theanets.Autoencoder,
layers=(self.DIGIT_SIZE, 10, self.DIGIT_SIZE))
def assert_progress(self, algo, **kwargs):
trainer = self.exp.itertrain(self.images, optimize=algo, **kwargs)
train, valid = next(trainer)
assert train['loss'] < valid['loss']
def test_sgd(self):
self.assert_progress('sgd', learning_rate=1e-4)
def test_nag(self):
self.assert_progress('nag', learning_rate=1e-4)
def test_rprop(self):
self.assert_progress('rprop', learning_rate=1e-4)
def test_rmsprop(self):
self.assert_progress('rmsprop', learning_rate=1e-4)
def test_adadelta(self):
self.assert_progress('adadelta', learning_rate=1e-4)
def test_cg(self):
self.assert_progress('cg')
def test_layerwise(self):
self.exp = theanets.Experiment(
theanets.Autoencoder,
layers=(self.DIGIT_SIZE, 10, 10, self.DIGIT_SIZE))
self.assert_progress('layerwise')
|
<commit_before>import theanets
import util
class TestTrainer(util.MNIST):
def setUp(self):
super(TestTrainer, self).setUp()
self.exp = theanets.Experiment(
theanets.Autoencoder,
layers=(self.DIGIT_SIZE, 10, self.DIGIT_SIZE))
def assert_progress(self, algo, **kwargs):
trainer = self.exp.itertrain(self.images, optimize=algo, **kwargs)
t0, v0 = next(trainer)
t1, v1 = next(trainer)
t2, v2 = next(trainer)
assert t2['loss'] < t0['loss']
def test_sgd(self):
self.assert_progress('sgd', learning_rate=1e-4)
def test_nag(self):
self.assert_progress('nag', learning_rate=1e-4)
def test_rprop(self):
self.assert_progress('rprop', learning_rate=1e-4)
def test_rmsprop(self):
self.assert_progress('rmsprop', learning_rate=1e-4)
def test_adadelta(self):
self.assert_progress('adadelta', learning_rate=1e-4)
def test_cg(self):
self.assert_progress('cg')
def test_layerwise(self):
self.exp = theanets.Experiment(
theanets.Autoencoder,
layers=(self.DIGIT_SIZE, 10, 10, self.DIGIT_SIZE))
self.assert_progress('layerwise')
<commit_msg>Test 1st training loss improves over 0th validation.<commit_after>
|
import theanets
import util
class TestTrainer(util.MNIST):
def setUp(self):
super(TestTrainer, self).setUp()
self.exp = theanets.Experiment(
theanets.Autoencoder,
layers=(self.DIGIT_SIZE, 10, self.DIGIT_SIZE))
def assert_progress(self, algo, **kwargs):
trainer = self.exp.itertrain(self.images, optimize=algo, **kwargs)
train, valid = next(trainer)
assert train['loss'] < valid['loss']
def test_sgd(self):
self.assert_progress('sgd', learning_rate=1e-4)
def test_nag(self):
self.assert_progress('nag', learning_rate=1e-4)
def test_rprop(self):
self.assert_progress('rprop', learning_rate=1e-4)
def test_rmsprop(self):
self.assert_progress('rmsprop', learning_rate=1e-4)
def test_adadelta(self):
self.assert_progress('adadelta', learning_rate=1e-4)
def test_cg(self):
self.assert_progress('cg')
def test_layerwise(self):
self.exp = theanets.Experiment(
theanets.Autoencoder,
layers=(self.DIGIT_SIZE, 10, 10, self.DIGIT_SIZE))
self.assert_progress('layerwise')
|
import theanets
import util
class TestTrainer(util.MNIST):
def setUp(self):
super(TestTrainer, self).setUp()
self.exp = theanets.Experiment(
theanets.Autoencoder,
layers=(self.DIGIT_SIZE, 10, self.DIGIT_SIZE))
def assert_progress(self, algo, **kwargs):
trainer = self.exp.itertrain(self.images, optimize=algo, **kwargs)
t0, v0 = next(trainer)
t1, v1 = next(trainer)
t2, v2 = next(trainer)
assert t2['loss'] < t0['loss']
def test_sgd(self):
self.assert_progress('sgd', learning_rate=1e-4)
def test_nag(self):
self.assert_progress('nag', learning_rate=1e-4)
def test_rprop(self):
self.assert_progress('rprop', learning_rate=1e-4)
def test_rmsprop(self):
self.assert_progress('rmsprop', learning_rate=1e-4)
def test_adadelta(self):
self.assert_progress('adadelta', learning_rate=1e-4)
def test_cg(self):
self.assert_progress('cg')
def test_layerwise(self):
self.exp = theanets.Experiment(
theanets.Autoencoder,
layers=(self.DIGIT_SIZE, 10, 10, self.DIGIT_SIZE))
self.assert_progress('layerwise')
Test 1st training loss improves over 0th validation.import theanets
import util
class TestTrainer(util.MNIST):
def setUp(self):
super(TestTrainer, self).setUp()
self.exp = theanets.Experiment(
theanets.Autoencoder,
layers=(self.DIGIT_SIZE, 10, self.DIGIT_SIZE))
def assert_progress(self, algo, **kwargs):
trainer = self.exp.itertrain(self.images, optimize=algo, **kwargs)
train, valid = next(trainer)
assert train['loss'] < valid['loss']
def test_sgd(self):
self.assert_progress('sgd', learning_rate=1e-4)
def test_nag(self):
self.assert_progress('nag', learning_rate=1e-4)
def test_rprop(self):
self.assert_progress('rprop', learning_rate=1e-4)
def test_rmsprop(self):
self.assert_progress('rmsprop', learning_rate=1e-4)
def test_adadelta(self):
self.assert_progress('adadelta', learning_rate=1e-4)
def test_cg(self):
self.assert_progress('cg')
def test_layerwise(self):
self.exp = theanets.Experiment(
theanets.Autoencoder,
layers=(self.DIGIT_SIZE, 10, 10, self.DIGIT_SIZE))
self.assert_progress('layerwise')
|
<commit_before>import theanets
import util
class TestTrainer(util.MNIST):
def setUp(self):
super(TestTrainer, self).setUp()
self.exp = theanets.Experiment(
theanets.Autoencoder,
layers=(self.DIGIT_SIZE, 10, self.DIGIT_SIZE))
def assert_progress(self, algo, **kwargs):
trainer = self.exp.itertrain(self.images, optimize=algo, **kwargs)
t0, v0 = next(trainer)
t1, v1 = next(trainer)
t2, v2 = next(trainer)
assert t2['loss'] < t0['loss']
def test_sgd(self):
self.assert_progress('sgd', learning_rate=1e-4)
def test_nag(self):
self.assert_progress('nag', learning_rate=1e-4)
def test_rprop(self):
self.assert_progress('rprop', learning_rate=1e-4)
def test_rmsprop(self):
self.assert_progress('rmsprop', learning_rate=1e-4)
def test_adadelta(self):
self.assert_progress('adadelta', learning_rate=1e-4)
def test_cg(self):
self.assert_progress('cg')
def test_layerwise(self):
self.exp = theanets.Experiment(
theanets.Autoencoder,
layers=(self.DIGIT_SIZE, 10, 10, self.DIGIT_SIZE))
self.assert_progress('layerwise')
<commit_msg>Test 1st training loss improves over 0th validation.<commit_after>import theanets
import util
class TestTrainer(util.MNIST):
def setUp(self):
super(TestTrainer, self).setUp()
self.exp = theanets.Experiment(
theanets.Autoencoder,
layers=(self.DIGIT_SIZE, 10, self.DIGIT_SIZE))
def assert_progress(self, algo, **kwargs):
trainer = self.exp.itertrain(self.images, optimize=algo, **kwargs)
train, valid = next(trainer)
assert train['loss'] < valid['loss']
def test_sgd(self):
self.assert_progress('sgd', learning_rate=1e-4)
def test_nag(self):
self.assert_progress('nag', learning_rate=1e-4)
def test_rprop(self):
self.assert_progress('rprop', learning_rate=1e-4)
def test_rmsprop(self):
self.assert_progress('rmsprop', learning_rate=1e-4)
def test_adadelta(self):
self.assert_progress('adadelta', learning_rate=1e-4)
def test_cg(self):
self.assert_progress('cg')
def test_layerwise(self):
self.exp = theanets.Experiment(
theanets.Autoencoder,
layers=(self.DIGIT_SIZE, 10, 10, self.DIGIT_SIZE))
self.assert_progress('layerwise')
|
d67f6e9e96352e54e4b23e1740e90d009dac3b36
|
scripts/generate_match_message.py
|
scripts/generate_match_message.py
|
from django.utils import translation
from django.template.loader import get_template
from bluebottle.activities.tasks import get_matching_activities
from bluebottle.members.models import Member
import logging
from bluebottle.clients.models import Client
from bluebottle.clients.utils import LocalTenant
from bluebottle.time_based.tests.factories import DateActivityFactory, PeriodActivityFactory
from bluebottle.activities.messages import MatchingActivitiesNotification
from bluebottle.test.factory_models.accounts import BlueBottleUserFactory
def run(*args):
for tenant in Client.objects.filter(name='GoodUp Demo'):
with LocalTenant(tenant, clear_tenant=True):
translation.activate('en')
user = Member.objects.get(pk=112)
activities = get_matching_activities(user)
notification = MatchingActivitiesNotification(user)
context = notification.get_context(user, activities=activities)
print(
get_template(
'mails/{0}.html'.format(notification.template)
).render(context)
)
|
Add script that generates a message
|
Add script that generates a message
|
Python
|
bsd-3-clause
|
onepercentclub/bluebottle,onepercentclub/bluebottle,onepercentclub/bluebottle,onepercentclub/bluebottle,onepercentclub/bluebottle
|
Add script that generates a message
|
from django.utils import translation
from django.template.loader import get_template
from bluebottle.activities.tasks import get_matching_activities
from bluebottle.members.models import Member
import logging
from bluebottle.clients.models import Client
from bluebottle.clients.utils import LocalTenant
from bluebottle.time_based.tests.factories import DateActivityFactory, PeriodActivityFactory
from bluebottle.activities.messages import MatchingActivitiesNotification
from bluebottle.test.factory_models.accounts import BlueBottleUserFactory
def run(*args):
for tenant in Client.objects.filter(name='GoodUp Demo'):
with LocalTenant(tenant, clear_tenant=True):
translation.activate('en')
user = Member.objects.get(pk=112)
activities = get_matching_activities(user)
notification = MatchingActivitiesNotification(user)
context = notification.get_context(user, activities=activities)
print(
get_template(
'mails/{0}.html'.format(notification.template)
).render(context)
)
|
<commit_before><commit_msg>Add script that generates a message<commit_after>
|
from django.utils import translation
from django.template.loader import get_template
from bluebottle.activities.tasks import get_matching_activities
from bluebottle.members.models import Member
import logging
from bluebottle.clients.models import Client
from bluebottle.clients.utils import LocalTenant
from bluebottle.time_based.tests.factories import DateActivityFactory, PeriodActivityFactory
from bluebottle.activities.messages import MatchingActivitiesNotification
from bluebottle.test.factory_models.accounts import BlueBottleUserFactory
def run(*args):
for tenant in Client.objects.filter(name='GoodUp Demo'):
with LocalTenant(tenant, clear_tenant=True):
translation.activate('en')
user = Member.objects.get(pk=112)
activities = get_matching_activities(user)
notification = MatchingActivitiesNotification(user)
context = notification.get_context(user, activities=activities)
print(
get_template(
'mails/{0}.html'.format(notification.template)
).render(context)
)
|
Add script that generates a messagefrom django.utils import translation
from django.template.loader import get_template
from bluebottle.activities.tasks import get_matching_activities
from bluebottle.members.models import Member
import logging
from bluebottle.clients.models import Client
from bluebottle.clients.utils import LocalTenant
from bluebottle.time_based.tests.factories import DateActivityFactory, PeriodActivityFactory
from bluebottle.activities.messages import MatchingActivitiesNotification
from bluebottle.test.factory_models.accounts import BlueBottleUserFactory
def run(*args):
for tenant in Client.objects.filter(name='GoodUp Demo'):
with LocalTenant(tenant, clear_tenant=True):
translation.activate('en')
user = Member.objects.get(pk=112)
activities = get_matching_activities(user)
notification = MatchingActivitiesNotification(user)
context = notification.get_context(user, activities=activities)
print(
get_template(
'mails/{0}.html'.format(notification.template)
).render(context)
)
|
<commit_before><commit_msg>Add script that generates a message<commit_after>from django.utils import translation
from django.template.loader import get_template
from bluebottle.activities.tasks import get_matching_activities
from bluebottle.members.models import Member
import logging
from bluebottle.clients.models import Client
from bluebottle.clients.utils import LocalTenant
from bluebottle.time_based.tests.factories import DateActivityFactory, PeriodActivityFactory
from bluebottle.activities.messages import MatchingActivitiesNotification
from bluebottle.test.factory_models.accounts import BlueBottleUserFactory
def run(*args):
for tenant in Client.objects.filter(name='GoodUp Demo'):
with LocalTenant(tenant, clear_tenant=True):
translation.activate('en')
user = Member.objects.get(pk=112)
activities = get_matching_activities(user)
notification = MatchingActivitiesNotification(user)
context = notification.get_context(user, activities=activities)
print(
get_template(
'mails/{0}.html'.format(notification.template)
).render(context)
)
|
|
67cddb074622343bd48ba4ad75bbdbe2d9b042dc
|
changeFileNameFromID.py
|
changeFileNameFromID.py
|
import os
import codecs
import re
EXT = "usfm"
SOURCEDIR = "."
PROJECT = "OA-GUJ"
bk = {"GEN":1, "EXO":2, "LEV":3, "NUM":4, "DEU":5, "JOS":6, "JDG":7, "RUT":8, "1SA":9, "2SA":10, "1KI":11, "2KI":12, "1CH":13, "2CH":14, "EZR":15, "NEH":16, "EST":17, "JOB":18, "PSA":19, "PRO":20, "ECC":21, "SNG":22, "ISA":23, "JER":24, "LAM":25, "EZE":26, "DAN":27, "HOS":28, "JOL":29, "AMO":30, "OBA":31, "JON":32, "MIC":33, "NAM":34, "HAB":35, "ZEP":36, "HAG":37, "ZEC":38, "MAL":39, "MAT":40, "MRK":41, "LUK":42, "JHN":43, "ACT":44, "ROM":45, "1CO":46, "2CO":47, "GAL":48, "EPH":49, "PHP":50, "COL":51, "1TH":52, "2TH":53, "1TI":54, "2TI":55, "TIT":56, "PHM":57, "HEB":58, "JAS":59, "1PE":60, "2PE":61, "1JN":62, "2JN":63, "3JN":64, "JUD":65, "REV": 66}
fileList = os.listdir(SOURCEDIR)
for fil in fileList:
b = fil.split(".")
if(b[1].lower() == EXT):
f = codecs.open(fil, mode = 'r', encoding = 'utf-8')
fc = f.read()
f.close()
try:
bkID = re.findall("\\id ([A-Z0-9]{3})", fc)[0]
fName = str(bk[bkID]+1).zfill(2) + "_" + bkID + PROJECT + "." + EXT
except:
fName=fil
o = codecs.open(fName, mode='w', encoding='utf-8')
o.write(fc)
o.close()
|
Change the name of usfm file according to ID
|
Change the name of usfm file according to ID
This script will change the name of a usfm file according to the ID information. Quite useful when users give files with an unconventional naming system.
|
Python
|
mit
|
beniza/learningPython
|
Change the name of usfm file according to ID
This script will change the name of a usfm file according to the ID information. Quite useful when users give files with an unconventional naming system.
|
import os
import codecs
import re
EXT = "usfm"
SOURCEDIR = "."
PROJECT = "OA-GUJ"
bk = {"GEN":1, "EXO":2, "LEV":3, "NUM":4, "DEU":5, "JOS":6, "JDG":7, "RUT":8, "1SA":9, "2SA":10, "1KI":11, "2KI":12, "1CH":13, "2CH":14, "EZR":15, "NEH":16, "EST":17, "JOB":18, "PSA":19, "PRO":20, "ECC":21, "SNG":22, "ISA":23, "JER":24, "LAM":25, "EZE":26, "DAN":27, "HOS":28, "JOL":29, "AMO":30, "OBA":31, "JON":32, "MIC":33, "NAM":34, "HAB":35, "ZEP":36, "HAG":37, "ZEC":38, "MAL":39, "MAT":40, "MRK":41, "LUK":42, "JHN":43, "ACT":44, "ROM":45, "1CO":46, "2CO":47, "GAL":48, "EPH":49, "PHP":50, "COL":51, "1TH":52, "2TH":53, "1TI":54, "2TI":55, "TIT":56, "PHM":57, "HEB":58, "JAS":59, "1PE":60, "2PE":61, "1JN":62, "2JN":63, "3JN":64, "JUD":65, "REV": 66}
fileList = os.listdir(SOURCEDIR)
for fil in fileList:
b = fil.split(".")
if(b[1].lower() == EXT):
f = codecs.open(fil, mode = 'r', encoding = 'utf-8')
fc = f.read()
f.close()
try:
bkID = re.findall("\\id ([A-Z0-9]{3})", fc)[0]
fName = str(bk[bkID]+1).zfill(2) + "_" + bkID + PROJECT + "." + EXT
except:
fName=fil
o = codecs.open(fName, mode='w', encoding='utf-8')
o.write(fc)
o.close()
|
<commit_before><commit_msg>Change the name of usfm file according to ID
This script will change the name of a usfm file according to the ID information. Quite useful when users give files with an unconventional naming system.<commit_after>
|
import os
import codecs
import re
EXT = "usfm"
SOURCEDIR = "."
PROJECT = "OA-GUJ"
bk = {"GEN":1, "EXO":2, "LEV":3, "NUM":4, "DEU":5, "JOS":6, "JDG":7, "RUT":8, "1SA":9, "2SA":10, "1KI":11, "2KI":12, "1CH":13, "2CH":14, "EZR":15, "NEH":16, "EST":17, "JOB":18, "PSA":19, "PRO":20, "ECC":21, "SNG":22, "ISA":23, "JER":24, "LAM":25, "EZE":26, "DAN":27, "HOS":28, "JOL":29, "AMO":30, "OBA":31, "JON":32, "MIC":33, "NAM":34, "HAB":35, "ZEP":36, "HAG":37, "ZEC":38, "MAL":39, "MAT":40, "MRK":41, "LUK":42, "JHN":43, "ACT":44, "ROM":45, "1CO":46, "2CO":47, "GAL":48, "EPH":49, "PHP":50, "COL":51, "1TH":52, "2TH":53, "1TI":54, "2TI":55, "TIT":56, "PHM":57, "HEB":58, "JAS":59, "1PE":60, "2PE":61, "1JN":62, "2JN":63, "3JN":64, "JUD":65, "REV": 66}
fileList = os.listdir(SOURCEDIR)
for fil in fileList:
b = fil.split(".")
if(b[1].lower() == EXT):
f = codecs.open(fil, mode = 'r', encoding = 'utf-8')
fc = f.read()
f.close()
try:
bkID = re.findall("\\id ([A-Z0-9]{3})", fc)[0]
fName = str(bk[bkID]+1).zfill(2) + "_" + bkID + PROJECT + "." + EXT
except:
fName=fil
o = codecs.open(fName, mode='w', encoding='utf-8')
o.write(fc)
o.close()
|
Change the name of usfm file according to ID
This script will change the name of a usfm file according to the ID information. Quite useful when users give files with an unconventional naming system.import os
import codecs
import re
EXT = "usfm"
SOURCEDIR = "."
PROJECT = "OA-GUJ"
bk = {"GEN":1, "EXO":2, "LEV":3, "NUM":4, "DEU":5, "JOS":6, "JDG":7, "RUT":8, "1SA":9, "2SA":10, "1KI":11, "2KI":12, "1CH":13, "2CH":14, "EZR":15, "NEH":16, "EST":17, "JOB":18, "PSA":19, "PRO":20, "ECC":21, "SNG":22, "ISA":23, "JER":24, "LAM":25, "EZE":26, "DAN":27, "HOS":28, "JOL":29, "AMO":30, "OBA":31, "JON":32, "MIC":33, "NAM":34, "HAB":35, "ZEP":36, "HAG":37, "ZEC":38, "MAL":39, "MAT":40, "MRK":41, "LUK":42, "JHN":43, "ACT":44, "ROM":45, "1CO":46, "2CO":47, "GAL":48, "EPH":49, "PHP":50, "COL":51, "1TH":52, "2TH":53, "1TI":54, "2TI":55, "TIT":56, "PHM":57, "HEB":58, "JAS":59, "1PE":60, "2PE":61, "1JN":62, "2JN":63, "3JN":64, "JUD":65, "REV": 66}
fileList = os.listdir(SOURCEDIR)
for fil in fileList:
b = fil.split(".")
if(b[1].lower() == EXT):
f = codecs.open(fil, mode = 'r', encoding = 'utf-8')
fc = f.read()
f.close()
try:
bkID = re.findall("\\id ([A-Z0-9]{3})", fc)[0]
fName = str(bk[bkID]+1).zfill(2) + "_" + bkID + PROJECT + "." + EXT
except:
fName=fil
o = codecs.open(fName, mode='w', encoding='utf-8')
o.write(fc)
o.close()
|
<commit_before><commit_msg>Change the name of usfm file according to ID
This script will change the name of a usfm file according to the ID information. Quite useful when users give files with an unconventional naming system.<commit_after>import os
import codecs
import re
EXT = "usfm"
SOURCEDIR = "."
PROJECT = "OA-GUJ"
bk = {"GEN":1, "EXO":2, "LEV":3, "NUM":4, "DEU":5, "JOS":6, "JDG":7, "RUT":8, "1SA":9, "2SA":10, "1KI":11, "2KI":12, "1CH":13, "2CH":14, "EZR":15, "NEH":16, "EST":17, "JOB":18, "PSA":19, "PRO":20, "ECC":21, "SNG":22, "ISA":23, "JER":24, "LAM":25, "EZE":26, "DAN":27, "HOS":28, "JOL":29, "AMO":30, "OBA":31, "JON":32, "MIC":33, "NAM":34, "HAB":35, "ZEP":36, "HAG":37, "ZEC":38, "MAL":39, "MAT":40, "MRK":41, "LUK":42, "JHN":43, "ACT":44, "ROM":45, "1CO":46, "2CO":47, "GAL":48, "EPH":49, "PHP":50, "COL":51, "1TH":52, "2TH":53, "1TI":54, "2TI":55, "TIT":56, "PHM":57, "HEB":58, "JAS":59, "1PE":60, "2PE":61, "1JN":62, "2JN":63, "3JN":64, "JUD":65, "REV": 66}
fileList = os.listdir(SOURCEDIR)
for fil in fileList:
b = fil.split(".")
if(b[1].lower() == EXT):
f = codecs.open(fil, mode = 'r', encoding = 'utf-8')
fc = f.read()
f.close()
try:
bkID = re.findall("\\id ([A-Z0-9]{3})", fc)[0]
fName = str(bk[bkID]+1).zfill(2) + "_" + bkID + PROJECT + "." + EXT
except:
fName=fil
o = codecs.open(fName, mode='w', encoding='utf-8')
o.write(fc)
o.close()
|
|
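One subtlety in the script above: the pattern string "\\id ..." compiles to the regex escape \i followed by d, which Python 2 treated as a literal "id" (so the match succeeds without actually requiring the USFM backslash) and which newer Python 3 releases reject as a bad escape. Matching the literal \id marker takes a doubled backslash in a raw string; a minimal sketch:

# matching the USFM \id marker explicitly with a raw-string pattern
import re

pattern = re.compile(r'\\id ([A-Z0-9]{3})')
m = pattern.search('\\id GEN Gujarati Genesis header')
print(m.group(1) if m else None)   # GEN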
0264308667d2b64dbb089d4758258281b7ccbc1f
|
nlpdemo/lib_confirm_sample/confirm-openpyxl.py
|
nlpdemo/lib_confirm_sample/confirm-openpyxl.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
from openpyxl import Workbook
wb = Workbook()
# grab the active worksheet
ws = wb.active
# Data can be assigned directly to cells
ws['A1'] = 42
# Rows can also be appended
ws.append([1, 2, 3])
# Python types will automatically be converted
import datetime
ws['A2'] = datetime.datetime.now()
# Save the file
wb.save("/tmp/sample.xlsx")
from openpyxl import load_workbook
wb2 = load_workbook('/tmp/sample.xlsx')
print(wb2.get_sheet_names())
|
Add lib confirm for openpyxl
|
Add lib confirm for openpyxl
|
Python
|
mit
|
demonstrationch/nlp-demo
|
Add lib confirm for openpyxl
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
from openpyxl import Workbook
wb = Workbook()
# grab the active worksheet
ws = wb.active
# Data can be assigned directly to cells
ws['A1'] = 42
# Rows can also be appended
ws.append([1, 2, 3])
# Python types will automatically be converted
import datetime
ws['A2'] = datetime.datetime.now()
# Save the file
wb.save("/tmp/sample.xlsx")
from openpyxl import load_workbook
wb2 = load_workbook('/tmp/sample.xlsx')
print(wb2.get_sheet_names())
|
<commit_before><commit_msg>Add lib confirm for openpyxl<commit_after>
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
from openpyxl import Workbook
wb = Workbook()
# grab the active worksheet
ws = wb.active
# Data can be assigned directly to cells
ws['A1'] = 42
# Rows can also be appended
ws.append([1, 2, 3])
# Python types will automatically be converted
import datetime
ws['A2'] = datetime.datetime.now()
# Save the file
wb.save("/tmp/sample.xlsx")
from openpyxl import load_workbook
wb2 = load_workbook('/tmp/sample.xlsx')
print(wb2.get_sheet_names())
|
Add lib confirm for openpyxl#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
from openpyxl import Workbook
wb = Workbook()
# grab the active worksheet
ws = wb.active
# Data can be assigned directly to cells
ws['A1'] = 42
# Rows can also be appended
ws.append([1, 2, 3])
# Python types will automatically be converted
import datetime
ws['A2'] = datetime.datetime.now()
# Save the file
wb.save("/tmp/sample.xlsx")
from openpyxl import load_workbook
wb2 = load_workbook('/tmp/sample.xlsx')
print(wb2.get_sheet_names())
|
<commit_before><commit_msg>Add lib confirm for openpyxl<commit_after>#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
from openpyxl import Workbook
wb = Workbook()
# grab the active worksheet
ws = wb.active
# Data can be assigned directly to cells
ws['A1'] = 42
# Rows can also be appended
ws.append([1, 2, 3])
# Python types will automatically be converted
import datetime
ws['A2'] = datetime.datetime.now()
# Save the file
wb.save("/tmp/sample.xlsx")
from openpyxl import load_workbook
wb2 = load_workbook('/tmp/sample.xlsx')
print(wb2.get_sheet_names())
|
|
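Closing the loop on the sample above, values are read back through cell objects; get_sheet_names() was later deprecated in openpyxl in favour of the sheetnames property. A short sketch (cell positions follow from the writes above, where A1 holds 42 and append() put 2 in B2):

# read-back sketch for the workbook written above
from openpyxl import load_workbook

wb3 = load_workbook('/tmp/sample.xlsx')
print(wb3.sheetnames)      # e.g. ['Sheet']
ws3 = wb3.active
print(ws3['A1'].value)     # 42
print(ws3['B2'].value)     # 2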
bf138c0f268d470336c5f96b467e319b01303048
|
app/utils/utc.py
|
app/utils/utc.py
|
# UTC implementation borrowed from the Python docs:
# http://docs.python.org/2/library/datetime.html#tzinfo-objects
# License is the same as the example one.
from datetime import (
timedelta,
tzinfo,
)
ZERO = timedelta(0)
class UTC(tzinfo):
"""UTC"""
def utcoffset(self, dt):
return ZERO
def tzname(self, dt):
return "UTC"
def dst(self, dt):
return ZERO
utc = UTC()
|
Add a simple UTC tzinfo implementation.
|
Add a simple UTC tzinfo implementation.
* Python 2.x does not have any time zone info implemented.
By default we will treat time as UTC time, adjustments should
be done by the clients.
Here we introduce a very simple UTC Python tzinfo class borrowed
from the Python docs.
|
Python
|
lgpl-2.1
|
kernelci/kernelci-backend,joyxu/kernelci-backend,joyxu/kernelci-backend,joyxu/kernelci-backend,kernelci/kernelci-backend
|
Add a simple UTC tzinfo implementation.
* Python 2.x does not have any time zone info implemented.
By default we will treat time as UTC time, adjustments should
be done by the clients.
Here we introduce a very simple UTC Python tzinfo class borrowed
from the Python docs.
|
# UTC implementation borrowed from the Python docs:
# http://docs.python.org/2/library/datetime.html#tzinfo-objects
# License is the same as the example one.
from datetime import (
timedelta,
tzinfo,
)
ZERO = timedelta(0)
class UTC(tzinfo):
"""UTC"""
def utcoffset(self, dt):
return ZERO
def tzname(self, dt):
return "UTC"
def dst(self, dt):
return ZERO
utc = UTC()
|
<commit_before><commit_msg>Add a simple UTC tzinfo implementation.
* Python 2.x does not have any time zone info implemented.
By default we will treat time as UTC time, adjustments should
be done by the clients.
Here we introduce a very simple UTC Python tzinfo class borrowed
from the Python docs.<commit_after>
|
# UTC implementation borrowed from the Python docs:
# http://docs.python.org/2/library/datetime.html#tzinfo-objects
# License is the same as the example one.
from datetime import (
timedelta,
tzinfo,
)
ZERO = timedelta(0)
class UTC(tzinfo):
"""UTC"""
def utcoffset(self, dt):
return ZERO
def tzname(self, dt):
return "UTC"
def dst(self, dt):
return ZERO
utc = UTC()
|
Add a simple UTC tzinfo implementation.
* Python 2.x does not have any time zone info implemented.
By default we will treat time as UTC time, adjustments should
be done by the clients.
Here we introduce a very simple UTC Python tzinfo class borrowed
from the Python docs.# UTC implementation borrowed from the Python docs:
# http://docs.python.org/2/library/datetime.html#tzinfo-objects
# License is the same as the example one.
from datetime import (
timedelta,
tzinfo,
)
ZERO = timedelta(0)
class UTC(tzinfo):
"""UTC"""
def utcoffset(self, dt):
return ZERO
def tzname(self, dt):
return "UTC"
def dst(self, dt):
return ZERO
utc = UTC()
|
<commit_before><commit_msg>Add a simple UTC tzinfo implementation.
* Python 2.x does not have any time zone info implemented.
By default we will treat time as UTC time, adjustments should
be done by the clients.
Here we introduce a very simple UTC Python tzinfo class borrowed
from the Python docs.<commit_after># UTC implementation borrowed from the Python docs:
# http://docs.python.org/2/library/datetime.html#tzinfo-objects
# License is the same as the example one.
from datetime import (
timedelta,
tzinfo,
)
ZERO = timedelta(0)
class UTC(tzinfo):
"""UTC"""
def utcoffset(self, dt):
return ZERO
def tzname(self, dt):
return "UTC"
def dst(self, dt):
return ZERO
utc = UTC()
|
|
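A usage sketch for the class above: passing it to datetime.now() yields an aware timestamp, and on Python 3.2+ the built-in datetime.timezone.utc behaves identically (utc below is the instance defined in the record):

# attaching the tzinfo to a datetime to get an aware UTC timestamp
from datetime import datetime

now = datetime.now(utc)
print(now.isoformat())    # e.g. '2014-06-01T12:00:00+00:00'
print(now.utcoffset())    # 0:00:00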
e58bcd886829cc1b38b5f8383b0553136b709ace
|
.appveyor.test.py
|
.appveyor.test.py
|
from dmoj import judgeenv, executors
from dmoj.testsuite import Tester
from dmoj.utils.ansi import ansi_style
required_executors = ['AWK', 'BF', 'C', 'CPP03', 'CPP11', 'CS', 'GO', 'PERL', 'PY2', 'PY3',
'RUBY19', 'RUBY21', 'SED', 'VB']
def main():
judgeenv.load_env(cli=True, testsuite=True)
# Emulate ANSI colors with colorama
__import__('colorama').init()
executors.load_executors()
executor_fail = not all(name in executors.executors for name in required_executors)
if executor_fail:
print ansi_style('#ansi[A required executor failed to load.](red|bold)')
else:
print ansi_style('#ansi[All required executors loaded successfully.](green|bold)')
print
tester = Tester(judgeenv.problem_regex, judgeenv.case_regex)
fails = tester.test_all()
print
print 'Test complete'
if fails:
print ansi_style('#ansi[A total of %d case(s) failed](red|bold).') % fails
else:
print ansi_style('#ansi[All cases passed.](green|bold)')
raise SystemExit(int(executor_fail or fails != 0))
if __name__ == '__main__':
main()
|
Check that all required executors are loaded on AppVeyor
|
Check that all required executors are loaded on AppVeyor
|
Python
|
agpl-3.0
|
DMOJ/judge,DMOJ/judge,DMOJ/judge
|
Check that all required executors are loaded on AppVeyor
|
from dmoj import judgeenv, executors
from dmoj.testsuite import Tester
from dmoj.utils.ansi import ansi_style
required_executors = ['AWK', 'BF', 'C', 'CPP03', 'CPP11', 'CS', 'GO', 'PERL', 'PY2', 'PY3',
'RUBY19', 'RUBY21', 'SED', 'VB']
def main():
judgeenv.load_env(cli=True, testsuite=True)
# Emulate ANSI colors with colorama
__import__('colorama').init()
executors.load_executors()
executor_fail = not all(name in executors.executors for name in required_executors)
if executor_fail:
print ansi_style('#ansi[A required executor failed to load.](red|bold)')
else:
print ansi_style('#ansi[All required executors loaded successfully.](green|bold)')
print
tester = Tester(judgeenv.problem_regex, judgeenv.case_regex)
fails = tester.test_all()
print
print 'Test complete'
if fails:
print ansi_style('#ansi[A total of %d case(s) failed](red|bold).') % fails
else:
print ansi_style('#ansi[All cases passed.](green|bold)')
raise SystemExit(int(executor_fail or fails != 0))
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Check that all required executors are loaded on AppVeyor<commit_after>
|
from dmoj import judgeenv, executors
from dmoj.testsuite import Tester
from dmoj.utils.ansi import ansi_style
required_executors = ['AWK', 'BF', 'C', 'CPP03', 'CPP11', 'CS', 'GO', 'PERL', 'PY2', 'PY3',
'RUBY19', 'RUBY21', 'SED', 'VB']
def main():
judgeenv.load_env(cli=True, testsuite=True)
# Emulate ANSI colors with colorama
__import__('colorama').init()
executors.load_executors()
executor_fail = not all(name in executors.executors for name in required_executors)
if executor_fail:
print ansi_style('#ansi[A required executor failed to load.](red|bold)')
else:
print ansi_style('#ansi[All required executors loaded successfully.](green|bold)')
print
tester = Tester(judgeenv.problem_regex, judgeenv.case_regex)
fails = tester.test_all()
print
print 'Test complete'
if fails:
print ansi_style('#ansi[A total of %d case(s) failed](red|bold).') % fails
else:
print ansi_style('#ansi[All cases passed.](green|bold)')
raise SystemExit(int(executor_fail or fails != 0))
if __name__ == '__main__':
main()
|
Check that all required executors are loaded on AppVeyorfrom dmoj import judgeenv, executors
from dmoj.testsuite import Tester
from dmoj.utils.ansi import ansi_style
required_executors = ['AWK', 'BF', 'C', 'CPP03', 'CPP11', 'CS', 'GO', 'PERL', 'PY2', 'PY3',
'RUBY19', 'RUBY21', 'SED', 'VB']
def main():
judgeenv.load_env(cli=True, testsuite=True)
# Emulate ANSI colors with colorama
__import__('colorama').init()
executors.load_executors()
executor_fail = not all(name in executors.executors for name in required_executors)
if executor_fail:
print ansi_style('#ansi[A required executor failed to load.](red|bold)')
else:
print ansi_style('#ansi[All required executors loaded successfully.](green|bold)')
print
tester = Tester(judgeenv.problem_regex, judgeenv.case_regex)
fails = tester.test_all()
print
print 'Test complete'
if fails:
print ansi_style('#ansi[A total of %d case(s) failed](red|bold).') % fails
else:
print ansi_style('#ansi[All cases passed.](green|bold)')
raise SystemExit(int(executor_fail or fails != 0))
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Check that all required executors are loaded on AppVeyor<commit_after>from dmoj import judgeenv, executors
from dmoj.testsuite import Tester
from dmoj.utils.ansi import ansi_style
required_executors = ['AWK', 'BF', 'C', 'CPP03', 'CPP11', 'CS', 'GO', 'PERL', 'PY2', 'PY3',
'RUBY19', 'RUBY21', 'SED', 'VB']
def main():
judgeenv.load_env(cli=True, testsuite=True)
# Emulate ANSI colors with colorama
__import__('colorama').init()
executors.load_executors()
executor_fail = not all(name in executors.executors for name in required_executors)
if executor_fail:
print ansi_style('#ansi[A required executor failed to load.](red|bold)')
else:
print ansi_style('#ansi[All required executors loaded successfully.](green|bold)')
print
tester = Tester(judgeenv.problem_regex, judgeenv.case_regex)
fails = tester.test_all()
print
print 'Test complete'
if fails:
print ansi_style('#ansi[A total of %d case(s) failed](red|bold).') % fails
else:
print ansi_style('#ansi[All cases passed.](green|bold)')
raise SystemExit(int(executor_fail or fails != 0))
if __name__ == '__main__':
main()
|
|
b33b919a15f335d93229bc80bd0f834ebab0c03d
|
scripts/python/plot_signal.py
|
scripts/python/plot_signal.py
|
"""
A script to plot signals
"""
import sys
import os
import getopt
import json
import matplotlib.pyplot as plt
CMD_USAGE = """
"""
MAX_PLOT_SIZE = 5000
def plot_signal(signal):
with open(signal, "rb") as json_file:
json_data = json.load(json_file)
beacons = json_data['interestedBeacons']
legends = []
print "Plotting ", signal
for beacon in beacons:
signal = beacon['rssis']
size = len(signal)
if size > MAX_PLOT_SIZE:
size = MAX_PLOT_SIZE
signal = signal[:size]
x = range(1, size+1)
plt.plot(x, signal)
legends.append('{0} m'.format(beacon['realDistance']))
plt.legend(legends)
plt.show()
def plot_signals(signal_files):
for signal in signal_files:
plot_signal(signal)
def main(argv):
"""
:param argv:
:return:
"""
try:
optlist, _ = getopt.getopt(argv[1:], "hi:",
["help", "inputs="])
except getopt.GetoptError:
print("Command line arguments error, please try --help for help")
return 1
inputs = ""
for opt, opt_arg in optlist:
if opt in ("-h", "--help"):
print CMD_USAGE
return 0
if opt in ("-i", "--inputs"):
inputs = opt_arg
print "Input data folder: ", inputs
signals = []
for root, _, files in os.walk(inputs):
for name in files:
if name.endswith(".raw") or name.endswith(".raw_filter"):
signals.append(os.path.abspath(os.path.join(root, name)))
if len(signals) == 0:
print "There is no data to plot."
else:
plot_signals(signals)
if __name__ == "__main__":
sys.exit(main(sys.argv))
|
Add script to plot signal figure.
|
Add script to plot signal figure.
|
Python
|
bsd-3-clause
|
Wayne82/libsvm-practice,Wayne82/libsvm-practice,Wayne82/libsvm-practice
|
Add script to plot signal figure.
|
"""
A script to plot signals
"""
import sys
import os
import getopt
import json
import matplotlib.pyplot as plt
CMD_USAGE = """
"""
MAX_PLOT_SIZE = 5000
def plot_signal(signal):
with open(signal, "rb") as json_file:
json_data = json.load(json_file)
beacons = json_data['interestedBeacons']
legends = []
print "Plotting ", signal
for beacon in beacons:
signal = beacon['rssis']
size = len(signal)
if size > MAX_PLOT_SIZE:
size = MAX_PLOT_SIZE
signal = signal[:size]
x = range(1, size+1)
plt.plot(x, signal)
legends.append('{0} m'.format(beacon['realDistance']))
plt.legend(legends)
plt.show()
def plot_signals(signal_files):
for signal in signal_files:
plot_signal(signal)
def main(argv):
"""
:param argv:
:return:
"""
try:
optlist, _ = getopt.getopt(argv[1:], "hi:",
["help", "inputs="])
except getopt.GetoptError:
print("Command line arguments error, please try --help for help")
return 1
inputs = ""
for opt, opt_arg in optlist:
if opt in ("-h", "--help"):
print CMD_USAGE
return 0
if opt in ("-i", "--inputs"):
inputs = opt_arg
print "Input data folder: ", inputs
signals = []
for root, _, files in os.walk(inputs):
for name in files:
if name.endswith(".raw") or name.endswith(".raw_filter"):
signals.append(os.path.abspath(os.path.join(root, name)))
if len(signals) == 0:
print "There is no data to plot."
else:
plot_signals(signals)
if __name__ == "__main__":
sys.exit(main(sys.argv))
|
<commit_before><commit_msg>Add script to plot signal figure.<commit_after>
|
"""
A script to plot signals
"""
import sys
import os
import getopt
import json
import matplotlib.pyplot as plt
CMD_USAGE = """
"""
MAX_PLOT_SIZE = 5000
def plot_signal(signal):
with open(signal, "rb") as json_file:
json_data = json.load(json_file)
beacons = json_data['interestedBeacons']
legends = []
print "Plotting ", signal
for beacon in beacons:
signal = beacon['rssis']
size = len(signal)
if size > MAX_PLOT_SIZE:
size = MAX_PLOT_SIZE
signal = signal[:size]
x = range(1, size+1)
plt.plot(x, signal)
legends.append('{0} m'.format(beacon['realDistance']))
plt.legend(legends)
plt.show()
def plot_signals(signal_files):
for signal in signal_files:
plot_signal(signal)
def main(argv):
"""
:param argv:
:return:
"""
try:
optlist, _ = getopt.getopt(argv[1:], "hi:",
["help", "inputs="])
except getopt.GetoptError:
print("Command line arguments error, please try --help for help")
return 1
inputs = ""
for opt, opt_arg in optlist:
if opt in ("-h", "--help"):
print CMD_USAGE
return 0
if opt in ("-i", "--inputs"):
inputs = opt_arg
print "Input data folder: ", inputs
signals = []
for root, _, files in os.walk(inputs):
for name in files:
if name.endswith(".raw") or name.endswith(".raw_filter"):
signals.append(os.path.abspath(os.path.join(root, name)))
if len(signals) == 0:
print "There is no data to plot."
else:
plot_signals(signals)
if __name__ == "__main__":
sys.exit(main(sys.argv))
|
Add script to plot signal figure."""
A script to plot signals
"""
import sys
import os
import getopt
import json
import matplotlib.pyplot as plt
CMD_USAGE = """
"""
MAX_PLOT_SIZE = 5000
def plot_signal(signal):
with open(signal, "rb") as json_file:
json_data = json.load(json_file)
beacons = json_data['interestedBeacons']
legends = []
print "Plotting ", signal
for beacon in beacons:
signal = beacon['rssis']
size = len(signal)
if size > MAX_PLOT_SIZE:
size = MAX_PLOT_SIZE
signal = signal[:size]
x = range(1, size+1)
plt.plot(x, signal)
legends.append('{0} m'.format(beacon['realDistance']))
plt.legend(legends)
plt.show()
def plot_signals(signal_files):
for signal in signal_files:
plot_signal(signal)
def main(argv):
"""
:param argv:
:return:
"""
try:
optlist, _ = getopt.getopt(argv[1:], "hi:",
["help", "inputs="])
except getopt.GetoptError:
print("Command line arguments error, please try --help for help")
return 1
inputs = ""
for opt, opt_arg in optlist:
if opt in ("-h", "--help"):
print CMD_USAGE
return 0
if opt in ("-i", "--inputs"):
inputs = opt_arg
print "Input data folder: ", inputs
signals = []
for root, _, files in os.walk(inputs):
for name in files:
if name.endswith(".raw") or name.endswith(".raw_filter"):
signals.append(os.path.abspath(os.path.join(root, name)))
if len(signals) == 0:
print "There is no data to plot."
else:
plot_signals(signals)
if __name__ == "__main__":
sys.exit(main(sys.argv))
|
<commit_before><commit_msg>Add script to plot signal figure.<commit_after>"""
A script to plot signals
"""
import sys
import os
import getopt
import json
import matplotlib.pyplot as plt
CMD_USAGE = """
"""
MAX_PLOT_SIZE = 5000
def plot_signal(signal):
with open(signal, "rb") as json_file:
json_data = json.load(json_file)
beacons = json_data['interestedBeacons']
legends = []
print "Plotting ", signal
for beacon in beacons:
signal = beacon['rssis']
size = len(signal)
if size > MAX_PLOT_SIZE:
size = MAX_PLOT_SIZE
signal = signal[:size]
x = range(1, size+1)
plt.plot(x, signal)
legends.append('{0} m'.format(beacon['realDistance']))
plt.legend(legends)
plt.show()
def plot_signals(signal_files):
for signal in signal_files:
plot_signal(signal)
def main(argv):
"""
:param argv:
:return:
"""
try:
optlist, _ = getopt.getopt(argv[1:], "hi:",
["help", "inputs="])
except getopt.GetoptError:
print("Command line arguments error, please try --help for help")
return 1
inputs = ""
for opt, opt_arg in optlist:
if opt in ("-h", "--help"):
print CMD_USAGE
return 0
if opt in ("-i", "--inputs"):
inputs = opt_arg
print "Input data folder: ", inputs
signals = []
for root, _, files in os.walk(inputs):
for name in files:
if name.endswith(".raw") or name.endswith(".raw_filter"):
signals.append(os.path.abspath(os.path.join(root, name)))
if len(signals) == 0:
print "There is no data to plot."
else:
plot_signals(signals)
if __name__ == "__main__":
sys.exit(main(sys.argv))
|
|
a9507a8187f72cbf77889ee87b01a54f2ed4ffe9
|
examples/crawler.py
|
examples/crawler.py
|
import guv
guv.monkey_patch()
import requests
def get_url(url):
print('get_url()')
return requests.get(url)
def main():
urls = ['http://httpbin.org/delay/1'] * 10
pool = guv.GreenPool()
results = pool.imap(get_url, urls)
for i, resp in enumerate(results):
print('{}: done, length: {}'.format(i, len(resp.text)))
if __name__ == '__main__':
main()
|
Add simple example web crawler
|
Add simple example web crawler
|
Python
|
mit
|
veegee/guv,veegee/guv
|
Add simple example web crawler
|
import guv
guv.monkey_patch()
import requests
def get_url(url):
print('get_url()')
return requests.get(url)
def main():
urls = ['http://httpbin.org/delay/1'] * 10
pool = guv.GreenPool()
results = pool.imap(get_url, urls)
for i, resp in enumerate(results):
print('{}: done, length: {}'.format(i, len(resp.text)))
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add simple example web crawler<commit_after>
|
import guv
guv.monkey_patch()
import requests
def get_url(url):
print('get_url()')
return requests.get(url)
def main():
urls = ['http://httpbin.org/delay/1'] * 10
pool = guv.GreenPool()
results = pool.imap(get_url, urls)
for i, resp in enumerate(results):
print('{}: done, length: {}'.format(i, len(resp.text)))
if __name__ == '__main__':
main()
|
Add simple example web crawlerimport guv
guv.monkey_patch()
import requests
def get_url(url):
print('get_url()')
return requests.get(url)
def main():
urls = ['http://httpbin.org/delay/1'] * 10
pool = guv.GreenPool()
results = pool.imap(get_url, urls)
for i, resp in enumerate(results):
print('{}: done, length: {}'.format(i, len(resp.text)))
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add simple example web crawler<commit_after>import guv
guv.monkey_patch()
import requests
def get_url(url):
print('get_url()')
return requests.get(url)
def main():
urls = ['http://httpbin.org/delay/1'] * 10
pool = guv.GreenPool()
results = pool.imap(get_url, urls)
for i, resp in enumerate(results):
print('{}: done, length: {}'.format(i, len(resp.text)))
if __name__ == '__main__':
main()
|
|
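For comparison, the same fan-out can be written with the standard-library thread pool instead of guv's green threads; a sketch using the same httpbin URL as above (OS threads rather than greenlets, so the concurrency model differs):

# stdlib equivalent of the green-pool crawl above
from concurrent.futures import ThreadPoolExecutor
import requests

urls = ['http://httpbin.org/delay/1'] * 10
with ThreadPoolExecutor(max_workers=10) as pool:
    for i, resp in enumerate(pool.map(requests.get, urls)):
        print('{}: done, length: {}'.format(i, len(resp.text)))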
4a6eb1059f2321b2e54edc6bb614dca7d06c186f
|
CodeFights/isIPv4Address.py
|
CodeFights/isIPv4Address.py
|
#!/usr/local/bin/python
# Code Fights Is IPv4 Address Problem
def isIPv4Address(inputString):
import re
pattern = re.compile(r'^\d{1,3}(?:\.\d{1,3}){3}$')
match = re.search(pattern, inputString)
if match:
segments = inputString.split(".")
return sum([int(x) >= 0 and int(x) <= 255 for x in segments]) == 4
return False
def main():
tests = [
["172.16.254.1", True],
["172.316.254.1", False],
[".254.255.0", False],
["1.1.1.1a", False],
["1", False],
["0.254.255.0", True],
["1.23.256.255.", False],
["1.23.256..", False],
["0..1.0", False],
["1.1.1.1.1", False],
["1.256.1.1", False],
["a0.1.1.1", False],
["0.1.1.256", False],
["129380129831213981.255.255.255", False],
["255.255.255.255abcdekjhf", False],
["7283728", False]
]
for t in tests:
res = isIPv4Address(t[0])
if t[1] == res:
print("PASSED: isIPv4Address({}) returned {}"
.format(t[0], res))
else:
print("FAILED: isIPv4Address({}) returned {}, should have returned {}"
.format(t[0], res, t[1]))
if __name__ == '__main__':
main()
|
Solve Code Fights is IPv4 address problem
|
Solve Code Fights is IPv4 address problem
|
Python
|
mit
|
HKuz/Test_Code
|
Solve Code Fights is IPv4 address problem
|
#!/usr/local/bin/python
# Code Fights Is IPv4 Address Problem
def isIPv4Address(inputString):
import re
pattern = re.compile(r'^\d{1,3}(?:\.\d{1,3}){3}$')
match = re.search(pattern, inputString)
if match:
segments = inputString.split(".")
return sum([int(x) >= 0 and int(x) <= 255 for x in segments]) == 4
return False
def main():
tests = [
["172.16.254.1", True],
["172.316.254.1", False],
[".254.255.0", False],
["1.1.1.1a", False],
["1", False],
["0.254.255.0", True],
["1.23.256.255.", False],
["1.23.256..", False],
["0..1.0", False],
["1.1.1.1.1", False],
["1.256.1.1", False],
["a0.1.1.1", False],
["0.1.1.256", False],
["129380129831213981.255.255.255", False],
["255.255.255.255abcdekjhf", False],
["7283728", False]
]
for t in tests:
res = isIPv4Address(t[0])
if t[1] == res:
print("PASSED: isIPv4Address({}) returned {}"
.format(t[0], res))
else:
print("FAILED: isIPv4Address({}) returned {}, should have returned {}"
.format(t[0], res, t[1]))
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Solve Code Fights is IPv4 address problem<commit_after>
|
#!/usr/local/bin/python
# Code Fights Is IPv4 Address Problem
def isIPv4Address(inputString):
import re
pattern = re.compile(r'^\d{1,3}(?:\.\d{1,3}){3}$')
match = re.search(pattern, inputString)
if match:
segments = inputString.split(".")
return sum([int(x) >= 0 and int(x) <= 255 for x in segments]) == 4
return False
def main():
tests = [
["172.16.254.1", True],
["172.316.254.1", False],
[".254.255.0", False],
["1.1.1.1a", False],
["1", False],
["0.254.255.0", True],
["1.23.256.255.", False],
["1.23.256..", False],
["0..1.0", False],
["1.1.1.1.1", False],
["1.256.1.1", False],
["a0.1.1.1", False],
["0.1.1.256", False],
["129380129831213981.255.255.255", False],
["255.255.255.255abcdekjhf", False],
["7283728", False]
]
for t in tests:
res = isIPv4Address(t[0])
if t[1] == res:
print("PASSED: isIPv4Address({}) returned {}"
.format(t[0], res))
else:
print("FAILED: isIPv4Address({}) returned {}, should have returned {}"
.format(t[0], res, t[1]))
if __name__ == '__main__':
main()
|
Solve Code Fights is IPv4 address problem#!/usr/local/bin/python
# Code Fights Is IPv4 Address Problem
def isIPv4Address(inputString):
import re
pattern = re.compile(r'^\d{1,3}(?:\.\d{1,3}){3}$')
match = re.search(pattern, inputString)
if match:
segments = inputString.split(".")
return sum([int(x) >= 0 and int(x) <= 255 for x in segments]) == 4
return False
def main():
tests = [
["172.16.254.1", True],
["172.316.254.1", False],
[".254.255.0", False],
["1.1.1.1a", False],
["1", False],
["0.254.255.0", True],
["1.23.256.255.", False],
["1.23.256..", False],
["0..1.0", False],
["1.1.1.1.1", False],
["1.256.1.1", False],
["a0.1.1.1", False],
["0.1.1.256", False],
["129380129831213981.255.255.255", False],
["255.255.255.255abcdekjhf", False],
["7283728", False]
]
for t in tests:
res = isIPv4Address(t[0])
if t[1] == res:
print("PASSED: isIPv4Address({}) returned {}"
.format(t[0], res))
else:
print("FAILED: isIPv4Address({}) returned {}, should have returned {}"
.format(t[0], res, t[1]))
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Solve Code Fights is IPv4 address problem<commit_after>#!/usr/local/bin/python
# Code Fights Is IPv4 Address Problem
def isIPv4Address(inputString):
import re
pattern = re.compile(r'^\d{1,3}(?:\.\d{1,3}){3}$')
match = re.search(pattern, inputString)
if match:
segments = inputString.split(".")
return sum([int(x) >= 0 and int(x) <= 255 for x in segments]) == 4
return False
def main():
tests = [
["172.16.254.1", True],
["172.316.254.1", False],
[".254.255.0", False],
["1.1.1.1a", False],
["1", False],
["0.254.255.0", True],
["1.23.256.255.", False],
["1.23.256..", False],
["0..1.0", False],
["1.1.1.1.1", False],
["1.256.1.1", False],
["a0.1.1.1", False],
["0.1.1.256", False],
["129380129831213981.255.255.255", False],
["255.255.255.255abcdekjhf", False],
["7283728", False]
]
for t in tests:
res = isIPv4Address(t[0])
if t[1] == res:
print("PASSED: isIPv4Address({}) returned {}"
.format(t[0], res))
else:
print("FAILED: isIPv4Address({}) returned {}, should have returned {}"
.format(t[0], res, t[1]))
if __name__ == '__main__':
main()
|
|
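Outside of the exercise, the stdlib offers the same check; note it is stricter than the regex above, which accepts leading zeros such as "001.1.1.1" (recent Python versions reject those in ipaddress). A minimal sketch:

# stdlib-based validation for comparison (Python 3.3+)
import ipaddress

def is_ipv4(s):
    try:
        ipaddress.IPv4Address(s)
        return True
    except ValueError:
        return False

print(is_ipv4('172.16.254.1'))   # True
print(is_ipv4('1.256.1.1'))      # False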
7205ca43cfb7576b217f34fefebb288545609926
|
Test/utiltest.py
|
Test/utiltest.py
|
import sys
sys.path.append("../")
import unittest
import Algorithmia
from Algorithmia.util import getParentAndBase
class UtilTest(unittest.TestCase):
def test_getParentAndBase(self):
self.assertEqual(('a/b', 'c'), getParentAndBase('a/b/c'))
self.assertEqual(('a/b', 'c'), getParentAndBase('a/b/c///'))
self.assertEqual(('//a//b', 'c'), getParentAndBase('//a//b////c///'))
def test_getParentAndBase_errors(self):
self.assertRaises(Exception, getParentAndBase('/'))
self.assertRaises(Exception, getParentAndBase(''))
self.assertRaises(Exception, getParentAndBase('a/'))
if __name__ == '__main__':
unittest.main()
|
Add some tests for the util getParentAndBase method
|
Add some tests for the util getParentAndBase method
|
Python
|
mit
|
algorithmiaio/algorithmia-python
|
Add some tests for the util getParentAndBase method
|
import sys
sys.path.append("../")
import unittest
import Algorithmia
from Algorithmia.util import getParentAndBase
class UtilTest(unittest.TestCase):
def test_getParentAndBase(self):
self.assertEqual(('a/b', 'c'), getParentAndBase('a/b/c'))
self.assertEqual(('a/b', 'c'), getParentAndBase('a/b/c///'))
self.assertEqual(('//a//b', 'c'), getParentAndBase('//a//b////c///'))
def test_getParentAndBase_errors(self):
self.assertRaises(Exception, getParentAndBase('/'))
self.assertRaises(Exception, getParentAndBase(''))
self.assertRaises(Exception, getParentAndBase('a/'))
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add some tests for the util getParentAndBase method<commit_after>
|
import sys
sys.path.append("../")
import unittest
import Algorithmia
from Algorithmia.util import getParentAndBase
class UtilTest(unittest.TestCase):
def test_getParentAndBase(self):
self.assertEqual(('a/b', 'c'), getParentAndBase('a/b/c'))
self.assertEqual(('a/b', 'c'), getParentAndBase('a/b/c///'))
self.assertEqual(('//a//b', 'c'), getParentAndBase('//a//b////c///'))
def test_getParentAndBase_errors(self):
self.assertRaises(Exception, getParentAndBase('/'))
self.assertRaises(Exception, getParentAndBase(''))
self.assertRaises(Exception, getParentAndBase('a/'))
if __name__ == '__main__':
unittest.main()
|
Add some tests for the util getParentAndBase methodimport sys
sys.path.append("../")
import unittest
import Algorithmia
from Algorithmia.util import getParentAndBase
class UtilTest(unittest.TestCase):
def test_getParentAndBase(self):
self.assertEqual(('a/b', 'c'), getParentAndBase('a/b/c'))
self.assertEqual(('a/b', 'c'), getParentAndBase('a/b/c///'))
self.assertEqual(('//a//b', 'c'), getParentAndBase('//a//b////c///'))
def test_getParentAndBase_errors(self):
self.assertRaises(Exception, getParentAndBase('/'))
self.assertRaises(Exception, getParentAndBase(''))
self.assertRaises(Exception, getParentAndBase('a/'))
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add some tests for the util getParentAndBase method<commit_after>import sys
sys.path.append("../")
import unittest
import Algorithmia
from Algorithmia.util import getParentAndBase
class UtilTest(unittest.TestCase):
def test_getParentAndBase(self):
self.assertEqual(('a/b', 'c'), getParentAndBase('a/b/c'))
self.assertEqual(('a/b', 'c'), getParentAndBase('a/b/c///'))
self.assertEqual(('//a//b', 'c'), getParentAndBase('//a//b////c///'))
def test_getParentAndBase_errors(self):
self.assertRaises(Exception, getParentAndBase('/'))
self.assertRaises(Exception, getParentAndBase(''))
self.assertRaises(Exception, getParentAndBase('a/'))
if __name__ == '__main__':
unittest.main()
|
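A caveat on the test code in the record above: self.assertRaises(Exception, getParentAndBase('/')) calls getParentAndBase before assertRaises ever runs, so any exception escapes the test instead of being asserted. A sketch of the usual unittest idioms follows — a drop-in body for that test method, assuming getParentAndBase does raise on these inputs.

def test_getParentAndBase_errors(self):
    # Pass the callable and its argument separately so the call happens
    # inside assertRaises ...
    self.assertRaises(Exception, getParentAndBase, '/')
    self.assertRaises(Exception, getParentAndBase, '')
    # ... or use the context-manager form, which reads more clearly.
    with self.assertRaises(Exception):
        getParentAndBase('a/')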
|
438c6d0b8bea994e9f57b4c480ebcdfd197a0e28
|
setup.py
|
setup.py
|
from setuptools import setup, find_packages
import plaid
url = 'https://github.com/plaid/plaid-python'
setup(
name='plaid-python',
version=plaid.__version__,
description='Simple Python API client for Plaid',
long_description='',
keywords='api, client, plaid',
author='Chris Forrette',
author_email='chris@chrisforrette.com',
url=url,
download_url='{}/tarball/v{}'.format(url, plaid.__version__),
license='MIT',
packages=find_packages(exclude='tests'),
package_data={'README': ['README.md']},
install_requires=['requests==2.2.1'],
zip_safe=False,
include_package_data=True,
classifiers=[
"Programming Language :: Python",
"Topic :: Software Development :: Libraries :: Python Modules",
"Environment :: Web Environment",
]
)
|
from setuptools import setup, find_packages
import plaid
url = 'https://github.com/plaid/plaid-python'
setup(
name='plaid-python',
version=plaid.__version__,
description='Simple Python API client for Plaid',
long_description='',
keywords='api, client, plaid',
author='Chris Forrette',
author_email='chris@chrisforrette.com',
url=url,
download_url='{}/tarball/v{}'.format(url, plaid.__version__),
license='MIT',
packages=find_packages(exclude='tests'),
package_data={'README': ['README.md']},
install_requires=['requests==2.2.1'],
zip_safe=False,
include_package_data=True,
classifiers=[
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.4",
"Topic :: Software Development :: Libraries :: Python Modules",
"Environment :: Web Environment",
]
)
|
Update "Programming Language" Tag to Indicate Python3 Support
|
Update "Programming Language" Tag to Indicate Python3 Support
|
Python
|
mit
|
erikbern/plaid-python,qwil/plaid-python,plaid/plaid-python
|
from setuptools import setup, find_packages
import plaid
url = 'https://github.com/plaid/plaid-python'
setup(
name='plaid-python',
version=plaid.__version__,
description='Simple Python API client for Plaid',
long_description='',
keywords='api, client, plaid',
author='Chris Forrette',
author_email='chris@chrisforrette.com',
url=url,
download_url='{}/tarball/v{}'.format(url, plaid.__version__),
license='MIT',
packages=find_packages(exclude='tests'),
package_data={'README': ['README.md']},
install_requires=['requests==2.2.1'],
zip_safe=False,
include_package_data=True,
classifiers=[
"Programming Language :: Python",
"Topic :: Software Development :: Libraries :: Python Modules",
"Environment :: Web Environment",
]
)
Update "Programming Language" Tag to Indicate Python3 Support
|
from setuptools import setup, find_packages
import plaid
url = 'https://github.com/plaid/plaid-python'
setup(
name='plaid-python',
version=plaid.__version__,
description='Simple Python API client for Plaid',
long_description='',
keywords='api, client, plaid',
author='Chris Forrette',
author_email='chris@chrisforrette.com',
url=url,
download_url='{}/tarball/v{}'.format(url, plaid.__version__),
license='MIT',
packages=find_packages(exclude='tests'),
package_data={'README': ['README.md']},
install_requires=['requests==2.2.1'],
zip_safe=False,
include_package_data=True,
classifiers=[
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.4",
"Topic :: Software Development :: Libraries :: Python Modules",
"Environment :: Web Environment",
]
)
|
<commit_before>from setuptools import setup, find_packages
import plaid
url = 'https://github.com/plaid/plaid-python'
setup(
name='plaid-python',
version=plaid.__version__,
description='Simple Python API client for Plaid',
long_description='',
keywords='api, client, plaid',
author='Chris Forrette',
author_email='chris@chrisforrette.com',
url=url,
download_url='{}/tarball/v{}'.format(url, plaid.__version__),
license='MIT',
packages=find_packages(exclude='tests'),
package_data={'README': ['README.md']},
install_requires=['requests==2.2.1'],
zip_safe=False,
include_package_data=True,
classifiers=[
"Programming Language :: Python",
"Topic :: Software Development :: Libraries :: Python Modules",
"Environment :: Web Environment",
]
)
<commit_msg>Update "Programming Language" Tag to Indicate Python3 Support<commit_after>
|
from setuptools import setup, find_packages
import plaid
url = 'https://github.com/plaid/plaid-python'
setup(
name='plaid-python',
version=plaid.__version__,
description='Simple Python API client for Plaid',
long_description='',
keywords='api, client, plaid',
author='Chris Forrette',
author_email='chris@chrisforrette.com',
url=url,
download_url='{}/tarball/v{}'.format(url, plaid.__version__),
license='MIT',
packages=find_packages(exclude='tests'),
package_data={'README': ['README.md']},
install_requires=['requests==2.2.1'],
zip_safe=False,
include_package_data=True,
classifiers=[
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.4",
"Topic :: Software Development :: Libraries :: Python Modules",
"Environment :: Web Environment",
]
)
|
from setuptools import setup, find_packages
import plaid
url = 'https://github.com/plaid/plaid-python'
setup(
name='plaid-python',
version=plaid.__version__,
description='Simple Python API client for Plaid',
long_description='',
keywords='api, client, plaid',
author='Chris Forrette',
author_email='chris@chrisforrette.com',
url=url,
download_url='{}/tarball/v{}'.format(url, plaid.__version__),
license='MIT',
packages=find_packages(exclude='tests'),
package_data={'README': ['README.md']},
install_requires=['requests==2.2.1'],
zip_safe=False,
include_package_data=True,
classifiers=[
"Programming Language :: Python",
"Topic :: Software Development :: Libraries :: Python Modules",
"Environment :: Web Environment",
]
)
Update "Programming Language" Tag to Indicate Python3 Supportfrom setuptools import setup, find_packages
import plaid
url = 'https://github.com/plaid/plaid-python'
setup(
name='plaid-python',
version=plaid.__version__,
description='Simple Python API client for Plaid',
long_description='',
keywords='api, client, plaid',
author='Chris Forrette',
author_email='chris@chrisforrette.com',
url=url,
download_url='{}/tarball/v{}'.format(url, plaid.__version__),
license='MIT',
packages=find_packages(exclude='tests'),
package_data={'README': ['README.md']},
install_requires=['requests==2.2.1'],
zip_safe=False,
include_package_data=True,
classifiers=[
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.4",
"Topic :: Software Development :: Libraries :: Python Modules",
"Environment :: Web Environment",
]
)
|
<commit_before>from setuptools import setup, find_packages
import plaid
url = 'https://github.com/plaid/plaid-python'
setup(
name='plaid-python',
version=plaid.__version__,
description='Simple Python API client for Plaid',
long_description='',
keywords='api, client, plaid',
author='Chris Forrette',
author_email='chris@chrisforrette.com',
url=url,
download_url='{}/tarball/v{}'.format(url, plaid.__version__),
license='MIT',
packages=find_packages(exclude='tests'),
package_data={'README': ['README.md']},
install_requires=['requests==2.2.1'],
zip_safe=False,
include_package_data=True,
classifiers=[
"Programming Language :: Python",
"Topic :: Software Development :: Libraries :: Python Modules",
"Environment :: Web Environment",
]
)
<commit_msg>Update "Programming Language" Tag to Indicate Python3 Support<commit_after>from setuptools import setup, find_packages
import plaid
url = 'https://github.com/plaid/plaid-python'
setup(
name='plaid-python',
version=plaid.__version__,
description='Simple Python API client for Plaid',
long_description='',
keywords='api, client, plaid',
author='Chris Forrette',
author_email='chris@chrisforrette.com',
url=url,
download_url='{}/tarball/v{}'.format(url, plaid.__version__),
license='MIT',
packages=find_packages(exclude='tests'),
package_data={'README': ['README.md']},
install_requires=['requests==2.2.1'],
zip_safe=False,
include_package_data=True,
classifiers=[
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.4",
"Topic :: Software Development :: Libraries :: Python Modules",
"Environment :: Web Environment",
]
)
|
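A note on the record above: trove classifiers are informational only. If the goal is for pip to enforce the supported interpreter range at install time, python_requires does that. A minimal sketch, assuming a setuptools version new enough to understand the keyword; the package name is hypothetical, for illustration only.

from setuptools import setup

setup(
    name='example-package',  # hypothetical name, not the record's package
    version='0.1.0',
    # Enforced by pip at install time, unlike the informational classifiers.
    python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*',
)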
66cf36dc9803443723c824b246ef8c065aba8d86
|
utils/CIndex/completion_logger_server.py
|
utils/CIndex/completion_logger_server.py
|
#!/usr/bin/env python
import sys
from socket import *
from time import localtime, strftime
def main():
if len(sys.argv) < 4:
print "completion_logger_server.py <listen address> <listen port> <log file>"
exit(1)
host = sys.argv[1]
port = int(sys.argv[2])
buf = 1024 * 8
addr = (host,port)
# Create socket and bind to address
UDPSock = socket(AF_INET,SOCK_DGRAM)
UDPSock.bind(addr)
print "Listing on {0}:{1} and logging to '{2}'".format(host, port, sys.argv[3])
# Open the logging file.
f = open(sys.argv[3], "a")
# Receive messages
while 1:
data,addr = UDPSock.recvfrom(buf)
if not data:
break
else:
f.write(strftime("'%a, %d %b %Y %H:%M:%S' ", localtime()))
f.write(data)
f.write('\n')
# Close socket
UDPSock.close()
if __name__ == '__main__':
main()
|
Add simple python server for recording code completion timings.
|
Add simple python server for recording code completion timings.
git-svn-id: ffe668792ed300d6c2daa1f6eba2e0aa28d7ec6c@101327 91177308-0d34-0410-b5e6-96231b3b80d8
|
Python
|
apache-2.0
|
llvm-mirror/clang,apple/swift-clang,apple/swift-clang,apple/swift-clang,llvm-mirror/clang,llvm-mirror/clang,apple/swift-clang,apple/swift-clang,llvm-mirror/clang,llvm-mirror/clang,llvm-mirror/clang,llvm-mirror/clang,llvm-mirror/clang,apple/swift-clang,apple/swift-clang,apple/swift-clang,apple/swift-clang,llvm-mirror/clang,apple/swift-clang,llvm-mirror/clang
|
Add simple python server for recording code completion timings.
git-svn-id: ffe668792ed300d6c2daa1f6eba2e0aa28d7ec6c@101327 91177308-0d34-0410-b5e6-96231b3b80d8
|
#!/usr/bin/env python
import sys
from socket import *
from time import localtime, strftime
def main():
if len(sys.argv) < 4:
print "completion_logger_server.py <listen address> <listen port> <log file>"
exit(1)
host = sys.argv[1]
port = int(sys.argv[2])
buf = 1024 * 8
addr = (host,port)
# Create socket and bind to address
UDPSock = socket(AF_INET,SOCK_DGRAM)
UDPSock.bind(addr)
print "Listing on {0}:{1} and logging to '{2}'".format(host, port, sys.argv[3])
# Open the logging file.
f = open(sys.argv[3], "a")
# Receive messages
while 1:
data,addr = UDPSock.recvfrom(buf)
if not data:
break
else:
f.write(strftime("'%a, %d %b %Y %H:%M:%S' ", localtime()))
f.write(data)
f.write('\n')
# Close socket
UDPSock.close()
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add simple python server for recording code completion timings.
git-svn-id: ffe668792ed300d6c2daa1f6eba2e0aa28d7ec6c@101327 91177308-0d34-0410-b5e6-96231b3b80d8<commit_after>
|
#!/usr/bin/env python
import sys
from socket import *
from time import localtime, strftime
def main():
if len(sys.argv) < 4:
print "completion_logger_server.py <listen address> <listen port> <log file>"
exit(1)
host = sys.argv[1]
port = int(sys.argv[2])
buf = 1024 * 8
addr = (host,port)
# Create socket and bind to address
UDPSock = socket(AF_INET,SOCK_DGRAM)
UDPSock.bind(addr)
print "Listing on {0}:{1} and logging to '{2}'".format(host, port, sys.argv[3])
# Open the logging file.
f = open(sys.argv[3], "a")
# Receive messages
while 1:
data,addr = UDPSock.recvfrom(buf)
if not data:
break
else:
f.write(strftime("'%a, %d %b %Y %H:%M:%S' ", localtime()))
f.write(data)
f.write('\n')
# Close socket
UDPSock.close()
if __name__ == '__main__':
main()
|
Add simple python server for recording code completion timings.
git-svn-id: ffe668792ed300d6c2daa1f6eba2e0aa28d7ec6c@101327 91177308-0d34-0410-b5e6-96231b3b80d8#!/usr/bin/env python
import sys
from socket import *
from time import localtime, strftime
def main():
if len(sys.argv) < 4:
print "completion_logger_server.py <listen address> <listen port> <log file>"
exit(1)
host = sys.argv[1]
port = int(sys.argv[2])
buf = 1024 * 8
addr = (host,port)
# Create socket and bind to address
UDPSock = socket(AF_INET,SOCK_DGRAM)
UDPSock.bind(addr)
print "Listing on {0}:{1} and logging to '{2}'".format(host, port, sys.argv[3])
# Open the logging file.
f = open(sys.argv[3], "a")
# Receive messages
while 1:
data,addr = UDPSock.recvfrom(buf)
if not data:
break
else:
f.write(strftime("'%a, %d %b %Y %H:%M:%S' ", localtime()))
f.write(data)
f.write('\n')
# Close socket
UDPSock.close()
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add simple python server for recording code completion timings.
git-svn-id: ffe668792ed300d6c2daa1f6eba2e0aa28d7ec6c@101327 91177308-0d34-0410-b5e6-96231b3b80d8<commit_after>#!/usr/bin/env python
import sys
from socket import *
from time import localtime, strftime
def main():
if len(sys.argv) < 4:
print "completion_logger_server.py <listen address> <listen port> <log file>"
exit(1)
host = sys.argv[1]
port = int(sys.argv[2])
buf = 1024 * 8
addr = (host,port)
# Create socket and bind to address
UDPSock = socket(AF_INET,SOCK_DGRAM)
UDPSock.bind(addr)
print "Listing on {0}:{1} and logging to '{2}'".format(host, port, sys.argv[3])
# Open the logging file.
f = open(sys.argv[3], "a")
# Receive messages
while 1:
data,addr = UDPSock.recvfrom(buf)
if not data:
break
else:
f.write(strftime("'%a, %d %b %Y %H:%M:%S' ", localtime()))
f.write(data)
f.write('\n')
# Close socket
UDPSock.close()
if __name__ == '__main__':
main()
|
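The script in the record above is Python 2 (print statements, implicit bytes). A hedged Python 3 rendering of the same UDP logging loop is sketched below; it also corrects the "Listing" typo in the startup banner.

#!/usr/bin/env python3
import socket
import sys
from time import localtime, strftime

def main():
    if len(sys.argv) < 4:
        print("completion_logger_server.py <listen address> <listen port> <log file>")
        sys.exit(1)
    host, port, log_path = sys.argv[1], int(sys.argv[2]), sys.argv[3]
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    sock.bind((host, port))
    print("Listening on {0}:{1} and logging to '{2}'".format(host, port, log_path))
    with open(log_path, "a") as f:
        while True:
            # recvfrom returns bytes in Python 3; decode before writing
            # to a text-mode file.
            data, addr = sock.recvfrom(8192)
            if not data:
                break
            f.write(strftime("'%a, %d %b %Y %H:%M:%S' ", localtime()))
            f.write(data.decode("utf-8", errors="replace"))
            f.write("\n")

if __name__ == '__main__':
    main()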
|
31fb276ce27375b40e1ac6fe2fd360802cf65f77
|
tests/test_pm_imagenet.py
|
tests/test_pm_imagenet.py
|
import pytest
import torch
import torch.nn as nn
from torch.autograd import Variable
import pretrainedmodels as pm
import pretrainedmodels.utils as utils
pm_args = []
for model_name in pm.model_names:
for pretrained in pm.pretrained_settings[model_name]:
if pretrained in ['imagenet', 'imagenet+5k']:
pm_args.append((model_name, pretrained))
img = utils.LoadImage()('data/cat.jpg')
@pytest.mark.parametrize('model_name, pretrained', pm_args)
def test_pm_imagenet(model_name, pretrained):
print('test_pm_imagenet("{}")'.format(model_name))
net = pm.__dict__[model_name](
num_classes=1000,
pretrained=pretrained)
net.eval()
tensor = utils.TransformImage(net)(img)
tensor = tensor.unsqueeze(0)
x = Variable(tensor, requires_grad=False)
out_logits = net(x)
if 'squeezenet' in model_name:
# Conv2d without view at the end
assert out_logits.shape == torch.Size([1,1000,1,1])
return
assert out_logits.shape == torch.Size([1,1000])
out_feats = net.features(x)
out_logits_2 = net.logits(out_feats)
assert torch.equal(out_logits_2, out_logits)
if 'dpn' in model_name:
# Conv2d instead of Linear
return
net.last_linear = nn.Linear(
net.last_linear.in_features,
10)
out_logits_3 = net.logits(out_feats)
assert out_logits_3.shape == torch.Size([1,10])
|
Add download/forward tests for imagenet pretrainedmodels
|
Add download/forward tests for imagenet pretrainedmodels
|
Python
|
bsd-3-clause
|
Cadene/pretrained-models.pytorch
|
Add download/forward tests for imagenet pretrainedmodels
|
import pytest
import torch
import torch.nn as nn
from torch.autograd import Variable
import pretrainedmodels as pm
import pretrainedmodels.utils as utils
pm_args = []
for model_name in pm.model_names:
for pretrained in pm.pretrained_settings[model_name]:
if pretrained in ['imagenet', 'imagenet+5k']:
pm_args.append((model_name, pretrained))
img = utils.LoadImage()('data/cat.jpg')
@pytest.mark.parametrize('model_name, pretrained', pm_args)
def test_pm_imagenet(model_name, pretrained):
print('test_pm_imagenet("{}")'.format(model_name))
net = pm.__dict__[model_name](
num_classes=1000,
pretrained=pretrained)
net.eval()
tensor = utils.TransformImage(net)(img)
tensor = tensor.unsqueeze(0)
x = Variable(tensor, requires_grad=False)
out_logits = net(x)
if 'squeezenet' in model_name:
# Conv2d without view at the end
assert out_logits.shape == torch.Size([1,1000,1,1])
return
assert out_logits.shape == torch.Size([1,1000])
out_feats = net.features(x)
out_logits_2 = net.logits(out_feats)
assert torch.equal(out_logits_2, out_logits)
if 'dpn' in model_name:
# Conv2d instead of Linear
return
net.last_linear = nn.Linear(
net.last_linear.in_features,
10)
out_logits_3 = net.logits(out_feats)
assert out_logits_3.shape == torch.Size([1,10])
|
<commit_before><commit_msg>Add download/forward tests for imagenet pretrainedmodels<commit_after>
|
import pytest
import torch
import torch.nn as nn
from torch.autograd import Variable
import pretrainedmodels as pm
import pretrainedmodels.utils as utils
pm_args = []
for model_name in pm.model_names:
for pretrained in pm.pretrained_settings[model_name]:
if pretrained in ['imagenet', 'imagenet+5k']:
pm_args.append((model_name, pretrained))
img = utils.LoadImage()('data/cat.jpg')
@pytest.mark.parametrize('model_name, pretrained', pm_args)
def test_pm_imagenet(model_name, pretrained):
print('test_pm_imagenet("{}")'.format(model_name))
net = pm.__dict__[model_name](
num_classes=1000,
pretrained=pretrained)
net.eval()
tensor = utils.TransformImage(net)(img)
tensor = tensor.unsqueeze(0)
x = Variable(tensor, requires_grad=False)
out_logits = net(x)
if 'squeezenet' in model_name:
# Conv2d without view at the end
assert out_logits.shape == torch.Size([1,1000,1,1])
return
assert out_logits.shape == torch.Size([1,1000])
out_feats = net.features(x)
out_logits_2 = net.logits(out_feats)
assert torch.equal(out_logits_2, out_logits)
if 'dpn' in model_name:
# Conv2d instead of Linear
return
net.last_linear = nn.Linear(
net.last_linear.in_features,
10)
out_logits_3 = net.logits(out_feats)
assert out_logits_3.shape == torch.Size([1,10])
|
Add download/forward tests for imagenet pretrainedmodelsimport pytest
import torch
import torch.nn as nn
from torch.autograd import Variable
import pretrainedmodels as pm
import pretrainedmodels.utils as utils
pm_args = []
for model_name in pm.model_names:
for pretrained in pm.pretrained_settings[model_name]:
if pretrained in ['imagenet', 'imagenet+5k']:
pm_args.append((model_name, pretrained))
img = utils.LoadImage()('data/cat.jpg')
@pytest.mark.parametrize('model_name, pretrained', pm_args)
def test_pm_imagenet(model_name, pretrained):
print('test_pm_imagenet("{}")'.format(model_name))
net = pm.__dict__[model_name](
num_classes=1000,
pretrained=pretrained)
net.eval()
tensor = utils.TransformImage(net)(img)
tensor = tensor.unsqueeze(0)
x = Variable(tensor, requires_grad=False)
out_logits = net(x)
if 'squeezenet' in model_name:
# Conv2d without view at the end
assert out_logits.shape == torch.Size([1,1000,1,1])
return
assert out_logits.shape == torch.Size([1,1000])
out_feats = net.features(x)
out_logits_2 = net.logits(out_feats)
assert torch.equal(out_logits_2, out_logits)
if 'dpn' in model_name:
# Conv2d instead of Linear
return
net.last_linear = nn.Linear(
net.last_linear.in_features,
10)
out_logits_3 = net.logits(out_feats)
assert out_logits_3.shape == torch.Size([1,10])
|
<commit_before><commit_msg>Add download/forward tests for imagenet pretrainedmodels<commit_after>import pytest
import torch
import torch.nn as nn
from torch.autograd import Variable
import pretrainedmodels as pm
import pretrainedmodels.utils as utils
pm_args = []
for model_name in pm.model_names:
for pretrained in pm.pretrained_settings[model_name]:
if pretrained in ['imagenet', 'imagenet+5k']:
pm_args.append((model_name, pretrained))
img = utils.LoadImage()('data/cat.jpg')
@pytest.mark.parametrize('model_name, pretrained', pm_args)
def test_pm_imagenet(model_name, pretrained):
print('test_pm_imagenet("{}")'.format(model_name))
net = pm.__dict__[model_name](
num_classes=1000,
pretrained=pretrained)
net.eval()
tensor = utils.TransformImage(net)(img)
tensor = tensor.unsqueeze(0)
x = Variable(tensor, requires_grad=False)
out_logits = net(x)
if 'squeezenet' in model_name:
# Conv2d without view at the end
assert out_logits.shape == torch.Size([1,1000,1,1])
return
assert out_logits.shape == torch.Size([1,1000])
out_feats = net.features(x)
out_logits_2 = net.logits(out_feats)
assert torch.equal(out_logits_2, out_logits)
if 'dpn' in model_name:
# Conv2d instead of Linear
return
net.last_linear = nn.Linear(
net.last_linear.in_features,
10)
out_logits_3 = net.logits(out_feats)
assert out_logits_3.shape == torch.Size([1,10])
|
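The test in the record above drives the models through torch.autograd.Variable with requires_grad=False, the pre-0.4 PyTorch API; Variable has since been deprecated in favor of plain tensors. A minimal sketch of the equivalent inference-mode forward pass on modern PyTorch:

import torch

def forward_logits(net, tensor):
    # eval() freezes dropout/batch-norm behavior; no_grad() disables
    # gradient tracking, replacing Variable(..., requires_grad=False).
    net.eval()
    with torch.no_grad():
        return net(tensor.unsqueeze(0))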
|
c8c737d3763965543e738225d5d435601c50b9b2
|
bn.py
|
bn.py
|
import sys
from time import gmtime
year, mon, mday, hour, min, sec, wday, yday, isdst = gmtime()
bld = ((year - 2000) * 12 + mon - 1) * 100 + mday
rev = hour * 100 + min
print 'Your build and revision number for today is %d.%d.' % (bld, rev)
|
Build and revision number script
|
Build and revision number script
|
Python
|
apache-2.0
|
vasiliy-vdovichenko/ncrontab,youniss2013/ncrontab,amitapl/ncrontab,xingh/ncrontab,devnarayan/ncrontab,paulgmiller/ncrontab,cyotek/ncrontab,IntranetFactory/ncrontab,jakesays/ncrontab,codesharpdev/ncrontab,DanThiffault/ncrontab,paulgmiller/ncrontab,IntranetFactory/ncrontab,micheltol/ncrontab,bjorskog/ncrontab,xingh/ncrontab,petlof/ncrontab,JophyJob/ncrontab,joshberry/ncrontab,codeyu/ncrontab,klunwebale/ncrontab,dabutvin/ncrontab,rog1039/ncrontab
|
Build and revision number script
|
import sys
from time import gmtime
year, mon, mday, hour, min, sec, wday, yday, isdst = gmtime()
bld = ((year - 2000) * 12 + mon - 1) * 100 + mday
rev = hour * 100 + min
print 'Your build and revision number for today is %d.%d.' % (bld, rev)
|
<commit_before><commit_msg>Build and revision number script<commit_after>
|
import sys
from time import gmtime
year, mon, mday, hour, min, sec, wday, yday, isdst = gmtime()
bld = ((year - 2000) * 12 + mon - 1) * 100 + mday
rev = hour * 100 + min
print 'Your build and revision number for today is %d.%d.' % (bld, rev)
|
Build and revision number scriptimport sys
from time import gmtime
year, mon, mday, hour, min, sec, wday, yday, isdst = gmtime()
bld = ((year - 2000) * 12 + mon - 1) * 100 + mday
rev = hour * 100 + min
print 'Your build and revision number for today is %d.%d.' % (bld, rev)
|
<commit_before><commit_msg>Build and revision number script<commit_after>import sys
from time import gmtime
year, mon, mday, hour, min, sec, wday, yday, isdst = gmtime()
bld = ((year - 2000) * 12 + mon - 1) * 100 + mday
rev = hour * 100 + min
print 'Your build and revision number for today is %d.%d.' % (bld, rev)
|
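The script in the record above is Python 2 (print statement). As a worked example of the numbering scheme, 2010-04-15 gives bld = ((2010 - 2000) * 12 + 4 - 1) * 100 + 15 = 12315. A Python 3 rendering of the same logic:

from time import gmtime

# struct_time unpacks positionally: year, month, day, hour, minute, ...
year, mon, mday, hour, minute, *_ = gmtime()
bld = ((year - 2000) * 12 + mon - 1) * 100 + mday
rev = hour * 100 + minute
print('Your build and revision number for today is %d.%d.' % (bld, rev))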
|
35778c48ba197803e2688732cf11d346838e7b7f
|
tests/integration/test_sqs.py
|
tests/integration/test_sqs.py
|
import os
from asyncaws import SQS
from tornado.testing import AsyncTestCase, gen_test
aws_key_id = os.environ['AWS_ACCESS_KEY_ID']
aws_key_secret = os.environ['AWS_SECRET_ACCESS_KEY']
aws_region = os.environ['AWS_REGION']
class TestSQS(AsyncTestCase):
sqs = SQS(aws_key_id, aws_key_secret, aws_region, async=False)
@gen_test(timeout=60)
def test_create_queue(self):
queue_url = self.sqs.create_queue(
"test-queue", {"MessageRetentionPeriod": 60})
self.assertIsInstance(queue_url, str)
self.assertTrue(queue_url.startswith('http'))
get_attr_result = self.sqs.get_queue_attributes(
queue_url, ['MessageRetentionPeriod'])
self.assertIsInstance(get_attr_result, dict)
self.assertEqual(get_attr_result['MessageRetentionPeriod'], '60')
add_perm_result = self.sqs.add_permission(
queue_url, ['637085312181'], ["SendMessage"], "test-permission-id")
self.assertIsInstance(add_perm_result, str)
delete_result = self.sqs.delete_queue(queue_url)
self.assertIsInstance(delete_result, str)
|
Add first integration test for SQS
|
Add first integration test for SQS
|
Python
|
mit
|
MA3STR0/AsyncAWS
|
Add first integration test for SQS
|
import os
from asyncaws import SQS
from tornado.testing import AsyncTestCase, gen_test
aws_key_id = os.environ['AWS_ACCESS_KEY_ID']
aws_key_secret = os.environ['AWS_SECRET_ACCESS_KEY']
aws_region = os.environ['AWS_REGION']
class TestSQS(AsyncTestCase):
sqs = SQS(aws_key_id, aws_key_secret, aws_region, async=False)
@gen_test(timeout=60)
def test_create_queue(self):
queue_url = self.sqs.create_queue(
"test-queue", {"MessageRetentionPeriod": 60})
self.assertIsInstance(queue_url, str)
self.assertTrue(queue_url.startswith('http'))
get_attr_result = self.sqs.get_queue_attributes(
queue_url, ['MessageRetentionPeriod'])
self.assertIsInstance(get_attr_result, dict)
self.assertEqual(get_attr_result['MessageRetentionPeriod'], '60')
add_perm_result = self.sqs.add_permission(
queue_url, ['637085312181'], ["SendMessage"], "test-permission-id")
self.assertIsInstance(add_perm_result, str)
delete_result = self.sqs.delete_queue(queue_url)
self.assertIsInstance(delete_result, str)
|
<commit_before><commit_msg>Add first integration test for SQS<commit_after>
|
import os
from asyncaws import SQS
from tornado.testing import AsyncTestCase, gen_test
aws_key_id = os.environ['AWS_ACCESS_KEY_ID']
aws_key_secret = os.environ['AWS_SECRET_ACCESS_KEY']
aws_region = os.environ['AWS_REGION']
class TestSQS(AsyncTestCase):
sqs = SQS(aws_key_id, aws_key_secret, aws_region, async=False)
@gen_test(timeout=60)
def test_create_queue(self):
queue_url = self.sqs.create_queue(
"test-queue", {"MessageRetentionPeriod": 60})
self.assertIsInstance(queue_url, str)
self.assertTrue(queue_url.startswith('http'))
get_attr_result = self.sqs.get_queue_attributes(
queue_url, ['MessageRetentionPeriod'])
self.assertIsInstance(get_attr_result, dict)
self.assertEqual(get_attr_result['MessageRetentionPeriod'], '60')
add_perm_result = self.sqs.add_permission(
queue_url, ['637085312181'], ["SendMessage"], "test-permission-id")
self.assertIsInstance(add_perm_result, str)
delete_result = self.sqs.delete_queue(queue_url)
self.assertIsInstance(delete_result, str)
|
Add first integration test for SQSimport os
from asyncaws import SQS
from tornado.testing import AsyncTestCase, gen_test
aws_key_id = os.environ['AWS_ACCESS_KEY_ID']
aws_key_secret = os.environ['AWS_SECRET_ACCESS_KEY']
aws_region = os.environ['AWS_REGION']
class TestSQS(AsyncTestCase):
sqs = SQS(aws_key_id, aws_key_secret, aws_region, async=False)
@gen_test(timeout=60)
def test_create_queue(self):
queue_url = self.sqs.create_queue(
"test-queue", {"MessageRetentionPeriod": 60})
self.assertIsInstance(queue_url, str)
self.assertTrue(queue_url.startswith('http'))
get_attr_result = self.sqs.get_queue_attributes(
queue_url, ['MessageRetentionPeriod'])
self.assertIsInstance(get_attr_result, dict)
self.assertEqual(get_attr_result['MessageRetentionPeriod'], '60')
add_perm_result = self.sqs.add_permission(
queue_url, ['637085312181'], ["SendMessage"], "test-permission-id")
self.assertIsInstance(add_perm_result, str)
delete_result = self.sqs.delete_queue(queue_url)
self.assertIsInstance(delete_result, str)
|
<commit_before><commit_msg>Add first integration test for SQS<commit_after>import os
from asyncaws import SQS
from tornado.testing import AsyncTestCase, gen_test
aws_key_id = os.environ['AWS_ACCESS_KEY_ID']
aws_key_secret = os.environ['AWS_SECRET_ACCESS_KEY']
aws_region = os.environ['AWS_REGION']
class TestSQS(AsyncTestCase):
sqs = SQS(aws_key_id, aws_key_secret, aws_region, async=False)
@gen_test(timeout=60)
def test_create_queue(self):
queue_url = self.sqs.create_queue(
"test-queue", {"MessageRetentionPeriod": 60})
self.assertIsInstance(queue_url, str)
self.assertTrue(queue_url.startswith('http'))
get_attr_result = self.sqs.get_queue_attributes(
queue_url, ['MessageRetentionPeriod'])
self.assertIsInstance(get_attr_result, dict)
self.assertEqual(get_attr_result['MessageRetentionPeriod'], '60')
add_perm_result = self.sqs.add_permission(
queue_url, ['637085312181'], ["SendMessage"], "test-permission-id")
self.assertIsInstance(add_perm_result, str)
delete_result = self.sqs.delete_queue(queue_url)
self.assertIsInstance(delete_result, str)
|
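A portability note on the record above: SQS(..., async=False) stopped parsing on Python 3.7+, where async became a reserved keyword, so both this call site and the library's parameter would need renaming there. The sketch below is illustrative only; use_async is a hypothetical replacement name, not the library's actual API.

class SQS:
    # 'async' is a SyntaxError as an identifier from Python 3.7 on;
    # a renamed flag sidesteps the clash.
    def __init__(self, key_id, key_secret, region, use_async=True):
        self.use_async = use_async  # remaining client setup omitted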
|
c29e89b5e5c448e21354c4d5a0cb55b7ca59c4e0
|
app.py
|
app.py
|
import json
import tornado.ioloop
import tornado.web
class MainHandler(tornado.web.RequestHandler):
"""Handles post requests by responding with a JSON file."""
@tornado.web.asynchronous
def post(self):
data = json.loads(self.request.body.decode('utf-8'))
self.optimize_portfolio(data)
self.finish()
def optimize_portfolio(self, data):
Info = {'key': 'value'}
self.write(json.dumps(data))
def make_app():
"""Returns an application with a handler for only '/'."""
return tornado.web.Application([
(r"/", MainHandler),
])
if __name__ == "__main__":
app = make_app()
app.listen(8000)
tornado.ioloop.IOLoop.current().start()
"""
Responds to this curl
curl -H "Content-Type: application
/json" -X POST -d '{"username":"xyz","password":"xyz"}' http://localhost:8000
"""
|
Write a tornado server that responds to POST requests.
|
Write a tornado server that responds to POST requests.
|
Python
|
mit
|
coshx/portfolio_optimizer,coshx/portfolio_optimizer,coshx/portfolio_optimizer,coshx/portfolio_optimizer
|
Write a tornado server that responds to POST requests.
|
import json
import tornado.ioloop
import tornado.web
class MainHandler(tornado.web.RequestHandler):
"""Handles post requests by responding with a JSON file."""
@tornado.web.asynchronous
def post(self):
data = json.loads(self.request.body.decode('utf-8'))
self.optimize_portfolio(data)
self.finish()
def optimize_portfolio(self, data):
Info = {'key': 'value'}
self.write(json.dumps(data))
def make_app():
"""Returns an application with a handler for only '/'."""
return tornado.web.Application([
(r"/", MainHandler),
])
if __name__ == "__main__":
app = make_app()
app.listen(8000)
tornado.ioloop.IOLoop.current().start()
"""
Responds to this curl
curl -H "Content-Type: application
/json" -X POST -d '{"username":"xyz","password":"xyz"}' http://localhost:8000
"""
|
<commit_before><commit_msg>Write a tornado server that responds to POST requests.<commit_after>
|
import json
import tornado.ioloop
import tornado.web
class MainHandler(tornado.web.RequestHandler):
"""Handles post requests by responding with a JSON file."""
@tornado.web.asynchronous
def post(self):
data = json.loads(self.request.body.decode('utf-8'))
self.optimize_portfolio(data)
self.finish()
def optimize_portfolio(self, data):
Info = {'key': 'value'}
self.write(json.dumps(data))
def make_app():
"""Returns an application with a handler for only '/'."""
return tornado.web.Application([
(r"/", MainHandler),
])
if __name__ == "__main__":
app = make_app()
app.listen(8000)
tornado.ioloop.IOLoop.current().start()
"""
Responds to this curl
curl -H "Content-Type: application
/json" -X POST -d '{"username":"xyz","password":"xyz"}' http://localhost:8000
"""
|
Write a tornado server that responds to POST requests.import json
import tornado.ioloop
import tornado.web
class MainHandler(tornado.web.RequestHandler):
"""Handles post requests by responding with a JSON file."""
@tornado.web.asynchronous
def post(self):
data = json.loads(self.request.body.decode('utf-8'))
self.optimize_portfolio(data)
self.finish()
def optimize_portfolio(self, data):
Info = {'key': 'value'}
self.write(json.dumps(data))
def make_app():
"""Returns an application with a handler for only '/'."""
return tornado.web.Application([
(r"/", MainHandler),
])
if __name__ == "__main__":
app = make_app()
app.listen(8000)
tornado.ioloop.IOLoop.current().start()
"""
Responds to this curl
curl -H "Content-Type: application
/json" -X POST -d '{"username":"xyz","password":"xyz"}' http://localhost:8000
"""
|
<commit_before><commit_msg>Write a tornado server that responds to POST requests.<commit_after>import json
import tornado.ioloop
import tornado.web
class MainHandler(tornado.web.RequestHandler):
"""Handles post requests by responding with a JSON file."""
@tornado.web.asynchronous
def post(self):
data = json.loads(self.request.body.decode('utf-8'))
self.optimize_portfolio(data)
self.finish()
def optimize_portfolio(self, data):
Info = {'key': 'value'}
self.write(json.dumps(data))
def make_app():
"""Returns an application with a handler for only '/'."""
return tornado.web.Application([
(r"/", MainHandler),
])
if __name__ == "__main__":
app = make_app()
app.listen(8000)
tornado.ioloop.IOLoop.current().start()
"""
Responds to this curl
curl -H "Content-Type: application
/json" -X POST -d '{"username":"xyz","password":"xyz"}' http://localhost:8000
"""
|
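A note on the record above: @tornado.web.asynchronous was deprecated in Tornado 5.1 and removed in 6.0, and the explicit self.finish() it required is no longer needed. A hedged sketch of the same handler for Tornado 6+:

import json
import tornado.web

class MainHandler(tornado.web.RequestHandler):
    async def post(self):
        # A coroutine handler finishes the request automatically when it
        # returns, so no explicit self.finish() is required.
        data = json.loads(self.request.body.decode('utf-8'))
        self.write(json.dumps(data))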
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.