Dataset schema (column name and observed string length range, or number of distinct classes):

| column | dtype | lengths / classes |
|---|---|---|
| commit | string | 40–40 |
| old_file | string | 4–118 |
| new_file | string | 4–118 |
| old_contents | string | 0–2.94k |
| new_contents | string | 1–4.43k |
| subject | string | 15–444 |
| message | string | 16–3.45k |
| lang | string | 1 class |
| license | string | 13 classes |
| repos | string | 5–43.2k |
| prompt | string | 17–4.58k |
| response | string | 1–4.43k |
| prompt_tagged | string | 58–4.62k |
| response_tagged | string | 1–4.43k |
| text | string | 132–7.29k |
| text_tagged | string | 173–7.33k |

Each example record below lists these fields in order, separated by `|` delimiters.
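A minimal sketch of loading and inspecting a dataset with the schema above. This assumes the dump comes from a Hugging Face-style dataset; the path "your-namespace/your-dataset" is a placeholder, not the real dataset name.

```python
# Minimal sketch: load a dataset with the schema above and inspect one record.
# Assumption: "your-namespace/your-dataset" is a placeholder dataset path.
from datasets import load_dataset

ds = load_dataset("your-namespace/your-dataset", split="train")

print(ds.column_names)            # commit, old_file, new_file, old_contents, ...
row = ds[0]
print(row["commit"])              # 40-character commit SHA
print(row["subject"])             # commit subject line
print(row["new_contents"][:200])  # start of the post-commit file contents
```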
664dc5124f4786986b2d9e0c2d146c17d24f4dd2
|
data_preparation.py
|
data_preparation.py
|
# importing modules/ libraries
import pandas as pd
import numpy as np
orders_prior_df = pd.read_csv('Data/orders_prior_sample.csv')
print('length of orders_prior_df:', len(orders_prior_df))
order_products_prior_df = pd.read_csv('Data/order_products_prior_sample.csv')
print('length of order_products_prior_df:', len(order_products_prior_df))
grouped = order_products_prior_df.groupby('order_id')
grouped_data = pd.DataFrame()
grouped_data['order_id'] = grouped['order_id'].aggregate(np.mean)
def product_ids(group):
l = []
ord_id = group['order_id']
for e in group['product_id']:
l.append(str(e))
return ' '.join(l)
grouped_data['product_ids'] = grouped.apply(product_ids)
print('length of grouped_data:', len(grouped_data))
orders_prior_merged = pd.merge(orders_prior_df, grouped_data, on='order_id')
print('length of orders_prior_merged:', len(orders_prior_merged))
|
Merge prior product ids with orders ids first pass
|
feat: Merge prior product ids with orders ids first pass
|
Python
|
mit
|
rjegankumar/instacart_prediction_model
|
feat: Merge prior product ids with orders ids first pass
|
# importing modules/ libraries
import pandas as pd
import numpy as np
orders_prior_df = pd.read_csv('Data/orders_prior_sample.csv')
print('length of orders_prior_df:', len(orders_prior_df))
order_products_prior_df = pd.read_csv('Data/order_products_prior_sample.csv')
print('length of order_products_prior_df:', len(order_products_prior_df))
grouped = order_products_prior_df.groupby('order_id')
grouped_data = pd.DataFrame()
grouped_data['order_id'] = grouped['order_id'].aggregate(np.mean)
def product_ids(group):
l = []
ord_id = group['order_id']
for e in group['product_id']:
l.append(str(e))
return ' '.join(l)
grouped_data['product_ids'] = grouped.apply(product_ids)
print('length of grouped_data:', len(grouped_data))
orders_prior_merged = pd.merge(orders_prior_df, grouped_data, on='order_id')
print('length of orders_prior_merged:', len(orders_prior_merged))
|
<commit_before><commit_msg>feat: Merge prior product ids with orders ids first pass<commit_after>
|
# importing modules/ libraries
import pandas as pd
import numpy as np
orders_prior_df = pd.read_csv('Data/orders_prior_sample.csv')
print('length of orders_prior_df:', len(orders_prior_df))
order_products_prior_df = pd.read_csv('Data/order_products_prior_sample.csv')
print('length of order_products_prior_df:', len(order_products_prior_df))
grouped = order_products_prior_df.groupby('order_id')
grouped_data = pd.DataFrame()
grouped_data['order_id'] = grouped['order_id'].aggregate(np.mean)
def product_ids(group):
l = []
ord_id = group['order_id']
for e in group['product_id']:
l.append(str(e))
return ' '.join(l)
grouped_data['product_ids'] = grouped.apply(product_ids)
print('length of grouped_data:', len(grouped_data))
orders_prior_merged = pd.merge(orders_prior_df, grouped_data, on='order_id')
print('length of orders_prior_merged:', len(orders_prior_merged))
|
feat: Merge prior product ids with orders ids first pass# importing modules/ libraries
import pandas as pd
import numpy as np
orders_prior_df = pd.read_csv('Data/orders_prior_sample.csv')
print('length of orders_prior_df:', len(orders_prior_df))
order_products_prior_df = pd.read_csv('Data/order_products_prior_sample.csv')
print('length of order_products_prior_df:', len(order_products_prior_df))
grouped = order_products_prior_df.groupby('order_id')
grouped_data = pd.DataFrame()
grouped_data['order_id'] = grouped['order_id'].aggregate(np.mean)
def product_ids(group):
l = []
ord_id = group['order_id']
for e in group['product_id']:
l.append(str(e))
return ' '.join(l)
grouped_data['product_ids'] = grouped.apply(product_ids)
print('length of grouped_data:', len(grouped_data))
orders_prior_merged = pd.merge(orders_prior_df, grouped_data, on='order_id')
print('length of orders_prior_merged:', len(orders_prior_merged))
|
<commit_before><commit_msg>feat: Merge prior product ids with orders ids first pass<commit_after># importing modules/ libraries
import pandas as pd
import numpy as np
orders_prior_df = pd.read_csv('Data/orders_prior_sample.csv')
print('length of orders_prior_df:', len(orders_prior_df))
order_products_prior_df = pd.read_csv('Data/order_products_prior_sample.csv')
print('length of order_products_prior_df:', len(order_products_prior_df))
grouped = order_products_prior_df.groupby('order_id')
grouped_data = pd.DataFrame()
grouped_data['order_id'] = grouped['order_id'].aggregate(np.mean)
def product_ids(group):
l = []
ord_id = group['order_id']
for e in group['product_id']:
l.append(str(e))
return ' '.join(l)
grouped_data['product_ids'] = grouped.apply(product_ids)
print('length of grouped_data:', len(grouped_data))
orders_prior_merged = pd.merge(orders_prior_df, grouped_data, on='order_id')
print('length of orders_prior_merged:', len(orders_prior_merged))
|
|
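Judging from this first record and the ones that follow, the tagged fields appear to be simple concatenations of the raw columns: `prompt_tagged` wraps the pre-commit contents and the commit message in `<commit_before>`/`<commit_msg>`/`<commit_after>` markers, and `text_tagged` appends the post-commit contents. A small sketch of that apparent recipe (inferred from the examples, not an official specification; the helper name is made up):

```python
# Inferred from the example records: how the *_tagged fields seem to be built.
# This is an illustrative reconstruction, not the dataset's actual build code.
def build_tagged(old_contents: str, message: str, new_contents: str):
    prompt_tagged = f"<commit_before>{old_contents}<commit_msg>{message}<commit_after>"
    text_tagged = prompt_tagged + new_contents
    return prompt_tagged, text_tagged

# In the record above, old_contents is empty, so prompt_tagged reduces to
# "<commit_before><commit_msg>feat: Merge prior product ids ...<commit_after>".
```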
8013d91077a1b0a51c0caef0222114f98be91033
|
httpobs/tests/unittests/test_preload.py
|
httpobs/tests/unittests/test_preload.py
|
from unittest import TestCase
from httpobs.scanner.analyzer.utils import *
class TestPreloadPublicKeyPinning(TestCase):
def test_not_preloaded(self):
result = is_hpkp_preloaded('totallyfakehostname.insertsuperduperfakedomainhere.wtftld')
self.assertFalse(result)
def test_preloaded(self):
result = is_hpkp_preloaded('apis.google.com')
self.assertTrue(result['pinned'])
self.assertTrue(result['includeSubDomainsForPinning'])
result = is_hpkp_preloaded('foo.apis.google.com')
self.assertTrue(result['pinned'])
self.assertTrue(result['includeSubDomainsForPinning'])
# uses include_subdomains_for_pinning
result = is_hpkp_preloaded('dropboxstatic.com')
self.assertTrue(result['pinned'])
self.assertTrue(result['includeSubDomainsForPinning'])
class TestPreloadStrictTransportSecurity(TestCase):
def test_not_preloaded(self):
result = is_hsts_preloaded('totallyfakehostname.insertsuperduperfakedomainhere.wtftld')
self.assertFalse(result)
def test_preloaded(self):
result = is_hsts_preloaded('bugzilla.mozilla.org')
self.assertEquals('force-https', result['mode'])
self.assertTrue(result['includeSubDomains'])
result = is_hsts_preloaded('mail.yahoo.com')
self.assertEqual('force-https', result['mode'])
self.assertFalse(result['includeSubDomains'])
|
Add tests for HSTS/HPKP functions
|
Add tests for HSTS/HPKP functions
|
Python
|
mpl-2.0
|
april/http-observatory,mozilla/http-observatory,april/http-observatory,mozilla/http-observatory,april/http-observatory,mozilla/http-observatory
|
Add tests for HSTS/HPKP functions
|
from unittest import TestCase
from httpobs.scanner.analyzer.utils import *
class TestPreloadPublicKeyPinning(TestCase):
def test_not_preloaded(self):
result = is_hpkp_preloaded('totallyfakehostname.insertsuperduperfakedomainhere.wtftld')
self.assertFalse(result)
def test_preloaded(self):
result = is_hpkp_preloaded('apis.google.com')
self.assertTrue(result['pinned'])
self.assertTrue(result['includeSubDomainsForPinning'])
result = is_hpkp_preloaded('foo.apis.google.com')
self.assertTrue(result['pinned'])
self.assertTrue(result['includeSubDomainsForPinning'])
# uses include_subdomains_for_pinning
result = is_hpkp_preloaded('dropboxstatic.com')
self.assertTrue(result['pinned'])
self.assertTrue(result['includeSubDomainsForPinning'])
class TestPreloadStrictTransportSecurity(TestCase):
def test_not_preloaded(self):
result = is_hsts_preloaded('totallyfakehostname.insertsuperduperfakedomainhere.wtftld')
self.assertFalse(result)
def test_preloaded(self):
result = is_hsts_preloaded('bugzilla.mozilla.org')
self.assertEquals('force-https', result['mode'])
self.assertTrue(result['includeSubDomains'])
result = is_hsts_preloaded('mail.yahoo.com')
self.assertEqual('force-https', result['mode'])
self.assertFalse(result['includeSubDomains'])
|
<commit_before><commit_msg>Add tests for HSTS/HPKP functions<commit_after>
|
from unittest import TestCase
from httpobs.scanner.analyzer.utils import *
class TestPreloadPublicKeyPinning(TestCase):
def test_not_preloaded(self):
result = is_hpkp_preloaded('totallyfakehostname.insertsuperduperfakedomainhere.wtftld')
self.assertFalse(result)
def test_preloaded(self):
result = is_hpkp_preloaded('apis.google.com')
self.assertTrue(result['pinned'])
self.assertTrue(result['includeSubDomainsForPinning'])
result = is_hpkp_preloaded('foo.apis.google.com')
self.assertTrue(result['pinned'])
self.assertTrue(result['includeSubDomainsForPinning'])
# uses include_subdomains_for_pinning
result = is_hpkp_preloaded('dropboxstatic.com')
self.assertTrue(result['pinned'])
self.assertTrue(result['includeSubDomainsForPinning'])
class TestPreloadStrictTransportSecurity(TestCase):
def test_not_preloaded(self):
result = is_hsts_preloaded('totallyfakehostname.insertsuperduperfakedomainhere.wtftld')
self.assertFalse(result)
def test_preloaded(self):
result = is_hsts_preloaded('bugzilla.mozilla.org')
self.assertEquals('force-https', result['mode'])
self.assertTrue(result['includeSubDomains'])
result = is_hsts_preloaded('mail.yahoo.com')
self.assertEqual('force-https', result['mode'])
self.assertFalse(result['includeSubDomains'])
|
Add tests for HSTS/HPKP functionsfrom unittest import TestCase
from httpobs.scanner.analyzer.utils import *
class TestPreloadPublicKeyPinning(TestCase):
def test_not_preloaded(self):
result = is_hpkp_preloaded('totallyfakehostname.insertsuperduperfakedomainhere.wtftld')
self.assertFalse(result)
def test_preloaded(self):
result = is_hpkp_preloaded('apis.google.com')
self.assertTrue(result['pinned'])
self.assertTrue(result['includeSubDomainsForPinning'])
result = is_hpkp_preloaded('foo.apis.google.com')
self.assertTrue(result['pinned'])
self.assertTrue(result['includeSubDomainsForPinning'])
# uses include_subdomains_for_pinning
result = is_hpkp_preloaded('dropboxstatic.com')
self.assertTrue(result['pinned'])
self.assertTrue(result['includeSubDomainsForPinning'])
class TestPreloadStrictTransportSecurity(TestCase):
def test_not_preloaded(self):
result = is_hsts_preloaded('totallyfakehostname.insertsuperduperfakedomainhere.wtftld')
self.assertFalse(result)
def test_preloaded(self):
result = is_hsts_preloaded('bugzilla.mozilla.org')
self.assertEquals('force-https', result['mode'])
self.assertTrue(result['includeSubDomains'])
result = is_hsts_preloaded('mail.yahoo.com')
self.assertEqual('force-https', result['mode'])
self.assertFalse(result['includeSubDomains'])
|
<commit_before><commit_msg>Add tests for HSTS/HPKP functions<commit_after>from unittest import TestCase
from httpobs.scanner.analyzer.utils import *
class TestPreloadPublicKeyPinning(TestCase):
def test_not_preloaded(self):
result = is_hpkp_preloaded('totallyfakehostname.insertsuperduperfakedomainhere.wtftld')
self.assertFalse(result)
def test_preloaded(self):
result = is_hpkp_preloaded('apis.google.com')
self.assertTrue(result['pinned'])
self.assertTrue(result['includeSubDomainsForPinning'])
result = is_hpkp_preloaded('foo.apis.google.com')
self.assertTrue(result['pinned'])
self.assertTrue(result['includeSubDomainsForPinning'])
# uses include_subdomains_for_pinning
result = is_hpkp_preloaded('dropboxstatic.com')
self.assertTrue(result['pinned'])
self.assertTrue(result['includeSubDomainsForPinning'])
class TestPreloadStrictTransportSecurity(TestCase):
def test_not_preloaded(self):
result = is_hsts_preloaded('totallyfakehostname.insertsuperduperfakedomainhere.wtftld')
self.assertFalse(result)
def test_preloaded(self):
result = is_hsts_preloaded('bugzilla.mozilla.org')
self.assertEquals('force-https', result['mode'])
self.assertTrue(result['includeSubDomains'])
result = is_hsts_preloaded('mail.yahoo.com')
self.assertEqual('force-https', result['mode'])
self.assertFalse(result['includeSubDomains'])
|
|
482a00c0a479a06a0e8d54058bb8ea9a12be0023
|
api_examples/export_institution_csv.py
|
api_examples/export_institution_csv.py
|
#!/bin/python2.7
# -*- coding: utf-8 -*-
"""
A tool to insert institutions from CSV files.
Requirements:
- requests
- gusregon
- unicodecsv
- jmespath
Example usage:
To run help text use:
$ python insert_institution_csv.py -h
"""
from __future__ import print_function, unicode_literals
import argparse
import sys
import itertools
from Queue import Queue
import jmespath
import requests
import tqdm
import unicodecsv as csv
import requests_cache
try:
from urlparse import urljoin
except ImportError:
from urllib.parse import urljoin
class Client(object):
def __init__(self, start, s=None):
self.start = start
self.s = s or requests.Session()
def get_page_iter(self):
q = Queue()
q.put(self.start)
while not q.empty():
resp = self.s.get(url=q.get())
if resp.status_code != 200:
return
data = resp.json()
if data.get('next'):
q.put(data['next'])
for row in data['results']:
yield row
JMES_DEFAULT = "{name: name, url:url, pk:pk, email:email, tags:join(',',tags), jst:jst, regon:regon}"
class Command(object):
def __init__(self, argv):
self.argv = argv
self.args = self.get_build_args(argv[1:])
self.s = requests.Session() if not self.args.cache else requests_cache.CachedSession()
def get_build_args(self, argv):
parser = argparse.ArgumentParser()
parser.add_argument('--output',
required=True,
type=argparse.FileType('w'),
help="Output CSV-file")
parser.add_argument('--start',
required=True,
help="Start URL")
parser.add_argument('--jmes', type=jmespath.compile,
required=False,
help='JMESPath to convert values (default: "{}")'.format(JMES_DEFAULT),
default=jmespath.compile(JMES_DEFAULT))
parser.add_argument('--cache', action='store_true', help="Enable cache")
return parser.parse_args(argv)
def run(self):
client = Client(start=self.args.start, s=self.s)
data = client.get_page_iter()
first = next(data)
fieldnames = self.args.jmes.search(first).keys()
print("Identified fields: {}".format(", ".join(fieldnames)))
writer = csv.DictWriter(self.args.output, fieldnames=fieldnames)
writer.writeheader()
for item in tqdm.tqdm(itertools.chain([first], data)):
writer.writerow(self.args.jmes.search(item))
if __name__ == "__main__":
sys.exit(Command(sys.argv).run())
|
Add examples of API to export institutions
|
Add examples of API to export institutions
|
Python
|
mit
|
watchdogpolska/feder,watchdogpolska/feder,watchdogpolska/feder,watchdogpolska/feder
|
Add examples of API to export institutions
|
#!/bin/python2.7
# -*- coding: utf-8 -*-
"""
A tool to insert institutions from CSV files.
Requirements:
- requests
- gusregon
- unicodecsv
- jmespath
Example usage:
To run help text use:
$ python insert_institution_csv.py -h
"""
from __future__ import print_function, unicode_literals
import argparse
import sys
import itertools
from Queue import Queue
import jmespath
import requests
import tqdm
import unicodecsv as csv
import requests_cache
try:
from urlparse import urljoin
except ImportError:
from urllib.parse import urljoin
class Client(object):
def __init__(self, start, s=None):
self.start = start
self.s = s or requests.Session()
def get_page_iter(self):
q = Queue()
q.put(self.start)
while not q.empty():
resp = self.s.get(url=q.get())
if resp.status_code != 200:
return
data = resp.json()
if data.get('next'):
q.put(data['next'])
for row in data['results']:
yield row
JMES_DEFAULT = "{name: name, url:url, pk:pk, email:email, tags:join(',',tags), jst:jst, regon:regon}"
class Command(object):
def __init__(self, argv):
self.argv = argv
self.args = self.get_build_args(argv[1:])
self.s = requests.Session() if not self.args.cache else requests_cache.CachedSession()
def get_build_args(self, argv):
parser = argparse.ArgumentParser()
parser.add_argument('--output',
required=True,
type=argparse.FileType('w'),
help="Output CSV-file")
parser.add_argument('--start',
required=True,
help="Start URL")
parser.add_argument('--jmes', type=jmespath.compile,
required=False,
help='JMESPath to convert values (default: "{}")'.format(JMES_DEFAULT),
default=jmespath.compile(JMES_DEFAULT))
parser.add_argument('--cache', action='store_true', help="Enable cache")
return parser.parse_args(argv)
def run(self):
client = Client(start=self.args.start, s=self.s)
data = client.get_page_iter()
first = next(data)
fieldnames = self.args.jmes.search(first).keys()
print("Identified fields: {}".format(", ".join(fieldnames)))
writer = csv.DictWriter(self.args.output, fieldnames=fieldnames)
writer.writeheader()
for item in tqdm.tqdm(itertools.chain([first], data)):
writer.writerow(self.args.jmes.search(item))
if __name__ == "__main__":
sys.exit(Command(sys.argv).run())
|
<commit_before><commit_msg>Add examples of API to export institutions<commit_after>
|
#!/bin/python2.7
# -*- coding: utf-8 -*-
"""
A tool to insert institutions from CSV files.
Requirements:
- requests
- gusregon
- unicodecsv
- jmespath
Example usage:
To run help text use:
$ python insert_institution_csv.py -h
"""
from __future__ import print_function, unicode_literals
import argparse
import sys
import itertools
from Queue import Queue
import jmespath
import requests
import tqdm
import unicodecsv as csv
import requests_cache
try:
from urlparse import urljoin
except ImportError:
from urllib.parse import urljoin
class Client(object):
def __init__(self, start, s=None):
self.start = start
self.s = s or requests.Session()
def get_page_iter(self):
q = Queue()
q.put(self.start)
while not q.empty():
resp = self.s.get(url=q.get())
if resp.status_code != 200:
return
data = resp.json()
if data.get('next'):
q.put(data['next'])
for row in data['results']:
yield row
JMES_DEFAULT = "{name: name, url:url, pk:pk, email:email, tags:join(',',tags), jst:jst, regon:regon}"
class Command(object):
def __init__(self, argv):
self.argv = argv
self.args = self.get_build_args(argv[1:])
self.s = requests.Session() if not self.args.cache else requests_cache.CachedSession()
def get_build_args(self, argv):
parser = argparse.ArgumentParser()
parser.add_argument('--output',
required=True,
type=argparse.FileType('w'),
help="Output CSV-file")
parser.add_argument('--start',
required=True,
help="Start URL")
parser.add_argument('--jmes', type=jmespath.compile,
required=False,
help='JMESPath to convert values (default: "{}")'.format(JMES_DEFAULT),
default=jmespath.compile(JMES_DEFAULT))
parser.add_argument('--cache', action='store_true', help="Enable cache")
return parser.parse_args(argv)
def run(self):
client = Client(start=self.args.start, s=self.s)
data = client.get_page_iter()
first = next(data)
fieldnames = self.args.jmes.search(first).keys()
print("Identified fields: {}".format(", ".join(fieldnames)))
writer = csv.DictWriter(self.args.output, fieldnames=fieldnames)
writer.writeheader()
for item in tqdm.tqdm(itertools.chain([first], data)):
writer.writerow(self.args.jmes.search(item))
if __name__ == "__main__":
sys.exit(Command(sys.argv).run())
|
Add examples of API to export institutions#!/bin/python2.7
# -*- coding: utf-8 -*-
"""
A tool to insert institutions from CSV files.
Requirements:
- requests
- gusregon
- unicodecsv
- jmespath
Example usage:
To run help text use:
$ python insert_institution_csv.py -h
"""
from __future__ import print_function, unicode_literals
import argparse
import sys
import itertools
from Queue import Queue
import jmespath
import requests
import tqdm
import unicodecsv as csv
import requests_cache
try:
from urlparse import urljoin
except ImportError:
from urllib.parse import urljoin
class Client(object):
def __init__(self, start, s=None):
self.start = start
self.s = s or requests.Session()
def get_page_iter(self):
q = Queue()
q.put(self.start)
while not q.empty():
resp = self.s.get(url=q.get())
if resp.status_code != 200:
return
data = resp.json()
if data.get('next'):
q.put(data['next'])
for row in data['results']:
yield row
JMES_DEFAULT = "{name: name, url:url, pk:pk, email:email, tags:join(',',tags), jst:jst, regon:regon}"
class Command(object):
def __init__(self, argv):
self.argv = argv
self.args = self.get_build_args(argv[1:])
self.s = requests.Session() if not self.args.cache else requests_cache.CachedSession()
def get_build_args(self, argv):
parser = argparse.ArgumentParser()
parser.add_argument('--output',
required=True,
type=argparse.FileType('w'),
help="Output CSV-file")
parser.add_argument('--start',
required=True,
help="Start URL")
parser.add_argument('--jmes', type=jmespath.compile,
required=False,
help='JMESPath to convert values (default: "{}")'.format(JMES_DEFAULT),
default=jmespath.compile(JMES_DEFAULT))
parser.add_argument('--cache', action='store_true', help="Enable cache")
return parser.parse_args(argv)
def run(self):
client = Client(start=self.args.start, s=self.s)
data = client.get_page_iter()
first = next(data)
fieldnames = self.args.jmes.search(first).keys()
print("Identified fields: {}".format(", ".join(fieldnames)))
writer = csv.DictWriter(self.args.output, fieldnames=fieldnames)
writer.writeheader()
for item in tqdm.tqdm(itertools.chain([first], data)):
writer.writerow(self.args.jmes.search(item))
if __name__ == "__main__":
sys.exit(Command(sys.argv).run())
|
<commit_before><commit_msg>Add examples of API to export institutions<commit_after>#!/bin/python2.7
# -*- coding: utf-8 -*-
"""
A tool to insert institutions from CSV files.
Requirements:
- requests
- gusregon
- unicodecsv
- jmespath
Example usage:
To run help text use:
$ python insert_institution_csv.py -h
"""
from __future__ import print_function, unicode_literals
import argparse
import sys
import itertools
from Queue import Queue
import jmespath
import requests
import tqdm
import unicodecsv as csv
import requests_cache
try:
from urlparse import urljoin
except ImportError:
from urllib.parse import urljoin
class Client(object):
def __init__(self, start, s=None):
self.start = start
self.s = s or requests.Session()
def get_page_iter(self):
q = Queue()
q.put(self.start)
while not q.empty():
resp = self.s.get(url=q.get())
if resp.status_code != 200:
return
data = resp.json()
if data.get('next'):
q.put(data['next'])
for row in data['results']:
yield row
JMES_DEFAULT = "{name: name, url:url, pk:pk, email:email, tags:join(',',tags), jst:jst, regon:regon}"
class Command(object):
def __init__(self, argv):
self.argv = argv
self.args = self.get_build_args(argv[1:])
self.s = requests.Session() if not self.args.cache else requests_cache.CachedSession()
def get_build_args(self, argv):
parser = argparse.ArgumentParser()
parser.add_argument('--output',
required=True,
type=argparse.FileType('w'),
help="Output CSV-file")
parser.add_argument('--start',
required=True,
help="Start URL")
parser.add_argument('--jmes', type=jmespath.compile,
required=False,
help='JMESPath to convert values (default: "{}")'.format(JMES_DEFAULT),
default=jmespath.compile(JMES_DEFAULT))
parser.add_argument('--cache', action='store_true', help="Enable cache")
return parser.parse_args(argv)
def run(self):
client = Client(start=self.args.start, s=self.s)
data = client.get_page_iter()
first = next(data)
fieldnames = self.args.jmes.search(first).keys()
print("Identified fields: {}".format(", ".join(fieldnames)))
writer = csv.DictWriter(self.args.output, fieldnames=fieldnames)
writer.writeheader()
for item in tqdm.tqdm(itertools.chain([first], data)):
writer.writerow(self.args.jmes.search(item))
if __name__ == "__main__":
sys.exit(Command(sys.argv).run())
|
|
ab9c8fca3b53a9f4a3e9b32aa94a597eed5e1e4b
|
councilmatic_core/migrations/0049_alter_person_headshot.py
|
councilmatic_core/migrations/0049_alter_person_headshot.py
|
# Generated by Django 2.1.9 on 2019-07-17 15:16
import django.core.files.storage
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('councilmatic_core', '0048_post_shape'),
]
operations = [
migrations.AlterField(
model_name='person',
name='headshot',
field=models.FileField(default='images/headshot_placeholder.png', storage=django.core.files.storage.FileSystemStorage(base_url='/', location='/Users/goobzie/datamade/chi-councilmatic'), upload_to='images/headshots'),
),
]
|
Add missing migration for Person.headshot
|
Add missing migration for Person.headshot
|
Python
|
mit
|
datamade/django-councilmatic,datamade/django-councilmatic,datamade/django-councilmatic,datamade/django-councilmatic
|
Add missing migration for Person.headshot
|
# Generated by Django 2.1.9 on 2019-07-17 15:16
import django.core.files.storage
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('councilmatic_core', '0048_post_shape'),
]
operations = [
migrations.AlterField(
model_name='person',
name='headshot',
field=models.FileField(default='images/headshot_placeholder.png', storage=django.core.files.storage.FileSystemStorage(base_url='/', location='/Users/goobzie/datamade/chi-councilmatic'), upload_to='images/headshots'),
),
]
|
<commit_before><commit_msg>Add missing migration for Person.headshot<commit_after>
|
# Generated by Django 2.1.9 on 2019-07-17 15:16
import django.core.files.storage
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('councilmatic_core', '0048_post_shape'),
]
operations = [
migrations.AlterField(
model_name='person',
name='headshot',
field=models.FileField(default='images/headshot_placeholder.png', storage=django.core.files.storage.FileSystemStorage(base_url='/', location='/Users/goobzie/datamade/chi-councilmatic'), upload_to='images/headshots'),
),
]
|
Add missing migration for Person.headshot# Generated by Django 2.1.9 on 2019-07-17 15:16
import django.core.files.storage
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('councilmatic_core', '0048_post_shape'),
]
operations = [
migrations.AlterField(
model_name='person',
name='headshot',
field=models.FileField(default='images/headshot_placeholder.png', storage=django.core.files.storage.FileSystemStorage(base_url='/', location='/Users/goobzie/datamade/chi-councilmatic'), upload_to='images/headshots'),
),
]
|
<commit_before><commit_msg>Add missing migration for Person.headshot<commit_after># Generated by Django 2.1.9 on 2019-07-17 15:16
import django.core.files.storage
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('councilmatic_core', '0048_post_shape'),
]
operations = [
migrations.AlterField(
model_name='person',
name='headshot',
field=models.FileField(default='images/headshot_placeholder.png', storage=django.core.files.storage.FileSystemStorage(base_url='/', location='/Users/goobzie/datamade/chi-councilmatic'), upload_to='images/headshots'),
),
]
|
|
da3b186ccb19d137d9a154470d88ee69eec8ef47
|
democracy/management/commands/democracy_remove_dupes.py
|
democracy/management/commands/democracy_remove_dupes.py
|
from datetime import timedelta
from optparse import make_option
from django.core.management.base import BaseCommand
from django.db.models import Count
from democracy.models import SectionComment
class Command(BaseCommand):
option_list = BaseCommand.option_list + (
make_option("--yes-i-know-what-im-doing", dest="nothing_can_go_wrong", action="store_true"),
)
def _remove_dupes(self, klass):
potential_dupes = klass.objects.values('content',).exclude(content='')\
.exclude(deleted=True).annotate(Count('content'))\
.filter(content__count__gt=1).order_by('-content__count')
for d in potential_dupes:
objs = list(klass.objects.filter(content=d['content']).order_by('created_at'))
first = objs.pop(0)
print("%s\n%s" % (first.created_at, first.content))
for other in objs:
if other.created_at - first.created_at > timedelta(hours=1):
print("\ttoo late %s" % (other.created_at - first.created_at))
continue
if getattr(other, klass.parent_field) != getattr(first, klass.parent_field):
print("\tdifferent parent %s" % other)
continue
if other.plugin_data != first.plugin_data:
print("\tplugin data differs %s" % other)
continue
print("\tbye bye %s" % other)
other.soft_delete()
def handle(self, *args, **options):
if not options.pop("nothing_can_go_wrong", False):
raise Exception("You don't know what you're doing.")
self._remove_dupes(SectionComment)
|
Implement management command for removing duplicate comments
|
Implement management command for removing duplicate comments
|
Python
|
mit
|
vikoivun/kerrokantasi,stephawe/kerrokantasi,stephawe/kerrokantasi,City-of-Helsinki/kerrokantasi,City-of-Helsinki/kerrokantasi,City-of-Helsinki/kerrokantasi,stephawe/kerrokantasi,vikoivun/kerrokantasi,City-of-Helsinki/kerrokantasi,vikoivun/kerrokantasi
|
Implement management command for removing duplicate comments
|
from datetime import timedelta
from optparse import make_option
from django.core.management.base import BaseCommand
from django.db.models import Count
from democracy.models import SectionComment
class Command(BaseCommand):
option_list = BaseCommand.option_list + (
make_option("--yes-i-know-what-im-doing", dest="nothing_can_go_wrong", action="store_true"),
)
def _remove_dupes(self, klass):
potential_dupes = klass.objects.values('content',).exclude(content='')\
.exclude(deleted=True).annotate(Count('content'))\
.filter(content__count__gt=1).order_by('-content__count')
for d in potential_dupes:
objs = list(klass.objects.filter(content=d['content']).order_by('created_at'))
first = objs.pop(0)
print("%s\n%s" % (first.created_at, first.content))
for other in objs:
if other.created_at - first.created_at > timedelta(hours=1):
print("\ttoo late %s" % (other.created_at - first.created_at))
continue
if getattr(other, klass.parent_field) != getattr(first, klass.parent_field):
print("\tdifferent parent %s" % other)
continue
if other.plugin_data != first.plugin_data:
print("\tplugin data differs %s" % other)
continue
print("\tbye bye %s" % other)
other.soft_delete()
def handle(self, *args, **options):
if not options.pop("nothing_can_go_wrong", False):
raise Exception("You don't know what you're doing.")
self._remove_dupes(SectionComment)
|
<commit_before><commit_msg>Implement management command for removing duplicate comments<commit_after>
|
from datetime import timedelta
from optparse import make_option
from django.core.management.base import BaseCommand
from django.db.models import Count
from democracy.models import SectionComment
class Command(BaseCommand):
option_list = BaseCommand.option_list + (
make_option("--yes-i-know-what-im-doing", dest="nothing_can_go_wrong", action="store_true"),
)
def _remove_dupes(self, klass):
potential_dupes = klass.objects.values('content',).exclude(content='')\
.exclude(deleted=True).annotate(Count('content'))\
.filter(content__count__gt=1).order_by('-content__count')
for d in potential_dupes:
objs = list(klass.objects.filter(content=d['content']).order_by('created_at'))
first = objs.pop(0)
print("%s\n%s" % (first.created_at, first.content))
for other in objs:
if other.created_at - first.created_at > timedelta(hours=1):
print("\ttoo late %s" % (other.created_at - first.created_at))
continue
if getattr(other, klass.parent_field) != getattr(first, klass.parent_field):
print("\tdifferent parent %s" % other)
continue
if other.plugin_data != first.plugin_data:
print("\tplugin data differs %s" % other)
continue
print("\tbye bye %s" % other)
other.soft_delete()
def handle(self, *args, **options):
if not options.pop("nothing_can_go_wrong", False):
raise Exception("You don't know what you're doing.")
self._remove_dupes(SectionComment)
|
Implement management command for removing duplicate commentsfrom datetime import timedelta
from optparse import make_option
from django.core.management.base import BaseCommand
from django.db.models import Count
from democracy.models import SectionComment
class Command(BaseCommand):
option_list = BaseCommand.option_list + (
make_option("--yes-i-know-what-im-doing", dest="nothing_can_go_wrong", action="store_true"),
)
def _remove_dupes(self, klass):
potential_dupes = klass.objects.values('content',).exclude(content='')\
.exclude(deleted=True).annotate(Count('content'))\
.filter(content__count__gt=1).order_by('-content__count')
for d in potential_dupes:
objs = list(klass.objects.filter(content=d['content']).order_by('created_at'))
first = objs.pop(0)
print("%s\n%s" % (first.created_at, first.content))
for other in objs:
if other.created_at - first.created_at > timedelta(hours=1):
print("\ttoo late %s" % (other.created_at - first.created_at))
continue
if getattr(other, klass.parent_field) != getattr(first, klass.parent_field):
print("\tdifferent parent %s" % other)
continue
if other.plugin_data != first.plugin_data:
print("\tplugin data differs %s" % other)
continue
print("\tbye bye %s" % other)
other.soft_delete()
def handle(self, *args, **options):
if not options.pop("nothing_can_go_wrong", False):
raise Exception("You don't know what you're doing.")
self._remove_dupes(SectionComment)
|
<commit_before><commit_msg>Implement management command for removing duplicate comments<commit_after>from datetime import timedelta
from optparse import make_option
from django.core.management.base import BaseCommand
from django.db.models import Count
from democracy.models import SectionComment
class Command(BaseCommand):
option_list = BaseCommand.option_list + (
make_option("--yes-i-know-what-im-doing", dest="nothing_can_go_wrong", action="store_true"),
)
def _remove_dupes(self, klass):
potential_dupes = klass.objects.values('content',).exclude(content='')\
.exclude(deleted=True).annotate(Count('content'))\
.filter(content__count__gt=1).order_by('-content__count')
for d in potential_dupes:
objs = list(klass.objects.filter(content=d['content']).order_by('created_at'))
first = objs.pop(0)
print("%s\n%s" % (first.created_at, first.content))
for other in objs:
if other.created_at - first.created_at > timedelta(hours=1):
print("\ttoo late %s" % (other.created_at - first.created_at))
continue
if getattr(other, klass.parent_field) != getattr(first, klass.parent_field):
print("\tdifferent parent %s" % other)
continue
if other.plugin_data != first.plugin_data:
print("\tplugin data differs %s" % other)
continue
print("\tbye bye %s" % other)
other.soft_delete()
def handle(self, *args, **options):
if not options.pop("nothing_can_go_wrong", False):
raise Exception("You don't know what you're doing.")
self._remove_dupes(SectionComment)
|
|
cfcd9d72eba2b2f59586816693a93bdab1f01c70
|
src/plan_tool.py
|
src/plan_tool.py
|
#!env python
from argparse import ArgumentParser
import json
import database as db
from database.model import Team, RouteDistance
from geotools import openroute_link
from geotools.routing import MapPoint
from webapp.cfg.config import DB_CONNECTION
db.init_session(connection_string=DB_CONNECTION)
def read_legacy_plan(in_file):
with open(in_file, "r") as in_fn:
data = json.load(in_fn)
result = {}
for entry in data:
result[entry["team_id"][0]] = [station[0] for station in entry["plan"]]
return result
def read_plan_file(args):
result = {}
if args.inform == "legacy":
result = read_legacy_plan(args.in_file)
elif args.inform == "dan_marc":
print "to be implemented"
return result
def cmd_convert_plan(args):
result = read_plan_file(args)
with open(args.out_file, "w+") as out_fn:
json.dump(result, out_fn)
def cmd_print_plan(args):
result = read_plan_file(args)
teams = {}
for team in db.session.query(Team).filter_by(deleted=False).filter_by(confirmed=True):
teams[str(team.id)] = team
for entry in result:
team = teams[entry]
plan = result[entry]
print "# %s ::" % team.name
station_points = []
last_station = None
for station in plan:
station_team = teams[station]
dist = ""
if last_station is not None:
distance = db.session.query(RouteDistance).filter_by(location_from=last_station.location,
location_to=station_team.location).first()
dist = "[dist=%d]" % distance.distance
print "+ %s %s" % (station_team.name, dist)
station_points.append(MapPoint.from_team(station_team))
last_station = station_team
print "- route: %s" % openroute_link(station_points)
print ""
def parse_args():
args = ArgumentParser()
subcommands = args.add_subparsers()
args.add_argument("--inform", help="Specify the input format", required=True,
choices=("legacy", "dan_marc"))
args.add_argument("in_file", help="The file to convert")
convert_parser = subcommands.add_parser("convert")
convert_parser.add_argument("out_file", help="The output file")
convert_parser.set_defaults(func=cmd_convert_plan)
print_parser = subcommands.add_parser("print")
print_parser.add_argument("--osm", action="store_true", help="build osm route links")
print_parser.set_defaults(func=cmd_print_plan)
return args.parse_args()
if __name__ == "__main__":
args = parse_args()
args.func(args)
|
Add a script to process the team plans.
|
Add a script to process the team plans.
It can read the legacy json format and print the old overview.
It can also convert the legacy format to some more readeable.
|
Python
|
bsd-3-clause
|
janLo/meet-and-eat-registration-system,janLo/meet-and-eat-registration-system,eXma/meet-and-eat-registration-system,eXma/meet-and-eat-registration-system,eXma/meet-and-eat-registration-system,janLo/meet-and-eat-registration-system,eXma/meet-and-eat-registration-system,janLo/meet-and-eat-registration-system
|
Add a script to process the team plans.
It can read the legacy json format and print the old overview.
It can also convert the legacy format to some more readeable.
|
#!env python
from argparse import ArgumentParser
import json
import database as db
from database.model import Team, RouteDistance
from geotools import openroute_link
from geotools.routing import MapPoint
from webapp.cfg.config import DB_CONNECTION
db.init_session(connection_string=DB_CONNECTION)
def read_legacy_plan(in_file):
with open(in_file, "r") as in_fn:
data = json.load(in_fn)
result = {}
for entry in data:
result[entry["team_id"][0]] = [station[0] for station in entry["plan"]]
return result
def read_plan_file(args):
result = {}
if args.inform == "legacy":
result = read_legacy_plan(args.in_file)
elif args.inform == "dan_marc":
print "to be implemented"
return result
def cmd_convert_plan(args):
result = read_plan_file(args)
with open(args.out_file, "w+") as out_fn:
json.dump(result, out_fn)
def cmd_print_plan(args):
result = read_plan_file(args)
teams = {}
for team in db.session.query(Team).filter_by(deleted=False).filter_by(confirmed=True):
teams[str(team.id)] = team
for entry in result:
team = teams[entry]
plan = result[entry]
print "# %s ::" % team.name
station_points = []
last_station = None
for station in plan:
station_team = teams[station]
dist = ""
if last_station is not None:
distance = db.session.query(RouteDistance).filter_by(location_from=last_station.location,
location_to=station_team.location).first()
dist = "[dist=%d]" % distance.distance
print "+ %s %s" % (station_team.name, dist)
station_points.append(MapPoint.from_team(station_team))
last_station = station_team
print "- route: %s" % openroute_link(station_points)
print ""
def parse_args():
args = ArgumentParser()
subcommands = args.add_subparsers()
args.add_argument("--inform", help="Specify the input format", required=True,
choices=("legacy", "dan_marc"))
args.add_argument("in_file", help="The file to convert")
convert_parser = subcommands.add_parser("convert")
convert_parser.add_argument("out_file", help="The output file")
convert_parser.set_defaults(func=cmd_convert_plan)
print_parser = subcommands.add_parser("print")
print_parser.add_argument("--osm", action="store_true", help="build osm route links")
print_parser.set_defaults(func=cmd_print_plan)
return args.parse_args()
if __name__ == "__main__":
args = parse_args()
args.func(args)
|
<commit_before><commit_msg>Add a script to process the team plans.
It can read the legacy json format and print the old overview.
It can also convert the legacy format to some more readeable.<commit_after>
|
#!env python
from argparse import ArgumentParser
import json
import database as db
from database.model import Team, RouteDistance
from geotools import openroute_link
from geotools.routing import MapPoint
from webapp.cfg.config import DB_CONNECTION
db.init_session(connection_string=DB_CONNECTION)
def read_legacy_plan(in_file):
with open(in_file, "r") as in_fn:
data = json.load(in_fn)
result = {}
for entry in data:
result[entry["team_id"][0]] = [station[0] for station in entry["plan"]]
return result
def read_plan_file(args):
result = {}
if args.inform == "legacy":
result = read_legacy_plan(args.in_file)
elif args.inform == "dan_marc":
print "to be implemented"
return result
def cmd_convert_plan(args):
result = read_plan_file(args)
with open(args.out_file, "w+") as out_fn:
json.dump(result, out_fn)
def cmd_print_plan(args):
result = read_plan_file(args)
teams = {}
for team in db.session.query(Team).filter_by(deleted=False).filter_by(confirmed=True):
teams[str(team.id)] = team
for entry in result:
team = teams[entry]
plan = result[entry]
print "# %s ::" % team.name
station_points = []
last_station = None
for station in plan:
station_team = teams[station]
dist = ""
if last_station is not None:
distance = db.session.query(RouteDistance).filter_by(location_from=last_station.location,
location_to=station_team.location).first()
dist = "[dist=%d]" % distance.distance
print "+ %s %s" % (station_team.name, dist)
station_points.append(MapPoint.from_team(station_team))
last_station = station_team
print "- route: %s" % openroute_link(station_points)
print ""
def parse_args():
args = ArgumentParser()
subcommands = args.add_subparsers()
args.add_argument("--inform", help="Specify the input format", required=True,
choices=("legacy", "dan_marc"))
args.add_argument("in_file", help="The file to convert")
convert_parser = subcommands.add_parser("convert")
convert_parser.add_argument("out_file", help="The output file")
convert_parser.set_defaults(func=cmd_convert_plan)
print_parser = subcommands.add_parser("print")
print_parser.add_argument("--osm", action="store_true", help="build osm route links")
print_parser.set_defaults(func=cmd_print_plan)
return args.parse_args()
if __name__ == "__main__":
args = parse_args()
args.func(args)
|
Add a script to process the team plans.
It can read the legacy json format and print the old overview.
It can also convert the legacy format to some more readeable.#!env python
from argparse import ArgumentParser
import json
import database as db
from database.model import Team, RouteDistance
from geotools import openroute_link
from geotools.routing import MapPoint
from webapp.cfg.config import DB_CONNECTION
db.init_session(connection_string=DB_CONNECTION)
def read_legacy_plan(in_file):
with open(in_file, "r") as in_fn:
data = json.load(in_fn)
result = {}
for entry in data:
result[entry["team_id"][0]] = [station[0] for station in entry["plan"]]
return result
def read_plan_file(args):
result = {}
if args.inform == "legacy":
result = read_legacy_plan(args.in_file)
elif args.inform == "dan_marc":
print "to be implemented"
return result
def cmd_convert_plan(args):
result = read_plan_file(args)
with open(args.out_file, "w+") as out_fn:
json.dump(result, out_fn)
def cmd_print_plan(args):
result = read_plan_file(args)
teams = {}
for team in db.session.query(Team).filter_by(deleted=False).filter_by(confirmed=True):
teams[str(team.id)] = team
for entry in result:
team = teams[entry]
plan = result[entry]
print "# %s ::" % team.name
station_points = []
last_station = None
for station in plan:
station_team = teams[station]
dist = ""
if last_station is not None:
distance = db.session.query(RouteDistance).filter_by(location_from=last_station.location,
location_to=station_team.location).first()
dist = "[dist=%d]" % distance.distance
print "+ %s %s" % (station_team.name, dist)
station_points.append(MapPoint.from_team(station_team))
last_station = station_team
print "- route: %s" % openroute_link(station_points)
print ""
def parse_args():
args = ArgumentParser()
subcommands = args.add_subparsers()
args.add_argument("--inform", help="Specify the input format", required=True,
choices=("legacy", "dan_marc"))
args.add_argument("in_file", help="The file to convert")
convert_parser = subcommands.add_parser("convert")
convert_parser.add_argument("out_file", help="The output file")
convert_parser.set_defaults(func=cmd_convert_plan)
print_parser = subcommands.add_parser("print")
print_parser.add_argument("--osm", action="store_true", help="build osm route links")
print_parser.set_defaults(func=cmd_print_plan)
return args.parse_args()
if __name__ == "__main__":
args = parse_args()
args.func(args)
|
<commit_before><commit_msg>Add a script to process the team plans.
It can read the legacy json format and print the old overview.
It can also convert the legacy format to some more readeable.<commit_after>#!env python
from argparse import ArgumentParser
import json
import database as db
from database.model import Team, RouteDistance
from geotools import openroute_link
from geotools.routing import MapPoint
from webapp.cfg.config import DB_CONNECTION
db.init_session(connection_string=DB_CONNECTION)
def read_legacy_plan(in_file):
with open(in_file, "r") as in_fn:
data = json.load(in_fn)
result = {}
for entry in data:
result[entry["team_id"][0]] = [station[0] for station in entry["plan"]]
return result
def read_plan_file(args):
result = {}
if args.inform == "legacy":
result = read_legacy_plan(args.in_file)
elif args.inform == "dan_marc":
print "to be implemented"
return result
def cmd_convert_plan(args):
result = read_plan_file(args)
with open(args.out_file, "w+") as out_fn:
json.dump(result, out_fn)
def cmd_print_plan(args):
result = read_plan_file(args)
teams = {}
for team in db.session.query(Team).filter_by(deleted=False).filter_by(confirmed=True):
teams[str(team.id)] = team
for entry in result:
team = teams[entry]
plan = result[entry]
print "# %s ::" % team.name
station_points = []
last_station = None
for station in plan:
station_team = teams[station]
dist = ""
if last_station is not None:
distance = db.session.query(RouteDistance).filter_by(location_from=last_station.location,
location_to=station_team.location).first()
dist = "[dist=%d]" % distance.distance
print "+ %s %s" % (station_team.name, dist)
station_points.append(MapPoint.from_team(station_team))
last_station = station_team
print "- route: %s" % openroute_link(station_points)
print ""
def parse_args():
args = ArgumentParser()
subcommands = args.add_subparsers()
args.add_argument("--inform", help="Specify the input format", required=True,
choices=("legacy", "dan_marc"))
args.add_argument("in_file", help="The file to convert")
convert_parser = subcommands.add_parser("convert")
convert_parser.add_argument("out_file", help="The output file")
convert_parser.set_defaults(func=cmd_convert_plan)
print_parser = subcommands.add_parser("print")
print_parser.add_argument("--osm", action="store_true", help="build osm route links")
print_parser.set_defaults(func=cmd_print_plan)
return args.parse_args()
if __name__ == "__main__":
args = parse_args()
args.func(args)
|
|
3ce0b4f7888aa01ff88fa94ff21ebd8d1f85da43
|
readthedocs/donate/migrations/0009_add-error-to-promos.py
|
readthedocs/donate/migrations/0009_add-error-to-promos.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.12 on 2017-03-24 16:03
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('donate', '0008_add-programming-language-filter'),
]
operations = [
migrations.AlterModelOptions(
name='supporterpromo',
options={'ordering': ('analytics_id', '-live')},
),
migrations.AlterField(
model_name='geofilter',
name='countries',
field=models.ManyToManyField(related_name='filters', to='donate.Country'),
),
migrations.AlterField(
model_name='supporterpromo',
name='display_type',
field=models.CharField(choices=[(b'doc', b'Documentation Pages'), (b'site-footer', b'Site Footer'), (b'search', b'Search Pages'), (b'error', b'Error Pages')], default=b'doc', max_length=200, verbose_name='Display Type'),
),
migrations.AlterField(
model_name='supporterpromo',
name='programming_language',
field=models.CharField(blank=True, choices=[(b'words', b'Only Words'), (b'py', b'Python'), (b'js', b'JavaScript'), (b'php', b'PHP'), (b'ruby', b'Ruby'), (b'perl', b'Perl'), (b'java', b'Java'), (b'go', b'Go'), (b'julia', b'Julia'), (b'c', b'C'), (b'csharp', b'C#'), (b'cpp', b'C++'), (b'objc', b'Objective-C'), (b'other', b'Other')], default=None, max_length=20, null=True, verbose_name='Programming Language'),
),
]
|
Add migration that catches the donate app up
|
Add migration that catches the donate app up
|
Python
|
mit
|
rtfd/readthedocs.org,pombredanne/readthedocs.org,davidfischer/readthedocs.org,pombredanne/readthedocs.org,tddv/readthedocs.org,rtfd/readthedocs.org,rtfd/readthedocs.org,davidfischer/readthedocs.org,safwanrahman/readthedocs.org,davidfischer/readthedocs.org,rtfd/readthedocs.org,pombredanne/readthedocs.org,safwanrahman/readthedocs.org,davidfischer/readthedocs.org,safwanrahman/readthedocs.org,tddv/readthedocs.org,tddv/readthedocs.org,safwanrahman/readthedocs.org
|
Add migration that catches the donate app up
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.12 on 2017-03-24 16:03
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('donate', '0008_add-programming-language-filter'),
]
operations = [
migrations.AlterModelOptions(
name='supporterpromo',
options={'ordering': ('analytics_id', '-live')},
),
migrations.AlterField(
model_name='geofilter',
name='countries',
field=models.ManyToManyField(related_name='filters', to='donate.Country'),
),
migrations.AlterField(
model_name='supporterpromo',
name='display_type',
field=models.CharField(choices=[(b'doc', b'Documentation Pages'), (b'site-footer', b'Site Footer'), (b'search', b'Search Pages'), (b'error', b'Error Pages')], default=b'doc', max_length=200, verbose_name='Display Type'),
),
migrations.AlterField(
model_name='supporterpromo',
name='programming_language',
field=models.CharField(blank=True, choices=[(b'words', b'Only Words'), (b'py', b'Python'), (b'js', b'JavaScript'), (b'php', b'PHP'), (b'ruby', b'Ruby'), (b'perl', b'Perl'), (b'java', b'Java'), (b'go', b'Go'), (b'julia', b'Julia'), (b'c', b'C'), (b'csharp', b'C#'), (b'cpp', b'C++'), (b'objc', b'Objective-C'), (b'other', b'Other')], default=None, max_length=20, null=True, verbose_name='Programming Language'),
),
]
|
<commit_before><commit_msg>Add migration that catches the donate app up<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.12 on 2017-03-24 16:03
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('donate', '0008_add-programming-language-filter'),
]
operations = [
migrations.AlterModelOptions(
name='supporterpromo',
options={'ordering': ('analytics_id', '-live')},
),
migrations.AlterField(
model_name='geofilter',
name='countries',
field=models.ManyToManyField(related_name='filters', to='donate.Country'),
),
migrations.AlterField(
model_name='supporterpromo',
name='display_type',
field=models.CharField(choices=[(b'doc', b'Documentation Pages'), (b'site-footer', b'Site Footer'), (b'search', b'Search Pages'), (b'error', b'Error Pages')], default=b'doc', max_length=200, verbose_name='Display Type'),
),
migrations.AlterField(
model_name='supporterpromo',
name='programming_language',
field=models.CharField(blank=True, choices=[(b'words', b'Only Words'), (b'py', b'Python'), (b'js', b'JavaScript'), (b'php', b'PHP'), (b'ruby', b'Ruby'), (b'perl', b'Perl'), (b'java', b'Java'), (b'go', b'Go'), (b'julia', b'Julia'), (b'c', b'C'), (b'csharp', b'C#'), (b'cpp', b'C++'), (b'objc', b'Objective-C'), (b'other', b'Other')], default=None, max_length=20, null=True, verbose_name='Programming Language'),
),
]
|
Add migration that catches the donate app up# -*- coding: utf-8 -*-
# Generated by Django 1.9.12 on 2017-03-24 16:03
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('donate', '0008_add-programming-language-filter'),
]
operations = [
migrations.AlterModelOptions(
name='supporterpromo',
options={'ordering': ('analytics_id', '-live')},
),
migrations.AlterField(
model_name='geofilter',
name='countries',
field=models.ManyToManyField(related_name='filters', to='donate.Country'),
),
migrations.AlterField(
model_name='supporterpromo',
name='display_type',
field=models.CharField(choices=[(b'doc', b'Documentation Pages'), (b'site-footer', b'Site Footer'), (b'search', b'Search Pages'), (b'error', b'Error Pages')], default=b'doc', max_length=200, verbose_name='Display Type'),
),
migrations.AlterField(
model_name='supporterpromo',
name='programming_language',
field=models.CharField(blank=True, choices=[(b'words', b'Only Words'), (b'py', b'Python'), (b'js', b'JavaScript'), (b'php', b'PHP'), (b'ruby', b'Ruby'), (b'perl', b'Perl'), (b'java', b'Java'), (b'go', b'Go'), (b'julia', b'Julia'), (b'c', b'C'), (b'csharp', b'C#'), (b'cpp', b'C++'), (b'objc', b'Objective-C'), (b'other', b'Other')], default=None, max_length=20, null=True, verbose_name='Programming Language'),
),
]
|
<commit_before><commit_msg>Add migration that catches the donate app up<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.9.12 on 2017-03-24 16:03
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('donate', '0008_add-programming-language-filter'),
]
operations = [
migrations.AlterModelOptions(
name='supporterpromo',
options={'ordering': ('analytics_id', '-live')},
),
migrations.AlterField(
model_name='geofilter',
name='countries',
field=models.ManyToManyField(related_name='filters', to='donate.Country'),
),
migrations.AlterField(
model_name='supporterpromo',
name='display_type',
field=models.CharField(choices=[(b'doc', b'Documentation Pages'), (b'site-footer', b'Site Footer'), (b'search', b'Search Pages'), (b'error', b'Error Pages')], default=b'doc', max_length=200, verbose_name='Display Type'),
),
migrations.AlterField(
model_name='supporterpromo',
name='programming_language',
field=models.CharField(blank=True, choices=[(b'words', b'Only Words'), (b'py', b'Python'), (b'js', b'JavaScript'), (b'php', b'PHP'), (b'ruby', b'Ruby'), (b'perl', b'Perl'), (b'java', b'Java'), (b'go', b'Go'), (b'julia', b'Julia'), (b'c', b'C'), (b'csharp', b'C#'), (b'cpp', b'C++'), (b'objc', b'Objective-C'), (b'other', b'Other')], default=None, max_length=20, null=True, verbose_name='Programming Language'),
),
]
|
|
a1e096791c12f35889f70655c8b4164593b6fd60
|
membership/migrations/0003_ensure_http_prefix_contact_uris.py
|
membership/migrations/0003_ensure_http_prefix_contact_uris.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
def ensure_http_prefix_contact(apps, schema_editor):
Contact = apps.get_model("membership", "Contact")
# Contacts with broken homepage field value
for contact in Contact.objects\
.exclude(homepage='')\
.exclude(homepage__startswith="http://")\
.exclude(homepage__startswith="https://"):
if contact.homepage:
if ":/" not in contact.homepage:
contact.homepage = "http://{uri}".format(uri=contact.homepage)
contact.save()
dependencies = [
('membership', '0002_charfields_to_not_null'),
]
operations = [
migrations.RunPython(ensure_http_prefix_contact)
]
|
Fix existing home page URIs with migration
|
Fix existing home page URIs with migration
|
Python
|
mit
|
annttu/sikteeri,AriMartti/sikteeri,joneskoo/sikteeri,kapsiry/sikteeri,kapsiry/sikteeri,kapsiry/sikteeri,AriMartti/sikteeri,kapsiry/sikteeri,annttu/sikteeri,annttu/sikteeri,AriMartti/sikteeri,joneskoo/sikteeri,joneskoo/sikteeri,annttu/sikteeri,AriMartti/sikteeri,joneskoo/sikteeri
|
Fix existing home page URIs with migration
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
def ensure_http_prefix_contact(apps, schema_editor):
Contact = apps.get_model("membership", "Contact")
# Contacts with broken homepage field value
for contact in Contact.objects\
.exclude(homepage='')\
.exclude(homepage__startswith="http://")\
.exclude(homepage__startswith="https://"):
if contact.homepage:
if ":/" not in contact.homepage:
contact.homepage = "http://{uri}".format(uri=contact.homepage)
contact.save()
dependencies = [
('membership', '0002_charfields_to_not_null'),
]
operations = [
migrations.RunPython(ensure_http_prefix_contact)
]
|
<commit_before><commit_msg>Fix existing home page URIs with migration<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
def ensure_http_prefix_contact(apps, schema_editor):
Contact = apps.get_model("membership", "Contact")
# Contacts with broken homepage field value
for contact in Contact.objects\
.exclude(homepage='')\
.exclude(homepage__startswith="http://")\
.exclude(homepage__startswith="https://"):
if contact.homepage:
if ":/" not in contact.homepage:
contact.homepage = "http://{uri}".format(uri=contact.homepage)
contact.save()
dependencies = [
('membership', '0002_charfields_to_not_null'),
]
operations = [
migrations.RunPython(ensure_http_prefix_contact)
]
|
Fix existing home page URIs with migration# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
def ensure_http_prefix_contact(apps, schema_editor):
Contact = apps.get_model("membership", "Contact")
# Contacts with broken homepage field value
for contact in Contact.objects\
.exclude(homepage='')\
.exclude(homepage__startswith="http://")\
.exclude(homepage__startswith="https://"):
if contact.homepage:
if ":/" not in contact.homepage:
contact.homepage = "http://{uri}".format(uri=contact.homepage)
contact.save()
dependencies = [
('membership', '0002_charfields_to_not_null'),
]
operations = [
migrations.RunPython(ensure_http_prefix_contact)
]
|
<commit_before><commit_msg>Fix existing home page URIs with migration<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
def ensure_http_prefix_contact(apps, schema_editor):
Contact = apps.get_model("membership", "Contact")
# Contacts with broken homepage field value
for contact in Contact.objects\
.exclude(homepage='')\
.exclude(homepage__startswith="http://")\
.exclude(homepage__startswith="https://"):
if contact.homepage:
if ":/" not in contact.homepage:
contact.homepage = "http://{uri}".format(uri=contact.homepage)
contact.save()
dependencies = [
('membership', '0002_charfields_to_not_null'),
]
operations = [
migrations.RunPython(ensure_http_prefix_contact)
]
|
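The prefixing rule used in this data migration is small enough to check in isolation. The sketch below restates it as a standalone function with a few spot checks; the function name and sample URIs are illustrative and are not part of the sikteeri code base.
def ensure_http_prefix(uri):
    # Mirror the migration: leave empty values and URIs that already
    # carry a scheme separator untouched, otherwise prepend "http://".
    if not uri or ":/" in uri:
        return uri
    return "http://{uri}".format(uri=uri)
assert ensure_http_prefix("") == ""
assert ensure_http_prefix("example.org") == "http://example.org"
assert ensure_http_prefix("https://example.org") == "https://example.org"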
|
08bd011a4783f2f5b162beedf6decd682168e753
|
lava_scheduler_app/migrations/0023_update_aliases.py
|
lava_scheduler_app/migrations/0023_update_aliases.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2016-11-24 13:29
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('lava_scheduler_app', '0022_create_devicetype_alias'),
]
operations = [
migrations.AlterField(
model_name='devicetype',
name='aliases',
field=models.ManyToManyField(blank=True, related_name='device_types', to='lava_scheduler_app.Alias'),
),
]
|
Update aliases migration for review change
|
Update aliases migration for review change
Change-Id: Ia0dca513f058d60c16fb2c6a41c766c99d0850d4
|
Python
|
agpl-3.0
|
Linaro/lava-server,Linaro/lava-server,Linaro/lava-server,Linaro/lava-server
|
Update aliases migration for review change
Change-Id: Ia0dca513f058d60c16fb2c6a41c766c99d0850d4
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2016-11-24 13:29
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('lava_scheduler_app', '0022_create_devicetype_alias'),
]
operations = [
migrations.AlterField(
model_name='devicetype',
name='aliases',
field=models.ManyToManyField(blank=True, related_name='device_types', to='lava_scheduler_app.Alias'),
),
]
|
<commit_before><commit_msg>Update aliases migration for review change
Change-Id: Ia0dca513f058d60c16fb2c6a41c766c99d0850d4<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2016-11-24 13:29
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('lava_scheduler_app', '0022_create_devicetype_alias'),
]
operations = [
migrations.AlterField(
model_name='devicetype',
name='aliases',
field=models.ManyToManyField(blank=True, related_name='device_types', to='lava_scheduler_app.Alias'),
),
]
|
Update aliases migration for review change
Change-Id: Ia0dca513f058d60c16fb2c6a41c766c99d0850d4# -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2016-11-24 13:29
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('lava_scheduler_app', '0022_create_devicetype_alias'),
]
operations = [
migrations.AlterField(
model_name='devicetype',
name='aliases',
field=models.ManyToManyField(blank=True, related_name='device_types', to='lava_scheduler_app.Alias'),
),
]
|
<commit_before><commit_msg>Update aliases migration for review change
Change-Id: Ia0dca513f058d60c16fb2c6a41c766c99d0850d4<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2016-11-24 13:29
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('lava_scheduler_app', '0022_create_devicetype_alias'),
]
operations = [
migrations.AlterField(
model_name='devicetype',
name='aliases',
field=models.ManyToManyField(blank=True, related_name='device_types', to='lava_scheduler_app.Alias'),
),
]
|
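The AlterField above implies a model declaration roughly like the following. This is a reconstruction from the migration alone, not the actual lava_scheduler_app source, and it assumes the Alias model lives in the same app.
from django.db import models
class DeviceType(models.Model):
    # blank=True and a reverse accessor named "device_types" on Alias,
    # matching the field definition recorded in the migration.
    aliases = models.ManyToManyField('Alias', blank=True,
                                     related_name='device_types')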
|
ceb33c4cfff7dab2a4e05df0b64bf307ce280a58
|
saleor/shipping/migrations/0018_default_zones_countries.py
|
saleor/shipping/migrations/0018_default_zones_countries.py
|
# Generated by Django 3.0.6 on 2020-06-05 14:35
from django.db import migrations
from ..utils import get_countries_without_shipping_zone
def assign_countries_in_default_shipping_zone(apps, schema_editor):
ShippingZone = apps.get_model("shipping", "ShippingZone")
qs = ShippingZone.objects.filter(default=True)
if qs.exists():
default_zone = qs[0]
if not default_zone.countries:
default_zone.countries = get_countries_without_shipping_zone()
default_zone.save(update_fields=["countries"])
class Migration(migrations.Migration):
dependencies = [
("shipping", "0017_django_price_2"),
]
operations = [
migrations.RunPython(
assign_countries_in_default_shipping_zone, migrations.RunPython.noop
)
]
|
Add migration to update existing shipping zones
|
Add migration to update existing shipping zones
|
Python
|
bsd-3-clause
|
mociepka/saleor,mociepka/saleor,mociepka/saleor
|
Add migration to update existing shipping zones
|
# Generated by Django 3.0.6 on 2020-06-05 14:35
from django.db import migrations
from ..utils import get_countries_without_shipping_zone
def assign_countries_in_default_shipping_zone(apps, schema_editor):
ShippingZone = apps.get_model("shipping", "ShippingZone")
qs = ShippingZone.objects.filter(default=True)
if qs.exists():
default_zone = qs[0]
if not default_zone.countries:
default_zone.countries = get_countries_without_shipping_zone()
default_zone.save(update_fields=["countries"])
class Migration(migrations.Migration):
dependencies = [
("shipping", "0017_django_price_2"),
]
operations = [
migrations.RunPython(
assign_countries_in_default_shipping_zone, migrations.RunPython.noop
)
]
|
<commit_before><commit_msg>Add migration to update existing shipping zones<commit_after>
|
# Generated by Django 3.0.6 on 2020-06-05 14:35
from django.db import migrations
from ..utils import get_countries_without_shipping_zone
def assign_countries_in_default_shipping_zone(apps, schema_editor):
ShippingZone = apps.get_model("shipping", "ShippingZone")
qs = ShippingZone.objects.filter(default=True)
if qs.exists():
default_zone = qs[0]
if not default_zone.countries:
default_zone.countries = get_countries_without_shipping_zone()
default_zone.save(update_fields=["countries"])
class Migration(migrations.Migration):
dependencies = [
("shipping", "0017_django_price_2"),
]
operations = [
migrations.RunPython(
assign_countries_in_default_shipping_zone, migrations.RunPython.noop
)
]
|
Add migration to update existing shipping zones# Generated by Django 3.0.6 on 2020-06-05 14:35
from django.db import migrations
from ..utils import get_countries_without_shipping_zone
def assign_countries_in_default_shipping_zone(apps, schema_editor):
ShippingZone = apps.get_model("shipping", "ShippingZone")
qs = ShippingZone.objects.filter(default=True)
if qs.exists():
default_zone = qs[0]
if not default_zone.countries:
default_zone.countries = get_countries_without_shipping_zone()
default_zone.save(update_fields=["countries"])
class Migration(migrations.Migration):
dependencies = [
("shipping", "0017_django_price_2"),
]
operations = [
migrations.RunPython(
assign_countries_in_default_shipping_zone, migrations.RunPython.noop
)
]
|
<commit_before><commit_msg>Add migration to update existing shipping zones<commit_after># Generated by Django 3.0.6 on 2020-06-05 14:35
from django.db import migrations
from ..utils import get_countries_without_shipping_zone
def assign_countries_in_default_shipping_zone(apps, schema_editor):
ShippingZone = apps.get_model("shipping", "ShippingZone")
qs = ShippingZone.objects.filter(default=True)
if qs.exists():
default_zone = qs[0]
if not default_zone.countries:
default_zone.countries = get_countries_without_shipping_zone()
default_zone.save(update_fields=["countries"])
class Migration(migrations.Migration):
dependencies = [
("shipping", "0017_django_price_2"),
]
operations = [
migrations.RunPython(
assign_countries_in_default_shipping_zone, migrations.RunPython.noop
)
]
|
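Stripped of the ORM, the guard logic of the forward handler can be exercised standalone. The dict-based stand-in for ShippingZone and the country codes below are assumptions for illustration only.
def assign_missing_countries(default_zones, leftover_countries):
    # Only the first default zone is touched, and only if it has no
    # countries assigned yet -- the same guards as the migration above.
    if default_zones:
        zone = default_zones[0]
        if not zone["countries"]:
            zone["countries"] = list(leftover_countries)
    return default_zones
zones = [{"default": True, "countries": []}]
print(assign_missing_countries(zones, ["DE", "FR"]))  # countries become ['DE', 'FR']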
|
eea45fcad4dda1656de0848b6128271060874850
|
string/kmp.py
|
string/kmp.py
|
# -*- coding:utf-8 -*-
def build_next_arr(pattern_s):
ps_len = len(pattern_s)
nxt = [-1] * ps_len
k = -1
for i in range(1, ps_len):
while k != -1 and pattern_s[k + 1] != pattern_s[i]:
k = nxt[k]
if pattern_s[k + 1] == pattern_s[i]:
k += 1
nxt[i] = k
return nxt
def kmp(string, pattern_s):
slen = len(string)
m = len(pattern_s)
nxt = build_next_arr(pattern_s)
j = 0
for i in range(slen):
while j > 0 and string[i] != pattern_s[j]:
j = nxt[j - 1] + 1
if string[i] == pattern_s[j]:
j += 1
if j == m:
return i - m + 1
return -1
if __name__ == '__main__':
string = "hello world! today is not my day"
ps = 'today'
print(string)
print(len(string))
print(kmp(string, ps))
|
Add KMP string search algorithm implementation
|
Add KMP string search algorithm implementation
|
Python
|
apache-2.0
|
free-free/algorithm,free-free/algorithm
|
Add KMP string search algorithm implementation
|
# -*- coding:utf-8 -*-
def build_next_arr(pattern_s):
ps_len = len(pattern_s)
nxt = [-1] * ps_len
k = -1
for i in range(1, ps_len):
while k != -1 and pattern_s[k + 1] != pattern_s[i]:
k = nxt[k]
if pattern_s[k + 1] == pattern_s[i]:
k += 1
nxt[i] = k
return nxt
def kmp(string, pattern_s):
slen = len(string)
m = len(pattern_s)
nxt = build_next_arr(pattern_s)
j = 0
for i in range(slen):
while j > 0 and string[i] != pattern_s[j]:
j = nxt[j - 1] + 1
if string[i] == pattern_s[j]:
j += 1
if j == m:
return i - m + 1
return -1
if __name__ == '__main__':
string = "hello world! today is not my day"
ps = 'today'
print(string)
print(len(string))
print(kmp(string, ps))
|
<commit_before><commit_msg>Add KMP string search algorithm implementation<commit_after>
|
# -*- coding:utf-8 -*-
def build_next_arr(pattern_s):
ps_len = len(pattern_s)
nxt = [-1] * ps_len
k = -1
for i in range(1, ps_len):
while k != -1 and pattern_s[k + 1] != pattern_s[i]:
k = nxt[k]
if pattern_s[k + 1] == pattern_s[i]:
k += 1
nxt[i] = k
return nxt
def kmp(string, pattern_s):
slen = len(string)
m = len(pattern_s)
nxt = build_next_arr(pattern_s)
j = 0
for i in range(slen):
while j > 0 and string[i] != pattern_s[j]:
j = nxt[j - 1] + 1
if string[i] == pattern_s[j]:
j += 1
if j == m:
return i - m + 1
return -1
if __name__ == '__main__':
string = "hello world! today is not my day"
ps = 'today'
print(string)
print(len(string))
print(kmp(string, ps))
|
Add KMP string search algorithm implementation# -*- coding:utf-8 -*-
def build_next_arr(pattern_s):
ps_len = len(pattern_s)
nxt = [-1] * ps_len
k = -1
for i in range(1, ps_len):
while k != -1 and pattern_s[k + 1] != pattern_s[i]:
k = nxt[k]
if pattern_s[k + 1] == pattern_s[i]:
k += 1
nxt[i] = k
return nxt
def kmp(string, pattern_s):
slen = len(string)
m = len(pattern_s)
nxt = build_next_arr(pattern_s)
j = 0
for i in range(slen):
while j > 0 and string[i] != pattern_s[j]:
j = nxt[j - 1] + 1
if string[i] == pattern_s[j]:
j += 1
if j == m:
return i - m + 1
return -1
if __name__ == '__main__':
string = "hello world! today is not my day"
ps = 'today'
print(string)
print(len(string))
print(kmp(string, ps))
|
<commit_before><commit_msg>Add KMP string search algorithm implementation<commit_after># -*- coding:utf-8 -*-
def build_next_arr(pattern_s):
ps_len = len(pattern_s)
nxt = [-1] * ps_len
k = -1
for i in range(1, ps_len):
while k != -1 and pattern_s[k + 1] != pattern_s[i]:
k = nxt[k]
if pattern_s[k + 1] == pattern_s[i]:
k += 1
nxt[i] = k
return nxt
def kmp(string, pattern_s):
slen = len(string)
m = len(pattern_s)
nxt = build_next_arr(pattern_s)
j = 0
for i in range(slen):
while j > 0 and string[i] != pattern_s[j]:
j = nxt[j - 1] + 1
if string[i] == pattern_s[j]:
j += 1
if j == m:
return i - m + 1
return -1
if __name__ == '__main__':
string = "hello world! today is not my day"
ps = 'today'
print(string)
print(len(string))
print(kmp(string, ps))
|
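A few extra sanity checks of the kmp/build_next_arr pair above; the additional cases (match at the start, no match, the textbook mismatch case) are illustrative and are not part of the committed file.
assert kmp("hello world! today is not my day", "today") == 13
assert kmp("aabaabaaa", "aabaa") == 0       # match at the very start
assert kmp("abc", "abcd") == -1             # pattern longer than the text
assert kmp("ababcabcacbab", "abcac") == 5   # classic mismatch-and-shift case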
|
18b3de9a03db4550d37dde0bf8be648b549b6530
|
scripts/registry-cli.py
|
scripts/registry-cli.py
|
#!/usr/bin/env python
"""A command-line client for the Docker registry."""
import argparse
import requests
def list_cmd(args):
"""List images"""
url = args.registry + '/v2/_catalog'
r = requests.get(url)
data = r.json()
for repo in data['repositories']:
print(repo)
def list_tags_cmd(args):
"""List tags for an image"""
url = args.registry + '/v2/' + args.name + '/tags/list'
r = requests.get(url)
data = r.json()
if data['tags'] is None:
print('No tags for {}'.format(args.name))
return
for repo in data['tags']:
print(repo)
def delete_tag_cmd(args):
"""Delete a tag from an image"""
url = args.registry + '/v2/' + args.name + '/manifests/' + args.tag
header = {'Accept': 'application/vnd.docker.distribution.manifest.v2+json'}
r = requests.get(url, headers=header)
if r.status_code == 404:
print('image/tag combination not found')
return
digest = r.headers['Docker-Content-Digest']
url = args.registry + '/v2/' + args.name + '/manifests/' + digest
r = requests.delete(url, headers=header)
if r.status_code != 202:
print('error')
else:
print('deleted')
def main():
"""Main entrypoint."""
parser = argparse.ArgumentParser(description="Docker Registry client")
parser.add_argument("-r", "--registry", help="Registry URL", default="http:127.0.0.1:5000")
subparser = parser.add_subparsers()
argparser_list = subparser.add_parser('list', help="Lists all the images in the registry")
argparser_list.set_defaults(func=list_cmd)
argparser_list_tags = subparser.add_parser('list-tags', help="Lists all tags for an image")
argparser_list_tags.add_argument('name', help="Name of the image")
argparser_list_tags.set_defaults(func=list_tags_cmd)
argparser_list_tags = subparser.add_parser('delete', help="Delete a tag from an image")
argparser_list_tags.add_argument('name', help="Name of the image")
argparser_list_tags.add_argument('tag', help="Name of the tag to delete")
argparser_list_tags.set_defaults(func=delete_tag_cmd)
args = parser.parse_args()
args.func(args)
if __name__ == "__main__":
main()
|
Add a script for doing basic maintenance on a private Docker Registry
|
Add a script for doing basic maintenance on a private Docker Registry
|
Python
|
apache-2.0
|
DistributedSystemsGroup/zoe,DistributedSystemsGroup/zoe,DistributedSystemsGroup/zoe,DistributedSystemsGroup/zoe,DistributedSystemsGroup/zoe
|
Add a script for doing basic maintenance on a private Docker Registry
|
#!/usr/bin/env python
"""A command-line client for the Docker registry."""
import argparse
import requests
def list_cmd(args):
"""List images"""
url = args.registry + '/v2/_catalog'
r = requests.get(url)
data = r.json()
for repo in data['repositories']:
print(repo)
def list_tags_cmd(args):
"""List tags for an image"""
url = args.registry + '/v2/' + args.name + '/tags/list'
r = requests.get(url)
data = r.json()
if data['tags'] is None:
print('No tags for {}'.format(args.name))
return
for repo in data['tags']:
print(repo)
def delete_tag_cmd(args):
"""Delete a tag from an image"""
url = args.registry + '/v2/' + args.name + '/manifests/' + args.tag
header = {'Accept': 'application/vnd.docker.distribution.manifest.v2+json'}
r = requests.get(url, headers=header)
if r.status_code == 404:
print('image/tag combination not found')
return
digest = r.headers['Docker-Content-Digest']
url = args.registry + '/v2/' + args.name + '/manifests/' + digest
r = requests.delete(url, headers=header)
if r.status_code != 202:
print('error')
else:
print('deleted')
def main():
"""Main entrypoint."""
parser = argparse.ArgumentParser(description="Docker Registry client")
parser.add_argument("-r", "--registry", help="Registry URL", default="http:127.0.0.1:5000")
subparser = parser.add_subparsers()
argparser_list = subparser.add_parser('list', help="Lists all the images in the registry")
argparser_list.set_defaults(func=list_cmd)
argparser_list_tags = subparser.add_parser('list-tags', help="Lists all tags for an image")
argparser_list_tags.add_argument('name', help="Name of the image")
argparser_list_tags.set_defaults(func=list_tags_cmd)
argparser_list_tags = subparser.add_parser('delete', help="Delete a tag from an image")
argparser_list_tags.add_argument('name', help="Name of the image")
argparser_list_tags.add_argument('tag', help="Name of the tag to delete")
argparser_list_tags.set_defaults(func=delete_tag_cmd)
args = parser.parse_args()
args.func(args)
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Add a script for doing basic maintenance on a private Docker Registry<commit_after>
|
#!/usr/bin/env python
"""A command-line client for the Docker registry."""
import argparse
import requests
def list_cmd(args):
"""List images"""
url = args.registry + '/v2/_catalog'
r = requests.get(url)
data = r.json()
for repo in data['repositories']:
print(repo)
def list_tags_cmd(args):
"""List tags for an image"""
url = args.registry + '/v2/' + args.name + '/tags/list'
r = requests.get(url)
data = r.json()
if data['tags'] is None:
print('No tags for {}'.format(args.name))
return
for repo in data['tags']:
print(repo)
def delete_tag_cmd(args):
"""Delete a tag from an image"""
url = args.registry + '/v2/' + args.name + '/manifests/' + args.tag
header = {'Accept': 'application/vnd.docker.distribution.manifest.v2+json'}
r = requests.get(url, headers=header)
if r.status_code == 404:
print('image/tag combination not found')
return
digest = r.headers['Docker-Content-Digest']
url = args.registry + '/v2/' + args.name + '/manifests/' + digest
r = requests.delete(url, headers=header)
if r.status_code != 202:
print('error')
else:
print('deleted')
def main():
"""Main entrypoint."""
parser = argparse.ArgumentParser(description="Docker Registry client")
parser.add_argument("-r", "--registry", help="Registry URL", default="http:127.0.0.1:5000")
subparser = parser.add_subparsers()
argparser_list = subparser.add_parser('list', help="Lists all the images in the registry")
argparser_list.set_defaults(func=list_cmd)
argparser_list_tags = subparser.add_parser('list-tags', help="Lists all tags for an image")
argparser_list_tags.add_argument('name', help="Name of the image")
argparser_list_tags.set_defaults(func=list_tags_cmd)
argparser_list_tags = subparser.add_parser('delete', help="Delete a tag from an image")
argparser_list_tags.add_argument('name', help="Name of the image")
argparser_list_tags.add_argument('tag', help="Name of the tag to delete")
argparser_list_tags.set_defaults(func=delete_tag_cmd)
args = parser.parse_args()
args.func(args)
if __name__ == "__main__":
main()
|
Add a script for doing basic maintenance on a private Docker Registry#!/usr/bin/env python
"""A command-line client for the Docker registry."""
import argparse
import requests
def list_cmd(args):
"""List images"""
url = args.registry + '/v2/_catalog'
r = requests.get(url)
data = r.json()
for repo in data['repositories']:
print(repo)
def list_tags_cmd(args):
"""List tags for an image"""
url = args.registry + '/v2/' + args.name + '/tags/list'
r = requests.get(url)
data = r.json()
if data['tags'] is None:
print('No tags for {}'.format(args.name))
return
for repo in data['tags']:
print(repo)
def delete_tag_cmd(args):
"""Delete a tag from an image"""
url = args.registry + '/v2/' + args.name + '/manifests/' + args.tag
header = {'Accept': 'application/vnd.docker.distribution.manifest.v2+json'}
r = requests.get(url, headers=header)
if r.status_code == 404:
print('image/tag combination not found')
return
digest = r.headers['Docker-Content-Digest']
url = args.registry + '/v2/' + args.name + '/manifests/' + digest
r = requests.delete(url, headers=header)
if r.status_code != 202:
print('error')
else:
print('deleted')
def main():
"""Main entrypoint."""
parser = argparse.ArgumentParser(description="Docker Registry client")
parser.add_argument("-r", "--registry", help="Registry URL", default="http:127.0.0.1:5000")
subparser = parser.add_subparsers()
argparser_list = subparser.add_parser('list', help="Lists all the images in the registry")
argparser_list.set_defaults(func=list_cmd)
argparser_list_tags = subparser.add_parser('list-tags', help="Lists all tags for an image")
argparser_list_tags.add_argument('name', help="Name of the image")
argparser_list_tags.set_defaults(func=list_tags_cmd)
argparser_list_tags = subparser.add_parser('delete', help="Delete a tag from an image")
argparser_list_tags.add_argument('name', help="Name of the image")
argparser_list_tags.add_argument('tag', help="Name of the tag to delete")
argparser_list_tags.set_defaults(func=delete_tag_cmd)
args = parser.parse_args()
args.func(args)
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Add a script for doing basic maintenance on a private Docker Registry<commit_after>#!/usr/bin/env python
"""A command-line client for the Docker registry."""
import argparse
import requests
def list_cmd(args):
"""List images"""
url = args.registry + '/v2/_catalog'
r = requests.get(url)
data = r.json()
for repo in data['repositories']:
print(repo)
def list_tags_cmd(args):
"""List tags for an image"""
url = args.registry + '/v2/' + args.name + '/tags/list'
r = requests.get(url)
data = r.json()
if data['tags'] is None:
print('No tags for {}'.format(args.name))
return
for repo in data['tags']:
print(repo)
def delete_tag_cmd(args):
"""Delete a tag from an image"""
url = args.registry + '/v2/' + args.name + '/manifests/' + args.tag
header = {'Accept': 'application/vnd.docker.distribution.manifest.v2+json'}
r = requests.get(url, headers=header)
if r.status_code == 404:
print('image/tag combination not found')
return
digest = r.headers['Docker-Content-Digest']
url = args.registry + '/v2/' + args.name + '/manifests/' + digest
r = requests.delete(url, headers=header)
if r.status_code != 202:
print('error')
else:
print('deleted')
def main():
"""Main entrypoint."""
parser = argparse.ArgumentParser(description="Docker Registry client")
parser.add_argument("-r", "--registry", help="Registry URL", default="http:127.0.0.1:5000")
subparser = parser.add_subparsers()
argparser_list = subparser.add_parser('list', help="Lists all the images in the registry")
argparser_list.set_defaults(func=list_cmd)
argparser_list_tags = subparser.add_parser('list-tags', help="Lists all tags for an image")
argparser_list_tags.add_argument('name', help="Name of the image")
argparser_list_tags.set_defaults(func=list_tags_cmd)
argparser_list_tags = subparser.add_parser('delete', help="Delete a tag from an image")
argparser_list_tags.add_argument('name', help="Name of the image")
argparser_list_tags.add_argument('tag', help="Name of the tag to delete")
argparser_list_tags.set_defaults(func=delete_tag_cmd)
args = parser.parse_args()
args.func(args)
if __name__ == "__main__":
main()
|
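The dispatch pattern used by the script (each subcommand stores its handler in args.func) can be exercised without a live registry by handing parse_args an explicit argument vector; the registry URL and image name below are placeholders.
import argparse
def show(args):
    # Stand-in handler; the real script would issue a requests call here.
    print(args.registry, args.name)
parser = argparse.ArgumentParser(description="Docker Registry client")
parser.add_argument("-r", "--registry", default="http://127.0.0.1:5000")
sub = parser.add_subparsers()
list_tags = sub.add_parser('list-tags')
list_tags.add_argument('name')
list_tags.set_defaults(func=show)
args = parser.parse_args(['-r', 'http://registry.example:5000', 'list-tags', 'myimage'])
args.func(args)  # prints: http://registry.example:5000 myimage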
|
4bb12a983f45c920088087f1edf3e00d9801b4bf
|
swarm_extraction/extraction.py
|
swarm_extraction/extraction.py
|
#
# Copyright (C) 2015 Jonathan Finlay <jfinlay@riseup.net>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from openerp.osv import osv, fields
_campaign_states = [
('draft', 'Draft'),
('progress', 'In progress'),
('done', 'Done'),
('cancel', 'Cancel')
]
class Campaign(osv.osv):
"""
Class to configure data extraction campaigns
"""
_name = 'swarm.campaign'
_description = __doc__
_columns = {
'name': fields.char('Name'),
'init_date': fields.datetime('Start date'),
'end_date': fields.datetime('End date'),
'tags_ids': fields.many2many('swarm_campaign_tag', 'swarm_campaign_tag_rel',
'campaign_id', 'tag_id', 'Tags'),
'max_items': fields.integer('Max items'),
'state': fields.selection(_campaign_states, 'State'),
}
class CampaignTag(osv.osv):
"""
Campaign tags
"""
_name = 'swarm.campaign.tag'
_description = __doc__
_columns = {
        'name': fields.char('Name')
}
|
Add Campaign & tag classes, created assembler package dir
|
Add Campaign & tag classes, created assembler package dir
|
Python
|
agpl-3.0
|
jonathanf/swarm
|
Add Campaign & tag classes, created assembler package dir
|
#
# Copyright (C) 2015 Jonathan Finlay <jfinlay@riseup.net>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from openerp.osv import osv, fields
_campaign_states = [
('draft', 'Draft'),
('progress', 'In progress'),
('done', 'Done'),
('cancel', 'Cancel')
]
class Campaign(osv.osv):
"""
Class to configure data extraction campaigns
"""
_name = 'swarm.campaign'
_description = __doc__
_columns = {
'name': fields.char('Name'),
'init_date': fields.datetime('Start date'),
'end_date': fields.datetime('End date'),
'tags_ids': fields.many2many('swarm_campaign_tag', 'swarm_campaign_tag_rel',
'campaign_id', 'tag_id', 'Tags'),
'max_items': fields.integer('Max items'),
'state': fields.selection(_campaign_states, 'State'),
}
class CampaignTag(osv.osv):
"""
Campaign tags
"""
_name = 'swarm.campaign.tag'
_description = __doc__
_columns = {
        'name': fields.char('Name')
}
|
<commit_before><commit_msg>Add Campaign & tag classes, created assembler package dir<commit_after>
|
#
# Copyright (C) 2015 Jonathan Finlay <jfinlay@riseup.net>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from openerp.osv import osv, fields
_campaign_states = [
('draft', 'Draft'),
('progress', 'In progress'),
('done', 'Done'),
('cancel', 'Cancel')
]
class Campaign(osv.osv):
"""
Class to configure data extraction campaigns
"""
_name = 'swarm.campaign'
_description = __doc__
_columns = {
'name': fields.char('Name'),
'init_date': fields.datetime('Start date'),
'end_date': fields.datetime('End date'),
'tags_ids': fields.many2many('swarm_campaign_tag', 'swarm_campaign_tag_rel',
'campaign_id', 'tag_id', 'Tags'),
'max_items': fields.integer('Max items'),
'state': fields.selection(_campaign_states, 'State'),
}
class CampaignTag(osv.osv):
"""
Campaign tags
"""
_name = 'swarm.campaign.tag'
_description = __doc__
_columns = {
        'name': fields.char('Name')
}
|
Add Campaign & tag classes, created assembler package dir#
# Copyright (C) 2015 Jonathan Finlay <jfinlay@riseup.net>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from openerp.osv import osv, fields
_campaign_states = [
('draft', 'Draft'),
('progress', 'In progress'),
('done', 'Done'),
('cancel', 'Cancel')
]
class Campaign(osv.osv):
"""
Class to configure data extraction campaigns
"""
_name = 'swarm.campaign'
_description = __doc__
_columns = {
'name': fields.char('Name'),
'init_date': fields.datetime('Start date'),
'end_date': fields.datetime('End date'),
'tags_ids': fields.many2many('swarm_campaign_tag', 'swarm_campaign_tag_rel',
'campaign_id', 'tag_id', 'Tags'),
'max_items': fields.integer('Max items'),
'state': fields.selection(_campaign_states, 'State'),
}
class CampaignTag(osv.osv):
"""
Campaign tags
"""
_name = 'swarm.campaign.tag'
_description = __doc__
_columns = {
        'name': fields.char('Name')
}
|
<commit_before><commit_msg>Add Campaign & tag classes, created assembler package dir<commit_after>#
# Copyright (C) 2015 Jonathan Finlay <jfinlay@riseup.net>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from openerp.osv import osv, fields
_campaign_states = [
('draft', 'Draft'),
('progress', 'In progress'),
('done', 'Done'),
('cancel', 'Cancel')
]
class Campaign(osv.osv):
"""
Class to configure data extraction campaigns
"""
_name = 'swarm.campaign'
_description = __doc__
_columns = {
'name': fields.char('Name'),
'init_date': fields.datetime('Start date'),
'end_date': fields.datetime('End date'),
'tags_ids': fields.many2many('swarm_campaign_tag', 'swarm_campaign_tag_rel',
'campaign_id', 'tag_id', 'Tags'),
'max_items': fields.integer('Max items'),
'state': fields.selection(_campaign_states, 'State'),
}
class CampaignTag(osv.osv):
"""
Campaign tags
"""
_name = 'swarm.campaign.tag'
_description = __doc__
_columns = {
        'name': fields.char('Name')
}
|
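For comparison only, the same campaign model written against the newer Odoo 8+ API; the field names and the relation table mirror the osv-style columns above, and this sketch is not part of the swarm module.
from openerp import models, fields
class Campaign(models.Model):
    _name = 'swarm.campaign'
    _description = 'Data extraction campaign'
    name = fields.Char('Name')
    init_date = fields.Datetime('Start date')
    end_date = fields.Datetime('End date')
    tags_ids = fields.Many2many('swarm.campaign.tag', 'swarm_campaign_tag_rel',
                                'campaign_id', 'tag_id', string='Tags')
    max_items = fields.Integer('Max items')
    state = fields.Selection([('draft', 'Draft'), ('progress', 'In progress'),
                              ('done', 'Done'), ('cancel', 'Cancel')], string='State')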
|
b78cdaa7460b6453eef3c3e825ba1f29279cf582
|
tests/test_graphs_threshold.py
|
tests/test_graphs_threshold.py
|
# -*- coding: utf-8 -*-
import numpy as np
from numpy import testing
# dyfunconn
from dyfunconn.graphs import graph_diffusion_distance
from dyfunconn.graphs import (threshold_mean_degree,
threshold_mst_mean_degree,
threshold_shortest_paths,
threshold_global_cost_efficiency,
threshold_omst_global_cost_efficiency)
def test_graph_threshold_mean_degree():
pass
def test_graph_threshold_mst_mean_degree():
pass
def test_graph_threshold_shortest_paths():
pass
def test_graph_threshold_global_cost_efficiency():
pass
def test_graph_threshold_omst_global_cost_efficiency():
mtx1 = np.load("data/test_graphs_gdd_mtx1_5x5.npy")
mtx2 = np.load("data/test_graphs_gdd_mtx2_5x5.npy")
gdd, t = graph_diffusion_distance(mtx1, mtx2)
np.testing.assert_almost_equal(gdd, 0.281495413972)
np.testing.assert_almost_equal(t, 0.443594234747)
|
Add placeholder unit tests for the thresholding methods.
|
Add placeholder unit tests for the thresholding methods.
|
Python
|
bsd-3-clause
|
makism/dyfunconn
|
Add placeholder unit tests for the thresholding methods.
|
# -*- coding: utf-8 -*-
import numpy as np
from numpy import testing
# dyfunconn
from dyfunconn.graphs import graph_diffusion_distance
from dyfunconn.graphs import (threshold_mean_degree,
threshold_mst_mean_degree,
threshold_shortest_paths,
threshold_global_cost_efficiency,
threshold_omst_global_cost_efficiency)
def test_graph_threshold_mean_degree():
pass
def test_graph_threshold_mst_mean_degree():
pass
def test_graph_threshold_shortest_paths():
pass
def test_graph_threshold_global_cost_efficiency():
pass
def test_graph_threshold_omst_global_cost_efficiency():
mtx1 = np.load("data/test_graphs_gdd_mtx1_5x5.npy")
mtx2 = np.load("data/test_graphs_gdd_mtx2_5x5.npy")
gdd, t = graph_diffusion_distance(mtx1, mtx2)
np.testing.assert_almost_equal(gdd, 0.281495413972)
np.testing.assert_almost_equal(t, 0.443594234747)
|
<commit_before><commit_msg>Add placeholder unit tests for the thresholding methods.<commit_after>
|
# -*- coding: utf-8 -*-
import numpy as np
from numpy import testing
# dyfunconn
from dyfunconn.graphs import graph_diffusion_distance
from dyfunconn.graphs import (threshold_mean_degree,
threshold_mst_mean_degree,
threshold_shortest_paths,
threshold_global_cost_efficiency,
threshold_omst_global_cost_efficiency)
def test_graph_threshold_mean_degree():
pass
def test_graph_threshold_mst_mean_degree():
pass
def test_graph_threshold_shortest_paths():
pass
def test_graph_threshold_global_cost_efficiency():
pass
def test_graph_threshold_omst_global_cost_efficiency():
mtx1 = np.load("data/test_graphs_gdd_mtx1_5x5.npy")
mtx2 = np.load("data/test_graphs_gdd_mtx2_5x5.npy")
gdd, t = graph_diffusion_distance(mtx1, mtx2)
np.testing.assert_almost_equal(gdd, 0.281495413972)
np.testing.assert_almost_equal(t, 0.443594234747)
|
Add placeholder unit tests for the thresholding methods.# -*- coding: utf-8 -*-
import numpy as np
from numpy import testing
# dyfunconn
from dyfunconn.graphs import graph_diffusion_distance
from dyfunconn.graphs import (threshold_mean_degree,
threshold_mst_mean_degree,
threshold_shortest_paths,
threshold_global_cost_efficiency,
threshold_omst_global_cost_efficiency)
def test_graph_threshold_mean_degree():
pass
def test_graph_threshold_mst_mean_degree():
pass
def test_graph_threshold_shortest_paths():
pass
def test_graph_threshold_global_cost_efficiency():
pass
def test_graph_threshold_omst_global_cost_efficiency():
mtx1 = np.load("data/test_graphs_gdd_mtx1_5x5.npy")
mtx2 = np.load("data/test_graphs_gdd_mtx2_5x5.npy")
gdd, t = graph_diffusion_distance(mtx1, mtx2)
np.testing.assert_almost_equal(gdd, 0.281495413972)
np.testing.assert_almost_equal(t, 0.443594234747)
|
<commit_before><commit_msg>Add placeholder unit tests for the thresholding methods.<commit_after># -*- coding: utf-8 -*-
import numpy as np
from numpy import testing
# dyfunconn
from dyfunconn.graphs import graph_diffusion_distance
from dyfunconn.graphs import (threshold_mean_degree,
threshold_mst_mean_degree,
threshold_shortest_paths,
threshold_global_cost_efficiency,
threshold_omst_global_cost_efficiency)
def test_graph_threshold_mean_degree():
pass
def test_graph_threshold_mst_mean_degree():
pass
def test_graph_threshold_shortest_paths():
pass
def test_graph_threshold_global_cost_efficiency():
pass
def test_graph_threshold_omst_global_cost_efficiency():
mtx1 = np.load("data/test_graphs_gdd_mtx1_5x5.npy")
mtx2 = np.load("data/test_graphs_gdd_mtx2_5x5.npy")
gdd, t = graph_diffusion_distance(mtx1, mtx2)
np.testing.assert_almost_equal(gdd, 0.281495413972)
np.testing.assert_almost_equal(t, 0.443594234747)
|
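The placeholders above will presumably follow the same save-and-compare pattern as the copied gdd check: run the routine on a small stored matrix and compare against previously saved reference values. A library-agnostic sketch of that pattern follows; the toy routine and file name are placeholders, not dyfunconn API.
import numpy as np
def toy_threshold(mtx, degree):
    # Stand-in routine: keep only the `degree` largest entries of each row.
    out = np.zeros_like(mtx)
    for i, row in enumerate(mtx):
        keep = np.argsort(row)[-degree:]
        out[i, keep] = row[keep]
    return out
mtx = np.random.RandomState(0).rand(5, 5)
result = toy_threshold(mtx, 2)
# First run: np.save("data/test_toy_threshold.npy", result)
# Regression run:
# expected = np.load("data/test_toy_threshold.npy")
# np.testing.assert_array_almost_equal(result, expected)
print(np.count_nonzero(result))  # 2 kept entries per row -> 10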
|
4d659f566f094a42057023dcc82a3ca8810c5ba6
|
tests/micropython/heapalloc.py
|
tests/micropython/heapalloc.py
|
# check that we can do certain things without allocating heap memory
import gc
def f(a):
print(a)
def g(a, b=2):
print(a, b)
global_var = 1
def h():
global global_var
global_var = 2 # set an existing global variable
for i in range(2): # for loop
f(i) # function call
f(i * 2 + 1) # binary operation with small ints
f(a=i) # keyword arguments
g(i) # default arg (second one)
g(i, i) # 2 args
# call h with heap allocation disabled
gc.disable()
h()
gc.enable()
|
Add feature test for when heap allocation is disabled.
|
tests: Add feature test for when heap allocation is disabled.
|
Python
|
mit
|
toolmacher/micropython,utopiaprince/micropython,alex-robbins/micropython,methoxid/micropystat,adamkh/micropython,TDAbboud/micropython,blmorris/micropython,AriZuu/micropython,paul-xxx/micropython,adafruit/circuitpython,tuc-osg/micropython,rubencabrera/micropython,tuc-osg/micropython,ruffy91/micropython,drrk/micropython,heisewangluo/micropython,xuxiaoxin/micropython,tobbad/micropython,rubencabrera/micropython,hiway/micropython,oopy/micropython,deshipu/micropython,MrSurly/micropython,dinau/micropython,praemdonck/micropython,cloudformdesign/micropython,infinnovation/micropython,TDAbboud/micropython,dinau/micropython,HenrikSolver/micropython,ceramos/micropython,dxxb/micropython,jimkmc/micropython,tralamazza/micropython,adafruit/micropython,pramasoul/micropython,adafruit/micropython,selste/micropython,EcmaXp/micropython,omtinez/micropython,noahchense/micropython,dxxb/micropython,hiway/micropython,toolmacher/micropython,mianos/micropython,MrSurly/micropython,Vogtinator/micropython,pfalcon/micropython,mhoffma/micropython,ceramos/micropython,kostyll/micropython,orionrobots/micropython,noahchense/micropython,MrSurly/micropython-esp32,alex-march/micropython,Peetz0r/micropython-esp32,chrisdearman/micropython,SHA2017-badge/micropython-esp32,tuc-osg/micropython,alex-march/micropython,mpalomer/micropython,ryannathans/micropython,omtinez/micropython,chrisdearman/micropython,HenrikSolver/micropython,cwyark/micropython,ceramos/micropython,cloudformdesign/micropython,neilh10/micropython,rubencabrera/micropython,orionrobots/micropython,matthewelse/micropython,slzatz/micropython,swegener/micropython,matthewelse/micropython,mgyenik/micropython,SHA2017-badge/micropython-esp32,omtinez/micropython,suda/micropython,ceramos/micropython,HenrikSolver/micropython,kostyll/micropython,emfcamp/micropython,puuu/micropython,martinribelotta/micropython,ceramos/micropython,lowRISC/micropython,praemdonck/micropython,ChuckM/micropython,hiway/micropython,deshipu/micropython,SHA2017-badge/micropython-esp32,SungEun-Steve-Kim/test-mp,Timmenem/micropython,mpalomer/micropython,jimkmc/micropython,tdautc19841202/micropython,firstval/micropython,xuxiaoxin/micropython,mgyenik/micropython,mgyenik/micropython,stonegithubs/micropython,hosaka/micropython,ahotam/micropython,rubencabrera/micropython,xyb/micropython,pozetroninc/micropython,methoxid/micropystat,adafruit/micropython,SungEun-Steve-Kim/test-mp,infinnovation/micropython,neilh10/micropython,Vogtinator/micropython,noahchense/micropython,selste/micropython,emfcamp/micropython,tralamazza/micropython,MrSurly/micropython,alex-robbins/micropython,mhoffma/micropython,skybird6672/micropython,trezor/micropython,dhylands/micropython,cwyark/micropython,blazewicz/micropython,vitiral/micropython,dmazzella/micropython,tuc-osg/micropython,kerneltask/micropython,TDAbboud/micropython,trezor/micropython,noahwilliamsson/micropython,pramasoul/micropython,matthewelse/micropython,ahotam/micropython,puuu/micropython,paul-xxx/micropython,MrSurly/micropython-esp32,SungEun-Steve-Kim/test-mp,HenrikSolver/micropython,warner83/micropython,MrSurly/micropython-esp32,praemdonck/micropython,torwag/micropython,lbattraw/micropython,EcmaXp/micropython,xuxiaoxin/micropython,cwyark/micropython,tobbad/micropython,dmazzella/micropython,feilongfl/micropython,blmorris/micropython,mgyenik/micropython,pfalcon/micropython,ruffy91/micropython,trezor/micropython,ericsnowcurrently/micropython,ruffy91/micropython,infinnovation/micropython,utopiaprince/micropython,Peetz0r/micropython-esp32,alex-robbins/micropython,PappaPeppar/micropy
thon,omtinez/micropython,henriknelson/micropython,blazewicz/micropython,dhylands/micropython,mhoffma/micropython,adafruit/circuitpython,feilongfl/micropython,vitiral/micropython,feilongfl/micropython,turbinenreiter/micropython,oopy/micropython,EcmaXp/micropython,toolmacher/micropython,cnoviello/micropython,oopy/micropython,PappaPeppar/micropython,martinribelotta/micropython,suda/micropython,lbattraw/micropython,ChuckM/micropython,neilh10/micropython,SHA2017-badge/micropython-esp32,dmazzella/micropython,orionrobots/micropython,drrk/micropython,jmarcelino/pycom-micropython,utopiaprince/micropython,misterdanb/micropython,ericsnowcurrently/micropython,chrisdearman/micropython,deshipu/micropython,micropython/micropython-esp32,blmorris/micropython,matthewelse/micropython,TDAbboud/micropython,oopy/micropython,noahchense/micropython,xyb/micropython,warner83/micropython,paul-xxx/micropython,mianos/micropython,KISSMonX/micropython,selste/micropython,torwag/micropython,adamkh/micropython,matthewelse/micropython,ernesto-g/micropython,noahwilliamsson/micropython,Peetz0r/micropython-esp32,redbear/micropython,xyb/micropython,cnoviello/micropython,xhat/micropython,methoxid/micropystat,neilh10/micropython,aethaniel/micropython,infinnovation/micropython,bvernoux/micropython,dinau/micropython,kerneltask/micropython,MrSurly/micropython-esp32,firstval/micropython,bvernoux/micropython,swegener/micropython,dxxb/micropython,feilongfl/micropython,feilongfl/micropython,misterdanb/micropython,xhat/micropython,alex-march/micropython,pramasoul/micropython,Peetz0r/micropython-esp32,adamkh/micropython,lowRISC/micropython,supergis/micropython,praemdonck/micropython,mpalomer/micropython,aethaniel/micropython,ChuckM/micropython,dinau/micropython,cwyark/micropython,methoxid/micropystat,slzatz/micropython,galenhz/micropython,hiway/micropython,PappaPeppar/micropython,danicampora/micropython,puuu/micropython,TDAbboud/micropython,torwag/micropython,emfcamp/micropython,lowRISC/micropython,turbinenreiter/micropython,aethaniel/micropython,EcmaXp/micropython,jlillest/micropython,blazewicz/micropython,kerneltask/micropython,galenhz/micropython,toolmacher/micropython,ganshun666/micropython,jlillest/micropython,tobbad/micropython,hosaka/micropython,supergis/micropython,swegener/micropython,adafruit/circuitpython,ernesto-g/micropython,vitiral/micropython,cnoviello/micropython,puuu/micropython,puuu/micropython,ernesto-g/micropython,micropython/micropython-esp32,chrisdearman/micropython,bvernoux/micropython,alex-robbins/micropython,selste/micropython,redbear/micropython,redbear/micropython,orionrobots/micropython,xyb/micropython,aethaniel/micropython,kostyll/micropython,supergis/micropython,selste/micropython,SungEun-Steve-Kim/test-mp,dmazzella/micropython,ChuckM/micropython,pozetroninc/micropython,dhylands/micropython,lbattraw/micropython,ruffy91/micropython,xhat/micropython,MrSurly/micropython-esp32,swegener/micropython,ganshun666/micropython,misterdanb/micropython,danicampora/micropython,tuc-osg/micropython,utopiaprince/micropython,dxxb/micropython,vriera/micropython,micropython/micropython-esp32,turbinenreiter/micropython,paul-xxx/micropython,KISSMonX/micropython,chrisdearman/micropython,henriknelson/micropython,heisewangluo/micropython,mianos/micropython,KISSMonX/micropython,vitiral/micropython,stonegithubs/micropython,ryannathans/micropython,suda/micropython,firstval/micropython,emfcamp/micropython,torwag/micropython,KISSMonX/micropython,redbear/micropython,martinribelotta/micropython,pozetroninc/micropython,firstval/micropython,ada
fruit/circuitpython,slzatz/micropython,infinnovation/micropython,Timmenem/micropython,micropython/micropython-esp32,ericsnowcurrently/micropython,adamkh/micropython,cloudformdesign/micropython,adafruit/micropython,hosaka/micropython,suda/micropython,stonegithubs/micropython,supergis/micropython,ryannathans/micropython,micropython/micropython-esp32,skybird6672/micropython,ericsnowcurrently/micropython,heisewangluo/micropython,supergis/micropython,adafruit/circuitpython,Timmenem/micropython,mgyenik/micropython,ahotam/micropython,alex-robbins/micropython,ruffy91/micropython,matthewelse/micropython,alex-march/micropython,firstval/micropython,SHA2017-badge/micropython-esp32,vriera/micropython,pozetroninc/micropython,misterdanb/micropython,cnoviello/micropython,heisewangluo/micropython,henriknelson/micropython,galenhz/micropython,kerneltask/micropython,ahotam/micropython,alex-march/micropython,cloudformdesign/micropython,AriZuu/micropython,mhoffma/micropython,suda/micropython,xuxiaoxin/micropython,noahwilliamsson/micropython,skybird6672/micropython,blazewicz/micropython,deshipu/micropython,tralamazza/micropython,torwag/micropython,ganshun666/micropython,redbear/micropython,stonegithubs/micropython,swegener/micropython,tobbad/micropython,slzatz/micropython,galenhz/micropython,tdautc19841202/micropython,henriknelson/micropython,Peetz0r/micropython-esp32,slzatz/micropython,ernesto-g/micropython,cnoviello/micropython,blmorris/micropython,henriknelson/micropython,oopy/micropython,jimkmc/micropython,mhoffma/micropython,PappaPeppar/micropython,drrk/micropython,mianos/micropython,warner83/micropython,turbinenreiter/micropython,PappaPeppar/micropython,drrk/micropython,AriZuu/micropython,misterdanb/micropython,xyb/micropython,pfalcon/micropython,mpalomer/micropython,utopiaprince/micropython,ericsnowcurrently/micropython,AriZuu/micropython,jmarcelino/pycom-micropython,jmarcelino/pycom-micropython,danicampora/micropython,trezor/micropython,pfalcon/micropython,adamkh/micropython,danicampora/micropython,ahotam/micropython,blmorris/micropython,Vogtinator/micropython,paul-xxx/micropython,lbattraw/micropython,dhylands/micropython,noahchense/micropython,xuxiaoxin/micropython,ernesto-g/micropython,pramasoul/micropython,toolmacher/micropython,ChuckM/micropython,jlillest/micropython,ryannathans/micropython,dhylands/micropython,jmarcelino/pycom-micropython,vriera/micropython,MrSurly/micropython,adafruit/micropython,SungEun-Steve-Kim/test-mp,cloudformdesign/micropython,mianos/micropython,tdautc19841202/micropython,mpalomer/micropython,hosaka/micropython,trezor/micropython,jlillest/micropython,vitiral/micropython,kostyll/micropython,jmarcelino/pycom-micropython,stonegithubs/micropython,warner83/micropython,pozetroninc/micropython,pfalcon/micropython,EcmaXp/micropython,tralamazza/micropython,Vogtinator/micropython,vriera/micropython,galenhz/micropython,hiway/micropython,emfcamp/micropython,xhat/micropython,neilh10/micropython,lowRISC/micropython,tdautc19841202/micropython,praemdonck/micropython,warner83/micropython,orionrobots/micropython,martinribelotta/micropython,adafruit/circuitpython,xhat/micropython,hosaka/micropython,heisewangluo/micropython,KISSMonX/micropython,tobbad/micropython,noahwilliamsson/micropython,jlillest/micropython,rubencabrera/micropython,danicampora/micropython,ganshun666/micropython,bvernoux/micropython,jimkmc/micropython,lowRISC/micropython,pramasoul/micropython,MrSurly/micropython,tdautc19841202/micropython,dinau/micropython,methoxid/micropystat,HenrikSolver/micropython,bvernoux/micropython,kern
eltask/micropython,Vogtinator/micropython,cwyark/micropython,drrk/micropython,omtinez/micropython,AriZuu/micropython,deshipu/micropython,dxxb/micropython,turbinenreiter/micropython,ryannathans/micropython,ganshun666/micropython,skybird6672/micropython,Timmenem/micropython,vriera/micropython,Timmenem/micropython,noahwilliamsson/micropython,jimkmc/micropython,kostyll/micropython,martinribelotta/micropython,blazewicz/micropython,skybird6672/micropython,lbattraw/micropython,aethaniel/micropython
|
tests: Add feature test for when heap allocation is disabled.
|
# check that we can do certain things without allocating heap memory
import gc
def f(a):
print(a)
def g(a, b=2):
print(a, b)
global_var = 1
def h():
global global_var
global_var = 2 # set an existing global variable
for i in range(2): # for loop
f(i) # function call
f(i * 2 + 1) # binary operation with small ints
f(a=i) # keyword arguments
g(i) # default arg (second one)
g(i, i) # 2 args
# call h with heap allocation disabled
gc.disable()
h()
gc.enable()
|
<commit_before><commit_msg>tests: Add feature test for when heap allocation is disabled.<commit_after>
|
# check that we can do certain things without allocating heap memory
import gc
def f(a):
print(a)
def g(a, b=2):
print(a, b)
global_var = 1
def h():
global global_var
global_var = 2 # set an existing global variable
for i in range(2): # for loop
f(i) # function call
f(i * 2 + 1) # binary operation with small ints
f(a=i) # keyword arguments
g(i) # default arg (second one)
g(i, i) # 2 args
# call h with heap allocation disabled
gc.disable()
h()
gc.enable()
|
tests: Add feature test for when heap allocation is disabled.# check that we can do certain things without allocating heap memory
import gc
def f(a):
print(a)
def g(a, b=2):
print(a, b)
global_var = 1
def h():
global global_var
global_var = 2 # set an existing global variable
for i in range(2): # for loop
f(i) # function call
f(i * 2 + 1) # binary operation with small ints
f(a=i) # keyword arguments
g(i) # default arg (second one)
g(i, i) # 2 args
# call h with heap allocation disabled
gc.disable()
h()
gc.enable()
|
<commit_before><commit_msg>tests: Add feature test for when heap allocation is disabled.<commit_after># check that we can do certain things without allocating heap memory
import gc
def f(a):
print(a)
def g(a, b=2):
print(a, b)
global_var = 1
def h():
global global_var
global_var = 2 # set an existing global variable
for i in range(2): # for loop
f(i) # function call
f(i * 2 + 1) # binary operation with small ints
f(a=i) # keyword arguments
g(i) # default arg (second one)
g(i, i) # 2 args
# call h with heap allocation disabled
gc.disable()
h()
gc.enable()
|
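On ports compiled with heap statistics, the "no allocation" property can also be observed directly by sampling gc.mem_alloc() around the call. This is a sketch for manual checking, not part of the committed test, and it assumes a MicroPython build that provides mem_alloc().
import gc
gc.collect()
before = gc.mem_alloc()   # bytes currently allocated on the MicroPython heap
h()                       # function under test from the listing above
after = gc.mem_alloc()
print(after - before)     # expected to stay 0 if h() avoids the heap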
|
9b7ca0f242d1c28c9ca53be4be0a83a6d1121ae6
|
conference/management/commands/make_speaker_profiles_public.py
|
conference/management/commands/make_speaker_profiles_public.py
|
# -*- coding: UTF-8 -*-
from __future__ import print_function
from django.core.management.base import BaseCommand, CommandError
from assopy import models as amodels
from conference import models as cmodels
def make_speaker_profiles_public_for_conference(conference):
# Get speaker records
speakers = set()
talks = cmodels.Talk.objects.accepted(conference)
for t in talks:
speakers |= set(t.get_all_speakers())
for speaker in speakers:
user = speaker.user
profile = cmodels.AttendeeProfile.objects.get(user=user)
if profile.visibility != 'p':
print ('Setting profile %r to public' % profile)
profile.visibility = 'p'
profile.save()
class Command(BaseCommand):
""" When accepting talks via database updates, the speaker profiles are
not automatically set to public. This command fixes this.
Argument: <conference year>
"""
args = '<conference>'
def handle(self, *args, **options):
try:
conference = args[0]
except IndexError:
raise CommandError('conference not specified')
make_speaker_profiles_public_for_conference(conference)
|
Add script to make speaker profiles public.
|
Add script to make speaker profiles public.
|
Python
|
bsd-2-clause
|
EuroPython/epcon,EuroPython/epcon,EuroPython/epcon,EuroPython/epcon
|
Add script to make speaker profiles public.
|
# -*- coding: UTF-8 -*-
from __future__ import print_function
from django.core.management.base import BaseCommand, CommandError
from assopy import models as amodels
from conference import models as cmodels
def make_speaker_profiles_public_for_conference(conference):
# Get speaker records
speakers = set()
talks = cmodels.Talk.objects.accepted(conference)
for t in talks:
speakers |= set(t.get_all_speakers())
for speaker in speakers:
user = speaker.user
profile = cmodels.AttendeeProfile.objects.get(user=user)
if profile.visibility != 'p':
print ('Setting profile %r to public' % profile)
profile.visibility = 'p'
profile.save()
class Command(BaseCommand):
""" When accepting talks via database updates, the speaker profiles are
not automatically set to public. This command fixes this.
Argument: <conference year>
"""
args = '<conference>'
def handle(self, *args, **options):
try:
conference = args[0]
except IndexError:
raise CommandError('conference not specified')
make_speaker_profiles_public_for_conference(conference)
|
<commit_before><commit_msg>Add script to make speaker profiles public.<commit_after>
|
# -*- coding: UTF-8 -*-
from __future__ import print_function
from django.core.management.base import BaseCommand, CommandError
from assopy import models as amodels
from conference import models as cmodels
def make_speaker_profiles_public_for_conference(conference):
# Get speaker records
speakers = set()
talks = cmodels.Talk.objects.accepted(conference)
for t in talks:
speakers |= set(t.get_all_speakers())
for speaker in speakers:
user = speaker.user
profile = cmodels.AttendeeProfile.objects.get(user=user)
if profile.visibility != 'p':
print ('Setting profile %r to public' % profile)
profile.visibility = 'p'
profile.save()
class Command(BaseCommand):
""" When accepting talks via database updates, the speaker profiles are
not automatically set to public. This command fixes this.
Argument: <conference year>
"""
args = '<conference>'
def handle(self, *args, **options):
try:
conference = args[0]
except IndexError:
raise CommandError('conference not specified')
make_speaker_profiles_public_for_conference(conference)
|
Add script to make speaker profiles public.# -*- coding: UTF-8 -*-
from __future__ import print_function
from django.core.management.base import BaseCommand, CommandError
from assopy import models as amodels
from conference import models as cmodels
def make_speaker_profiles_public_for_conference(conference):
# Get speaker records
speakers = set()
talks = cmodels.Talk.objects.accepted(conference)
for t in talks:
speakers |= set(t.get_all_speakers())
for speaker in speakers:
user = speaker.user
profile = cmodels.AttendeeProfile.objects.get(user=user)
if profile.visibility != 'p':
print ('Setting profile %r to public' % profile)
profile.visibility = 'p'
profile.save()
class Command(BaseCommand):
""" When accepting talks via database updates, the speaker profiles are
not automatically set to public. This command fixes this.
Argument: <conference year>
"""
args = '<conference>'
def handle(self, *args, **options):
try:
conference = args[0]
except IndexError:
raise CommandError('conference not specified')
make_speaker_profiles_public_for_conference(conference)
|
<commit_before><commit_msg>Add script to make speaker profiles public.<commit_after># -*- coding: UTF-8 -*-
from __future__ import print_function
from django.core.management.base import BaseCommand, CommandError
from assopy import models as amodels
from conference import models as cmodels
def make_speaker_profiles_public_for_conference(conference):
# Get speaker records
speakers = set()
talks = cmodels.Talk.objects.accepted(conference)
for t in talks:
speakers |= set(t.get_all_speakers())
for speaker in speakers:
user = speaker.user
profile = cmodels.AttendeeProfile.objects.get(user=user)
if profile.visibility != 'p':
print ('Setting profile %r to public' % profile)
profile.visibility = 'p'
profile.save()
class Command(BaseCommand):
""" When accepting talks via database updates, the speaker profiles are
not automatically set to public. This command fixes this.
Argument: <conference year>
"""
args = '<conference>'
def handle(self, *args, **options):
try:
conference = args[0]
except IndexError:
raise CommandError('conference not specified')
make_speaker_profiles_public_for_conference(conference)
|
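The command takes the conference as a positional argument in the old optparse style, so it can be driven from a shell as python manage.py make_speaker_profiles_public <conference> or programmatically as below; the conference slug ep2015 is a placeholder.
from django.core.management import call_command
# Equivalent to: python manage.py make_speaker_profiles_public ep2015
call_command('make_speaker_profiles_public', 'ep2015')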
|
0b3e8506f4b1d63cb54414b9462a85828855b926
|
ui/gfx/compositor/PRESUBMIT.py
|
ui/gfx/compositor/PRESUBMIT.py
|
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Chromium presubmit script for src/chrome/browser/externsions.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details on the presubmit API built into gcl.
"""
def GetPreferredTrySlaves():
return ['linux_chromeos_aura:compile']
|
Add aura compile testing by default to likely areas (2).
|
Add aura compile testing by default to likely areas (2).
BUG=chromium:107599
TEST=None
Review URL: http://codereview.chromium.org/8907045
git-svn-id: de016e52bd170d2d4f2344f9bf92d50478b649e0@114850 0039d316-1c4b-4281-b951-d872f2087c98
|
Python
|
bsd-3-clause
|
ltilve/chromium,keishi/chromium,bright-sparks/chromium-spacewalk,hujiajie/pa-chromium,ltilve/chromium,dednal/chromium.src,dushu1203/chromium.src,Just-D/chromium-1,markYoungH/chromium.src,Pluto-tv/chromium-crosswalk,markYoungH/chromium.src,junmin-zhu/chromium-rivertrail,dednal/chromium.src,pozdnyakov/chromium-crosswalk,Chilledheart/chromium,M4sse/chromium.src,Just-D/chromium-1,rogerwang/chromium,fujunwei/chromium-crosswalk,TheTypoMaster/chromium-crosswalk,hgl888/chromium-crosswalk-efl,ltilve/chromium,M4sse/chromium.src,patrickm/chromium.src,hujiajie/pa-chromium,ondra-novak/chromium.src,littlstar/chromium.src,mogoweb/chromium-crosswalk,zcbenz/cefode-chromium,jaruba/chromium.src,jaruba/chromium.src,Chilledheart/chromium,zcbenz/cefode-chromium,markYoungH/chromium.src,ltilve/chromium,Just-D/chromium-1,zcbenz/cefode-chromium,Pluto-tv/chromium-crosswalk,robclark/chromium,patrickm/chromium.src,PeterWangIntel/chromium-crosswalk,Chilledheart/chromium,robclark/chromium,nacl-webkit/chrome_deps,robclark/chromium,Pluto-tv/chromium-crosswalk,Fireblend/chromium-crosswalk,ondra-novak/chromium.src,nacl-webkit/chrome_deps,fujunwei/chromium-crosswalk,mogoweb/chromium-crosswalk,bright-sparks/chromium-spacewalk,ondra-novak/chromium.src,dednal/chromium.src,timopulkkinen/BubbleFish,rogerwang/chromium,PeterWangIntel/chromium-crosswalk,zcbenz/cefode-chromium,anirudhSK/chromium,hgl888/chromium-crosswalk-efl,hgl888/chromium-crosswalk,chuan9/chromium-crosswalk,crosswalk-project/chromium-crosswalk-efl,dushu1203/chromium.src,crosswalk-project/chromium-crosswalk-efl,zcbenz/cefode-chromium,junmin-zhu/chromium-rivertrail,timopulkkinen/BubbleFish,markYoungH/chromium.src,hgl888/chromium-crosswalk,dednal/chromium.src,fujunwei/chromium-crosswalk,mohamed--abdel-maksoud/chromium.src,PeterWangIntel/chromium-crosswalk,pozdnyakov/chromium-crosswalk,M4sse/chromium.src,crosswalk-project/chromium-crosswalk-efl,Fireblend/chromium-crosswalk,mogoweb/chromium-crosswalk,M4sse/chromium.src,rogerwang/chromium,littlstar/chromium.src,Fireblend/chromium-crosswalk,nacl-webkit/chrome_deps,ChromiumWebApps/chromium,krieger-od/nwjs_chromium.src,axinging/chromium-crosswalk,junmin-zhu/chromium-rivertrail,hgl888/chromium-crosswalk,bright-sparks/chromium-spacewalk,keishi/chromium,TheTypoMaster/chromium-crosswalk,Chilledheart/chromium,bright-sparks/chromium-spacewalk,pozdnyakov/chromium-crosswalk,Fireblend/chromium-crosswalk,anirudhSK/chromium,Fireblend/chromium-crosswalk,pozdnyakov/chromium-crosswalk,Pluto-tv/chromium-crosswalk,krieger-od/nwjs_chromium.src,jaruba/chromium.src,ltilve/chromium,rogerwang/chromium,ondra-novak/chromium.src,nacl-webkit/chrome_deps,mohamed--abdel-maksoud/chromium.src,Just-D/chromium-1,timopulkkinen/BubbleFish,dushu1203/chromium.src,patrickm/chromium.src,Pluto-tv/chromium-crosswalk,Jonekee/chromium.src,chuan9/chromium-crosswalk,ltilve/chromium,hgl888/chromium-crosswalk,mohamed--abdel-maksoud/chromium.src,littlstar/chromium.src,chuan9/chromium-crosswalk,patrickm/chromium.src,junmin-zhu/chromium-rivertrail,robclark/chromium,krieger-od/nwjs_chromium.src,Fireblend/chromium-crosswalk,rogerwang/chromium,PeterWangIntel/chromium-crosswalk,pozdnyakov/chromium-crosswalk,rogerwang/chromium,zcbenz/cefode-chromium,crosswalk-project/chromium-crosswalk-efl,TheTypoMaster/chromium-crosswalk,hujiajie/pa-chromium,littlstar/chromium.src,axinging/chromium-crosswalk,anirudhSK/chromium,zcbenz/cefode-chromium,keishi/chromium,patrickm/chromium.src,jaruba/chromium.src,krieger-od/nwjs_chromium.src,Chilledheart/chromium,Jonekee/chromium.src,Chilledheart/ch
romium,fujunwei/chromium-crosswalk,axinging/chromium-crosswalk,dednal/chromium.src,ChromiumWebApps/chromium,axinging/chromium-crosswalk,Fireblend/chromium-crosswalk,nacl-webkit/chrome_deps,dushu1203/chromium.src,timopulkkinen/BubbleFish,dushu1203/chromium.src,M4sse/chromium.src,M4sse/chromium.src,junmin-zhu/chromium-rivertrail,mogoweb/chromium-crosswalk,Pluto-tv/chromium-crosswalk,mogoweb/chromium-crosswalk,mohamed--abdel-maksoud/chromium.src,mogoweb/chromium-crosswalk,fujunwei/chromium-crosswalk,littlstar/chromium.src,TheTypoMaster/chromium-crosswalk,jaruba/chromium.src,chuan9/chromium-crosswalk,Just-D/chromium-1,hujiajie/pa-chromium,jaruba/chromium.src,hgl888/chromium-crosswalk-efl,Jonekee/chromium.src,nacl-webkit/chrome_deps,ChromiumWebApps/chromium,chuan9/chromium-crosswalk,timopulkkinen/BubbleFish,dednal/chromium.src,keishi/chromium,fujunwei/chromium-crosswalk,pozdnyakov/chromium-crosswalk,PeterWangIntel/chromium-crosswalk,jaruba/chromium.src,M4sse/chromium.src,junmin-zhu/chromium-rivertrail,bright-sparks/chromium-spacewalk,mohamed--abdel-maksoud/chromium.src,hgl888/chromium-crosswalk-efl,crosswalk-project/chromium-crosswalk-efl,anirudhSK/chromium,anirudhSK/chromium,rogerwang/chromium,timopulkkinen/BubbleFish,krieger-od/nwjs_chromium.src,Fireblend/chromium-crosswalk,hujiajie/pa-chromium,nacl-webkit/chrome_deps,nacl-webkit/chrome_deps,markYoungH/chromium.src,dushu1203/chromium.src,hgl888/chromium-crosswalk,hgl888/chromium-crosswalk-efl,Jonekee/chromium.src,markYoungH/chromium.src,krieger-od/nwjs_chromium.src,rogerwang/chromium,keishi/chromium,anirudhSK/chromium,Jonekee/chromium.src,nacl-webkit/chrome_deps,PeterWangIntel/chromium-crosswalk,littlstar/chromium.src,pozdnyakov/chromium-crosswalk,robclark/chromium,Jonekee/chromium.src,robclark/chromium,keishi/chromium,PeterWangIntel/chromium-crosswalk,TheTypoMaster/chromium-crosswalk,zcbenz/cefode-chromium,ondra-novak/chromium.src,dushu1203/chromium.src,hgl888/chromium-crosswalk-efl,TheTypoMaster/chromium-crosswalk,jaruba/chromium.src,anirudhSK/chromium,hgl888/chromium-crosswalk-efl,junmin-zhu/chromium-rivertrail,littlstar/chromium.src,hgl888/chromium-crosswalk-efl,dednal/chromium.src,nacl-webkit/chrome_deps,zcbenz/cefode-chromium,hujiajie/pa-chromium,junmin-zhu/chromium-rivertrail,junmin-zhu/chromium-rivertrail,dushu1203/chromium.src,dednal/chromium.src,markYoungH/chromium.src,TheTypoMaster/chromium-crosswalk,ChromiumWebApps/chromium,mohamed--abdel-maksoud/chromium.src,dushu1203/chromium.src,mogoweb/chromium-crosswalk,M4sse/chromium.src,dednal/chromium.src,Chilledheart/chromium,M4sse/chromium.src,Chilledheart/chromium,hujiajie/pa-chromium,anirudhSK/chromium,nacl-webkit/chrome_deps,markYoungH/chromium.src,timopulkkinen/BubbleFish,jaruba/chromium.src,mohamed--abdel-maksoud/chromium.src,bright-sparks/chromium-spacewalk,hgl888/chromium-crosswalk-efl,axinging/chromium-crosswalk,hgl888/chromium-crosswalk,axinging/chromium-crosswalk,hgl888/chromium-crosswalk,Just-D/chromium-1,PeterWangIntel/chromium-crosswalk,ChromiumWebApps/chromium,keishi/chromium,PeterWangIntel/chromium-crosswalk,mogoweb/chromium-crosswalk,hgl888/chromium-crosswalk-efl,patrickm/chromium.src,bright-sparks/chromium-spacewalk,rogerwang/chromium,axinging/chromium-crosswalk,mogoweb/chromium-crosswalk,ltilve/chromium,anirudhSK/chromium,krieger-od/nwjs_chromium.src,axinging/chromium-crosswalk,M4sse/chromium.src,Pluto-tv/chromium-crosswalk,timopulkkinen/BubbleFish,pozdnyakov/chromium-crosswalk,bright-sparks/chromium-spacewalk,robclark/chromium,crosswalk-project/chromium-crosswalk-efl,Jo
nekee/chromium.src,TheTypoMaster/chromium-crosswalk,hujiajie/pa-chromium,ChromiumWebApps/chromium,mohamed--abdel-maksoud/chromium.src,jaruba/chromium.src,anirudhSK/chromium,pozdnyakov/chromium-crosswalk,crosswalk-project/chromium-crosswalk-efl,markYoungH/chromium.src,fujunwei/chromium-crosswalk,ondra-novak/chromium.src,Jonekee/chromium.src,jaruba/chromium.src,crosswalk-project/chromium-crosswalk-efl,Just-D/chromium-1,Jonekee/chromium.src,ondra-novak/chromium.src,dednal/chromium.src,hujiajie/pa-chromium,ltilve/chromium,zcbenz/cefode-chromium,robclark/chromium,Pluto-tv/chromium-crosswalk,keishi/chromium,dednal/chromium.src,markYoungH/chromium.src,junmin-zhu/chromium-rivertrail,anirudhSK/chromium,pozdnyakov/chromium-crosswalk,patrickm/chromium.src,mohamed--abdel-maksoud/chromium.src,hgl888/chromium-crosswalk,Pluto-tv/chromium-crosswalk,ltilve/chromium,krieger-od/nwjs_chromium.src,chuan9/chromium-crosswalk,TheTypoMaster/chromium-crosswalk,anirudhSK/chromium,ChromiumWebApps/chromium,Just-D/chromium-1,timopulkkinen/BubbleFish,axinging/chromium-crosswalk,patrickm/chromium.src,chuan9/chromium-crosswalk,markYoungH/chromium.src,Jonekee/chromium.src,hujiajie/pa-chromium,krieger-od/nwjs_chromium.src,littlstar/chromium.src,timopulkkinen/BubbleFish,ChromiumWebApps/chromium,mohamed--abdel-maksoud/chromium.src,mohamed--abdel-maksoud/chromium.src,axinging/chromium-crosswalk,rogerwang/chromium,robclark/chromium,pozdnyakov/chromium-crosswalk,Just-D/chromium-1,chuan9/chromium-crosswalk,Fireblend/chromium-crosswalk,timopulkkinen/BubbleFish,crosswalk-project/chromium-crosswalk-efl,ChromiumWebApps/chromium,patrickm/chromium.src,fujunwei/chromium-crosswalk,ChromiumWebApps/chromium,chuan9/chromium-crosswalk,M4sse/chromium.src,ondra-novak/chromium.src,hujiajie/pa-chromium,zcbenz/cefode-chromium,dushu1203/chromium.src,hgl888/chromium-crosswalk,junmin-zhu/chromium-rivertrail,axinging/chromium-crosswalk,mogoweb/chromium-crosswalk,keishi/chromium,ChromiumWebApps/chromium,krieger-od/nwjs_chromium.src,Jonekee/chromium.src,keishi/chromium,Chilledheart/chromium,keishi/chromium,ondra-novak/chromium.src,krieger-od/nwjs_chromium.src,ChromiumWebApps/chromium,fujunwei/chromium-crosswalk,robclark/chromium,dushu1203/chromium.src,bright-sparks/chromium-spacewalk
|
Add aura compile testing by default to likely areas (2).
BUG=chromium:107599
TEST=None
Review URL: http://codereview.chromium.org/8907045
git-svn-id: de016e52bd170d2d4f2344f9bf92d50478b649e0@114850 0039d316-1c4b-4281-b951-d872f2087c98
|
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Chromium presubmit script for src/chrome/browser/externsions.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details on the presubmit API built into gcl.
"""
def GetPreferredTrySlaves():
return ['linux_chromeos_aura:compile']
|
<commit_before><commit_msg>Add aura compile testing by default to likely areas (2).
BUG=chromium:107599
TEST=None
Review URL: http://codereview.chromium.org/8907045
git-svn-id: de016e52bd170d2d4f2344f9bf92d50478b649e0@114850 0039d316-1c4b-4281-b951-d872f2087c98<commit_after>
|
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Chromium presubmit script for src/chrome/browser/externsions.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details on the presubmit API built into gcl.
"""
def GetPreferredTrySlaves():
return ['linux_chromeos_aura:compile']
|
Add aura compile testing by default to likely areas (2).
BUG=chromium:107599
TEST=None
Review URL: http://codereview.chromium.org/8907045
git-svn-id: de016e52bd170d2d4f2344f9bf92d50478b649e0@114850 0039d316-1c4b-4281-b951-d872f2087c98# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Chromium presubmit script for src/chrome/browser/externsions.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details on the presubmit API built into gcl.
"""
def GetPreferredTrySlaves():
return ['linux_chromeos_aura:compile']
|
<commit_before><commit_msg>Add aura compile testing by default to likely areas (2).
BUG=chromium:107599
TEST=None
Review URL: http://codereview.chromium.org/8907045
git-svn-id: de016e52bd170d2d4f2344f9bf92d50478b649e0@114850 0039d316-1c4b-4281-b951-d872f2087c98<commit_after># Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Chromium presubmit script for src/chrome/browser/externsions.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details on the presubmit API built into gcl.
"""
def GetPreferredTrySlaves():
return ['linux_chromeos_aura:compile']
|
|
016c3b50ef2fbb8d508a7f5aecd20135ae3c37ee
|
froide/foirequest/migrations/0017_auto_20180405_1626.py
|
froide/foirequest/migrations/0017_auto_20180405_1626.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.9 on 2018-04-05 14:26
from __future__ import unicode_literals
from django.db import migrations, models
import froide.foirequest.models.attachment
import froide.helper.storage
class Migration(migrations.Migration):
dependencies = [
('foirequest', '0016_foiproject_publicbodies'),
]
operations = [
migrations.AlterField(
model_name='foiattachment',
name='file',
field=models.FileField(max_length=255, storage=froide.helper.storage.HashedFilenameStorage(), upload_to=froide.foirequest.models.attachment.upload_to, verbose_name='File'),
),
]
|
Use hashed filename storage in attachment model
|
Use hashed filename storage in attachment model
|
Python
|
mit
|
fin/froide,stefanw/froide,fin/froide,fin/froide,stefanw/froide,stefanw/froide,stefanw/froide,stefanw/froide,fin/froide
|
Use hashed filename storage in attachment model
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.9 on 2018-04-05 14:26
from __future__ import unicode_literals
from django.db import migrations, models
import froide.foirequest.models.attachment
import froide.helper.storage
class Migration(migrations.Migration):
dependencies = [
('foirequest', '0016_foiproject_publicbodies'),
]
operations = [
migrations.AlterField(
model_name='foiattachment',
name='file',
field=models.FileField(max_length=255, storage=froide.helper.storage.HashedFilenameStorage(), upload_to=froide.foirequest.models.attachment.upload_to, verbose_name='File'),
),
]
|
<commit_before><commit_msg>Use hashed filename storage in attachment model<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.9 on 2018-04-05 14:26
from __future__ import unicode_literals
from django.db import migrations, models
import froide.foirequest.models.attachment
import froide.helper.storage
class Migration(migrations.Migration):
dependencies = [
('foirequest', '0016_foiproject_publicbodies'),
]
operations = [
migrations.AlterField(
model_name='foiattachment',
name='file',
field=models.FileField(max_length=255, storage=froide.helper.storage.HashedFilenameStorage(), upload_to=froide.foirequest.models.attachment.upload_to, verbose_name='File'),
),
]
|
Use hashed filename storage in attachment model# -*- coding: utf-8 -*-
# Generated by Django 1.11.9 on 2018-04-05 14:26
from __future__ import unicode_literals
from django.db import migrations, models
import froide.foirequest.models.attachment
import froide.helper.storage
class Migration(migrations.Migration):
dependencies = [
('foirequest', '0016_foiproject_publicbodies'),
]
operations = [
migrations.AlterField(
model_name='foiattachment',
name='file',
field=models.FileField(max_length=255, storage=froide.helper.storage.HashedFilenameStorage(), upload_to=froide.foirequest.models.attachment.upload_to, verbose_name='File'),
),
]
|
<commit_before><commit_msg>Use hashed filename storage in attachment model<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.11.9 on 2018-04-05 14:26
from __future__ import unicode_literals
from django.db import migrations, models
import froide.foirequest.models.attachment
import froide.helper.storage
class Migration(migrations.Migration):
dependencies = [
('foirequest', '0016_foiproject_publicbodies'),
]
operations = [
migrations.AlterField(
model_name='foiattachment',
name='file',
field=models.FileField(max_length=255, storage=froide.helper.storage.HashedFilenameStorage(), upload_to=froide.foirequest.models.attachment.upload_to, verbose_name='File'),
),
]
|
|
bee38446253b865d7f9ebcc0e507666b6bf7314c
|
examples/remote_image.py
|
examples/remote_image.py
|
import requests
from os import environ
from python_anticaptcha import AnticaptchaClient, ImageToTextTask
api_key = environ['KEY']
URL = 'https://raw.githubusercontent.com/ad-m/python-anticaptcha/master/examples/captcha_ms.jpeg'
EXPECTED_RESULT = '56nn2'
def process(url):
session = requests.Session()
client = AnticaptchaClient(api_key)
task = ImageToTextTask(session.get(url, stream=True).raw)
job = client.createTask(task)
job.join()
return job.get_captcha_text()
if __name__ == '__main__':
print("URL: " + URL)
print("Result: " + str(process(URL)))
print("Expected: " + str(EXPECTED_RESULT))
|
Add example for remote image in ImageToTextTask
|
Add example for remote image in ImageToTextTask
|
Python
|
mit
|
ad-m/python-anticaptcha
|
Add example for remote image in ImageToTextTask
|
import requests
from os import environ
from python_anticaptcha import AnticaptchaClient, ImageToTextTask
api_key = environ['KEY']
URL = 'https://raw.githubusercontent.com/ad-m/python-anticaptcha/master/examples/captcha_ms.jpeg'
EXPECTED_RESULT = '56nn2'
def process(url):
session = requests.Session()
client = AnticaptchaClient(api_key)
task = ImageToTextTask(session.get(url, stream=True).raw)
job = client.createTask(task)
job.join()
return job.get_captcha_text()
if __name__ == '__main__':
print("URL: " + URL)
print("Result: " + str(process(URL)))
print("Expected: " + str(EXPECTED_RESULT))
|
<commit_before><commit_msg>Add example for remote image in ImageToTextTask<commit_after>
|
import requests
from os import environ
from python_anticaptcha import AnticaptchaClient, ImageToTextTask
api_key = environ['KEY']
URL = 'https://raw.githubusercontent.com/ad-m/python-anticaptcha/master/examples/captcha_ms.jpeg'
EXPECTED_RESULT = '56nn2'
def process(url):
session = requests.Session()
client = AnticaptchaClient(api_key)
task = ImageToTextTask(session.get(url, stream=True).raw)
job = client.createTask(task)
job.join()
return job.get_captcha_text()
if __name__ == '__main__':
print("URL: " + URL)
print("Result: " + str(process(URL)))
print("Expected: " + str(EXPECTED_RESULT))
|
Add example for remote image in ImageToTextTaskimport requests
from os import environ
from python_anticaptcha import AnticaptchaClient, ImageToTextTask
api_key = environ['KEY']
URL = 'https://raw.githubusercontent.com/ad-m/python-anticaptcha/master/examples/captcha_ms.jpeg'
EXPECTED_RESULT = '56nn2'
def process(url):
session = requests.Session()
client = AnticaptchaClient(api_key)
task = ImageToTextTask(session.get(url, stream=True).raw)
job = client.createTask(task)
job.join()
return job.get_captcha_text()
if __name__ == '__main__':
print("URL: " + URL)
print("Result: " + str(process(URL)))
print("Expected: " + str(EXPECTED_RESULT))
|
<commit_before><commit_msg>Add example for remote image in ImageToTextTask<commit_after>import requests
from os import environ
from python_anticaptcha import AnticaptchaClient, ImageToTextTask
api_key = environ['KEY']
URL = 'https://raw.githubusercontent.com/ad-m/python-anticaptcha/master/examples/captcha_ms.jpeg'
EXPECTED_RESULT = '56nn2'
def process(url):
session = requests.Session()
client = AnticaptchaClient(api_key)
task = ImageToTextTask(session.get(url, stream=True).raw)
job = client.createTask(task)
job.join()
return job.get_captcha_text()
if __name__ == '__main__':
print("URL: " + URL)
print("Result: " + str(process(URL)))
print("Expected: " + str(EXPECTED_RESULT))
|
|
59dbe66c2e69397b98e1df933f4b33d9d4a3fffc
|
examples/spacy_basics.py
|
examples/spacy_basics.py
|
#!/usr/bin/env python
"""Check basic SpaCy functionality."""
# set path to project -> change if needed
project_path = '/Users/EB/Google Drive/Projects/breweries'
# modules
import pickle
import spacy
import os
# change directory and load Beer class
os.chdir(project_path + '/modules/')
from beeradvocate.classes import Beer
# create "custom" spacy pipeline (would also be standard)
def spacy_pipe(nlp):
return(nlp.tagger, nlp.parser, nlp.entity)
# Load custom pipeline for English
nlp = spacy.load('en', create_pipeline = spacy_pipe)
# load pickled beer reviews
os.chdir(project_path + '/data/')
beers = pickle.load(open('reviews_sample.p', 'rb'))
# start with one review to check functionality
review = beers[0].reviews[0][0]
review = nlp(review)
# Lemmatize the review and keep only (proper) nouns and adjectives
# This might be "enough" pre-processing for e.g. cluster analysis
lemmas = []
for word in review:
if word.pos_ in ('NOUN', 'PROPN', 'ADJ'):
lemmas.append(word.lemma_)
print(lemmas)
# Parser
# Extract noun chunks in the text (with length > 1)
# Note: if dependency parsing is not needed, use:
# spacy.load('en', parser = False) to increase speed
for np in review.noun_chunks:
if len(np) > 1:
print(np.lemma_)
# some of these dependencies (e.g. "creamy head", "earthy spice")
# are more interesting than others (e.g. "this one")
# we can use a rule based system to extract them
for np in review.noun_chunks:
toks = [token.pos_ for token in np]
tok_count = toks.count('PROPN') + toks.count('NOUN') + toks.count('ADJ')
if tok_count == len(toks) & len(toks) > 1:
print(np.lemma_)
|
Check out spaCy basic functionality
|
Check out spaCy basic functionality
|
Python
|
mit
|
ebommes/breweries
|
Check out spaCy basic functionality
|
#!/usr/bin/env python
"""Check basic SpaCy functionality."""
# set path to project -> change if needed
project_path = '/Users/EB/Google Drive/Projects/breweries'
# modules
import pickle
import spacy
import os
# change directory and load Beer class
os.chdir(project_path + '/modules/')
from beeradvocate.classes import Beer
# create "custom" spacy pipeline (would also be standard)
def spacy_pipe(nlp):
return(nlp.tagger, nlp.parser, nlp.entity)
# Load custom pipeline for English
nlp = spacy.load('en', create_pipeline = spacy_pipe)
# load pickled beer reviews
os.chdir(project_path + '/data/')
beers = pickle.load(open('reviews_sample.p', 'rb'))
# start with one review to check functionality
review = beers[0].reviews[0][0]
review = nlp(review)
# Lemmatize the review and keep only (proper) nouns and adjectives
# This might be "enough" pre-processing for e.g. cluster analysis
lemmas = []
for word in review:
if word.pos_ in ('NOUN', 'PROPN', 'ADJ'):
lemmas.append(word.lemma_)
print(lemmas)
# Parser
# Extract noun chunks in the text (with length > 1)
# Note: if dependency parsing is not needed, use:
# spacy.load('en', parser = False) to increase speed
for np in review.noun_chunks:
if len(np) > 1:
print(np.lemma_)
# some of these dependencies (e.g. "creamy head", "earthy spice")
# are more interesting than others (e.g. "this one")
# we can use a rule based system to extract them
for np in review.noun_chunks:
toks = [token.pos_ for token in np]
tok_count = toks.count('PROPN') + toks.count('NOUN') + toks.count('ADJ')
if tok_count == len(toks) & len(toks) > 1:
print(np.lemma_)
|
<commit_before><commit_msg>Check out spaCy basic functionality<commit_after>
|
#!/usr/bin/env python
"""Check basic SpaCy functionality."""
# set path to project -> change if needed
project_path = '/Users/EB/Google Drive/Projects/breweries'
# modules
import pickle
import spacy
import os
# change directory and load Beer class
os.chdir(project_path + '/modules/')
from beeradvocate.classes import Beer
# create "custom" spacy pipeline (would also be standard)
def spacy_pipe(nlp):
return(nlp.tagger, nlp.parser, nlp.entity)
# Load custom pipeline for English
nlp = spacy.load('en', create_pipeline = spacy_pipe)
# load pickled beer reviews
os.chdir(project_path + '/data/')
beers = pickle.load(open('reviews_sample.p', 'rb'))
# start with one review to check functionality
review = beers[0].reviews[0][0]
review = nlp(review)
# Lemmatize the review and keep only (proper) nouns and adjectives
# This might be "enough" pre-processing for e.g. cluster analysis
lemmas = []
for word in review:
if word.pos_ in ('NOUN', 'PROPN', 'ADJ'):
lemmas.append(word.lemma_)
print(lemmas)
# Parser
# Extract noun chunks in the text (with length > 1)
# Note: if dependency parsing is not needed, use:
# spacy.load('en', parser = False) to increase speed
for np in review.noun_chunks:
if len(np) > 1:
print(np.lemma_)
# some of these dependencies (e.g. "creamy head", "earthy spice")
# are more interesting than others (e.g. "this one")
# we can use a rule based system to extract them
for np in review.noun_chunks:
toks = [token.pos_ for token in np]
tok_count = toks.count('PROPN') + toks.count('NOUN') + toks.count('ADJ')
if tok_count == len(toks) & len(toks) > 1:
print(np.lemma_)
|
Check out spaCy basic functionality#!/usr/bin/env python
"""Check basic SpaCy functionality."""
# set path to project -> change if needed
project_path = '/Users/EB/Google Drive/Projects/breweries'
# modules
import pickle
import spacy
import os
# change directory and load Beer class
os.chdir(project_path + '/modules/')
from beeradvocate.classes import Beer
# create "custom" spacy pipeline (would also be standard)
def spacy_pipe(nlp):
return(nlp.tagger, nlp.parser, nlp.entity)
# Load custom pipeline for English
nlp = spacy.load('en', create_pipeline = spacy_pipe)
# load pickled beer reviews
os.chdir(project_path + '/data/')
beers = pickle.load(open('reviews_sample.p', 'rb'))
# start with one review to check functionality
review = beers[0].reviews[0][0]
review = nlp(review)
# Lemmatize the review and keep only (proper) nouns and adjectives
# This might be "enough" pre-processing for e.g. cluster analysis
lemmas = []
for word in review:
if word.pos_ in ('NOUN', 'PROPN', 'ADJ'):
lemmas.append(word.lemma_)
print(lemmas)
# Parser
# Extract noun chunks in the text (with length > 1)
# Note: if dependency parsing is not needed, use:
# spacy.load('en', parser = False) to increase speed
for np in review.noun_chunks:
if len(np) > 1:
print(np.lemma_)
# some of these dependencies (e.g. "creamy head", "earthy spice")
# are more interesting than others (e.g. "this one")
# we can use a rule based system to extract them
for np in review.noun_chunks:
toks = [token.pos_ for token in np]
tok_count = toks.count('PROPN') + toks.count('NOUN') + toks.count('ADJ')
if tok_count == len(toks) & len(toks) > 1:
print(np.lemma_)
|
<commit_before><commit_msg>Check out spaCy basic functionality<commit_after>#!/usr/bin/env python
"""Check basic SpaCy functionality."""
# set path to project -> change if needed
project_path = '/Users/EB/Google Drive/Projects/breweries'
# modules
import pickle
import spacy
import os
# change directory and load Beer class
os.chdir(project_path + '/modules/')
from beeradvocate.classes import Beer
# create "custom" spacy pipeline (would also be standard)
def spacy_pipe(nlp):
return(nlp.tagger, nlp.parser, nlp.entity)
# Load custom pipeline for English
nlp = spacy.load('en', create_pipeline = spacy_pipe)
# load pickled beer reviews
os.chdir(project_path + '/data/')
beers = pickle.load(open('reviews_sample.p', 'rb'))
# start with one review to check functionality
review = beers[0].reviews[0][0]
review = nlp(review)
# Lemmatize the review and keep only (proper) nouns and adjectives
# This might be "enough" pre-processing for e.g. cluster analysis
lemmas = []
for word in review:
if word.pos_ in ('NOUN', 'PROPN', 'ADJ'):
lemmas.append(word.lemma_)
print(lemmas)
# Parser
# Extract noun chunks in the text (with length > 1)
# Note: if dependency parsing is not needed, use:
# spacy.load('en', parser = False) to increase speed
for np in review.noun_chunks:
if len(np) > 1:
print(np.lemma_)
# some of these dependencies (e.g. "creamy head", "earthy spice")
# are more interesting than others (e.g. "this one")
# we can use a rule based system to extract them
for np in review.noun_chunks:
toks = [token.pos_ for token in np]
tok_count = toks.count('PROPN') + toks.count('NOUN') + toks.count('ADJ')
if tok_count == len(toks) & len(toks) > 1:
print(np.lemma_)
|
|
a97a1a15f80c9a7e33daa34392dc432b77e2461d
|
src/oscar/apps/catalogue/migrations/0005_auto_20150604_1450.py
|
src/oscar/apps/catalogue/migrations/0005_auto_20150604_1450.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('catalogue', '0004_auto_20150217_1710'),
]
operations = [
migrations.AlterField(
model_name='product',
name='product_class',
field=models.ForeignKey(related_name='products', on_delete=django.db.models.deletion.PROTECT, blank=True, to='catalogue.ProductClass', help_text='Choose what type of product this is', null=True, verbose_name='Product type'),
),
]
|
Add migration that allows product_class FK to be blank
|
Add migration that allows product_class FK to be blank
|
Python
|
bsd-3-clause
|
Jannes123/django-oscar,pdonadeo/django-oscar,itbabu/django-oscar,pasqualguerrero/django-oscar,kapari/django-oscar,anentropic/django-oscar,lijoantony/django-oscar,binarydud/django-oscar,MatthewWilkes/django-oscar,jmt4/django-oscar,WillisXChen/django-oscar,sonofatailor/django-oscar,michaelkuty/django-oscar,michaelkuty/django-oscar,rocopartners/django-oscar,django-oscar/django-oscar,lijoantony/django-oscar,dongguangming/django-oscar,anentropic/django-oscar,solarissmoke/django-oscar,nfletton/django-oscar,eddiep1101/django-oscar,michaelkuty/django-oscar,pdonadeo/django-oscar,bnprk/django-oscar,QLGu/django-oscar,kapari/django-oscar,jmt4/django-oscar,solarissmoke/django-oscar,nickpack/django-oscar,Jannes123/django-oscar,ka7eh/django-oscar,solarissmoke/django-oscar,spartonia/django-oscar,MatthewWilkes/django-oscar,QLGu/django-oscar,binarydud/django-oscar,anentropic/django-oscar,WillisXChen/django-oscar,sonofatailor/django-oscar,amirrpp/django-oscar,Jannes123/django-oscar,bschuon/django-oscar,WadeYuChen/django-oscar,vovanbo/django-oscar,amirrpp/django-oscar,rocopartners/django-oscar,sasha0/django-oscar,ka7eh/django-oscar,jlmadurga/django-oscar,dongguangming/django-oscar,saadatqadri/django-oscar,mexeniz/django-oscar,nickpack/django-oscar,john-parton/django-oscar,django-oscar/django-oscar,okfish/django-oscar,QLGu/django-oscar,nfletton/django-oscar,WadeYuChen/django-oscar,faratro/django-oscar,pasqualguerrero/django-oscar,pasqualguerrero/django-oscar,faratro/django-oscar,john-parton/django-oscar,jlmadurga/django-oscar,jlmadurga/django-oscar,sonofatailor/django-oscar,thechampanurag/django-oscar,rocopartners/django-oscar,john-parton/django-oscar,nfletton/django-oscar,mexeniz/django-oscar,mexeniz/django-oscar,kapari/django-oscar,WillisXChen/django-oscar,thechampanurag/django-oscar,jmt4/django-oscar,Bogh/django-oscar,bnprk/django-oscar,sasha0/django-oscar,thechampanurag/django-oscar,eddiep1101/django-oscar,spartonia/django-oscar,itbabu/django-oscar,john-parton/django-oscar,vovanbo/django-oscar,pasqualguerrero/django-oscar,ka7eh/django-oscar,amirrpp/django-oscar,bnprk/django-oscar,saadatqadri/django-oscar,michaelkuty/django-oscar,taedori81/django-oscar,WillisXChen/django-oscar,nfletton/django-oscar,thechampanurag/django-oscar,bschuon/django-oscar,amirrpp/django-oscar,WadeYuChen/django-oscar,mexeniz/django-oscar,nickpack/django-oscar,okfish/django-oscar,MatthewWilkes/django-oscar,lijoantony/django-oscar,sasha0/django-oscar,WadeYuChen/django-oscar,taedori81/django-oscar,bschuon/django-oscar,faratro/django-oscar,lijoantony/django-oscar,okfish/django-oscar,solarissmoke/django-oscar,eddiep1101/django-oscar,nickpack/django-oscar,bschuon/django-oscar,dongguangming/django-oscar,WillisXChen/django-oscar,jmt4/django-oscar,taedori81/django-oscar,eddiep1101/django-oscar,ka7eh/django-oscar,anentropic/django-oscar,jlmadurga/django-oscar,pdonadeo/django-oscar,kapari/django-oscar,dongguangming/django-oscar,vovanbo/django-oscar,Bogh/django-oscar,django-oscar/django-oscar,Bogh/django-oscar,saadatqadri/django-oscar,WillisXChen/django-oscar,itbabu/django-oscar,vovanbo/django-oscar,pdonadeo/django-oscar,saadatqadri/django-oscar,Bogh/django-oscar,binarydud/django-oscar,taedori81/django-oscar,binarydud/django-oscar,sasha0/django-oscar,django-oscar/django-oscar,Jannes123/django-oscar,okfish/django-oscar,itbabu/django-oscar,MatthewWilkes/django-oscar,spartonia/django-oscar,bnprk/django-oscar,sonofatailor/django-oscar,spartonia/django-oscar,rocopartners/django-oscar,faratro/django-oscar,QLGu/django-oscar
|
Add migration that allows product_class FK to be blank
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('catalogue', '0004_auto_20150217_1710'),
]
operations = [
migrations.AlterField(
model_name='product',
name='product_class',
field=models.ForeignKey(related_name='products', on_delete=django.db.models.deletion.PROTECT, blank=True, to='catalogue.ProductClass', help_text='Choose what type of product this is', null=True, verbose_name='Product type'),
),
]
|
<commit_before><commit_msg>Add migration that allows product_class FK to be blank<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('catalogue', '0004_auto_20150217_1710'),
]
operations = [
migrations.AlterField(
model_name='product',
name='product_class',
field=models.ForeignKey(related_name='products', on_delete=django.db.models.deletion.PROTECT, blank=True, to='catalogue.ProductClass', help_text='Choose what type of product this is', null=True, verbose_name='Product type'),
),
]
|
Add migration that allows product_class FK to be blank# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('catalogue', '0004_auto_20150217_1710'),
]
operations = [
migrations.AlterField(
model_name='product',
name='product_class',
field=models.ForeignKey(related_name='products', on_delete=django.db.models.deletion.PROTECT, blank=True, to='catalogue.ProductClass', help_text='Choose what type of product this is', null=True, verbose_name='Product type'),
),
]
|
<commit_before><commit_msg>Add migration that allows product_class FK to be blank<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('catalogue', '0004_auto_20150217_1710'),
]
operations = [
migrations.AlterField(
model_name='product',
name='product_class',
field=models.ForeignKey(related_name='products', on_delete=django.db.models.deletion.PROTECT, blank=True, to='catalogue.ProductClass', help_text='Choose what type of product this is', null=True, verbose_name='Product type'),
),
]
|
|
f05cecd96aacccbdd567807516bcf79d6ab83ed0
|
web/ext/mongodb/mongoengine.py
|
web/ext/mongodb/mongoengine.py
|
# encoding: utf-8
import re
from mongoengine import connect
from marrow.package.loader import load
from web.core.compat import native, iteritems
log = __import__('logging').getLogger(__name__)
_safe_uri_replace = re.compile(r'(\w+)://(\w+):(?P<password>[^@]+)@')
class MongoEngineExtension:
__slots__ = ('uri', 'db', 'connection', 'cb')
provides = ['db']
def __init__(self, uri, **config):
self.uri = uri
log.info("Connecting MongoEngine to '%s'.", _safe_uri_replace.sub(r'\1://\2@', uri))
connection = self.connection = dict(tz_aware=True)
scheme, parts = uri.split('://', 1)
parts, self.db = parts.split('/', 1)
auth, host = parts.split('@', 1) if '@' in parts else (None, parts)
if scheme != 'mongo':
raise Exception('The URL must begin with \'mongo://\'!')
connection['host'], connection['port'] = host.split(':') if ':' in host else (host, '27017')
connection['port'] = int(connection['port'])
if auth: # pragma: no cover
connection['username'], _, connection['password'] = auth.partition(':')
# Accept additional keyword arguments to mongoengine.connect() from the INI.
for k, v in iteritems(config):
pfx, _, k = k.rpartition('.')
if pfx != prefix or k in ('alias', 'engine', 'model', 'ready'): continue
connection[k] = int(v) if v.isdigit() else v
self.cb = config.get('ready', None)
def start(self, context):
db, connection = self.db, self.connection
log.debug("Connecting to %s database with connection information: %r", db, connection)
context.mongoengine = connect(db, **connection)
cb = self.cb
if cb is not None:
cb = load(cb) if isinstance(cb, native) else cb
if hasattr(cb, '__call__'):
cb()
|
Add MongoEngine to the pile.
|
Add MongoEngine to the pile.
|
Python
|
mit
|
marrow/WebCore,marrow/WebCore
|
Add MongoEngine to the pile.
|
# encoding: utf-8
import re
from mongoengine import connect
from marrow.package.loader import load
from web.core.compat import native, iteritems
log = __import__('logging').getLogger(__name__)
_safe_uri_replace = re.compile(r'(\w+)://(\w+):(?P<password>[^@]+)@')
class MongoEngineExtension:
__slots__ = ('uri', 'db', 'connection', 'cb')
provides = ['db']
def __init__(self, uri, **config):
self.uri = uri
log.info("Connecting MongoEngine to '%s'.", _safe_uri_replace.sub(r'\1://\2@', uri))
connection = self.connection = dict(tz_aware=True)
scheme, parts = uri.split('://', 1)
parts, self.db = parts.split('/', 1)
auth, host = parts.split('@', 1) if '@' in parts else (None, parts)
if scheme != 'mongo':
raise Exception('The URL must begin with \'mongo://\'!')
connection['host'], connection['port'] = host.split(':') if ':' in host else (host, '27017')
connection['port'] = int(connection['port'])
if auth: # pragma: no cover
connection['username'], _, connection['password'] = auth.partition(':')
# Accept additional keyword arguments to mongoengine.connect() from the INI.
for k, v in iteritems(config):
pfx, _, k = k.rpartition('.')
if pfx != prefix or k in ('alias', 'engine', 'model', 'ready'): continue
connection[k] = int(v) if v.isdigit() else v
self.cb = config.get('ready', None)
def start(self, context):
db, connection = self.db, self.connection
log.debug("Connecting to %s database with connection information: %r", db, connection)
context.mongoengine = connect(db, **connection)
cb = self.cb
if cb is not None:
cb = load(cb) if isinstance(cb, native) else cb
if hasattr(cb, '__call__'):
cb()
|
<commit_before><commit_msg>Add MongoEngine to the pile.<commit_after>
|
# encoding: utf-8
import re
from mongoengine import connect
from marrow.package.loader import load
from web.core.compat import native, iteritems
log = __import__('logging').getLogger(__name__)
_safe_uri_replace = re.compile(r'(\w+)://(\w+):(?P<password>[^@]+)@')
class MongoEngineExtension:
__slots__ = ('uri', 'db', 'connection', 'cb')
provides = ['db']
def __init__(self, uri, **config):
self.uri = uri
log.info("Connecting MongoEngine to '%s'.", _safe_uri_replace.sub(r'\1://\2@', uri))
connection = self.connection = dict(tz_aware=True)
scheme, parts = uri.split('://', 1)
parts, self.db = parts.split('/', 1)
auth, host = parts.split('@', 1) if '@' in parts else (None, parts)
if scheme != 'mongo':
raise Exception('The URL must begin with \'mongo://\'!')
connection['host'], connection['port'] = host.split(':') if ':' in host else (host, '27017')
connection['port'] = int(connection['port'])
if auth: # pragma: no cover
connection['username'], _, connection['password'] = auth.partition(':')
# Accept additional keyword arguments to mongoengine.connect() from the INI.
for k, v in iteritems(config):
pfx, _, k = k.rpartition('.')
if pfx != prefix or k in ('alias', 'engine', 'model', 'ready'): continue
connection[k] = int(v) if v.isdigit() else v
self.cb = config.get('ready', None)
def start(self, context):
db, connection = self.db, self.connection
log.debug("Connecting to %s database with connection information: %r", db, connection)
context.mongoengine = connect(db, **connection)
cb = self.cb
if cb is not None:
cb = load(cb) if isinstance(cb, native) else cb
if hasattr(cb, '__call__'):
cb()
|
Add MongoEngine to the pile.# encoding: utf-8
import re
from mongoengine import connect
from marrow.package.loader import load
from web.core.compat import native, iteritems
log = __import__('logging').getLogger(__name__)
_safe_uri_replace = re.compile(r'(\w+)://(\w+):(?P<password>[^@]+)@')
class MongoEngineExtension:
__slots__ = ('uri', 'db', 'connection', 'cb')
provides = ['db']
def __init__(self, uri, **config):
self.uri = uri
log.info("Connecting MongoEngine to '%s'.", _safe_uri_replace.sub(r'\1://\2@', uri))
connection = self.connection = dict(tz_aware=True)
scheme, parts = uri.split('://', 1)
parts, self.db = parts.split('/', 1)
auth, host = parts.split('@', 1) if '@' in parts else (None, parts)
if scheme != 'mongo':
raise Exception('The URL must begin with \'mongo://\'!')
connection['host'], connection['port'] = host.split(':') if ':' in host else (host, '27017')
connection['port'] = int(connection['port'])
if auth: # pragma: no cover
connection['username'], _, connection['password'] = auth.partition(':')
# Accept additional keyword arguments to mongoengine.connect() from the INI.
for k, v in iteritems(config):
pfx, _, k = k.rpartition('.')
if pfx != prefix or k in ('alias', 'engine', 'model', 'ready'): continue
connection[k] = int(v) if v.isdigit() else v
self.cb = config.get('ready', None)
def start(self, context):
db, connection = self.db, self.connection
log.debug("Connecting to %s database with connection information: %r", db, connection)
context.mongoengine = connect(db, **connection)
cb = self.cb
if cb is not None:
cb = load(cb) if isinstance(cb, native) else cb
if hasattr(cb, '__call__'):
cb()
|
<commit_before><commit_msg>Add MongoEngine to the pile.<commit_after># encoding: utf-8
import re
from mongoengine import connect
from marrow.package.loader import load
from web.core.compat import native, iteritems
log = __import__('logging').getLogger(__name__)
_safe_uri_replace = re.compile(r'(\w+)://(\w+):(?P<password>[^@]+)@')
class MongoEngineExtension:
__slots__ = ('uri', 'db', 'connection', 'cb')
provides = ['db']
def __init__(self, uri, **config):
self.uri = uri
log.info("Connecting MongoEngine to '%s'.", _safe_uri_replace.sub(r'\1://\2@', uri))
connection = self.connection = dict(tz_aware=True)
scheme, parts = uri.split('://', 1)
parts, self.db = parts.split('/', 1)
auth, host = parts.split('@', 1) if '@' in parts else (None, parts)
if scheme != 'mongo':
raise Exception('The URL must begin with \'mongo://\'!')
connection['host'], connection['port'] = host.split(':') if ':' in host else (host, '27017')
connection['port'] = int(connection['port'])
if auth: # pragma: no cover
connection['username'], _, connection['password'] = auth.partition(':')
# Accept additional keyword arguments to mongoengine.connect() from the INI.
for k, v in iteritems(config):
pfx, _, k = k.rpartition('.')
if pfx != prefix or k in ('alias', 'engine', 'model', 'ready'): continue
connection[k] = int(v) if v.isdigit() else v
self.cb = config.get('ready', None)
def start(self, context):
db, connection = self.db, self.connection
log.debug("Connecting to %s database with connection information: %r", db, connection)
context.mongoengine = connect(db, **connection)
cb = self.cb
if cb is not None:
cb = load(cb) if isinstance(cb, native) else cb
if hasattr(cb, '__call__'):
cb()
|
|
302dc5cb006dc6a068b5d78428bec5106d614391
|
glaciercmd/command_list_known_archives_for_vault.py
|
glaciercmd/command_list_known_archives_for_vault.py
|
import boto
import datetime
from boto.dynamodb2.table import Table
from boto.dynamodb2.table import HashKey
from boto.dynamodb2.layer1 import DynamoDBConnection
class CommandListKnownArchivesForVault(object):
def execute(self, args, config):
glacier_connection = boto.connect_glacier(aws_access_key_id=config.get('configuration', 'aws_key'), aws_secret_access_key=config.get('configuration', 'aws_secret'))
try:
vault = glacier_connection.get_vault(args[5])
except:
vault = None
if vault is None:
print "Vault named '{}' does not exist.".format(args[5])
else:
dynamo_connection = DynamoDBConnection(aws_access_key_id=config.get('configuration', 'aws_key'), aws_secret_access_key=config.get('configuration', 'aws_secret'))
archive_id_table = Table(config.get('configuration', 'dynamodb_table'), connection=dynamo_connection, schema=[HashKey('Account ID')])
count = 1
for archive in archive_id_table.scan():
time_str = datetime.datetime.fromtimestamp(archive['Upload Timestamp']).strftime('%d, %b %Y')
print "{}.\tFilename: {}\n\tTimestamp: {}\n\tArchive ID: {}".format(count, archive['Filename'], time_str, archive['Archive ID'])
count += 1
def accept(self, args):
return len(args) >= 6 and args[0] == 'list' and args[1] == 'known' and args[2] == 'archives' and args[3] == 'for' and args[4] == 'vault'
def help(self):
return "list known archives for vault <vault name>"
def command_init():
return CommandListKnownArchivesForVault()
|
Add list known archives command
|
Add list known archives command
|
Python
|
mit
|
carsonmcdonald/glacier-cmd
|
Add list known archives command
|
import boto
import datetime
from boto.dynamodb2.table import Table
from boto.dynamodb2.table import HashKey
from boto.dynamodb2.layer1 import DynamoDBConnection
class CommandListKnownArchivesForVault(object):
def execute(self, args, config):
glacier_connection = boto.connect_glacier(aws_access_key_id=config.get('configuration', 'aws_key'), aws_secret_access_key=config.get('configuration', 'aws_secret'))
try:
vault = glacier_connection.get_vault(args[5])
except:
vault = None
if vault is None:
print "Vault named '{}' does not exist.".format(args[5])
else:
dynamo_connection = DynamoDBConnection(aws_access_key_id=config.get('configuration', 'aws_key'), aws_secret_access_key=config.get('configuration', 'aws_secret'))
archive_id_table = Table(config.get('configuration', 'dynamodb_table'), connection=dynamo_connection, schema=[HashKey('Account ID')])
count = 1
for archive in archive_id_table.scan():
time_str = datetime.datetime.fromtimestamp(archive['Upload Timestamp']).strftime('%d, %b %Y')
print "{}.\tFilename: {}\n\tTimestamp: {}\n\tArchive ID: {}".format(count, archive['Filename'], time_str, archive['Archive ID'])
count += 1
def accept(self, args):
return len(args) >= 6 and args[0] == 'list' and args[1] == 'known' and args[2] == 'archives' and args[3] == 'for' and args[4] == 'vault'
def help(self):
return "list known archives for vault <vault name>"
def command_init():
return CommandListKnownArchivesForVault()
|
<commit_before><commit_msg>Add list known archives command<commit_after>
|
import boto
import datetime
from boto.dynamodb2.table import Table
from boto.dynamodb2.table import HashKey
from boto.dynamodb2.layer1 import DynamoDBConnection
class CommandListKnownArchivesForVault(object):
def execute(self, args, config):
glacier_connection = boto.connect_glacier(aws_access_key_id=config.get('configuration', 'aws_key'), aws_secret_access_key=config.get('configuration', 'aws_secret'))
try:
vault = glacier_connection.get_vault(args[5])
except:
vault = None
if vault is None:
print "Vault named '{}' does not exist.".format(args[5])
else:
dynamo_connection = DynamoDBConnection(aws_access_key_id=config.get('configuration', 'aws_key'), aws_secret_access_key=config.get('configuration', 'aws_secret'))
archive_id_table = Table(config.get('configuration', 'dynamodb_table'), connection=dynamo_connection, schema=[HashKey('Account ID')])
count = 1
for archive in archive_id_table.scan():
time_str = datetime.datetime.fromtimestamp(archive['Upload Timestamp']).strftime('%d, %b %Y')
print "{}.\tFilename: {}\n\tTimestamp: {}\n\tArchive ID: {}".format(count, archive['Filename'], time_str, archive['Archive ID'])
count += 1
def accept(self, args):
return len(args) >= 6 and args[0] == 'list' and args[1] == 'known' and args[2] == 'archives' and args[3] == 'for' and args[4] == 'vault'
def help(self):
return "list known archives for vault <vault name>"
def command_init():
return CommandListKnownArchivesForVault()
|
Add list known archives commandimport boto
import datetime
from boto.dynamodb2.table import Table
from boto.dynamodb2.table import HashKey
from boto.dynamodb2.layer1 import DynamoDBConnection
class CommandListKnownArchivesForVault(object):
def execute(self, args, config):
glacier_connection = boto.connect_glacier(aws_access_key_id=config.get('configuration', 'aws_key'), aws_secret_access_key=config.get('configuration', 'aws_secret'))
try:
vault = glacier_connection.get_vault(args[5])
except:
vault = None
if vault is None:
print "Vault named '{}' does not exist.".format(args[5])
else:
dynamo_connection = DynamoDBConnection(aws_access_key_id=config.get('configuration', 'aws_key'), aws_secret_access_key=config.get('configuration', 'aws_secret'))
archive_id_table = Table(config.get('configuration', 'dynamodb_table'), connection=dynamo_connection, schema=[HashKey('Account ID')])
count = 1
for archive in archive_id_table.scan():
time_str = datetime.datetime.fromtimestamp(archive['Upload Timestamp']).strftime('%d, %b %Y')
print "{}.\tFilename: {}\n\tTimestamp: {}\n\tArchive ID: {}".format(count, archive['Filename'], time_str, archive['Archive ID'])
count += 1
def accept(self, args):
return len(args) >= 6 and args[0] == 'list' and args[1] == 'known' and args[2] == 'archives' and args[3] == 'for' and args[4] == 'vault'
def help(self):
return "list known archives for vault <vault name>"
def command_init():
return CommandListKnownArchivesForVault()
|
<commit_before><commit_msg>Add list known archives command<commit_after>import boto
import datetime
from boto.dynamodb2.table import Table
from boto.dynamodb2.table import HashKey
from boto.dynamodb2.layer1 import DynamoDBConnection
class CommandListKnownArchivesForVault(object):
def execute(self, args, config):
glacier_connection = boto.connect_glacier(aws_access_key_id=config.get('configuration', 'aws_key'), aws_secret_access_key=config.get('configuration', 'aws_secret'))
try:
vault = glacier_connection.get_vault(args[5])
except:
vault = None
if vault is None:
print "Vault named '{}' does not exist.".format(args[5])
else:
dynamo_connection = DynamoDBConnection(aws_access_key_id=config.get('configuration', 'aws_key'), aws_secret_access_key=config.get('configuration', 'aws_secret'))
archive_id_table = Table(config.get('configuration', 'dynamodb_table'), connection=dynamo_connection, schema=[HashKey('Account ID')])
count = 1
for archive in archive_id_table.scan():
time_str = datetime.datetime.fromtimestamp(archive['Upload Timestamp']).strftime('%d, %b %Y')
print "{}.\tFilename: {}\n\tTimestamp: {}\n\tArchive ID: {}".format(count, archive['Filename'], time_str, archive['Archive ID'])
count += 1
def accept(self, args):
return len(args) >= 6 and args[0] == 'list' and args[1] == 'known' and args[2] == 'archives' and args[3] == 'for' and args[4] == 'vault'
def help(self):
return "list known archives for vault <vault name>"
def command_init():
return CommandListKnownArchivesForVault()
|
|
6c820df40cf410314679c08502ba41a44b489b45
|
senlin/tests/tempest/api/api_versions/test_api_version_show.py
|
senlin/tests/tempest/api/api_versions/test_api_version_show.py
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.lib import decorators
from senlin.tests.tempest.api import base
class TestAPIVersionShow(base.BaseSenlinTest):
@decorators.idempotent_id('4a270caa-9917-4acd-98ef-6636f9618037')
def test_API_version_show(self):
resp, body = self.client.request('GET', '/v1/')
res = self.client._parsed_resp(resp, body)
# Verify resp of API version show
self.assertEqual(200, res['status'])
self.assertIsNotNone(res['body'])
version = res['body']
for key in ['id', 'links', 'media-types', 'status', 'updated']:
self.assertIn(key, version)
self.assertEqual('1.0', version['id'])
|
Add tempest API test for API version show
|
Add tempest API test for API version show
This patch adds a tempest API test for API version show.
Change-Id: I1cf7cba550bb04629acab9899be310fd3b767576
|
Python
|
apache-2.0
|
openstack/senlin,openstack/senlin,stackforge/senlin,stackforge/senlin,openstack/senlin
|
Add tempest API test for API version show
This patch adds a tempest API test for API version show.
Change-Id: I1cf7cba550bb04629acab9899be310fd3b767576
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.lib import decorators
from senlin.tests.tempest.api import base
class TestAPIVersionShow(base.BaseSenlinTest):
@decorators.idempotent_id('4a270caa-9917-4acd-98ef-6636f9618037')
def test_API_version_show(self):
resp, body = self.client.request('GET', '/v1/')
res = self.client._parsed_resp(resp, body)
# Verify resp of API version show
self.assertEqual(200, res['status'])
self.assertIsNotNone(res['body'])
version = res['body']
for key in ['id', 'links', 'media-types', 'status', 'updated']:
self.assertIn(key, version)
self.assertEqual('1.0', version['id'])
|
<commit_before><commit_msg>Add tempest API test for API version show
This patch adds a tempest API test for API version show.
Change-Id: I1cf7cba550bb04629acab9899be310fd3b767576<commit_after>
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.lib import decorators
from senlin.tests.tempest.api import base
class TestAPIVersionShow(base.BaseSenlinTest):
@decorators.idempotent_id('4a270caa-9917-4acd-98ef-6636f9618037')
def test_API_version_show(self):
resp, body = self.client.request('GET', '/v1/')
res = self.client._parsed_resp(resp, body)
# Verify resp of API version show
self.assertEqual(200, res['status'])
self.assertIsNotNone(res['body'])
version = res['body']
for key in ['id', 'links', 'media-types', 'status', 'updated']:
self.assertIn(key, version)
self.assertEqual('1.0', version['id'])
|
Add tempest API test for API version show
This patch adds a tempest API test for API version show.
Change-Id: I1cf7cba550bb04629acab9899be310fd3b767576# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.lib import decorators
from senlin.tests.tempest.api import base
class TestAPIVersionShow(base.BaseSenlinTest):
@decorators.idempotent_id('4a270caa-9917-4acd-98ef-6636f9618037')
def test_API_version_show(self):
resp, body = self.client.request('GET', '/v1/')
res = self.client._parsed_resp(resp, body)
# Verify resp of API version show
self.assertEqual(200, res['status'])
self.assertIsNotNone(res['body'])
version = res['body']
for key in ['id', 'links', 'media-types', 'status', 'updated']:
self.assertIn(key, version)
self.assertEqual('1.0', version['id'])
|
<commit_before><commit_msg>Add tempest API test for API version show
This patch adds a tempest API test for API version show.
Change-Id: I1cf7cba550bb04629acab9899be310fd3b767576<commit_after># Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.lib import decorators
from senlin.tests.tempest.api import base
class TestAPIVersionShow(base.BaseSenlinTest):
@decorators.idempotent_id('4a270caa-9917-4acd-98ef-6636f9618037')
def test_API_version_show(self):
resp, body = self.client.request('GET', '/v1/')
res = self.client._parsed_resp(resp, body)
# Verify resp of API version show
self.assertEqual(200, res['status'])
self.assertIsNotNone(res['body'])
version = res['body']
for key in ['id', 'links', 'media-types', 'status', 'updated']:
self.assertIn(key, version)
self.assertEqual('1.0', version['id'])
|
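The test above only exercises GET /v1/ through the tempest client and checks that the version document carries the id, links, media-types, status and updated keys. For readers who want to poke at the same endpoint by hand, here is a minimal sketch using the requests library; the base URL and the "version" wrapper key in the response body are assumptions and would need to be checked against a real senlin deployment.

import requests

SENLIN_BASE_URL = "http://localhost:8778"  # assumed clustering endpoint

def show_api_version(base_url=SENLIN_BASE_URL):
    # GET /v1/ describes API version 1.0, mirroring the tempest test above
    resp = requests.get(base_url + "/v1/")
    resp.raise_for_status()
    version = resp.json()["version"]  # "version" wrapper key is an assumption
    for key in ("id", "links", "media-types", "status", "updated"):
        assert key in version, "missing %s in version document" % key
    return version

if __name__ == "__main__":
    print(show_api_version())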
|
dff9dced1bf1bbb684af4ba02febd255352484d5
|
official_documents/migrations/0004_auto_20150410_1054.py
|
official_documents/migrations/0004_auto_20150410_1054.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('official_documents', '0003_add_group'),
]
operations = [
migrations.AlterField(
model_name='officialdocument',
name='document_type',
field=models.CharField(max_length=100, choices=[(b'Nomination paper', b'Nomination paper')]),
preserve_default=True,
),
]
|
Add a migration that Django detects as a change
|
Add a migration that Django detects as a change
|
Python
|
agpl-3.0
|
neavouli/yournextrepresentative,datamade/yournextmp-popit,mysociety/yournextrepresentative,DemocracyClub/yournextrepresentative,neavouli/yournextrepresentative,openstate/yournextrepresentative,mysociety/yournextmp-popit,YoQuieroSaber/yournextrepresentative,openstate/yournextrepresentative,DemocracyClub/yournextrepresentative,openstate/yournextrepresentative,datamade/yournextmp-popit,mysociety/yournextmp-popit,mysociety/yournextrepresentative,mysociety/yournextmp-popit,YoQuieroSaber/yournextrepresentative,datamade/yournextmp-popit,mysociety/yournextrepresentative,mysociety/yournextmp-popit,datamade/yournextmp-popit,mysociety/yournextmp-popit,datamade/yournextmp-popit,mysociety/yournextrepresentative,neavouli/yournextrepresentative,neavouli/yournextrepresentative,neavouli/yournextrepresentative,YoQuieroSaber/yournextrepresentative,openstate/yournextrepresentative,YoQuieroSaber/yournextrepresentative,YoQuieroSaber/yournextrepresentative,openstate/yournextrepresentative,mysociety/yournextrepresentative,DemocracyClub/yournextrepresentative
|
Add a migration that Django detects as a change
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('official_documents', '0003_add_group'),
]
operations = [
migrations.AlterField(
model_name='officialdocument',
name='document_type',
field=models.CharField(max_length=100, choices=[(b'Nomination paper', b'Nomination paper')]),
preserve_default=True,
),
]
|
<commit_before><commit_msg>Add a migration that Django detects as a change<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('official_documents', '0003_add_group'),
]
operations = [
migrations.AlterField(
model_name='officialdocument',
name='document_type',
field=models.CharField(max_length=100, choices=[(b'Nomination paper', b'Nomination paper')]),
preserve_default=True,
),
]
|
Add a migration that Django detects as a change# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('official_documents', '0003_add_group'),
]
operations = [
migrations.AlterField(
model_name='officialdocument',
name='document_type',
field=models.CharField(max_length=100, choices=[(b'Nomination paper', b'Nomination paper')]),
preserve_default=True,
),
]
|
<commit_before><commit_msg>Add a migration that Django detects as a change<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('official_documents', '0003_add_group'),
]
operations = [
migrations.AlterField(
model_name='officialdocument',
name='document_type',
field=models.CharField(max_length=100, choices=[(b'Nomination paper', b'Nomination paper')]),
preserve_default=True,
),
]
|
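The AlterField above does not touch the database schema at all; it only records the narrowed choices for document_type, which is exactly why makemigrations reports it as a change. A hedged sketch of a model definition that would yield this migration is shown below; the field definition is reconstructed from the migration itself, and everything else (the choices constant, the class layout) is an assumption rather than code from the project.

from django.db import models

DOCUMENT_TYPES = (
    ("Nomination paper", "Nomination paper"),  # single choice, mirroring the migration
)

class OfficialDocument(models.Model):
    document_type = models.CharField(max_length=100, choices=DOCUMENT_TYPES)

    class Meta:
        app_label = "official_documents"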
|
e60c3e6c89d31d030af963722d44f668781ed461
|
9/e9.py
|
9/e9.py
|
#!/usr/bin/env python -tt -Wall
import itertools
def main():
k = 1000
factors = [4, 25]
ranges = (xrange(f, k/4, f) for f in factors)
multiples = itertools.ifilter(lambda m: not k%(2*m), itertools.chain(*ranges))
raw_pairs = itertools.imap(lambda m: (m, (k/(2*m)) - m), multiples)
pairs = itertools.ifilter(lambda (m,n): m>n and n>0, raw_pairs)
triplets = itertools.imap(lambda (m,n): (m**2 - n**2, 2 * m * n, m**2 + n**2),pairs)
for a,b,c in triplets:
print('a = {0}, b = {1}, c = {2}, a+b+c = {3}, abc = {4}'.format(a, b, c, a+b+c, a*b*c))
if __name__ == '__main__':
main()
|
Add a script that solves problem 9.
|
Add a script that solves problem 9.
|
Python
|
mit
|
cveazey/ProjectEuler,cveazey/ProjectEuler
|
Add a script that solves problem 9.
|
#!/usr/bin/env python -tt -Wall
import itertools
def main():
k = 1000
factors = [4, 25]
ranges = (xrange(f, k/4, f) for f in factors)
multiples = itertools.ifilter(lambda m: not k%(2*m), itertools.chain(*ranges))
raw_pairs = itertools.imap(lambda m: (m, (k/(2*m)) - m), multiples)
pairs = itertools.ifilter(lambda (m,n): m>n and n>0, raw_pairs)
triplets = itertools.imap(lambda (m,n): (m**2 - n**2, 2 * m * n, m**2 + n**2),pairs)
for a,b,c in triplets:
print('a = {0}, b = {1}, c = {2}, a+b+c = {3}, abc = {4}'.format(a, b, c, a+b+c, a*b*c))
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add a script that solves problem 9.<commit_after>
|
#!/usr/bin/env python -tt -Wall
import itertools
def main():
k = 1000
factors = [4, 25]
ranges = (xrange(f, k/4, f) for f in factors)
multiples = itertools.ifilter(lambda m: not k%(2*m), itertools.chain(*ranges))
raw_pairs = itertools.imap(lambda m: (m, (k/(2*m)) - m), multiples)
pairs = itertools.ifilter(lambda (m,n): m>n and n>0, raw_pairs)
triplets = itertools.imap(lambda (m,n): (m**2 - n**2, 2 * m * n, m**2 + n**2),pairs)
for a,b,c in triplets:
print('a = {0}, b = {1}, c = {2}, a+b+c = {3}, abc = {4}'.format(a, b, c, a+b+c, a*b*c))
if __name__ == '__main__':
main()
|
Add a script that solves problem 9.#!/usr/bin/env python -tt -Wall
import itertools
def main():
k = 1000
factors = [4, 25]
ranges = (xrange(f, k/4, f) for f in factors)
multiples = itertools.ifilter(lambda m: not k%(2*m), itertools.chain(*ranges))
raw_pairs = itertools.imap(lambda m: (m, (k/(2*m)) - m), multiples)
pairs = itertools.ifilter(lambda (m,n): m>n and n>0, raw_pairs)
triplets = itertools.imap(lambda (m,n): (m**2 - n**2, 2 * m * n, m**2 + n**2),pairs)
for a,b,c in triplets:
print('a = {0}, b = {1}, c = {2}, a+b+c = {3}, abc = {4}'.format(a, b, c, a+b+c, a*b*c))
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add a script that solves problem 9.<commit_after>#!/usr/bin/env python -tt -Wall
import itertools
def main():
k = 1000
factors = [4, 25]
ranges = (xrange(f, k/4, f) for f in factors)
multiples = itertools.ifilter(lambda m: not k%(2*m), itertools.chain(*ranges))
raw_pairs = itertools.imap(lambda m: (m, (k/(2*m)) - m), multiples)
pairs = itertools.ifilter(lambda (m,n): m>n and n>0, raw_pairs)
triplets = itertools.imap(lambda (m,n): (m**2 - n**2, 2 * m * n, m**2 + n**2),pairs)
for a,b,c in triplets:
print('a = {0}, b = {1}, c = {2}, a+b+c = {3}, abc = {4}'.format(a, b, c, a+b+c, a*b*c))
if __name__ == '__main__':
main()
|
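The script relies on Euclid's formula: for integers m > n > 0, a = m^2 - n^2, b = 2mn and c = m^2 + n^2 form a Pythagorean triple, and the perimeter is a + b + c = 2m(m + n). With k = 1000 this reduces to m(m + n) = 500, which is what the factor/multiple filtering above encodes. A more direct Python 3 sketch of the same search, shown purely as an illustration, is:

def triple_with_perimeter(k=1000):
    # Euclid's formula: a = m^2 - n^2, b = 2mn, c = m^2 + n^2 with m > n > 0,
    # so a + b + c = 2m(m + n) and we need m(m + n) = k // 2.
    half = k // 2
    for m in range(2, int(half ** 0.5) + 1):
        if half % m == 0:
            n = half // m - m  # solve m(m + n) = half for n
            if 0 < n < m:
                a, b, c = m * m - n * n, 2 * m * n, m * m + n * n
                return a, b, c, a * b * c

print(triple_with_perimeter())  # (375, 200, 425, 31875000)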
|
5b15efa07d9e619ead833d1ef0056a3c2b5df930
|
openstack/tests/functional/network/v2/test_quota.py
|
openstack/tests/functional/network/v2/test_quota.py
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from openstack.tests.functional import base
class TestQuota(base.BaseFunctionalTest):
def test_list(self):
sot = self.conn.network.quotas()
for qot in sot:
self.assertIn('subnet', qot)
self.assertIn('network', qot)
self.assertIn('router', qot)
self.assertIn('port', qot)
self.assertIn('floatingip', qot)
self.assertIn('security_rule_group', qot)
self.assertIn('security_group', qot)
|
Add functional test for Network Quota
|
Add functional test for Network Quota
test:
test_list
Change-Id: Ie484bc8b36da22a2b3894efb35903ae04a78db7b
|
Python
|
apache-2.0
|
stackforge/python-openstacksdk,briancurtin/python-openstacksdk,dudymas/python-openstacksdk,stackforge/python-openstacksdk,mtougeron/python-openstacksdk,openstack/python-openstacksdk,dudymas/python-openstacksdk,briancurtin/python-openstacksdk,dtroyer/python-openstacksdk,openstack/python-openstacksdk,mtougeron/python-openstacksdk,dtroyer/python-openstacksdk
|
Add functional test for Network Quota
test:
test_list
Change-Id: Ie484bc8b36da22a2b3894efb35903ae04a78db7b
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from openstack.tests.functional import base
class TestQuota(base.BaseFunctionalTest):
def test_list(self):
sot = self.conn.network.quotas()
for qot in sot:
self.assertIn('subnet', qot)
self.assertIn('network', qot)
self.assertIn('router', qot)
self.assertIn('port', qot)
self.assertIn('floatingip', qot)
self.assertIn('security_rule_group', qot)
self.assertIn('security_group', qot)
|
<commit_before><commit_msg>Add functional test for Network Quota
test:
test_list
Change-Id: Ie484bc8b36da22a2b3894efb35903ae04a78db7b<commit_after>
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from openstack.tests.functional import base
class TestQuota(base.BaseFunctionalTest):
def test_list(self):
sot = self.conn.network.quotas()
for qot in sot:
self.assertIn('subnet', qot)
self.assertIn('network', qot)
self.assertIn('router', qot)
self.assertIn('port', qot)
self.assertIn('floatingip', qot)
self.assertIn('security_rule_group', qot)
self.assertIn('security_group', qot)
|
Add functional test for Network Quota
test:
test_list
Change-Id: Ie484bc8b36da22a2b3894efb35903ae04a78db7b# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from openstack.tests.functional import base
class TestQuota(base.BaseFunctionalTest):
def test_list(self):
sot = self.conn.network.quotas()
for qot in sot:
self.assertIn('subnet', qot)
self.assertIn('network', qot)
self.assertIn('router', qot)
self.assertIn('port', qot)
self.assertIn('floatingip', qot)
self.assertIn('security_rule_group', qot)
self.assertIn('security_group', qot)
|
<commit_before><commit_msg>Add functional test for Network Quota
test:
test_list
Change-Id: Ie484bc8b36da22a2b3894efb35903ae04a78db7b<commit_after># Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from openstack.tests.functional import base
class TestQuota(base.BaseFunctionalTest):
def test_list(self):
sot = self.conn.network.quotas()
for qot in sot:
self.assertIn('subnet', qot)
self.assertIn('network', qot)
self.assertIn('router', qot)
self.assertIn('port', qot)
self.assertIn('floatingip', qot)
self.assertIn('security_rule_group', qot)
self.assertIn('security_group', qot)
|
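The functional test iterates conn.network.quotas() and checks the per-project resource keys. The same listing can be done from a plain script with openstacksdk; a minimal sketch is below, assuming a clouds.yaml entry named "devstack" and treating each quota record as a mapping, just as the test does.

import openstack

def dump_network_quotas(cloud_name="devstack"):
    # cloud_name refers to a clouds.yaml entry and is an assumption here
    conn = openstack.connect(cloud=cloud_name)
    for quota in conn.network.quotas():
        # the test above asserts these keys are present on every record
        print(quota["network"], quota["subnet"], quota["port"], quota["floatingip"])

if __name__ == "__main__":
    dump_network_quotas()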
|
2edf63fe25ccc1479ececb83eca005a221a67fa1
|
candidates/management/commands/candidates_fix_party_ids_in_json.py
|
candidates/management/commands/candidates_fix_party_ids_in_json.py
|
from collections import defaultdict
import json
from os.path import join
import re
from django.core.management.base import BaseCommand
from candidates.static_data import data_directory
def get_max_party_id(party_data):
"""Find the highest numeric ID from party IDs of the party:1234 form"""
max_id = -1
for party in party_data:
party_id = party['id']
m = re.search(r'party:(\d+)', party_id)
if not m:
continue
int_party_id = int(m.group(1))
max_id = max(max_id, int_party_id)
return max_id
class Command(BaseCommand):
help = "Fix parties with duplicate IDs in all-parties-from-popit.json"
def handle(self, **options):
json_filename = join(data_directory, 'all-parties-from-popit.json')
with open(json_filename) as f:
data = json.load(f)
max_party_id = get_max_party_id(data)
print "got max_party_id:", max_party_id
next_party_id = max_party_id + 1
new_party_data = []
party_id_total = defaultdict(int)
for party in data:
party_id_total[party['id']] += 1
party_id_times_seen = defaultdict(int)
for party in data:
party_id = party['id']
            # We should pick a new ID for a party if it's not the last
            # occurrence of that ID, so we look at how many of that ID
# we've already seen compared to the total.
# e.g. if there are 3 parties with ID party:42, then we
# should create a new ID for the party the first two times
# we see that ID; i.e. if we've seen it 0 or 1 times
# previously.
if party_id_times_seen[party_id] < (party_id_total[party_id] - 1):
party['id'] = 'party:{0}'.format(next_party_id)
next_party_id += 1
new_party_data.append(party)
party_id_times_seen[party_id] += 1
output_json_filename = json_filename + '.updated'
print "Writing a file with unique IDs to", output_json_filename
with open(output_json_filename, 'w') as f:
json.dump(new_party_data, f, indent=4, sort_keys=True)
|
Add a command to fix duplicate 'party:' IDs in the party JSON
|
Add a command to fix duplicate 'party:' IDs in the party JSON
|
Python
|
agpl-3.0
|
datamade/yournextmp-popit,neavouli/yournextrepresentative,mysociety/yournextmp-popit,mysociety/yournextrepresentative,mysociety/yournextmp-popit,DemocracyClub/yournextrepresentative,datamade/yournextmp-popit,neavouli/yournextrepresentative,mysociety/yournextrepresentative,mysociety/yournextmp-popit,mysociety/yournextmp-popit,datamade/yournextmp-popit,mysociety/yournextrepresentative,neavouli/yournextrepresentative,DemocracyClub/yournextrepresentative,mysociety/yournextmp-popit,mysociety/yournextrepresentative,datamade/yournextmp-popit,DemocracyClub/yournextrepresentative,neavouli/yournextrepresentative,mysociety/yournextrepresentative,neavouli/yournextrepresentative,datamade/yournextmp-popit
|
Add a command to fix duplicate 'party:' IDs in the party JSON
|
from collections import defaultdict
import json
from os.path import join
import re
from django.core.management.base import BaseCommand
from candidates.static_data import data_directory
def get_max_party_id(party_data):
"""Find the highest numeric ID from party IDs of the party:1234 form"""
max_id = -1
for party in party_data:
party_id = party['id']
m = re.search(r'party:(\d+)', party_id)
if not m:
continue
int_party_id = int(m.group(1))
max_id = max(max_id, int_party_id)
return max_id
class Command(BaseCommand):
help = "Fix parties with duplicate IDs in all-parties-from-popit.json"
def handle(self, **options):
json_filename = join(data_directory, 'all-parties-from-popit.json')
with open(json_filename) as f:
data = json.load(f)
max_party_id = get_max_party_id(data)
print "got max_party_id:", max_party_id
next_party_id = max_party_id + 1
new_party_data = []
party_id_total = defaultdict(int)
for party in data:
party_id_total[party['id']] += 1
party_id_times_seen = defaultdict(int)
for party in data:
party_id = party['id']
            # We should pick a new ID for a party if it's not the last
            # occurrence of that ID, so we look at how many of that ID
# we've already seen compared to the total.
# e.g. if there are 3 parties with ID party:42, then we
# should create a new ID for the party the first two times
# we see that ID; i.e. if we've seen it 0 or 1 times
# previously.
if party_id_times_seen[party_id] < (party_id_total[party_id] - 1):
party['id'] = 'party:{0}'.format(next_party_id)
next_party_id += 1
new_party_data.append(party)
party_id_times_seen[party_id] += 1
output_json_filename = json_filename + '.updated'
print "Writing a file with unique IDs to", output_json_filename
with open(output_json_filename, 'w') as f:
json.dump(new_party_data, f, indent=4, sort_keys=True)
|
<commit_before><commit_msg>Add a command to fix duplicate 'party:' IDs in the party JSON<commit_after>
|
from collections import defaultdict
import json
from os.path import join
import re
from django.core.management.base import BaseCommand
from candidates.static_data import data_directory
def get_max_party_id(party_data):
"""Find the highest numeric ID from party IDs of the party:1234 form"""
max_id = -1
for party in party_data:
party_id = party['id']
m = re.search(r'party:(\d+)', party_id)
if not m:
continue
int_party_id = int(m.group(1))
max_id = max(max_id, int_party_id)
return max_id
class Command(BaseCommand):
help = "Fix parties with duplicate IDs in all-parties-from-popit.json"
def handle(self, **options):
json_filename = join(data_directory, 'all-parties-from-popit.json')
with open(json_filename) as f:
data = json.load(f)
max_party_id = get_max_party_id(data)
print "got max_party_id:", max_party_id
next_party_id = max_party_id + 1
new_party_data = []
party_id_total = defaultdict(int)
for party in data:
party_id_total[party['id']] += 1
party_id_times_seen = defaultdict(int)
for party in data:
party_id = party['id']
            # We should pick a new ID for a party if it's not the last
            # occurrence of that ID, so we look at how many of that ID
# we've already seen compared to the total.
# e.g. if there are 3 parties with ID party:42, then we
# should create a new ID for the party the first two times
# we see that ID; i.e. if we've seen it 0 or 1 times
# previously.
if party_id_times_seen[party_id] < (party_id_total[party_id] - 1):
party['id'] = 'party:{0}'.format(next_party_id)
next_party_id += 1
new_party_data.append(party)
party_id_times_seen[party_id] += 1
output_json_filename = json_filename + '.updated'
print "Writing a file with unique IDs to", output_json_filename
with open(output_json_filename, 'w') as f:
json.dump(new_party_data, f, indent=4, sort_keys=True)
|
Add a command to fix duplicate 'party:' IDs in the party JSONfrom collections import defaultdict
import json
from os.path import join
import re
from django.core.management.base import BaseCommand
from candidates.static_data import data_directory
def get_max_party_id(party_data):
"""Find the highest numeric ID from party IDs of the party:1234 form"""
max_id = -1
for party in party_data:
party_id = party['id']
m = re.search(r'party:(\d+)', party_id)
if not m:
continue
int_party_id = int(m.group(1))
max_id = max(max_id, int_party_id)
return max_id
class Command(BaseCommand):
help = "Fix parties with duplicate IDs in all-parties-from-popit.json"
def handle(self, **options):
json_filename = join(data_directory, 'all-parties-from-popit.json')
with open(json_filename) as f:
data = json.load(f)
max_party_id = get_max_party_id(data)
print "got max_party_id:", max_party_id
next_party_id = max_party_id + 1
new_party_data = []
party_id_total = defaultdict(int)
for party in data:
party_id_total[party['id']] += 1
party_id_times_seen = defaultdict(int)
for party in data:
party_id = party['id']
            # We should pick a new ID for a party if it's not the last
            # occurrence of that ID, so we look at how many of that ID
# we've already seen compared to the total.
# e.g. if there are 3 parties with ID party:42, then we
# should create a new ID for the party the first two times
# we see that ID; i.e. if we've seen it 0 or 1 times
# previously.
if party_id_times_seen[party_id] < (party_id_total[party_id] - 1):
party['id'] = 'party:{0}'.format(next_party_id)
next_party_id += 1
new_party_data.append(party)
party_id_times_seen[party_id] += 1
output_json_filename = json_filename + '.updated'
print "Writing a file with unique IDs to", output_json_filename
with open(output_json_filename, 'w') as f:
json.dump(new_party_data, f, indent=4, sort_keys=True)
|
<commit_before><commit_msg>Add a command to fix duplicate 'party:' IDs in the party JSON<commit_after>from collections import defaultdict
import json
from os.path import join
import re
from django.core.management.base import BaseCommand
from candidates.static_data import data_directory
def get_max_party_id(party_data):
"""Find the highest numeric ID from party IDs of the party:1234 form"""
max_id = -1
for party in party_data:
party_id = party['id']
m = re.search(r'party:(\d+)', party_id)
if not m:
continue
int_party_id = int(m.group(1))
max_id = max(max_id, int_party_id)
return max_id
class Command(BaseCommand):
help = "Fix parties with duplicate IDs in all-parties-from-popit.json"
def handle(self, **options):
json_filename = join(data_directory, 'all-parties-from-popit.json')
with open(json_filename) as f:
data = json.load(f)
max_party_id = get_max_party_id(data)
print "got max_party_id:", max_party_id
next_party_id = max_party_id + 1
new_party_data = []
party_id_total = defaultdict(int)
for party in data:
party_id_total[party['id']] += 1
party_id_times_seen = defaultdict(int)
for party in data:
party_id = party['id']
            # We should pick a new ID for a party if it's not the last
            # occurrence of that ID, so we look at how many of that ID
# we've already seen compared to the total.
# e.g. if there are 3 parties with ID party:42, then we
# should create a new ID for the party the first two times
# we see that ID; i.e. if we've seen it 0 or 1 times
# previously.
if party_id_times_seen[party_id] < (party_id_total[party_id] - 1):
party['id'] = 'party:{0}'.format(next_party_id)
next_party_id += 1
new_party_data.append(party)
party_id_times_seen[party_id] += 1
output_json_filename = json_filename + '.updated'
print "Writing a file with unique IDs to", output_json_filename
with open(output_json_filename, 'w') as f:
json.dump(new_party_data, f, indent=4, sort_keys=True)
|
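The renumbering rule is the subtle part of the command above: only the final occurrence of a duplicated ID keeps it, and every earlier occurrence is given a fresh party:N ID above the current maximum. A self-contained toy run of that counting scheme, detached from Django and PopIt, makes the behaviour concrete:

from collections import defaultdict

def renumber(parties, next_id):
    totals = defaultdict(int)
    for p in parties:
        totals[p["id"]] += 1
    seen = defaultdict(int)
    out = []
    for p in parties:
        pid = p["id"]
        if seen[pid] < totals[pid] - 1:  # not the last occurrence, so re-ID it
            p = dict(p, id="party:{0}".format(next_id))
            next_id += 1
        out.append(p)
        seen[pid] += 1
    return out

print(renumber([{"id": "party:42"}, {"id": "party:42"}, {"id": "party:7"}], next_id=100))
# [{'id': 'party:100'}, {'id': 'party:42'}, {'id': 'party:7'}]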
|
0253f5a3746fc3d2eebf1bf8cc997023062dd7e5
|
SOAPpy/tests/ZeroLengthArray.py
|
SOAPpy/tests/ZeroLengthArray.py
|
import sys
sys.path.insert(1, "..")
from SOAPpy import *
one = typedArrayType(data=[1],typed=type(1))
tmp = typedArrayType(data=[], typed=type(1))
print buildSOAP( one )
print buildSOAP( tmp )
|
Test handling of zero-length typed lists. Currently fails.
|
Test handling of zero-length typed lists. Currently fails.
git-svn-id: c4afb4e777bcbfe9afa898413b708b5abcd43877@930 7150bf37-e60d-0410-b93f-83e91ef0e581
|
Python
|
mit
|
acigna/pywez,acigna/pywez,acigna/pywez
|
Test handling of zero-length typed lists. Currently fails.
git-svn-id: c4afb4e777bcbfe9afa898413b708b5abcd43877@930 7150bf37-e60d-0410-b93f-83e91ef0e581
|
import sys
sys.path.insert(1, "..")
from SOAPpy import *
one = typedArrayType(data=[1],typed=type(1))
tmp = typedArrayType(data=[], typed=type(1))
print buildSOAP( one )
print buildSOAP( tmp )
|
<commit_before><commit_msg>Test handling of zero-length typed lists. Currently fails.
git-svn-id: c4afb4e777bcbfe9afa898413b708b5abcd43877@930 7150bf37-e60d-0410-b93f-83e91ef0e581<commit_after>
|
import sys
sys.path.insert(1, "..")
from SOAPpy import *
one = typedArrayType(data=[1],typed=type(1))
tmp = typedArrayType(data=[], typed=type(1))
print buildSOAP( one )
print buildSOAP( tmp )
|
Test handling of zero-length typed lists. Currently fails.
git-svn-id: c4afb4e777bcbfe9afa898413b708b5abcd43877@930 7150bf37-e60d-0410-b93f-83e91ef0e581import sys
sys.path.insert(1, "..")
from SOAPpy import *
one = typedArrayType(data=[1],typed=type(1))
tmp = typedArrayType(data=[], typed=type(1))
print buildSOAP( one )
print buildSOAP( tmp )
|
<commit_before><commit_msg>Test handling of zero-length typed lists. Currently fails.
git-svn-id: c4afb4e777bcbfe9afa898413b708b5abcd43877@930 7150bf37-e60d-0410-b93f-83e91ef0e581<commit_after>import sys
sys.path.insert(1, "..")
from SOAPpy import *
one = typedArrayType(data=[1],typed=type(1))
tmp = typedArrayType(data=[], typed=type(1))
print buildSOAP( one )
print buildSOAP( tmp )
|
|
f509b56386a9b9d278345c0132757251291a4c51
|
contract_sale_generation/tests/test_contract_sale_recurrency.py
|
contract_sale_generation/tests/test_contract_sale_recurrency.py
|
# Copyright 2022 ACSONE SA/NV
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl).
from freezegun.api import freeze_time
from odoo import fields
from odoo.tests import Form
from odoo.tests.common import SavepointCase
from .common import ContractSaleCommon
def to_date(date):
return fields.Date.to_date(date)
today = "2020-01-15"
class TestContractSale(ContractSaleCommon, SavepointCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.contract_obj = cls.env["contract.contract"]
@classmethod
def _create_contract(cls):
cls.contract = cls.contract.create(
{
"name": "Test Contract",
"partner_id": cls.partner.id,
}
)
with Form(cls.contract) as contract_form:
contract_form.partner_id = cls.partner
contract_form.generation_type = "sale"
contract_form.group_id = cls.analytic_account
cls.contract = contract_form.save()
def test_contract_next_date(self):
"""
Change recurrence to weekly
Check the recurring next date value on lines
"""
with freeze_time(today):
self._create_contract()
self.contract.recurring_rule_type = "weekly"
with freeze_time(today):
with Form(self.contract) as contract_form:
with contract_form.contract_line_ids.new() as line_form:
line_form.product_id = self.product_1
line_form.name = "Services from #START# to #END#"
line_form.quantity = 1
line_form.price_unit = 100.0
line_form.discount = 50
line_form.recurring_rule_type = "weekly"
with freeze_time(today):
with Form(self.contract) as contract_form:
with contract_form.contract_line_ids.new() as line_form:
line_form.product_id = self.product_1
line_form.name = "Services from #START# to #END#"
line_form.quantity = 2
line_form.price_unit = 50.0
line_form.recurring_rule_type = "weekly"
self.assertEqual(
fields.Date.to_date("2020-01-15"), self.contract.recurring_next_date
)
self.contract.recurring_create_sale()
self.assertEqual(
fields.Date.to_date("2020-01-22"), self.contract.recurring_next_date
)
self.contract.recurring_create_sale()
self.assertEqual(
fields.Date.to_date("2020-01-29"), self.contract.recurring_next_date
)
|
Add tests with another recurrency
|
[14.0][IMP] contract_sale_generation: Add tests with another recurrency
|
Python
|
agpl-3.0
|
OCA/contract,OCA/contract,OCA/contract
|
[14.0][IMP] contract_sale_generation: Add tests with another recurrency
|
# Copyright 2022 ACSONE SA/NV
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl).
from freezegun.api import freeze_time
from odoo import fields
from odoo.tests import Form
from odoo.tests.common import SavepointCase
from .common import ContractSaleCommon
def to_date(date):
return fields.Date.to_date(date)
today = "2020-01-15"
class TestContractSale(ContractSaleCommon, SavepointCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.contract_obj = cls.env["contract.contract"]
@classmethod
def _create_contract(cls):
cls.contract = cls.contract.create(
{
"name": "Test Contract",
"partner_id": cls.partner.id,
}
)
with Form(cls.contract) as contract_form:
contract_form.partner_id = cls.partner
contract_form.generation_type = "sale"
contract_form.group_id = cls.analytic_account
cls.contract = contract_form.save()
def test_contract_next_date(self):
"""
Change recurrence to weekly
Check the recurring next date value on lines
"""
with freeze_time(today):
self._create_contract()
self.contract.recurring_rule_type = "weekly"
with freeze_time(today):
with Form(self.contract) as contract_form:
with contract_form.contract_line_ids.new() as line_form:
line_form.product_id = self.product_1
line_form.name = "Services from #START# to #END#"
line_form.quantity = 1
line_form.price_unit = 100.0
line_form.discount = 50
line_form.recurring_rule_type = "weekly"
with freeze_time(today):
with Form(self.contract) as contract_form:
with contract_form.contract_line_ids.new() as line_form:
line_form.product_id = self.product_1
line_form.name = "Services from #START# to #END#"
line_form.quantity = 2
line_form.price_unit = 50.0
line_form.recurring_rule_type = "weekly"
self.assertEqual(
fields.Date.to_date("2020-01-15"), self.contract.recurring_next_date
)
self.contract.recurring_create_sale()
self.assertEqual(
fields.Date.to_date("2020-01-22"), self.contract.recurring_next_date
)
self.contract.recurring_create_sale()
self.assertEqual(
fields.Date.to_date("2020-01-29"), self.contract.recurring_next_date
)
|
<commit_before><commit_msg>[14.0][IMP] contract_sale_generation: Add tests with another recurrency<commit_after>
|
# Copyright 2022 ACSONE SA/NV
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl).
from freezegun.api import freeze_time
from odoo import fields
from odoo.tests import Form
from odoo.tests.common import SavepointCase
from .common import ContractSaleCommon
def to_date(date):
return fields.Date.to_date(date)
today = "2020-01-15"
class TestContractSale(ContractSaleCommon, SavepointCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.contract_obj = cls.env["contract.contract"]
@classmethod
def _create_contract(cls):
cls.contract = cls.contract.create(
{
"name": "Test Contract",
"partner_id": cls.partner.id,
}
)
with Form(cls.contract) as contract_form:
contract_form.partner_id = cls.partner
contract_form.generation_type = "sale"
contract_form.group_id = cls.analytic_account
cls.contract = contract_form.save()
def test_contract_next_date(self):
"""
Change recurrence to weekly
Check the recurring next date value on lines
"""
with freeze_time(today):
self._create_contract()
self.contract.recurring_rule_type = "weekly"
with freeze_time(today):
with Form(self.contract) as contract_form:
with contract_form.contract_line_ids.new() as line_form:
line_form.product_id = self.product_1
line_form.name = "Services from #START# to #END#"
line_form.quantity = 1
line_form.price_unit = 100.0
line_form.discount = 50
line_form.recurring_rule_type = "weekly"
with freeze_time(today):
with Form(self.contract) as contract_form:
with contract_form.contract_line_ids.new() as line_form:
line_form.product_id = self.product_1
line_form.name = "Services from #START# to #END#"
line_form.quantity = 2
line_form.price_unit = 50.0
line_form.recurring_rule_type = "weekly"
self.assertEqual(
fields.Date.to_date("2020-01-15"), self.contract.recurring_next_date
)
self.contract.recurring_create_sale()
self.assertEqual(
fields.Date.to_date("2020-01-22"), self.contract.recurring_next_date
)
self.contract.recurring_create_sale()
self.assertEqual(
fields.Date.to_date("2020-01-29"), self.contract.recurring_next_date
)
|
[14.0][IMP] contract_sale_generation: Add tests with another recurrency# Copyright 2022 ACSONE SA/NV
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl).
from freezegun.api import freeze_time
from odoo import fields
from odoo.tests import Form
from odoo.tests.common import SavepointCase
from .common import ContractSaleCommon
def to_date(date):
return fields.Date.to_date(date)
today = "2020-01-15"
class TestContractSale(ContractSaleCommon, SavepointCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.contract_obj = cls.env["contract.contract"]
@classmethod
def _create_contract(cls):
cls.contract = cls.contract.create(
{
"name": "Test Contract",
"partner_id": cls.partner.id,
}
)
with Form(cls.contract) as contract_form:
contract_form.partner_id = cls.partner
contract_form.generation_type = "sale"
contract_form.group_id = cls.analytic_account
cls.contract = contract_form.save()
def test_contract_next_date(self):
"""
Change recurrence to weekly
Check the recurring next date value on lines
"""
with freeze_time(today):
self._create_contract()
self.contract.recurring_rule_type = "weekly"
with freeze_time(today):
with Form(self.contract) as contract_form:
with contract_form.contract_line_ids.new() as line_form:
line_form.product_id = self.product_1
line_form.name = "Services from #START# to #END#"
line_form.quantity = 1
line_form.price_unit = 100.0
line_form.discount = 50
line_form.recurring_rule_type = "weekly"
with freeze_time(today):
with Form(self.contract) as contract_form:
with contract_form.contract_line_ids.new() as line_form:
line_form.product_id = self.product_1
line_form.name = "Services from #START# to #END#"
line_form.quantity = 2
line_form.price_unit = 50.0
line_form.recurring_rule_type = "weekly"
self.assertEqual(
fields.Date.to_date("2020-01-15"), self.contract.recurring_next_date
)
self.contract.recurring_create_sale()
self.assertEqual(
fields.Date.to_date("2020-01-22"), self.contract.recurring_next_date
)
self.contract.recurring_create_sale()
self.assertEqual(
fields.Date.to_date("2020-01-29"), self.contract.recurring_next_date
)
|
<commit_before><commit_msg>[14.0][IMP] contract_sale_generation: Add tests with another recurrency<commit_after># Copyright 2022 ACSONE SA/NV
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl).
from freezegun.api import freeze_time
from odoo import fields
from odoo.tests import Form
from odoo.tests.common import SavepointCase
from .common import ContractSaleCommon
def to_date(date):
return fields.Date.to_date(date)
today = "2020-01-15"
class TestContractSale(ContractSaleCommon, SavepointCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.contract_obj = cls.env["contract.contract"]
@classmethod
def _create_contract(cls):
cls.contract = cls.contract.create(
{
"name": "Test Contract",
"partner_id": cls.partner.id,
}
)
with Form(cls.contract) as contract_form:
contract_form.partner_id = cls.partner
contract_form.generation_type = "sale"
contract_form.group_id = cls.analytic_account
cls.contract = contract_form.save()
def test_contract_next_date(self):
"""
Change recurrence to weekly
Check the recurring next date value on lines
"""
with freeze_time(today):
self._create_contract()
self.contract.recurring_rule_type = "weekly"
with freeze_time(today):
with Form(self.contract) as contract_form:
with contract_form.contract_line_ids.new() as line_form:
line_form.product_id = self.product_1
line_form.name = "Services from #START# to #END#"
line_form.quantity = 1
line_form.price_unit = 100.0
line_form.discount = 50
line_form.recurring_rule_type = "weekly"
with freeze_time(today):
with Form(self.contract) as contract_form:
with contract_form.contract_line_ids.new() as line_form:
line_form.product_id = self.product_1
line_form.name = "Services from #START# to #END#"
line_form.quantity = 2
line_form.price_unit = 50.0
line_form.recurring_rule_type = "weekly"
self.assertEqual(
fields.Date.to_date("2020-01-15"), self.contract.recurring_next_date
)
self.contract.recurring_create_sale()
self.assertEqual(
fields.Date.to_date("2020-01-22"), self.contract.recurring_next_date
)
self.contract.recurring_create_sale()
self.assertEqual(
fields.Date.to_date("2020-01-29"), self.contract.recurring_next_date
)
|
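The assertions pin today to 2020-01-15 and expect recurring_next_date to move forward one week each time recurring_create_sale() runs. The date arithmetic itself is an ordinary weekly step; a minimal sketch of the expected progression, assuming python-dateutil is available, is:

from datetime import date
from dateutil.relativedelta import relativedelta

def expected_next_dates(start, generations=2):
    """Yield the recurring_next_date expected after each weekly generation."""
    current = start
    for _ in range(generations):
        current = current + relativedelta(weeks=1)
        yield current

print(list(expected_next_dates(date(2020, 1, 15))))
# [datetime.date(2020, 1, 22), datetime.date(2020, 1, 29)]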
|
251d6500e8a5406cfc16c3a8b92a8f9d28d5a308
|
firecares/firestation/migrations/0040_auto_20170126_1640.py
|
firecares/firestation/migrations/0040_auto_20170126_1640.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('firestation', '0039_auto_20170126_0857'),
]
sql = """
CREATE OR REPLACE FUNCTION department_fts_document(integer) RETURNS tsvector AS $$
DECLARE
department_document TEXT;
name varchar;
city varchar;
state varchar(2);
state_name varchar(40);
postal_code varchar(10);
BEGIN
RAISE NOTICE 'WRONG FUUNCTIONS';
SELECT fd.name, add.city, fd.state, states.state_name, add.postal_code
INTO name, city, state, state_name, postal_code
FROM firestation_firedepartment fd
LEFT JOIN firecares_core_address add
ON fd.headquarters_address_id=add.id
LEFT JOIN usgs_stateorterritoryhigh states
ON ST_CoveredBy(ST_Centroid(fd.geom), states.geom)
WHERE fd.id=$1;
SELECT concat_ws(' ', name, city, state, state_name, postal_code) INTO department_document;
RETURN to_tsvector('pg_catalog.simple', department_document);
END;
$$ LANGUAGE plpgsql;
-- Overload the department_fts_document by calling this version the same name but accepting a different argument type.
-- This one takes a Fire Department object.
CREATE OR REPLACE FUNCTION department_fts_document(department firestation_firedepartment) RETURNS tsvector AS $$
DECLARE
department_document TEXT;
name varchar;
city varchar;
state varchar(2);
state_name varchar(40);
postal_code varchar(10);
BEGIN
SELECT add.city, states.state_name, add.postal_code
INTO city, state_name, postal_code
FROM firestation_firedepartment fd
LEFT JOIN firecares_core_address add
ON fd.headquarters_address_id=add.id
LEFT JOIN usgs_stateorterritoryhigh states
ON ST_CoveredBy(ST_Centroid(fd.geom), states.geom)
WHERE fd.id=department.id;
SELECT concat_ws(' ', department.name, city, department.state, state_name, postal_code) INTO department_document;
RETURN to_tsvector('pg_catalog.simple', department_document);
END;
$$ LANGUAGE plpgsql;
CREATE OR REPLACE FUNCTION department_fts_document_trigger() RETURNS TRIGGER AS $$
BEGIN
raise warning 'before set %', NEW;
NEW.fts_document=department_fts_document(NEW);
raise warning 'after set';
RETURN NEW;
END;
$$ LANGUAGE plpgsql;
"""
operations = [
migrations.RunSQL(sql)
]
|
Replace FTS functions since the FD table definition has changed.
|
Replace FTS functions since the FD table definition has changed.
|
Python
|
mit
|
FireCARES/firecares,FireCARES/firecares,FireCARES/firecares,FireCARES/firecares,FireCARES/firecares
|
Replace FTS functions since the FD table definition has changed.
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('firestation', '0039_auto_20170126_0857'),
]
sql = """
CREATE OR REPLACE FUNCTION department_fts_document(integer) RETURNS tsvector AS $$
DECLARE
department_document TEXT;
name varchar;
city varchar;
state varchar(2);
state_name varchar(40);
postal_code varchar(10);
BEGIN
RAISE NOTICE 'WRONG FUUNCTIONS';
SELECT fd.name, add.city, fd.state, states.state_name, add.postal_code
INTO name, city, state, state_name, postal_code
FROM firestation_firedepartment fd
LEFT JOIN firecares_core_address add
ON fd.headquarters_address_id=add.id
LEFT JOIN usgs_stateorterritoryhigh states
ON ST_CoveredBy(ST_Centroid(fd.geom), states.geom)
WHERE fd.id=$1;
SELECT concat_ws(' ', name, city, state, state_name, postal_code) INTO department_document;
RETURN to_tsvector('pg_catalog.simple', department_document);
END;
$$ LANGUAGE plpgsql;
-- Overload the department_fts_document by calling this version the same name but accepting a different argument type.
-- This one takes a Fire Department object.
CREATE OR REPLACE FUNCTION department_fts_document(department firestation_firedepartment) RETURNS tsvector AS $$
DECLARE
department_document TEXT;
name varchar;
city varchar;
state varchar(2);
state_name varchar(40);
postal_code varchar(10);
BEGIN
SELECT add.city, states.state_name, add.postal_code
INTO city, state_name, postal_code
FROM firestation_firedepartment fd
LEFT JOIN firecares_core_address add
ON fd.headquarters_address_id=add.id
LEFT JOIN usgs_stateorterritoryhigh states
ON ST_CoveredBy(ST_Centroid(fd.geom), states.geom)
WHERE fd.id=department.id;
SELECT concat_ws(' ', department.name, city, department.state, state_name, postal_code) INTO department_document;
RETURN to_tsvector('pg_catalog.simple', department_document);
END;
$$ LANGUAGE plpgsql;
CREATE OR REPLACE FUNCTION department_fts_document_trigger() RETURNS TRIGGER AS $$
BEGIN
raise warning 'before set %', NEW;
NEW.fts_document=department_fts_document(NEW);
raise warning 'after set';
RETURN NEW;
END;
$$ LANGUAGE plpgsql;
"""
operations = [
migrations.RunSQL(sql)
]
|
<commit_before><commit_msg>Replace FTS functions since the FD table definition has changed.<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('firestation', '0039_auto_20170126_0857'),
]
sql = """
CREATE OR REPLACE FUNCTION department_fts_document(integer) RETURNS tsvector AS $$
DECLARE
department_document TEXT;
name varchar;
city varchar;
state varchar(2);
state_name varchar(40);
postal_code varchar(10);
BEGIN
RAISE NOTICE 'WRONG FUUNCTIONS';
SELECT fd.name, add.city, fd.state, states.state_name, add.postal_code
INTO name, city, state, state_name, postal_code
FROM firestation_firedepartment fd
LEFT JOIN firecares_core_address add
ON fd.headquarters_address_id=add.id
LEFT JOIN usgs_stateorterritoryhigh states
ON ST_CoveredBy(ST_Centroid(fd.geom), states.geom)
WHERE fd.id=$1;
SELECT concat_ws(' ', name, city, state, state_name, postal_code) INTO department_document;
RETURN to_tsvector('pg_catalog.simple', department_document);
END;
$$ LANGUAGE plpgsql;
-- Overload the department_fts_document by calling this version the same name but accepting a different argument type.
-- This one takes a Fire Department object.
CREATE OR REPLACE FUNCTION department_fts_document(department firestation_firedepartment) RETURNS tsvector AS $$
DECLARE
department_document TEXT;
name varchar;
city varchar;
state varchar(2);
state_name varchar(40);
postal_code varchar(10);
BEGIN
SELECT add.city, states.state_name, add.postal_code
INTO city, state_name, postal_code
FROM firestation_firedepartment fd
LEFT JOIN firecares_core_address add
ON fd.headquarters_address_id=add.id
LEFT JOIN usgs_stateorterritoryhigh states
ON ST_CoveredBy(ST_Centroid(fd.geom), states.geom)
WHERE fd.id=department.id;
SELECT concat_ws(' ', department.name, city, department.state, state_name, postal_code) INTO department_document;
RETURN to_tsvector('pg_catalog.simple', department_document);
END;
$$ LANGUAGE plpgsql;
CREATE OR REPLACE FUNCTION department_fts_document_trigger() RETURNS TRIGGER AS $$
BEGIN
raise warning 'before set %', NEW;
NEW.fts_document=department_fts_document(NEW);
raise warning 'after set';
RETURN NEW;
END;
$$ LANGUAGE plpgsql;
"""
operations = [
migrations.RunSQL(sql)
]
|
Replace FTS functions since the FD table definition has changed.# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('firestation', '0039_auto_20170126_0857'),
]
sql = """
CREATE OR REPLACE FUNCTION department_fts_document(integer) RETURNS tsvector AS $$
DECLARE
department_document TEXT;
name varchar;
city varchar;
state varchar(2);
state_name varchar(40);
postal_code varchar(10);
BEGIN
RAISE NOTICE 'WRONG FUUNCTIONS';
SELECT fd.name, add.city, fd.state, states.state_name, add.postal_code
INTO name, city, state, state_name, postal_code
FROM firestation_firedepartment fd
LEFT JOIN firecares_core_address add
ON fd.headquarters_address_id=add.id
LEFT JOIN usgs_stateorterritoryhigh states
ON ST_CoveredBy(ST_Centroid(fd.geom), states.geom)
WHERE fd.id=$1;
SELECT concat_ws(' ', name, city, state, state_name, postal_code) INTO department_document;
RETURN to_tsvector('pg_catalog.simple', department_document);
END;
$$ LANGUAGE plpgsql;
-- Overload the department_fts_document by calling this version the same name but accepting a different argument type.
-- This one takes a Fire Department object.
CREATE OR REPLACE FUNCTION department_fts_document(department firestation_firedepartment) RETURNS tsvector AS $$
DECLARE
department_document TEXT;
name varchar;
city varchar;
state varchar(2);
state_name varchar(40);
postal_code varchar(10);
BEGIN
SELECT add.city, states.state_name, add.postal_code
INTO city, state_name, postal_code
FROM firestation_firedepartment fd
LEFT JOIN firecares_core_address add
ON fd.headquarters_address_id=add.id
LEFT JOIN usgs_stateorterritoryhigh states
ON ST_CoveredBy(ST_Centroid(fd.geom), states.geom)
WHERE fd.id=department.id;
SELECT concat_ws(' ', department.name, city, department.state, state_name, postal_code) INTO department_document;
RETURN to_tsvector('pg_catalog.simple', department_document);
END;
$$ LANGUAGE plpgsql;
CREATE OR REPLACE FUNCTION department_fts_document_trigger() RETURNS TRIGGER AS $$
BEGIN
raise warning 'before set %', NEW;
NEW.fts_document=department_fts_document(NEW);
raise warning 'after set';
RETURN NEW;
END;
$$ LANGUAGE plpgsql;
"""
operations = [
migrations.RunSQL(sql)
]
|
<commit_before><commit_msg>Replace FTS functions since the FD table definition has changed.<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('firestation', '0039_auto_20170126_0857'),
]
sql = """
CREATE OR REPLACE FUNCTION department_fts_document(integer) RETURNS tsvector AS $$
DECLARE
department_document TEXT;
name varchar;
city varchar;
state varchar(2);
state_name varchar(40);
postal_code varchar(10);
BEGIN
RAISE NOTICE 'WRONG FUUNCTIONS';
SELECT fd.name, add.city, fd.state, states.state_name, add.postal_code
INTO name, city, state, state_name, postal_code
FROM firestation_firedepartment fd
LEFT JOIN firecares_core_address add
ON fd.headquarters_address_id=add.id
LEFT JOIN usgs_stateorterritoryhigh states
ON ST_CoveredBy(ST_Centroid(fd.geom), states.geom)
WHERE fd.id=$1;
SELECT concat_ws(' ', name, city, state, state_name, postal_code) INTO department_document;
RETURN to_tsvector('pg_catalog.simple', department_document);
END;
$$ LANGUAGE plpgsql;
-- Overload the department_fts_document by calling this version the same name but accepting a different argument type.
-- This one takes a Fire Department object.
CREATE OR REPLACE FUNCTION department_fts_document(department firestation_firedepartment) RETURNS tsvector AS $$
DECLARE
department_document TEXT;
name varchar;
city varchar;
state varchar(2);
state_name varchar(40);
postal_code varchar(10);
BEGIN
SELECT add.city, states.state_name, add.postal_code
INTO city, state_name, postal_code
FROM firestation_firedepartment fd
LEFT JOIN firecares_core_address add
ON fd.headquarters_address_id=add.id
LEFT JOIN usgs_stateorterritoryhigh states
ON ST_CoveredBy(ST_Centroid(fd.geom), states.geom)
WHERE fd.id=department.id;
SELECT concat_ws(' ', department.name, city, department.state, state_name, postal_code) INTO department_document;
RETURN to_tsvector('pg_catalog.simple', department_document);
END;
$$ LANGUAGE plpgsql;
CREATE OR REPLACE FUNCTION department_fts_document_trigger() RETURNS TRIGGER AS $$
BEGIN
raise warning 'before set %', NEW;
NEW.fts_document=department_fts_document(NEW);
raise warning 'after set';
RETURN NEW;
END;
$$ LANGUAGE plpgsql;
"""
operations = [
migrations.RunSQL(sql)
]
|
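Once the trigger keeps fts_document in sync, searching departments is a straight tsvector match against a to_tsquery built from the user's terms. A hedged sketch of such a query issued from Python with psycopg2 is below; the DSN is an assumption, while the table name, the name column and the fts_document column come from the migration above.

import psycopg2

SEARCH_SQL = """
    SELECT id, name
    FROM firestation_firedepartment
    WHERE fts_document @@ to_tsquery('pg_catalog.simple', %s)
"""

def search_departments(terms, dsn="dbname=firecares"):  # assumed DSN
    with psycopg2.connect(dsn) as conn:
        with conn.cursor() as cur:
            # join the words with & so all of them must match
            cur.execute(SEARCH_SQL, (" & ".join(terms.split()),))
            return cur.fetchall()

if __name__ == "__main__":
    print(search_departments("houston fire"))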
|
7d897f8238be590e1359fa2b70eb43bc0fd62a9c
|
easy_thumbnails/tests/test_management.py
|
easy_thumbnails/tests/test_management.py
|
from easy_thumbnails.tests import utils as test
class ThumbnailCleanupTests(test.BaseTest):
def test_can_import(self):
"""
Just a simple test to see if we can actually import the command without
any syntax errors.
"""
from easy_thumbnails.management.commands import thumbnail_cleanup
|
Add a simple stupid test to just import the management command
|
Add a simple stupid test to just import the management command
|
Python
|
bsd-3-clause
|
SmileyChris/easy-thumbnails,siovene/easy-thumbnails,jaddison/easy-thumbnails,sandow-digital/easy-thumbnails-cropman,sandow-digital/easy-thumbnails-cropman,Mactory/easy-thumbnails
|
Add a simple stupid test to just import the management command
|
from easy_thumbnails.tests import utils as test
class ThumbnailCleanupTests(test.BaseTest):
def test_can_import(self):
"""
Just a simple test to see if we can actually import the command without
any syntax errors.
"""
from easy_thumbnails.management.commands import thumbnail_cleanup
|
<commit_before><commit_msg>Add a simple stupid test to just import the management command<commit_after>
|
from easy_thumbnails.tests import utils as test
class ThumbnailCleanupTests(test.BaseTest):
def test_can_import(self):
"""
Just a simple test to see if we can actually import the command without
any syntax errors.
"""
from easy_thumbnails.management.commands import thumbnail_cleanup
|
Add a simple stupid test to just import the management commandfrom easy_thumbnails.tests import utils as test
class ThumbnailCleanupTests(test.BaseTest):
def test_can_import(self):
"""
Just a simple test to see if we can actually import the command without
any syntax errors.
"""
from easy_thumbnails.management.commands import thumbnail_cleanup
|
<commit_before><commit_msg>Add a simple stupid test to just import the management command<commit_after>from easy_thumbnails.tests import utils as test
class ThumbnailCleanupTests(test.BaseTest):
def test_can_import(self):
"""
Just a simple test to see if we can actually import the command without
any syntax errors.
"""
from easy_thumbnails.management.commands import thumbnail_cleanup
|
|
b3daa95278783424705d18d51313180680b053e2
|
tests/test_i19screen.py
|
tests/test_i19screen.py
|
from __future__ import division, absolute_import
from i19.command_line.screen import i19_screen
def test_i19screen_command_line_help_does_not_crash():
i19_screen().run('')
def test_i19screen(tmpdir):
import os
import libtbx
xia2_regression = libtbx.env.under_build("xia2_regression")
data_dir = os.path.join(xia2_regression, "test_data", "X4_wide")
olddir = tmpdir.chdir()
i19_screen().run([data_dir])
with tmpdir.join('i19.screen.log').open() as fh:
logfile = fh.read()
assert 'i19.screen successfully completed' in logfile
assert 'photon incidence rate is outside the linear response region' in logfile
|
Add integration test for i19 screen using xia2 regression data
|
Add integration test for i19 screen using xia2 regression data
|
Python
|
bsd-3-clause
|
xia2/i19
|
Add integration test for i19 screen using xia2 regression data
|
from __future__ import division, absolute_import
from i19.command_line.screen import i19_screen
def test_i19screen_command_line_help_does_not_crash():
i19_screen().run('')
def test_i19screen(tmpdir):
import os
import libtbx
xia2_regression = libtbx.env.under_build("xia2_regression")
data_dir = os.path.join(xia2_regression, "test_data", "X4_wide")
olddir = tmpdir.chdir()
i19_screen().run([data_dir])
with tmpdir.join('i19.screen.log').open() as fh:
logfile = fh.read()
assert 'i19.screen successfully completed' in logfile
assert 'photon incidence rate is outside the linear response region' in logfile
|
<commit_before><commit_msg>Add integration test for i19 screen using xia2 regression data<commit_after>
|
from __future__ import division, absolute_import
from i19.command_line.screen import i19_screen
def test_i19screen_command_line_help_does_not_crash():
i19_screen().run('')
def test_i19screen(tmpdir):
import os
import libtbx
xia2_regression = libtbx.env.under_build("xia2_regression")
data_dir = os.path.join(xia2_regression, "test_data", "X4_wide")
olddir = tmpdir.chdir()
i19_screen().run([data_dir])
with tmpdir.join('i19.screen.log').open() as fh:
logfile = fh.read()
assert 'i19.screen successfully completed' in logfile
assert 'photon incidence rate is outside the linear response region' in logfile
|
Add integration test for i19 screen using xia2 regression datafrom __future__ import division, absolute_import
from i19.command_line.screen import i19_screen
def test_i19screen_command_line_help_does_not_crash():
i19_screen().run('')
def test_i19screen(tmpdir):
import os
import libtbx
xia2_regression = libtbx.env.under_build("xia2_regression")
data_dir = os.path.join(xia2_regression, "test_data", "X4_wide")
olddir = tmpdir.chdir()
i19_screen().run([data_dir])
with tmpdir.join('i19.screen.log').open() as fh:
logfile = fh.read()
assert 'i19.screen successfully completed' in logfile
assert 'photon incidence rate is outside the linear response region' in logfile
|
<commit_before><commit_msg>Add integration test for i19 screen using xia2 regression data<commit_after>from __future__ import division, absolute_import
from i19.command_line.screen import i19_screen
def test_i19screen_command_line_help_does_not_crash():
i19_screen().run('')
def test_i19screen(tmpdir):
import os
import libtbx
xia2_regression = libtbx.env.under_build("xia2_regression")
data_dir = os.path.join(xia2_regression, "test_data", "X4_wide")
olddir = tmpdir.chdir()
i19_screen().run([data_dir])
with tmpdir.join('i19.screen.log').open() as fh:
logfile = fh.read()
assert 'i19.screen successfully completed' in logfile
assert 'photon incidence rate is outside the linear response region' in logfile
|
|
776ba9510fa80b68904b03c9a7620751ffdc41ac
|
contentcuration/contentcuration/tests/test_makemessages.py
|
contentcuration/contentcuration/tests/test_makemessages.py
|
import os
import pathlib
import subprocess
from django.conf import settings
from django.test import TestCase
class MakeMessagesCommandRunTestCase(TestCase):
"""
Sanity check to make sure makemessages runs to completion.
"""
def test_command_succeeds_without_postgres(self):
"""
Test that we can run makemessages when postgres is not activated.
"""
repo_root = pathlib.Path(settings.BASE_DIR).parent
cmd = ["make", "makemessages"]
env = os.environ.copy()
# We fake postgres not being available, by setting the wrong IP address.
# hopefully postgres isn't running at 127.0.0.2!
env.update({"DATA_DB_HOST": "127.0.0.2"})
subprocess.check_output(
cmd,
env=env,
cwd=str(repo_root)
)
|
Write test to confirm that makemessages works without needing postgres
|
Write test to confirm that makemessages works without needing postgres
|
Python
|
mit
|
DXCanas/content-curation,DXCanas/content-curation,DXCanas/content-curation,DXCanas/content-curation
|
Write test to confirm that makemessages works without needing postgres
|
import os
import pathlib
import subprocess
from django.conf import settings
from django.test import TestCase
class MakeMessagesCommandRunTestCase(TestCase):
"""
Sanity check to make sure makemessages runs to completion.
"""
def test_command_succeeds_without_postgres(self):
"""
Test that we can run makemessages when postgres is not activated.
"""
repo_root = pathlib.Path(settings.BASE_DIR).parent
cmd = ["make", "makemessages"]
env = os.environ.copy()
# We fake postgres not being available, by setting the wrong IP address.
# hopefully postgres isn't running at 127.0.0.2!
env.update({"DATA_DB_HOST": "127.0.0.2"})
subprocess.check_output(
cmd,
env=env,
cwd=str(repo_root)
)
|
<commit_before><commit_msg>Write test to confirm that makemessages works without needing postgres<commit_after>
|
import os
import pathlib
import subprocess
from django.conf import settings
from django.test import TestCase
class MakeMessagesCommandRunTestCase(TestCase):
"""
Sanity check to make sure makemessages runs to completion.
"""
def test_command_succeeds_without_postgres(self):
"""
Test that we can run makemessages when postgres is not activated.
"""
repo_root = pathlib.Path(settings.BASE_DIR).parent
cmd = ["make", "makemessages"]
env = os.environ.copy()
# We fake postgres not being available, by setting the wrong IP address.
# hopefully postgres isn't running at 127.0.0.2!
env.update({"DATA_DB_HOST": "127.0.0.2"})
subprocess.check_output(
cmd,
env=env,
cwd=str(repo_root)
)
|
Write test to confirm that makemessages works without needing postgresimport os
import pathlib
import subprocess
from django.conf import settings
from django.test import TestCase
class MakeMessagesCommandRunTestCase(TestCase):
"""
Sanity check to make sure makemessages runs to completion.
"""
def test_command_succeeds_without_postgres(self):
"""
Test that we can run makemessages when postgres is not activated.
"""
repo_root = pathlib.Path(settings.BASE_DIR).parent
cmd = ["make", "makemessages"]
env = os.environ.copy()
# We fake postgres not being available, by setting the wrong IP address.
# hopefully postgres isn't running at 127.0.0.2!
env.update({"DATA_DB_HOST": "127.0.0.2"})
subprocess.check_output(
cmd,
env=env,
cwd=str(repo_root)
)
|
<commit_before><commit_msg>Write test to confirm that makemessages works without needing postgres<commit_after>import os
import pathlib
import subprocess
from django.conf import settings
from django.test import TestCase
class MakeMessagesCommandRunTestCase(TestCase):
"""
Sanity check to make sure makemessages runs to completion.
"""
def test_command_succeeds_without_postgres(self):
"""
Test that we can run makemessages when postgres is not activated.
"""
repo_root = pathlib.Path(settings.BASE_DIR).parent
cmd = ["make", "makemessages"]
env = os.environ.copy()
# We fake postgres not being available, by setting the wrong IP address.
# hopefully postgres isn't running at 127.0.0.2!
env.update({"DATA_DB_HOST": "127.0.0.2"})
subprocess.check_output(
cmd,
env=env,
cwd=str(repo_root)
)
|
|
b3c60add1a75975b2e420747b545219ae954b1e2
|
sort-readlist.py
|
sort-readlist.py
|
#!/usr/bin/python
import logging
import re
import subprocess
import sys
import os
import calibre_config
from calibre.library.database2 import LibraryDatabase2
from calibre.utils.config import prefs
import args
TODO_DIR = os.path.join(os.environ['HOME'], 'Dropbox', 'todo')
ISSUE_PATTERN = re.compile('(\d+) (.*)$')
args.add_argument('--todobin', '-t', help='path to todo.txt script',
type=str, default='todo.sh')
args.add_argument('--todofile', '-f', help='path to todo.txt file',
type=str, default=os.path.join(TODO_DIR, 'todo.txt'))
args.add_argument('--verbose', '-v', help='Enable verbose logging',
action='store_true')
ARGS={}
def get_issues():
for line in open(ARGS.todofile, 'r'):
issueid = ISSUE_PATTERN.match(line)
if issueid:
yield int(issueid.group(1)), issueid.group(2)
def get_issue_details():
db = LibraryDatabase2(prefs['library_path'])
for issue, title in get_issues():
mi = db.get_metadata(issue, index_is_id=True)
yield mi.pubdate, title, issue
def main():
logger = logging.getLogger()
if ARGS.verbose:
logger.setLevel(logging.DEBUG)
# archive todo list
try:
output = subprocess.check_output([ARGS.todobin, 'archive'])
    except subprocess.CalledProcessError as e:
logging.error('Unable to archive old items: %s', e.output)
issues = sorted(get_issue_details())
# Write out sorted list
for pubdate, title, idx in issues:
print "%d %s" % (idx, title)
if __name__ == '__main__':
ARGS = args.parse_args()
main()
|
Sort to-read list by publication date
|
Sort to-read list by publication date
|
Python
|
mit
|
xchewtoyx/comicmgt,xchewtoyx/comicmgt
|
Sort to-read list by publication date
|
#!/usr/bin/python
import logging
import re
import subprocess
import sys
import os
import calibre_config
from calibre.library.database2 import LibraryDatabase2
from calibre.utils.config import prefs
import args
TODO_DIR = os.path.join(os.environ['HOME'], 'Dropbox', 'todo')
ISSUE_PATTERN = re.compile('(\d+) (.*)$')
args.add_argument('--todobin', '-t', help='path to todo.txt script',
type=str, default='todo.sh')
args.add_argument('--todofile', '-f', help='path to todo.txt file',
type=str, default=os.path.join(TODO_DIR, 'todo.txt'))
args.add_argument('--verbose', '-v', help='Enable verbose logging',
action='store_true')
ARGS={}
def get_issues():
for line in open(ARGS.todofile, 'r'):
issueid = ISSUE_PATTERN.match(line)
if issueid:
yield int(issueid.group(1)), issueid.group(2)
def get_issue_details():
db = LibraryDatabase2(prefs['library_path'])
for issue, title in get_issues():
mi = db.get_metadata(issue, index_is_id=True)
yield mi.pubdate, title, issue
def main():
logger = logging.getLogger()
if ARGS.verbose:
logger.setLevel(logging.DEBUG)
# archive todo list
try:
output = subprocess.check_output([ARGS.todobin, 'archive'])
    except subprocess.CalledProcessError as e:
logging.error('Unable to archive old items: %s', e.output)
issues = sorted(get_issue_details())
# Write out sorted list
for pubdate, title, idx in issues:
print "%d %s" % (idx, title)
if __name__ == '__main__':
ARGS = args.parse_args()
main()
|
<commit_before><commit_msg>Sort to-read list by publication date<commit_after>
|
#!/usr/bin/python
import logging
import re
import subprocess
import sys
import os
import calibre_config
from calibre.library.database2 import LibraryDatabase2
from calibre.utils.config import prefs
import args
TODO_DIR = os.path.join(os.environ['HOME'], 'Dropbox', 'todo')
ISSUE_PATTERN = re.compile('(\d+) (.*)$')
args.add_argument('--todobin', '-t', help='path to todo.txt script',
type=str, default='todo.sh')
args.add_argument('--todofile', '-f', help='path to todo.txt file',
type=str, default=os.path.join(TODO_DIR, 'todo.txt'))
args.add_argument('--verbose', '-v', help='Enable verbose logging',
action='store_true')
ARGS={}
def get_issues():
for line in open(ARGS.todofile, 'r'):
issueid = ISSUE_PATTERN.match(line)
if issueid:
yield int(issueid.group(1)), issueid.group(2)
def get_issue_details():
db = LibraryDatabase2(prefs['library_path'])
for issue, title in get_issues():
mi = db.get_metadata(issue, index_is_id=True)
yield mi.pubdate, title, issue
def main():
logger = logging.getLogger()
if ARGS.verbose:
logger.setLevel(logging.DEBUG)
# archive todo list
try:
output = subprocess.check_output([ARGS.todobin, 'archive'])
    except subprocess.CalledProcessError as e:
logging.error('Unable to archive old items: %s', e.output)
issues = sorted(get_issue_details())
# Write out sorted list
for pubdate, title, idx in issues:
print "%d %s" % (idx, title)
if __name__ == '__main__':
ARGS = args.parse_args()
main()
|
Sort to-read list by publication date#!/usr/bin/python
import logging
import re
import subprocess
import sys
import os
import calibre_config
from calibre.library.database2 import LibraryDatabase2
from calibre.utils.config import prefs
import args
TODO_DIR = os.path.join(os.environ['HOME'], 'Dropbox', 'todo')
ISSUE_PATTERN = re.compile('(\d+) (.*)$')
args.add_argument('--todobin', '-t', help='path to todo.txt script',
type=str, default='todo.sh')
args.add_argument('--todofile', '-f', help='path to todo.txt file',
type=str, default=os.path.join(TODO_DIR, 'todo.txt'))
args.add_argument('--verbose', '-v', help='Enable verbose logging',
action='store_true')
ARGS={}
def get_issues():
for line in open(ARGS.todofile, 'r'):
issueid = ISSUE_PATTERN.match(line)
if issueid:
yield int(issueid.group(1)), issueid.group(2)
def get_issue_details():
db = LibraryDatabase2(prefs['library_path'])
for issue, title in get_issues():
mi = db.get_metadata(issue, index_is_id=True)
yield mi.pubdate, title, issue
def main():
logger = logging.getLogger()
if ARGS.verbose:
logger.setLevel(logging.DEBUG)
# archive todo list
try:
output = subprocess.check_output([ARGS.todobin, 'archive'])
    except subprocess.CalledProcessError as e:
logging.error('Unable to archive old items: %s', e.output)
issues = sorted(get_issue_details())
# Write out sorted list
for pubdate, title, idx in issues:
print "%d %s" % (idx, title)
if __name__ == '__main__':
ARGS = args.parse_args()
main()
|
<commit_before><commit_msg>Sort to-read list by publication date<commit_after>#!/usr/bin/python
import logging
import re
import subprocess
import sys
import os
import calibre_config
from calibre.library.database2 import LibraryDatabase2
from calibre.utils.config import prefs
import args
TODO_DIR = os.path.join(os.environ['HOME'], 'Dropbox', 'todo')
ISSUE_PATTERN = re.compile('(\d+) (.*)$')
args.add_argument('--todobin', '-t', help='path to todo.txt script',
type=str, default='todo.sh')
args.add_argument('--todofile', '-f', help='path to todo.txt file',
type=str, default=os.path.join(TODO_DIR, 'todo.txt'))
args.add_argument('--verbose', '-v', help='Enable verbose logging',
action='store_true')
ARGS={}
def get_issues():
for line in open(ARGS.todofile, 'r'):
issueid = ISSUE_PATTERN.match(line)
if issueid:
yield int(issueid.group(1)), issueid.group(2)
def get_issue_details():
db = LibraryDatabase2(prefs['library_path'])
for issue, title in get_issues():
mi = db.get_metadata(issue, index_is_id=True)
yield mi.pubdate, title, issue
def main():
logger = logging.getLogger()
if ARGS.verbose:
logger.setLevel(logging.DEBUG)
# archive todo list
try:
output = subprocess.check_output([ARGS.todobin, 'archive'])
    except subprocess.CalledProcessError as e:
logging.error('Unable to archive old items: %s', e.output)
issues = sorted(get_issue_details())
# Write out sorted list
for pubdate, title, idx in issues:
print "%d %s" % (idx, title)
if __name__ == '__main__':
ARGS = args.parse_args()
main()
|
|
17f0b440acc0e67067704d93427c3b4ce7ef5d9e
|
common/lib/xmodule/xmodule/modulestore/mongoengine_fields.py
|
common/lib/xmodule/xmodule/modulestore/mongoengine_fields.py
|
"""
Custom field types for mongoengine
"""
import mongoengine
from xmodule.modulestore.locations import SlashSeparatedCourseKey, Location
from types import NoneType
from xmodule.modulestore.keys import CourseKey
class CourseKeyField(mongoengine.StringField):
"""
Serializes and deserializes CourseKey's to mongo dbs which use mongoengine
"""
def __init__(self, **kwargs):
# it'd be useful to add init args such as support_deprecated, force_deprecated
super(CourseKeyField, self).__init__(**kwargs)
def to_mongo(self, course_key):
"""
For now saves the course key in the deprecated form
"""
assert isinstance(course_key, (NoneType, SlashSeparatedCourseKey))
if course_key:
# don't call super as base.BaseField.to_mongo calls to_python() for some odd reason
return course_key.to_deprecated_string()
else:
return None
def to_python(self, course_key):
"""
Deserialize to a CourseKey instance
"""
# calling super b/c it decodes utf (and doesn't have circularity of from_python)
course_key = super(CourseKeyField, self).to_python(course_key)
assert isinstance(course_key, (NoneType, basestring, SlashSeparatedCourseKey))
if course_key == '':
return None
if isinstance(course_key, basestring):
return SlashSeparatedCourseKey.from_deprecated_string(course_key)
else:
return course_key
def validate(self, value):
assert isinstance(value, (NoneType, basestring, SlashSeparatedCourseKey))
if isinstance(value, CourseKey):
return super(CourseKeyField, self).validate(value.to_deprecated_string())
else:
return super(CourseKeyField, self).validate(value)
def prepare_query_value(self, _opt, value):
return self.to_mongo(value)
class UsageKeyField(mongoengine.StringField):
"""
Represent a UsageKey as a single string in Mongo
"""
def to_mongo(self, location):
"""
For now saves the usage key in the deprecated location i4x/c4x form
"""
assert isinstance(location, (NoneType, SlashSeparatedCourseKey))
if location is None:
return location
return super(UsageKeyField, self).to_mongo(location.to_deprecated_string())
def to_python(self, location):
"""
Deserialize to a UsageKey instance: for now it's a location missing the run
"""
assert isinstance(location, (NoneType, basestring, Location))
if location == '':
return None
if isinstance(location, basestring):
location = super(UsageKeyField, self).to_python(location)
return Location.from_deprecated_string(location)
else:
return location
def validate(self, value):
assert isinstance(value, (NoneType, basestring, Location))
if isinstance(value, Location):
return super(UsageKeyField, self).validate(value.to_deprecated_string())
else:
return super(UsageKeyField, self).validate(value)
def prepare_query_value(self, _opt, value):
return self.to_mongo(value)
|
Make course ids and usage ids opaque to LMS and Studio [partial commit]
|
Make course ids and usage ids opaque to LMS and Studio [partial commit]
This commit adds custom mongoengine fields for CourseKeys and UsageKeys.
These keys are now objects with a limited interface, and the particular
internal representation is managed by the data storage layer (the
modulestore).
For the LMS, there should be no outward-facing changes to the system.
The keys are, for now, a change to internal representation only. For
Studio, the new serialized form of the keys is used in urls, to allow
for further migration in the future.
Co-Author: Andy Armstrong <andya@edx.org>
Co-Author: Christina Roberts <christina@edx.org>
Co-Author: David Baumgold <db@edx.org>
Co-Author: Diana Huang <dkh@edx.org>
Co-Author: Don Mitchell <dmitchell@edx.org>
Co-Author: Julia Hansbrough <julia@edx.org>
Co-Author: Nimisha Asthagiri <nasthagiri@edx.org>
Co-Author: Sarina Canelake <sarina@edx.org>
[LMS-2370]
|
Python
|
agpl-3.0
|
analyseuc3m/ANALYSE-v1,LearnEra/LearnEraPlaftform,jazkarta/edx-platform,dsajkl/123,zubair-arbi/edx-platform,cecep-edu/edx-platform,jamiefolsom/edx-platform,peterm-itr/edx-platform,proversity-org/edx-platform,ferabra/edx-platform,ESOedX/edx-platform,tiagochiavericosta/edx-platform,beacloudgenius/edx-platform,chand3040/cloud_that,beacloudgenius/edx-platform,dsajkl/123,andyzsf/edx,jamiefolsom/edx-platform,eemirtekin/edx-platform,unicri/edx-platform,Endika/edx-platform,dcosentino/edx-platform,chauhanhardik/populo_2,auferack08/edx-platform,dcosentino/edx-platform,jamiefolsom/edx-platform,cecep-edu/edx-platform,caesar2164/edx-platform,nanolearning/edx-platform,jonathan-beard/edx-platform,alu042/edx-platform,kmoocdev2/edx-platform,mcgachey/edx-platform,Softmotions/edx-platform,motion2015/edx-platform,jjmiranda/edx-platform,kmoocdev2/edx-platform,waheedahmed/edx-platform,B-MOOC/edx-platform,nanolearningllc/edx-platform-cypress,edx/edx-platform,eduNEXT/edx-platform,hastexo/edx-platform,shurihell/testasia,zhenzhai/edx-platform,Edraak/circleci-edx-platform,Endika/edx-platform,nttks/jenkins-test,polimediaupv/edx-platform,Unow/edx-platform,naresh21/synergetics-edx-platform,appliedx/edx-platform,cyanna/edx-platform,cognitiveclass/edx-platform,OmarIthawi/edx-platform,ovnicraft/edx-platform,dsajkl/123,Unow/edx-platform,antonve/s4-project-mooc,SravanthiSinha/edx-platform,analyseuc3m/ANALYSE-v1,xuxiao19910803/edx,ferabra/edx-platform,eduNEXT/edunext-platform,chauhanhardik/populo,shubhdev/edx-platform,alexthered/kienhoc-platform,nttks/jenkins-test,edry/edx-platform,eduNEXT/edunext-platform,zerobatu/edx-platform,etzhou/edx-platform,alexthered/kienhoc-platform,IndonesiaX/edx-platform,SravanthiSinha/edx-platform,jamesblunt/edx-platform,nttks/jenkins-test,fly19890211/edx-platform,edx/edx-platform,prarthitm/edxplatform,tanmaykm/edx-platform,utecuy/edx-platform,synergeticsedx/deployment-wipro,hmcmooc/muddx-platform,OmarIthawi/edx-platform,IONISx/edx-platform,procangroup/edx-platform,JCBarahona/edX,ahmadiga/min_edx,DNFcode/edx-platform,mcgachey/edx-platform,bdero/edx-platform,itsjeyd/edx-platform,jazztpt/edx-platform,jamesblunt/edx-platform,mbareta/edx-platform-ft,caesar2164/edx-platform,J861449197/edx-platform,longmen21/edx-platform,Semi-global/edx-platform,ferabra/edx-platform,rismalrv/edx-platform,iivic/BoiseStateX,nanolearningllc/edx-platform-cypress-2,UOMx/edx-platform,bitifirefly/edx-platform,msegado/edx-platform,jazkarta/edx-platform,eestay/edx-platform,pabloborrego93/edx-platform,MSOpenTech/edx-platform,appsembler/edx-platform,iivic/BoiseStateX,utecuy/edx-platform,xuxiao19910803/edx-platform,auferack08/edx-platform,solashirai/edx-platform,devs1991/test_edx_docmode,jazkarta/edx-platform-for-isc,romain-li/edx-platform,jswope00/griffinx,vikas1885/test1,raccoongang/edx-platform,ahmadio/edx-platform,nanolearningllc/edx-platform-cypress-2,antonve/s4-project-mooc,adoosii/edx-platform,rue89-tech/edx-platform,MSOpenTech/edx-platform,DefyVentures/edx-platform,amir-qayyum-khan/edx-platform,dcosentino/edx-platform,stvstnfrd/edx-platform,iivic/BoiseStateX,miptliot/edx-platform,SravanthiSinha/edx-platform,halvertoluke/edx-platform,zerobatu/edx-platform,alu042/edx-platform,eestay/edx-platform,vismartltd/edx-platform,antoviaque/edx-platform,nagyistoce/edx-platform,kamalx/edx-platform,mjirayu/sit_academy,xingyepei/edx-platform,teltek/edx-platform,alexthered/kienhoc-platform,hamzehd/edx-platform,unicri/edx-platform,pabloborrego93/edx-platform,TeachAtTUM/edx-platform,sameetb-cuelogic/edx-platform-test,philanthropy-u/edx-platfo
rm,10clouds/edx-platform,appliedx/edx-platform,ampax/edx-platform,deepsrijit1105/edx-platform,chand3040/cloud_that,mtlchun/edx,wwj718/ANALYSE,inares/edx-platform,unicri/edx-platform,mbareta/edx-platform-ft,y12uc231/edx-platform,teltek/edx-platform,fintech-circle/edx-platform,vikas1885/test1,caesar2164/edx-platform,cpennington/edx-platform,arbrandes/edx-platform,CredoReference/edx-platform,mitocw/edx-platform,chauhanhardik/populo,4eek/edx-platform,carsongee/edx-platform,openfun/edx-platform,tiagochiavericosta/edx-platform,BehavioralInsightsTeam/edx-platform,dkarakats/edx-platform,romain-li/edx-platform,shubhdev/openedx,playm2mboy/edx-platform,Livit/Livit.Learn.EdX,Edraak/circleci-edx-platform,cognitiveclass/edx-platform,miptliot/edx-platform,ak2703/edx-platform,Softmotions/edx-platform,Unow/edx-platform,edx-solutions/edx-platform,solashirai/edx-platform,chudaol/edx-platform,dsajkl/reqiop,gsehub/edx-platform,fly19890211/edx-platform,eduNEXT/edx-platform,Livit/Livit.Learn.EdX,pabloborrego93/edx-platform,nanolearning/edx-platform,hastexo/edx-platform,kxliugang/edx-platform,nikolas/edx-platform,shubhdev/openedx,pabloborrego93/edx-platform,simbs/edx-platform,mtlchun/edx,nanolearningllc/edx-platform-cypress,mjirayu/sit_academy,edx-solutions/edx-platform,gymnasium/edx-platform,philanthropy-u/edx-platform,hamzehd/edx-platform,knehez/edx-platform,MakeHer/edx-platform,hmcmooc/muddx-platform,fly19890211/edx-platform,amir-qayyum-khan/edx-platform,ZLLab-Mooc/edx-platform,xuxiao19910803/edx,synergeticsedx/deployment-wipro,cselis86/edx-platform,eduNEXT/edunext-platform,don-github/edx-platform,halvertoluke/edx-platform,AkA84/edx-platform,JioEducation/edx-platform,lduarte1991/edx-platform,kmoocdev/edx-platform,arifsetiawan/edx-platform,zerobatu/edx-platform,J861449197/edx-platform,JioEducation/edx-platform,inares/edx-platform,dcosentino/edx-platform,B-MOOC/edx-platform,nagyistoce/edx-platform,Lektorium-LLC/edx-platform,Edraak/circleci-edx-platform,J861449197/edx-platform,peterm-itr/edx-platform,ferabra/edx-platform,mahendra-r/edx-platform,naresh21/synergetics-edx-platform,nikolas/edx-platform,nanolearningllc/edx-platform-cypress-2,eduNEXT/edunext-platform,zerobatu/edx-platform,ubc/edx-platform,eduNEXT/edx-platform,RPI-OPENEDX/edx-platform,stvstnfrd/edx-platform,dcosentino/edx-platform,andyzsf/edx,chudaol/edx-platform,valtech-mooc/edx-platform,prarthitm/edxplatform,jswope00/griffinx,utecuy/edx-platform,shashank971/edx-platform,nanolearningllc/edx-platform-cypress,BehavioralInsightsTeam/edx-platform,msegado/edx-platform,tanmaykm/edx-platform,adoosii/edx-platform,IONISx/edx-platform,zhenzhai/edx-platform,nttks/edx-platform,pomegranited/edx-platform,4eek/edx-platform,jazkarta/edx-platform-for-isc,IONISx/edx-platform,mbareta/edx-platform-ft,rhndg/openedx,openfun/edx-platform,Softmotions/edx-platform,carsongee/edx-platform,nanolearning/edx-platform,Edraak/edx-platform,Edraak/edraak-platform,iivic/BoiseStateX,devs1991/test_edx_docmode,pepeportela/edx-platform,benpatterson/edx-platform,amir-qayyum-khan/edx-platform,deepsrijit1105/edx-platform,procangroup/edx-platform,hamzehd/edx-platform,UXE/local-edx,ESOedX/edx-platform,doismellburning/edx-platform,inares/edx-platform,Stanford-Online/edx-platform,jonathan-beard/edx-platform,shubhdev/edxOnBaadal,appliedx/edx-platform,Unow/edx-platform,morenopc/edx-platform,ZLLab-Mooc/edx-platform,atsolakid/edx-platform,Stanford-Online/edx-platform,angelapper/edx-platform,eemirtekin/edx-platform,beni55/edx-platform,zofuthan/edx-platform,JCBarahona/edX,jzoldak/edx-platform,Shrhawk/edx
-platform,longmen21/edx-platform,jelugbo/tundex,abdoosh00/edraak,etzhou/edx-platform,kxliugang/edx-platform,leansoft/edx-platform,ubc/edx-platform,eestay/edx-platform,caesar2164/edx-platform,jelugbo/tundex,jolyonb/edx-platform,LICEF/edx-platform,jazztpt/edx-platform,jolyonb/edx-platform,jazkarta/edx-platform-for-isc,adoosii/edx-platform,arifsetiawan/edx-platform,proversity-org/edx-platform,devs1991/test_edx_docmode,UXE/local-edx,prarthitm/edxplatform,doismellburning/edx-platform,cselis86/edx-platform,proversity-org/edx-platform,mushtaqak/edx-platform,TeachAtTUM/edx-platform,IONISx/edx-platform,rhndg/openedx,zadgroup/edx-platform,y12uc231/edx-platform,playm2mboy/edx-platform,shabab12/edx-platform,edry/edx-platform,CredoReference/edx-platform,miptliot/edx-platform,EDUlib/edx-platform,Edraak/circleci-edx-platform,olexiim/edx-platform,jjmiranda/edx-platform,mahendra-r/edx-platform,TeachAtTUM/edx-platform,arbrandes/edx-platform,shubhdev/edx-platform,shubhdev/openedx,gsehub/edx-platform,Kalyzee/edx-platform,RPI-OPENEDX/edx-platform,sudheerchintala/LearnEraPlatForm,fly19890211/edx-platform,bitifirefly/edx-platform,zubair-arbi/edx-platform,jonathan-beard/edx-platform,kamalx/edx-platform,Semi-global/edx-platform,CredoReference/edx-platform,MSOpenTech/edx-platform,ampax/edx-platform,vasyarv/edx-platform,beacloudgenius/edx-platform,mitocw/edx-platform,zofuthan/edx-platform,cpennington/edx-platform,fintech-circle/edx-platform,beni55/edx-platform,cecep-edu/edx-platform,EDUlib/edx-platform,4eek/edx-platform,hamzehd/edx-platform,martynovp/edx-platform,franosincic/edx-platform,MakeHer/edx-platform,lduarte1991/edx-platform,polimediaupv/edx-platform,Edraak/edraak-platform,auferack08/edx-platform,Semi-global/edx-platform,hamzehd/edx-platform,raccoongang/edx-platform,EDUlib/edx-platform,raccoongang/edx-platform,stvstnfrd/edx-platform,openfun/edx-platform,playm2mboy/edx-platform,cselis86/edx-platform,pepeportela/edx-platform,chauhanhardik/populo_2,nanolearning/edx-platform,JioEducation/edx-platform,playm2mboy/edx-platform,marcore/edx-platform,xingyepei/edx-platform,jelugbo/tundex,UOMx/edx-platform,B-MOOC/edx-platform,cecep-edu/edx-platform,Semi-global/edx-platform,JCBarahona/edX,bigdatauniversity/edx-platform,mushtaqak/edx-platform,benpatterson/edx-platform,rismalrv/edx-platform,Endika/edx-platform,vismartltd/edx-platform,kmoocdev2/edx-platform,jelugbo/tundex,mushtaqak/edx-platform,jonathan-beard/edx-platform,kxliugang/edx-platform,xuxiao19910803/edx-platform,valtech-mooc/edx-platform,chudaol/edx-platform,jbassen/edx-platform,jazztpt/edx-platform,leansoft/edx-platform,bdero/edx-platform,utecuy/edx-platform,beni55/edx-platform,waheedahmed/edx-platform,mitocw/edx-platform,kmoocdev/edx-platform,Stanford-Online/edx-platform,doismellburning/edx-platform,valtech-mooc/edx-platform,shurihell/testasia,morenopc/edx-platform,OmarIthawi/edx-platform,nanolearningllc/edx-platform-cypress,kursitet/edx-platform,mahendra-r/edx-platform,ampax/edx-platform-backup,cognitiveclass/edx-platform,mbareta/edx-platform-ft,auferack08/edx-platform,jruiperezv/ANALYSE,cselis86/edx-platform,antonve/s4-project-mooc,marcore/edx-platform,vasyarv/edx-platform,ZLLab-Mooc/edx-platform,chrisndodge/edx-platform,cognitiveclass/edx-platform,peterm-itr/edx-platform,mjirayu/sit_academy,UXE/local-edx,angelapper/edx-platform,jolyonb/edx-platform,adoosii/edx-platform,mtlchun/edx,nagyistoce/edx-platform,doismellburning/edx-platform,rismalrv/edx-platform,zadgroup/edx-platform,Ayub-Khan/edx-platform,shurihell/testasia,cselis86/edx-platform,stvstnfrd/edx-platfo
rm,kamalx/edx-platform,pomegranited/edx-platform,alexthered/kienhoc-platform,nttks/edx-platform,devs1991/test_edx_docmode,chrisndodge/edx-platform,halvertoluke/edx-platform,proversity-org/edx-platform,jjmiranda/edx-platform,Stanford-Online/edx-platform,olexiim/edx-platform,jzoldak/edx-platform,abdoosh00/edraak,ferabra/edx-platform,lduarte1991/edx-platform,doismellburning/edx-platform,rismalrv/edx-platform,tiagochiavericosta/edx-platform,xinjiguaike/edx-platform,polimediaupv/edx-platform,bitifirefly/edx-platform,carsongee/edx-platform,y12uc231/edx-platform,franosincic/edx-platform,carsongee/edx-platform,ak2703/edx-platform,nttks/jenkins-test,defance/edx-platform,Edraak/edx-platform,naresh21/synergetics-edx-platform,halvertoluke/edx-platform,ESOedX/edx-platform,cyanna/edx-platform,simbs/edx-platform,shashank971/edx-platform,jruiperezv/ANALYSE,doganov/edx-platform,morenopc/edx-platform,kmoocdev/edx-platform,xinjiguaike/edx-platform,bigdatauniversity/edx-platform,synergeticsedx/deployment-wipro,edry/edx-platform,eemirtekin/edx-platform,edx-solutions/edx-platform,kursitet/edx-platform,Lektorium-LLC/edx-platform,cecep-edu/edx-platform,msegado/edx-platform,philanthropy-u/edx-platform,zhenzhai/edx-platform,shubhdev/edxOnBaadal,jbassen/edx-platform,pomegranited/edx-platform,philanthropy-u/edx-platform,chudaol/edx-platform,antoviaque/edx-platform,jamesblunt/edx-platform,analyseuc3m/ANALYSE-v1,appsembler/edx-platform,kursitet/edx-platform,beni55/edx-platform,ovnicraft/edx-platform,marcore/edx-platform,devs1991/test_edx_docmode,eestay/edx-platform,DNFcode/edx-platform,cyanna/edx-platform,xinjiguaike/edx-platform,olexiim/edx-platform,Edraak/edx-platform,motion2015/a3,nttks/edx-platform,romain-li/edx-platform,J861449197/edx-platform,jbzdak/edx-platform,ampax/edx-platform,Lektorium-LLC/edx-platform,zhenzhai/edx-platform,chauhanhardik/populo,Ayub-Khan/edx-platform,sameetb-cuelogic/edx-platform-test,motion2015/a3,unicri/edx-platform,angelapper/edx-platform,ahmedaljazzar/edx-platform,Edraak/edx-platform,CourseTalk/edx-platform,xuxiao19910803/edx,polimediaupv/edx-platform,louyihua/edx-platform,ZLLab-Mooc/edx-platform,Livit/Livit.Learn.EdX,BehavioralInsightsTeam/edx-platform,wwj718/ANALYSE,SivilTaram/edx-platform,procangroup/edx-platform,ampax/edx-platform-backup,kursitet/edx-platform,sameetb-cuelogic/edx-platform-test,benpatterson/edx-platform,B-MOOC/edx-platform,ahmadiga/min_edx,atsolakid/edx-platform,knehez/edx-platform,dsajkl/123,UOMx/edx-platform,gymnasium/edx-platform,jbassen/edx-platform,chand3040/cloud_that,jazkarta/edx-platform,don-github/edx-platform,ubc/edx-platform,ubc/edx-platform,jswope00/griffinx,dsajkl/reqiop,shubhdev/edxOnBaadal,jamiefolsom/edx-platform,shubhdev/edxOnBaadal,naresh21/synergetics-edx-platform,rue89-tech/edx-platform,andyzsf/edx,ahmadio/edx-platform,IONISx/edx-platform,analyseuc3m/ANALYSE-v1,hastexo/edx-platform,fly19890211/edx-platform,arbrandes/edx-platform,ampax/edx-platform-backup,10clouds/edx-platform,peterm-itr/edx-platform,eestay/edx-platform,Edraak/edraak-platform,IndonesiaX/edx-platform,Edraak/circleci-edx-platform,rhndg/openedx,vikas1885/test1,prarthitm/edxplatform,a-parhom/edx-platform,franosincic/edx-platform,jruiperezv/ANALYSE,chudaol/edx-platform,motion2015/a3,vasyarv/edx-platform,amir-qayyum-khan/edx-platform,valtech-mooc/edx-platform,xingyepei/edx-platform,deepsrijit1105/edx-platform,inares/edx-platform,xuxiao19910803/edx,mahendra-r/edx-platform,Livit/Livit.Learn.EdX,B-MOOC/edx-platform,appliedx/edx-platform,nttks/edx-platform,antonve/s4-project-mooc,dsajkl/123,be
acloudgenius/edx-platform,IndonesiaX/edx-platform,sameetb-cuelogic/edx-platform-test,defance/edx-platform,AkA84/edx-platform,shubhdev/openedx,chauhanhardik/populo_2,Kalyzee/edx-platform,kmoocdev2/edx-platform,Edraak/edraak-platform,jonathan-beard/edx-platform,jruiperezv/ANALYSE,cpennington/edx-platform,IndonesiaX/edx-platform,unicri/edx-platform,vasyarv/edx-platform,zerobatu/edx-platform,knehez/edx-platform,LearnEra/LearnEraPlaftform,nttks/edx-platform,simbs/edx-platform,doganov/edx-platform,wwj718/edx-platform,rhndg/openedx,jbassen/edx-platform,polimediaupv/edx-platform,kamalx/edx-platform,cyanna/edx-platform,morenopc/edx-platform,LearnEra/LearnEraPlaftform,openfun/edx-platform,SivilTaram/edx-platform,zadgroup/edx-platform,msegado/edx-platform,10clouds/edx-platform,Lektorium-LLC/edx-platform,itsjeyd/edx-platform,gymnasium/edx-platform,ak2703/edx-platform,mushtaqak/edx-platform,arbrandes/edx-platform,mtlchun/edx,rue89-tech/edx-platform,defance/edx-platform,longmen21/edx-platform,ahmadiga/min_edx,jazztpt/edx-platform,Softmotions/edx-platform,atsolakid/edx-platform,louyihua/edx-platform,wwj718/ANALYSE,dkarakats/edx-platform,BehavioralInsightsTeam/edx-platform,cpennington/edx-platform,waheedahmed/edx-platform,gsehub/edx-platform,nanolearning/edx-platform,jruiperezv/ANALYSE,antoviaque/edx-platform,shashank971/edx-platform,nanolearningllc/edx-platform-cypress-2,kursitet/edx-platform,4eek/edx-platform,ubc/edx-platform,CourseTalk/edx-platform,jbzdak/edx-platform,LICEF/edx-platform,etzhou/edx-platform,waheedahmed/edx-platform,chauhanhardik/populo_2,lduarte1991/edx-platform,sameetb-cuelogic/edx-platform-test,xinjiguaike/edx-platform,JCBarahona/edX,MakeHer/edx-platform,wwj718/edx-platform,pepeportela/edx-platform,kamalx/edx-platform,motion2015/edx-platform,chauhanhardik/populo,tanmaykm/edx-platform,teltek/edx-platform,halvertoluke/edx-platform,EDUlib/edx-platform,dkarakats/edx-platform,mtlchun/edx,cognitiveclass/edx-platform,tiagochiavericosta/edx-platform,SravanthiSinha/edx-platform,chrisndodge/edx-platform,kmoocdev/edx-platform,morenopc/edx-platform,nanolearningllc/edx-platform-cypress,jzoldak/edx-platform,AkA84/edx-platform,solashirai/edx-platform,wwj718/ANALYSE,motion2015/a3,itsjeyd/edx-platform,10clouds/edx-platform,martynovp/edx-platform,longmen21/edx-platform,Shrhawk/edx-platform,simbs/edx-platform,J861449197/edx-platform,solashirai/edx-platform,nttks/jenkins-test,tiagochiavericosta/edx-platform,shabab12/edx-platform,jazkarta/edx-platform,OmarIthawi/edx-platform,bigdatauniversity/edx-platform,longmen21/edx-platform,Semi-global/edx-platform,nikolas/edx-platform,raccoongang/edx-platform,mahendra-r/edx-platform,atsolakid/edx-platform,Edraak/edx-platform,martynovp/edx-platform,devs1991/test_edx_docmode,bigdatauniversity/edx-platform,DefyVentures/edx-platform,kmoocdev2/edx-platform,bigdatauniversity/edx-platform,Ayub-Khan/edx-platform,jolyonb/edx-platform,DefyVentures/edx-platform,AkA84/edx-platform,bdero/edx-platform,ampax/edx-platform,UOMx/edx-platform,Kalyzee/edx-platform,mcgachey/edx-platform,y12uc231/edx-platform,gymnasium/edx-platform,ovnicraft/edx-platform,vismartltd/edx-platform,arifsetiawan/edx-platform,louyihua/edx-platform,MSOpenTech/edx-platform,shashank971/edx-platform,fintech-circle/edx-platform,Shrhawk/edx-platform,adoosii/edx-platform,xuxiao19910803/edx-platform,deepsrijit1105/edx-platform,zofuthan/edx-platform,chand3040/cloud_that,solashirai/edx-platform,UXE/local-edx,doganov/edx-platform,devs1991/test_edx_docmode,chauhanhardik/populo,jazkarta/edx-platform-for-isc,xuxiao19910803/ed
x,Softmotions/edx-platform,appsembler/edx-platform,don-github/edx-platform,kxliugang/edx-platform,playm2mboy/edx-platform,louyihua/edx-platform,JCBarahona/edX,xuxiao19910803/edx-platform,pepeportela/edx-platform,DefyVentures/edx-platform,beacloudgenius/edx-platform,vismartltd/edx-platform,Kalyzee/edx-platform,shurihell/testasia,ak2703/edx-platform,xingyepei/edx-platform,motion2015/edx-platform,inares/edx-platform,a-parhom/edx-platform,procangroup/edx-platform,appliedx/edx-platform,dkarakats/edx-platform,knehez/edx-platform,edx/edx-platform,jazkarta/edx-platform-for-isc,vikas1885/test1,franosincic/edx-platform,teltek/edx-platform,franosincic/edx-platform,jzoldak/edx-platform,CredoReference/edx-platform,chrisndodge/edx-platform,sudheerchintala/LearnEraPlatForm,jazkarta/edx-platform,motion2015/a3,RPI-OPENEDX/edx-platform,sudheerchintala/LearnEraPlatForm,jazztpt/edx-platform,zofuthan/edx-platform,wwj718/edx-platform,atsolakid/edx-platform,jbzdak/edx-platform,RPI-OPENEDX/edx-platform,edry/edx-platform,nanolearningllc/edx-platform-cypress-2,dsajkl/reqiop,olexiim/edx-platform,zofuthan/edx-platform,simbs/edx-platform,rue89-tech/edx-platform,shubhdev/edx-platform,jjmiranda/edx-platform,arifsetiawan/edx-platform,edx-solutions/edx-platform,jamesblunt/edx-platform,motion2015/edx-platform,zadgroup/edx-platform,abdoosh00/edraak,chand3040/cloud_that,IndonesiaX/edx-platform,olexiim/edx-platform,SivilTaram/edx-platform,hastexo/edx-platform,zhenzhai/edx-platform,arifsetiawan/edx-platform,JioEducation/edx-platform,Shrhawk/edx-platform,jbassen/edx-platform,bitifirefly/edx-platform,leansoft/edx-platform,CourseTalk/edx-platform,waheedahmed/edx-platform,martynovp/edx-platform,eemirtekin/edx-platform,alu042/edx-platform,mjirayu/sit_academy,a-parhom/edx-platform,cyanna/edx-platform,leansoft/edx-platform,LICEF/edx-platform,4eek/edx-platform,eduNEXT/edx-platform,RPI-OPENEDX/edx-platform,ahmadio/edx-platform,angelapper/edx-platform,andyzsf/edx,shabab12/edx-platform,jamiefolsom/edx-platform,zubair-arbi/edx-platform,nikolas/edx-platform,ahmedaljazzar/edx-platform,knehez/edx-platform,chauhanhardik/populo_2,MakeHer/edx-platform,beni55/edx-platform,utecuy/edx-platform,SivilTaram/edx-platform,mitocw/edx-platform,jbzdak/edx-platform,DefyVentures/edx-platform,dkarakats/edx-platform,miptliot/edx-platform,CourseTalk/edx-platform,vasyarv/edx-platform,bitifirefly/edx-platform,LICEF/edx-platform,vikas1885/test1,xinjiguaike/edx-platform,jswope00/griffinx,wwj718/edx-platform,eemirtekin/edx-platform,dsajkl/reqiop,wwj718/ANALYSE,ESOedX/edx-platform,Ayub-Khan/edx-platform,leansoft/edx-platform,don-github/edx-platform,appsembler/edx-platform,Kalyzee/edx-platform,ovnicraft/edx-platform,ak2703/edx-platform,mcgachey/edx-platform,iivic/BoiseStateX,zubair-arbi/edx-platform,valtech-mooc/edx-platform,shabab12/edx-platform,ahmadio/edx-platform,MSOpenTech/edx-platform,AkA84/edx-platform,alu042/edx-platform,rismalrv/edx-platform,xingyepei/edx-platform,ahmadiga/min_edx,hmcmooc/muddx-platform,mushtaqak/edx-platform,don-github/edx-platform,gsehub/edx-platform,shubhdev/edx-platform,doganov/edx-platform,nagyistoce/edx-platform,Ayub-Khan/edx-platform,devs1991/test_edx_docmode,kxliugang/edx-platform,romain-li/edx-platform,hmcmooc/muddx-platform,antoviaque/edx-platform,SravanthiSinha/edx-platform,a-parhom/edx-platform,nagyistoce/edx-platform,nikolas/edx-platform,ahmedaljazzar/edx-platform,DNFcode/edx-platform,marcore/edx-platform,benpatterson/edx-platform,msegado/edx-platform,LearnEra/LearnEraPlaftform,zadgroup/edx-platform,ahmadio/edx-platform,fintech-c
ircle/edx-platform,pomegranited/edx-platform,SivilTaram/edx-platform,Shrhawk/edx-platform,benpatterson/edx-platform,defance/edx-platform,shubhdev/edxOnBaadal,martynovp/edx-platform,jbzdak/edx-platform,tanmaykm/edx-platform,shurihell/testasia,mcgachey/edx-platform,kmoocdev/edx-platform,pomegranited/edx-platform,zubair-arbi/edx-platform,etzhou/edx-platform,jamesblunt/edx-platform,doganov/edx-platform,jelugbo/tundex,etzhou/edx-platform,openfun/edx-platform,edry/edx-platform,ampax/edx-platform-backup,romain-li/edx-platform,xuxiao19910803/edx-platform,MakeHer/edx-platform,TeachAtTUM/edx-platform,antonve/s4-project-mooc,vismartltd/edx-platform,LICEF/edx-platform,synergeticsedx/deployment-wipro,alexthered/kienhoc-platform,abdoosh00/edraak,sudheerchintala/LearnEraPlatForm,ovnicraft/edx-platform,mjirayu/sit_academy,edx/edx-platform,shubhdev/openedx,ampax/edx-platform-backup,DNFcode/edx-platform,bdero/edx-platform,wwj718/edx-platform,jswope00/griffinx,ahmedaljazzar/edx-platform,shashank971/edx-platform,shubhdev/edx-platform,ahmadiga/min_edx,ZLLab-Mooc/edx-platform,DNFcode/edx-platform,motion2015/edx-platform,rhndg/openedx,rue89-tech/edx-platform,Endika/edx-platform,itsjeyd/edx-platform,y12uc231/edx-platform
|
Make course ids and usage ids opaque to LMS and Studio [partial commit]
This commit adds custom mongoengine fields for CourseKeys and UsageKeys.
These keys are now objects with a limited interface, and the particular
internal representation is managed by the data storage layer (the
modulestore).
For the LMS, there should be no outward-facing changes to the system.
The keys are, for now, a change to internal representation only. For
Studio, the new serialized form of the keys is used in urls, to allow
for further migration in the future.
Co-Author: Andy Armstrong <andya@edx.org>
Co-Author: Christina Roberts <christina@edx.org>
Co-Author: David Baumgold <db@edx.org>
Co-Author: Diana Huang <dkh@edx.org>
Co-Author: Don Mitchell <dmitchell@edx.org>
Co-Author: Julia Hansbrough <julia@edx.org>
Co-Author: Nimisha Asthagiri <nasthagiri@edx.org>
Co-Author: Sarina Canelake <sarina@edx.org>
[LMS-2370]
|
"""
Custom field types for mongoengine
"""
import mongoengine
from xmodule.modulestore.locations import SlashSeparatedCourseKey, Location
from types import NoneType
from xmodule.modulestore.keys import CourseKey
class CourseKeyField(mongoengine.StringField):
"""
Serializes and deserializes CourseKey's to mongo dbs which use mongoengine
"""
def __init__(self, **kwargs):
# it'd be useful to add init args such as support_deprecated, force_deprecated
super(CourseKeyField, self).__init__(**kwargs)
def to_mongo(self, course_key):
"""
For now saves the course key in the deprecated form
"""
assert isinstance(course_key, (NoneType, SlashSeparatedCourseKey))
if course_key:
# don't call super as base.BaseField.to_mongo calls to_python() for some odd reason
return course_key.to_deprecated_string()
else:
return None
def to_python(self, course_key):
"""
Deserialize to a CourseKey instance
"""
# calling super b/c it decodes utf (and doesn't have circularity of from_python)
course_key = super(CourseKeyField, self).to_python(course_key)
assert isinstance(course_key, (NoneType, basestring, SlashSeparatedCourseKey))
if course_key == '':
return None
if isinstance(course_key, basestring):
return SlashSeparatedCourseKey.from_deprecated_string(course_key)
else:
return course_key
def validate(self, value):
assert isinstance(value, (NoneType, basestring, SlashSeparatedCourseKey))
if isinstance(value, CourseKey):
return super(CourseKeyField, self).validate(value.to_deprecated_string())
else:
return super(CourseKeyField, self).validate(value)
def prepare_query_value(self, _opt, value):
return self.to_mongo(value)
class UsageKeyField(mongoengine.StringField):
"""
Represent a UsageKey as a single string in Mongo
"""
def to_mongo(self, location):
"""
For now saves the usage key in the deprecated location i4x/c4x form
"""
assert isinstance(location, (NoneType, SlashSeparatedCourseKey))
if location is None:
return location
return super(UsageKeyField, self).to_mongo(location.to_deprecated_string())
def to_python(self, location):
"""
Deserialize to a UsageKey instance: for now it's a location missing the run
"""
assert isinstance(location, (NoneType, basestring, Location))
if location == '':
return None
if isinstance(location, basestring):
location = super(UsageKeyField, self).to_python(location)
return Location.from_deprecated_string(location)
else:
return location
def validate(self, value):
assert isinstance(value, (NoneType, basestring, Location))
if isinstance(value, Location):
return super(UsageKeyField, self).validate(value.to_deprecated_string())
else:
return super(UsageKeyField, self).validate(value)
def prepare_query_value(self, _opt, value):
return self.to_mongo(value)
|
<commit_before><commit_msg>Make course ids and usage ids opaque to LMS and Studio [partial commit]
This commit adds custom mongoengine fields for CourseKeys and UsageKeys.
These keys are now objects with a limited interface, and the particular
internal representation is managed by the data storage layer (the
modulestore).
For the LMS, there should be no outward-facing changes to the system.
The keys are, for now, a change to internal representation only. For
Studio, the new serialized form of the keys is used in urls, to allow
for further migration in the future.
Co-Author: Andy Armstrong <andya@edx.org>
Co-Author: Christina Roberts <christina@edx.org>
Co-Author: David Baumgold <db@edx.org>
Co-Author: Diana Huang <dkh@edx.org>
Co-Author: Don Mitchell <dmitchell@edx.org>
Co-Author: Julia Hansbrough <julia@edx.org>
Co-Author: Nimisha Asthagiri <nasthagiri@edx.org>
Co-Author: Sarina Canelake <sarina@edx.org>
[LMS-2370]<commit_after>
|
"""
Custom field types for mongoengine
"""
import mongoengine
from xmodule.modulestore.locations import SlashSeparatedCourseKey, Location
from types import NoneType
from xmodule.modulestore.keys import CourseKey
class CourseKeyField(mongoengine.StringField):
"""
Serializes and deserializes CourseKey's to mongo dbs which use mongoengine
"""
def __init__(self, **kwargs):
# it'd be useful to add init args such as support_deprecated, force_deprecated
super(CourseKeyField, self).__init__(**kwargs)
def to_mongo(self, course_key):
"""
For now saves the course key in the deprecated form
"""
assert isinstance(course_key, (NoneType, SlashSeparatedCourseKey))
if course_key:
# don't call super as base.BaseField.to_mongo calls to_python() for some odd reason
return course_key.to_deprecated_string()
else:
return None
def to_python(self, course_key):
"""
Deserialize to a CourseKey instance
"""
# calling super b/c it decodes utf (and doesn't have circularity of from_python)
course_key = super(CourseKeyField, self).to_python(course_key)
assert isinstance(course_key, (NoneType, basestring, SlashSeparatedCourseKey))
if course_key == '':
return None
if isinstance(course_key, basestring):
return SlashSeparatedCourseKey.from_deprecated_string(course_key)
else:
return course_key
def validate(self, value):
assert isinstance(value, (NoneType, basestring, SlashSeparatedCourseKey))
if isinstance(value, CourseKey):
return super(CourseKeyField, self).validate(value.to_deprecated_string())
else:
return super(CourseKeyField, self).validate(value)
def prepare_query_value(self, _opt, value):
return self.to_mongo(value)
class UsageKeyField(mongoengine.StringField):
"""
Represent a UsageKey as a single string in Mongo
"""
def to_mongo(self, location):
"""
For now saves the usage key in the deprecated location i4x/c4x form
"""
assert isinstance(location, (NoneType, SlashSeparatedCourseKey))
if location is None:
return location
return super(UsageKeyField, self).to_mongo(location.to_deprecated_string())
def to_python(self, location):
"""
Deserialize to a UsageKey instance: for now it's a location missing the run
"""
assert isinstance(location, (NoneType, basestring, Location))
if location == '':
return None
if isinstance(location, basestring):
location = super(UsageKeyField, self).to_python(location)
return Location.from_deprecated_string(location)
else:
return location
def validate(self, value):
assert isinstance(value, (NoneType, basestring, Location))
if isinstance(value, Location):
return super(UsageKeyField, self).validate(value.to_deprecated_string())
else:
return super(UsageKeyField, self).validate(value)
def prepare_query_value(self, _opt, value):
return self.to_mongo(value)
|
Make course ids and usage ids opaque to LMS and Studio [partial commit]
This commit adds custom mongoengine fields for CourseKeys and UsageKeys.
These keys are now objects with a limited interface, and the particular
internal representation is managed by the data storage layer (the
modulestore).
For the LMS, there should be no outward-facing changes to the system.
The keys are, for now, a change to internal representation only. For
Studio, the new serialized form of the keys is used in urls, to allow
for further migration in the future.
Co-Author: Andy Armstrong <andya@edx.org>
Co-Author: Christina Roberts <christina@edx.org>
Co-Author: David Baumgold <db@edx.org>
Co-Author: Diana Huang <dkh@edx.org>
Co-Author: Don Mitchell <dmitchell@edx.org>
Co-Author: Julia Hansbrough <julia@edx.org>
Co-Author: Nimisha Asthagiri <nasthagiri@edx.org>
Co-Author: Sarina Canelake <sarina@edx.org>
[LMS-2370]"""
Custom field types for mongoengine
"""
import mongoengine
from xmodule.modulestore.locations import SlashSeparatedCourseKey, Location
from types import NoneType
from xmodule.modulestore.keys import CourseKey
class CourseKeyField(mongoengine.StringField):
"""
Serializes and deserializes CourseKey's to mongo dbs which use mongoengine
"""
def __init__(self, **kwargs):
# it'd be useful to add init args such as support_deprecated, force_deprecated
super(CourseKeyField, self).__init__(**kwargs)
def to_mongo(self, course_key):
"""
For now saves the course key in the deprecated form
"""
assert isinstance(course_key, (NoneType, SlashSeparatedCourseKey))
if course_key:
# don't call super as base.BaseField.to_mongo calls to_python() for some odd reason
return course_key.to_deprecated_string()
else:
return None
def to_python(self, course_key):
"""
Deserialize to a CourseKey instance
"""
# calling super b/c it decodes utf (and doesn't have circularity of from_python)
course_key = super(CourseKeyField, self).to_python(course_key)
assert isinstance(course_key, (NoneType, basestring, SlashSeparatedCourseKey))
if course_key == '':
return None
if isinstance(course_key, basestring):
return SlashSeparatedCourseKey.from_deprecated_string(course_key)
else:
return course_key
def validate(self, value):
assert isinstance(value, (NoneType, basestring, SlashSeparatedCourseKey))
if isinstance(value, CourseKey):
return super(CourseKeyField, self).validate(value.to_deprecated_string())
else:
return super(CourseKeyField, self).validate(value)
def prepare_query_value(self, _opt, value):
return self.to_mongo(value)
class UsageKeyField(mongoengine.StringField):
"""
Represent a UsageKey as a single string in Mongo
"""
def to_mongo(self, location):
"""
For now saves the usage key in the deprecated location i4x/c4x form
"""
assert isinstance(location, (NoneType, SlashSeparatedCourseKey))
if location is None:
return location
return super(UsageKeyField, self).to_mongo(location.to_deprecated_string())
def to_python(self, location):
"""
Deserialize to a UsageKey instance: for now it's a location missing the run
"""
assert isinstance(location, (NoneType, basestring, Location))
if location == '':
return None
if isinstance(location, basestring):
location = super(UsageKeyField, self).to_python(location)
return Location.from_deprecated_string(location)
else:
return location
def validate(self, value):
assert isinstance(value, (NoneType, basestring, Location))
if isinstance(value, Location):
return super(UsageKeyField, self).validate(value.to_deprecated_string())
else:
return super(UsageKeyField, self).validate(value)
def prepare_query_value(self, _opt, value):
return self.to_mongo(value)
|
<commit_before><commit_msg>Make course ids and usage ids opaque to LMS and Studio [partial commit]
This commit adds custom mongoengine fields for CourseKeys and UsageKeys.
These keys are now objects with a limited interface, and the particular
internal representation is managed by the data storage layer (the
modulestore).
For the LMS, there should be no outward-facing changes to the system.
The keys are, for now, a change to internal representation only. For
Studio, the new serialized form of the keys is used in urls, to allow
for further migration in the future.
Co-Author: Andy Armstrong <andya@edx.org>
Co-Author: Christina Roberts <christina@edx.org>
Co-Author: David Baumgold <db@edx.org>
Co-Author: Diana Huang <dkh@edx.org>
Co-Author: Don Mitchell <dmitchell@edx.org>
Co-Author: Julia Hansbrough <julia@edx.org>
Co-Author: Nimisha Asthagiri <nasthagiri@edx.org>
Co-Author: Sarina Canelake <sarina@edx.org>
[LMS-2370]<commit_after>"""
Custom field types for mongoengine
"""
import mongoengine
from xmodule.modulestore.locations import SlashSeparatedCourseKey, Location
from types import NoneType
from xmodule.modulestore.keys import CourseKey
class CourseKeyField(mongoengine.StringField):
"""
Serializes and deserializes CourseKey's to mongo dbs which use mongoengine
"""
def __init__(self, **kwargs):
# it'd be useful to add init args such as support_deprecated, force_deprecated
super(CourseKeyField, self).__init__(**kwargs)
def to_mongo(self, course_key):
"""
For now saves the course key in the deprecated form
"""
assert isinstance(course_key, (NoneType, SlashSeparatedCourseKey))
if course_key:
# don't call super as base.BaseField.to_mongo calls to_python() for some odd reason
return course_key.to_deprecated_string()
else:
return None
def to_python(self, course_key):
"""
Deserialize to a CourseKey instance
"""
# calling super b/c it decodes utf (and doesn't have circularity of from_python)
course_key = super(CourseKeyField, self).to_python(course_key)
assert isinstance(course_key, (NoneType, basestring, SlashSeparatedCourseKey))
if course_key == '':
return None
if isinstance(course_key, basestring):
return SlashSeparatedCourseKey.from_deprecated_string(course_key)
else:
return course_key
def validate(self, value):
assert isinstance(value, (NoneType, basestring, SlashSeparatedCourseKey))
if isinstance(value, CourseKey):
return super(CourseKeyField, self).validate(value.to_deprecated_string())
else:
return super(CourseKeyField, self).validate(value)
def prepare_query_value(self, _opt, value):
return self.to_mongo(value)
class UsageKeyField(mongoengine.StringField):
"""
Represent a UsageKey as a single string in Mongo
"""
def to_mongo(self, location):
"""
For now saves the usage key in the deprecated location i4x/c4x form
"""
assert isinstance(location, (NoneType, SlashSeparatedCourseKey))
if location is None:
return location
return super(UsageKeyField, self).to_mongo(location.to_deprecated_string())
def to_python(self, location):
"""
Deserialize to a UsageKey instance: for now it's a location missing the run
"""
assert isinstance(location, (NoneType, basestring, Location))
if location == '':
return None
if isinstance(location, basestring):
location = super(UsageKeyField, self).to_python(location)
return Location.from_deprecated_string(location)
else:
return location
def validate(self, value):
assert isinstance(value, (NoneType, basestring, Location))
if isinstance(value, Location):
return super(UsageKeyField, self).validate(value.to_deprecated_string())
else:
return super(UsageKeyField, self).validate(value)
def prepare_query_value(self, _opt, value):
return self.to_mongo(value)
|
|
3c07cf2efb8bddedcbba814e97229df99fcd44cf
|
ditto/twitter/migrations/0055_re_save_tweets_for_new_html.py
|
ditto/twitter/migrations/0055_re_save_tweets_for_new_html.py
|
# Generated by Django 2.0.4 on 2018-04-16 11:44
from django.db import migrations
def re_save_tweets(apps, schema_editor):
"""
Re-save all of the Tweets so that the HTML version of their text is updated
using the newer version of Twython.
"""
Tweet = apps.get_model('twitter', 'Tweet')
for tweet in Tweet.objects.all():
tweet.save(update_fields=['text_html'])
class Migration(migrations.Migration):
dependencies = [
('twitter', '0054_auto_20171113_1001'),
]
operations = [
migrations.RunPython(re_save_tweets),
]
|
Add migration to re-save all Tweets
|
Add migration to re-save all Tweets
So that the HTML version of their text uses the newer version of Twython
which fixes some issues with entities in tweets.
|
Python
|
mit
|
philgyford/django-ditto,philgyford/django-ditto,philgyford/django-ditto
|
Add migration to re-save all Tweets
So that the HTML version of their text uses the newer version of Twython
which fixes some issues with entities in tweets.
|
# Generated by Django 2.0.4 on 2018-04-16 11:44
from django.db import migrations
def re_save_tweets(apps, schema_editor):
"""
Re-save all of the Tweets so that the HTML version of their text is updated
using the newer version of Twython.
"""
Tweet = apps.get_model('twitter', 'Tweet')
for tweet in Tweet.objects.all():
tweet.save(update_fields=['text_html'])
class Migration(migrations.Migration):
dependencies = [
('twitter', '0054_auto_20171113_1001'),
]
operations = [
migrations.RunPython(re_save_tweets),
]
|
<commit_before><commit_msg>Add migration to re-save all Tweets
So that the HTML version of their text uses the newer version of Twython
which fixes some issues with entities in tweets.<commit_after>
|
# Generated by Django 2.0.4 on 2018-04-16 11:44
from django.db import migrations
def re_save_tweets(apps, schema_editor):
"""
Re-save all of the Tweets so that the HTML version of their text is updated
using the newer version of Twython.
"""
Tweet = apps.get_model('twitter', 'Tweet')
for tweet in Tweet.objects.all():
tweet.save(update_fields=['text_html'])
class Migration(migrations.Migration):
dependencies = [
('twitter', '0054_auto_20171113_1001'),
]
operations = [
migrations.RunPython(re_save_tweets),
]
|
Add migration to re-save all Tweets
So that the HTML version of their text uses the newer version of Twython
which fixes some issues with entities in tweets.# Generated by Django 2.0.4 on 2018-04-16 11:44
from django.db import migrations
def re_save_tweets(apps, schema_editor):
"""
Re-save all of the Tweets so that the HTML version of their text is updated
using the newer version of Twython.
"""
Tweet = apps.get_model('twitter', 'Tweet')
for tweet in Tweet.objects.all():
tweet.save(update_fields=['text_html'])
class Migration(migrations.Migration):
dependencies = [
('twitter', '0054_auto_20171113_1001'),
]
operations = [
migrations.RunPython(re_save_tweets),
]
|
<commit_before><commit_msg>Add migration to re-save all Tweets
So that the HTML version of their text uses the newer version of Twython
which fixes some issues with entities in tweets.<commit_after># Generated by Django 2.0.4 on 2018-04-16 11:44
from django.db import migrations
def re_save_tweets(apps, schema_editor):
"""
Re-save all of the Tweets so that the HTML version of their text is updated
using the newer version of Twython.
"""
Tweet = apps.get_model('twitter', 'Tweet')
for tweet in Tweet.objects.all():
tweet.save(update_fields=['text_html'])
class Migration(migrations.Migration):
dependencies = [
('twitter', '0054_auto_20171113_1001'),
]
operations = [
migrations.RunPython(re_save_tweets),
]
|
|
9cb92e07c8e92dfd6be51f924e23d60b0cb24185
|
fjord/suggest/providers/trigger/migrations/0002_auto_20150831_0912.py
|
fjord/suggest/providers/trigger/migrations/0002_auto_20150831_0912.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import fjord.base.models
class Migration(migrations.Migration):
dependencies = [
('trigger', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='triggerrule',
name='keywords',
field=fjord.base.models.ListField(help_text='Key words and phrases to match.', blank=True),
preserve_default=True,
),
migrations.AlterField(
model_name='triggerrule',
name='locales',
field=fjord.base.models.ListField(help_text='Locales to match.', blank=True),
preserve_default=True,
),
migrations.AlterField(
model_name='triggerrule',
name='products',
field=models.ManyToManyField(help_text='Products to match.', to='feedback.Product', blank=True),
preserve_default=True,
),
migrations.AlterField(
model_name='triggerrule',
name='versions',
field=fjord.base.models.ListField(help_text='Versions to match. Allows for prefix matches for strings that end in "*".', blank=True),
preserve_default=True,
),
]
|
Add migration for help text for trigger models
|
Add migration for help text for trigger models
|
Python
|
bsd-3-clause
|
Ritsyy/fjord,lgp171188/fjord,lgp171188/fjord,Ritsyy/fjord,lgp171188/fjord,hoosteeno/fjord,Ritsyy/fjord,mozilla/fjord,hoosteeno/fjord,hoosteeno/fjord,mozilla/fjord,mozilla/fjord,mozilla/fjord,lgp171188/fjord,hoosteeno/fjord,Ritsyy/fjord
|
Add migration for help text for trigger models
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import fjord.base.models
class Migration(migrations.Migration):
dependencies = [
('trigger', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='triggerrule',
name='keywords',
field=fjord.base.models.ListField(help_text='Key words and phrases to match.', blank=True),
preserve_default=True,
),
migrations.AlterField(
model_name='triggerrule',
name='locales',
field=fjord.base.models.ListField(help_text='Locales to match.', blank=True),
preserve_default=True,
),
migrations.AlterField(
model_name='triggerrule',
name='products',
field=models.ManyToManyField(help_text='Products to match.', to='feedback.Product', blank=True),
preserve_default=True,
),
migrations.AlterField(
model_name='triggerrule',
name='versions',
field=fjord.base.models.ListField(help_text='Versions to match. Allows for prefix matches for strings that end in "*".', blank=True),
preserve_default=True,
),
]
|
<commit_before><commit_msg>Add migration for help text for trigger models<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import fjord.base.models
class Migration(migrations.Migration):
dependencies = [
('trigger', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='triggerrule',
name='keywords',
field=fjord.base.models.ListField(help_text='Key words and phrases to match.', blank=True),
preserve_default=True,
),
migrations.AlterField(
model_name='triggerrule',
name='locales',
field=fjord.base.models.ListField(help_text='Locales to match.', blank=True),
preserve_default=True,
),
migrations.AlterField(
model_name='triggerrule',
name='products',
field=models.ManyToManyField(help_text='Products to match.', to='feedback.Product', blank=True),
preserve_default=True,
),
migrations.AlterField(
model_name='triggerrule',
name='versions',
field=fjord.base.models.ListField(help_text='Versions to match. Allows for prefix matches for strings that end in "*".', blank=True),
preserve_default=True,
),
]
|
Add migration for help text for trigger models# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import fjord.base.models
class Migration(migrations.Migration):
dependencies = [
('trigger', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='triggerrule',
name='keywords',
field=fjord.base.models.ListField(help_text='Key words and phrases to match.', blank=True),
preserve_default=True,
),
migrations.AlterField(
model_name='triggerrule',
name='locales',
field=fjord.base.models.ListField(help_text='Locales to match.', blank=True),
preserve_default=True,
),
migrations.AlterField(
model_name='triggerrule',
name='products',
field=models.ManyToManyField(help_text='Products to match.', to='feedback.Product', blank=True),
preserve_default=True,
),
migrations.AlterField(
model_name='triggerrule',
name='versions',
field=fjord.base.models.ListField(help_text='Versions to match. Allows for prefix matches for strings that end in "*".', blank=True),
preserve_default=True,
),
]
|
<commit_before><commit_msg>Add migration for help text for trigger models<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import fjord.base.models
class Migration(migrations.Migration):
dependencies = [
('trigger', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='triggerrule',
name='keywords',
field=fjord.base.models.ListField(help_text='Key words and phrases to match.', blank=True),
preserve_default=True,
),
migrations.AlterField(
model_name='triggerrule',
name='locales',
field=fjord.base.models.ListField(help_text='Locales to match.', blank=True),
preserve_default=True,
),
migrations.AlterField(
model_name='triggerrule',
name='products',
field=models.ManyToManyField(help_text='Products to match.', to='feedback.Product', blank=True),
preserve_default=True,
),
migrations.AlterField(
model_name='triggerrule',
name='versions',
field=fjord.base.models.ListField(help_text='Versions to match. Allows for prefix matches for strings that end in "*".', blank=True),
preserve_default=True,
),
]
|
|
ea62ed3f9c2dd96d22b05abfb22b36b3a141901b
|
file_manager.py
|
file_manager.py
|
# Copyright 2011, The Board of Regents of Leland Stanford, Jr. University
# All rights reserved. See LICENSE.
# Author: Scott Williams <scottw@artesiancode.com>
# Description: A shim over the filesystem so we can avoid clobbering files we
# don't own.
|
Add a stub for the file manager
|
Add a stub for the file manager
|
Python
|
bsd-2-clause
|
starpow971/Stanford-Humanities-Center-Updater,starpow971/Stanford-Humanities-Center-Updater
|
Add a stub for the file manager
|
# Copyright 2011, The Board of Regents of Leland Stanford, Jr. University
# All rights reserved. See LICENSE.
# Author: Scott Williams <scottw@artesiancode.com>
# Description: A shim over the filesystem so we can avoid clobbering files we
# don't own.
|
<commit_before><commit_msg>Add a stub for the file manager<commit_after>
|
# Copyright 2011, The Board of Regents of Leland Stanford, Jr. University
# All rights reserved. See LICENSE.
# Author: Scott Williams <scottw@artesiancode.com>
# Description: A shim over the filesystem so we can avoid clobbering files we
# don't own.
|
Add a stub for the file manager# Copyright 2011, The Board of Regents of Leland Stanford, Jr. University
# All rights reserved. See LICENSE.
# Author: Scott Williams <scottw@artesiancode.com>
# Description: A shim over the filesystem so we can avoid clobbering files we
# don't own.
|
<commit_before><commit_msg>Add a stub for the file manager<commit_after># Copyright 2011, The Board of Regents of Leland Stanford, Jr. University
# All rights reserved. See LICENSE.
# Author: Scott Williams <scottw@artesiancode.com>
# Description: A shim over the filesystem so we can avoid clobbering files we
# don't own.
|
|
996405d96b1dccab95548de32e24e54d0fc06c38
|
drain_all_queues.py
|
drain_all_queues.py
|
from helpers import client
if __name__ == '__main__':
scrape = client.queue('scrape')
ingest = client.queue('ingest')
complete = client.queue('completed')
were_messages = True
while were_messages:
were_messages = False
for msg in scrape.claim(ttl=60, grace=60):
msg.delete()
were_messages = True
for msg in ingest.claim(ttl=60, grace=60):
msg.delete()
were_messages = True
for msg in complete.claim(ttl=60, grace=60):
msg.delete()
were_messages = True
print were_messages
|
Add script to drain all the work/production queues
|
Add script to drain all the work/production queues
|
Python
|
mit
|
ryansb/zaqar-webscraper-demo
|
Add script to drain all the work/production queues
|
from helpers import client
if __name__ == '__main__':
scrape = client.queue('scrape')
ingest = client.queue('ingest')
complete = client.queue('completed')
were_messages = True
while were_messages:
were_messages = False
for msg in scrape.claim(ttl=60, grace=60):
msg.delete()
were_messages = True
for msg in ingest.claim(ttl=60, grace=60):
msg.delete()
were_messages = True
for msg in complete.claim(ttl=60, grace=60):
msg.delete()
were_messages = True
print were_messages
|
<commit_before><commit_msg>Add script to drain all the work/production queues<commit_after>
|
from helpers import client
if __name__ == '__main__':
scrape = client.queue('scrape')
ingest = client.queue('ingest')
complete = client.queue('completed')
were_messages = True
while were_messages:
were_messages = False
for msg in scrape.claim(ttl=60, grace=60):
msg.delete()
were_messages = True
for msg in ingest.claim(ttl=60, grace=60):
msg.delete()
were_messages = True
for msg in complete.claim(ttl=60, grace=60):
msg.delete()
were_messages = True
print were_messages
|
Add script to drain all the work/production queuesfrom helpers import client
if __name__ == '__main__':
scrape = client.queue('scrape')
ingest = client.queue('ingest')
complete = client.queue('completed')
were_messages = True
while were_messages:
were_messages = False
for msg in scrape.claim(ttl=60, grace=60):
msg.delete()
were_messages = True
for msg in ingest.claim(ttl=60, grace=60):
msg.delete()
were_messages = True
for msg in complete.claim(ttl=60, grace=60):
msg.delete()
were_messages = True
print were_messages
|
<commit_before><commit_msg>Add script to drain all the work/production queues<commit_after>from helpers import client
if __name__ == '__main__':
scrape = client.queue('scrape')
ingest = client.queue('ingest')
complete = client.queue('completed')
were_messages = True
while were_messages:
were_messages = False
for msg in scrape.claim(ttl=60, grace=60):
msg.delete()
were_messages = True
for msg in ingest.claim(ttl=60, grace=60):
msg.delete()
were_messages = True
for msg in complete.claim(ttl=60, grace=60):
msg.delete()
were_messages = True
print were_messages
|
|
ca2407c8d2cb0f151b9fda0a15c9d242d3ad6aa8
|
arches/app/models/migrations/5616_search_export_history.py
|
arches/app/models/migrations/5616_search_export_history.py
|
# Generated by Django 2.2.6 on 2019-12-16 13:40
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import uuid
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('models', '5668_add_resourceinstancelist'),
]
operations = [
migrations.CreateModel(
name='SearchExportHistory',
fields=[
('searchexportid', models.UUIDField(default=uuid.uuid1, primary_key=True, serialize=False)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
('numberofinstances', models.IntegerField()),
('exporttime', models.DateTimeField(auto_now_add=True)),
('url', models.TextField()),
],
options={
'db_table': 'search_export_history',
'managed': True,
},
),
]
|
Migrate the db to add search_export_history table
|
Migrate the db to add search_export_history table
|
Python
|
agpl-3.0
|
archesproject/arches,archesproject/arches,archesproject/arches,archesproject/arches
|
Migrate the db to add search_export_history table
|
# Generated by Django 2.2.6 on 2019-12-16 13:40
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import uuid
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('models', '5668_add_resourceinstancelist'),
]
operations = [
migrations.CreateModel(
name='SearchExportHistory',
fields=[
('searchexportid', models.UUIDField(default=uuid.uuid1, primary_key=True, serialize=False)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
('numberofinstances', models.IntegerField()),
('exporttime', models.DateTimeField(auto_now_add=True)),
('url', models.TextField()),
],
options={
'db_table': 'search_export_history',
'managed': True,
},
),
]
|
<commit_before><commit_msg>Migrate the db to add search_export_history table<commit_after>
|
# Generated by Django 2.2.6 on 2019-12-16 13:40
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import uuid
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('models', '5668_add_resourceinstancelist'),
]
operations = [
migrations.CreateModel(
name='SearchExportHistory',
fields=[
('searchexportid', models.UUIDField(default=uuid.uuid1, primary_key=True, serialize=False)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
('numberofinstances', models.IntegerField()),
('exporttime', models.DateTimeField(auto_now_add=True)),
('url', models.TextField()),
],
options={
'db_table': 'search_export_history',
'managed': True,
},
),
]
|
Migrate the db to add search_export_history table# Generated by Django 2.2.6 on 2019-12-16 13:40
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import uuid
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('models', '5668_add_resourceinstancelist'),
]
operations = [
migrations.CreateModel(
name='SearchExportHistory',
fields=[
('searchexportid', models.UUIDField(default=uuid.uuid1, primary_key=True, serialize=False)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
('numberofinstances', models.IntegerField()),
('exporttime', models.DateTimeField(auto_now_add=True)),
('url', models.TextField()),
],
options={
'db_table': 'search_export_history',
'managed': True,
},
),
]
|
<commit_before><commit_msg>Migrate the db to add search_export_history table<commit_after># Generated by Django 2.2.6 on 2019-12-16 13:40
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import uuid
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('models', '5668_add_resourceinstancelist'),
]
operations = [
migrations.CreateModel(
name='SearchExportHistory',
fields=[
('searchexportid', models.UUIDField(default=uuid.uuid1, primary_key=True, serialize=False)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
('numberofinstances', models.IntegerField()),
('exporttime', models.DateTimeField(auto_now_add=True)),
('url', models.TextField()),
],
options={
'db_table': 'search_export_history',
'managed': True,
},
),
]
|
|
7adab964e523ec6af96acbea0fa7f30efef78dc8
|
examples/tracing/strlen_hist.py
|
examples/tracing/strlen_hist.py
|
#!/usr/bin/python
#
# strlen_hist.py Histogram of system-wide strlen return values
#
# A basic example of using uprobes along with a histogram to show
# distributions.
#
# Runs until ctrl-c is pressed.
#
# Copyright (c) PLUMgrid, Inc.
# Licensed under the Apache License, Version 2.0 (the "License")
#
# Example output:
# $ sudo ./strlen_hist.py
# 22:12:52
# strlen return: : count distribution
# 0 -> 1 : 2106 |**************** |
# 2 -> 3 : 1172 |********* |
# 4 -> 7 : 3892 |****************************** |
# 8 -> 15 : 5096 |****************************************|
# 16 -> 31 : 2201 |***************** |
# 32 -> 63 : 547 |**** |
# 64 -> 127 : 106 | |
# 128 -> 255 : 13 | |
# 256 -> 511 : 27 | |
# 512 -> 1023 : 6 | |
# 1024 -> 2047 : 10 | |
# ^C$
#
from __future__ import print_function
import bcc
import time
text = """
#include <uapi/linux/ptrace.h>
BPF_HISTOGRAM(dist);
int count(struct pt_regs *ctx) {
dist.increment(bpf_log2l(ctx->ax));
return 0;
}
"""
b = bcc.BPF(text=text)
sym="strlen"
b.attach_uretprobe(name="c", sym=sym, fn_name="count")
dist = b["dist"]
try:
while True:
time.sleep(1)
print("%-8s\n" % time.strftime("%H:%M:%S"), end="")
dist.print_log2_hist(sym + " return:")
dist.clear()
except KeyboardInterrupt:
pass
|
Add uprobe strlen histogram example
|
Add uprobe strlen histogram example
This example traces all calls to libc's strlen(). The program is attached as a
retprobe, therefore giving access to the resulting string length. The value is
kept in a log2 histogram that is printed to console once per second.
Example:
```
$ sudo ./strlen_hist.py
22:12:51
strlen return: : count distribution
0 -> 1 : 2041 |**************** |
2 -> 3 : 1120 |******** |
4 -> 7 : 3300 |************************** |
8 -> 15 : 4995 |****************************************|
16 -> 31 : 2130 |***************** |
32 -> 63 : 562 |**** |
^C
```
Signed-off-by: Brenden Blanco <8e41c261aa8ddb01230a40c21c26a049c1f6d041@plumgrid.com>
|
Python
|
apache-2.0
|
romain-intel/bcc,tuxology/bcc,brendangregg/bcc,mcaleavya/bcc,zaafar/bcc,romain-intel/bcc,tuxology/bcc,shodoco/bcc,shodoco/bcc,iovisor/bcc,iovisor/bcc,mkacik/bcc,mcaleavya/bcc,mkacik/bcc,iovisor/bcc,mcaleavya/bcc,mcaleavya/bcc,mkacik/bcc,mcaleavya/bcc,shodoco/bcc,brendangregg/bcc,brendangregg/bcc,zaafar/bcc,tuxology/bcc,zaafar/bcc,iovisor/bcc,tuxology/bcc,mkacik/bcc,tuxology/bcc,brendangregg/bcc,romain-intel/bcc,iovisor/bcc,shodoco/bcc,shodoco/bcc,zaafar/bcc,mkacik/bcc,brendangregg/bcc,romain-intel/bcc,zaafar/bcc,romain-intel/bcc
|
Add uprobe strlen histogram example
This example traces all calls to libc's strlen(). The program is attached as a
retprobe, therefore giving access to the resulting string length. The value is
kept in a log2 histogram that is printed to console once per second.
Example:
```
$ sudo ./strlen_hist.py
22:12:51
strlen return: : count distribution
0 -> 1 : 2041 |**************** |
2 -> 3 : 1120 |******** |
4 -> 7 : 3300 |************************** |
8 -> 15 : 4995 |****************************************|
16 -> 31 : 2130 |***************** |
32 -> 63 : 562 |**** |
^C
```
Signed-off-by: Brenden Blanco <8e41c261aa8ddb01230a40c21c26a049c1f6d041@plumgrid.com>
|
#!/usr/bin/python
#
# strlen_hist.py Histogram of system-wide strlen return values
#
# A basic example of using uprobes along with a histogram to show
# distributions.
#
# Runs until ctrl-c is pressed.
#
# Copyright (c) PLUMgrid, Inc.
# Licensed under the Apache License, Version 2.0 (the "License")
#
# Example output:
# $ sudo ./strlen_hist.py
# 22:12:52
# strlen return: : count distribution
# 0 -> 1 : 2106 |**************** |
# 2 -> 3 : 1172 |********* |
# 4 -> 7 : 3892 |****************************** |
# 8 -> 15 : 5096 |****************************************|
# 16 -> 31 : 2201 |***************** |
# 32 -> 63 : 547 |**** |
# 64 -> 127 : 106 | |
# 128 -> 255 : 13 | |
# 256 -> 511 : 27 | |
# 512 -> 1023 : 6 | |
# 1024 -> 2047 : 10 | |
# ^C$
#
from __future__ import print_function
import bcc
import time
text = """
#include <uapi/linux/ptrace.h>
BPF_HISTOGRAM(dist);
int count(struct pt_regs *ctx) {
dist.increment(bpf_log2l(ctx->ax));
return 0;
}
"""
b = bcc.BPF(text=text)
sym="strlen"
b.attach_uretprobe(name="c", sym=sym, fn_name="count")
dist = b["dist"]
try:
while True:
time.sleep(1)
print("%-8s\n" % time.strftime("%H:%M:%S"), end="")
dist.print_log2_hist(sym + " return:")
dist.clear()
except KeyboardInterrupt:
pass
|
<commit_before><commit_msg>Add uprobe strlen histogram example
This example traces all calls to libc's strlen(). The program is attached as a
retprobe, therefore giving access to the resulting string length. The value is
kept in a log2 histogram that is printed to console once per second.
Example:
```
$ sudo ./strlen_hist.py
22:12:51
strlen return: : count distribution
0 -> 1 : 2041 |**************** |
2 -> 3 : 1120 |******** |
4 -> 7 : 3300 |************************** |
8 -> 15 : 4995 |****************************************|
16 -> 31 : 2130 |***************** |
32 -> 63 : 562 |**** |
^C
```
Signed-off-by: Brenden Blanco <8e41c261aa8ddb01230a40c21c26a049c1f6d041@plumgrid.com><commit_after>
|
#!/usr/bin/python
#
# strlen_hist.py Histogram of system-wide strlen return values
#
# A basic example of using uprobes along with a histogram to show
# distributions.
#
# Runs until ctrl-c is pressed.
#
# Copyright (c) PLUMgrid, Inc.
# Licensed under the Apache License, Version 2.0 (the "License")
#
# Example output:
# $ sudo ./strlen_hist.py
# 22:12:52
# strlen return: : count distribution
# 0 -> 1 : 2106 |**************** |
# 2 -> 3 : 1172 |********* |
# 4 -> 7 : 3892 |****************************** |
# 8 -> 15 : 5096 |****************************************|
# 16 -> 31 : 2201 |***************** |
# 32 -> 63 : 547 |**** |
# 64 -> 127 : 106 | |
# 128 -> 255 : 13 | |
# 256 -> 511 : 27 | |
# 512 -> 1023 : 6 | |
# 1024 -> 2047 : 10 | |
# ^C$
#
from __future__ import print_function
import bcc
import time
text = """
#include <uapi/linux/ptrace.h>
BPF_HISTOGRAM(dist);
int count(struct pt_regs *ctx) {
dist.increment(bpf_log2l(ctx->ax));
return 0;
}
"""
b = bcc.BPF(text=text)
sym="strlen"
b.attach_uretprobe(name="c", sym=sym, fn_name="count")
dist = b["dist"]
try:
while True:
time.sleep(1)
print("%-8s\n" % time.strftime("%H:%M:%S"), end="")
dist.print_log2_hist(sym + " return:")
dist.clear()
except KeyboardInterrupt:
pass
|
Add uprobe strlen histogram example
This example traces all calls to libc's strlen(). The program is attached as a
retprobe, therefore giving access to the resulting string length. The value is
kept in a log2 histogram that is printed to console once per second.
Example:
```
$ sudo ./strlen_hist.py
22:12:51
strlen return: : count distribution
0 -> 1 : 2041 |**************** |
2 -> 3 : 1120 |******** |
4 -> 7 : 3300 |************************** |
8 -> 15 : 4995 |****************************************|
16 -> 31 : 2130 |***************** |
32 -> 63 : 562 |**** |
^C
```
Signed-off-by: Brenden Blanco <8e41c261aa8ddb01230a40c21c26a049c1f6d041@plumgrid.com>#!/usr/bin/python
#
# strlen_hist.py Histogram of system-wide strlen return values
#
# A basic example of using uprobes along with a histogram to show
# distributions.
#
# Runs until ctrl-c is pressed.
#
# Copyright (c) PLUMgrid, Inc.
# Licensed under the Apache License, Version 2.0 (the "License")
#
# Example output:
# $ sudo ./strlen_hist.py
# 22:12:52
# strlen return: : count distribution
# 0 -> 1 : 2106 |**************** |
# 2 -> 3 : 1172 |********* |
# 4 -> 7 : 3892 |****************************** |
# 8 -> 15 : 5096 |****************************************|
# 16 -> 31 : 2201 |***************** |
# 32 -> 63 : 547 |**** |
# 64 -> 127 : 106 | |
# 128 -> 255 : 13 | |
# 256 -> 511 : 27 | |
# 512 -> 1023 : 6 | |
# 1024 -> 2047 : 10 | |
# ^C$
#
from __future__ import print_function
import bcc
import time
text = """
#include <uapi/linux/ptrace.h>
BPF_HISTOGRAM(dist);
int count(struct pt_regs *ctx) {
dist.increment(bpf_log2l(ctx->ax));
return 0;
}
"""
b = bcc.BPF(text=text)
sym="strlen"
b.attach_uretprobe(name="c", sym=sym, fn_name="count")
dist = b["dist"]
try:
while True:
time.sleep(1)
print("%-8s\n" % time.strftime("%H:%M:%S"), end="")
dist.print_log2_hist(sym + " return:")
dist.clear()
except KeyboardInterrupt:
pass
|
<commit_before><commit_msg>Add uprobe strlen histogram example
This example traces all calls to libc's strlen(). The program is attached as a
retprobe, therefore giving access to the resulting string length. The value is
kept in a log2 histogram that is printed to console once per second.
Example:
```
$ sudo ./strlen_hist.py
22:12:51
strlen return: : count distribution
0 -> 1 : 2041 |**************** |
2 -> 3 : 1120 |******** |
4 -> 7 : 3300 |************************** |
8 -> 15 : 4995 |****************************************|
16 -> 31 : 2130 |***************** |
32 -> 63 : 562 |**** |
^C
```
Signed-off-by: Brenden Blanco <8e41c261aa8ddb01230a40c21c26a049c1f6d041@plumgrid.com><commit_after>#!/usr/bin/python
#
# strlen_hist.py Histogram of system-wide strlen return values
#
# A basic example of using uprobes along with a histogram to show
# distributions.
#
# Runs until ctrl-c is pressed.
#
# Copyright (c) PLUMgrid, Inc.
# Licensed under the Apache License, Version 2.0 (the "License")
#
# Example output:
# $ sudo ./strlen_hist.py
# 22:12:52
# strlen return: : count distribution
# 0 -> 1 : 2106 |**************** |
# 2 -> 3 : 1172 |********* |
# 4 -> 7 : 3892 |****************************** |
# 8 -> 15 : 5096 |****************************************|
# 16 -> 31 : 2201 |***************** |
# 32 -> 63 : 547 |**** |
# 64 -> 127 : 106 | |
# 128 -> 255 : 13 | |
# 256 -> 511 : 27 | |
# 512 -> 1023 : 6 | |
# 1024 -> 2047 : 10 | |
# ^C$
#
from __future__ import print_function
import bcc
import time
text = """
#include <uapi/linux/ptrace.h>
BPF_HISTOGRAM(dist);
int count(struct pt_regs *ctx) {
dist.increment(bpf_log2l(ctx->ax));
return 0;
}
"""
b = bcc.BPF(text=text)
sym="strlen"
b.attach_uretprobe(name="c", sym=sym, fn_name="count")
dist = b["dist"]
try:
while True:
time.sleep(1)
print("%-8s\n" % time.strftime("%H:%M:%S"), end="")
dist.print_log2_hist(sym + " return:")
dist.clear()
except KeyboardInterrupt:
pass
|
|
71fa8ba0890fcff363b473d5a79069b69731330e
|
fcm_django/migrations/0006_auto_20210802_1140.py
|
fcm_django/migrations/0006_auto_20210802_1140.py
|
# Generated by Django 3.1.12 on 2021-08-02 11:40
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('fcm_django', '0005_auto_20170808_1145'),
]
operations = [
migrations.AlterField(
model_name='fcmdevice',
name='device_id',
field=models.CharField(blank=True, db_index=True, help_text='Unique device identifier', max_length=4096, null=True, verbose_name='Device ID'),
),
]
|
Add migration for field length
|
Add migration for field length
|
Python
|
mit
|
xtrinch/fcm-django
|
Add migration for field length
|
# Generated by Django 3.1.12 on 2021-08-02 11:40
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('fcm_django', '0005_auto_20170808_1145'),
]
operations = [
migrations.AlterField(
model_name='fcmdevice',
name='device_id',
field=models.CharField(blank=True, db_index=True, help_text='Unique device identifier', max_length=4096, null=True, verbose_name='Device ID'),
),
]
|
<commit_before><commit_msg>Add migration for field length<commit_after>
|
# Generated by Django 3.1.12 on 2021-08-02 11:40
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('fcm_django', '0005_auto_20170808_1145'),
]
operations = [
migrations.AlterField(
model_name='fcmdevice',
name='device_id',
field=models.CharField(blank=True, db_index=True, help_text='Unique device identifier', max_length=4096, null=True, verbose_name='Device ID'),
),
]
|
Add migration for field length# Generated by Django 3.1.12 on 2021-08-02 11:40
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('fcm_django', '0005_auto_20170808_1145'),
]
operations = [
migrations.AlterField(
model_name='fcmdevice',
name='device_id',
field=models.CharField(blank=True, db_index=True, help_text='Unique device identifier', max_length=4096, null=True, verbose_name='Device ID'),
),
]
|
<commit_before><commit_msg>Add migration for field length<commit_after># Generated by Django 3.1.12 on 2021-08-02 11:40
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('fcm_django', '0005_auto_20170808_1145'),
]
operations = [
migrations.AlterField(
model_name='fcmdevice',
name='device_id',
field=models.CharField(blank=True, db_index=True, help_text='Unique device identifier', max_length=4096, null=True, verbose_name='Device ID'),
),
]
|
|
8cb2ef91756ee8f06fa816d522655c796b7ed9cc
|
controllers/alert_controller.py
|
controllers/alert_controller.py
|
from database_access import guardeddatabase as gdb
from serializers.alert import alert_schema
from settings import DEFAULT_RADIUS
def get_alerts(*args, **kwargs):
latitude = kwargs.get('lat')
longitude = kwargs.get('lon')
alerts = gdb.get_alerts(lat=latitude, lon=longitude, radius=DEFAULT_RADIUS)
return alert_schema.dump(alerts, many=True).data
|
Add SQLAlchemy database access with models and serializers
|
Add SQLAlchemy database access with models and serializers
|
Python
|
apache-2.0
|
frodriguezf/flood-alert,frodriguezf/flood-alert,frodriguezf/flood-alert,frodriguezf/flood-alert
|
Add SQLAlchemy database access with models and serializers
|
from database_access import guardeddatabase as gdb
from serializers.alert import alert_schema
from settings import DEFAULT_RADIUS
def get_alerts(*args, **kwargs):
latitude = kwargs.get('lat')
longitude = kwargs.get('lon')
alerts = gdb.get_alerts(lat=latitude, lon=longitude, radius=DEFAULT_RADIUS)
return alert_schema.dump(alerts, many=True).data
|
<commit_before><commit_msg>Add SQLAlchemy database access with models and serializers<commit_after>
|
from database_access import guardeddatabase as gdb
from serializers.alert import alert_schema
from settings import DEFAULT_RADIUS
def get_alerts(*args, **kwargs):
latitude = kwargs.get('lat')
longitude = kwargs.get('lon')
alerts = gdb.get_alerts(lat=latitude, lon=longitude, radius=DEFAULT_RADIUS)
return alert_schema.dump(alerts, many=True).data
|
Add SQLAlchemy database access with models and serializersfrom database_access import guardeddatabase as gdb
from serializers.alert import alert_schema
from settings import DEFAULT_RADIUS
def get_alerts(*args, **kwargs):
latitude = kwargs.get('lat')
longitude = kwargs.get('lon')
alerts = gdb.get_alerts(lat=latitude, lon=longitude, radius=DEFAULT_RADIUS)
return alert_schema.dump(alerts, many=True).data
|
<commit_before><commit_msg>Add SQLAlchemy database access with models and serializers<commit_after>from database_access import guardeddatabase as gdb
from serializers.alert import alert_schema
from settings import DEFAULT_RADIUS
def get_alerts(*args, **kwargs):
latitude = kwargs.get('lat')
longitude = kwargs.get('lon')
alerts = gdb.get_alerts(lat=latitude, lon=longitude, radius=DEFAULT_RADIUS)
return alert_schema.dump(alerts, many=True).data
|
|
33e32b6660596c439db0f9615d83512063ea79a8
|
devicehive/connection_handler.py
|
devicehive/connection_handler.py
|
from devicehive.handlers.base_handler import BaseHandler
from devicehive.api import Api
class ConnectionHandler(BaseHandler):
"""Connection handler class."""
def __init__(self, transport, handler_class=None, handler_options=None,
refresh_token=None, access_token=None):
assert handler_class is not None, 'Handler class required'
assert refresh_token is not None, 'Refresh token required'
BaseHandler.__init__(self, transport)
self.api = Api(transport)
self._handler = handler_class(self.api, handler_options)
self._refresh_token = refresh_token
self._access_token = access_token
def handle_connected(self):
if not self._access_token:
response = self.api.refresh_token(self._refresh_token)
self._access_token = response.data['accessToken']
def handle_event(self, event):
# TODO: handle event here and call handler method.
pass
def handle_closed(self):
# TODO: reconnect here.
pass
|
Move handler to connection handler
|
Move handler to connection handler
|
Python
|
apache-2.0
|
devicehive/devicehive-python
|
Move handler to connection handler
|
from devicehive.handlers.base_handler import BaseHandler
from devicehive.api import Api
class ConnectionHandler(BaseHandler):
"""Connection handler class."""
def __init__(self, transport, handler_class=None, handler_options=None,
refresh_token=None, access_token=None):
assert handler_class is not None, 'Handler class required'
assert refresh_token is not None, 'Refresh token required'
BaseHandler.__init__(self, transport)
self.api = Api(transport)
self._handler = handler_class(self.api, handler_options)
self._refresh_token = refresh_token
self._access_token = access_token
def handle_connected(self):
if not self._access_token:
response = self.api.refresh_token(self._refresh_token)
self._access_token = response.data['accessToken']
def handle_event(self, event):
# TODO: handle event here and call handler method.
pass
def handle_closed(self):
# TODO: reconnect here.
pass
|
<commit_before><commit_msg>Move handler to connection handler<commit_after>
|
from devicehive.handlers.base_handler import BaseHandler
from devicehive.api import Api
class ConnectionHandler(BaseHandler):
"""Connection handler class."""
def __init__(self, transport, handler_class=None, handler_options=None,
refresh_token=None, access_token=None):
assert handler_class is not None, 'Handler class required'
assert refresh_token is not None, 'Refresh token required'
BaseHandler.__init__(self, transport)
self.api = Api(transport)
self._handler = handler_class(self.api, handler_options)
self._refresh_token = refresh_token
self._access_token = access_token
def handle_connected(self):
if not self._access_token:
response = self.api.refresh_token(self._refresh_token)
self._access_token = response.data['accessToken']
def handle_event(self, event):
# TODO: handle event here and call handler method.
pass
def handle_closed(self):
# TODO: reconnect here.
pass
|
Move handler to connection handlerfrom devicehive.handlers.base_handler import BaseHandler
from devicehive.api import Api
class ConnectionHandler(BaseHandler):
"""Connection handler class."""
def __init__(self, transport, handler_class=None, handler_options=None,
refresh_token=None, access_token=None):
assert handler_class is not None, 'Handler class required'
assert refresh_token is not None, 'Refresh token required'
BaseHandler.__init__(self, transport)
self.api = Api(transport)
self._handler = handler_class(self.api, handler_options)
self._refresh_token = refresh_token
self._access_token = access_token
def handle_connected(self):
if not self._access_token:
response = self.api.refresh_token(self._refresh_token)
self._access_token = response.data['accessToken']
def handle_event(self, event):
# TODO: handle event here and call handler method.
pass
def handle_closed(self):
# TODO: reconnect here.
pass
|
<commit_before><commit_msg>Move handler to connection handler<commit_after>from devicehive.handlers.base_handler import BaseHandler
from devicehive.api import Api
class ConnectionHandler(BaseHandler):
"""Connection handler class."""
def __init__(self, transport, handler_class=None, handler_options=None,
refresh_token=None, access_token=None):
assert handler_class is not None, 'Handler class required'
assert refresh_token is not None, 'Refresh token required'
BaseHandler.__init__(self, transport)
self.api = Api(transport)
self._handler = handler_class(self.api, handler_options)
self._refresh_token = refresh_token
self._access_token = access_token
def handle_connected(self):
if not self._access_token:
response = self.api.refresh_token(self._refresh_token)
self._access_token = response.data['accessToken']
def handle_event(self, event):
# TODO: handle event here and call handler method.
pass
def handle_closed(self):
# TODO: reconnect here.
pass
|
|
7bdc76d31ad8a32b2ff79eff2530ac12c9b7b3bd
|
indra/sources/eidos/eidos_cli.py
|
indra/sources/eidos/eidos_cli.py
|
"""
This is a Python based command line interface to Eidos
to complement the Python-Java bridge based interface.
EIDOSPATH (in the INDRA config.ini or as an environment variable)
needs to be pointing to a fat JAR of the Eidos system.
"""
import os
import glob
import logging
import subprocess
from indra import get_config
from .eidos_api import process_json_ld_file
eip = get_config('EIDOSPATH')
eidos_package = 'org.clulab.wm.eidos'
logger = logging.getLogger('eidos_cli')
def run_eidos(endpoint, *args):
"""Run a given enpoint of Eidos through the command line.
Parameters
----------
endpoint : str
The class within the Eidos package to run, for instance
'apps.ExtractFromDirectory' will run
'org.clulab.wm.eidos.apps.ExtractFromDirectory'
*args
Any further arguments to be passed as inputs to the class
being run.
"""
# Make the full path to the class that should be used
call_class = '%s.%s' % (eidos_package, endpoint)
    # Assemble the command line command and append optional args
cmd = ['java', '-Xmx12G', '-cp', eip, call_class] + list(args)
logger.info('Running Eidos with command "%s"' % (' '.join(cmd)))
subprocess.call(cmd)
def run_extract_from_directory(path_in, path_out):
"""Run Eidos on a set of text files in a folder.
The output is produced in the specified output folder but
the output files aren't processed by this function.
Parameters
----------
path_in : str
Path to an input folder with some text files
path_out : str
Path to an output folder in which Eidos places the output
JSON-LD files
"""
logger.info('Running Eidos on input folder %s' % path_in)
run_eidos('apps.ExtractFromDirectory', path_in, path_out)
def run_extract_and_process(path_in, path_out):
"""Run Eidos on a set of text files and process output with INDRA.
The output is produced in the specified output folder but
the output files aren't processed by this function.
Parameters
----------
path_in : str
Path to an input folder with some text files
path_out : str
Path to an output folder in which Eidos places the output
JSON-LD files
Returns
-------
stmts : list[indra.statements.Statements]
A list of INDRA Statements
"""
run_extract_from_directory(path_in, path_out)
jsons = glob.glob(os.path.join(path_out, '*.jsonld'))
logger.info('Found %d JSON-LD files to process in %s' %
(len(jsons), path_out))
stmts = []
for json in jsons:
ep = process_json_ld_file(json)
if ep:
stmts += ep.statements
return stmts
|
Add a CLI to call Eidos
|
Add a CLI to call Eidos
|
Python
|
bsd-2-clause
|
sorgerlab/indra,sorgerlab/belpy,bgyori/indra,pvtodorov/indra,johnbachman/indra,johnbachman/belpy,johnbachman/indra,pvtodorov/indra,pvtodorov/indra,pvtodorov/indra,bgyori/indra,sorgerlab/indra,sorgerlab/belpy,sorgerlab/indra,sorgerlab/belpy,johnbachman/indra,bgyori/indra,johnbachman/belpy,johnbachman/belpy
|
Add a CLI to call Eidos
|
"""
This is a Python based command line interface to Eidos
to complement the Python-Java bridge based interface.
EIDOSPATH (in the INDRA config.ini or as an environment variable)
needs to be pointing to a fat JAR of the Eidos system.
"""
import os
import glob
import logging
import subprocess
from indra import get_config
from .eidos_api import process_json_ld_file
eip = get_config('EIDOSPATH')
eidos_package = 'org.clulab.wm.eidos'
logger = logging.getLogger('eidos_cli')
def run_eidos(endpoint, *args):
"""Run a given enpoint of Eidos through the command line.
Parameters
----------
endpoint : str
The class within the Eidos package to run, for instance
'apps.ExtractFromDirectory' will run
'org.clulab.wm.eidos.apps.ExtractFromDirectory'
*args
Any further arguments to be passed as inputs to the class
being run.
"""
# Make the full path to the class that should be used
call_class = '%s.%s' % (eidos_package, endpoint)
    # Assemble the command line command and append optional args
cmd = ['java', '-Xmx12G', '-cp', eip, call_class] + list(args)
logger.info('Running Eidos with command "%s"' % (' '.join(cmd)))
subprocess.call(cmd)
def run_extract_from_directory(path_in, path_out):
"""Run Eidos on a set of text files in a folder.
The output is produced in the specified output folder but
the output files aren't processed by this function.
Parameters
----------
path_in : str
Path to an input folder with some text files
path_out : str
Path to an output folder in which Eidos places the output
JSON-LD files
"""
logger.info('Running Eidos on input folder %s' % path_in)
run_eidos('apps.ExtractFromDirectory', path_in, path_out)
def run_extract_and_process(path_in, path_out):
"""Run Eidos on a set of text files and process output with INDRA.
The output is produced in the specified output folder but
the output files aren't processed by this function.
Parameters
----------
path_in : str
Path to an input folder with some text files
path_out : str
Path to an output folder in which Eidos places the output
JSON-LD files
Returns
-------
stmts : list[indra.statements.Statements]
A list of INDRA Statements
"""
run_extract_from_directory(path_in, path_out)
jsons = glob.glob(os.path.join(path_out, '*.jsonld'))
logger.info('Found %d JSON-LD files to process in %s' %
(len(jsons), path_out))
stmts = []
for json in jsons:
ep = process_json_ld_file(json)
if ep:
stmts += ep.statements
return stmts
|
<commit_before><commit_msg>Add a CLI to call Eidos<commit_after>
|
"""
This is a Python based command line interface to Eidos
to complement the Python-Java bridge based interface.
EIDOSPATH (in the INDRA config.ini or as an environment variable)
needs to be pointing to a fat JAR of the Eidos system.
"""
import os
import glob
import logging
import subprocess
from indra import get_config
from .eidos_api import process_json_ld_file
eip = get_config('EIDOSPATH')
eidos_package = 'org.clulab.wm.eidos'
logger = logging.getLogger('eidos_cli')
def run_eidos(endpoint, *args):
"""Run a given enpoint of Eidos through the command line.
Parameters
----------
endpoint : str
The class within the Eidos package to run, for instance
'apps.ExtractFromDirectory' will run
'org.clulab.wm.eidos.apps.ExtractFromDirectory'
*args
Any further arguments to be passed as inputs to the class
being run.
"""
# Make the full path to the class that should be used
call_class = '%s.%s' % (eidos_package, endpoint)
    # Assemble the command line command and append optional args
cmd = ['java', '-Xmx12G', '-cp', eip, call_class] + list(args)
logger.info('Running Eidos with command "%s"' % (' '.join(cmd)))
subprocess.call(cmd)
def run_extract_from_directory(path_in, path_out):
"""Run Eidos on a set of text files in a folder.
The output is produced in the specified output folder but
the output files aren't processed by this function.
Parameters
----------
path_in : str
Path to an input folder with some text files
path_out : str
Path to an output folder in which Eidos places the output
JSON-LD files
"""
logger.info('Running Eidos on input folder %s' % path_in)
run_eidos('apps.ExtractFromDirectory', path_in, path_out)
def run_extract_and_process(path_in, path_out):
"""Run Eidos on a set of text files and process output with INDRA.
The output is produced in the specified output folder but
the output files aren't processed by this function.
Parameters
----------
path_in : str
Path to an input folder with some text files
path_out : str
Path to an output folder in which Eidos places the output
JSON-LD files
Returns
-------
stmts : list[indra.statements.Statements]
A list of INDRA Statements
"""
run_extract_from_directory(path_in, path_out)
jsons = glob.glob(os.path.join(path_out, '*.jsonld'))
logger.info('Found %d JSON-LD files to process in %s' %
(len(jsons), path_out))
stmts = []
for json in jsons:
ep = process_json_ld_file(json)
if ep:
stmts += ep.statements
return stmts
|
Add a CLI to call Eidos"""
This is a Python based command line interface to Eidos
to complement the Python-Java bridge based interface.
EIDOSPATH (in the INDRA config.ini or as an environment variable)
needs to be pointing to a fat JAR of the Eidos system.
"""
import os
import glob
import logging
import subprocess
from indra import get_config
from .eidos_api import process_json_ld_file
eip = get_config('EIDOSPATH')
eidos_package = 'org.clulab.wm.eidos'
logger = logging.getLogger('eidos_cli')
def run_eidos(endpoint, *args):
"""Run a given enpoint of Eidos through the command line.
Parameters
----------
endpoint : str
The class within the Eidos package to run, for instance
'apps.ExtractFromDirectory' will run
'org.clulab.wm.eidos.apps.ExtractFromDirectory'
*args
Any further arguments to be passed as inputs to the class
being run.
"""
# Make the full path to the class that should be used
call_class = '%s.%s' % (eidos_package, endpoint)
    # Assemble the command line command and append optional args
cmd = ['java', '-Xmx12G', '-cp', eip, call_class] + list(args)
logger.info('Running Eidos with command "%s"' % (' '.join(cmd)))
subprocess.call(cmd)
def run_extract_from_directory(path_in, path_out):
"""Run Eidos on a set of text files in a folder.
The output is produced in the specified output folder but
the output files aren't processed by this function.
Parameters
----------
path_in : str
Path to an input folder with some text files
path_out : str
Path to an output folder in which Eidos places the output
JSON-LD files
"""
logger.info('Running Eidos on input folder %s' % path_in)
run_eidos('apps.ExtractFromDirectory', path_in, path_out)
def run_extract_and_process(path_in, path_out):
"""Run Eidos on a set of text files and process output with INDRA.
The output is produced in the specified output folder but
the output files aren't processed by this function.
Parameters
----------
path_in : str
Path to an input folder with some text files
path_out : str
Path to an output folder in which Eidos places the output
JSON-LD files
Returns
-------
stmts : list[indra.statements.Statements]
A list of INDRA Statements
"""
run_extract_from_directory(path_in, path_out)
jsons = glob.glob(os.path.join(path_out, '*.jsonld'))
logger.info('Found %d JSON-LD files to process in %s' %
(len(jsons), path_out))
stmts = []
for json in jsons:
ep = process_json_ld_file(json)
if ep:
stmts += ep.statements
return stmts
|
<commit_before><commit_msg>Add a CLI to call Eidos<commit_after>"""
This is a Python based command line interface to Eidos
to complement the Python-Java bridge based interface.
EIDOSPATH (in the INDRA config.ini or as an environment variable)
needs to be pointing to a fat JAR of the Eidos system.
"""
import os
import glob
import logging
import subprocess
from indra import get_config
from .eidos_api import process_json_ld_file
eip = get_config('EIDOSPATH')
eidos_package = 'org.clulab.wm.eidos'
logger = logging.getLogger('eidos_cli')
def run_eidos(endpoint, *args):
"""Run a given enpoint of Eidos through the command line.
Parameters
----------
endpoint : str
The class within the Eidos package to run, for instance
'apps.ExtractFromDirectory' will run
'org.clulab.wm.eidos.apps.ExtractFromDirectory'
*args
Any further arguments to be passed as inputs to the class
being run.
"""
# Make the full path to the class that should be used
call_class = '%s.%s' % (eidos_package, endpoint)
    # Assemble the command line command and append optional args
cmd = ['java', '-Xmx12G', '-cp', eip, call_class] + list(args)
logger.info('Running Eidos with command "%s"' % (' '.join(cmd)))
subprocess.call(cmd)
def run_extract_from_directory(path_in, path_out):
"""Run Eidos on a set of text files in a folder.
The output is produced in the specified output folder but
the output files aren't processed by this function.
Parameters
----------
path_in : str
Path to an input folder with some text files
path_out : str
Path to an output folder in which Eidos places the output
JSON-LD files
"""
logger.info('Running Eidos on input folder %s' % path_in)
run_eidos('apps.ExtractFromDirectory', path_in, path_out)
def run_extract_and_process(path_in, path_out):
"""Run Eidos on a set of text files and process output with INDRA.
The output is produced in the specified output folder but
the output files aren't processed by this function.
Parameters
----------
path_in : str
Path to an input folder with some text files
path_out : str
Path to an output folder in which Eidos places the output
JSON-LD files
Returns
-------
stmts : list[indra.statements.Statements]
A list of INDRA Statements
"""
run_extract_from_directory(path_in, path_out)
jsons = glob.glob(os.path.join(path_out, '*.jsonld'))
logger.info('Found %d JSON-LD files to process in %s' %
(len(jsons), path_out))
stmts = []
for json in jsons:
ep = process_json_ld_file(json)
if ep:
stmts += ep.statements
return stmts
|
|
ae71d06c07488b8a0e9ba0b479b8b2f29ae08c18
|
scripts/stock_price/white_noise.py
|
scripts/stock_price/white_noise.py
|
#!/usr/bin/python3
# coding: utf-8
'''
Create a white noise animation like a TV screen
'''
import numpy as np
from PIL import Image
width = 128
height = 96
n_frames = 10
frame_duration = 100
center_value = 64
def create_image():
image = np.zeros(shape=(height, width, 3), dtype=int)
for y in range(0, height):
for x in range(0, width):
value = int(np.random.normal() * center_value) + center_value
image[y, x] = value
return Image.fromarray(np.uint8(np.clip(image, 0, 255)))
images = list(map(lambda _: create_image(), range(0, n_frames)))
images[0].save('out/white_noise.gif',
save_all=True, append_images=images[1:], optimize=False,
duration=frame_duration, loop=0)
|
Add an animation generator script
|
Add an animation generator script
|
Python
|
mit
|
zettsu-t/cPlusPlusFriend,zettsu-t/cPlusPlusFriend,zettsu-t/cPlusPlusFriend,zettsu-t/cPlusPlusFriend,zettsu-t/cPlusPlusFriend,zettsu-t/cPlusPlusFriend,zettsu-t/cPlusPlusFriend
|
Add an animation generator script
|
#!/usr/bin/python3
# coding: utf-8
'''
Create a white noise animation like a TV screen
'''
import numpy as np
from PIL import Image
width = 128
height = 96
n_frames = 10
frame_duration = 100
center_value = 64
def create_image():
image = np.zeros(shape=(height, width, 3), dtype=int)
for y in range(0, height):
for x in range(0, width):
value = int(np.random.normal() * center_value) + center_value
image[y, x] = value
return Image.fromarray(np.uint8(np.clip(image, 0, 255)))
images = list(map(lambda _: create_image(), range(0, n_frames)))
images[0].save('out/white_noise.gif',
save_all=True, append_images=images[1:], optimize=False,
duration=frame_duration, loop=0)
|
<commit_before><commit_msg>Add an animation generator script<commit_after>
|
#!/usr/bin/python3
# coding: utf-8
'''
Create a white noise animation like a TV screen
'''
import numpy as np
from PIL import Image
width = 128
height = 96
n_frames = 10
frame_duration = 100
center_value = 64
def create_image():
image = np.zeros(shape=(height, width, 3), dtype=int)
for y in range(0, height):
for x in range(0, width):
value = int(np.random.normal() * center_value) + center_value
image[y, x] = value
return Image.fromarray(np.uint8(np.clip(image, 0, 255)))
images = list(map(lambda _: create_image(), range(0, n_frames)))
images[0].save('out/white_noise.gif',
save_all=True, append_images=images[1:], optimize=False,
duration=frame_duration, loop=0)
|
Add an animation generator script#!/usr/bin/python3
# coding: utf-8
'''
Create a white noise animation like a TV screen
'''
import numpy as np
from PIL import Image
width = 128
height = 96
n_frames = 10
frame_duration = 100
center_value = 64
def create_image():
image = np.zeros(shape=(height, width, 3), dtype=int)
for y in range(0, height):
for x in range(0, width):
value = int(np.random.normal() * center_value) + center_value
image[y, x] = value
return Image.fromarray(np.uint8(np.clip(image, 0, 255)))
images = list(map(lambda _: create_image(), range(0, n_frames)))
images[0].save('out/white_noise.gif',
save_all=True, append_images=images[1:], optimize=False,
duration=frame_duration, loop=0)
|
<commit_before><commit_msg>Add an animation generator script<commit_after>#!/usr/bin/python3
# coding: utf-8
'''
Create a white noise animation like a TV screen
'''
import numpy as np
from PIL import Image
width = 128
height = 96
n_frames = 10
frame_duration = 100
center_value = 64
def create_image():
image = np.zeros(shape=(height, width, 3), dtype=int)
for y in range(0, height):
for x in range(0, width):
value = int(np.random.normal() * center_value) + center_value
image[y, x] = value
return Image.fromarray(np.uint8(np.clip(image, 0, 255)))
images = list(map(lambda _: create_image(), range(0, n_frames)))
images[0].save('out/white_noise.gif',
save_all=True, append_images=images[1:], optimize=False,
duration=frame_duration, loop=0)
|
|
4218d583bce19893c43b602130b75a371a1c699c
|
scripts/spat_sar_demo.py
|
scripts/spat_sar_demo.py
|
import numpy as np
import csv
import sys
import os
from math import exp
import mete
if len(sys.argv) > 1:
S0 = int(sys.argv[1])
N0 = int(sys.argv[2])
if os.path.exists('../demo') is False:
os.mkdir('../demo')
beta = mete.get_beta(S0, N0)
n0 = mete.trunc_logser_rvs(exp(-beta), N0, S0)
n0 = list(n0)
n0 = [int(x) for x in n0]
n0.sort(reverse=True)
rad = mete.get_mete_rad(S0, N0)[0]
Amax = 4
Amin = 1
recur = mete.downscale_sar(Amax, S0, N0, Amin)
recur_obsSAD = mete.downscale_sar_fixed_abu(Amax, n0, Amin)
Avals = recur_obsSAD[0][ : ]
nonrecur = mete.sar_noniterative(Avals, Amax, S0, N0, 'precise')
nonrecur_obsSAD = mete.sar_noniterative_fixed_abu(Avals, Amax, n0)
sad_out = np.empty((S0, 2))
sad_out[ : , 0] = n0
sad_out[ : , 1] = rad
filename = '../demo/' + 'abu_sar_demo.txt'
writer = open(filename, 'wb')
datawriter = csv.writer(writer)
datawriter.writerow(['n0', 'sad'])
for i in range(0, np.shape(sad_out)[0]):
datawriter.writerow(sad_out[i, ])
writer.close()
sar_out = np.empty((3, 4))
sar_out[ : , 0] = recur[1] + [S0]
sar_out[ : , 1] = recur_obsSAD[1]
sar_out[ : , 2] = nonrecur[1]
sar_out[ : , 3] = nonrecur_obsSAD[1]
filename = '../demo/' + 'rich_sar_demo.txt'
writer = open(filename, 'wb')
datawriter = csv.writer(writer)
datawriter.writerow(['recur', 'recur_obsSAD', 'nonrecur', 'nonrecur_obsSAD'])
for i in range(0, np.shape(sar_out)[0]):
datawriter.writerow(sar_out[i, ])
writer.close()
|
Add script to compute sad and sar for conceptual sar figure
|
Add script to compute sad and sar for conceptual sar figure
|
Python
|
mit
|
weecology/mete-spatial,weecology/mete-spatial,weecology/mete-spatial,weecology/mete-spatial
|
Add script to compute sad and sar for conceptual sar figure
|
import numpy as np
import csv
import sys
import os
from math import exp
import mete
if len(sys.argv) > 1:
S0 = int(sys.argv[1])
N0 = int(sys.argv[2])
if os.path.exists('../demo') is False:
os.mkdir('../demo')
beta = mete.get_beta(S0, N0)
n0 = mete.trunc_logser_rvs(exp(-beta), N0, S0)
n0 = list(n0)
n0 = [int(x) for x in n0]
n0.sort(reverse=True)
rad = mete.get_mete_rad(S0, N0)[0]
Amax = 4
Amin = 1
recur = mete.downscale_sar(Amax, S0, N0, Amin)
recur_obsSAD = mete.downscale_sar_fixed_abu(Amax, n0, Amin)
Avals = recur_obsSAD[0][ : ]
nonrecur = mete.sar_noniterative(Avals, Amax, S0, N0, 'precise')
nonrecur_obsSAD = mete.sar_noniterative_fixed_abu(Avals, Amax, n0)
sad_out = np.empty((S0, 2))
sad_out[ : , 0] = n0
sad_out[ : , 1] = rad
filename = '../demo/' + 'abu_sar_demo.txt'
writer = open(filename, 'wb')
datawriter = csv.writer(writer)
datawriter.writerow(['n0', 'sad'])
for i in range(0, np.shape(sad_out)[0]):
datawriter.writerow(sad_out[i, ])
writer.close()
sar_out = np.empty((3, 4))
sar_out[ : , 0] = recur[1] + [S0]
sar_out[ : , 1] = recur_obsSAD[1]
sar_out[ : , 2] = nonrecur[1]
sar_out[ : , 3] = nonrecur_obsSAD[1]
filename = '../demo/' + 'rich_sar_demo.txt'
writer = open(filename, 'wb')
datawriter = csv.writer(writer)
datawriter.writerow(['recur', 'recur_obsSAD', 'nonrecur', 'nonrecur_obsSAD'])
for i in range(0, np.shape(sar_out)[0]):
datawriter.writerow(sar_out[i, ])
writer.close()
|
<commit_before><commit_msg>Add script to compute sad and sar for conceptual sar figure<commit_after>
|
import numpy as np
import csv
import sys
import os
from math import exp
import mete
if len(sys.argv) > 1:
S0 = int(sys.argv[1])
N0 = int(sys.argv[2])
if os.path.exists('../demo') is False:
os.mkdir('../demo')
beta = mete.get_beta(S0, N0)
n0 = mete.trunc_logser_rvs(exp(-beta), N0, S0)
n0 = list(n0)
n0 = [int(x) for x in n0]
n0.sort(reverse=True)
rad = mete.get_mete_rad(S0, N0)[0]
Amax = 4
Amin = 1
recur = mete.downscale_sar(Amax, S0, N0, Amin)
recur_obsSAD = mete.downscale_sar_fixed_abu(Amax, n0, Amin)
Avals = recur_obsSAD[0][ : ]
nonrecur = mete.sar_noniterative(Avals, Amax, S0, N0, 'precise')
nonrecur_obsSAD = mete.sar_noniterative_fixed_abu(Avals, Amax, n0)
sad_out = np.empty((S0, 2))
sad_out[ : , 0] = n0
sad_out[ : , 1] = rad
filename = '../demo/' + 'abu_sar_demo.txt'
writer = open(filename, 'wb')
datawriter = csv.writer(writer)
datawriter.writerow(['n0', 'sad'])
for i in range(0, np.shape(sad_out)[0]):
datawriter.writerow(sad_out[i, ])
writer.close()
sar_out = np.empty((3, 4))
sar_out[ : , 0] = recur[1] + [S0]
sar_out[ : , 1] = recur_obsSAD[1]
sar_out[ : , 2] = nonrecur[1]
sar_out[ : , 3] = nonrecur_obsSAD[1]
filename = '../demo/' + 'rich_sar_demo.txt'
writer = open(filename, 'wb')
datawriter = csv.writer(writer)
datawriter.writerow(['recur', 'recur_obsSAD', 'nonrecur', 'nonrecur_obsSAD'])
for i in range(0, np.shape(sar_out)[0]):
datawriter.writerow(sar_out[i, ])
writer.close()
|
Add script to compute sad and sar for conceptual sar figureimport numpy as np
import csv
import sys
import os
from math import exp
import mete
if len(sys.argv) > 1:
S0 = int(sys.argv[1])
N0 = int(sys.argv[2])
if os.path.exists('../demo') is False:
os.mkdir('../demo')
beta = mete.get_beta(S0, N0)
n0 = mete.trunc_logser_rvs(exp(-beta), N0, S0)
n0 = list(n0)
n0 = [int(x) for x in n0]
n0.sort(reverse=True)
rad = mete.get_mete_rad(S0, N0)[0]
Amax = 4
Amin = 1
recur = mete.downscale_sar(Amax, S0, N0, Amin)
recur_obsSAD = mete.downscale_sar_fixed_abu(Amax, n0, Amin)
Avals = recur_obsSAD[0][ : ]
nonrecur = mete.sar_noniterative(Avals, Amax, S0, N0, 'precise')
nonrecur_obsSAD = mete.sar_noniterative_fixed_abu(Avals, Amax, n0)
sad_out = np.empty((S0, 2))
sad_out[ : , 0] = n0
sad_out[ : , 1] = rad
filename = '../demo/' + 'abu_sar_demo.txt'
writer = open(filename, 'wb')
datawriter = csv.writer(writer)
datawriter.writerow(['n0', 'sad'])
for i in range(0, np.shape(sad_out)[0]):
datawriter.writerow(sad_out[i, ])
writer.close()
sar_out = np.empty((3, 4))
sar_out[ : , 0] = recur[1] + [S0]
sar_out[ : , 1] = recur_obsSAD[1]
sar_out[ : , 2] = nonrecur[1]
sar_out[ : , 3] = nonrecur_obsSAD[1]
filename = '../demo/' + 'rich_sar_demo.txt'
writer = open(filename, 'wb')
datawriter = csv.writer(writer)
datawriter.writerow(['recur', 'recur_obsSAD', 'nonrecur', 'nonrecur_obsSAD'])
for i in range(0, np.shape(sar_out)[0]):
datawriter.writerow(sar_out[i, ])
writer.close()
|
<commit_before><commit_msg>Add script to compute sad and sar for conceptual sar figure<commit_after>import numpy as np
import csv
import sys
import os
from math import exp
import mete
if len(sys.argv) > 1:
S0 = int(sys.argv[1])
N0 = int(sys.argv[2])
if os.path.exists('../demo') is False:
os.mkdir('../demo')
beta = mete.get_beta(S0, N0)
n0 = mete.trunc_logser_rvs(exp(-beta), N0, S0)
n0 = list(n0)
n0 = [int(x) for x in n0]
n0.sort(reverse=True)
rad = mete.get_mete_rad(S0, N0)[0]
Amax = 4
Amin = 1
recur = mete.downscale_sar(Amax, S0, N0, Amin)
recur_obsSAD = mete.downscale_sar_fixed_abu(Amax, n0, Amin)
Avals = recur_obsSAD[0][ : ]
nonrecur = mete.sar_noniterative(Avals, Amax, S0, N0, 'precise')
nonrecur_obsSAD = mete.sar_noniterative_fixed_abu(Avals, Amax, n0)
sad_out = np.empty((S0, 2))
sad_out[ : , 0] = n0
sad_out[ : , 1] = rad
filename = '../demo/' + 'abu_sar_demo.txt'
writer = open(filename, 'wb')
datawriter = csv.writer(writer)
datawriter.writerow(['n0', 'sad'])
for i in range(0, np.shape(sad_out)[0]):
datawriter.writerow(sad_out[i, ])
writer.close()
sar_out = np.empty((3, 4))
sar_out[ : , 0] = recur[1] + [S0]
sar_out[ : , 1] = recur_obsSAD[1]
sar_out[ : , 2] = nonrecur[1]
sar_out[ : , 3] = nonrecur_obsSAD[1]
filename = '../demo/' + 'rich_sar_demo.txt'
writer = open(filename, 'wb')
datawriter = csv.writer(writer)
datawriter.writerow(['recur', 'recur_obsSAD', 'nonrecur', 'nonrecur_obsSAD'])
for i in range(0, np.shape(sar_out)[0]):
datawriter.writerow(sar_out[i, ])
writer.close()
|
|
c7a8a3c329f8f1d90af96db7f7af1e36ab9f36e8
|
scripts/test-scrobble.py
|
scripts/test-scrobble.py
|
#!/usr/bin/env python
##### CONFIG #####
SERVER = "turtle.libre.fm"
USER = "testuser"
PASSWORD = "password"
##################
import gobble, datetime
print "Handshaking..."
gs = gobble.GobbleServer(SERVER, USER, PASSWORD, 'tst')
time = datetime.datetime.now() - datetime.timedelta(days=1) # Yesterday
track = gobble.GobbleTrack("Richard Stallman", "Free Software Song", time)
gs.add_track(track)
print "Submitting..."
gs.submit()
print "Done!"
|
Add test script for checking to see if scrobbling works on new installs
|
Add test script for checking to see if scrobbling works on new installs
|
Python
|
agpl-3.0
|
foocorp/gnu-fm,foocorp/gnu-fm,foocorp/gnu-fm,foocorp/gnu-fm,foocorp/gnu-fm,foocorp/gnu-fm,foocorp/gnu-fm,foocorp/gnu-fm,foocorp/gnu-fm
|
Add test script for checking to see if scrobbling works on new installs
|
#!/usr/bin/env python
##### CONFIG #####
SERVER = "turtle.libre.fm"
USER = "testuser"
PASSWORD = "password"
##################
import gobble, datetime
print "Handshaking..."
gs = gobble.GobbleServer(SERVER, USER, PASSWORD, 'tst')
time = datetime.datetime.now() - datetime.timedelta(days=1) # Yesterday
track = gobble.GobbleTrack("Richard Stallman", "Free Software Song", time)
gs.add_track(track)
print "Submitting..."
gs.submit()
print "Done!"
|
<commit_before><commit_msg>Add test script for checking to see if scrobbling works on new installs<commit_after>
|
#!/usr/bin/env python
##### CONFIG #####
SERVER = "turtle.libre.fm"
USER = "testuser"
PASSWORD = "password"
##################
import gobble, datetime
print "Handshaking..."
gs = gobble.GobbleServer(SERVER, USER, PASSWORD, 'tst')
time = datetime.datetime.now() - datetime.timedelta(days=1) # Yesterday
track = gobble.GobbleTrack("Richard Stallman", "Free Software Song", time)
gs.add_track(track)
print "Submitting..."
gs.submit()
print "Done!"
|
Add test script for checking to see if scrobbling works on new installs
#!/usr/bin/env python
##### CONFIG #####
SERVER = "turtle.libre.fm"
USER = "testuser"
PASSWORD = "password"
##################
import gobble, datetime
print "Handshaking..."
gs = gobble.GobbleServer(SERVER, USER, PASSWORD, 'tst')
time = datetime.datetime.now() - datetime.timedelta(days=1) # Yesterday
track = gobble.GobbleTrack("Richard Stallman", "Free Software Song", time)
gs.add_track(track)
print "Submitting..."
gs.submit()
print "Done!"
|
<commit_before><commit_msg>Add test script for checking to see if scrobbling works on new installs<commit_after>#!/usr/bin/env python
##### CONFIG #####
SERVER = "turtle.libre.fm"
USER = "testuser"
PASSWORD = "password"
##################
import gobble, datetime
print "Handshaking..."
gs = gobble.GobbleServer(SERVER, USER, PASSWORD, 'tst')
time = datetime.datetime.now() - datetime.timedelta(days=1) # Yesterday
track = gobble.GobbleTrack("Richard Stallman", "Free Software Song", time)
gs.add_track(track)
print "Submitting..."
gs.submit()
print "Done!"
|
|
253147a32014e945deda52c252dee2c6e0072264
|
test/test_db_hash_handler.py
|
test/test_db_hash_handler.py
|
import hashlib
from handler_fixture import StationHandlerTestCase
from groundstation.transfer.request_handlers import handle_listdbhash
from groundstation.transfer.response_handlers import handle_terminate
import groundstation.transfer.response as response
from groundstation.proto.db_hash_pb2 import DBHash
class TestHandlerNullDatabase(StationHandlerTestCase):
def test_handle_dbhash_returns_const_for_empty_db(self):
# hashlib.sha1().digest()
null_hash = '\xda9\xa3\xee^kK\r2U\xbf\xef\x95`\x18\x90\xaf\xd8\x07\t'
self.station.payload = ""
handle_listdbhash(self.station)
resp = self.station.stream.pop()
self.assertIsInstance(resp, response.Response)
db_hash = DBHash()
db_hash.ParseFromString(resp.payload)
self.assertEqual(db_hash.hash, null_hash)
self.assertEqual(db_hash.prefix, "")
class TestHandlerPopulatedDataBase(StationHandlerTestCase):
def test_prefixes_work_for_populated_db(self):
# Create some test data.
oids = []
prefixes = ("a", "1c", "eb9")
for i in xrange(100):
oid = self.station.station.write("butts %d" % i)
oids.append(oid)
oids = sorted(oids)
hashes = {}
for prefix in prefixes:
s = hashlib.sha1()
for i in oids:
if i.startswith(prefix):
s.update(i)
hashes[prefix] = s.digest()
for prefix in prefixes:
self.station.payload = prefix
handle_listdbhash(self.station)
resp = self.station.stream.pop()
self.assertIsInstance(resp, response.Response)
db_hash = DBHash()
db_hash.ParseFromString(resp.payload)
self.assertEqual(db_hash.hash, hashes[prefix])
self.assertEqual(db_hash.prefix, prefix)
|
Add tests for db hashing
|
Add tests for db hashing
|
Python
|
mit
|
richo/groundstation,richo/groundstation,richo/groundstation,richo/groundstation,richo/groundstation
|
Add tests for db hashing
|
import hashlib
from handler_fixture import StationHandlerTestCase
from groundstation.transfer.request_handlers import handle_listdbhash
from groundstation.transfer.response_handlers import handle_terminate
import groundstation.transfer.response as response
from groundstation.proto.db_hash_pb2 import DBHash
class TestHandlerNullDatabase(StationHandlerTestCase):
def test_handle_dbhash_returns_const_for_empty_db(self):
# hashlib.sha1().digest()
null_hash = '\xda9\xa3\xee^kK\r2U\xbf\xef\x95`\x18\x90\xaf\xd8\x07\t'
self.station.payload = ""
handle_listdbhash(self.station)
resp = self.station.stream.pop()
self.assertIsInstance(resp, response.Response)
db_hash = DBHash()
db_hash.ParseFromString(resp.payload)
self.assertEqual(db_hash.hash, null_hash)
self.assertEqual(db_hash.prefix, "")
class TestHandlerPopulatedDataBase(StationHandlerTestCase):
def test_prefixes_work_for_populated_db(self):
# Create some test data.
oids = []
prefixes = ("a", "1c", "eb9")
for i in xrange(100):
oid = self.station.station.write("butts %d" % i)
oids.append(oid)
oids = sorted(oids)
hashes = {}
for prefix in prefixes:
s = hashlib.sha1()
for i in oids:
if i.startswith(prefix):
s.update(i)
hashes[prefix] = s.digest()
for prefix in prefixes:
self.station.payload = prefix
handle_listdbhash(self.station)
resp = self.station.stream.pop()
self.assertIsInstance(resp, response.Response)
db_hash = DBHash()
db_hash.ParseFromString(resp.payload)
self.assertEqual(db_hash.hash, hashes[prefix])
self.assertEqual(db_hash.prefix, prefix)
|
<commit_before><commit_msg>Add tests for db hashing<commit_after>
|
import hashlib
from handler_fixture import StationHandlerTestCase
from groundstation.transfer.request_handlers import handle_listdbhash
from groundstation.transfer.response_handlers import handle_terminate
import groundstation.transfer.response as response
from groundstation.proto.db_hash_pb2 import DBHash
class TestHandlerNullDatabase(StationHandlerTestCase):
def test_handle_dbhash_returns_const_for_empty_db(self):
# hashlib.sha1().digest()
null_hash = '\xda9\xa3\xee^kK\r2U\xbf\xef\x95`\x18\x90\xaf\xd8\x07\t'
self.station.payload = ""
handle_listdbhash(self.station)
resp = self.station.stream.pop()
self.assertIsInstance(resp, response.Response)
db_hash = DBHash()
db_hash.ParseFromString(resp.payload)
self.assertEqual(db_hash.hash, null_hash)
self.assertEqual(db_hash.prefix, "")
class TestHandlerPopulatedDataBase(StationHandlerTestCase):
def test_prefixes_work_for_populated_db(self):
# Create some test data.
oids = []
prefixes = ("a", "1c", "eb9")
for i in xrange(100):
oid = self.station.station.write("butts %d" % i)
oids.append(oid)
oids = sorted(oids)
hashes = {}
for prefix in prefixes:
s = hashlib.sha1()
for i in oids:
if i.startswith(prefix):
s.update(i)
hashes[prefix] = s.digest()
for prefix in prefixes:
self.station.payload = prefix
handle_listdbhash(self.station)
resp = self.station.stream.pop()
self.assertIsInstance(resp, response.Response)
db_hash = DBHash()
db_hash.ParseFromString(resp.payload)
self.assertEqual(db_hash.hash, hashes[prefix])
self.assertEqual(db_hash.prefix, prefix)
|
Add tests for db hashing
import hashlib
from handler_fixture import StationHandlerTestCase
from groundstation.transfer.request_handlers import handle_listdbhash
from groundstation.transfer.response_handlers import handle_terminate
import groundstation.transfer.response as response
from groundstation.proto.db_hash_pb2 import DBHash
class TestHandlerNullDatabase(StationHandlerTestCase):
def test_handle_dbhash_returns_const_for_empty_db(self):
# hashlib.sha1().digest()
null_hash = '\xda9\xa3\xee^kK\r2U\xbf\xef\x95`\x18\x90\xaf\xd8\x07\t'
self.station.payload = ""
handle_listdbhash(self.station)
resp = self.station.stream.pop()
self.assertIsInstance(resp, response.Response)
db_hash = DBHash()
db_hash.ParseFromString(resp.payload)
self.assertEqual(db_hash.hash, null_hash)
self.assertEqual(db_hash.prefix, "")
class TestHandlerPopulatedDataBase(StationHandlerTestCase):
def test_prefixes_work_for_populated_db(self):
# Create some test data.
oids = []
prefixes = ("a", "1c", "eb9")
for i in xrange(100):
oid = self.station.station.write("butts %d" % i)
oids.append(oid)
oids = sorted(oids)
hashes = {}
for prefix in prefixes:
s = hashlib.sha1()
for i in oids:
if i.startswith(prefix):
s.update(i)
hashes[prefix] = s.digest()
for prefix in prefixes:
self.station.payload = prefix
handle_listdbhash(self.station)
resp = self.station.stream.pop()
self.assertIsInstance(resp, response.Response)
db_hash = DBHash()
db_hash.ParseFromString(resp.payload)
self.assertEqual(db_hash.hash, hashes[prefix])
self.assertEqual(db_hash.prefix, prefix)
|
<commit_before><commit_msg>Add tests for db hashing<commit_after>import hashlib
from handler_fixture import StationHandlerTestCase
from groundstation.transfer.request_handlers import handle_listdbhash
from groundstation.transfer.response_handlers import handle_terminate
import groundstation.transfer.response as response
from groundstation.proto.db_hash_pb2 import DBHash
class TestHandlerNullDatabase(StationHandlerTestCase):
def test_handle_dbhash_returns_const_for_empty_db(self):
# hashlib.sha1().digest()
null_hash = '\xda9\xa3\xee^kK\r2U\xbf\xef\x95`\x18\x90\xaf\xd8\x07\t'
self.station.payload = ""
handle_listdbhash(self.station)
resp = self.station.stream.pop()
self.assertIsInstance(resp, response.Response)
db_hash = DBHash()
db_hash.ParseFromString(resp.payload)
self.assertEqual(db_hash.hash, null_hash)
self.assertEqual(db_hash.prefix, "")
class TestHandlerPopulatedDataBase(StationHandlerTestCase):
def test_prefixes_work_for_populated_db(self):
# Create some test data.
oids = []
prefixes = ("a", "1c", "eb9")
for i in xrange(100):
oid = self.station.station.write("butts %d" % i)
oids.append(oid)
oids = sorted(oids)
hashes = {}
for prefix in prefixes:
s = hashlib.sha1()
for i in oids:
if i.startswith(prefix):
s.update(i)
hashes[prefix] = s.digest()
for prefix in prefixes:
self.station.payload = prefix
handle_listdbhash(self.station)
resp = self.station.stream.pop()
self.assertIsInstance(resp, response.Response)
db_hash = DBHash()
db_hash.ParseFromString(resp.payload)
self.assertEqual(db_hash.hash, hashes[prefix])
self.assertEqual(db_hash.prefix, prefix)
|
|
92164760614821af73c2167fab5555475db5cd9c
|
examples/AllSolutions.py
|
examples/AllSolutions.py
|
from __future__ import division, print_function
from Numberjack import *
# This example demonstrates finding all solutions to a given model.
# solver.startNewSearch() should be called to set up internal data structures in
# the solver first, then call solver.getNextSolution() repeatedly until it
# returns a value other than SAT.
def solve(param):
num_solutions = 0
N = param["N"]
decsionvars = VarArray(N) # Array of N Boolean variables
model = Model(Sum(decsionvars) == (N // 2))
solver = model.load(param["solver"])
solver.startNewSearch() # Must be called to set up internal data structures
while solver.getNextSolution() == SAT:
values = [x.get_value() for x in decsionvars]
print("Solution:", values)
num_solutions += 1
print("Found a total of %d solutions." % num_solutions)
if __name__ == '__main__':
default = {'solver': 'MiniSat', 'N': 4}
param = input(default)
solve(param)
|
Add example code for finding all solutions.
|
Add example code for finding all solutions.
|
Python
|
lgpl-2.1
|
eomahony/Numberjack,eomahony/Numberjack,eomahony/Numberjack,eomahony/Numberjack,eomahony/Numberjack
|
Add example code for finding all solutions.
|
from __future__ import division, print_function
from Numberjack import *
# This example demonstrates finding all solutions to a given model.
# solver.startNewSearch() should be called to set up internal data structures in
# the solver first, then call solver.getNextSolution() repeatedly until it
# returns a value other than SAT.
def solve(param):
num_solutions = 0
N = param["N"]
decsionvars = VarArray(N) # Array of N Boolean variables
model = Model(Sum(decsionvars) == (N // 2))
solver = model.load(param["solver"])
solver.startNewSearch() # Must be called to set up internal data structures
while solver.getNextSolution() == SAT:
values = [x.get_value() for x in decsionvars]
print("Solution:", values)
num_solutions += 1
print("Found a total of %d solutions." % num_solutions)
if __name__ == '__main__':
default = {'solver': 'MiniSat', 'N': 4}
param = input(default)
solve(param)
|
<commit_before><commit_msg>Add example code for finding all solutions.<commit_after>
|
from __future__ import division, print_function
from Numberjack import *
# This example demonstrates finding all solutions to a given model.
# solver.startNewSearch() should be called to set up internal data structures in
# the solver first, then call solver.getNextSolution() repeatedly until it
# returns a value other than SAT.
def solve(param):
num_solutions = 0
N = param["N"]
decsionvars = VarArray(N) # Array of N Boolean variables
model = Model(Sum(decsionvars) == (N // 2))
solver = model.load(param["solver"])
solver.startNewSearch() # Must be called to set up internal data structures
while solver.getNextSolution() == SAT:
values = [x.get_value() for x in decsionvars]
print("Solution:", values)
num_solutions += 1
print("Found a total of %d solutions." % num_solutions)
if __name__ == '__main__':
default = {'solver': 'MiniSat', 'N': 4}
param = input(default)
solve(param)
|
Add example code for finding all solutions.
from __future__ import division, print_function
from Numberjack import *
# This example demonstrates finding all solutions to a given model.
# solver.startNewSearch() should be called to set up internal data structures in
# the solver first, then call solver.getNextSolution() repeatedly until it
# returns a value other than SAT.
def solve(param):
num_solutions = 0
N = param["N"]
decsionvars = VarArray(N) # Array of N Boolean variables
model = Model(Sum(decsionvars) == (N // 2))
solver = model.load(param["solver"])
solver.startNewSearch() # Must be called to set up internal data structures
while solver.getNextSolution() == SAT:
values = [x.get_value() for x in decsionvars]
print("Solution:", values)
num_solutions += 1
print("Found a total of %d solutions." % num_solutions)
if __name__ == '__main__':
default = {'solver': 'MiniSat', 'N': 4}
param = input(default)
solve(param)
|
<commit_before><commit_msg>Add example code for finding all solutions.<commit_after>from __future__ import division, print_function
from Numberjack import *
# This example demonstrates finding all solutions to a given model.
# solver.startNewSearch() should be called to set up internal data structures in
# the solver first, then call solver.getNextSolution() repeatedly until it
# returns a value other than SAT.
def solve(param):
num_solutions = 0
N = param["N"]
decsionvars = VarArray(N) # Array of N Boolean variables
model = Model(Sum(decsionvars) == (N // 2))
solver = model.load(param["solver"])
solver.startNewSearch() # Must be called to set up internal data structures
while solver.getNextSolution() == SAT:
values = [x.get_value() for x in decsionvars]
print("Solution:", values)
num_solutions += 1
print("Found a total of %d solutions." % num_solutions)
if __name__ == '__main__':
default = {'solver': 'MiniSat', 'N': 4}
param = input(default)
solve(param)
|
|
dd63a0d619819aaffc7d6dbc05183530190a169e
|
src/core/signing.py
|
src/core/signing.py
|
import zlib
from django.core.signing import JSONSerializer, Signer, b64_decode, b64_encode
from django.utils.encoding import force_bytes
def fixed_dumps(obj, key=None, salt='core.signing', serializer=JSONSerializer, compress=False):
"""
The code is extracted from django.core.signing.loads
Returns URL-safe, sha1 signed base64 compressed JSON string. If key is
None, settings.SECRET_KEY is used instead.
If compress is True (not the default) checks if compressing using zlib can
save some space. Prepends a '.' to signify compression. This is included
in the signature, to protect against zip bombs.
Salt can be used to namespace the hash, so that a signed string is
only valid for a given namespace. Leaving this at the default
value or re-using a salt value across different parts of your
application without good cause is a security risk.
The serializer is expected to return a bytestring.
"""
data = serializer().dumps(obj)
# Flag for if it's been compressed or not
is_compressed = False
if compress:
# Avoid zlib dependency unless compress is being used
compressed = zlib.compress(data)
if len(compressed) < (len(data) - 1):
data = compressed
is_compressed = True
base64d = b64_encode(data)
if is_compressed:
base64d = b'.' + base64d
return Signer(key, salt=salt).sign(base64d)
def fixed_loads(s, key=None, salt='core.signing', serializer=JSONSerializer):
"""
The code is extracted from django.core.signing.dumps
Reverse of dumps(), raises BadSignature if signature fails.
The serializer is expected to accept a bytestring.
"""
# TimestampSigner.unsign always returns unicode but base64 and zlib
# compression operate on bytes.
base64d = force_bytes(Signer(key, salt=salt).unsign(s))
decompress = False
if base64d[:1] == b'.':
# It's compressed; uncompress it first
base64d = base64d[1:]
decompress = True
data = b64_decode(base64d)
if decompress:
data = zlib.decompress(data)
return serializer().loads(data)
|
Add fixed authentication key (with auth number) logic
|
Add fixed authentication key (with auth number) logic
Based on django.core.signing.{loads,dumps}
|
Python
|
mit
|
ccwang002/biocloud-server-kai,ccwang002/biocloud-server-kai,ccwang002/biocloud-server-kai
|
Add fixed authentication key (with auth number) logic
Based on django.core.signing.{loads,dumps}
|
import zlib
from django.core.signing import JSONSerializer, Signer, b64_decode, b64_encode
from django.utils.encoding import force_bytes
def fixed_dumps(obj, key=None, salt='core.signing', serializer=JSONSerializer, compress=False):
"""
The code is extracted from django.core.signing.loads
Returns URL-safe, sha1 signed base64 compressed JSON string. If key is
None, settings.SECRET_KEY is used instead.
If compress is True (not the default) checks if compressing using zlib can
save some space. Prepends a '.' to signify compression. This is included
in the signature, to protect against zip bombs.
Salt can be used to namespace the hash, so that a signed string is
only valid for a given namespace. Leaving this at the default
value or re-using a salt value across different parts of your
application without good cause is a security risk.
The serializer is expected to return a bytestring.
"""
data = serializer().dumps(obj)
# Flag for if it's been compressed or not
is_compressed = False
if compress:
# Avoid zlib dependency unless compress is being used
compressed = zlib.compress(data)
if len(compressed) < (len(data) - 1):
data = compressed
is_compressed = True
base64d = b64_encode(data)
if is_compressed:
base64d = b'.' + base64d
return Signer(key, salt=salt).sign(base64d)
def fixed_loads(s, key=None, salt='core.signing', serializer=JSONSerializer):
"""
The code is extracted from django.core.signing.dumps
Reverse of dumps(), raises BadSignature if signature fails.
The serializer is expected to accept a bytestring.
"""
# TimestampSigner.unsign always returns unicode but base64 and zlib
# compression operate on bytes.
base64d = force_bytes(Signer(key, salt=salt).unsign(s))
decompress = False
if base64d[:1] == b'.':
# It's compressed; uncompress it first
base64d = base64d[1:]
decompress = True
data = b64_decode(base64d)
if decompress:
data = zlib.decompress(data)
return serializer().loads(data)
|
<commit_before><commit_msg>Add fixed authentication key (with auth number) logic
Based on django.core.signing.{loads,dumps}<commit_after>
|
import zlib
from django.core.signing import JSONSerializer, Signer, b64_decode, b64_encode
from django.utils.encoding import force_bytes
def fixed_dumps(obj, key=None, salt='core.signing', serializer=JSONSerializer, compress=False):
"""
The code is extracted from django.core.signing.loads
Returns URL-safe, sha1 signed base64 compressed JSON string. If key is
None, settings.SECRET_KEY is used instead.
If compress is True (not the default) checks if compressing using zlib can
save some space. Prepends a '.' to signify compression. This is included
in the signature, to protect against zip bombs.
Salt can be used to namespace the hash, so that a signed string is
only valid for a given namespace. Leaving this at the default
value or re-using a salt value across different parts of your
application without good cause is a security risk.
The serializer is expected to return a bytestring.
"""
data = serializer().dumps(obj)
# Flag for if it's been compressed or not
is_compressed = False
if compress:
# Avoid zlib dependency unless compress is being used
compressed = zlib.compress(data)
if len(compressed) < (len(data) - 1):
data = compressed
is_compressed = True
base64d = b64_encode(data)
if is_compressed:
base64d = b'.' + base64d
return Signer(key, salt=salt).sign(base64d)
def fixed_loads(s, key=None, salt='core.signing', serializer=JSONSerializer):
"""
The code is extracted from django.core.signing.dumps
Reverse of dumps(), raises BadSignature if signature fails.
The serializer is expected to accept a bytestring.
"""
# TimestampSigner.unsign always returns unicode but base64 and zlib
# compression operate on bytes.
base64d = force_bytes(Signer(key, salt=salt).unsign(s))
decompress = False
if base64d[:1] == b'.':
# It's compressed; uncompress it first
base64d = base64d[1:]
decompress = True
data = b64_decode(base64d)
if decompress:
data = zlib.decompress(data)
return serializer().loads(data)
|
Add fixed authentication key (with auth number) logic
Based on django.core.signing.{loads,dumps}
import zlib
from django.core.signing import JSONSerializer, Signer, b64_decode, b64_encode
from django.utils.encoding import force_bytes
def fixed_dumps(obj, key=None, salt='core.signing', serializer=JSONSerializer, compress=False):
"""
The code is extracted from django.core.signing.loads
Returns URL-safe, sha1 signed base64 compressed JSON string. If key is
None, settings.SECRET_KEY is used instead.
If compress is True (not the default) checks if compressing using zlib can
save some space. Prepends a '.' to signify compression. This is included
in the signature, to protect against zip bombs.
Salt can be used to namespace the hash, so that a signed string is
only valid for a given namespace. Leaving this at the default
value or re-using a salt value across different parts of your
application without good cause is a security risk.
The serializer is expected to return a bytestring.
"""
data = serializer().dumps(obj)
# Flag for if it's been compressed or not
is_compressed = False
if compress:
# Avoid zlib dependency unless compress is being used
compressed = zlib.compress(data)
if len(compressed) < (len(data) - 1):
data = compressed
is_compressed = True
base64d = b64_encode(data)
if is_compressed:
base64d = b'.' + base64d
return Signer(key, salt=salt).sign(base64d)
def fixed_loads(s, key=None, salt='core.signing', serializer=JSONSerializer):
"""
The code is extracted from django.core.signing.dumps
Reverse of dumps(), raises BadSignature if signature fails.
The serializer is expected to accept a bytestring.
"""
# TimestampSigner.unsign always returns unicode but base64 and zlib
# compression operate on bytes.
base64d = force_bytes(Signer(key, salt=salt).unsign(s))
decompress = False
if base64d[:1] == b'.':
# It's compressed; uncompress it first
base64d = base64d[1:]
decompress = True
data = b64_decode(base64d)
if decompress:
data = zlib.decompress(data)
return serializer().loads(data)
|
<commit_before><commit_msg>Add fixed authentication key (with auth number) logic
Based on django.core.signing.{loads,dumps}<commit_after>import zlib
from django.core.signing import JSONSerializer, Signer, b64_decode, b64_encode
from django.utils.encoding import force_bytes
def fixed_dumps(obj, key=None, salt='core.signing', serializer=JSONSerializer, compress=False):
"""
The code is extracted from django.core.signing.loads
Returns URL-safe, sha1 signed base64 compressed JSON string. If key is
None, settings.SECRET_KEY is used instead.
If compress is True (not the default) checks if compressing using zlib can
save some space. Prepends a '.' to signify compression. This is included
in the signature, to protect against zip bombs.
Salt can be used to namespace the hash, so that a signed string is
only valid for a given namespace. Leaving this at the default
value or re-using a salt value across different parts of your
application without good cause is a security risk.
The serializer is expected to return a bytestring.
"""
data = serializer().dumps(obj)
# Flag for if it's been compressed or not
is_compressed = False
if compress:
# Avoid zlib dependency unless compress is being used
compressed = zlib.compress(data)
if len(compressed) < (len(data) - 1):
data = compressed
is_compressed = True
base64d = b64_encode(data)
if is_compressed:
base64d = b'.' + base64d
return Signer(key, salt=salt).sign(base64d)
def fixed_loads(s, key=None, salt='core.signing', serializer=JSONSerializer):
"""
The code is extracted from django.core.signing.dumps
Reverse of dumps(), raises BadSignature if signature fails.
The serializer is expected to accept a bytestring.
"""
# TimestampSigner.unsign always returns unicode but base64 and zlib
# compression operate on bytes.
base64d = force_bytes(Signer(key, salt=salt).unsign(s))
decompress = False
if base64d[:1] == b'.':
# It's compressed; uncompress it first
base64d = base64d[1:]
decompress = True
data = b64_decode(base64d)
if decompress:
data = zlib.decompress(data)
return serializer().loads(data)
|
|
ecb3fcfb19085fce2e15afc8d0c7ad4404401083
|
simuvex/procedures/stubs/caller.py
|
simuvex/procedures/stubs/caller.py
|
import simuvex
######################################
# Caller
######################################
class Caller(simuvex.SimProcedure):
"""
Caller stub. Creates a Ijk_Call exit to the specified function
"""
def run(self, target_addr=None):
self.call(target_addr, [ ], self.after_call)
def after_call(self):
pass
|
Add a new SimProcedure stub Caller
|
Add a new SimProcedure stub Caller
|
Python
|
bsd-2-clause
|
angr/angr,angr/simuvex,chubbymaggie/simuvex,axt/angr,tyb0807/angr,iamahuman/angr,schieb/angr,chubbymaggie/simuvex,schieb/angr,f-prettyland/angr,chubbymaggie/simuvex,tyb0807/angr,chubbymaggie/angr,angr/angr,axt/angr,iamahuman/angr,chubbymaggie/angr,iamahuman/angr,f-prettyland/angr,angr/angr,schieb/angr,f-prettyland/angr,chubbymaggie/angr,axt/angr,tyb0807/angr
|
Add a new SimProcedure stub Caller
|
import simuvex
######################################
# Caller
######################################
class Caller(simuvex.SimProcedure):
"""
Caller stub. Creates a Ijk_Call exit to the specified function
"""
def run(self, target_addr=None):
self.call(target_addr, [ ], self.after_call)
def after_call(self):
pass
|
<commit_before><commit_msg>Add a new SimProcedure stub Caller<commit_after>
|
import simuvex
######################################
# Caller
######################################
class Caller(simuvex.SimProcedure):
"""
Caller stub. Creates a Ijk_Call exit to the specified function
"""
def run(self, target_addr=None):
self.call(target_addr, [ ], self.after_call)
def after_call(self):
pass
|
Add a new SimProcedure stub Caller
import simuvex
######################################
# Caller
######################################
class Caller(simuvex.SimProcedure):
"""
Caller stub. Creates a Ijk_Call exit to the specified function
"""
def run(self, target_addr=None):
self.call(target_addr, [ ], self.after_call)
def after_call(self):
pass
|
<commit_before><commit_msg>Add a new SimProcedure stub Caller<commit_after>
import simuvex
######################################
# Caller
######################################
class Caller(simuvex.SimProcedure):
"""
Caller stub. Creates a Ijk_Call exit to the specified function
"""
def run(self, target_addr=None):
self.call(target_addr, [ ], self.after_call)
def after_call(self):
pass
|
|
795423854ee31bbd2c120460ff2f9b537d3d6dd9
|
dj_experiment/management/commands/listexperimentsbycatalog.py
|
dj_experiment/management/commands/listexperimentsbycatalog.py
|
from dj_experiment.models import Catalog
from django.core.management.base import BaseCommand, CommandError
from humanfriendly.tables import format_smart_table
class Command(BaseCommand):
"""Retrieve the list of experiments from a catalog."""
help = 'Retrieve the list of experiments from a catalog'
def add_arguments(self, parser):
"""Add arguments for handling the command."""
# Named (optional) arguments start with --
parser.add_argument('-C', '--catalog', # argument flag
dest='catalog', # argument name
default=None,
help='Query catalog by its name',
)
def handle(self, *args, **options):
"""List all the entries from a catalog name."""
if not options.get('catalog'):
raise CommandError(
"listexperimentsbycatalog wants at least one named argument"
)
else:
catalog_name = options['catalog']
column_names = ['Experiment Name']
experiments = (
cat.xperiments.values('name')
for cat in Catalog.objects.filter(
name=catalog_name
)
)
explist = list()
for xperiment in experiments:
for exp_kws in xperiment:
explist.append([exp_kws['name'], ])
self.stdout.write(format_smart_table(
explist, column_names))
|
Add command to list experiments
|
Add command to list experiments
|
Python
|
mit
|
francbartoli/dj-experiment,francbartoli/dj-experiment
|
Add command to list experiments
|
from dj_experiment.models import Catalog
from django.core.management.base import BaseCommand, CommandError
from humanfriendly.tables import format_smart_table
class Command(BaseCommand):
"""Retrieve the list of experiments from a catalog."""
help = 'Retrieve the list of experiments from a catalog'
def add_arguments(self, parser):
"""Add arguments for handling the command."""
# Named (optional) arguments start with --
parser.add_argument('-C', '--catalog', # argument flag
dest='catalog', # argument name
default=None,
help='Query catalog by its name',
)
def handle(self, *args, **options):
"""List all the entries from a catalog name."""
if not options.get('catalog'):
raise CommandError(
"listexperimentsbycatalog wants at least one named argument"
)
else:
catalog_name = options['catalog']
column_names = ['Experiment Name']
experiments = (
cat.xperiments.values('name')
for cat in Catalog.objects.filter(
name=catalog_name
)
)
explist = list()
for xperiment in experiments:
for exp_kws in xperiment:
explist.append([exp_kws['name'], ])
self.stdout.write(format_smart_table(
explist, column_names))
|
<commit_before><commit_msg>Add command to list experiments<commit_after>
|
from dj_experiment.models import Catalog
from django.core.management.base import BaseCommand, CommandError
from humanfriendly.tables import format_smart_table
class Command(BaseCommand):
"""Retrieve the list of experiments from a catalog."""
help = 'Retrieve the list of experiments from a catalog'
def add_arguments(self, parser):
"""Add arguments for handling the command."""
# Named (optional) arguments start with --
parser.add_argument('-C', '--catalog', # argument flag
dest='catalog', # argument name
default=None,
help='Query catalog by its name',
)
def handle(self, *args, **options):
"""List all the entries from a catalog name."""
if not options.get('catalog'):
raise CommandError(
"listexperimentsbycatalog wants at least one named argument"
)
else:
catalog_name = options['catalog']
column_names = ['Experiment Name']
experiments = (
cat.xperiments.values('name')
for cat in Catalog.objects.filter(
name=catalog_name
)
)
explist = list()
for xperiment in experiments:
for exp_kws in xperiment:
explist.append([exp_kws['name'], ])
self.stdout.write(format_smart_table(
explist, column_names))
|
Add command to list experiments
from dj_experiment.models import Catalog
from django.core.management.base import BaseCommand, CommandError
from humanfriendly.tables import format_smart_table
class Command(BaseCommand):
"""Retrieve the list of experiments from a catalog."""
help = 'Retrieve the list of experiments from a catalog'
def add_arguments(self, parser):
"""Add arguments for handling the command."""
# Named (optional) arguments start with --
parser.add_argument('-C', '--catalog', # argument flag
dest='catalog', # argument name
default=None,
help='Query catalog by its name',
)
def handle(self, *args, **options):
"""List all the entries from a catalog name."""
if not options.get('catalog'):
raise CommandError(
"listexperimentsbycatalog wants at least one named argument"
)
else:
catalog_name = options['catalog']
column_names = ['Experiment Name']
experiments = (
cat.xperiments.values('name')
for cat in Catalog.objects.filter(
name=catalog_name
)
)
explist = list()
for xperiment in experiments:
for exp_kws in xperiment:
explist.append([exp_kws['name'], ])
self.stdout.write(format_smart_table(
explist, column_names))
|
<commit_before><commit_msg>Add command to list experiments<commit_after>from dj_experiment.models import Catalog
from django.core.management.base import BaseCommand, CommandError
from humanfriendly.tables import format_smart_table
class Command(BaseCommand):
"""Retrieve the list of experiments from a catalog."""
help = 'Retrieve the list of experiments from a catalog'
def add_arguments(self, parser):
"""Add arguments for handling the command."""
# Named (optional) arguments start with --
parser.add_argument('-C', '--catalog', # argument flag
dest='catalog', # argument name
default=None,
help='Query catalog by its name',
)
def handle(self, *args, **options):
"""List all the entries from a catalog name."""
if not options.get('catalog'):
raise CommandError(
"listexperimentsbycatalog wants at least one named argument"
)
else:
catalog_name = options['catalog']
column_names = ['Experiment Name']
experiments = (
cat.xperiments.values('name')
for cat in Catalog.objects.filter(
name=catalog_name
)
)
explist = list()
for xperiment in experiments:
for exp_kws in xperiment:
explist.append([exp_kws['name'], ])
self.stdout.write(format_smart_table(
explist, column_names))
|
|
fd2bf99bed6bae5d3af20faa8352bd85f8225e93
|
st2api/tests/unit/controllers/v1/test_jsexpose_decorator.py
|
st2api/tests/unit/controllers/v1/test_jsexpose_decorator.py
|
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from st2tests.fixturesloader import FixturesLoader
from tests import FunctionalTest
__all__ = [
'JsexposeDecoratorTestCase'
]
FIXTURES_PACK = 'aliases'
TEST_MODELS = {
'aliases': ['alias1.yaml', 'alias2.yaml']
}
class JsexposeDecoratorTestCase(FunctionalTest):
"""
Test case which tests various invalid requests and makes sure they are correctly handled by
the jsexpose decorator.
"""
models = None
alias1 = None
@classmethod
def setUpClass(cls):
super(JsexposeDecoratorTestCase, cls).setUpClass()
cls.models = FixturesLoader().save_fixtures_to_db(fixtures_pack=FIXTURES_PACK,
fixtures_dict=TEST_MODELS)
cls.alias1 = cls.models['aliases']['alias1.yaml']
def test_invalid_number_of_arguments_results_in_resource_not_found(self):
# Invalid path (additional path components after the id)
resp = self.app.get('/v1/actionalias/%s/some/more/args' % (self.alias1.id), expect_errors=True)
self.assertEqual(resp.status_int, 404)
self.assertEqual(resp.json['faultstring'], 'The resource could not be found.')
def test_invalid_query_param_results_in_bad_request(self):
resp = self.app.get('/v1/actionalias/%s?invalid=arg' % (self.alias1.id), expect_errors=True)
self.assertEqual(resp.status_int, 400)
self.assertEqual(resp.json['faultstring'], 'Unsupported query parameter: invalid')
|
Add some end to end tests for jsexpose decorator edge cases.
|
Add some end to end tests for jsexpose decorator edge cases.
|
Python
|
apache-2.0
|
Plexxi/st2,punalpatel/st2,lakshmi-kannan/st2,punalpatel/st2,peak6/st2,peak6/st2,emedvedev/st2,StackStorm/st2,Plexxi/st2,nzlosh/st2,emedvedev/st2,pixelrebel/st2,emedvedev/st2,tonybaloney/st2,lakshmi-kannan/st2,peak6/st2,nzlosh/st2,pixelrebel/st2,nzlosh/st2,punalpatel/st2,StackStorm/st2,tonybaloney/st2,lakshmi-kannan/st2,Plexxi/st2,StackStorm/st2,tonybaloney/st2,StackStorm/st2,Plexxi/st2,nzlosh/st2,pixelrebel/st2
|
Add some end to end tests for jsexpose decorator edge cases.
|
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from st2tests.fixturesloader import FixturesLoader
from tests import FunctionalTest
__all__ = [
'JsexposeDecoratorTestCase'
]
FIXTURES_PACK = 'aliases'
TEST_MODELS = {
'aliases': ['alias1.yaml', 'alias2.yaml']
}
class JsexposeDecoratorTestCase(FunctionalTest):
"""
Test case which tests various invalid requests and makes sure they are correctly handled by
the jsexpose decorator.
"""
models = None
alias1 = None
@classmethod
def setUpClass(cls):
super(JsexposeDecoratorTestCase, cls).setUpClass()
cls.models = FixturesLoader().save_fixtures_to_db(fixtures_pack=FIXTURES_PACK,
fixtures_dict=TEST_MODELS)
cls.alias1 = cls.models['aliases']['alias1.yaml']
def test_invalid_number_of_arguments_results_in_resource_not_found(self):
# Invalid path (additional path components after the id)
resp = self.app.get('/v1/actionalias/%s/some/more/args' % (self.alias1.id), expect_errors=True)
self.assertEqual(resp.status_int, 404)
self.assertEqual(resp.json['faultstring'], 'The resource could not be found.')
def test_invalid_query_param_results_in_bad_request(self):
resp = self.app.get('/v1/actionalias/%s?invalid=arg' % (self.alias1.id), expect_errors=True)
self.assertEqual(resp.status_int, 400)
self.assertEqual(resp.json['faultstring'], 'Unsupported query parameter: invalid')
|
<commit_before><commit_msg>Add some end to end tests for jsexpose decorator edge cases.<commit_after>
|
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from st2tests.fixturesloader import FixturesLoader
from tests import FunctionalTest
__all__ = [
'JsexposeDecoratorTestCase'
]
FIXTURES_PACK = 'aliases'
TEST_MODELS = {
'aliases': ['alias1.yaml', 'alias2.yaml']
}
class JsexposeDecoratorTestCase(FunctionalTest):
"""
Test case which tests various invalid requests and makes sure they are correctly handled by
the jsexpose decorator.
"""
models = None
alias1 = None
@classmethod
def setUpClass(cls):
super(JsexposeDecoratorTestCase, cls).setUpClass()
cls.models = FixturesLoader().save_fixtures_to_db(fixtures_pack=FIXTURES_PACK,
fixtures_dict=TEST_MODELS)
cls.alias1 = cls.models['aliases']['alias1.yaml']
def test_invalid_number_of_arguments_results_in_resource_not_found(self):
# Invalid path (additional path components after the id)
resp = self.app.get('/v1/actionalias/%s/some/more/args' % (self.alias1.id), expect_errors=True)
self.assertEqual(resp.status_int, 404)
self.assertEqual(resp.json['faultstring'], 'The resource could not be found.')
def test_invalid_query_param_results_in_bad_request(self):
resp = self.app.get('/v1/actionalias/%s?invalid=arg' % (self.alias1.id), expect_errors=True)
self.assertEqual(resp.status_int, 400)
self.assertEqual(resp.json['faultstring'], 'Unsupported query parameter: invalid')
|
Add some end to end tests for jsexpose decorator edge cases.
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from st2tests.fixturesloader import FixturesLoader
from tests import FunctionalTest
__all__ = [
'JsexposeDecoratorTestCase'
]
FIXTURES_PACK = 'aliases'
TEST_MODELS = {
'aliases': ['alias1.yaml', 'alias2.yaml']
}
class JsexposeDecoratorTestCase(FunctionalTest):
"""
Test case which tests various invalid requests and makes sure they are correctly handled by
the jsexpose decorator.
"""
models = None
alias1 = None
@classmethod
def setUpClass(cls):
super(JsexposeDecoratorTestCase, cls).setUpClass()
cls.models = FixturesLoader().save_fixtures_to_db(fixtures_pack=FIXTURES_PACK,
fixtures_dict=TEST_MODELS)
cls.alias1 = cls.models['aliases']['alias1.yaml']
def test_invalid_number_of_arguments_results_in_resource_not_found(self):
# Invalid path (additional path components after the id)
resp = self.app.get('/v1/actionalias/%s/some/more/args' % (self.alias1.id), expect_errors=True)
self.assertEqual(resp.status_int, 404)
self.assertEqual(resp.json['faultstring'], 'The resource could not be found.')
def test_invalid_query_param_results_in_bad_request(self):
resp = self.app.get('/v1/actionalias/%s?invalid=arg' % (self.alias1.id), expect_errors=True)
self.assertEqual(resp.status_int, 400)
self.assertEqual(resp.json['faultstring'], 'Unsupported query parameter: invalid')
|
<commit_before><commit_msg>Add some end to end tests for jsexpose decorator edge cases.<commit_after># Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from st2tests.fixturesloader import FixturesLoader
from tests import FunctionalTest
__all__ = [
'JsexposeDecoratorTestCase'
]
FIXTURES_PACK = 'aliases'
TEST_MODELS = {
'aliases': ['alias1.yaml', 'alias2.yaml']
}
class JsexposeDecoratorTestCase(FunctionalTest):
"""
Test case which tests various invalid requests and makes sure they are correctly handled by
the jsexpose decorator.
"""
models = None
alias1 = None
@classmethod
def setUpClass(cls):
super(JsexposeDecoratorTestCase, cls).setUpClass()
cls.models = FixturesLoader().save_fixtures_to_db(fixtures_pack=FIXTURES_PACK,
fixtures_dict=TEST_MODELS)
cls.alias1 = cls.models['aliases']['alias1.yaml']
def test_invalid_number_of_arguments_results_in_resource_not_found(self):
# Invalid path (additional path components after the id)
resp = self.app.get('/v1/actionalias/%s/some/more/args' % (self.alias1.id), expect_errors=True)
self.assertEqual(resp.status_int, 404)
self.assertEqual(resp.json['faultstring'], 'The resource could not be found.')
def test_invalid_query_param_results_in_bad_request(self):
resp = self.app.get('/v1/actionalias/%s?invalid=arg' % (self.alias1.id), expect_errors=True)
self.assertEqual(resp.status_int, 400)
self.assertEqual(resp.json['faultstring'], 'Unsupported query parameter: invalid')
|
|
6ef227b61d6455a03b8665f16e0934705ff611f2
|
scripts/make_stdlib_list.py
|
scripts/make_stdlib_list.py
|
# Generate list of modules in the standard distribution
import sys
import os
if(sys.version_info[0]!=3):
raise ValueError("This script must be run with Python 3")
brython_stdlib_folder = os.path.join(os.path.dirname(os.getcwd()),
'www', 'src')
python_stdlib_folder = os.path.dirname(sys.executable)
print(python_stdlib_folder)
doc_folder = os.path.join(os.path.dirname(os.getcwd()),
'www', 'doc')
with open(os.path.join(doc_folder,'stdlib_list.html'), 'w') as out:
out.write('<html><head><style>li {padding-left:20px;list-style-type:none;}</style>')
out.write('</head><body>')
out.write('<h1>Brython distribution</h1>\n<div style="padding-left:30px;">')
out.write('<table border=1>\n')
for dirpath, dirnames, filenames in os.walk(brython_stdlib_folder):
path = dirpath[len(brython_stdlib_folder)+1:]
python_path = os.path.join(python_stdlib_folder, path)
if path.startswith('Lib\\test'):
print(path, dirnames)
continue
if path:
valid = [f for f in filenames
if os.path.splitext(f)[1] not in ['.pyc']]
valid = [v for v in valid if v.startswith('_')] + \
[v for v in valid if not v.startswith('_')]
if valid:
common = [v for v in valid
if os.path.exists(os.path.join(python_path,v))]
brython_specific = [v for v in valid if not v in common]
if os.path.exists(python_path):
missing = [f for f in os.listdir(python_path)
if f!='__pycache__' and
os.path.isfile(os.path.join(python_path,f))
and not f in valid]
else:
missing = []
out.write('<tr><td valign="top">%s</td>\n' %path)
for files in common, brython_specific, missing:
out.write('<td style="vertical-align:top;">'+'\n<br>'.join(files)+'</td>\n')
out.write('</tr>\n')
out.write('</table>\n</div>\n</body>\n</html>')
|
Add script to generate a table of differences between Brython and CPython distributions
|
Add script to generate a table of differences between Brython and CPython distributions
|
Python
|
bsd-3-clause
|
Isendir/brython,JohnDenker/brython,molebot/brython,kikocorreoso/brython,Mozhuowen/brython,Mozhuowen/brython,olemis/brython,molebot/brython,Lh4cKg/brython,rubyinhell/brython,Lh4cKg/brython,Hasimir/brython,olemis/brython,kevinmel2000/brython,amrdraz/brython,amrdraz/brython,Lh4cKg/brython,jonathanverner/brython,Isendir/brython,brython-dev/brython,Hasimir/brython,JohnDenker/brython,rubyinhell/brython,kevinmel2000/brython,jonathanverner/brython,Hasimir/brython,kevinmel2000/brython,rubyinhell/brython,kevinmel2000/brython,Isendir/brython,molebot/brython,Mozhuowen/brython,jonathanverner/brython,Lh4cKg/brython,molebot/brython,brython-dev/brython,rubyinhell/brython,amrdraz/brython,olemis/brython,kikocorreoso/brython,amrdraz/brython,Mozhuowen/brython,JohnDenker/brython,kikocorreoso/brython,jonathanverner/brython,JohnDenker/brython,Hasimir/brython,Isendir/brython,brython-dev/brython,olemis/brython
|
Add script to generate a table of differences between Brython and CPython distributions
|
# Generate list of modules in the standard distribution
import sys
import os
if(sys.version_info[0]!=3):
raise ValueError("This script must be run with Python 3")
brython_stdlib_folder = os.path.join(os.path.dirname(os.getcwd()),
'www', 'src')
python_stdlib_folder = os.path.dirname(sys.executable)
print(python_stdlib_folder)
doc_folder = os.path.join(os.path.dirname(os.getcwd()),
'www', 'doc')
with open(os.path.join(doc_folder,'stdlib_list.html'), 'w') as out:
out.write('<html><head><style>li {padding-left:20px;list-style-type:none;}</style>')
out.write('</head><body>')
out.write('<h1>Brython distribution</h1>\n<div style="padding-left:30px;">')
out.write('<table border=1>\n')
for dirpath, dirnames, filenames in os.walk(brython_stdlib_folder):
path = dirpath[len(brython_stdlib_folder)+1:]
python_path = os.path.join(python_stdlib_folder, path)
if path.startswith('Lib\\test'):
print(path, dirnames)
continue
if path:
valid = [f for f in filenames
if os.path.splitext(f)[1] not in ['.pyc']]
valid = [v for v in valid if v.startswith('_')] + \
[v for v in valid if not v.startswith('_')]
if valid:
common = [v for v in valid
if os.path.exists(os.path.join(python_path,v))]
brython_specific = [v for v in valid if not v in common]
if os.path.exists(python_path):
missing = [f for f in os.listdir(python_path)
if f!='__pycache__' and
os.path.isfile(os.path.join(python_path,f))
and not f in valid]
else:
missing = []
out.write('<tr><td valign="top">%s</td>\n' %path)
for files in common, brython_specific, missing:
out.write('<td style="vertical-align:top;">'+'\n<br>'.join(files)+'</td>\n')
out.write('</tr>\n')
out.write('</table>\n</div>\n</body>\n</html>')
|
<commit_before><commit_msg>Add script to generate a table of differences between Brython and CPython distributions<commit_after>
|
# Generate list of modules in the standard distribution
import sys
import os
if(sys.version_info[0]!=3):
raise ValueError("This script must be run with Python 3")
brython_stdlib_folder = os.path.join(os.path.dirname(os.getcwd()),
'www', 'src')
python_stdlib_folder = os.path.dirname(sys.executable)
print(python_stdlib_folder)
doc_folder = os.path.join(os.path.dirname(os.getcwd()),
'www', 'doc')
with open(os.path.join(doc_folder,'stdlib_list.html'), 'w') as out:
out.write('<html><head><style>li {padding-left:20px;list-style-type:none;}</style>')
out.write('</head><body>')
out.write('<h1>Brython distribution</h1>\n<div style="padding-left:30px;">')
out.write('<table border=1>\n')
for dirpath, dirnames, filenames in os.walk(brython_stdlib_folder):
path = dirpath[len(brython_stdlib_folder)+1:]
python_path = os.path.join(python_stdlib_folder, path)
if path.startswith('Lib\\test'):
print(path, dirnames)
continue
if path:
valid = [f for f in filenames
if os.path.splitext(f)[1] not in ['.pyc']]
valid = [v for v in valid if v.startswith('_')] + \
[v for v in valid if not v.startswith('_')]
if valid:
common = [v for v in valid
if os.path.exists(os.path.join(python_path,v))]
brython_specific = [v for v in valid if not v in common]
if os.path.exists(python_path):
missing = [f for f in os.listdir(python_path)
if f!='__pycache__' and
os.path.isfile(os.path.join(python_path,f))
and not f in valid]
else:
missing = []
out.write('<tr><td valign="top">%s</td>\n' %path)
for files in common, brython_specific, missing:
out.write('<td style="vertical-align:top;">'+'\n<br>'.join(files)+'</td>\n')
out.write('</tr>\n')
out.write('</table>\n</div>\n</body>\n</html>')
|
Add script to generate a table of differences between Brython and CPython distributions
# Generate list of modules in the standard distribution
import sys
import os
if(sys.version_info[0]!=3):
raise ValueError("This script must be run with Python 3")
brython_stdlib_folder = os.path.join(os.path.dirname(os.getcwd()),
'www', 'src')
python_stdlib_folder = os.path.dirname(sys.executable)
print(python_stdlib_folder)
doc_folder = os.path.join(os.path.dirname(os.getcwd()),
'www', 'doc')
with open(os.path.join(doc_folder,'stdlib_list.html'), 'w') as out:
out.write('<html><head><style>li {padding-left:20px;list-style-type:none;}</style>')
out.write('</head><body>')
out.write('<h1>Brython distribution</h1>\n<div style="padding-left:30px;">')
out.write('<table border=1>\n')
for dirpath, dirnames, filenames in os.walk(brython_stdlib_folder):
path = dirpath[len(brython_stdlib_folder)+1:]
python_path = os.path.join(python_stdlib_folder, path)
if path.startswith('Lib\\test'):
print(path, dirnames)
continue
if path:
valid = [f for f in filenames
if os.path.splitext(f)[1] not in ['.pyc']]
valid = [v for v in valid if v.startswith('_')] + \
[v for v in valid if not v.startswith('_')]
if valid:
common = [v for v in valid
if os.path.exists(os.path.join(python_path,v))]
brython_specific = [v for v in valid if not v in common]
if os.path.exists(python_path):
missing = [f for f in os.listdir(python_path)
if f!='__pycache__' and
os.path.isfile(os.path.join(python_path,f))
and not f in valid]
else:
missing = []
out.write('<tr><td valign="top">%s</td>\n' %path)
for files in common, brython_specific, missing:
out.write('<td style="vertical-align:top;">'+'\n<br>'.join(files)+'</td>\n')
out.write('</tr>\n')
out.write('</table>\n</div>\n</body>\n</html>')
|
<commit_before><commit_msg>Add script to generate a table of differences between Brython and CPython distributions<commit_after># Generate list of modules in the standard distribution
import sys
import os
if(sys.version_info[0]!=3):
raise ValueError("This script must be run with Python 3")
brython_stdlib_folder = os.path.join(os.path.dirname(os.getcwd()),
'www', 'src')
python_stdlib_folder = os.path.dirname(sys.executable)
print(python_stdlib_folder)
doc_folder = os.path.join(os.path.dirname(os.getcwd()),
'www', 'doc')
with open(os.path.join(doc_folder,'stdlib_list.html'), 'w') as out:
out.write('<html><head><style>li {padding-left:20px;list-style-type:none;}</style>')
out.write('</head><body>')
out.write('<h1>Brython distribution</h1>\n<div style="padding-left:30px;">')
out.write('<table border=1>\n')
for dirpath, dirnames, filenames in os.walk(brython_stdlib_folder):
path = dirpath[len(brython_stdlib_folder)+1:]
python_path = os.path.join(python_stdlib_folder, path)
if path.startswith('Lib\\test'):
print(path, dirnames)
continue
if path:
valid = [f for f in filenames
if os.path.splitext(f)[1] not in ['.pyc']]
valid = [v for v in valid if v.startswith('_')] + \
[v for v in valid if not v.startswith('_')]
if valid:
common = [v for v in valid
if os.path.exists(os.path.join(python_path,v))]
brython_specific = [v for v in valid if not v in common]
if os.path.exists(python_path):
missing = [f for f in os.listdir(python_path)
if f!='__pycache__' and
os.path.isfile(os.path.join(python_path,f))
and not f in valid]
else:
missing = []
out.write('<tr><td valign="top">%s</td>\n' %path)
for files in common, brython_specific, missing:
out.write('<td style="vertical-align:top;">'+'\n<br>'.join(files)+'</td>\n')
out.write('</tr>\n')
out.write('</table>\n</div>\n</body>\n</html>')
|
|
2c164f6154e565225d593d82579975380bf1ed1f
|
scripts/klayout/scrotLayout.py
|
scripts/klayout/scrotLayout.py
|
import pya
import re
WIDTH = 2048
HEIGHT = 2048
app = pya.Application.instance()
win = app.main_window()
# Load technology file
tech = pya.Technology()
tech.load(tech_file)
layoutOptions = tech.load_layout_options
# Load def file in the main window
cell_view = win.load_layout(input_layout, layoutOptions, 0)
layout_view = cell_view.view()
layout_view.save_image(input_layout+".png", WIDTH, HEIGHT)
app.exit(0)
|
Add a klayout script to screenshot layouts
|
Add a klayout script to screenshot layouts
|
Python
|
apache-2.0
|
efabless/openlane,efabless/openlane,efabless/openlane,efabless/openlane
|
Add a klayout script to screenshot layouts
|
import pya
import re
WIDTH = 2048
HEIGHT = 2048
app = pya.Application.instance()
win = app.main_window()
# Load technology file
tech = pya.Technology()
tech.load(tech_file)
layoutOptions = tech.load_layout_options
# Load def file in the main window
cell_view = win.load_layout(input_layout, layoutOptions, 0)
layout_view = cell_view.view()
layout_view.save_image(input_layout+".png", WIDTH, HEIGHT)
app.exit(0)
|
<commit_before><commit_msg>Add a klayout script to screenshot layouts<commit_after>
|
import pya
import re
WIDTH = 2048
HEIGHT = 2048
app = pya.Application.instance()
win = app.main_window()
# Load technology file
tech = pya.Technology()
tech.load(tech_file)
layoutOptions = tech.load_layout_options
# Load def file in the main window
cell_view = win.load_layout(input_layout, layoutOptions, 0)
layout_view = cell_view.view()
layout_view.save_image(input_layout+".png", WIDTH, HEIGHT)
app.exit(0)
|
Add a klayout script to screenshot layoutsimport pya
import re
WIDTH = 2048
HEIGHT = 2048
app = pya.Application.instance()
win = app.main_window()
# Load technology file
tech = pya.Technology()
tech.load(tech_file)
layoutOptions = tech.load_layout_options
# Load def file in the main window
cell_view = win.load_layout(input_layout, layoutOptions, 0)
layout_view = cell_view.view()
layout_view.save_image(input_layout+".png", WIDTH, HEIGHT)
app.exit(0)
|
<commit_before><commit_msg>Add a klayout script to screenshot layouts<commit_after>import pya
import re
WIDTH = 2048
HEIGHT = 2048
app = pya.Application.instance()
win = app.main_window()
# Load technology file
tech = pya.Technology()
tech.load(tech_file)
layoutOptions = tech.load_layout_options
# Load def file in the main window
cell_view = win.load_layout(input_layout, layoutOptions, 0)
layout_view = cell_view.view()
layout_view.save_image(input_layout+".png", WIDTH, HEIGHT)
app.exit(0)
|
|
c636b18cf0164c4ffcd522b89d09e82f7da3ccc6
|
tests/unit/utils/test_jinja.py
|
tests/unit/utils/test_jinja.py
|
# -*- coding: utf-8 -*-
'''
Tests for salt.utils.jinja
'''
# Import Python libs
from __future__ import absolute_import, unicode_literals, print_function
# Import Salt libs
import salt.utils.jinja
from tests.support.unit import TestCase
class JinjaTestCase(TestCase):
def test_tojson(self):
'''
Test the tojson filter for those using Jinja < 2.9. Non-ascii unicode
content should be dumped with ensure_ascii=True.
'''
data = {'Non-ascii words': ['süß', 'спам', 'яйца']}
result = salt.utils.jinja.tojson(data)
expected = '{"Non-ascii words": ["s\\u00fc\\u00df", "\\u0441\\u043f\\u0430\\u043c", "\\u044f\\u0439\\u0446\\u0430"]}'
assert result == expected, result
|
Add unit test for tojson filter
|
Add unit test for tojson filter
|
Python
|
apache-2.0
|
saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt
|
Add unit test for tojson filter
|
# -*- coding: utf-8 -*-
'''
Tests for salt.utils.jinja
'''
# Import Python libs
from __future__ import absolute_import, unicode_literals, print_function
# Import Salt libs
import salt.utils.jinja
from tests.support.unit import TestCase
class JinjaTestCase(TestCase):
def test_tojson(self):
'''
Test the tojson filter for those using Jinja < 2.9. Non-ascii unicode
content should be dumped with ensure_ascii=True.
'''
data = {'Non-ascii words': ['süß', 'спам', 'яйца']}
result = salt.utils.jinja.tojson(data)
expected = '{"Non-ascii words": ["s\\u00fc\\u00df", "\\u0441\\u043f\\u0430\\u043c", "\\u044f\\u0439\\u0446\\u0430"]}'
assert result == expected, result
|
<commit_before><commit_msg>Add unit test for tojson filter<commit_after>
|
# -*- coding: utf-8 -*-
'''
Tests for salt.utils.jinja
'''
# Import Python libs
from __future__ import absolute_import, unicode_literals, print_function
# Import Salt libs
import salt.utils.jinja
from tests.support.unit import TestCase
class JinjaTestCase(TestCase):
def test_tojson(self):
'''
Test the tojson filter for those using Jinja < 2.9. Non-ascii unicode
content should be dumped with ensure_ascii=True.
'''
data = {'Non-ascii words': ['süß', 'спам', 'яйца']}
result = salt.utils.jinja.tojson(data)
expected = '{"Non-ascii words": ["s\\u00fc\\u00df", "\\u0441\\u043f\\u0430\\u043c", "\\u044f\\u0439\\u0446\\u0430"]}'
assert result == expected, result
|
Add unit test for tojson filter# -*- coding: utf-8 -*-
'''
Tests for salt.utils.jinja
'''
# Import Python libs
from __future__ import absolute_import, unicode_literals, print_function
# Import Salt libs
import salt.utils.jinja
from tests.support.unit import TestCase
class JinjaTestCase(TestCase):
def test_tojson(self):
'''
Test the tojson filter for those using Jinja < 2.9. Non-ascii unicode
content should be dumped with ensure_ascii=True.
'''
data = {'Non-ascii words': ['süß', 'спам', 'яйца']}
result = salt.utils.jinja.tojson(data)
expected = '{"Non-ascii words": ["s\\u00fc\\u00df", "\\u0441\\u043f\\u0430\\u043c", "\\u044f\\u0439\\u0446\\u0430"]}'
assert result == expected, result
|
<commit_before><commit_msg>Add unit test for tojson filter<commit_after># -*- coding: utf-8 -*-
'''
Tests for salt.utils.jinja
'''
# Import Python libs
from __future__ import absolute_import, unicode_literals, print_function
# Import Salt libs
import salt.utils.jinja
from tests.support.unit import TestCase
class JinjaTestCase(TestCase):
def test_tojson(self):
'''
Test the tojson filter for those using Jinja < 2.9. Non-ascii unicode
content should be dumped with ensure_ascii=True.
'''
data = {'Non-ascii words': ['süß', 'спам', 'яйца']}
result = salt.utils.jinja.tojson(data)
expected = '{"Non-ascii words": ["s\\u00fc\\u00df", "\\u0441\\u043f\\u0430\\u043c", "\\u044f\\u0439\\u0446\\u0430"]}'
assert result == expected, result
|
|
a24ba36d5341817bc0b95a1dcfc19ecfe8eb77ae
|
examples/readme3.py
|
examples/readme3.py
|
# this is a Python3 version of the code in readme.py
from twisted.internet.task import react
from twisted.internet.defer import inlineCallbacks, ensureDeferred
from twisted.internet.endpoints import UNIXClientEndpoint
import treq
import txtorcon
async def main(reactor):
tor = await txtorcon.connect(
reactor,
UNIXClientEndpoint(reactor, "/var/run/tor/control")
)
print("Connected to Tor version {}".format(tor.version))
url = u'https://www.torproject.org:443'
print(u"Downloading {}".format(repr(url)))
resp = await treq.get(url, agent=tor.web_agent())
print(u" {} bytes".format(resp.length))
data = await resp.text()
print(u"Got {} bytes:\n{}\n[...]{}".format(
len(data),
data[:120],
data[-120:],
))
print(u"Creating a circuit")
state = await tor.create_state()
circ = await state.build_circuit()
await circ.when_built()
print(u" path: {}".format(" -> ".join([r.ip for r in circ.path])))
print(u"Downloading meejah's public key via above circuit...")
resp = await treq.get(
u'https://meejah.ca/meejah.asc',
agent=circ.web_agent(reactor, tor.config.socks_endpoint(reactor)),
)
data = await resp.text()
print(data)
@react
def _main(reactor):
return ensureDeferred(main(reactor))
|
Add Py3 version of readme code
|
Add Py3 version of readme code
|
Python
|
mit
|
meejah/txtorcon,meejah/txtorcon
|
Add Py3 version of readme code
|
# this is a Python3 version of the code in readme.py
from twisted.internet.task import react
from twisted.internet.defer import inlineCallbacks, ensureDeferred
from twisted.internet.endpoints import UNIXClientEndpoint
import treq
import txtorcon
async def main(reactor):
tor = await txtorcon.connect(
reactor,
UNIXClientEndpoint(reactor, "/var/run/tor/control")
)
print("Connected to Tor version {}".format(tor.version))
url = u'https://www.torproject.org:443'
print(u"Downloading {}".format(repr(url)))
resp = await treq.get(url, agent=tor.web_agent())
print(u" {} bytes".format(resp.length))
data = await resp.text()
print(u"Got {} bytes:\n{}\n[...]{}".format(
len(data),
data[:120],
data[-120:],
))
print(u"Creating a circuit")
state = await tor.create_state()
circ = await state.build_circuit()
await circ.when_built()
print(u" path: {}".format(" -> ".join([r.ip for r in circ.path])))
print(u"Downloading meejah's public key via above circuit...")
resp = await treq.get(
u'https://meejah.ca/meejah.asc',
agent=circ.web_agent(reactor, tor.config.socks_endpoint(reactor)),
)
data = await resp.text()
print(data)
@react
def _main(reactor):
return ensureDeferred(main(reactor))
|
<commit_before><commit_msg>Add Py3 version of readme code<commit_after>
|
# this is a Python3 version of the code in readme.py
from twisted.internet.task import react
from twisted.internet.defer import inlineCallbacks, ensureDeferred
from twisted.internet.endpoints import UNIXClientEndpoint
import treq
import txtorcon
async def main(reactor):
tor = await txtorcon.connect(
reactor,
UNIXClientEndpoint(reactor, "/var/run/tor/control")
)
print("Connected to Tor version {}".format(tor.version))
url = u'https://www.torproject.org:443'
print(u"Downloading {}".format(repr(url)))
resp = await treq.get(url, agent=tor.web_agent())
print(u" {} bytes".format(resp.length))
data = await resp.text()
print(u"Got {} bytes:\n{}\n[...]{}".format(
len(data),
data[:120],
data[-120:],
))
print(u"Creating a circuit")
state = await tor.create_state()
circ = await state.build_circuit()
await circ.when_built()
print(u" path: {}".format(" -> ".join([r.ip for r in circ.path])))
print(u"Downloading meejah's public key via above circuit...")
resp = await treq.get(
u'https://meejah.ca/meejah.asc',
agent=circ.web_agent(reactor, tor.config.socks_endpoint(reactor)),
)
data = await resp.text()
print(data)
@react
def _main(reactor):
return ensureDeferred(main(reactor))
|
Add Py3 version of readme code# this is a Python3 version of the code in readme.py
from twisted.internet.task import react
from twisted.internet.defer import inlineCallbacks, ensureDeferred
from twisted.internet.endpoints import UNIXClientEndpoint
import treq
import txtorcon
async def main(reactor):
tor = await txtorcon.connect(
reactor,
UNIXClientEndpoint(reactor, "/var/run/tor/control")
)
print("Connected to Tor version {}".format(tor.version))
url = u'https://www.torproject.org:443'
print(u"Downloading {}".format(repr(url)))
resp = await treq.get(url, agent=tor.web_agent())
print(u" {} bytes".format(resp.length))
data = await resp.text()
print(u"Got {} bytes:\n{}\n[...]{}".format(
len(data),
data[:120],
data[-120:],
))
print(u"Creating a circuit")
state = await tor.create_state()
circ = await state.build_circuit()
await circ.when_built()
print(u" path: {}".format(" -> ".join([r.ip for r in circ.path])))
print(u"Downloading meejah's public key via above circuit...")
resp = await treq.get(
u'https://meejah.ca/meejah.asc',
agent=circ.web_agent(reactor, tor.config.socks_endpoint(reactor)),
)
data = await resp.text()
print(data)
@react
def _main(reactor):
return ensureDeferred(main(reactor))
|
<commit_before><commit_msg>Add Py3 version of readme code<commit_after># this is a Python3 version of the code in readme.py
from twisted.internet.task import react
from twisted.internet.defer import inlineCallbacks, ensureDeferred
from twisted.internet.endpoints import UNIXClientEndpoint
import treq
import txtorcon
async def main(reactor):
tor = await txtorcon.connect(
reactor,
UNIXClientEndpoint(reactor, "/var/run/tor/control")
)
print("Connected to Tor version {}".format(tor.version))
url = u'https://www.torproject.org:443'
print(u"Downloading {}".format(repr(url)))
resp = await treq.get(url, agent=tor.web_agent())
print(u" {} bytes".format(resp.length))
data = await resp.text()
print(u"Got {} bytes:\n{}\n[...]{}".format(
len(data),
data[:120],
data[-120:],
))
print(u"Creating a circuit")
state = await tor.create_state()
circ = await state.build_circuit()
await circ.when_built()
print(u" path: {}".format(" -> ".join([r.ip for r in circ.path])))
print(u"Downloading meejah's public key via above circuit...")
resp = await treq.get(
u'https://meejah.ca/meejah.asc',
agent=circ.web_agent(reactor, tor.config.socks_endpoint(reactor)),
)
data = await resp.text()
print(data)
@react
def _main(reactor):
return ensureDeferred(main(reactor))
|
|
8f443e922a913c91f21b663065d6bdc7abc73672
|
api/migrations/0083_event_english_image.py
|
api/migrations/0083_event_english_image.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('api', '0082_card_round_card_idolized_image'),
]
operations = [
migrations.AddField(
model_name='event',
name='english_image',
field=models.ImageField(null=True, upload_to=b'events/EN/', blank=True),
preserve_default=True,
),
]
|
Use EN banners for events when available
|
Use EN banners for events when available
|
Python
|
apache-2.0
|
dburr/SchoolIdolAPI,rdsathene/SchoolIdolAPI,rdsathene/SchoolIdolAPI,dburr/SchoolIdolAPI,dburr/SchoolIdolAPI,rdsathene/SchoolIdolAPI,SchoolIdolTomodachi/SchoolIdolAPI,SchoolIdolTomodachi/SchoolIdolAPI,SchoolIdolTomodachi/SchoolIdolAPI
|
Use EN banners for events when available
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('api', '0082_card_round_card_idolized_image'),
]
operations = [
migrations.AddField(
model_name='event',
name='english_image',
field=models.ImageField(null=True, upload_to=b'events/EN/', blank=True),
preserve_default=True,
),
]
|
<commit_before><commit_msg>Use EN banners for events when available<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('api', '0082_card_round_card_idolized_image'),
]
operations = [
migrations.AddField(
model_name='event',
name='english_image',
field=models.ImageField(null=True, upload_to=b'events/EN/', blank=True),
preserve_default=True,
),
]
|
Use EN banners for events when available# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('api', '0082_card_round_card_idolized_image'),
]
operations = [
migrations.AddField(
model_name='event',
name='english_image',
field=models.ImageField(null=True, upload_to=b'events/EN/', blank=True),
preserve_default=True,
),
]
|
<commit_before><commit_msg>Use EN banners for events when available<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('api', '0082_card_round_card_idolized_image'),
]
operations = [
migrations.AddField(
model_name='event',
name='english_image',
field=models.ImageField(null=True, upload_to=b'events/EN/', blank=True),
preserve_default=True,
),
]
|
|
90d02b4bfa2154ba71a8a05b6c2661cf792d062d
|
lang/py3/avro/tests/test_enum.py
|
lang/py3/avro/tests/test_enum.py
|
#!/usr/bin/env python3
# -*- mode: python -*-
# -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from avro import schema
class TestEnum(unittest.TestCase):
def testSymbolsInOrder(self):
enum = schema.EnumSchema('Test', '', ['A', 'B'], schema.Names(), '', {})
self.assertEqual('A', enum.symbols[0])
def testSymbolsInReverseOrder(self):
enum = schema.EnumSchema('Test', '', ['B', 'A'], schema.Names(), '', {})
self.assertEqual('B', enum.symbols[0])
if __name__ == '__main__':
raise Exception('Use run_tests.py')
|
Add missing file from AVRO-1673.
|
Add missing file from AVRO-1673.
git-svn-id: c07a28b12301a3ecdaa1125838bb8d44496e7ec7@1691208 13f79535-47bb-0310-9956-ffa450edef68
|
Python
|
apache-2.0
|
djudd/avro,eonezhang/avro,massie/avro,st0nx/avro,ecatmur/avro,eonezhang/avro,dasch/avro,st0nx/avro,massie/avro,Romain-Geissler-1A/avro,restorando/avro,DrAA/avro,relateiq/avro,Romain-Geissler-1A/avro,eonezhang/avro,massie/avro,jmattbeal/avro,relateiq/avro,ecatmur/avro,Romain-Geissler-1A/avro,Yelp/avro,DrAA/avro,wojtkiewicz/avro,st0nx/avro,relateiq/avro,dasch/avro,apache/avro,st0nx/avro,relateiq/avro,restorando/avro,massie/avro,dasch/avro,wojtkiewicz/avro,restorando/avro,alexanderlz/avro,rdblue/avro-ruby,st0nx/avro,apache/avro,eonezhang/avro,DrAA/avro,dasch/avro,alexanderlz/avro,restorando/avro,massie/avro,yadavsaroj/avro,jmattbeal/avro,ecatmur/avro,jmattbeal/avro,eonezhang/avro,restorando/avro,alexanderlz/avro,bitbouncer/avro,eonezhang/avro,relateiq/avro,ecatmur/avro,apache/avro,jmattbeal/avro,st0nx/avro,restorando/avro,djudd/avro,Yelp/avro,DrAA/avro,rdblue/avro-ruby,ecatmur/avro,DrAA/avro,Romain-Geissler-1A/avro,alexanderlz/avro,dasch/avro,yadavsaroj/avro,ecatmur/avro,dasch/avro,relateiq/avro,Yelp/avro,Yelp/avro,dasch/avro,ecatmur/avro,bitbouncer/avro,bitbouncer/avro,st0nx/avro,alexanderlz/avro,massie/avro,wojtkiewicz/avro,DrAA/avro,Romain-Geissler-1A/avro,wojtkiewicz/avro,apache/avro,alexanderlz/avro,Yelp/avro,restorando/avro,djudd/avro,jmattbeal/avro,restorando/avro,ecatmur/avro,jmattbeal/avro,st0nx/avro,alexanderlz/avro,DrAA/avro,bitbouncer/avro,yadavsaroj/avro,bitbouncer/avro,Romain-Geissler-1A/avro,alexanderlz/avro,eonezhang/avro,apache/avro,relateiq/avro,ecatmur/avro,djudd/avro,DrAA/avro,relateiq/avro,yadavsaroj/avro,DrAA/avro,bitbouncer/avro,yadavsaroj/avro,restorando/avro,Yelp/avro,ecatmur/avro,alexanderlz/avro,wojtkiewicz/avro,Romain-Geissler-1A/avro,Romain-Geissler-1A/avro,djudd/avro,alexanderlz/avro,massie/avro,wojtkiewicz/avro,apache/avro,apache/avro,yadavsaroj/avro,wojtkiewicz/avro,relateiq/avro,eonezhang/avro,Romain-Geissler-1A/avro,Romain-Geissler-1A/avro,ecatmur/avro,apache/avro,relateiq/avro,apache/avro,Yelp/avro,wojtkiewicz/avro,DrAA/avro,massie/avro,jmattbeal/avro,eonezhang/avro,st0nx/avro,dasch/avro,DrAA/avro,Yelp/avro,eonezhang/avro,relateiq/avro,dasch/avro,bitbouncer/avro,bitbouncer/avro,relateiq/avro,djudd/avro,bitbouncer/avro,djudd/avro,bitbouncer/avro,DrAA/avro,dasch/avro,eonezhang/avro,yadavsaroj/avro,yadavsaroj/avro,restorando/avro,Romain-Geissler-1A/avro,alexanderlz/avro,jmattbeal/avro,st0nx/avro,djudd/avro,apache/avro,apache/avro,massie/avro,massie/avro,bitbouncer/avro,Yelp/avro,ecatmur/avro,djudd/avro,restorando/avro,Yelp/avro,yadavsaroj/avro,jmattbeal/avro,jmattbeal/avro,wojtkiewicz/avro,yadavsaroj/avro,apache/avro,djudd/avro,yadavsaroj/avro,dasch/avro,bitbouncer/avro,wojtkiewicz/avro,yadavsaroj/avro,djudd/avro,Yelp/avro,restorando/avro,eonezhang/avro,dasch/avro,Yelp/avro,jmattbeal/avro,wojtkiewicz/avro,Romain-Geissler-1A/avro,wojtkiewicz/avro,jmattbeal/avro,st0nx/avro,massie/avro,djudd/avro,massie/avro,st0nx/avro,alexanderlz/avro
|
Add missing file from AVRO-1673.
git-svn-id: c07a28b12301a3ecdaa1125838bb8d44496e7ec7@1691208 13f79535-47bb-0310-9956-ffa450edef68
|
#!/usr/bin/env python3
# -*- mode: python -*-
# -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from avro import schema
class TestEnum(unittest.TestCase):
def testSymbolsInOrder(self):
enum = schema.EnumSchema('Test', '', ['A', 'B'], schema.Names(), '', {})
self.assertEqual('A', enum.symbols[0])
def testSymbolsInReverseOrder(self):
enum = schema.EnumSchema('Test', '', ['B', 'A'], schema.Names(), '', {})
self.assertEqual('B', enum.symbols[0])
if __name__ == '__main__':
raise Exception('Use run_tests.py')
|
<commit_before><commit_msg>Add missing file from AVRO-1673.
git-svn-id: c07a28b12301a3ecdaa1125838bb8d44496e7ec7@1691208 13f79535-47bb-0310-9956-ffa450edef68<commit_after>
|
#!/usr/bin/env python3
# -*- mode: python -*-
# -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from avro import schema
class TestEnum(unittest.TestCase):
def testSymbolsInOrder(self):
enum = schema.EnumSchema('Test', '', ['A', 'B'], schema.Names(), '', {})
self.assertEqual('A', enum.symbols[0])
def testSymbolsInReverseOrder(self):
enum = schema.EnumSchema('Test', '', ['B', 'A'], schema.Names(), '', {})
self.assertEqual('B', enum.symbols[0])
if __name__ == '__main__':
raise Exception('Use run_tests.py')
|
Add missing file from AVRO-1673.
git-svn-id: c07a28b12301a3ecdaa1125838bb8d44496e7ec7@1691208 13f79535-47bb-0310-9956-ffa450edef68#!/usr/bin/env python3
# -*- mode: python -*-
# -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from avro import schema
class TestEnum(unittest.TestCase):
def testSymbolsInOrder(self):
enum = schema.EnumSchema('Test', '', ['A', 'B'], schema.Names(), '', {})
self.assertEqual('A', enum.symbols[0])
def testSymbolsInReverseOrder(self):
enum = schema.EnumSchema('Test', '', ['B', 'A'], schema.Names(), '', {})
self.assertEqual('B', enum.symbols[0])
if __name__ == '__main__':
raise Exception('Use run_tests.py')
|
<commit_before><commit_msg>Add missing file from AVRO-1673.
git-svn-id: c07a28b12301a3ecdaa1125838bb8d44496e7ec7@1691208 13f79535-47bb-0310-9956-ffa450edef68<commit_after>#!/usr/bin/env python3
# -*- mode: python -*-
# -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from avro import schema
class TestEnum(unittest.TestCase):
def testSymbolsInOrder(self):
enum = schema.EnumSchema('Test', '', ['A', 'B'], schema.Names(), '', {})
self.assertEqual('A', enum.symbols[0])
def testSymbolsInReverseOrder(self):
enum = schema.EnumSchema('Test', '', ['B', 'A'], schema.Names(), '', {})
self.assertEqual('B', enum.symbols[0])
if __name__ == '__main__':
raise Exception('Use run_tests.py')
|
|
5deefa9392061c70ed9023fe1cad3f16e22a21a0
|
src/Scripts/tail-latency.py
|
src/Scripts/tail-latency.py
|
# Note that this is not a valid measurement of tail latency. This uses the execution times we measure because they're convenient, but this does not include queueing time inside BitFunnel nor does it include head-of-line blocking queue waiting time on the queue into BitFunnel.
import csv
filename = "/tmp/QueryPipelineStatistics.csv"
times = []
with open(filename) as f:
reader = csv.reader(f)
header = next(reader)
assert header == ['query',
'rows',
'matches',
'quadwords',
'cachelines',
'parse',
'plan',
'match']
for row in reader:
total_time = float(row[-1]) + float(row[-2]) + float(row[-3])
times.append(total_time)
times.sort(reverse=True)
idx_max = len(times) - 1
idx = [round(idx_max / 2),
round(idx_max / 10),
round(idx_max / 100),
round(idx_max / 1000),
0]
tails = [times[x] for x in idx]
print(tails)
|
Add quick and dirty script to measure tail latency.
|
Add quick and dirty script to measure tail latency.
|
Python
|
mit
|
BitFunnel/BitFunnel,BitFunnel/BitFunnel,danluu/BitFunnel,danluu/BitFunnel,BitFunnel/BitFunnel,BitFunnel/BitFunnel,danluu/BitFunnel,BitFunnel/BitFunnel,danluu/BitFunnel,danluu/BitFunnel,danluu/BitFunnel,BitFunnel/BitFunnel
|
Add quick and dirty script to measure tail latency.
|
# Note that this is not a valid measurement of tail latency. This uses the execution times we measure because they're convenient, but this does not include queueing time inside BitFunnel nor does it include head-of-line blocking queue waiting time on the queue into BitFunnel.
import csv
filename = "/tmp/QueryPipelineStatistics.csv"
times = []
with open(filename) as f:
reader = csv.reader(f)
header = next(reader)
assert header == ['query',
'rows',
'matches',
'quadwords',
'cachelines',
'parse',
'plan',
'match']
for row in reader:
total_time = float(row[-1]) + float(row[-2]) + float(row[-3])
times.append(total_time)
times.sort(reverse=True)
idx_max = len(times) - 1
idx = [round(idx_max / 2),
round(idx_max / 10),
round(idx_max / 100),
round(idx_max / 1000),
0]
tails = [times[x] for x in idx]
print(tails)
|
<commit_before><commit_msg>Add quick and dirty script to measure tail latency.<commit_after>
|
# Note that this is not a valid measurement of tail latency. This uses the execution times we measure because they're convenient, but this does not include queueing time inside BitFunnel nor does it include head-of-line blocking queue waiting time on the queue into BitFunnel.
import csv
filename = "/tmp/QueryPipelineStatistics.csv"
times = []
with open(filename) as f:
reader = csv.reader(f)
header = next(reader)
assert header == ['query',
'rows',
'matches',
'quadwords',
'cachelines',
'parse',
'plan',
'match']
for row in reader:
total_time = float(row[-1]) + float(row[-2]) + float(row[-3])
times.append(total_time)
times.sort(reverse=True)
idx_max = len(times) - 1
idx = [round(idx_max / 2),
round(idx_max / 10),
round(idx_max / 100),
round(idx_max / 1000),
0]
tails = [times[x] for x in idx]
print(tails)
|
Add quick and dirty script to measure tail latency.# Note that this is not a valid measurement of tail latency. This uses the execution times we measure because they're convenient, but this does not include queueing time inside BitFunnel nor does it include head-of-line blocking queue waiting time on the queue into BitFunnel.
import csv
filename = "/tmp/QueryPipelineStatistics.csv"
times = []
with open(filename) as f:
reader = csv.reader(f)
header = next(reader)
assert header == ['query',
'rows',
'matches',
'quadwords',
'cachelines',
'parse',
'plan',
'match']
for row in reader:
total_time = float(row[-1]) + float(row[-2]) + float(row[-3])
times.append(total_time)
times.sort(reverse=True)
idx_max = len(times) - 1
idx = [round(idx_max / 2),
round(idx_max / 10),
round(idx_max / 100),
round(idx_max / 1000),
0]
tails = [times[x] for x in idx]
print(tails)
|
<commit_before><commit_msg>Add quick and dirty script to measure tail latency.<commit_after># Note that this is not a valid measurement of tail latency. This uses the execution times we measure because they're convenient, but this does not include queueing time inside BitFunnel nor does it include head-of-line blocking queue waiting time on the queue into BitFunnel.
import csv
filename = "/tmp/QueryPipelineStatistics.csv"
times = []
with open(filename) as f:
reader = csv.reader(f)
header = next(reader)
assert header == ['query',
'rows',
'matches',
'quadwords',
'cachelines',
'parse',
'plan',
'match']
for row in reader:
total_time = float(row[-1]) + float(row[-2]) + float(row[-3])
times.append(total_time)
times.sort(reverse=True)
idx_max = len(times) - 1
idx = [round(idx_max / 2),
round(idx_max / 10),
round(idx_max / 100),
round(idx_max / 1000),
0]
tails = [times[x] for x in idx]
print(tails)
|
|
16dc79b63d6e4b9b29dd238dd57544d13bc4d9b5
|
shuup/core/management/commands/shuup_fix_order_status_identifiers.py
|
shuup/core/management/commands/shuup_fix_order_status_identifiers.py
|
# This file is part of Shuup.
#
# Copyright (c) 2017, Anders Innovations. All rights reserved.
#
# This source code is licensed under the OSL-3.0 license found in the
# LICENSE file in the root directory of this source tree.
from django.core.management.base import BaseCommand
from django.db.transaction import atomic
from shuup.core.models import OrderStatus, OrderStatusRole
class Command(BaseCommand):
@atomic
def handle(self, *args, **options):
data = [ # role, invalid_identifier, valid_identifier
(OrderStatusRole.PROCESSING, 'canceled', 'processing'),
(OrderStatusRole.COMPLETE, 'processing', 'complete'),
(OrderStatusRole.CANCELED, 'complete', 'canceled'),
]
to_post_process = []
for (role, invalid_identifier, valid_identifier) in data:
status = OrderStatus.objects.filter(
identifier=invalid_identifier, role=role).first()
if not status:
self.stdout.write("No changes to {} statuses".format(role))
continue
tmp_identifier = valid_identifier + '_tmp'
self.stdout.write(
"Updating identifier of {} status: {!r} -> {!r}".format(
role, status.identifier, tmp_identifier))
status.identifier = tmp_identifier
status.save()
to_post_process.append(status)
for status in to_post_process:
new_identifier = status.identifier.replace('_tmp', '')
self.stdout.write(
"Updating identifier of {} status: {!r} -> {!r}".format(
status.role, status.identifier, new_identifier))
status.identifier = new_identifier
status.save()
|
Add script for fixing OrderStatus identifiers
|
Add script for fixing OrderStatus identifiers
Add a tool to fix the identifiers of the OrderStatus objects, because
the default statuses might have been created with invalid identifiers
before the default status data was fixed in the previous commit.
|
Python
|
agpl-3.0
|
shoopio/shoop,shoopio/shoop,shoopio/shoop
|
Add script for fixing OrderStatus identifiers
Add a tool to fix the identifiers of the OrderStatus objects, because
the default statuses might have been created with invalid identifiers
before the default status data was fixed in the previous commit.
|
# This file is part of Shuup.
#
# Copyright (c) 2017, Anders Innovations. All rights reserved.
#
# This source code is licensed under the OSL-3.0 license found in the
# LICENSE file in the root directory of this source tree.
from django.core.management.base import BaseCommand
from django.db.transaction import atomic
from shuup.core.models import OrderStatus, OrderStatusRole
class Command(BaseCommand):
@atomic
def handle(self, *args, **options):
data = [ # role, invalid_identifier, valid_identifier
(OrderStatusRole.PROCESSING, 'canceled', 'processing'),
(OrderStatusRole.COMPLETE, 'processing', 'complete'),
(OrderStatusRole.CANCELED, 'complete', 'canceled'),
]
to_post_process = []
for (role, invalid_identifier, valid_identifier) in data:
status = OrderStatus.objects.filter(
identifier=invalid_identifier, role=role).first()
if not status:
self.stdout.write("No changes to {} statuses".format(role))
continue
tmp_identifier = valid_identifier + '_tmp'
self.stdout.write(
"Updating identifier of {} status: {!r} -> {!r}".format(
role, status.identifier, tmp_identifier))
status.identifier = tmp_identifier
status.save()
to_post_process.append(status)
for status in to_post_process:
new_identifier = status.identifier.replace('_tmp', '')
self.stdout.write(
"Updating identifier of {} status: {!r} -> {!r}".format(
status.role, status.identifier, new_identifier))
status.identifier = new_identifier
status.save()
|
<commit_before><commit_msg>Add script for fixing OrderStatus identifiers
Add a tool to fix the identifiers of the OrderStatus objects, because
the default statuses might have been created with invalid identifiers
before the default status data was fixed in the previous commit.<commit_after>
|
# This file is part of Shuup.
#
# Copyright (c) 2017, Anders Innovations. All rights reserved.
#
# This source code is licensed under the OSL-3.0 license found in the
# LICENSE file in the root directory of this source tree.
from django.core.management.base import BaseCommand
from django.db.transaction import atomic
from shuup.core.models import OrderStatus, OrderStatusRole
class Command(BaseCommand):
@atomic
def handle(self, *args, **options):
data = [ # role, invalid_identifier, valid_identifier
(OrderStatusRole.PROCESSING, 'canceled', 'processing'),
(OrderStatusRole.COMPLETE, 'processing', 'complete'),
(OrderStatusRole.CANCELED, 'complete', 'canceled'),
]
to_post_process = []
for (role, invalid_identifier, valid_identifier) in data:
status = OrderStatus.objects.filter(
identifier=invalid_identifier, role=role).first()
if not status:
self.stdout.write("No changes to {} statuses".format(role))
continue
tmp_identifier = valid_identifier + '_tmp'
self.stdout.write(
"Updating identifier of {} status: {!r} -> {!r}".format(
role, status.identifier, tmp_identifier))
status.identifier = tmp_identifier
status.save()
to_post_process.append(status)
for status in to_post_process:
new_identifier = status.identifier.replace('_tmp', '')
self.stdout.write(
"Updating identifier of {} status: {!r} -> {!r}".format(
status.role, status.identifier, new_identifier))
status.identifier = new_identifier
status.save()
|
Add script for fixing OrderStatus identifiers
Add a tool to fix the identifiers of the OrderStatus objects, because
the default statuses might have been created with invalid identifiers
before the default status data was fixed in the previous commit.# This file is part of Shuup.
#
# Copyright (c) 2017, Anders Innovations. All rights reserved.
#
# This source code is licensed under the OSL-3.0 license found in the
# LICENSE file in the root directory of this source tree.
from django.core.management.base import BaseCommand
from django.db.transaction import atomic
from shuup.core.models import OrderStatus, OrderStatusRole
class Command(BaseCommand):
@atomic
def handle(self, *args, **options):
data = [ # role, invalid_identifier, valid_identifier
(OrderStatusRole.PROCESSING, 'canceled', 'processing'),
(OrderStatusRole.COMPLETE, 'processing', 'complete'),
(OrderStatusRole.CANCELED, 'complete', 'canceled'),
]
to_post_process = []
for (role, invalid_identifier, valid_identifier) in data:
status = OrderStatus.objects.filter(
identifier=invalid_identifier, role=role).first()
if not status:
self.stdout.write("No changes to {} statuses".format(role))
continue
tmp_identifier = valid_identifier + '_tmp'
self.stdout.write(
"Updating identifier of {} status: {!r} -> {!r}".format(
role, status.identifier, tmp_identifier))
status.identifier = tmp_identifier
status.save()
to_post_process.append(status)
for status in to_post_process:
new_identifier = status.identifier.replace('_tmp', '')
self.stdout.write(
"Updating identifier of {} status: {!r} -> {!r}".format(
status.role, status.identifier, new_identifier))
status.identifier = new_identifier
status.save()
|
<commit_before><commit_msg>Add script for fixing OrderStatus identifiers
Add a tool to fix the identifiers of the OrderStatus objects, because
the default statuses might have been created with invalid identifiers
before the default status data was fixed in the previous commit.<commit_after># This file is part of Shuup.
#
# Copyright (c) 2017, Anders Innovations. All rights reserved.
#
# This source code is licensed under the OSL-3.0 license found in the
# LICENSE file in the root directory of this source tree.
from django.core.management.base import BaseCommand
from django.db.transaction import atomic
from shuup.core.models import OrderStatus, OrderStatusRole
class Command(BaseCommand):
@atomic
def handle(self, *args, **options):
data = [ # role, invalid_identifier, valid_identifier
(OrderStatusRole.PROCESSING, 'canceled', 'processing'),
(OrderStatusRole.COMPLETE, 'processing', 'complete'),
(OrderStatusRole.CANCELED, 'complete', 'canceled'),
]
to_post_process = []
for (role, invalid_identifier, valid_identifier) in data:
status = OrderStatus.objects.filter(
identifier=invalid_identifier, role=role).first()
if not status:
self.stdout.write("No changes to {} statuses".format(role))
continue
tmp_identifier = valid_identifier + '_tmp'
self.stdout.write(
"Updating identifier of {} status: {!r} -> {!r}".format(
role, status.identifier, tmp_identifier))
status.identifier = tmp_identifier
status.save()
to_post_process.append(status)
for status in to_post_process:
new_identifier = status.identifier.replace('_tmp', '')
self.stdout.write(
"Updating identifier of {} status: {!r} -> {!r}".format(
status.role, status.identifier, new_identifier))
status.identifier = new_identifier
status.save()
|
|
9f8b0fe642e7900ac1c966ad365a5fe99456d23e
|
setup.py
|
setup.py
|
from distutils.core import setup
from jsonref import __version__
with open("README.rst") as readme:
long_description = readme.read()
classifiers = [
"Development Status :: 1 - Planning",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.1",
"Programming Language :: Python :: 3.2",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: Implementation :: CPython",
"Programming Language :: Python :: Implementation :: PyPy",
]
setup(
name="jsonref",
version=__version__,
py_modules=["jsonref", "proxytypes"],
author="Chase Sterling",
author_email="chase.sterling@gmail.com",
classifiers=classifiers,
description="An implementation of JSON Reference for Python",
license="MIT",
long_description=long_description,
url="http://github.com/gazpachoking/jsonref",
)
|
from distutils.core import setup
from jsonref import __version__
with open("README.rst") as readme:
long_description = readme.read()
classifiers = [
"Development Status :: 1 - Planning",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.1",
"Programming Language :: Python :: 3.2",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: Implementation :: CPython",
"Programming Language :: Python :: Implementation :: PyPy",
]
setup(
name="jsonref",
version=__version__,
py_modules=["jsonref", "proxytypes"],
author="Chase Sterling",
author_email="chase.sterling@gmail.com",
classifiers=classifiers,
description="An implementation of JSON Reference for Python",
license="MIT",
long_description=long_description,
url="http://github.com/gazpachoking/jsonref",
)
|
Include Python 3.4 and 3.5
|
Include Python 3.4 and 3.5
|
Python
|
mit
|
gazpachoking/jsonref
|
from distutils.core import setup
from jsonref import __version__
with open("README.rst") as readme:
long_description = readme.read()
classifiers = [
"Development Status :: 1 - Planning",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.1",
"Programming Language :: Python :: 3.2",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: Implementation :: CPython",
"Programming Language :: Python :: Implementation :: PyPy",
]
setup(
name="jsonref",
version=__version__,
py_modules=["jsonref", "proxytypes"],
author="Chase Sterling",
author_email="chase.sterling@gmail.com",
classifiers=classifiers,
description="An implementation of JSON Reference for Python",
license="MIT",
long_description=long_description,
url="http://github.com/gazpachoking/jsonref",
)
Include Python 3.4 and 3.5
|
from distutils.core import setup
from jsonref import __version__
with open("README.rst") as readme:
long_description = readme.read()
classifiers = [
"Development Status :: 1 - Planning",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.1",
"Programming Language :: Python :: 3.2",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: Implementation :: CPython",
"Programming Language :: Python :: Implementation :: PyPy",
]
setup(
name="jsonref",
version=__version__,
py_modules=["jsonref", "proxytypes"],
author="Chase Sterling",
author_email="chase.sterling@gmail.com",
classifiers=classifiers,
description="An implementation of JSON Reference for Python",
license="MIT",
long_description=long_description,
url="http://github.com/gazpachoking/jsonref",
)
|
<commit_before>from distutils.core import setup
from jsonref import __version__
with open("README.rst") as readme:
long_description = readme.read()
classifiers = [
"Development Status :: 1 - Planning",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.1",
"Programming Language :: Python :: 3.2",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: Implementation :: CPython",
"Programming Language :: Python :: Implementation :: PyPy",
]
setup(
name="jsonref",
version=__version__,
py_modules=["jsonref", "proxytypes"],
author="Chase Sterling",
author_email="chase.sterling@gmail.com",
classifiers=classifiers,
description="An implementation of JSON Reference for Python",
license="MIT",
long_description=long_description,
url="http://github.com/gazpachoking/jsonref",
)
<commit_msg>Include Python 3.4 and 3.5<commit_after>
|
from distutils.core import setup
from jsonref import __version__
with open("README.rst") as readme:
long_description = readme.read()
classifiers = [
"Development Status :: 1 - Planning",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.1",
"Programming Language :: Python :: 3.2",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: Implementation :: CPython",
"Programming Language :: Python :: Implementation :: PyPy",
]
setup(
name="jsonref",
version=__version__,
py_modules=["jsonref", "proxytypes"],
author="Chase Sterling",
author_email="chase.sterling@gmail.com",
classifiers=classifiers,
description="An implementation of JSON Reference for Python",
license="MIT",
long_description=long_description,
url="http://github.com/gazpachoking/jsonref",
)
|
from distutils.core import setup
from jsonref import __version__
with open("README.rst") as readme:
long_description = readme.read()
classifiers = [
"Development Status :: 1 - Planning",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.1",
"Programming Language :: Python :: 3.2",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: Implementation :: CPython",
"Programming Language :: Python :: Implementation :: PyPy",
]
setup(
name="jsonref",
version=__version__,
py_modules=["jsonref", "proxytypes"],
author="Chase Sterling",
author_email="chase.sterling@gmail.com",
classifiers=classifiers,
description="An implementation of JSON Reference for Python",
license="MIT",
long_description=long_description,
url="http://github.com/gazpachoking/jsonref",
)
Include Python 3.4 and 3.5from distutils.core import setup
from jsonref import __version__
with open("README.rst") as readme:
long_description = readme.read()
classifiers = [
"Development Status :: 1 - Planning",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.1",
"Programming Language :: Python :: 3.2",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: Implementation :: CPython",
"Programming Language :: Python :: Implementation :: PyPy",
]
setup(
name="jsonref",
version=__version__,
py_modules=["jsonref", "proxytypes"],
author="Chase Sterling",
author_email="chase.sterling@gmail.com",
classifiers=classifiers,
description="An implementation of JSON Reference for Python",
license="MIT",
long_description=long_description,
url="http://github.com/gazpachoking/jsonref",
)
|
<commit_before>from distutils.core import setup
from jsonref import __version__
with open("README.rst") as readme:
long_description = readme.read()
classifiers = [
"Development Status :: 1 - Planning",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.1",
"Programming Language :: Python :: 3.2",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: Implementation :: CPython",
"Programming Language :: Python :: Implementation :: PyPy",
]
setup(
name="jsonref",
version=__version__,
py_modules=["jsonref", "proxytypes"],
author="Chase Sterling",
author_email="chase.sterling@gmail.com",
classifiers=classifiers,
description="An implementation of JSON Reference for Python",
license="MIT",
long_description=long_description,
url="http://github.com/gazpachoking/jsonref",
)
<commit_msg>Include Python 3.4 and 3.5<commit_after>from distutils.core import setup
from jsonref import __version__
with open("README.rst") as readme:
long_description = readme.read()
classifiers = [
"Development Status :: 1 - Planning",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.1",
"Programming Language :: Python :: 3.2",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: Implementation :: CPython",
"Programming Language :: Python :: Implementation :: PyPy",
]
setup(
name="jsonref",
version=__version__,
py_modules=["jsonref", "proxytypes"],
author="Chase Sterling",
author_email="chase.sterling@gmail.com",
classifiers=classifiers,
description="An implementation of JSON Reference for Python",
license="MIT",
long_description=long_description,
url="http://github.com/gazpachoking/jsonref",
)
|
d23409e27f70bf6811bd25663accf9524dce1436
|
cloudcaster/json2yaml.py
|
cloudcaster/json2yaml.py
|
#!/usr/bin/python
import json
import sys
import argparse
import yaml
from yaml import Loader, Dumper
from pprint import pprint
parser = argparse.ArgumentParser()
parser.add_argument("-v", "--verbose", help="verbosity", action="store_true")
parser.add_argument("file", help="cloudcaster JSON file")
args = parser.parse_args()
if args.file == None:
parser.print_help()
sys.exit(1)
verbose = args.verbose
conffile = open(args.file).read()
conf = json.loads(conffile)
print yaml.safe_dump(conf)
|
Add json to yaml conversion script
|
Add json to yaml conversion script
|
Python
|
bsd-2-clause
|
TouqeerHussain/ops,WrathOfChris/ops,WrathOfChris/ops,TouqeerHussain/ops
|
Add json to yaml conversion script
|
#!/usr/bin/python
import json
import sys
import argparse
import yaml
from yaml import Loader, Dumper
from pprint import pprint
parser = argparse.ArgumentParser()
parser.add_argument("-v", "--verbose", help="verbosity", action="store_true")
parser.add_argument("file", help="cloudcaster JSON file")
args = parser.parse_args()
if args.file == None:
parser.print_help()
sys.exit(1)
verbose = args.verbose
conffile = open(args.file).read()
conf = json.loads(conffile)
print yaml.safe_dump(conf)
|
<commit_before><commit_msg>Add json to yaml conversion script<commit_after>
|
#!/usr/bin/python
import json
import sys
import argparse
import yaml
from yaml import Loader, Dumper
from pprint import pprint
parser = argparse.ArgumentParser()
parser.add_argument("-v", "--verbose", help="verbosity", action="store_true")
parser.add_argument("file", help="cloudcaster JSON file")
args = parser.parse_args()
if args.file == None:
parser.print_help()
sys.exit(1)
verbose = args.verbose
conffile = open(args.file).read()
conf = json.loads(conffile)
print yaml.safe_dump(conf)
|
Add json to yaml conversion script#!/usr/bin/python
import json
import sys
import argparse
import yaml
from yaml import Loader, Dumper
from pprint import pprint
parser = argparse.ArgumentParser()
parser.add_argument("-v", "--verbose", help="verbosity", action="store_true")
parser.add_argument("file", help="cloudcaster JSON file")
args = parser.parse_args()
if args.file == None:
parser.print_help()
sys.exit(1)
verbose = args.verbose
conffile = open(args.file).read()
conf = json.loads(conffile)
print yaml.safe_dump(conf)
|
<commit_before><commit_msg>Add json to yaml conversion script<commit_after>#!/usr/bin/python
import json
import sys
import argparse
import yaml
from yaml import Loader, Dumper
from pprint import pprint
parser = argparse.ArgumentParser()
parser.add_argument("-v", "--verbose", help="verbosity", action="store_true")
parser.add_argument("file", help="cloudcaster JSON file")
args = parser.parse_args()
if args.file == None:
parser.print_help()
sys.exit(1)
verbose = args.verbose
conffile = open(args.file).read()
conf = json.loads(conffile)
print yaml.safe_dump(conf)
|
|
904e53e15902f393af586bfac0276635a68714bd
|
alembic/versions/40f5c56adfee_add_users_provider_column.py
|
alembic/versions/40f5c56adfee_add_users_provider_column.py
|
"""added provider column to users table
Revision ID: 40f5c56adfee
Revises: 407f7f23156f
Create Date: 2016-02-02 22:19:49.329707
"""
# revision identifiers, used by Alembic.
revision = '40f5c56adfee'
down_revision = '407f7f23156f'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade():
op.add_column('users', sa.Column('provider', sa.String(64), nullable=True))
def downgrade():
op.drop_column('users', 'provider')
|
Add file missing from 948063a
|
Add file missing from 948063a
|
Python
|
bsd-3-clause
|
yosinv/anyway,boazin/anyway,OmerSchechter/anyway,omerxx/anyway,hasadna/anyway,hasadna/anyway,omerxx/anyway,hasadna/anyway,omerxx/anyway,boazin/anyway,hasadna/anyway,OmerSchechter/anyway,yosinv/anyway,yosinv/anyway,OmerSchechter/anyway,boazin/anyway
|
Add file missing from 948063a
|
"""added provider column to users table
Revision ID: 40f5c56adfee
Revises: 407f7f23156f
Create Date: 2016-02-02 22:19:49.329707
"""
# revision identifiers, used by Alembic.
revision = '40f5c56adfee'
down_revision = '407f7f23156f'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade():
op.add_column('users', sa.Column('provider', sa.String(64), nullable=True))
def downgrade():
op.drop_column('users', 'provider')
|
<commit_before><commit_msg>Add file missing from 948063a<commit_after>
|
"""added provider column to users table
Revision ID: 40f5c56adfee
Revises: 407f7f23156f
Create Date: 2016-02-02 22:19:49.329707
"""
# revision identifiers, used by Alembic.
revision = '40f5c56adfee'
down_revision = '407f7f23156f'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade():
op.add_column('users', sa.Column('provider', sa.String(64), nullable=True))
def downgrade():
op.drop_column('users', 'provider')
|
Add file missing from 948063a"""added provider column to users table
Revision ID: 40f5c56adfee
Revises: 407f7f23156f
Create Date: 2016-02-02 22:19:49.329707
"""
# revision identifiers, used by Alembic.
revision = '40f5c56adfee'
down_revision = '407f7f23156f'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade():
op.add_column('users', sa.Column('provider', sa.String(64), nullable=True))
def downgrade():
op.drop_column('users', 'provider')
|
<commit_before><commit_msg>Add file missing from 948063a<commit_after>"""added provider column to users table
Revision ID: 40f5c56adfee
Revises: 407f7f23156f
Create Date: 2016-02-02 22:19:49.329707
"""
# revision identifiers, used by Alembic.
revision = '40f5c56adfee'
down_revision = '407f7f23156f'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade():
op.add_column('users', sa.Column('provider', sa.String(64), nullable=True))
def downgrade():
op.drop_column('users', 'provider')
|
|
f7c02fa02f8b4843e62c18ded7600fb77dc3c002
|
sar_util.py
|
sar_util.py
|
###########################################
#
# A basic search and replace on a text file
#
###########################################
import sys
from operator import xor
# add strings to replace here
replace_map = {'#"cwd" : "{TR_TESTSUITE_DIR}",': '"cwd" : "../../thrust/internal/test",'}
# searches and replaces in place, returns description and status
def search_and_replace(filename, search=None, replace=None):
if xor(bool(search), bool(replace)):
return "[search] [replace] should both be present", 1
# read all the data in the file to a string
try:
with open(filename, 'r') as f:
data = f.read()
except Exception as e:
return "Error: {0}".format(e), 1
# search and replace
try:
if search and replace:
data = data.replace(search, replace)
else:
for k in replace_map:
data = data.replace(k, replace_map[k])
except Exception as e:
return "Error: {0}".format(e), 1
# write new string to file
try:
with open(filename, 'w') as f:
f.write(data)
except Exception as e:
return "Error: {0}".format(e), 1
return "Replace successful", 0
# validates params and calls search and replace
def main():
# validate the number of arguments
if len(sys.argv) == 4:
text, status = search_and_replace(sys.argv[1], sys.argv[2], sys.argv[3])
elif len(sys.argv) == 2:
text, status = search_and_replace(sys.argv[1])
else:
text, status = "Command Format: python sar_utility <filename> [search] [replace]", 1
print text
sys.exit(status)
if __name__ == "__main__":
main()
|
Add search and replace python script
|
Add search and replace python script
[git-p4: depot-paths = "//sw/gpgpu/thrust/": change = 23090677]
|
Python
|
apache-2.0
|
thrust/thrust,andrewcorrigan/thrust-multi-permutation-iterator,jaredhoberock/thrust,thrust/thrust,andrewcorrigan/thrust-multi-permutation-iterator,jaredhoberock/thrust,jaredhoberock/thrust,andrewcorrigan/thrust-multi-permutation-iterator,thrust/thrust,jaredhoberock/thrust,jaredhoberock/thrust,thrust/thrust,thrust/thrust
|
Add search and replace python script
[git-p4: depot-paths = "//sw/gpgpu/thrust/": change = 23090677]
|
###########################################
#
# A basic search and replace on a text file
#
###########################################
import sys
from operator import xor
# add strings to replace here
replace_map = {'#"cwd" : "{TR_TESTSUITE_DIR}",': '"cwd" : "../../thrust/internal/test",'}
# searches and replaces in place, returns description and status
def search_and_replace(filename, search=None, replace=None):
if xor(bool(search), bool(replace)):
return "[search] [replace] should both be present", 1
# read all the data in the file to a string
try:
with open(filename, 'r') as f:
data = f.read()
except Exception as e:
return "Error: {0}".format(e), 1
# search and replace
try:
if search and replace:
data = data.replace(search, replace)
else:
for k in replace_map:
data = data.replace(k, replace_map[k])
except Exception as e:
return "Error: {0}".format(e), 1
# write new string to file
try:
with open(filename, 'w') as f:
f.write(data)
except Exception as e:
return "Error: {0}".format(e), 1
return "Replace successful", 0
# validates params and calls search and replace
def main():
# validate the number of arguments
if len(sys.argv) == 4:
text, status = search_and_replace(sys.argv[1], sys.argv[2], sys.argv[3])
elif len(sys.argv) == 2:
text, status = search_and_replace(sys.argv[1])
else:
text, status = "Command Format: python sar_utility <filename> [search] [replace]", 1
print text
sys.exit(status)
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Add search and replace python script
[git-p4: depot-paths = "//sw/gpgpu/thrust/": change = 23090677]<commit_after>
|
###########################################
#
# A basic search and replace on a text file
#
###########################################
import sys
from operator import xor
# add strings to replace here
replace_map = {'#"cwd" : "{TR_TESTSUITE_DIR}",': '"cwd" : "../../thrust/internal/test",'}
# searches and replaces in place, returns description and status
def search_and_replace(filename, search=None, replace=None):
if xor(bool(search), bool(replace)):
return "[search] [replace] should both be present", 1
# read all the data in the file to a string
try:
with open(filename, 'r') as f:
data = f.read()
except Exception as e:
return "Error: {0}".format(e), 1
# search and replace
try:
if search and replace:
data = data.replace(search, replace)
else:
for k in replace_map:
data = data.replace(k, replace_map[k])
except Exception as e:
return "Error: {0}".format(e), 1
# write new string to file
try:
with open(filename, 'w') as f:
f.write(data)
except Exception as e:
return "Error: {0}".format(e), 1
return "Replace successful", 0
# validates params and calls search and replace
def main():
# validate the number of arguments
if len(sys.argv) == 4:
text, status = search_and_replace(sys.argv[1], sys.argv[2], sys.argv[3])
elif len(sys.argv) == 2:
text, status = search_and_replace(sys.argv[1])
else:
text, status = "Command Format: python sar_utility <filename> [search] [replace]", 1
print text
sys.exit(status)
if __name__ == "__main__":
main()
|
Add search and replace python script
[git-p4: depot-paths = "//sw/gpgpu/thrust/": change = 23090677]###########################################
#
# A basic search and replace on a text file
#
###########################################
import sys
from operator import xor
# add strings to replace here
replace_map = {'#"cwd" : "{TR_TESTSUITE_DIR}",': '"cwd" : "../../thrust/internal/test",'}
# searches and replaces in place, returns description and status
def search_and_replace(filename, search=None, replace=None):
if xor(bool(search), bool(replace)):
return "[search] [replace] should both be present", 1
# read all the data in the file to a string
try:
with open(filename, 'r') as f:
data = f.read()
except Exception as e:
return "Error: {0}".format(e), 1
# search and replace
try:
if search and replace:
data = data.replace(search, replace)
else:
for k in replace_map:
data = data.replace(k, replace_map[k])
except Exception as e:
return "Error: {0}".format(e), 1
# write new string to file
try:
with open(filename, 'w') as f:
f.write(data)
except Exception as e:
return "Error: {0}".format(e), 1
return "Replace successful", 0
# validates params and calls search and replace
def main():
# validate the number of arguments
if len(sys.argv) == 4:
text, status = search_and_replace(sys.argv[1], sys.argv[2], sys.argv[3])
elif len(sys.argv) == 2:
text, status = search_and_replace(sys.argv[1])
else:
text, status = "Command Format: python sar_utility <filename> [search] [replace]", 1
print text
sys.exit(status)
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Add search and replace python script
[git-p4: depot-paths = "//sw/gpgpu/thrust/": change = 23090677]<commit_after>###########################################
#
# A basic search and replace on a text file
#
###########################################
import sys
from operator import xor
# add strings to replace here
replace_map = {'#"cwd" : "{TR_TESTSUITE_DIR}",': '"cwd" : "../../thrust/internal/test",'}
# searches and replaces in place, returns description and status
def search_and_replace(filename, search=None, replace=None):
if xor(bool(search), bool(replace)):
return "[search] [replace] should both be present", 1
# read all the data in the file to a string
try:
with open(filename, 'r') as f:
data = f.read()
except Exception as e:
return "Error: {0}".format(e), 1
# search and replace
try:
if search and replace:
data = data.replace(search, replace)
else:
for k in replace_map:
data = data.replace(k, replace_map[k])
except Exception as e:
return "Error: {0}".format(e), 1
# write new string to file
try:
with open(filename, 'w') as f:
f.write(data)
except Exception as e:
return "Error: {0}".format(e), 1
return "Replace successful", 0
# validates params and calls search and replace
def main():
# validate the number of arguments
if len(sys.argv) == 4:
text, status = search_and_replace(sys.argv[1], sys.argv[2], sys.argv[3])
elif len(sys.argv) == 2:
text, status = search_and_replace(sys.argv[1])
else:
text, status = "Command Format: python sar_utility <filename> [search] [replace]", 1
print text
sys.exit(status)
if __name__ == "__main__":
main()
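
A short usage sketch for the script above. Note that it uses a Python 2 print statement, so it has to run under Python 2; the file name and replacement strings below are invented purely for illustration.

# Hypothetical command-line invocations:
#   python sar_util.py testsuite.json "old text" "new text"   # explicit search/replace
#   python sar_util.py testsuite.json                         # apply the built-in replace_map
# The core helper can also be called directly:
import sar_util

text, status = sar_util.search_and_replace('testsuite.json', 'old text', 'new text')
print(text)    # "Replace successful" on success, an error description otherwise
print(status)  # 0 on success, 1 on failure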
|
|
e4ca5f66b8f1770fd496e11c2e03053e3e85592e
|
tests/flask_test.py
|
tests/flask_test.py
|
# This file is part of Redmill.
#
# Redmill is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Redmill is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Redmill. If not, see <http://www.gnu.org/licenses/>.
import json
import redmill
import database_test
class FlaskTest(database_test.DatabaseTest):
def setUp(self):
database_test.DatabaseTest.setUp(self)
self.app = redmill.app.test_client()
def _get_response(self, method, url, *args, **kwargs):
response = getattr(self.app, method)(url, *args, **kwargs)
if response.status_code/100 not in [4,5]:
if response.data:
data = json.loads(response.data)
else:
data = None
else:
data = response.data
return response.status_code, response.headers, data
|
Add base class for Flask tests
|
Add base class for Flask tests
|
Python
|
agpl-3.0
|
lamyj/redmill,lamyj/redmill,lamyj/redmill
|
Add base class for Flask tests
|
# This file is part of Redmill.
#
# Redmill is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Redmill is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Redmill. If not, see <http://www.gnu.org/licenses/>.
import json
import redmill
import database_test
class FlaskTest(database_test.DatabaseTest):
def setUp(self):
database_test.DatabaseTest.setUp(self)
self.app = redmill.app.test_client()
def _get_response(self, method, url, *args, **kwargs):
response = getattr(self.app, method)(url, *args, **kwargs)
if response.status_code/100 not in [4,5]:
if response.data:
data = json.loads(response.data)
else:
data = None
else:
data = response.data
return response.status_code, response.headers, data
|
<commit_before><commit_msg>Add base class for Flask tests<commit_after>
|
# This file is part of Redmill.
#
# Redmill is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Redmill is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Redmill. If not, see <http://www.gnu.org/licenses/>.
import json
import redmill
import database_test
class FlaskTest(database_test.DatabaseTest):
def setUp(self):
database_test.DatabaseTest.setUp(self)
self.app = redmill.app.test_client()
def _get_response(self, method, url, *args, **kwargs):
response = getattr(self.app, method)(url, *args, **kwargs)
if response.status_code/100 not in [4,5]:
if response.data:
data = json.loads(response.data)
else:
data = None
else:
data = response.data
return response.status_code, response.headers, data
|
Add base class for Flask tests# This file is part of Redmill.
#
# Redmill is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Redmill is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Redmill. If not, see <http://www.gnu.org/licenses/>.
import json
import redmill
import database_test
class FlaskTest(database_test.DatabaseTest):
def setUp(self):
database_test.DatabaseTest.setUp(self)
self.app = redmill.app.test_client()
def _get_response(self, method, url, *args, **kwargs):
response = getattr(self.app, method)(url, *args, **kwargs)
if response.status_code/100 not in [4,5]:
if response.data:
data = json.loads(response.data)
else:
data = None
else:
data = response.data
return response.status_code, response.headers, data
|
<commit_before><commit_msg>Add base class for Flask tests<commit_after># This file is part of Redmill.
#
# Redmill is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Redmill is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Redmill. If not, see <http://www.gnu.org/licenses/>.
import json
import redmill
import database_test
class FlaskTest(database_test.DatabaseTest):
def setUp(self):
database_test.DatabaseTest.setUp(self)
self.app = redmill.app.test_client()
def _get_response(self, method, url, *args, **kwargs):
response = getattr(self.app, method)(url, *args, **kwargs)
if response.status_code/100 not in [4,5]:
if response.data:
data = json.loads(response.data)
else:
data = None
else:
data = response.data
return response.status_code, response.headers, data
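
A hypothetical subclass showing how the helper above might be used in a concrete test. The URL and assertion are invented for illustration, and it is assumed that database_test.DatabaseTest ultimately derives from unittest.TestCase.

import flask_test

class RootTest(flask_test.FlaskTest):
    def test_get_root(self):
        # _get_response returns (status code, headers, decoded JSON or raw error body).
        status, headers, data = self._get_response('get', '/')
        self.assertEqual(status, 200)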
|
|
e56278f00f38b199cfaba91d5dc904a2be44b944
|
fortrantools.py
|
fortrantools.py
|
#!/usr/bin/env python
# encoding: utf-8
from __future__ import division, print_function
def str_complex(c, kindstr=''):
"""Converts the complex number `c` to a string in Fortran-format, i.e.
(Re c, Im c). If c is iterable, it returns a string of the form
[(Re c_1, Im c_1), ...].
:c: Number/Iterable to print
:returns: String in Fortran format
>>> str_complex(1)
(1.0, 0.0)
>>> str_complex(np.array([1.j, 1]))
[(0.0, 1.0), (1.0, 0.0)]
>>> str_complex(1, kindstr='_dp')
(1.0_dp, 0.0_dp)
>>> str_complex(np.array([1.j, 1]), kindstr='_sp')
[(0.0_sp, 1.0_sp), (1.0_sp, 0.0_sp)]
"""
if hasattr(c, '__iter__'):
return '[' + ', '.join([str_complex(c_i, kindstr) for c_i in c]) + ']'
else:
c = complex(c)
return '({}{}, {}{})'.format(c.real, kindstr, c.imag, kindstr)
if __name__ == '__main__':
import doctest
doctest.testmod()
|
Add converter from complex to Fortran-formated string
|
Add converter from complex to Fortran-formated string
|
Python
|
unlicense
|
dseuss/pythonlibs
|
Add converter from complex to Fortran-formated string
|
#!/usr/bin/env python
# encoding: utf-8
from __future__ import division, print_function
def str_complex(c, kindstr=''):
"""Converts the complex number `c` to a string in Fortran-format, i.e.
(Re c, Im c). If c is iterable, it returns a string of the form
[(Re c_1, Im c_1), ...].
:c: Number/Iterable to print
:returns: String in Fortran format
>>> str_complex(1)
(1.0, 0.0)
>>> str_complex(np.array([1.j, 1]))
[(0.0, 1.0), (1.0, 0.0)]
>>> str_complex(1, kindstr='_dp')
(1.0_dp, 0.0_dp)
>>> str_complex(np.array([1.j, 1]), kindstr='_sp')
[(0.0_sp, 1.0_sp), (1.0_sp, 0.0_sp)]
"""
if hasattr(c, '__iter__'):
return '[' + ', '.join([str_complex(c_i, kindstr) for c_i in c]) + ']'
else:
c = complex(c)
return '({}{}, {}{})'.format(c.real, kindstr, c.imag, kindstr)
if __name__ == '__main__':
import doctest
doctest.testmod()
|
<commit_before><commit_msg>Add converter from complex to Fortran-formated string<commit_after>
|
#!/usr/bin/env python
# encoding: utf-8
from __future__ import division, print_function
def str_complex(c, kindstr=''):
"""Converts the complex number `c` to a string in Fortran-format, i.e.
(Re c, Im c). If c is iterable, it returns a string of the form
[(Re c_1, Im c_1), ...].
:c: Number/Iterable to print
:returns: String in Fortran format
>>> str_complex(1)
(1.0, 0.0)
>>> str_complex(np.array([1.j, 1]))
[(0.0, 1.0), (1.0, 0.0)]
>>> str_complex(1, kindstr='_dp')
(1.0_dp, 0.0_dp)
>>> str_complex(np.array([1.j, 1]), kindstr='_sp')
[(0.0_sp, 1.0_sp), (1.0_sp, 0.0_sp)]
"""
if hasattr(c, '__iter__'):
return '[' + ', '.join([str_complex(c_i, kindstr) for c_i in c]) + ']'
else:
c = complex(c)
return '({}{}, {}{})'.format(c.real, kindstr, c.imag, kindstr)
if __name__ == '__main__':
import doctest
doctest.testmod()
|
Add converter from complex to Fortran-formated string#!/usr/bin/env python
# encoding: utf-8
from __future__ import division, print_function
def str_complex(c, kindstr=''):
"""Converts the complex number `c` to a string in Fortran-format, i.e.
(Re c, Im c). If c is iterable, it returns a string of the form
[(Re c_1, Im c_1), ...].
:c: Number/Iterable to print
:returns: String in Fortran format
>>> str_complex(1)
(1.0, 0.0)
>>> str_complex(np.array([1.j, 1]))
[(0.0, 1.0), (1.0, 0.0)]
>>> str_complex(1, kindstr='_dp')
(1.0_dp, 0.0_dp)
>>> str_complex(np.array([1.j, 1]), kindstr='_sp')
[(0.0_sp, 1.0_sp), (1.0_sp, 0.0_sp)]
"""
if hasattr(c, '__iter__'):
return '[' + ', '.join([str_complex(c_i, kindstr) for c_i in c]) + ']'
else:
c = complex(c)
return '({}{}, {}{})'.format(c.real, kindstr, c.imag, kindstr)
if __name__ == '__main__':
import doctest
doctest.testmod()
|
<commit_before><commit_msg>Add converter from complex to Fortran-formated string<commit_after>#!/usr/bin/env python
# encoding: utf-8
from __future__ import division, print_function
def str_complex(c, kindstr=''):
"""Converts the complex number `c` to a string in Fortran-format, i.e.
(Re c, Im c). If c is iterable, it returns a string of the form
[(Re c_1, Im c_1), ...].
:c: Number/Iterable to print
:returns: String in Fortran format
>>> str_complex(1)
(1.0, 0.0)
>>> str_complex(np.array([1.j, 1]))
[(0.0, 1.0), (1.0, 0.0)]
>>> str_complex(1, kindstr='_dp')
(1.0_dp, 0.0_dp)
>>> str_complex(np.array([1.j, 1]), kindstr='_sp')
[(0.0_sp, 1.0_sp), (1.0_sp, 0.0_sp)]
"""
if hasattr(c, '__iter__'):
return '[' + ', '.join([str_complex(c_i, kindstr) for c_i in c]) + ']'
else:
c = complex(c)
return '({}{}, {}{})'.format(c.real, kindstr, c.imag, kindstr)
if __name__ == '__main__':
import doctest
doctest.testmod()
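
One detail worth noting: the doctests above call np.array, but numpy is never imported in the module, so running doctest.testmod() as written would fail on those two examples with a NameError. A minimal sketch of how the examples could be exercised with numpy injected into the doctest namespace (an assumption, not part of the original commit):

import doctest
import numpy as np

import fortrantools  # assumes the module above is importable under this name

# Supply `np` to the doctest globals so the np.array examples can run.
doctest.testmod(fortrantools, extraglobs={'np': np}, verbose=True)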
|
|
772e590605971ee9c165e4ffdfec844edaf017f8
|
content/test/gpu/gpu_tests/gpu_rasterization_expectations.py
|
content/test/gpu/gpu_tests/gpu_rasterization_expectations.py
|
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from gpu_test_expectations import GpuTestExpectations
# See the GpuTestExpectations class for documentation.
class GpuRasterizationExpectations(GpuTestExpectations):
def SetExpectations(self):
# Sample Usage:
# self.Fail('GpuRasterization.BlueBox',
# ['mac', 'amd', ('nvidia', 0x1234)], bug=123)
# Flaky on Mac Retina tryservers.
self.Fail('GpuRasterization.BlueBox',
['mac', ('nvidia', 0x0fe9)], bug=466690)
|
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from gpu_test_expectations import GpuTestExpectations
# See the GpuTestExpectations class for documentation.
class GpuRasterizationExpectations(GpuTestExpectations):
def SetExpectations(self):
# Sample Usage:
# self.Fail('GpuRasterization.BlueBox',
# ['mac', 'amd', ('nvidia', 0x1234)], bug=123)
pass
|
Remove suppression of GpuRasterization.BlueBox on Mac
|
Remove suppression of GpuRasterization.BlueBox on Mac
The regressing CLs have been reverted.
BUG=466690
Review URL: https://codereview.chromium.org/1014503005
Cr-Commit-Position: 972c6d2dc6dd5efdad1377c0d224e03eb8f276f7@{#320789}
|
Python
|
bsd-3-clause
|
Just-D/chromium-1,axinging/chromium-crosswalk,TheTypoMaster/chromium-crosswalk,axinging/chromium-crosswalk,fujunwei/chromium-crosswalk,Just-D/chromium-1,chuan9/chromium-crosswalk,ltilve/chromium,chuan9/chromium-crosswalk,PeterWangIntel/chromium-crosswalk,chuan9/chromium-crosswalk,ltilve/chromium,Just-D/chromium-1,PeterWangIntel/chromium-crosswalk,Pluto-tv/chromium-crosswalk,Pluto-tv/chromium-crosswalk,TheTypoMaster/chromium-crosswalk,Chilledheart/chromium,axinging/chromium-crosswalk,hgl888/chromium-crosswalk,Fireblend/chromium-crosswalk,Pluto-tv/chromium-crosswalk,hgl888/chromium-crosswalk,Just-D/chromium-1,Chilledheart/chromium,fujunwei/chromium-crosswalk,Fireblend/chromium-crosswalk,PeterWangIntel/chromium-crosswalk,hgl888/chromium-crosswalk,Pluto-tv/chromium-crosswalk,TheTypoMaster/chromium-crosswalk,axinging/chromium-crosswalk,chuan9/chromium-crosswalk,Fireblend/chromium-crosswalk,fujunwei/chromium-crosswalk,fujunwei/chromium-crosswalk,chuan9/chromium-crosswalk,Chilledheart/chromium,hgl888/chromium-crosswalk,Just-D/chromium-1,TheTypoMaster/chromium-crosswalk,ltilve/chromium,TheTypoMaster/chromium-crosswalk,Pluto-tv/chromium-crosswalk,Chilledheart/chromium,fujunwei/chromium-crosswalk,hgl888/chromium-crosswalk,hgl888/chromium-crosswalk,Fireblend/chromium-crosswalk,ltilve/chromium,chuan9/chromium-crosswalk,axinging/chromium-crosswalk,chuan9/chromium-crosswalk,chuan9/chromium-crosswalk,axinging/chromium-crosswalk,Fireblend/chromium-crosswalk,chuan9/chromium-crosswalk,fujunwei/chromium-crosswalk,Chilledheart/chromium,TheTypoMaster/chromium-crosswalk,TheTypoMaster/chromium-crosswalk,hgl888/chromium-crosswalk,Pluto-tv/chromium-crosswalk,PeterWangIntel/chromium-crosswalk,hgl888/chromium-crosswalk,axinging/chromium-crosswalk,axinging/chromium-crosswalk,Chilledheart/chromium,PeterWangIntel/chromium-crosswalk,fujunwei/chromium-crosswalk,ltilve/chromium,Just-D/chromium-1,Fireblend/chromium-crosswalk,Pluto-tv/chromium-crosswalk,TheTypoMaster/chromium-crosswalk,fujunwei/chromium-crosswalk,Just-D/chromium-1,Fireblend/chromium-crosswalk,ltilve/chromium,ltilve/chromium,axinging/chromium-crosswalk,Pluto-tv/chromium-crosswalk,Just-D/chromium-1,hgl888/chromium-crosswalk,TheTypoMaster/chromium-crosswalk,Chilledheart/chromium,fujunwei/chromium-crosswalk,Pluto-tv/chromium-crosswalk,Chilledheart/chromium,PeterWangIntel/chromium-crosswalk,Fireblend/chromium-crosswalk,Chilledheart/chromium,axinging/chromium-crosswalk,Just-D/chromium-1,Fireblend/chromium-crosswalk,PeterWangIntel/chromium-crosswalk,PeterWangIntel/chromium-crosswalk,ltilve/chromium,PeterWangIntel/chromium-crosswalk,ltilve/chromium,axinging/chromium-crosswalk
|
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from gpu_test_expectations import GpuTestExpectations
# See the GpuTestExpectations class for documentation.
class GpuRasterizationExpectations(GpuTestExpectations):
def SetExpectations(self):
# Sample Usage:
# self.Fail('GpuRasterization.BlueBox',
# ['mac', 'amd', ('nvidia', 0x1234)], bug=123)
# Flaky on Mac Retina tryservers.
self.Fail('GpuRasterization.BlueBox',
['mac', ('nvidia', 0x0fe9)], bug=466690)
Remove suppression of GpuRasterization.BlueBox on Mac
The regressing CLs have been reverted.
BUG=466690
Review URL: https://codereview.chromium.org/1014503005
Cr-Commit-Position: 972c6d2dc6dd5efdad1377c0d224e03eb8f276f7@{#320789}
|
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from gpu_test_expectations import GpuTestExpectations
# See the GpuTestExpectations class for documentation.
class GpuRasterizationExpectations(GpuTestExpectations):
def SetExpectations(self):
# Sample Usage:
# self.Fail('GpuRasterization.BlueBox',
# ['mac', 'amd', ('nvidia', 0x1234)], bug=123)
pass
|
<commit_before># Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from gpu_test_expectations import GpuTestExpectations
# See the GpuTestExpectations class for documentation.
class GpuRasterizationExpectations(GpuTestExpectations):
def SetExpectations(self):
# Sample Usage:
# self.Fail('GpuRasterization.BlueBox',
# ['mac', 'amd', ('nvidia', 0x1234)], bug=123)
# Flaky on Mac Retina tryservers.
self.Fail('GpuRasterization.BlueBox',
['mac', ('nvidia', 0x0fe9)], bug=466690)
<commit_msg>Remove suppression of GpuRasterization.BlueBox on Mac
The regressing CLs have been reverted.
BUG=466690
Review URL: https://codereview.chromium.org/1014503005
Cr-Commit-Position: 972c6d2dc6dd5efdad1377c0d224e03eb8f276f7@{#320789}<commit_after>
|
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from gpu_test_expectations import GpuTestExpectations
# See the GpuTestExpectations class for documentation.
class GpuRasterizationExpectations(GpuTestExpectations):
def SetExpectations(self):
# Sample Usage:
# self.Fail('GpuRasterization.BlueBox',
# ['mac', 'amd', ('nvidia', 0x1234)], bug=123)
pass
|
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from gpu_test_expectations import GpuTestExpectations
# See the GpuTestExpectations class for documentation.
class GpuRasterizationExpectations(GpuTestExpectations):
def SetExpectations(self):
# Sample Usage:
# self.Fail('GpuRasterization.BlueBox',
# ['mac', 'amd', ('nvidia', 0x1234)], bug=123)
# Flaky on Mac Retina tryservers.
self.Fail('GpuRasterization.BlueBox',
['mac', ('nvidia', 0x0fe9)], bug=466690)
Remove suppression of GpuRasterization.BlueBox on Mac
The regressing CLs have been reverted.
BUG=466690
Review URL: https://codereview.chromium.org/1014503005
Cr-Commit-Position: 972c6d2dc6dd5efdad1377c0d224e03eb8f276f7@{#320789}# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from gpu_test_expectations import GpuTestExpectations
# See the GpuTestExpectations class for documentation.
class GpuRasterizationExpectations(GpuTestExpectations):
def SetExpectations(self):
# Sample Usage:
# self.Fail('GpuRasterization.BlueBox',
# ['mac', 'amd', ('nvidia', 0x1234)], bug=123)
pass
|
<commit_before># Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from gpu_test_expectations import GpuTestExpectations
# See the GpuTestExpectations class for documentation.
class GpuRasterizationExpectations(GpuTestExpectations):
def SetExpectations(self):
# Sample Usage:
# self.Fail('GpuRasterization.BlueBox',
# ['mac', 'amd', ('nvidia', 0x1234)], bug=123)
# Flaky on Mac Retina tryservers.
self.Fail('GpuRasterization.BlueBox',
['mac', ('nvidia', 0x0fe9)], bug=466690)
<commit_msg>Remove suppression of GpuRasterization.BlueBox on Mac
The regressing CLs have been reverted.
BUG=466690
Review URL: https://codereview.chromium.org/1014503005
Cr-Commit-Position: 972c6d2dc6dd5efdad1377c0d224e03eb8f276f7@{#320789}<commit_after># Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from gpu_test_expectations import GpuTestExpectations
# See the GpuTestExpectations class for documentation.
class GpuRasterizationExpectations(GpuTestExpectations):
def SetExpectations(self):
# Sample Usage:
# self.Fail('GpuRasterization.BlueBox',
# ['mac', 'amd', ('nvidia', 0x1234)], bug=123)
pass
|
e7fb646c5be44762e9f61c2927afbd565e72fc77
|
python/one-offs/import-toutv.py
|
python/one-offs/import-toutv.py
|
#!/usr/bin/env python
import os
import os.path
import re
import sys
# Ugly hack so we can import pytoutv_plus
lib_path = os.path.abspath(os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', 'video-tools'))
sys.path.append(lib_path)
import pytoutv_plus
def main():
filenames = []
for (dirpath, dirnames, dirfilenames) in os.walk(os.getcwd()):
if dirfilenames != []:
filenames.extend(dirfilenames)
data = pytoutv_plus.Data()
filename_chars = 'àÀâÂçÇéÉèÈêÊëîÎôÔ\w\-\'\.\(\)'
pattern = re.compile('([{0}]+)\.(S([\d]+)E[\d]+)\.([{0}]+)\.([\d]+)kbps\.ts'.format(filename_chars))
for filename in filenames:
# Skip anything not ending in .ts
if not filename.endswith('.ts'):
continue
match = pattern.search(filename)
if match:
emission_title = match.group(1).replace('.', ' ')
episode_sae = match.group(2)
episode_season = match.group(3)
episode_title = match.group(4).replace('.', ' ')
episode_bitrate = int(match.group(5)) * 1000
else:
sys.stderr.write('Warning: no match for file {}\n'.format(filename))
# Go to the next file
continue
for emission in data.emissions:
if emission_title.lower() == emission.title.lower():
break
else:
sys.stderr.write('Warning: no match for emission {}\n'.format(emission_title))
# Go to the next file
continue
for episode in emission.episodes:
if episode_title.lower() == episode.title.lower():
print('Skipping {} - {} - {}'.format(
emission_title,
episode_sae,
episode_title))
# Episode match, go to next file
break
else:
# If we've had an emission match but no episode match, add the episode to the emission
print('Importing {} - {} - {}'.format(
emission_title,
episode_sae,
episode_title))
ep = pytoutv_plus.Episode()
ep.title = episode_title
ep.bitrate = episode_bitrate
emission.episodes.append(ep)
data.write()
if __name__ == '__main__':
main()
|
Add script to import already downloaded videos into data file
|
Add script to import already downloaded videos into data file
|
Python
|
mit
|
bmaupin/junkpile,bmaupin/junkpile,bmaupin/junkpile,bmaupin/junkpile,bmaupin/junkpile,bmaupin/junkpile,bmaupin/junkpile,bmaupin/junkpile,bmaupin/junkpile
|
Add script to import already downloaded videos into data file
|
#!/usr/bin/env python
import os
import os.path
import re
import sys
# Ugly hack so we can import pytoutv_plus
lib_path = os.path.abspath(os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', 'video-tools'))
sys.path.append(lib_path)
import pytoutv_plus
def main():
filenames = []
for (dirpath, dirnames, dirfilenames) in os.walk(os.getcwd()):
if dirfilenames != []:
filenames.extend(dirfilenames)
data = pytoutv_plus.Data()
filename_chars = 'àÀâÂçÇéÉèÈêÊëîÎôÔ\w\-\'\.\(\)'
pattern = re.compile('([{0}]+)\.(S([\d]+)E[\d]+)\.([{0}]+)\.([\d]+)kbps\.ts'.format(filename_chars))
for filename in filenames:
# Skip anything not ending in .ts
if not filename.endswith('.ts'):
continue
match = pattern.search(filename)
if match:
emission_title = match.group(1).replace('.', ' ')
episode_sae = match.group(2)
episode_season = match.group(3)
episode_title = match.group(4).replace('.', ' ')
episode_bitrate = int(match.group(5)) * 1000
else:
sys.stderr.write('Warning: no match for file {}\n'.format(filename))
# Go to the next file
continue
for emission in data.emissions:
if emission_title.lower() == emission.title.lower():
break
else:
sys.stderr.write('Warning: no match for emission {}\n'.format(emission_title))
# Go to the next file
continue
for episode in emission.episodes:
if episode_title.lower() == episode.title.lower():
print('Skipping {} - {} - {}'.format(
emission_title,
episode_sae,
episode_title))
# Episode match, go to next file
break
else:
# If we've had an emission match but no episode match, add the episode to the emission
print('Importing {} - {} - {}'.format(
emission_title,
episode_sae,
episode_title))
ep = pytoutv_plus.Episode()
ep.title = episode_title
ep.bitrate = episode_bitrate
emission.episodes.append(ep)
data.write()
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add script to import already downloaded videos into data file<commit_after>
|
#!/usr/bin/env python
import os
import os.path
import re
import sys
# Ugly hack so we can import pytoutv_plus
lib_path = os.path.abspath(os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', 'video-tools'))
sys.path.append(lib_path)
import pytoutv_plus
def main():
filenames = []
for (dirpath, dirnames, dirfilenames) in os.walk(os.getcwd()):
if dirfilenames != []:
filenames.extend(dirfilenames)
data = pytoutv_plus.Data()
filename_chars = 'àÀâÂçÇéÉèÈêÊëîÎôÔ\w\-\'\.\(\)'
pattern = re.compile('([{0}]+)\.(S([\d]+)E[\d]+)\.([{0}]+)\.([\d]+)kbps\.ts'.format(filename_chars))
for filename in filenames:
# Skip anything not ending in .ts
if not filename.endswith('.ts'):
continue
match = pattern.search(filename)
if match:
emission_title = match.group(1).replace('.', ' ')
episode_sae = match.group(2)
episode_season = match.group(3)
episode_title = match.group(4).replace('.', ' ')
episode_bitrate = int(match.group(5)) * 1000
else:
sys.stderr.write('Warning: no match for file {}\n'.format(filename))
# Go to the next file
continue
for emission in data.emissions:
if emission_title.lower() == emission.title.lower():
break
else:
sys.stderr.write('Warning: no match for emission {}\n'.format(emission_title))
# Go to the next file
continue
for episode in emission.episodes:
if episode_title.lower() == episode.title.lower():
print('Skipping {} - {} - {}'.format(
emission_title,
episode_sae,
episode_title))
# Episode match, go to next file
break
else:
# If we've had an emission match but no episode match, add the episode to the emission
print('Importing {} - {} - {}'.format(
emission_title,
episode_sae,
episode_title))
ep = pytoutv_plus.Episode()
ep.title = episode_title
ep.bitrate = episode_bitrate
emission.episodes.append(ep)
data.write()
if __name__ == '__main__':
main()
|
Add script to import already downloaded videos into data file#!/usr/bin/env python
import os
import os.path
import re
import sys
# Ugly hack so we can import pytoutv_plus
lib_path = os.path.abspath(os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', 'video-tools'))
sys.path.append(lib_path)
import pytoutv_plus
def main():
filenames = []
for (dirpath, dirnames, dirfilenames) in os.walk(os.getcwd()):
if dirfilenames != []:
filenames.extend(dirfilenames)
data = pytoutv_plus.Data()
filename_chars = 'àÀâÂçÇéÉèÈêÊëîÎôÔ\w\-\'\.\(\)'
pattern = re.compile('([{0}]+)\.(S([\d]+)E[\d]+)\.([{0}]+)\.([\d]+)kbps\.ts'.format(filename_chars))
for filename in filenames:
# Skip anything not ending in .ts
if not filename.endswith('.ts'):
continue
match = pattern.search(filename)
if match:
emission_title = match.group(1).replace('.', ' ')
episode_sae = match.group(2)
episode_season = match.group(3)
episode_title = match.group(4).replace('.', ' ')
episode_bitrate = int(match.group(5)) * 1000
else:
sys.stderr.write('Warning: no match for file {}\n'.format(filename))
# Go to the next file
continue
for emission in data.emissions:
if emission_title.lower() == emission.title.lower():
break
else:
sys.stderr.write('Warning: no match for emission {}\n'.format(emission_title))
# Go to the next file
continue
for episode in emission.episodes:
if episode_title.lower() == episode.title.lower():
print('Skipping {} - {} - {}'.format(
emission_title,
episode_sae,
episode_title))
# Episode match, go to next file
break
else:
# If we've had an emission match but no episode match, add the episode to the emission
print('Importing {} - {} - {}'.format(
emission_title,
episode_sae,
episode_title))
ep = pytoutv_plus.Episode()
ep.title = episode_title
ep.bitrate = episode_bitrate
emission.episodes.append(ep)
data.write()
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add script to import already downloaded videos into data file<commit_after>#!/usr/bin/env python
import os
import os.path
import re
import sys
# Ugly hack so we can import pytoutv_plus
lib_path = os.path.abspath(os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', 'video-tools'))
sys.path.append(lib_path)
import pytoutv_plus
def main():
filenames = []
for (dirpath, dirnames, dirfilenames) in os.walk(os.getcwd()):
if dirfilenames != []:
filenames.extend(dirfilenames)
data = pytoutv_plus.Data()
filename_chars = 'àÀâÂçÇéÉèÈêÊëîÎôÔ\w\-\'\.\(\)'
pattern = re.compile('([{0}]+)\.(S([\d]+)E[\d]+)\.([{0}]+)\.([\d]+)kbps\.ts'.format(filename_chars))
for filename in filenames:
# Skip anything not ending in .ts
if not filename.endswith('.ts'):
continue
match = pattern.search(filename)
if match:
emission_title = match.group(1).replace('.', ' ')
episode_sae = match.group(2)
episode_season = match.group(3)
episode_title = match.group(4).replace('.', ' ')
episode_bitrate = int(match.group(5)) * 1000
else:
sys.stderr.write('Warning: no match for file {}\n'.format(filename))
# Go to the next file
continue
for emission in data.emissions:
if emission_title.lower() == emission.title.lower():
break
else:
sys.stderr.write('Warning: no match for emission {}\n'.format(emission_title))
# Go to the next file
continue
for episode in emission.episodes:
if episode_title.lower() == episode.title.lower():
print('Skipping {} - {} - {}'.format(
emission_title,
episode_sae,
episode_title))
# Episode match, go to next file
break
else:
# If we've had an emission match but no episode match, add the episode to the emission
print('Importing {} - {} - {}'.format(
emission_title,
episode_sae,
episode_title))
ep = pytoutv_plus.Episode()
ep.title = episode_title
ep.bitrate = episode_bitrate
emission.episodes.append(ep)
data.write()
if __name__ == '__main__':
main()
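
The regular expression is the heart of the script above, so a standalone check of how its groups map may help. The sample filename is invented purely for illustration.

import re

filename_chars = r"àÀâÂçÇéÉèÈêÊëîÎôÔ\w\-'\.\(\)"
pattern = re.compile(
    r'([{0}]+)\.(S([\d]+)E[\d]+)\.([{0}]+)\.([\d]+)kbps\.ts'.format(filename_chars))

m = pattern.search('Les.Enfants.S01E05.Le.souper.1800kbps.ts')
print(m.group(1))  # 'Les.Enfants' -> emission title (dots are later replaced by spaces)
print(m.group(2))  # 'S01E05'      -> season/episode tag
print(m.group(3))  # '01'          -> season number
print(m.group(4))  # 'Le.souper'   -> episode title
print(m.group(5))  # '1800'        -> bitrate in kbps (multiplied by 1000 in the script)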
|
|
22aaeccd973393be33f330a90757396554efc6eb
|
tasks/cusip/python/cuisp.py
|
tasks/cusip/python/cuisp.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import math
def cusip_check(cusip: str) -> bool:
if len(cusip) != 9:
raise ValueError('CUSIP must be 9 characters')
cusip = cusip.upper()
total = 0
for i in range(8):
c = cusip[i]
if c.isdigit():
v = int(c)
elif c.isalpha():
p = ord(c) - ord('A') + 1
v = p + 9
elif c == '*':
v = 36
elif c == '@':
v = 37
elif c == '#':
v = 38
if i % 2 != 0:
v *= 2
total += int(v / 10) + v % 10
check = (10 - (total % 10)) % 10
return str(check) == cusip[-1]
if __name__ == '__main__':
codes = [
'037833100',
'17275R102',
'38259P508',
'594918104',
'68389X106',
'68389X105'
]
for code in codes:
print(f'{code} -> {cusip_check(code)}')
|
Implement CUSIP task in Python
|
Implement CUSIP task in Python
|
Python
|
unlicense
|
mattmahn/rosetta-code,mattmahn/rosetta-code
|
Implement CUSIP task in Python
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import math
def cusip_check(cusip: str) -> bool:
if len(cusip) != 9:
raise ValueError('CUSIP must be 9 characters')
cusip = cusip.upper()
total = 0
for i in range(8):
c = cusip[i]
if c.isdigit():
v = int(c)
elif c.isalpha():
p = ord(c) - ord('A') + 1
v = p + 9
elif c == '*':
v = 36
elif c == '@':
v = 37
elif c == '#':
v = 38
if i % 2 != 0:
v *= 2
total += int(v / 10) + v % 10
check = (10 - (total % 10)) % 10
return str(check) == cusip[-1]
if __name__ == '__main__':
codes = [
'037833100',
'17275R102',
'38259P508',
'594918104',
'68389X106',
'68389X105'
]
for code in codes:
print(f'{code} -> {cusip_check(code)}')
|
<commit_before><commit_msg>Implement CUSIP task in Python<commit_after>
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import math
def cusip_check(cusip: str) -> bool:
if len(cusip) != 9:
raise ValueError('CUSIP must be 9 characters')
cusip = cusip.upper()
total = 0
for i in range(8):
c = cusip[i]
if c.isdigit():
v = int(c)
elif c.isalpha():
p = ord(c) - ord('A') + 1
v = p + 9
elif c == '*':
v = 36
elif c == '@':
v = 37
elif c == '#':
v = 38
if i % 2 != 0:
v *= 2
total += int(v / 10) + v % 10
check = (10 - (total % 10)) % 10
return str(check) == cusip[-1]
if __name__ == '__main__':
codes = [
'037833100',
'17275R102',
'38259P508',
'594918104',
'68389X106',
'68389X105'
]
for code in codes:
print(f'{code} -> {cusip_check(code)}')
|
Implement CUSIP task in Python#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import math
def cusip_check(cusip: str) -> bool:
if len(cusip) != 9:
raise ValueError('CUSIP must be 9 characters')
cusip = cusip.upper()
total = 0
for i in range(8):
c = cusip[i]
if c.isdigit():
v = int(c)
elif c.isalpha():
p = ord(c) - ord('A') + 1
v = p + 9
elif c == '*':
v = 36
elif c == '@':
v = 37
elif c == '#':
v = 38
if i % 2 != 0:
v *= 2
total += int(v / 10) + v % 10
check = (10 - (total % 10)) % 10
return str(check) == cusip[-1]
if __name__ == '__main__':
codes = [
'037833100',
'17275R102',
'38259P508',
'594918104',
'68389X106',
'68389X105'
]
for code in codes:
print(f'{code} -> {cusip_check(code)}')
|
<commit_before><commit_msg>Implement CUSIP task in Python<commit_after>#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import math
def cusip_check(cusip: str) -> bool:
if len(cusip) != 9:
raise ValueError('CUSIP must be 9 characters')
cusip = cusip.upper()
total = 0
for i in range(8):
c = cusip[i]
if c.isdigit():
v = int(c)
elif c.isalpha():
p = ord(c) - ord('A') + 1
v = p + 9
elif c == '*':
v = 36
elif c == '@':
v = 37
elif c == '#':
v = 38
if i % 2 != 0:
v *= 2
total += int(v / 10) + v % 10
check = (10 - (total % 10)) % 10
return str(check) == cusip[-1]
if __name__ == '__main__':
codes = [
'037833100',
'17275R102',
'38259P508',
'594918104',
'68389X106',
'68389X105'
]
for code in codes:
print(f'{code} -> {cusip_check(code)}')
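
To make the check-digit rule concrete, here is the same arithmetic as cusip_check() unrolled for the first code in the list above, '037833100'. This is purely illustrative; it covers only the all-digit case of that particular code.

base = '03783310'                               # first eight characters of '037833100'
values = [int(c) for c in base]                 # [0, 3, 7, 8, 3, 3, 1, 0]
doubled = [v * 2 if i % 2 else v
           for i, v in enumerate(values)]       # [0, 6, 7, 16, 3, 6, 1, 0]
total = sum(v // 10 + v % 10 for v in doubled)  # 0 + 6 + 7 + (1 + 6) + 3 + 6 + 1 + 0 = 30
check = (10 - total % 10) % 10                  # 0, which matches the ninth character
print(check)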
|
|
e5310f1147694992d1eb48a037261f805955294c
|
tests/unit/forms/widget_tests.py
|
tests/unit/forms/widget_tests.py
|
import nose
from oscar.forms import widgets
def test_datetime_to_date_format_conversion():
format_testcases = (
('%Y-%m-%d', 'yy-mm-dd'),
('%Y-%m-%d %H:%M', 'yy-mm-dd'),
)
def compare(format, expected):
nose.tools.eq_(
widgets.datetime_format_to_js_date_format(format), expected)
for format, expected in format_testcases:
yield compare, format, expected
def test_datetime_to_time_format_conversion():
format_testcases = (
('%Y-%m-%d', ''),
('%Y-%m-%d %H:%M', 'HH:mm'),
('%d/%m/%Y', ''),
)
def compare(format, expected):
nose.tools.eq_(
widgets.datetime_format_to_js_time_format(format), expected)
for format, expected in format_testcases:
yield compare, format, expected
|
import nose
from oscar.forms import widgets
def compare_date_format(format, expected):
nose.tools.eq_(
widgets.datetime_format_to_js_date_format(format), expected)
def test_datetime_to_date_format_conversion():
format_testcases = (
('%Y-%m-%d', 'yy-mm-dd'),
('%Y-%m-%d %H:%M', 'yy-mm-dd'),
)
for format, expected in format_testcases:
yield compare_date_format, format, expected
def compare_time_format(format, expected):
nose.tools.eq_(
widgets.datetime_format_to_js_time_format(format), expected)
def test_datetime_to_time_format_conversion():
format_testcases = (
('%Y-%m-%d', ''),
('%Y-%m-%d %H:%M', 'HH:mm'),
('%d/%m/%Y', ''),
)
for format, expected in format_testcases:
yield compare_time_format, format, expected
|
Adjust test generator to make tests pass
|
Adjust test generator to make tests pass
|
Python
|
bsd-3-clause
|
amirrpp/django-oscar,Jannes123/django-oscar,jmt4/django-oscar,marcoantoniooliveira/labweb,taedori81/django-oscar,kapt/django-oscar,josesanch/django-oscar,ademuk/django-oscar,pasqualguerrero/django-oscar,dongguangming/django-oscar,amirrpp/django-oscar,pdonadeo/django-oscar,nickpack/django-oscar,bnprk/django-oscar,jlmadurga/django-oscar,ka7eh/django-oscar,ahmetdaglarbas/e-commerce,MatthewWilkes/django-oscar,nfletton/django-oscar,kapari/django-oscar,jmt4/django-oscar,DrOctogon/unwash_ecom,john-parton/django-oscar,lijoantony/django-oscar,vovanbo/django-oscar,django-oscar/django-oscar,elliotthill/django-oscar,manevant/django-oscar,saadatqadri/django-oscar,binarydud/django-oscar,Idematica/django-oscar,kapari/django-oscar,adamend/django-oscar,lijoantony/django-oscar,spartonia/django-oscar,DrOctogon/unwash_ecom,jinnykoo/wuyisj,Idematica/django-oscar,monikasulik/django-oscar,ahmetdaglarbas/e-commerce,adamend/django-oscar,jmt4/django-oscar,kapt/django-oscar,machtfit/django-oscar,itbabu/django-oscar,mexeniz/django-oscar,Bogh/django-oscar,dongguangming/django-oscar,lijoantony/django-oscar,saadatqadri/django-oscar,WillisXChen/django-oscar,pdonadeo/django-oscar,manevant/django-oscar,pasqualguerrero/django-oscar,monikasulik/django-oscar,pdonadeo/django-oscar,adamend/django-oscar,bschuon/django-oscar,okfish/django-oscar,faratro/django-oscar,bschuon/django-oscar,faratro/django-oscar,QLGu/django-oscar,eddiep1101/django-oscar,jinnykoo/wuyisj.com,faratro/django-oscar,okfish/django-oscar,ka7eh/django-oscar,rocopartners/django-oscar,WadeYuChen/django-oscar,Idematica/django-oscar,josesanch/django-oscar,marcoantoniooliveira/labweb,WillisXChen/django-oscar,pasqualguerrero/django-oscar,faratro/django-oscar,binarydud/django-oscar,WillisXChen/django-oscar,solarissmoke/django-oscar,WadeYuChen/django-oscar,sasha0/django-oscar,django-oscar/django-oscar,rocopartners/django-oscar,rocopartners/django-oscar,bnprk/django-oscar,binarydud/django-oscar,okfish/django-oscar,jinnykoo/wuyisj.com,MatthewWilkes/django-oscar,eddiep1101/django-oscar,makielab/django-oscar,solarissmoke/django-oscar,lijoantony/django-oscar,rocopartners/django-oscar,spartonia/django-oscar,nfletton/django-oscar,QLGu/django-oscar,spartonia/django-oscar,saadatqadri/django-oscar,WillisXChen/django-oscar,jinnykoo/wuyisj.com,django-oscar/django-oscar,makielab/django-oscar,nickpack/django-oscar,Bogh/django-oscar,taedori81/django-oscar,thechampanurag/django-oscar,sonofatailor/django-oscar,jinnykoo/wuyisj,dongguangming/django-oscar,solarissmoke/django-oscar,mexeniz/django-oscar,jinnykoo/wuyisj,spartonia/django-oscar,josesanch/django-oscar,bschuon/django-oscar,marcoantoniooliveira/labweb,saadatqadri/django-oscar,jinnykoo/christmas,vovanbo/django-oscar,Jannes123/django-oscar,nfletton/django-oscar,binarydud/django-oscar,kapari/django-oscar,jlmadurga/django-oscar,amirrpp/django-oscar,thechampanurag/django-oscar,elliotthill/django-oscar,michaelkuty/django-oscar,amirrpp/django-oscar,jinnykoo/christmas,nickpack/django-oscar,pasqualguerrero/django-oscar,nfletton/django-oscar,solarissmoke/django-oscar,anentropic/django-oscar,django-oscar/django-oscar,bschuon/django-oscar,makielab/django-oscar,anentropic/django-oscar,QLGu/django-oscar,WillisXChen/django-oscar,thechampanurag/django-oscar,MatthewWilkes/django-oscar,john-parton/django-oscar,jmt4/django-oscar,okfish/django-oscar,taedori81/django-oscar,adamend/django-oscar,michaelkuty/django-oscar,john-parton/django-oscar,taedori81/django-oscar,ademuk/django-oscar,itbabu/django-oscar,itbabu/django-oscar,michaelkuty/django-oscar,Wa
deYuChen/django-oscar,itbabu/django-oscar,jlmadurga/django-oscar,mexeniz/django-oscar,QLGu/django-oscar,bnprk/django-oscar,WadeYuChen/django-oscar,michaelkuty/django-oscar,Bogh/django-oscar,manevant/django-oscar,manevant/django-oscar,ka7eh/django-oscar,sasha0/django-oscar,bnprk/django-oscar,MatthewWilkes/django-oscar,marcoantoniooliveira/labweb,john-parton/django-oscar,sasha0/django-oscar,machtfit/django-oscar,sonofatailor/django-oscar,jlmadurga/django-oscar,Jannes123/django-oscar,ademuk/django-oscar,elliotthill/django-oscar,sonofatailor/django-oscar,pdonadeo/django-oscar,kapari/django-oscar,anentropic/django-oscar,sonofatailor/django-oscar,ademuk/django-oscar,nickpack/django-oscar,ahmetdaglarbas/e-commerce,Jannes123/django-oscar,kapt/django-oscar,Bogh/django-oscar,anentropic/django-oscar,sasha0/django-oscar,dongguangming/django-oscar,eddiep1101/django-oscar,thechampanurag/django-oscar,jinnykoo/wuyisj.com,eddiep1101/django-oscar,machtfit/django-oscar,WillisXChen/django-oscar,jinnykoo/christmas,DrOctogon/unwash_ecom,makielab/django-oscar,vovanbo/django-oscar,vovanbo/django-oscar,ahmetdaglarbas/e-commerce,monikasulik/django-oscar,jinnykoo/wuyisj,ka7eh/django-oscar,monikasulik/django-oscar,mexeniz/django-oscar
|
import nose
from oscar.forms import widgets
def test_datetime_to_date_format_conversion():
format_testcases = (
('%Y-%m-%d', 'yy-mm-dd'),
('%Y-%m-%d %H:%M', 'yy-mm-dd'),
)
def compare(format, expected):
nose.tools.eq_(
widgets.datetime_format_to_js_date_format(format), expected)
for format, expected in format_testcases:
yield compare, format, expected
def test_datetime_to_time_format_conversion():
format_testcases = (
('%Y-%m-%d', ''),
('%Y-%m-%d %H:%M', 'HH:mm'),
('%d/%m/%Y', ''),
)
def compare(format, expected):
nose.tools.eq_(
widgets.datetime_format_to_js_time_format(format), expected)
for format, expected in format_testcases:
yield compare, format, expected
Adjust test generator to make tests pass
|
import nose
from oscar.forms import widgets
def compare_date_format(format, expected):
nose.tools.eq_(
widgets.datetime_format_to_js_date_format(format), expected)
def test_datetime_to_date_format_conversion():
format_testcases = (
('%Y-%m-%d', 'yy-mm-dd'),
('%Y-%m-%d %H:%M', 'yy-mm-dd'),
)
for format, expected in format_testcases:
yield compare_date_format, format, expected
def compare_time_format(format, expected):
nose.tools.eq_(
widgets.datetime_format_to_js_time_format(format), expected)
def test_datetime_to_time_format_conversion():
format_testcases = (
('%Y-%m-%d', ''),
('%Y-%m-%d %H:%M', 'HH:mm'),
('%d/%m/%Y', ''),
)
for format, expected in format_testcases:
yield compare_time_format, format, expected
|
<commit_before>import nose
from oscar.forms import widgets
def test_datetime_to_date_format_conversion():
format_testcases = (
('%Y-%m-%d', 'yy-mm-dd'),
('%Y-%m-%d %H:%M', 'yy-mm-dd'),
)
def compare(format, expected):
nose.tools.eq_(
widgets.datetime_format_to_js_date_format(format), expected)
for format, expected in format_testcases:
yield compare, format, expected
def test_datetime_to_time_format_conversion():
format_testcases = (
('%Y-%m-%d', ''),
('%Y-%m-%d %H:%M', 'HH:mm'),
('%d/%m/%Y', ''),
)
def compare(format, expected):
nose.tools.eq_(
widgets.datetime_format_to_js_time_format(format), expected)
for format, expected in format_testcases:
yield compare, format, expected
<commit_msg>Adjust test generator to make tests pass<commit_after>
|
import nose
from oscar.forms import widgets
def compare_date_format(format, expected):
nose.tools.eq_(
widgets.datetime_format_to_js_date_format(format), expected)
def test_datetime_to_date_format_conversion():
format_testcases = (
('%Y-%m-%d', 'yy-mm-dd'),
('%Y-%m-%d %H:%M', 'yy-mm-dd'),
)
for format, expected in format_testcases:
yield compare_date_format, format, expected
def compare_time_format(format, expected):
nose.tools.eq_(
widgets.datetime_format_to_js_time_format(format), expected)
def test_datetime_to_time_format_conversion():
format_testcases = (
('%Y-%m-%d', ''),
('%Y-%m-%d %H:%M', 'HH:mm'),
('%d/%m/%Y', ''),
)
for format, expected in format_testcases:
yield compare_time_format, format, expected
|
import nose
from oscar.forms import widgets
def test_datetime_to_date_format_conversion():
format_testcases = (
('%Y-%m-%d', 'yy-mm-dd'),
('%Y-%m-%d %H:%M', 'yy-mm-dd'),
)
def compare(format, expected):
nose.tools.eq_(
widgets.datetime_format_to_js_date_format(format), expected)
for format, expected in format_testcases:
yield compare, format, expected
def test_datetime_to_time_format_conversion():
format_testcases = (
('%Y-%m-%d', ''),
('%Y-%m-%d %H:%M', 'HH:mm'),
('%d/%m/%Y', ''),
)
def compare(format, expected):
nose.tools.eq_(
widgets.datetime_format_to_js_time_format(format), expected)
for format, expected in format_testcases:
yield compare, format, expected
Adjust test generator to make tests passimport nose
from oscar.forms import widgets
def compare_date_format(format, expected):
nose.tools.eq_(
widgets.datetime_format_to_js_date_format(format), expected)
def test_datetime_to_date_format_conversion():
format_testcases = (
('%Y-%m-%d', 'yy-mm-dd'),
('%Y-%m-%d %H:%M', 'yy-mm-dd'),
)
for format, expected in format_testcases:
yield compare_date_format, format, expected
def compare_time_format(format, expected):
nose.tools.eq_(
widgets.datetime_format_to_js_time_format(format), expected)
def test_datetime_to_time_format_conversion():
format_testcases = (
('%Y-%m-%d', ''),
('%Y-%m-%d %H:%M', 'HH:mm'),
('%d/%m/%Y', ''),
)
for format, expected in format_testcases:
yield compare_time_format, format, expected
|
<commit_before>import nose
from oscar.forms import widgets
def test_datetime_to_date_format_conversion():
format_testcases = (
('%Y-%m-%d', 'yy-mm-dd'),
('%Y-%m-%d %H:%M', 'yy-mm-dd'),
)
def compare(format, expected):
nose.tools.eq_(
widgets.datetime_format_to_js_date_format(format), expected)
for format, expected in format_testcases:
yield compare, format, expected
def test_datetime_to_time_format_conversion():
format_testcases = (
('%Y-%m-%d', ''),
('%Y-%m-%d %H:%M', 'HH:mm'),
('%d/%m/%Y', ''),
)
def compare(format, expected):
nose.tools.eq_(
widgets.datetime_format_to_js_time_format(format), expected)
for format, expected in format_testcases:
yield compare, format, expected
<commit_msg>Adjust test generator to make tests pass<commit_after>import nose
from oscar.forms import widgets
def compare_date_format(format, expected):
nose.tools.eq_(
widgets.datetime_format_to_js_date_format(format), expected)
def test_datetime_to_date_format_conversion():
format_testcases = (
('%Y-%m-%d', 'yy-mm-dd'),
('%Y-%m-%d %H:%M', 'yy-mm-dd'),
)
for format, expected in format_testcases:
yield compare_date_format, format, expected
def compare_time_format(format, expected):
nose.tools.eq_(
widgets.datetime_format_to_js_time_format(format), expected)
def test_datetime_to_time_format_conversion():
format_testcases = (
('%Y-%m-%d', ''),
('%Y-%m-%d %H:%M', 'HH:mm'),
('%d/%m/%Y', ''),
)
for format, expected in format_testcases:
yield compare_time_format, format, expected
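
The change above keeps the nose yield-style generators but moves the compare helpers to module level. For comparison, a roughly equivalent parametrized test written for pytest (purely illustrative — the project used nose, not pytest) would look like this:

import pytest
from oscar.forms import widgets

@pytest.mark.parametrize('fmt, expected', [
    ('%Y-%m-%d', 'yy-mm-dd'),
    ('%Y-%m-%d %H:%M', 'yy-mm-dd'),
])
def test_datetime_to_date_format_conversion(fmt, expected):
    assert widgets.datetime_format_to_js_date_format(fmt) == expected

@pytest.mark.parametrize('fmt, expected', [
    ('%Y-%m-%d', ''),
    ('%Y-%m-%d %H:%M', 'HH:mm'),
    ('%d/%m/%Y', ''),
])
def test_datetime_to_time_format_conversion(fmt, expected):
    assert widgets.datetime_format_to_js_time_format(fmt) == expected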
|
cd230c68f1a6db2b63efa82bc5a723492804c7ed
|
xorgauth/accounts/migrations/0005_better_user-facing_name.py
|
xorgauth/accounts/migrations/0005_better_user-facing_name.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2017-11-19 20:04
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('accounts', '0004_add_groupmembership_unique_constraint'),
]
operations = [
migrations.AlterField(
model_name='user',
name='hrid',
field=models.SlugField(unique=True, verbose_name='username'),
),
]
|
Add a migration for the recent change in User table
|
Add a migration for the recent change in User table
Commit 850be15e7636 ("Give a better user-facing name to hrid field")
modified User.hrid name without adding a migration for it.
|
Python
|
agpl-3.0
|
Polytechnique-org/xorgauth,Polytechnique-org/xorgauth
|
Add a migration for the recent change in User table
Commit 850be15e7636 ("Give a better user-facing name to hrid field")
modified User.hrid name without adding a migration for it.
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2017-11-19 20:04
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('accounts', '0004_add_groupmembership_unique_constraint'),
]
operations = [
migrations.AlterField(
model_name='user',
name='hrid',
field=models.SlugField(unique=True, verbose_name='username'),
),
]
|
<commit_before><commit_msg>Add a migration for the recent change in User table
Commit 850be15e7636 ("Give a better user-facing name to hrid field")
modified User.hrid name without adding a migration for it.<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2017-11-19 20:04
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('accounts', '0004_add_groupmembership_unique_constraint'),
]
operations = [
migrations.AlterField(
model_name='user',
name='hrid',
field=models.SlugField(unique=True, verbose_name='username'),
),
]
|
Add a migration for the recent change in User table
Commit 850be15e7636 ("Give a better user-facing name to hrid field")
modified User.hrid name without adding a migration for it.# -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2017-11-19 20:04
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('accounts', '0004_add_groupmembership_unique_constraint'),
]
operations = [
migrations.AlterField(
model_name='user',
name='hrid',
field=models.SlugField(unique=True, verbose_name='username'),
),
]
|
<commit_before><commit_msg>Add a migration for the recent change in User table
Commit 850be15e7636 ("Give a better user-facing name to hrid field")
modified User.hrid name without adding a migration for it.<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2017-11-19 20:04
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('accounts', '0004_add_groupmembership_unique_constraint'),
]
operations = [
migrations.AlterField(
model_name='user',
name='hrid',
field=models.SlugField(unique=True, verbose_name='username'),
),
]
|
|
9b8f425cdb571dab251c743519a436e23d8ce331
|
test-raw.py
|
test-raw.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Test raw data with MSSImage class.
import sys
from mss import MSSImage
if len(sys.argv) < 4:
print('python {0} data.raw width height'.format(sys.argv[0]))
else:
with open(sys.argv[1], 'rb') as f:
data = f.read()
width = sys.argv[2]
height = sys.argv[3]
try:
mss = MSSImage(data, width, height)
mss.dump(output=sys.argv[1])
except Exception as ex:
print(ex)
raise
|
Add a little file to test raw data (pixels) into MSSImage class
|
Add a little file to test raw data (pixels) into MSSImage class
|
Python
|
mit
|
BoboTiG/python-mss
|
Add a little file to test raw data (pixels) into MSSImage class
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Test raw data with MSSImage class.
import sys
from mss import MSSImage
if len(sys.argv) < 4:
print('python {0} data.raw width height'.format(sys.argv[0]))
else:
with open(sys.argv[1], 'rb') as f:
data = f.read()
width = sys.argv[2]
height = sys.argv[3]
try:
mss = MSSImage(data, width, height)
mss.dump(output=sys.argv[1])
except Exception as ex:
print(ex)
raise
|
<commit_before><commit_msg>Add a little file to test raw data (pixels) into MSSImage class<commit_after>
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Test raw data with MSSImage class.
import sys
from mss import MSSImage
if len(sys.argv) < 4:
print('python {0} data.raw width height'.format(sys.argv[0]))
else:
with open(sys.argv[1], 'rb') as f:
data = f.read()
width = sys.argv[2]
height = sys.argv[3]
try:
mss = MSSImage(data, width, height)
mss.dump(output=sys.argv[1])
except Exception as ex:
print(ex)
raise
|
Add a little file to test raw data (pixels) into MSSImage class#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Test raw data with MSSImage class.
import sys
from mss import MSSImage
if len(sys.argv) < 4:
print('python {0} data.raw width height'.format(sys.argv[0]))
else:
with open(sys.argv[1], 'rb') as f:
data = f.read()
width = sys.argv[2]
height = sys.argv[3]
try:
mss = MSSImage(data, width, height)
mss.dump(output=sys.argv[1])
except Exception as ex:
print(ex)
raise
|
<commit_before><commit_msg>Add a little file to test raw data (pixels) into MSSImage class<commit_after>#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Test raw data with MSSImage class.
import sys
from mss import MSSImage
if len(sys.argv) < 4:
print('python {0} data.raw width height'.format(sys.argv[0]))
else:
with open(sys.argv[1], 'rb') as f:
data = f.read()
width = sys.argv[2]
height = sys.argv[3]
try:
mss = MSSImage(data, width, height)
mss.dump(output=sys.argv[1])
except Exception as ex:
print(ex)
raise
|
|
34b92b40454a08dcf8fcadd2cdaaac11546e1d80
|
test/integration/test_9k.py
|
test/integration/test_9k.py
|
import tarfile
from . import *
class Test9k(IntegrationTest):
def __init__(self, *args, **kwargs):
IntegrationTest.__init__(
self, os.path.join(examples_dir, '01_executable'), configure=False,
*args, **kwargs
)
def test_build(self):
os.chdir(self.srcdir)
cleandir(self.builddir)
self.assertPopen(
['9k', '--debug', self.builddir, '--backend', self.backend]
)
os.chdir(self.builddir)
self.build(executable('simple'))
self.assertOutput([executable('simple')], 'hello, world!\n')
|
Add an integration test for the `9k` shorthand
|
Add an integration test for the `9k` shorthand
|
Python
|
bsd-3-clause
|
jimporter/bfg9000,jimporter/bfg9000,jimporter/bfg9000,jimporter/bfg9000
|
Add an integration test for the `9k` shorthand
|
import tarfile
from . import *
class Test9k(IntegrationTest):
def __init__(self, *args, **kwargs):
IntegrationTest.__init__(
self, os.path.join(examples_dir, '01_executable'), configure=False,
*args, **kwargs
)
def test_build(self):
os.chdir(self.srcdir)
cleandir(self.builddir)
self.assertPopen(
['9k', '--debug', self.builddir, '--backend', self.backend]
)
os.chdir(self.builddir)
self.build(executable('simple'))
self.assertOutput([executable('simple')], 'hello, world!\n')
|
<commit_before><commit_msg>Add an integration test for the `9k` shorthand<commit_after>
|
import tarfile
from . import *
class Test9k(IntegrationTest):
def __init__(self, *args, **kwargs):
IntegrationTest.__init__(
self, os.path.join(examples_dir, '01_executable'), configure=False,
*args, **kwargs
)
def test_build(self):
os.chdir(self.srcdir)
cleandir(self.builddir)
self.assertPopen(
['9k', '--debug', self.builddir, '--backend', self.backend]
)
os.chdir(self.builddir)
self.build(executable('simple'))
self.assertOutput([executable('simple')], 'hello, world!\n')
|
Add an integration test for the `9k` shorthandimport tarfile
from . import *
class Test9k(IntegrationTest):
def __init__(self, *args, **kwargs):
IntegrationTest.__init__(
self, os.path.join(examples_dir, '01_executable'), configure=False,
*args, **kwargs
)
def test_build(self):
os.chdir(self.srcdir)
cleandir(self.builddir)
self.assertPopen(
['9k', '--debug', self.builddir, '--backend', self.backend]
)
os.chdir(self.builddir)
self.build(executable('simple'))
self.assertOutput([executable('simple')], 'hello, world!\n')
|
<commit_before><commit_msg>Add an integration test for the `9k` shorthand<commit_after>import tarfile
from . import *
class Test9k(IntegrationTest):
def __init__(self, *args, **kwargs):
IntegrationTest.__init__(
self, os.path.join(examples_dir, '01_executable'), configure=False,
*args, **kwargs
)
def test_build(self):
os.chdir(self.srcdir)
cleandir(self.builddir)
self.assertPopen(
['9k', '--debug', self.builddir, '--backend', self.backend]
)
os.chdir(self.builddir)
self.build(executable('simple'))
self.assertOutput([executable('simple')], 'hello, world!\n')
|
|
abcc3376d7b27a4e425ca297025d7556e2fb2339
|
djconnectwise/migrations/0028_remove_company_company_alias.py
|
djconnectwise/migrations/0028_remove_company_company_alias.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('djconnectwise', '0027_auto_20170605_1534'),
]
operations = [
migrations.RemoveField(
model_name='company',
name='company_alias',
),
]
|
Add migration for previous commit
|
Add migration for previous commit
|
Python
|
mit
|
KerkhoffTechnologies/django-connectwise,KerkhoffTechnologies/django-connectwise,AparatTechnologies/django-connectwise
|
Add migration for previous commit
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('djconnectwise', '0027_auto_20170605_1534'),
]
operations = [
migrations.RemoveField(
model_name='company',
name='company_alias',
),
]
|
<commit_before><commit_msg>Add migration for previous commit<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('djconnectwise', '0027_auto_20170605_1534'),
]
operations = [
migrations.RemoveField(
model_name='company',
name='company_alias',
),
]
|
Add migration for previous commit# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('djconnectwise', '0027_auto_20170605_1534'),
]
operations = [
migrations.RemoveField(
model_name='company',
name='company_alias',
),
]
|
<commit_before><commit_msg>Add migration for previous commit<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('djconnectwise', '0027_auto_20170605_1534'),
]
operations = [
migrations.RemoveField(
model_name='company',
name='company_alias',
),
]
|
|
853e2e5b6c410daadc5cdf15d97bfc86ba77defa
|
paystackapi/tests/test_charge.py
|
paystackapi/tests/test_charge.py
|
import httpretty
from paystackapi.tests.base_test_case import BaseTestCase
from paystackapi.charge import Charge
class TestCharge(BaseTestCase):
@httpretty.activate
def test_start_charge(self):
"""Method defined to test start charge."""
httpretty.register_uri(
httpretty.POST,
self.endpoint_url("/charge"),
content_type='text/json',
body='{"status": true, "message": "Charge attempted"}',
status=201,
)
response = Charge.charge(
email="CUS_je02lbimlqixzax",
amount=42000,
metadata={
"custom_fields": [
{
"value":"makurdi",
"display_name": "Donation for",
"variable_name": "donation_for"
},
],
},
bank={
"code":"057",
"account_number":"0000000000"
},
birthday="1995-12-23"
)
self.assertTrue(response['status'])
@httpretty.activate
def test_submit_pin(self):
"""Method defined to test submit pin."""
httpretty.register_uri(
httpretty.POST,
self.endpoint_url("/charge/submit_pin"),
content_type='text/json',
body='{"status": true, "message": "Charge attempted"}',
status=201,
)
response = Charge.submit_pin(
pin="0987",
reference="5bwib5v6anhe9xa",
)
self.assertTrue(response['status'])
|
Test submit_pin method defined for a charge
|
Test submit_pin method defined for a charge
|
Python
|
mit
|
andela-sjames/paystack-python
|
Test submit_pin method defined for a charge
|
import httpretty
from paystackapi.tests.base_test_case import BaseTestCase
from paystackapi.charge import Charge
class TestCharge(BaseTestCase):
@httpretty.activate
def test_start_charge(self):
"""Method defined to test start charge."""
httpretty.register_uri(
httpretty.POST,
self.endpoint_url("/charge"),
content_type='text/json',
body='{"status": true, "message": "Charge attempted"}',
status=201,
)
response = Charge.charge(
email="CUS_je02lbimlqixzax",
amount=42000,
metadata={
"custom_fields": [
{
"value":"makurdi",
"display_name": "Donation for",
"variable_name": "donation_for"
},
],
},
bank={
"code":"057",
"account_number":"0000000000"
},
birthday="1995-12-23"
)
self.assertTrue(response['status'])
@httpretty.activate
def test_submit_pin(self):
"""Method defined to test submit pin."""
httpretty.register_uri(
httpretty.POST,
self.endpoint_url("/charge/submit_pin"),
content_type='text/json',
body='{"status": true, "message": "Charge attempted"}',
status=201,
)
response = Charge.submit_pin(
pin="0987",
reference="5bwib5v6anhe9xa",
)
self.assertTrue(response['status'])
|
<commit_before><commit_msg>Test submit_pin method defined for a charge<commit_after>
|
import httpretty
from paystackapi.tests.base_test_case import BaseTestCase
from paystackapi.charge import Charge
class TestCharge(BaseTestCase):
@httpretty.activate
def test_start_charge(self):
"""Method defined to test start charge."""
httpretty.register_uri(
httpretty.POST,
self.endpoint_url("/charge"),
content_type='text/json',
body='{"status": true, "message": "Charge attempted"}',
status=201,
)
response = Charge.charge(
email="CUS_je02lbimlqixzax",
amount=42000,
metadata={
"custom_fields": [
{
"value":"makurdi",
"display_name": "Donation for",
"variable_name": "donation_for"
},
],
},
bank={
"code":"057",
"account_number":"0000000000"
},
birthday="1995-12-23"
)
self.assertTrue(response['status'])
@httpretty.activate
def test_submit_pin(self):
"""Method defined to test submit pin."""
httpretty.register_uri(
httpretty.POST,
self.endpoint_url("/charge/submit_pin"),
content_type='text/json',
body='{"status": true, "message": "Charge attempted"}',
status=201,
)
response = Charge.submit_pin(
pin="0987",
reference="5bwib5v6anhe9xa",
)
self.assertTrue(response['status'])
|
Test submit_pin method defined for a chargeimport httpretty
from paystackapi.tests.base_test_case import BaseTestCase
from paystackapi.charge import Charge
class TestCharge(BaseTestCase):
@httpretty.activate
def test_start_charge(self):
"""Method defined to test start charge."""
httpretty.register_uri(
httpretty.POST,
self.endpoint_url("/charge"),
content_type='text/json',
body='{"status": true, "message": "Charge attempted"}',
status=201,
)
response = Charge.charge(
email="CUS_je02lbimlqixzax",
amount=42000,
metadata={
"custom_fields": [
{
"value":"makurdi",
"display_name": "Donation for",
"variable_name": "donation_for"
},
],
},
bank={
"code":"057",
"account_number":"0000000000"
},
birthday="1995-12-23"
)
self.assertTrue(response['status'])
@httpretty.activate
def test_submit_pin(self):
"""Method defined to test submit pin."""
httpretty.register_uri(
httpretty.POST,
self.endpoint_url("/charge/submit_pin"),
content_type='text/json',
body='{"status": true, "message": "Charge attempted"}',
status=201,
)
response = Charge.submit_pin(
pin="0987",
reference="5bwib5v6anhe9xa",
)
self.assertTrue(response['status'])
|
<commit_before><commit_msg>Test submit_pin method defined for a charge<commit_after>import httpretty
from paystackapi.tests.base_test_case import BaseTestCase
from paystackapi.charge import Charge
class TestCharge(BaseTestCase):
@httpretty.activate
def test_start_charge(self):
"""Method defined to test start charge."""
httpretty.register_uri(
httpretty.POST,
self.endpoint_url("/charge"),
content_type='text/json',
body='{"status": true, "message": "Charge attempted"}',
status=201,
)
response = Charge.charge(
email="CUS_je02lbimlqixzax",
amount=42000,
metadata={
"custom_fields": [
{
"value":"makurdi",
"display_name": "Donation for",
"variable_name": "donation_for"
},
],
},
bank={
"code":"057",
"account_number":"0000000000"
},
birthday="1995-12-23"
)
self.assertTrue(response['status'])
@httpretty.activate
def test_submit_pin(self):
"""Method defined to test submit pin."""
httpretty.register_uri(
httpretty.POST,
self.endpoint_url("/charge/submit_pin"),
content_type='text/json',
body='{"status": true, "message": "Charge attempted"}',
status=201,
)
response = Charge.submit_pin(
pin="0987",
reference="5bwib5v6anhe9xa",
)
self.assertTrue(response['status'])
|
|
236c5af71093549108fa942847820a721da4880a
|
h2o-docs/src/booklets/v2_2015/source/glm/glm_download_pojo.py
|
h2o-docs/src/booklets/v2_2015/source/glm/glm_download_pojo.py
|
import h2o
h2o.init()
path = h2o.system_file("prostate.csv")
h2o_df = h2o.import_file(path)
h2o_df['CAPSULE'] = h2o_df['CAPSULE'].asfactor()
binomial_fit = h2o.glm(y = "CAPSULE", x = ["AGE", "RACE", "PSA", "GLEASON"], training_frame = h2o_df, family = "binomial")
h2o.download_pojo(binomial_fit)
|
Add python example for downloading glm pojo.
|
Add python example for downloading glm pojo.
|
Python
|
apache-2.0
|
michalkurka/h2o-3,junwucs/h2o-3,madmax983/h2o-3,bospetersen/h2o-3,bospetersen/h2o-3,brightchen/h2o-3,michalkurka/h2o-3,kyoren/https-github.com-h2oai-h2o-3,madmax983/h2o-3,bospetersen/h2o-3,junwucs/h2o-3,junwucs/h2o-3,spennihana/h2o-3,h2oai/h2o-dev,michalkurka/h2o-3,madmax983/h2o-3,YzPaul3/h2o-3,datachand/h2o-3,pchmieli/h2o-3,h2oai/h2o-3,h2oai/h2o-dev,junwucs/h2o-3,h2oai/h2o-dev,h2oai/h2o-3,h2oai/h2o-3,tarasane/h2o-3,printedheart/h2o-3,jangorecki/h2o-3,h2oai/h2o-3,brightchen/h2o-3,spennihana/h2o-3,tarasane/h2o-3,brightchen/h2o-3,junwucs/h2o-3,michalkurka/h2o-3,mathemage/h2o-3,bospetersen/h2o-3,h2oai/h2o-3,jangorecki/h2o-3,tarasane/h2o-3,kyoren/https-github.com-h2oai-h2o-3,datachand/h2o-3,jangorecki/h2o-3,bospetersen/h2o-3,bospetersen/h2o-3,junwucs/h2o-3,spennihana/h2o-3,datachand/h2o-3,brightchen/h2o-3,michalkurka/h2o-3,YzPaul3/h2o-3,madmax983/h2o-3,printedheart/h2o-3,madmax983/h2o-3,YzPaul3/h2o-3,h2oai/h2o-3,printedheart/h2o-3,spennihana/h2o-3,kyoren/https-github.com-h2oai-h2o-3,jangorecki/h2o-3,brightchen/h2o-3,kyoren/https-github.com-h2oai-h2o-3,spennihana/h2o-3,h2oai/h2o-dev,printedheart/h2o-3,madmax983/h2o-3,kyoren/https-github.com-h2oai-h2o-3,tarasane/h2o-3,datachand/h2o-3,h2oai/h2o-dev,printedheart/h2o-3,madmax983/h2o-3,kyoren/https-github.com-h2oai-h2o-3,spennihana/h2o-3,datachand/h2o-3,pchmieli/h2o-3,michalkurka/h2o-3,spennihana/h2o-3,junwucs/h2o-3,tarasane/h2o-3,bospetersen/h2o-3,brightchen/h2o-3,michalkurka/h2o-3,h2oai/h2o-3,kyoren/https-github.com-h2oai-h2o-3,jangorecki/h2o-3,pchmieli/h2o-3,YzPaul3/h2o-3,tarasane/h2o-3,brightchen/h2o-3,printedheart/h2o-3,jangorecki/h2o-3,pchmieli/h2o-3,printedheart/h2o-3,mathemage/h2o-3,mathemage/h2o-3,mathemage/h2o-3,mathemage/h2o-3,pchmieli/h2o-3,pchmieli/h2o-3,jangorecki/h2o-3,h2oai/h2o-3,YzPaul3/h2o-3,h2oai/h2o-dev,mathemage/h2o-3,h2oai/h2o-dev,mathemage/h2o-3,tarasane/h2o-3,pchmieli/h2o-3,datachand/h2o-3,datachand/h2o-3,YzPaul3/h2o-3,YzPaul3/h2o-3
|
Add python example for downloading glm pojo.
|
import h2o
h2o.init()
path = h2o.system_file("prostate.csv")
h2o_df = h2o.import_file(path)
h2o_df['CAPSULE'] = h2o_df['CAPSULE'].asfactor()
binomial_fit = h2o.glm(y = "CAPSULE", x = ["AGE", "RACE", "PSA", "GLEASON"], training_frame = h2o_df, family = "binomial")
h2o.download_pojo(binomial_fit)
|
<commit_before><commit_msg>Add python example for downloading glm pojo.<commit_after>
|
import h2o
h2o.init()
path = h2o.system_file("prostate.csv")
h2o_df = h2o.import_file(path)
h2o_df['CAPSULE'] = h2o_df['CAPSULE'].asfactor()
binomial_fit = h2o.glm(y = "CAPSULE", x = ["AGE", "RACE", "PSA", "GLEASON"], training_frame = h2o_df, family = "binomial")
h2o.download_pojo(binomial_fit)
|
Add python example for downloading glm pojo.import h2o
h2o.init()
path = h2o.system_file("prostate.csv")
h2o_df = h2o.import_file(path)
h2o_df['CAPSULE'] = h2o_df['CAPSULE'].asfactor()
binomial_fit = h2o.glm(y = "CAPSULE", x = ["AGE", "RACE", "PSA", "GLEASON"], training_frame = h2o_df, family = "binomial")
h2o.download_pojo(binomial_fit)
|
<commit_before><commit_msg>Add python example for downloading glm pojo.<commit_after>import h2o
h2o.init()
path = h2o.system_file("prostate.csv")
h2o_df = h2o.import_file(path)
h2o_df['CAPSULE'] = h2o_df['CAPSULE'].asfactor()
binomial_fit = h2o.glm(y = "CAPSULE", x = ["AGE", "RACE", "PSA", "GLEASON"], training_frame = h2o_df, family = "binomial")
h2o.download_pojo(binomial_fit)
|
|
4255d4ae361623ecbed4685f2f1ee94f090acd36
|
test/integration/ggrc/services/test_response_codes.py
|
test/integration/ggrc/services/test_response_codes.py
|
# Copyright (C) 2016 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""Tests API response codes."""
import json
from mock import patch
from integration.ggrc import services
class TestCollectionPost(services.TestCase):
"""Test response codes for post requests."""
def setUp(self):
super(TestCollectionPost, self).setUp()
self.client.get("/login")
def _post(self, data):
return self.client.post(
self.mock_url(),
content_type='application/json',
data=data,
headers=[('X-Requested-By', 'Unit Tests')],
)
def test_post_successful_response(self):
"""Test successful model post call."""
data = json.dumps({
'services_test_mock_model': {
'foo': 'bar',
'code': '1',
'validated': 'good',
'context': None
}}
)
response = self._post(data)
self.assertStatus(response, 201)
def test_post_bad_request(self):
"""Test all bad request calls."""
data = json.dumps({
'services_test_mock_model': {
'foo': 'bar',
'code': '1',
'validated': 'Value Error',
'context': None
}}
)
response = self._post(data)
self.assertStatus(response, 400)
data = json.dumps({
'services_test_mock_model': {
'foo': 'bar',
'code': '1',
'validated': 'Validation Error',
'context': None
}}
)
response = self._post(data)
self.assertStatus(response, 400)
response = self._post("what")
self.assertStatus(response, 400)
@patch("ggrc.rbac.permissions.is_allowed_create_for")
def test_post_forbidden(self, is_allowed):
"""Test posting a forbidden model."""
is_allowed.return_value = False
data = json.dumps({
'services_test_mock_model': {
'foo': 'bar',
'code': '1',
'validated': 'good',
'context': None
}}
)
response = self._post(data)
self.assertStatus(response, 403)
|
Add tests for http response codes
|
Add tests for http response codes
|
Python
|
apache-2.0
|
josthkko/ggrc-core,j0gurt/ggrc-core,AleksNeStu/ggrc-core,edofic/ggrc-core,j0gurt/ggrc-core,selahssea/ggrc-core,AleksNeStu/ggrc-core,VinnieJohns/ggrc-core,plamut/ggrc-core,kr41/ggrc-core,j0gurt/ggrc-core,plamut/ggrc-core,j0gurt/ggrc-core,edofic/ggrc-core,andrei-karalionak/ggrc-core,selahssea/ggrc-core,VinnieJohns/ggrc-core,AleksNeStu/ggrc-core,kr41/ggrc-core,josthkko/ggrc-core,josthkko/ggrc-core,andrei-karalionak/ggrc-core,josthkko/ggrc-core,kr41/ggrc-core,VinnieJohns/ggrc-core,andrei-karalionak/ggrc-core,andrei-karalionak/ggrc-core,VinnieJohns/ggrc-core,selahssea/ggrc-core,selahssea/ggrc-core,plamut/ggrc-core,kr41/ggrc-core,plamut/ggrc-core,edofic/ggrc-core,edofic/ggrc-core,AleksNeStu/ggrc-core
|
Add tests for http response codes
|
# Copyright (C) 2016 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""Tests API response codes."""
import json
from mock import patch
from integration.ggrc import services
class TestCollectionPost(services.TestCase):
"""Test response codes for post requests."""
def setUp(self):
super(TestCollectionPost, self).setUp()
self.client.get("/login")
def _post(self, data):
return self.client.post(
self.mock_url(),
content_type='application/json',
data=data,
headers=[('X-Requested-By', 'Unit Tests')],
)
def test_post_successful_response(self):
"""Test successful model post call."""
data = json.dumps({
'services_test_mock_model': {
'foo': 'bar',
'code': '1',
'validated': 'good',
'context': None
}}
)
response = self._post(data)
self.assertStatus(response, 201)
def test_post_bad_request(self):
"""Test all bad request calls."""
data = json.dumps({
'services_test_mock_model': {
'foo': 'bar',
'code': '1',
'validated': 'Value Error',
'context': None
}}
)
response = self._post(data)
self.assertStatus(response, 400)
data = json.dumps({
'services_test_mock_model': {
'foo': 'bar',
'code': '1',
'validated': 'Validation Error',
'context': None
}}
)
response = self._post(data)
self.assertStatus(response, 400)
response = self._post("what")
self.assertStatus(response, 400)
@patch("ggrc.rbac.permissions.is_allowed_create_for")
def test_post_forbidden(self, is_allowed):
"""Test posting a forbidden model."""
is_allowed.return_value = False
data = json.dumps({
'services_test_mock_model': {
'foo': 'bar',
'code': '1',
'validated': 'good',
'context': None
}}
)
response = self._post(data)
self.assertStatus(response, 403)
|
<commit_before><commit_msg>Add tests for http response codes<commit_after>
|
# Copyright (C) 2016 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""Tests API response codes."""
import json
from mock import patch
from integration.ggrc import services
class TestCollectionPost(services.TestCase):
"""Test response codes for post requests."""
def setUp(self):
super(TestCollectionPost, self).setUp()
self.client.get("/login")
def _post(self, data):
return self.client.post(
self.mock_url(),
content_type='application/json',
data=data,
headers=[('X-Requested-By', 'Unit Tests')],
)
def test_post_successful_response(self):
"""Test successful model post call."""
data = json.dumps({
'services_test_mock_model': {
'foo': 'bar',
'code': '1',
'validated': 'good',
'context': None
}}
)
response = self._post(data)
self.assertStatus(response, 201)
def test_post_bad_request(self):
"""Test all bad request calls."""
data = json.dumps({
'services_test_mock_model': {
'foo': 'bar',
'code': '1',
'validated': 'Value Error',
'context': None
}}
)
response = self._post(data)
self.assertStatus(response, 400)
data = json.dumps({
'services_test_mock_model': {
'foo': 'bar',
'code': '1',
'validated': 'Validation Error',
'context': None
}}
)
response = self._post(data)
self.assertStatus(response, 400)
response = self._post("what")
self.assertStatus(response, 400)
@patch("ggrc.rbac.permissions.is_allowed_create_for")
def test_post_forbidden(self, is_allowed):
"""Test posting a forbidden model."""
is_allowed.return_value = False
data = json.dumps({
'services_test_mock_model': {
'foo': 'bar',
'code': '1',
'validated': 'good',
'context': None
}}
)
response = self._post(data)
self.assertStatus(response, 403)
|
Add tests for http response codes# Copyright (C) 2016 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""Tests API response codes."""
import json
from mock import patch
from integration.ggrc import services
class TestCollectionPost(services.TestCase):
"""Test response codes for post requests."""
def setUp(self):
super(TestCollectionPost, self).setUp()
self.client.get("/login")
def _post(self, data):
return self.client.post(
self.mock_url(),
content_type='application/json',
data=data,
headers=[('X-Requested-By', 'Unit Tests')],
)
def test_post_successful_response(self):
"""Test successful model post call."""
data = json.dumps({
'services_test_mock_model': {
'foo': 'bar',
'code': '1',
'validated': 'good',
'context': None
}}
)
response = self._post(data)
self.assertStatus(response, 201)
def test_post_bad_request(self):
"""Test all bad request calls."""
data = json.dumps({
'services_test_mock_model': {
'foo': 'bar',
'code': '1',
'validated': 'Value Error',
'context': None
}}
)
response = self._post(data)
self.assertStatus(response, 400)
data = json.dumps({
'services_test_mock_model': {
'foo': 'bar',
'code': '1',
'validated': 'Validation Error',
'context': None
}}
)
response = self._post(data)
self.assertStatus(response, 400)
response = self._post("what")
self.assertStatus(response, 400)
@patch("ggrc.rbac.permissions.is_allowed_create_for")
def test_post_forbidden(self, is_allowed):
"""Test posting a forbidden model."""
is_allowed.return_value = False
data = json.dumps({
'services_test_mock_model': {
'foo': 'bar',
'code': '1',
'validated': 'good',
'context': None
}}
)
response = self._post(data)
self.assertStatus(response, 403)
|
<commit_before><commit_msg>Add tests for http response codes<commit_after># Copyright (C) 2016 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""Tests API response codes."""
import json
from mock import patch
from integration.ggrc import services
class TestCollectionPost(services.TestCase):
"""Test response codes for post requests."""
def setUp(self):
super(TestCollectionPost, self).setUp()
self.client.get("/login")
def _post(self, data):
return self.client.post(
self.mock_url(),
content_type='application/json',
data=data,
headers=[('X-Requested-By', 'Unit Tests')],
)
def test_post_successful_response(self):
"""Test successful model post call."""
data = json.dumps({
'services_test_mock_model': {
'foo': 'bar',
'code': '1',
'validated': 'good',
'context': None
}}
)
response = self._post(data)
self.assertStatus(response, 201)
def test_post_bad_request(self):
"""Test all bad request calls."""
data = json.dumps({
'services_test_mock_model': {
'foo': 'bar',
'code': '1',
'validated': 'Value Error',
'context': None
}}
)
response = self._post(data)
self.assertStatus(response, 400)
data = json.dumps({
'services_test_mock_model': {
'foo': 'bar',
'code': '1',
'validated': 'Validation Error',
'context': None
}}
)
response = self._post(data)
self.assertStatus(response, 400)
response = self._post("what")
self.assertStatus(response, 400)
@patch("ggrc.rbac.permissions.is_allowed_create_for")
def test_post_forbidden(self, is_allowed):
"""Test posting a forbidden model."""
is_allowed.return_value = False
data = json.dumps({
'services_test_mock_model': {
'foo': 'bar',
'code': '1',
'validated': 'good',
'context': None
}}
)
response = self._post(data)
self.assertStatus(response, 403)
|
|
3976ee82ad4cc0fc7bc65080fa49e1cea694022c
|
osf/migrations/0095_reset_osf_abstractprovider_licenses_acceptable_id_seq.py
|
osf/migrations/0095_reset_osf_abstractprovider_licenses_acceptable_id_seq.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.9 on 2018-04-12 20:31
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('osf', '0094_update_preprintprovider_group_auth'),
]
operations = [
migrations.RunSQL(
"""
SELECT setval(pg_get_serial_sequence('"osf_abstractprovider_licenses_acceptable"','id'),
coalesce(max("id"), 1), max("id") IS NOT null)
FROM "osf_abstractprovider_licenses_acceptable";
SELECT setval(pg_get_serial_sequence('"osf_abstractprovider"','id'),
coalesce(max("id"), 1), max("id") IS NOT null)
FROM "osf_abstractprovider";
""",
"""
SELECT setval(pg_get_serial_sequence('"osf_abstractprovider_licenses_acceptable"','id'), 1, max("id") IS NOT null)
FROM "osf_abstractprovider_licenses_acceptable";
SELECT setval(pg_get_serial_sequence('"osf_abstractprovider"','id'), 1, max("id") IS NOT null)
FROM "osf_abstractprovider_licenses_acceptable";
"""
),
]
|
Add migration to reset osf_abstractprovider_id_seq...
|
Add migration to reset osf_abstractprovider_id_seq...
...and osf_abstractprovider_licenses_acceptable_seq.
These sequences got out of sync in the 0081 migration.
|
Python
|
apache-2.0
|
HalcyonChimera/osf.io,Johnetordoff/osf.io,mattclark/osf.io,brianjgeiger/osf.io,felliott/osf.io,sloria/osf.io,HalcyonChimera/osf.io,Johnetordoff/osf.io,sloria/osf.io,aaxelb/osf.io,caseyrollins/osf.io,CenterForOpenScience/osf.io,baylee-d/osf.io,erinspace/osf.io,adlius/osf.io,caseyrollins/osf.io,brianjgeiger/osf.io,cslzchen/osf.io,caseyrollins/osf.io,pattisdr/osf.io,sloria/osf.io,HalcyonChimera/osf.io,icereval/osf.io,saradbowman/osf.io,mfraezz/osf.io,mattclark/osf.io,icereval/osf.io,cslzchen/osf.io,aaxelb/osf.io,saradbowman/osf.io,CenterForOpenScience/osf.io,cslzchen/osf.io,baylee-d/osf.io,adlius/osf.io,Johnetordoff/osf.io,pattisdr/osf.io,Johnetordoff/osf.io,CenterForOpenScience/osf.io,mfraezz/osf.io,erinspace/osf.io,felliott/osf.io,mattclark/osf.io,brianjgeiger/osf.io,CenterForOpenScience/osf.io,mfraezz/osf.io,adlius/osf.io,baylee-d/osf.io,felliott/osf.io,pattisdr/osf.io,erinspace/osf.io,HalcyonChimera/osf.io,adlius/osf.io,felliott/osf.io,cslzchen/osf.io,aaxelb/osf.io,aaxelb/osf.io,mfraezz/osf.io,icereval/osf.io,brianjgeiger/osf.io
|
Add migration to reset osf_abstractprovider_id_seq...
...and osf_abstractprovider_licenses_acceptable_seq.
These sequences got out of sync in the 0081 migration.
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.9 on 2018-04-12 20:31
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('osf', '0094_update_preprintprovider_group_auth'),
]
operations = [
migrations.RunSQL(
"""
SELECT setval(pg_get_serial_sequence('"osf_abstractprovider_licenses_acceptable"','id'),
coalesce(max("id"), 1), max("id") IS NOT null)
FROM "osf_abstractprovider_licenses_acceptable";
SELECT setval(pg_get_serial_sequence('"osf_abstractprovider"','id'),
coalesce(max("id"), 1), max("id") IS NOT null)
FROM "osf_abstractprovider";
""",
"""
SELECT setval(pg_get_serial_sequence('"osf_abstractprovider_licenses_acceptable"','id'), 1, max("id") IS NOT null)
FROM "osf_abstractprovider_licenses_acceptable";
SELECT setval(pg_get_serial_sequence('"osf_abstractprovider"','id'), 1, max("id") IS NOT null)
FROM "osf_abstractprovider_licenses_acceptable";
"""
),
]
|
<commit_before><commit_msg>Add migration to reset osf_abstractprovider_id_seq...
...and osf_abstractprovider_licenses_acceptable_seq.
These sequences got out of sync in the 0081 migration.<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.9 on 2018-04-12 20:31
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('osf', '0094_update_preprintprovider_group_auth'),
]
operations = [
migrations.RunSQL(
"""
SELECT setval(pg_get_serial_sequence('"osf_abstractprovider_licenses_acceptable"','id'),
coalesce(max("id"), 1), max("id") IS NOT null)
FROM "osf_abstractprovider_licenses_acceptable";
SELECT setval(pg_get_serial_sequence('"osf_abstractprovider"','id'),
coalesce(max("id"), 1), max("id") IS NOT null)
FROM "osf_abstractprovider";
""",
"""
SELECT setval(pg_get_serial_sequence('"osf_abstractprovider_licenses_acceptable"','id'), 1, max("id") IS NOT null)
FROM "osf_abstractprovider_licenses_acceptable";
SELECT setval(pg_get_serial_sequence('"osf_abstractprovider"','id'), 1, max("id") IS NOT null)
FROM "osf_abstractprovider_licenses_acceptable";
"""
),
]
|
Add migration to reset osf_abstractprovider_id_seq...
...and osf_abstractprovider_licenses_acceptable_seq.
These sequences got out of sync in the 0081 migration.# -*- coding: utf-8 -*-
# Generated by Django 1.11.9 on 2018-04-12 20:31
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('osf', '0094_update_preprintprovider_group_auth'),
]
operations = [
migrations.RunSQL(
"""
SELECT setval(pg_get_serial_sequence('"osf_abstractprovider_licenses_acceptable"','id'),
coalesce(max("id"), 1), max("id") IS NOT null)
FROM "osf_abstractprovider_licenses_acceptable";
SELECT setval(pg_get_serial_sequence('"osf_abstractprovider"','id'),
coalesce(max("id"), 1), max("id") IS NOT null)
FROM "osf_abstractprovider";
""",
"""
SELECT setval(pg_get_serial_sequence('"osf_abstractprovider_licenses_acceptable"','id'), 1, max("id") IS NOT null)
FROM "osf_abstractprovider_licenses_acceptable";
SELECT setval(pg_get_serial_sequence('"osf_abstractprovider"','id'), 1, max("id") IS NOT null)
FROM "osf_abstractprovider_licenses_acceptable";
"""
),
]
|
<commit_before><commit_msg>Add migration to reset osf_abstractprovider_id_seq...
...and osf_abstractprovider_licenses_acceptable_seq.
These sequences got out of sync in the 0081 migration.<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.11.9 on 2018-04-12 20:31
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('osf', '0094_update_preprintprovider_group_auth'),
]
operations = [
migrations.RunSQL(
"""
SELECT setval(pg_get_serial_sequence('"osf_abstractprovider_licenses_acceptable"','id'),
coalesce(max("id"), 1), max("id") IS NOT null)
FROM "osf_abstractprovider_licenses_acceptable";
SELECT setval(pg_get_serial_sequence('"osf_abstractprovider"','id'),
coalesce(max("id"), 1), max("id") IS NOT null)
FROM "osf_abstractprovider";
""",
"""
SELECT setval(pg_get_serial_sequence('"osf_abstractprovider_licenses_acceptable"','id'), 1, max("id") IS NOT null)
FROM "osf_abstractprovider_licenses_acceptable";
SELECT setval(pg_get_serial_sequence('"osf_abstractprovider"','id'), 1, max("id") IS NOT null)
FROM "osf_abstractprovider_licenses_acceptable";
"""
),
]
|
|
8adf445513f7bba361f5ff0d8b7c111ec3caafef
|
eudat/find/getpidinfo.py
|
eudat/find/getpidinfo.py
|
#!/usr/bin/env python
"""
Retrieve pid information accessing the handle resolution system using HTTP
"""
__author__ = 'Roberto Mucci (r.mucci@cineca.it)'
import json
#import urllib
import urllib2
def get_pid_info(pid, handle_url='hdl.handle.net'):
"""
Resolve pid information accessing the handle resolution system provider
using HTTP REST API. Returns a list of dictionary containing PID
information.
pid : string
PID that has to be resolved
handle_url : string
Handle system provider address (default is hdl.handle.net).
"""
if not pid:
print "[ERROR] PID is needed to submit the request.. "
return
print "Search in\t%s\nfor pid\t%s\n....." % (handle_url, pid)
answer = __action_api(handle_url, pid)
values = answer['values']
return values
def __action_api(host, pid):
""" Make the HTTP request."""
action_url = "http://{host}/api/handles/{pid}".format(host=host, pid=pid)
try:
request = urllib2.Request(action_url)
response = urllib2.urlopen(request)
except urllib2.HTTPError as e:
print "\t\tError code %s : The server %s responded with an error" \
% (e.code, host)
if e.code == 500:
print '\t\tError. Something unexpected went wrong during handle ' \
'resolution. (HTTP 500 Internal Server Error)'
exit(e.code)
elif e.code == 404:
print '\t\tHandle Not Found. (HTTP 404 Not Found)'
exit(e.code)
except urllib2.URLError as e:
exit('%s' % e.reason)
else:
out = json.loads(response.read())
if out['responseCode'] == 200:
print 'Values Not Found. The handle exists but has no values ' \
'(or no values according to the types and indices specified)'
assert response.code >= 200
return out
def main():
""" Main function to test the script """
get_pid_info(pid='11100/0beb6af8-cbe5-11e3-a9da-e41f13eb41b2')
if __name__ == '__main__':
main()
|
Resolve PID and get internal information
|
Resolve PID and get internal information
|
Python
|
apache-2.0
|
EUDAT-B2STAGE/EUDAT-Library,EUDAT-B2STAGE/B2STAGE-Library
|
Resolve PID and get internal information
|
#!/usr/bin/env python
"""
Retrieve pid information accessing the handle resolution system using HTTP
"""
__author__ = 'Roberto Mucci (r.mucci@cineca.it)'
import json
#import urllib
import urllib2
def get_pid_info(pid, handle_url='hdl.handle.net'):
"""
Resolve pid information accessing the handle resolution system provider
using HTTP REST API. Returns a list of dictionary containing PID
information.
pid : string
PID that has to be resolved
handle_url : string
Handle system provider address (default is hdl.handle.net).
"""
if not pid:
print "[ERROR] PID is needed to submit the request.. "
return
print "Search in\t%s\nfor pid\t%s\n....." % (handle_url, pid)
answer = __action_api(handle_url, pid)
values = answer['values']
return values
def __action_api(host, pid):
""" Make the HTTP request."""
action_url = "http://{host}/api/handles/{pid}".format(host=host, pid=pid)
try:
request = urllib2.Request(action_url)
response = urllib2.urlopen(request)
except urllib2.HTTPError as e:
print "\t\tError code %s : The server %s responded with an error" \
% (e.code, host)
if e.code == 500:
print '\t\tError. Something unexpected went wrong during handle ' \
'resolution. (HTTP 500 Internal Server Error)'
exit(e.code)
elif e.code == 404:
print '\t\tHandle Not Found. (HTTP 404 Not Found)'
exit(e.code)
except urllib2.URLError as e:
exit('%s' % e.reason)
else:
out = json.loads(response.read())
if out['responseCode'] == 200:
print 'Values Not Found. The handle exists but has no values ' \
'(or no values according to the types and indices specified)'
assert response.code >= 200
return out
def main():
""" Main function to test the script """
get_pid_info(pid='11100/0beb6af8-cbe5-11e3-a9da-e41f13eb41b2')
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Resolve PID and get internal information<commit_after>
|
#!/usr/bin/env python
"""
Retrieve pid information accessing the handle resolution system using HTTP
"""
__author__ = 'Roberto Mucci (r.mucci@cineca.it)'
import json
#import urllib
import urllib2
def get_pid_info(pid, handle_url='hdl.handle.net'):
"""
Resolve pid information accessing the handle resolution system provider
using HTTP REST API. Returns a list of dictionary containing PID
information.
pid : string
PID that has to be resolved
handle_url : string
Handle system provider address (default is hdl.handle.net).
"""
if not pid:
print "[ERROR] PID is needed to submit the request.. "
return
print "Search in\t%s\nfor pid\t%s\n....." % (handle_url, pid)
answer = __action_api(handle_url, pid)
values = answer['values']
return values
def __action_api(host, pid):
""" Make the HTTP request."""
action_url = "http://{host}/api/handles/{pid}".format(host=host, pid=pid)
try:
request = urllib2.Request(action_url)
response = urllib2.urlopen(request)
except urllib2.HTTPError as e:
print "\t\tError code %s : The server %s responded with an error" \
% (e.code, host)
if e.code == 500:
print '\t\tError. Something unexpected went wrong during handle ' \
'resolution. (HTTP 500 Internal Server Error)'
exit(e.code)
elif e.code == 404:
print '\t\tHandle Not Found. (HTTP 404 Not Found)'
exit(e.code)
except urllib2.URLError as e:
exit('%s' % e.reason)
else:
out = json.loads(response.read())
if out['responseCode'] == 200:
print 'Values Not Found. The handle exists but has no values ' \
'(or no values according to the types and indices specified)'
assert response.code >= 200
return out
def main():
""" Main function to test the script """
get_pid_info(pid='11100/0beb6af8-cbe5-11e3-a9da-e41f13eb41b2')
if __name__ == '__main__':
main()
|
Resolve PID and get internal information#!/usr/bin/env python
"""
Retrieve pid information accessing the handle resolution system using HTTP
"""
__author__ = 'Roberto Mucci (r.mucci@cineca.it)'
import json
#import urllib
import urllib2
def get_pid_info(pid, handle_url='hdl.handle.net'):
"""
Resolve pid information accessing the handle resolution system provider
using HTTP REST API. Returns a list of dictionary containing PID
information.
pid : string
PID that has to be resolved
handle_url : string
Handle system provider address (default is hdl.handle.net).
"""
if not pid:
print "[ERROR] PID is needed to submit the request.. "
return
print "Search in\t%s\nfor pid\t%s\n....." % (handle_url, pid)
answer = __action_api(handle_url, pid)
values = answer['values']
return values
def __action_api(host, pid):
""" Make the HTTP request."""
action_url = "http://{host}/api/handles/{pid}".format(host=host, pid=pid)
try:
request = urllib2.Request(action_url)
response = urllib2.urlopen(request)
except urllib2.HTTPError as e:
print "\t\tError code %s : The server %s responded with an error" \
% (e.code, host)
if e.code == 500:
print '\t\tError. Something unexpected went wrong during handle ' \
'resolution. (HTTP 500 Internal Server Error)'
exit(e.code)
elif e.code == 404:
print '\t\tHandle Not Found. (HTTP 404 Not Found)'
exit(e.code)
except urllib2.URLError as e:
exit('%s' % e.reason)
else:
out = json.loads(response.read())
if out['responseCode'] == 200:
print 'Values Not Found. The handle exists but has no values ' \
'(or no values according to the types and indices specified)'
assert response.code >= 200
return out
def main():
""" Main function to test the script """
get_pid_info(pid='11100/0beb6af8-cbe5-11e3-a9da-e41f13eb41b2')
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Resolve PID and get internal information<commit_after>#!/usr/bin/env python
"""
Retrieve pid information accessing the handle resolution system using HTTP
"""
__author__ = 'Roberto Mucci (r.mucci@cineca.it)'
import json
#import urllib
import urllib2
def get_pid_info(pid, handle_url='hdl.handle.net'):
"""
Resolve pid information accessing the handle resolution system provider
using HTTP REST API. Returns a list of dictionary containing PID
information.
pid : string
PID that has to be resolved
handle_url : string
Handle system provider address (default is hdl.handle.net).
"""
if not pid:
print "[ERROR] PID is needed to submit the request.. "
return
print "Search in\t%s\nfor pid\t%s\n....." % (handle_url, pid)
answer = __action_api(handle_url, pid)
values = answer['values']
return values
def __action_api(host, pid):
""" Make the HTTP request."""
action_url = "http://{host}/api/handles/{pid}".format(host=host, pid=pid)
try:
request = urllib2.Request(action_url)
response = urllib2.urlopen(request)
except urllib2.HTTPError as e:
print "\t\tError code %s : The server %s responded with an error" \
% (e.code, host)
if e.code == 500:
print '\t\tError. Something unexpected went wrong during handle ' \
'resolution. (HTTP 500 Internal Server Error)'
exit(e.code)
elif e.code == 404:
print '\t\tHandle Not Found. (HTTP 404 Not Found)'
exit(e.code)
except urllib2.URLError as e:
exit('%s' % e.reason)
else:
out = json.loads(response.read())
if out['responseCode'] == 200:
print 'Values Not Found. The handle exists but has no values ' \
'(or no values according to the types and indices specified)'
assert response.code >= 200
return out
def main():
""" Main function to test the script """
get_pid_info(pid='11100/0beb6af8-cbe5-11e3-a9da-e41f13eb41b2')
if __name__ == '__main__':
main()
|
|
dc7366efa0dde1fbf9cb3999d5cf69fe1033f802
|
corehq/apps/api/decorators.py
|
corehq/apps/api/decorators.py
|
import base64
from corehq.apps.api.models import ApiUser
from django.http import HttpResponse
def api_user_basic_auth(permission, realm=''):
def real_decorator(view):
def wrapper(request, *args, **kwargs):
if 'HTTP_AUTHORIZATION' in request.META:
auth = request.META['HTTP_AUTHORIZATION'].split()
if len(auth) == 2:
if auth[0].lower() == 'basic':
username, password = base64.b64decode(auth[1]).split(':', 1)
if ApiUser.auth(username, password, permission):
return view(request, *args, **kwargs)
response = HttpResponse(status=401)
response['WWW-Authenticate'] = 'Basic realm="%s"' % realm
return response
return wrapper
return real_decorator
|
Add decorator for basic auth with ApiUsers
|
Add decorator for basic auth with ApiUsers
|
Python
|
bsd-3-clause
|
dimagi/commcare-hq,qedsoftware/commcare-hq,dimagi/commcare-hq,qedsoftware/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,qedsoftware/commcare-hq,dimagi/commcare-hq,qedsoftware/commcare-hq,qedsoftware/commcare-hq
|
Add decorator for basic auth with ApiUsers
|
import base64
from corehq.apps.api.models import ApiUser
from django.http import HttpResponse
def api_user_basic_auth(permission, realm=''):
def real_decorator(view):
def wrapper(request, *args, **kwargs):
if 'HTTP_AUTHORIZATION' in request.META:
auth = request.META['HTTP_AUTHORIZATION'].split()
if len(auth) == 2:
if auth[0].lower() == 'basic':
username, password = base64.b64decode(auth[1]).split(':', 1)
if ApiUser.auth(username, password, permission):
return view(request, *args, **kwargs)
response = HttpResponse(status=401)
response['WWW-Authenticate'] = 'Basic realm="%s"' % realm
return response
return wrapper
return real_decorator
|
<commit_before><commit_msg>Add decorator for basic auth with ApiUsers<commit_after>
|
import base64
from corehq.apps.api.models import ApiUser
from django.http import HttpResponse
def api_user_basic_auth(permission, realm=''):
def real_decorator(view):
def wrapper(request, *args, **kwargs):
if 'HTTP_AUTHORIZATION' in request.META:
auth = request.META['HTTP_AUTHORIZATION'].split()
if len(auth) == 2:
if auth[0].lower() == 'basic':
username, password = base64.b64decode(auth[1]).split(':', 1)
if ApiUser.auth(username, password, permission):
return view(request, *args, **kwargs)
response = HttpResponse(status=401)
response['WWW-Authenticate'] = 'Basic realm="%s"' % realm
return response
return wrapper
return real_decorator
|
Add decorator for basic auth with ApiUsersimport base64
from corehq.apps.api.models import ApiUser
from django.http import HttpResponse
def api_user_basic_auth(permission, realm=''):
def real_decorator(view):
def wrapper(request, *args, **kwargs):
if 'HTTP_AUTHORIZATION' in request.META:
auth = request.META['HTTP_AUTHORIZATION'].split()
if len(auth) == 2:
if auth[0].lower() == 'basic':
username, password = base64.b64decode(auth[1]).split(':', 1)
if ApiUser.auth(username, password, permission):
return view(request, *args, **kwargs)
response = HttpResponse(status=401)
response['WWW-Authenticate'] = 'Basic realm="%s"' % realm
return response
return wrapper
return real_decorator
|
<commit_before><commit_msg>Add decorator for basic auth with ApiUsers<commit_after>import base64
from corehq.apps.api.models import ApiUser
from django.http import HttpResponse
def api_user_basic_auth(permission, realm=''):
def real_decorator(view):
def wrapper(request, *args, **kwargs):
if 'HTTP_AUTHORIZATION' in request.META:
auth = request.META['HTTP_AUTHORIZATION'].split()
if len(auth) == 2:
if auth[0].lower() == 'basic':
username, password = base64.b64decode(auth[1]).split(':', 1)
if ApiUser.auth(username, password, permission):
return view(request, *args, **kwargs)
response = HttpResponse(status=401)
response['WWW-Authenticate'] = 'Basic realm="%s"' % realm
return response
return wrapper
return real_decorator
|
|
2607fddbb063c12aaab0e15fb96fbda2a492f240
|
powerline/matchers/vim/plugin/nerdtree.py
|
powerline/matchers/vim/plugin/nerdtree.py
|
# vim:fileencoding=utf-8:noet
from __future__ import (unicode_literals, division, absolute_import, print_function)
import os
import re
from powerline.bindings.vim import buffer_name
NERD_TREE_RE = re.compile(b'NERD_TREE_\\d+')
def nerdtree(matcher_info):
name = buffer_name(matcher_info)
return name and NERD_TREE_RE.match(os.path.basename(name))
|
# vim:fileencoding=utf-8:noet
from __future__ import (unicode_literals, division, absolute_import, print_function)
import os
import re
from powerline.bindings.vim import buffer_name
NERD_TREE_RE = re.compile(b'NERD_tree_\\d+')
def nerdtree(matcher_info):
name = buffer_name(matcher_info)
return name and NERD_TREE_RE.match(os.path.basename(name))
|
Fix case in nerd tree matcher regex
|
Fix case in nerd tree matcher regex
Fixes #1105
|
Python
|
mit
|
QuLogic/powerline,bartvm/powerline,prvnkumar/powerline,kenrachynski/powerline,QuLogic/powerline,bezhermoso/powerline,russellb/powerline,prvnkumar/powerline,areteix/powerline,dragon788/powerline,dragon788/powerline,IvanAli/powerline,junix/powerline,DoctorJellyface/powerline,Liangjianghao/powerline,QuLogic/powerline,wfscheper/powerline,xfumihiro/powerline,DoctorJellyface/powerline,prvnkumar/powerline,xfumihiro/powerline,lukw00/powerline,wfscheper/powerline,s0undt3ch/powerline,junix/powerline,S0lll0s/powerline,EricSB/powerline,xfumihiro/powerline,Luffin/powerline,blindFS/powerline,IvanAli/powerline,kenrachynski/powerline,xxxhycl2010/powerline,cyrixhero/powerline,xxxhycl2010/powerline,lukw00/powerline,bartvm/powerline,darac/powerline,EricSB/powerline,seanfisk/powerline,xxxhycl2010/powerline,bartvm/powerline,Liangjianghao/powerline,russellb/powerline,areteix/powerline,bezhermoso/powerline,IvanAli/powerline,s0undt3ch/powerline,kenrachynski/powerline,wfscheper/powerline,s0undt3ch/powerline,dragon788/powerline,Luffin/powerline,lukw00/powerline,bezhermoso/powerline,EricSB/powerline,areteix/powerline,S0lll0s/powerline,junix/powerline,darac/powerline,firebitsbr/powerline,darac/powerline,blindFS/powerline,seanfisk/powerline,firebitsbr/powerline,DoctorJellyface/powerline,firebitsbr/powerline,seanfisk/powerline,Luffin/powerline,cyrixhero/powerline,blindFS/powerline,russellb/powerline,Liangjianghao/powerline,S0lll0s/powerline,cyrixhero/powerline
|
# vim:fileencoding=utf-8:noet
from __future__ import (unicode_literals, division, absolute_import, print_function)
import os
import re
from powerline.bindings.vim import buffer_name
NERD_TREE_RE = re.compile(b'NERD_TREE_\\d+')
def nerdtree(matcher_info):
name = buffer_name(matcher_info)
return name and NERD_TREE_RE.match(os.path.basename(name))
Fix case in nerd tree matcher regex
Fixes #1105
|
# vim:fileencoding=utf-8:noet
from __future__ import (unicode_literals, division, absolute_import, print_function)
import os
import re
from powerline.bindings.vim import buffer_name
NERD_TREE_RE = re.compile(b'NERD_tree_\\d+')
def nerdtree(matcher_info):
name = buffer_name(matcher_info)
return name and NERD_TREE_RE.match(os.path.basename(name))
|
<commit_before># vim:fileencoding=utf-8:noet
from __future__ import (unicode_literals, division, absolute_import, print_function)
import os
import re
from powerline.bindings.vim import buffer_name
NERD_TREE_RE = re.compile(b'NERD_TREE_\\d+')
def nerdtree(matcher_info):
name = buffer_name(matcher_info)
return name and NERD_TREE_RE.match(os.path.basename(name))
<commit_msg>Fix case in nerd tree matcher regex
Fixes #1105<commit_after>
|
# vim:fileencoding=utf-8:noet
from __future__ import (unicode_literals, division, absolute_import, print_function)
import os
import re
from powerline.bindings.vim import buffer_name
NERD_TREE_RE = re.compile(b'NERD_tree_\\d+')
def nerdtree(matcher_info):
name = buffer_name(matcher_info)
return name and NERD_TREE_RE.match(os.path.basename(name))
|
# vim:fileencoding=utf-8:noet
from __future__ import (unicode_literals, division, absolute_import, print_function)
import os
import re
from powerline.bindings.vim import buffer_name
NERD_TREE_RE = re.compile(b'NERD_TREE_\\d+')
def nerdtree(matcher_info):
name = buffer_name(matcher_info)
return name and NERD_TREE_RE.match(os.path.basename(name))
Fix case in nerd tree matcher regex
Fixes #1105# vim:fileencoding=utf-8:noet
from __future__ import (unicode_literals, division, absolute_import, print_function)
import os
import re
from powerline.bindings.vim import buffer_name
NERD_TREE_RE = re.compile(b'NERD_tree_\\d+')
def nerdtree(matcher_info):
name = buffer_name(matcher_info)
return name and NERD_TREE_RE.match(os.path.basename(name))
|
<commit_before># vim:fileencoding=utf-8:noet
from __future__ import (unicode_literals, division, absolute_import, print_function)
import os
import re
from powerline.bindings.vim import buffer_name
NERD_TREE_RE = re.compile(b'NERD_TREE_\\d+')
def nerdtree(matcher_info):
name = buffer_name(matcher_info)
return name and NERD_TREE_RE.match(os.path.basename(name))
<commit_msg>Fix case in nerd tree matcher regex
Fixes #1105<commit_after># vim:fileencoding=utf-8:noet
from __future__ import (unicode_literals, division, absolute_import, print_function)
import os
import re
from powerline.bindings.vim import buffer_name
NERD_TREE_RE = re.compile(b'NERD_tree_\\d+')
def nerdtree(matcher_info):
name = buffer_name(matcher_info)
return name and NERD_TREE_RE.match(os.path.basename(name))
|
863102b71e87e508ff57523c42beba430405e358
|
teamworkApp/lib/studentGraph.py
|
teamworkApp/lib/studentGraph.py
|
from enum import IntEnum
import matplotlib.pyplot as plt
import numpy as np
import sqlite3
DB = 'db/development.sqlite3'
Style = IntEnum('Style', 'Contributor, Collaborator, Communicator, Challenger', start=0)
Answer_Value = IntEnum(
'Answer_Value',
'1234 1243 1324 1342 1423 1432 2134 2143 2314 2341 2413 2431 3124 3142 3214 3241 3412 3421 4123 4132 4213 4231 4312 4321',
start=0
)
# the order of answers repeat every 4 questions so just the index is (question_num % 4)
Questions = {
0: (Style.Contributor, Style.Collaborator, Style.Communicator, Style.Challenger),
1: (Style.Collaborator, Style.Communicator, Style.Challenger, Style.Contributor),
2: (Style.Communicator, Style.Challenger, Style.Contributor, Style.Collaborator),
3: (Style.Challenger, Style.Contributor, Style.Collaborator, Style.Communicator),
}
def get_students_answers(student_id):
"""return list of complete answers for a given student"""
conn = sqlite3.connect(DB)
c = conn.cursor()
c.execute(
'SELECT * FROM answers WHERE student_id=?',
[student_id,],
)
rows = c.fetchall()
conn.commit()
conn.close()
return rows
def find_scores(student_id):
"""Returns a student's scores for each of the possible styles in a tuple"""
answers = get_students_answers(student_id)
questions = [answer[5] for answer in answers]
values = [Answer_Value(answer[1]).name for answer in answers]
scores = [0] * len(Style)
for question in range(len(questions)):
order = Questions[question % 4]
for i, style in enumerate(order):
scores[style.value] += int(values[question][i])
return tuple(scores)
def student_graph(student_id):
"""Returns a path to where the student's graph is saved"""
labels = [Style(x).name for x in range(len(Style))]
scores = find_scores(student_id)
fig1, ax1 = plt.subplots()
ax1.pie(scores, labels=labels, autopct='%1.1f%%', shadow=True, startangle=90)
ax1.axis('equal')
plt.show()
if __name__ == "__main__":
student_graph(1)
|
Create individual student pie graph
|
Create individual student pie graph
|
Python
|
mit
|
nathanljustin/teamwork-analysis,nathanljustin/teamwork-analysis,nathanljustin/teamwork-analysis,nathanljustin/teamwork-analysis
|
Create individual student pie graph
|
from enum import IntEnum
import matplotlib.pyplot as plt
import numpy as np
import sqlite3
DB = 'db/development.sqlite3'
Style = IntEnum('Style', 'Contributor, Collaborator, Communicator, Challenger', start=0)
Answer_Value = IntEnum(
'Answer_Value',
'1234 1243 1324 1342 1423 1432 2134 2143 2314 2341 2413 2431 3124 3142 3214 3241 3412 3421 4123 4132 4213 4231 4312 4321',
start=0
)
# the order of answers repeat every 4 questions so just the index is (question_num % 4)
Questions = {
0: (Style.Contributor, Style.Collaborator, Style.Communicator, Style.Challenger),
1: (Style.Collaborator, Style.Communicator, Style.Challenger, Style.Contributor),
2: (Style.Communicator, Style.Challenger, Style.Contributor, Style.Collaborator),
3: (Style.Challenger, Style.Contributor, Style.Collaborator, Style.Communicator),
}
def get_students_answers(student_id):
"""return list of complete answers for a given student"""
conn = sqlite3.connect(DB)
c = conn.cursor()
c.execute(
'SELECT * FROM answers WHERE student_id=?',
[student_id,],
)
rows = c.fetchall()
conn.commit()
conn.close()
return rows
def find_scores(student_id):
"""Returns a student's scores for each of the possible styles in a tuple"""
answers = get_students_answers(student_id)
questions = [answer[5] for answer in answers]
values = [Answer_Value(answer[1]).name for answer in answers]
scores = [0] * len(Style)
for question in range(len(questions)):
order = Questions[question % 4]
for i, style in enumerate(order):
scores[style.value] += int(values[question][i])
return tuple(scores)
def student_graph(student_id):
"""Returns a path to where the student's graph is saved"""
labels = [Style(x).name for x in range(len(Style))]
scores = find_scores(student_id)
fig1, ax1 = plt.subplots()
ax1.pie(scores, labels=labels, autopct='%1.1f%%', shadow=True, startangle=90)
ax1.axis('equal')
plt.show()
if __name__ == "__main__":
student_graph(1)
|
<commit_before><commit_msg>Create individual student pie graph<commit_after>
|
from enum import IntEnum
import matplotlib.pyplot as plt
import numpy as np
import sqlite3
DB = 'db/development.sqlite3'
Style = IntEnum('Style', 'Contributor, Collaborator, Communicator, Challenger', start=0)
Answer_Value = IntEnum(
'Answer_Value',
'1234 1243 1324 1342 1423 1432 2134 2143 2314 2341 2413 2431 3124 3142 3214 3241 3412 3421 4123 4132 4213 4231 4312 4321',
start=0
)
# the order of answers repeat every 4 questions so just the index is (question_num % 4)
Questions = {
0: (Style.Contributor, Style.Collaborator, Style.Communicator, Style.Challenger),
1: (Style.Collaborator, Style.Communicator, Style.Challenger, Style.Contributor),
2: (Style.Communicator, Style.Challenger, Style.Contributor, Style.Collaborator),
3: (Style.Challenger, Style.Contributor, Style.Collaborator, Style.Communicator),
}
def get_students_answers(student_id):
"""return list of complete answers for a given student"""
conn = sqlite3.connect(DB)
c = conn.cursor()
c.execute(
'SELECT * FROM answers WHERE student_id=?',
[student_id,],
)
rows = c.fetchall()
conn.commit()
conn.close()
return rows
def find_scores(student_id):
"""Returns a student's scores for each of the possible styles in a tuple"""
answers = get_students_answers(student_id)
questions = [answer[5] for answer in answers]
values = [Answer_Value(answer[1]).name for answer in answers]
scores = [0] * len(Style)
for question in range(len(questions)):
order = Questions[question % 4]
for i, style in enumerate(order):
scores[style.value] += int(values[question][i])
return tuple(scores)
def student_graph(student_id):
"""Returns a path to where the student's graph is saved"""
labels = [Style(x).name for x in range(len(Style))]
scores = find_scores(student_id)
fig1, ax1 = plt.subplots()
ax1.pie(scores, labels=labels, autopct='%1.1f%%', shadow=True, startangle=90)
ax1.axis('equal')
plt.show()
if __name__ == "__main__":
student_graph(1)
|
Create individual student pie graphfrom enum import IntEnum
import matplotlib.pyplot as plt
import numpy as np
import sqlite3
DB = 'db/development.sqlite3'
Style = IntEnum('Style', 'Contributor, Collaborator, Communicator, Challenger', start=0)
Answer_Value = IntEnum(
'Answer_Value',
'1234 1243 1324 1342 1423 1432 2134 2143 2314 2341 2413 2431 3124 3142 3214 3241 3412 3421 4123 4132 4213 4231 4312 4321',
start=0
)
# the order of answers repeats every 4 questions, so the index is just (question_num % 4)
Questions = {
0: (Style.Contributor, Style.Collaborator, Style.Communicator, Style.Challenger),
1: (Style.Collaborator, Style.Communicator, Style.Challenger, Style.Contributor),
2: (Style.Communicator, Style.Challenger, Style.Contributor, Style.Collaborator),
3: (Style.Challenger, Style.Contributor, Style.Collaborator, Style.Communicator),
}
def get_students_answers(student_id):
"""return list of complete answers for a given student"""
conn = sqlite3.connect(DB)
c = conn.cursor()
c.execute(
'SELECT * FROM answers WHERE student_id=?',
[student_id,],
)
rows = c.fetchall()
conn.commit()
conn.close()
return rows
def find_scores(student_id):
"""Returns a student's scores for each of the possible styles in a tuple"""
answers = get_students_answers(student_id)
questions = [answer[5] for answer in answers]
values = [Answer_Value(answer[1]).name for answer in answers]
scores = [0] * len(Style)
for question in range(len(questions)):
order = Questions[question % 4]
for i, style in enumerate(order):
scores[style.value] += int(values[question][i])
return tuple(scores)
def student_graph(student_id):
"""Returns a path to where the student's graph is saved"""
labels = [Style(x).name for x in range(len(Style))]
scores = find_scores(student_id)
fig1, ax1 = plt.subplots()
ax1.pie(scores, labels=labels, autopct='%1.1f%%', shadow=True, startangle=90)
ax1.axis('equal')
plt.show()
if __name__ == "__main__":
student_graph(1)
|
<commit_before><commit_msg>Create individual student pie graph<commit_after>from enum import IntEnum
import matplotlib.pyplot as plt
import numpy as np
import sqlite3
DB = 'db/development.sqlite3'
Style = IntEnum('Style', 'Contributor, Collaborator, Communicator, Challenger', start=0)
Answer_Value = IntEnum(
'Answer_Value',
'1234 1243 1324 1342 1423 1432 2134 2143 2314 2341 2413 2431 3124 3142 3214 3241 3412 3421 4123 4132 4213 4231 4312 4321',
start=0
)
# the order of answers repeats every 4 questions, so the index is just (question_num % 4)
Questions = {
0: (Style.Contributor, Style.Collaborator, Style.Communicator, Style.Challenger),
1: (Style.Collaborator, Style.Communicator, Style.Challenger, Style.Contributor),
2: (Style.Communicator, Style.Challenger, Style.Contributor, Style.Collaborator),
3: (Style.Challenger, Style.Contributor, Style.Collaborator, Style.Communicator),
}
def get_students_answers(student_id):
"""return list of complete answers for a given student"""
conn = sqlite3.connect(DB)
c = conn.cursor()
c.execute(
'SELECT * FROM answers WHERE student_id=?',
[student_id,],
)
rows = c.fetchall()
conn.commit()
conn.close()
return rows
def find_scores(student_id):
"""Returns a student's scores for each of the possible styles in a tuple"""
answers = get_students_answers(student_id)
questions = [answer[5] for answer in answers]
values = [Answer_Value(answer[1]).name for answer in answers]
scores = [0] * len(Style)
for question in range(len(questions)):
order = Questions[question % 4]
for i, style in enumerate(order):
scores[style.value] += int(values[question][i])
return tuple(scores)
def student_graph(student_id):
"""Returns a path to where the student's graph is saved"""
labels = [Style(x).name for x in range(len(Style))]
scores = find_scores(student_id)
fig1, ax1 = plt.subplots()
ax1.pie(scores, labels=labels, autopct='%1.1f%%', shadow=True, startangle=90)
ax1.axis('equal')
plt.show()
if __name__ == "__main__":
student_graph(1)
|
|
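The per-question rotation used by find_scores in the commit above is easiest to verify with a small worked example. The sketch below is a minimal, database-free reconstruction: the Style order and the digit-per-style encoding are taken from the commit, while score_answers and the sample answer strings are made up for illustration.

from enum import IntEnum

# Same Style order as the commit above; QUESTION_ORDER mirrors its Questions dict.
Style = IntEnum('Style', 'Contributor, Collaborator, Communicator, Challenger', start=0)
QUESTION_ORDER = {
    0: (Style.Contributor, Style.Collaborator, Style.Communicator, Style.Challenger),
    1: (Style.Collaborator, Style.Communicator, Style.Challenger, Style.Contributor),
    2: (Style.Communicator, Style.Challenger, Style.Contributor, Style.Collaborator),
    3: (Style.Challenger, Style.Contributor, Style.Collaborator, Style.Communicator),
}

def score_answers(answer_strings):
    """Accumulate per-style scores from answer strings such as '4123'."""
    scores = [0] * len(Style)
    for question, digits in enumerate(answer_strings):
        order = QUESTION_ORDER[question % 4]
        for i, style in enumerate(order):
            scores[style.value] += int(digits[i])
    return tuple(scores)

if __name__ == '__main__':
    # Two made-up answers; the four totals should sum to 2 * (1+2+3+4) = 20.
    print(score_answers(['4123', '3214']))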
4b62369087de530a2ba879be40ae5f133e2496a0
|
samples/ReturnUnicode.py
|
samples/ReturnUnicode.py
|
#------------------------------------------------------------------------------
# ReturnUnicode.py
# Returns all strings as unicode. This also demonstrates the use of an output
# type handler to change the way in which data is returned from a cursor.
#------------------------------------------------------------------------------
import cx_Oracle
def OutputTypeHandler(cursor, name, defaultType, size, precision, scale):
if defaultType in (cx_Oracle.STRING, cx_Oracle.FIXED_CHAR):
return cursor.var(unicode, size, cursor.arraysize)
connection = cx_Oracle.Connection("cx_Oracle/password")
connection.outputtypehandler = OutputTypeHandler
cursor = connection.cursor()
cursor.execute("select * from teststrings")
for row in cursor:
print "Row:", row
|
Add sample code for returning all strings as unicode strings.
|
Add sample code for returning all strings as unicode strings.
git-svn-id: b746c3c07d6b14fe725b72f068c7252a81557b48@133 0cf6dada-cf32-0410-b4fe-d86b42e8394d
|
Python
|
bsd-3-clause
|
Bluehorn/cx_Oracle,Bluehorn/cx_Oracle,Bluehorn/cx_Oracle
|
Add sample code for returning all strings as unicode strings.
git-svn-id: b746c3c07d6b14fe725b72f068c7252a81557b48@133 0cf6dada-cf32-0410-b4fe-d86b42e8394d
|
#------------------------------------------------------------------------------
# ReturnUnicode.py
# Returns all strings as unicode. This also demonstrates the use of an output
# type handler to change the way in which data is returned from a cursor.
#------------------------------------------------------------------------------
import cx_Oracle
def OutputTypeHandler(cursor, name, defaultType, size, precision, scale):
if defaultType in (cx_Oracle.STRING, cx_Oracle.FIXED_CHAR):
return cursor.var(unicode, size, cursor.arraysize)
connection = cx_Oracle.Connection("cx_Oracle/password")
connection.outputtypehandler = OutputTypeHandler
cursor = connection.cursor()
cursor.execute("select * from teststrings")
for row in cursor:
print "Row:", row
|
<commit_before><commit_msg>Add sample code for returning all strings as unicode strings.
git-svn-id: b746c3c07d6b14fe725b72f068c7252a81557b48@133 0cf6dada-cf32-0410-b4fe-d86b42e8394d<commit_after>
|
#------------------------------------------------------------------------------
# ReturnUnicode.py
# Returns all strings as unicode. This also demonstrates the use of an output
# type handler to change the way in which data is returned from a cursor.
#------------------------------------------------------------------------------
import cx_Oracle
def OutputTypeHandler(cursor, name, defaultType, size, precision, scale):
if defaultType in (cx_Oracle.STRING, cx_Oracle.FIXED_CHAR):
return cursor.var(unicode, size, cursor.arraysize)
connection = cx_Oracle.Connection("cx_Oracle/password")
connection.outputtypehandler = OutputTypeHandler
cursor = connection.cursor()
cursor.execute("select * from teststrings")
for row in cursor:
print "Row:", row
|
Add sample code for returning all strings as unicode strings.
git-svn-id: b746c3c07d6b14fe725b72f068c7252a81557b48@133 0cf6dada-cf32-0410-b4fe-d86b42e8394d#------------------------------------------------------------------------------
# ReturnUnicode.py
# Returns all strings as unicode. This also demonstrates the use of an output
# type handler to change the way in which data is returned from a cursor.
#------------------------------------------------------------------------------
import cx_Oracle
def OutputTypeHandler(cursor, name, defaultType, size, precision, scale):
if defaultType in (cx_Oracle.STRING, cx_Oracle.FIXED_CHAR):
return cursor.var(unicode, size, cursor.arraysize)
connection = cx_Oracle.Connection("cx_Oracle/password")
connection.outputtypehandler = OutputTypeHandler
cursor = connection.cursor()
cursor.execute("select * from teststrings")
for row in cursor:
print "Row:", row
|
<commit_before><commit_msg>Add sample code for returning all strings as unicode strings.
git-svn-id: b746c3c07d6b14fe725b72f068c7252a81557b48@133 0cf6dada-cf32-0410-b4fe-d86b42e8394d<commit_after>#------------------------------------------------------------------------------
# ReturnUnicode.py
# Returns all strings as unicode. This also demonstrates the use of an output
# type handler to change the way in which data is returned from a cursor.
#------------------------------------------------------------------------------
import cx_Oracle
def OutputTypeHandler(cursor, name, defaultType, size, precision, scale):
if defaultType in (cx_Oracle.STRING, cx_Oracle.FIXED_CHAR):
return cursor.var(unicode, size, cursor.arraysize)
connection = cx_Oracle.Connection("cx_Oracle/password")
connection.outputtypehandler = OutputTypeHandler
cursor = connection.cursor()
cursor.execute("select * from teststrings")
for row in cursor:
print "Row:", row
|
|
3240053f7df81b6f2488ea6c6023b900ee3f8636
|
proselint/checks/garner/denizen_labels.py
|
proselint/checks/garner/denizen_labels.py
|
# -*- coding: utf-8 -*-
"""MAU109: Denizen labels.
---
layout: post
error_code: MAU109
source: Garner's Modern American Usage
source_url: http://amzn.to/15wF76r
title: denizen labels
date: 2014-06-10 12:31:19
categories: writing
---
Denizen labels.
"""
import re
from proselint.tools import memoize
@memoize
def check(text):
"""Suggest the preferred forms."""
err = "MAU109"
msg = "'{}' is the preferred denizen label."
preferences = [
["Michigander", ["Michiganite"]],
["Indianan", ["Indianian"]],
]
errors = []
for p in preferences:
for r in p[1]:
for m in re.finditer("\s{}\s".format(r), text, flags=re.IGNORECASE):
errors.append((m.start()+1, m.end(), err, msg.format(p[0])))
return errors
|
Add check for denizen labels
|
Add check for denizen labels
|
Python
|
bsd-3-clause
|
jstewmon/proselint,jstewmon/proselint,amperser/proselint,amperser/proselint,amperser/proselint,amperser/proselint,jstewmon/proselint,amperser/proselint
|
Add check for denizen labels
|
# -*- coding: utf-8 -*-
"""MAU109: Denizen labels.
---
layout: post
error_code: MAU109
source: Garner's Modern American Usage
source_url: http://amzn.to/15wF76r
title: denizen labels
date: 2014-06-10 12:31:19
categories: writing
---
Denizen labels.
"""
import re
from proselint.tools import memoize
@memoize
def check(text):
"""Suggest the preferred forms."""
err = "MAU109"
msg = "'{}' is the preferred denizen label."
preferences = [
["Michigander", ["Michiganite"]],
["Indianan", ["Indianian"]],
]
errors = []
for p in preferences:
for r in p[1]:
for m in re.finditer("\s{}\s".format(r), text, flags=re.IGNORECASE):
errors.append((m.start()+1, m.end(), err, msg.format(p[0])))
return errors
|
<commit_before><commit_msg>Add check for denizen labels<commit_after>
|
# -*- coding: utf-8 -*-
"""MAU109: Denizen labels.
---
layout: post
error_code: MAU109
source: Garner's Modern American Usage
source_url: http://amzn.to/15wF76r
title: denizen labels
date: 2014-06-10 12:31:19
categories: writing
---
Denizen labels.
"""
import re
from proselint.tools import memoize
@memoize
def check(text):
"""Suggest the preferred forms."""
err = "MAU109"
msg = "'{}' is the preferred denizen label."
preferences = [
["Michigander", ["Michiganite"]],
["Indianan", ["Indianian"]],
]
errors = []
for p in preferences:
for r in p[1]:
for m in re.finditer("\s{}\s".format(r), text, flags=re.IGNORECASE):
errors.append((m.start()+1, m.end(), err, msg.format(p[0])))
return errors
|
Add check for denizen labels# -*- coding: utf-8 -*-
"""MAU109: Denizen labels.
---
layout: post
error_code: MAU109
source: Garner's Modern American Usage
source_url: http://amzn.to/15wF76r
title: denizen labels
date: 2014-06-10 12:31:19
categories: writing
---
Denizen labels.
"""
import re
from proselint.tools import memoize
@memoize
def check(text):
"""Suggest the preferred forms."""
err = "MAU109"
msg = "'{}' is the preferred denizen label."
preferences = [
["Michigander", ["Michiganite"]],
["Indianan", ["Indianian"]],
]
errors = []
for p in preferences:
for r in p[1]:
for m in re.finditer("\s{}\s".format(r), text, flags=re.IGNORECASE):
errors.append((m.start()+1, m.end(), err, msg.format(p[0])))
return errors
|
<commit_before><commit_msg>Add check for denizen labels<commit_after># -*- coding: utf-8 -*-
"""MAU109: Denizen labels.
---
layout: post
error_code: MAU109
source: Garner's Modern American Usage
source_url: http://amzn.to/15wF76r
title: denizen labels
date: 2014-06-10 12:31:19
categories: writing
---
Denizen labels.
"""
import re
from proselint.tools import memoize
@memoize
def check(text):
"""Suggest the preferred forms."""
err = "MAU109"
msg = "'{}' is the preferred denizen label."
preferences = [
["Michigander", ["Michiganite"]],
["Indianan", ["Indianian"]],
]
errors = []
for p in preferences:
for r in p[1]:
for m in re.finditer("\s{}\s".format(r), text, flags=re.IGNORECASE):
errors.append((m.start()+1, m.end(), err, msg.format(p[0])))
return errors
|
|
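The check in the commit above is a table of preferred/dispreferred terms plus a whitespace-bounded, case-insensitive regex search. A stripped-down sketch of the same pattern, without the proselint.tools memoize decorator (the function name and sample sentence are illustrative only):

import re

def find_preferred(text, preferences):
    """Yield (start, end, suggestion) for each dispreferred term found in text."""
    for preferred, avoided in preferences:
        for term in avoided:
            for m in re.finditer(r"\s{}\s".format(term), text, flags=re.IGNORECASE):
                yield (m.start() + 1, m.end(), preferred)

if __name__ == '__main__':
    prefs = [("Michigander", ["Michiganite"])]
    print(list(find_preferred("A proud Michiganite spoke.", prefs)))  # [(8, 20, 'Michigander')]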
e5e63984354038c61681e9e14b18c3f5a88f9a83
|
migrations/versions/1130_add_copied_to_following_framework_column.py
|
migrations/versions/1130_add_copied_to_following_framework_column.py
|
"""add_copied_to_following_framework_column
Revision ID: 1130
Revises: 1120
Create Date: 2018-04-06 11:52:17.825501
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '1130'
down_revision = '1120'
def upgrade():
op.add_column('archived_services', sa.Column('copied_to_following_framework', sa.Boolean(), server_default=sa.text('false'), nullable=False))
op.add_column('draft_services', sa.Column('copied_to_following_framework', sa.Boolean(), server_default=sa.text('false'), nullable=False))
op.add_column('services', sa.Column('copied_to_following_framework', sa.Boolean(), server_default=sa.text('false'), nullable=False))
def downgrade():
op.drop_column('services', 'copied_to_following_framework')
op.drop_column('draft_services', 'copied_to_following_framework')
op.drop_column('archived_services', 'copied_to_following_framework')
|
Add 'copied_to_following_framework' column to services
|
Add 'copied_to_following_framework' column to services
We need to keep track of whether a service has been copied to a
following framework during applications. Adding this column will allow
this.
|
Python
|
mit
|
alphagov/digitalmarketplace-api,alphagov/digitalmarketplace-api,alphagov/digitalmarketplace-api
|
Add 'copied_to_following_framework' column to services
We need to keep track of whether a service has been copied to a
following framework during applications. Adding this column will allow
this.
|
"""add_copied_to_following_framework_column
Revision ID: 1130
Revises: 1120
Create Date: 2018-04-06 11:52:17.825501
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '1130'
down_revision = '1120'
def upgrade():
op.add_column('archived_services', sa.Column('copied_to_following_framework', sa.Boolean(), server_default=sa.text('false'), nullable=False))
op.add_column('draft_services', sa.Column('copied_to_following_framework', sa.Boolean(), server_default=sa.text('false'), nullable=False))
op.add_column('services', sa.Column('copied_to_following_framework', sa.Boolean(), server_default=sa.text('false'), nullable=False))
def downgrade():
op.drop_column('services', 'copied_to_following_framework')
op.drop_column('draft_services', 'copied_to_following_framework')
op.drop_column('archived_services', 'copied_to_following_framework')
|
<commit_before><commit_msg>Add 'copied_to_following_framework' column to services
We need to keep track of whether a service has been copied to a
following framework during applications. Adding this column will allow
this.<commit_after>
|
"""add_copied_to_following_framework_column
Revision ID: 1130
Revises: 1120
Create Date: 2018-04-06 11:52:17.825501
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '1130'
down_revision = '1120'
def upgrade():
op.add_column('archived_services', sa.Column('copied_to_following_framework', sa.Boolean(), server_default=sa.text('false'), nullable=False))
op.add_column('draft_services', sa.Column('copied_to_following_framework', sa.Boolean(), server_default=sa.text('false'), nullable=False))
op.add_column('services', sa.Column('copied_to_following_framework', sa.Boolean(), server_default=sa.text('false'), nullable=False))
def downgrade():
op.drop_column('services', 'copied_to_following_framework')
op.drop_column('draft_services', 'copied_to_following_framework')
op.drop_column('archived_services', 'copied_to_following_framework')
|
Add 'copied_to_following_framework' column to services
We need to keep track of whether a service has been copied to a
following framework during applications. Adding this column will allow
this."""add_copied_to_following_framework_column
Revision ID: 1130
Revises: 1120
Create Date: 2018-04-06 11:52:17.825501
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '1130'
down_revision = '1120'
def upgrade():
op.add_column('archived_services', sa.Column('copied_to_following_framework', sa.Boolean(), server_default=sa.text('false'), nullable=False))
op.add_column('draft_services', sa.Column('copied_to_following_framework', sa.Boolean(), server_default=sa.text('false'), nullable=False))
op.add_column('services', sa.Column('copied_to_following_framework', sa.Boolean(), server_default=sa.text('false'), nullable=False))
def downgrade():
op.drop_column('services', 'copied_to_following_framework')
op.drop_column('draft_services', 'copied_to_following_framework')
op.drop_column('archived_services', 'copied_to_following_framework')
|
<commit_before><commit_msg>Add 'copied_to_following_framework' column to services
We need to keep track of whether a service has been copied to a
following framework during applications. Adding this column will allow
this.<commit_after>"""add_copied_to_following_framework_column
Revision ID: 1130
Revises: 1120
Create Date: 2018-04-06 11:52:17.825501
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '1130'
down_revision = '1120'
def upgrade():
op.add_column('archived_services', sa.Column('copied_to_following_framework', sa.Boolean(), server_default=sa.text('false'), nullable=False))
op.add_column('draft_services', sa.Column('copied_to_following_framework', sa.Boolean(), server_default=sa.text('false'), nullable=False))
op.add_column('services', sa.Column('copied_to_following_framework', sa.Boolean(), server_default=sa.text('false'), nullable=False))
def downgrade():
op.drop_column('services', 'copied_to_following_framework')
op.drop_column('draft_services', 'copied_to_following_framework')
op.drop_column('archived_services', 'copied_to_following_framework')
|
|
b8bec1944b999bda1b5452f3435b0c347ed0b24f
|
scripts/cligen/init_clifiles.py
|
scripts/cligen/init_clifiles.py
|
"""When DEP expands, we have a problem that we have no close by precip files.
So let's ensure we have one file per 0.25 degree lat and lon; we can then use
these to fast-start newly expanded areas...
"""
from pyiem.dep import SOUTH, EAST, NORTH, WEST
import numpy as np
import os
import shutil
# We shall use this file, no matter what
SRC = "/i/0/cli/095x038/095.17x038.13.cli"
SCENARIO = 0
created = 0
for lon in np.arange(WEST, EAST, 0.05):
for lat in np.arange(SOUTH, NORTH, 0.05):
if (("%.2f" % (lon,)).endswith(".25") and
("%.2f" % (lat,)).endswith(".25")):
mydir = "/i/%s/cli/%03.0fx%03.0f" % (SCENARIO, 0 - lon, lat)
if not os.path.isdir(mydir):
os.makedirs(mydir)
fn = "%s/%06.2fx%06.2f.cli" % (mydir, 0 - lon, lat)
if not os.path.isfile(fn):
created += 1
shutil.copyfile(SRC, fn)
print("We just created %s new files!" % (created,))
|
Add init of 0.25x0.25 climatefiles
|
Add init of 0.25x0.25 climatefiles
So that we can more quickly hot-start newly added DEP regions...
|
Python
|
mit
|
akrherz/dep,akrherz/dep,akrherz/dep,akrherz/idep,akrherz/idep,akrherz/dep,akrherz/idep,akrherz/idep,akrherz/dep,akrherz/idep,akrherz/idep
|
Add init of 0.25x0.25 climatefiles
So that we can more quickly hot-start newly added DEP regions...
|
"""When DEP expands, we have a problem that we have no close by precip files.
So let's ensure we have one file per 0.25 degree lat and lon; we can then use
these to fast-start newly expanded areas...
"""
from pyiem.dep import SOUTH, EAST, NORTH, WEST
import numpy as np
import os
import shutil
# We shall use this file, no matter what
SRC = "/i/0/cli/095x038/095.17x038.13.cli"
SCENARIO = 0
created = 0
for lon in np.arange(WEST, EAST, 0.05):
for lat in np.arange(SOUTH, NORTH, 0.05):
if (("%.2f" % (lon,)).endswith(".25") and
("%.2f" % (lat,)).endswith(".25")):
mydir = "/i/%s/cli/%03.0fx%03.0f" % (SCENARIO, 0 - lon, lat)
if not os.path.isdir(mydir):
os.makedirs(mydir)
fn = "%s/%06.2fx%06.2f.cli" % (mydir, 0 - lon, lat)
if not os.path.isfile(fn):
created += 1
shutil.copyfile(SRC, fn)
print("We just created %s new files!" % (created,))
|
<commit_before><commit_msg>Add init of 0.25x0.25 climatefiles
So that we can more quickly hot-start newly added DEP regions...<commit_after>
|
"""When DEP expands, we have a problem that we have no close by precip files.
So let's ensure we have one file per 0.25 degree lat and lon; we can then use
these to fast-start newly expanded areas...
"""
from pyiem.dep import SOUTH, EAST, NORTH, WEST
import numpy as np
import os
import shutil
# We shall use this file, no matter what
SRC = "/i/0/cli/095x038/095.17x038.13.cli"
SCENARIO = 0
created = 0
for lon in np.arange(WEST, EAST, 0.05):
for lat in np.arange(SOUTH, NORTH, 0.05):
if (("%.2f" % (lon,)).endswith(".25") and
("%.2f" % (lat,)).endswith(".25")):
mydir = "/i/%s/cli/%03.0fx%03.0f" % (SCENARIO, 0 - lon, lat)
if not os.path.isdir(mydir):
os.makedirs(mydir)
fn = "%s/%06.2fx%06.2f.cli" % (mydir, 0 - lon, lat)
if not os.path.isfile(fn):
created += 1
shutil.copyfile(SRC, fn)
print("We just created %s new files!" % (created,))
|
Add init of 0.25x0.25 climatefiles
So that we can more quickly hot-start newly added DEP regions..."""When DEP expands, we have a problem that we have no close by precip files.
So let's ensure we have one file per 0.25 degree lat and lon; we can then use
these to fast-start newly expanded areas...
"""
from pyiem.dep import SOUTH, EAST, NORTH, WEST
import numpy as np
import os
import shutil
# We shall use this file, no matter what
SRC = "/i/0/cli/095x038/095.17x038.13.cli"
SCENARIO = 0
created = 0
for lon in np.arange(WEST, EAST, 0.05):
for lat in np.arange(SOUTH, NORTH, 0.05):
if (("%.2f" % (lon,)).endswith(".25") and
("%.2f" % (lat,)).endswith(".25")):
mydir = "/i/%s/cli/%03.0fx%03.0f" % (SCENARIO, 0 - lon, lat)
if not os.path.isdir(mydir):
os.makedirs(mydir)
fn = "%s/%06.2fx%06.2f.cli" % (mydir, 0 - lon, lat)
if not os.path.isfile(fn):
created += 1
shutil.copyfile(SRC, fn)
print("We just created %s new files!" % (created,))
|
<commit_before><commit_msg>Add init of 0.25x0.25 climatefiles
So that we can more quickly hot-start newly added DEP regions...<commit_after>"""When DEP expands, we have a problem that we have no close by precip files.
So let's ensure we have one file per 0.25 degree lat and lon; we can then use
these to fast-start newly expanded areas...
"""
from pyiem.dep import SOUTH, EAST, NORTH, WEST
import numpy as np
import os
import shutil
# We shall use this file, no matter what
SRC = "/i/0/cli/095x038/095.17x038.13.cli"
SCENARIO = 0
created = 0
for lon in np.arange(WEST, EAST, 0.05):
for lat in np.arange(SOUTH, NORTH, 0.05):
if (("%.2f" % (lon,)).endswith(".25") and
("%.2f" % (lat,)).endswith(".25")):
mydir = "/i/%s/cli/%03.0fx%03.0f" % (SCENARIO, 0 - lon, lat)
if not os.path.isdir(mydir):
os.makedirs(mydir)
fn = "%s/%06.2fx%06.2f.cli" % (mydir, 0 - lon, lat)
if not os.path.isfile(fn):
created += 1
shutil.copyfile(SRC, fn)
print("We just created %s new files!" % (created,))
|
|
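The coordinate filter in the commit above hinges on the two-decimal string form ending in ".25". A tiny stand-alone sketch of that filter, with made-up WEST/EAST bounds standing in for the pyiem.dep constants:

# Step in 0.05-degree increments and keep only values whose "%.2f" form ends in ".25",
# which is the same test the script above applies to both lon and lat.
WEST, EAST = -96.0, -95.0  # made-up stand-ins for the pyiem.dep constants
lon = WEST
kept = []
while lon < EAST:
    if ("%.2f" % lon).endswith(".25"):
        kept.append(round(lon, 2))
    lon += 0.05
print(kept)  # [-95.25]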
da9ee7ac5adeb4ebf364c886c99c9f0531bc156f
|
techgig_join.py
|
techgig_join.py
|
def main():
s=raw_input()
a=raw_input()
b=raw_input()
c=raw_input()
x = (a,b,c)
print s.join(x)
main()
|
Concatenate each line of the string
|
Concatenate each line of the string
|
Python
|
mit
|
kumarisneha/practice_repo
|
Concatenate each line of the string
|
def main():
s=raw_input()
a=raw_input()
b=raw_input()
c=raw_input()
x = (a,b,c)
print s.join(x)
main()
|
<commit_before><commit_msg>Concatenate each line of the string<commit_after>
|
def main():
s=raw_input()
a=raw_input()
b=raw_input()
c=raw_input()
x = (a,b,c)
print s.join(x)
main()
|
Concatenate each line of the stringdef main():
s=raw_input()
a=raw_input()
b=raw_input()
c=raw_input()
x = (a,b,c)
print s.join(x)
main()
|
<commit_before><commit_msg>Concatenate each line of the string<commit_after>def main():
s=raw_input()
a=raw_input()
b=raw_input()
c=raw_input()
x = (a,b,c)
print s.join(x)
main()
|
|
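The snippet above is Python 2 (raw_input and the print statement). An equivalent Python 3 sketch of the same separator join, with hard-coded strings standing in for the stdin prompts:

def join_lines(separator, lines):
    # str.join concatenates the strings with the separator between them.
    return separator.join(lines)

if __name__ == '__main__':
    print(join_lines('-', ('alpha', 'beta', 'gamma')))  # alpha-beta-gamma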
ba6d92b034ee9e71d2c17d16168c1a4cc0ca2821
|
src/main/python/logs/makeUserEventList.py
|
src/main/python/logs/makeUserEventList.py
|
#!/usr/bin/env python
"""
Extract the usernames from a JSON file, and write a file of those usernames + event type and event times
Given a file with a field marked 'username', extract the values of those fields
and write, one per line, to a file with the supplied name.
"""
import json
import sys
if __name__ == '__main__':
f1 = open(sys.argv[1], 'r')
f2 = open(sys.argv[2], 'w')
dc = json.JSONDecoder()
f2.write('username' + ',' + 'event_type' + ',' + 'time' + '\n')
for line in f1:
dcl = dc.decode(line)
f2.write(dcl['username'] + ',' + dcl['event_type'] + ',' + str(dcl['time']) + '\n')
|
Add script to make list of users + event types + event times
|
Add script to make list of users + event types + event times
|
Python
|
bsd-3-clause
|
jimwaldo/HarvardX-Tools,jimwaldo/HarvardX-Tools
|
Add script to make list of users + event types + event times
|
#!/usr/bin/env python
"""
Extract the usernames from a JSON file, and write a file of those usernames + event type and event times
Given a file with a field marked 'username', extract the values of those fields
and write, one per line, to a file with the supplied name.
"""
import json
import sys
if __name__ == '__main__':
f1 = open(sys.argv[1], 'r')
f2 = open(sys.argv[2], 'w')
dc = json.JSONDecoder()
f2.write('username' + ',' + 'event_type' + ',' + 'time' + '\n')
for line in f1:
dcl = dc.decode(line)
f2.write(dcl['username'] + ',' + dcl['event_type'] + ',' + str(dcl['time']) + '\n')
|
<commit_before><commit_msg>Add script to make list of users + event types + event times<commit_after>
|
#!/usr/bin/env python
"""
Extract the usernames from a JSON file, and write a file of those usernames + event type and event times
Given a file with a field marked 'username', extract the values of those fields
and write, one per line, to a file with the supplied name.
"""
import json
import sys
if __name__ == '__main__':
f1 = open(sys.argv[1], 'r')
f2 = open(sys.argv[2], 'w')
dc = json.JSONDecoder()
f2.write('username' + ',' + 'event_type' + ',' + 'time' + '\n')
for line in f1:
dcl = dc.decode(line)
f2.write(dcl['username'] + ',' + dcl['event_type'] + ',' + str(dcl['time']) + '\n')
|
Add script to make list of users + event types + event times#!/usr/bin/env python
"""
Extract the usernames from a JSON file, and write a file of those usernames + event type and event times
Given a file with a field marked 'username', extract the values of those fields
and write, one per line, to a file with the supplied name.
"""
import json
import sys
if __name__ == '__main__':
f1 = open(sys.argv[1], 'r')
f2 = open(sys.argv[2], 'w')
dc = json.JSONDecoder()
f2.write('username' + ',' + 'event_type' + ',' + 'time' + '\n')
for line in f1:
dcl = dc.decode(line)
f2.write(dcl['username'] + ',' + dcl['event_type'] + ',' + str(dcl['time']) + '\n')
|
<commit_before><commit_msg>Add script to make list of users + event types + event times<commit_after>#!/usr/bin/env python
"""
Extract the usernames from a JSON file, and write a file of those usernames + event type and event times
Given a file with a field marked 'username', extract the values of those fields
and write, one per line, to a file with the supplied name.
"""
import json
import sys
if __name__ == '__main__':
f1 = open(sys.argv[1], 'r')
f2 = open(sys.argv[2], 'w')
dc = json.JSONDecoder()
f2.write('username' + ',' + 'event_type' + ',' + 'time' + '\n')
for line in f1:
dcl = dc.decode(line)
f2.write(dcl['username'] + ',' + dcl['event_type'] + ',' + str(dcl['time']) + '\n')
|
|
f9d113aa528b60d1a41b5caf0f1f0f7da4a8c8b3
|
inbox/util/rdb.py
|
inbox/util/rdb.py
|
import socket
from gevent import monkey; monkey.patch_all()
import sys
import random
from code import InteractiveConsole
class RemoteConsole(InteractiveConsole):
def __init__(self, handle, locals=None):
self.handle = handle
sys.stderr = self.handle
InteractiveConsole.__init__(self, locals=locals)
def write(self, data):
self.handle.write(data)
def runcode(self, code):
# preserve stdout/stderr
oldstdout = sys.stdout
oldstderr = sys.stderr
sys.stdout = self.handle
sys.stderr = self.handle
InteractiveConsole.runcode(self, code)
sys.stdout = oldstdout
sys.stderr = oldstderr
def raw_input(self, prompt=""):
self.handle.write(prompt)
self.handle.flush()
return self.handle.readline()
def break_to_interpreter(address="localhost", portmin=4000, portmax=5000):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
port = random.randint(portmin, portmax)
sock.bind((address, port))
sock.listen(1)
print "Interpreter waiting on %s port %d..." % (address, port)
(clientsocket, address) = sock.accept()
handle = clientsocket.makefile('rw')
handle.write('Embedded interpreter')
console = RemoteConsole(handle, locals())
console.interact()
# example usage - connect with 'netcat localhost 4444'
if __name__ == '__main__':
break_to_interpreter()
|
Break into a python interpreter from anywhere
|
Break into a python interpreter from anywhere
Summary:
I use pdb a lot when debugging and I find it really annoying that we only get stack traces from errors on gunks. Wouldn't it be cool to be able to at least have access to an interpreter at the bottom of a stack trace? Well, rdb does this. To use it, insert a call to "rdb.break_to_interpreter" and the greenlet will block while waiting for a telnet (or netcat if you can) connection on localhost (and without blocking the whole process).
Currently the debugger is very basic - it's still possible to examine the stackframe using sys._getframe, though. I'm looking for a way to make the frame data available in locals() and to have greenlets break into the debugger whenever they get an uncaught exception.
Test Plan: Ran a sync
Reviewers: charles, spang, emfree
Reviewed By: emfree
Projects: #inbox-sync
Differential Revision: https://review.inboxapp.com/D321
|
Python
|
agpl-3.0
|
wakermahmud/sync-engine,closeio/nylas,jobscore/sync-engine,PriviPK/privipk-sync-engine,wakermahmud/sync-engine,PriviPK/privipk-sync-engine,EthanBlackburn/sync-engine,ErinCall/sync-engine,ErinCall/sync-engine,Eagles2F/sync-engine,gale320/sync-engine,PriviPK/privipk-sync-engine,nylas/sync-engine,PriviPK/privipk-sync-engine,ErinCall/sync-engine,EthanBlackburn/sync-engine,wakermahmud/sync-engine,nylas/sync-engine,EthanBlackburn/sync-engine,wakermahmud/sync-engine,PriviPK/privipk-sync-engine,ErinCall/sync-engine,jobscore/sync-engine,EthanBlackburn/sync-engine,Eagles2F/sync-engine,gale320/sync-engine,gale320/sync-engine,nylas/sync-engine,gale320/sync-engine,ErinCall/sync-engine,wakermahmud/sync-engine,Eagles2F/sync-engine,closeio/nylas,Eagles2F/sync-engine,nylas/sync-engine,Eagles2F/sync-engine,closeio/nylas,jobscore/sync-engine,EthanBlackburn/sync-engine,gale320/sync-engine,jobscore/sync-engine,closeio/nylas
|
Break into a python interpreter from anywhere
Summary:
I use pdb a lot when debugging and I find it really annoying that we only get stack traces from errors on gunks. Wouldn't it be cool to be able to at least have access to an interpreter at the bottom of a stack trace? Well, rdb does this. To use it, insert a call to "rdb.break_to_interpreter" and the greenlet will block while waiting for a telnet (or netcat if you can) connection on localhost (and without blocking the whole process).
Currently the debugger is very basic - it's still possible to examine the stackframe using sys._getframe, though. I'm looking for a way to make the frame data available in locals() and to have greenlets break into the debugger whenever they get an uncaught exception.
Test Plan: Ran a sync
Reviewers: charles, spang, emfree
Reviewed By: emfree
Projects: #inbox-sync
Differential Revision: https://review.inboxapp.com/D321
|
import socket
from gevent import monkey; monkey.patch_all()
import sys
import random
from code import InteractiveConsole
class RemoteConsole(InteractiveConsole):
def __init__(self, handle, locals=None):
self.handle = handle
sys.stderr = self.handle
InteractiveConsole.__init__(self, locals=locals)
def write(self, data):
self.handle.write(data)
def runcode(self, code):
# preserve stdout/stderr
oldstdout = sys.stdout
oldstderr = sys.stderr
sys.stdout = self.handle
sys.stderr = self.handle
InteractiveConsole.runcode(self, code)
sys.stdout = oldstdout
sys.stderr = oldstderr
def raw_input(self, prompt=""):
self.handle.write(prompt)
self.handle.flush()
return self.handle.readline()
def break_to_interpreter(address="localhost", portmin=4000, portmax=5000):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
port = random.randint(portmin, portmax)
sock.bind((address, port))
sock.listen(1)
print "Interpreter waiting on %s port %d..." % (address, port)
(clientsocket, address) = sock.accept()
handle = clientsocket.makefile('rw')
handle.write('Embedded interpreter')
console = RemoteConsole(handle, locals())
console.interact()
# example usage - connect with 'netcat localhost 4444'
if __name__ == '__main__':
break_to_interpreter()
|
<commit_before><commit_msg>Break into a python interpreter from anywhere
Summary:
I use pdb a lot when debugging and I find it really annoying that we only get stack traces from errors on gunks. Wouldn't it be cool to be able to at least have access to an interpreter at the bottom of a stack trace? Well, rdb does this. To use it, insert a call to "rdb.break_to_interpreter" and the greenlet will block while waiting for a telnet (or netcat if you can) connection on localhost (and without blocking the whole process).
Currently the debugger is very basic - it's still possible to examine the stackframe using sys._getframe, though. I'm looking for a way to make the frame data available in locals() and to have greenlets break into the debugger whenever they get an uncaught exception.
Test Plan: Ran a sync
Reviewers: charles, spang, emfree
Reviewed By: emfree
Projects: #inbox-sync
Differential Revision: https://review.inboxapp.com/D321<commit_after>
|
import socket
from gevent import monkey; monkey.patch_all()
import sys
import random
from code import InteractiveConsole
class RemoteConsole(InteractiveConsole):
def __init__(self, handle, locals=None):
self.handle = handle
sys.stderr = self.handle
InteractiveConsole.__init__(self, locals=locals)
def write(self, data):
self.handle.write(data)
def runcode(self, code):
# preserve stdout/stderr
oldstdout = sys.stdout
oldstderr = sys.stderr
sys.stdout = self.handle
sys.stderr = self.handle
InteractiveConsole.runcode(self, code)
sys.stdout = oldstdout
sys.stderr = oldstderr
def raw_input(self, prompt=""):
self.handle.write(prompt)
self.handle.flush()
return self.handle.readline()
def break_to_interpreter(address="localhost", portmin=4000, portmax=5000):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
port = random.randint(portmin, portmax)
sock.bind((address, port))
sock.listen(1)
print "Interpreter waiting on %s port %d..." % (address, port)
(clientsocket, address) = sock.accept()
handle = clientsocket.makefile('rw')
handle.write('Embedded interpreter')
console = RemoteConsole(handle, locals())
console.interact()
# example usage - connect with 'netcat localhost 4444'
if __name__ == '__main__':
break_to_interpreter()
|
Break into a python interpreter from anywhere
Summary:
I use pdb a lot when debugging and I find it really annoying that we only get stack traces from errors on gunks. Wouldn't it be cool to be able to at least have access to an interpreter at the bottom of a stack trace? Well, rdb does this. To use it, insert a call to "rdb.break_to_interpreter" and the greenlet will block while waiting for a telnet (or netcat if you can) connection on localhost (and without blocking the whole process).
Currently the debugger is very basic - it's still possible to examine the stackframe using sys._getframe, though. I'm looking for a way to make the frame data available in locals() and to have greenlets break into the debugger whenever they get an uncaught exception.
Test Plan: Ran a sync
Reviewers: charles, spang, emfree
Reviewed By: emfree
Projects: #inbox-sync
Differential Revision: https://review.inboxapp.com/D321import socket
from gevent import monkey; monkey.patch_all()
import sys
import random
from code import InteractiveConsole
class RemoteConsole(InteractiveConsole):
def __init__(self, handle, locals=None):
self.handle = handle
sys.stderr = self.handle
InteractiveConsole.__init__(self, locals=locals)
def write(self, data):
self.handle.write(data)
def runcode(self, code):
# preserve stdout/stderr
oldstdout = sys.stdout
oldstderr = sys.stderr
sys.stdout = self.handle
sys.stderr = self.handle
InteractiveConsole.runcode(self, code)
sys.stdout = oldstdout
sys.stderr = oldstderr
def raw_input(self, prompt=""):
self.handle.write(prompt)
self.handle.flush()
return self.handle.readline()
def break_to_interpreter(address="localhost", portmin=4000, portmax=5000):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
port = random.randint(portmin, portmax)
sock.bind((address, port))
sock.listen(1)
print "Interpreter waiting on %s port %d..." % (address, port)
(clientsocket, address) = sock.accept()
handle = clientsocket.makefile('rw')
handle.write('Embedded interpreter')
console = RemoteConsole(handle, locals())
console.interact()
# example usage - connect with 'netcat localhost 4444'
if __name__ == '__main__':
break_to_interpreter()
|
<commit_before><commit_msg>Break into a python interpreter from anywhere
Summary:
I use pdb a lot when debugging and I find it really annoying that we only get stack traces from errors on gunks. Wouldn't it be cool to be able to at least have access to an interpreter at the bottom of a stack trace? Well, rdb does this. To use it, insert a call to "rdb.break_to_interpreter" and the greenlet will block while waiting for a telnet (or netcat if you can) connection on localhost (and without blocking the whole process).
Currently the debugger is very basic - it's still possible to examine the stackframe using sys._getframe, though. I'm looking for a way to make the frame data available in locals() and to have greenlets break into the debugger whenever they get an uncaught exception.
Test Plan: Ran a sync
Reviewers: charles, spang, emfree
Reviewed By: emfree
Projects: #inbox-sync
Differential Revision: https://review.inboxapp.com/D321<commit_after>import socket
from gevent import monkey; monkey.patch_all()
import sys
import random
from code import InteractiveConsole
class RemoteConsole(InteractiveConsole):
def __init__(self, handle, locals=None):
self.handle = handle
sys.stderr = self.handle
InteractiveConsole.__init__(self, locals=locals)
def write(self, data):
self.handle.write(data)
def runcode(self, code):
# preserve stdout/stderr
oldstdout = sys.stdout
oldstderr = sys.stderr
sys.stdout = self.handle
sys.stderr = self.handle
InteractiveConsole.runcode(self, code)
sys.stdout = oldstdout
sys.stderr = oldstderr
def raw_input(self, prompt=""):
self.handle.write(prompt)
self.handle.flush()
return self.handle.readline()
def break_to_interpreter(address="localhost", portmin=4000, portmax=5000):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
port = random.randint(portmin, portmax)
sock.bind((address, port))
sock.listen(1)
print "Interpreter waiting on %s port %d..." % (address, port)
(clientsocket, address) = sock.accept()
handle = clientsocket.makefile('rw')
handle.write('Embedded interpreter')
console = RemoteConsole(handle, locals())
console.interact()
# example usage - connect with 'netcat localhost 4444'
if __name__ == '__main__':
break_to_interpreter()
|
|
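The commit message above suggests connecting to the embedded interpreter with netcat. A minimal Python client sketch that does roughly the same thing (the port is whatever break_to_interpreter() printed; 4242 below is only a placeholder):

import socket
import sys
import threading

def pump_to_stdout(sock):
    # Relay everything the remote interpreter sends to our stdout.
    try:
        while True:
            data = sock.recv(4096)
            if not data:
                break
            sys.stdout.write(data.decode('utf-8', errors='replace'))
            sys.stdout.flush()
    except OSError:
        pass

def connect(host='localhost', port=4242):
    with socket.create_connection((host, port)) as sock:
        threading.Thread(target=pump_to_stdout, args=(sock,), daemon=True).start()
        for line in sys.stdin:  # forward each typed line to the interpreter
            sock.sendall(line.encode('utf-8'))

if __name__ == '__main__':
    connect()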
3a79cbf70e75689bd88418d831e5c8953c3d997f
|
bin/test_setup.py
|
bin/test_setup.py
|
"""
Test that the installed modules in setup.py are up-to-date.
If this test fails, run
python bin/generate_test_list.py
and
python bin/generate_module_list.py
to generate the up-to-date test and modules list to put in setup.py.
"""
import generate_test_list
import generate_module_list
from get_sympy import path_hack
path_hack()
import setup
module_list = generate_module_list.generate_module_list()
test_list = generate_test_list.generate_test_list()
assert setup.modules == module_list, set(setup.modules).symmetric_difference(set(module_list))
assert setup.tests == test_list, set(setup.tests).symmetric_difference(set(test_list))
print("setup.py modules and tests are OK")
|
Add a script to test that tests and modules in setup.py are up-to-date
|
Add a script to test that tests and modules in setup.py are up-to-date
|
Python
|
bsd-3-clause
|
aktech/sympy,souravsingh/sympy,madan96/sympy,sampadsaha5/sympy,kaushik94/sympy,drufat/sympy,chaffra/sympy,drufat/sympy,postvakje/sympy,yashsharan/sympy,drufat/sympy,jaimahajan1997/sympy,sampadsaha5/sympy,madan96/sympy,yashsharan/sympy,jaimahajan1997/sympy,jaimahajan1997/sympy,madan96/sympy,kaushik94/sympy,souravsingh/sympy,sampadsaha5/sympy,chaffra/sympy,skidzo/sympy,souravsingh/sympy,rahuldan/sympy,skidzo/sympy,skidzo/sympy,rahuldan/sympy,yashsharan/sympy,aktech/sympy,aktech/sympy,postvakje/sympy,kaushik94/sympy,rahuldan/sympy,chaffra/sympy,postvakje/sympy
|
Add a script to test that tests and modules in setup.py are up-to-date
|
"""
Test that the installed modules in setup.py are up-to-date.
If this test fails, run
python bin/generate_test_list.py
and
python bin/generate_module_list.py
to generate the up-to-date test and modules list to put in setup.py.
"""
import generate_test_list
import generate_module_list
from get_sympy import path_hack
path_hack()
import setup
module_list = generate_module_list.generate_module_list()
test_list = generate_test_list.generate_test_list()
assert setup.modules == module_list, set(setup.modules).symmetric_difference(set(module_list))
assert setup.tests == test_list, set(setup.tests).symmetric_difference(set(test_list))
print("setup.py modules and tests are OK")
|
<commit_before><commit_msg>Add a script to test that tests and modules in setup.py are up-to-date<commit_after>
|
"""
Test that the installed modules in setup.py are up-to-date.
If this test fails, run
python bin/generate_test_list.py
and
python bin/generate_module_list.py
to generate the up-to-date test and modules list to put in setup.py.
"""
import generate_test_list
import generate_module_list
from get_sympy import path_hack
path_hack()
import setup
module_list = generate_module_list.generate_module_list()
test_list = generate_test_list.generate_test_list()
assert setup.modules == module_list, set(setup.modules).symmetric_difference(set(module_list))
assert setup.tests == test_list, set(setup.tests).symmetric_difference(set(test_list))
print("setup.py modules and tests are OK")
|
Add a script to test that tests and modules in setup.py are up-to-date"""
Test that the installed modules in setup.py are up-to-date.
If this test fails, run
python bin/generate_test_list.py
and
python bin/generate_module_list.py
to generate the up-to-date test and modules list to put in setup.py.
"""
import generate_test_list
import generate_module_list
from get_sympy import path_hack
path_hack()
import setup
module_list = generate_module_list.generate_module_list()
test_list = generate_test_list.generate_test_list()
assert setup.modules == module_list, set(setup.modules).symmetric_difference(set(module_list))
assert setup.tests == test_list, set(setup.tests).symmetric_difference(set(test_list))
print("setup.py modules and tests are OK")
|
<commit_before><commit_msg>Add a script to test that tests and modules in setup.py are up-to-date<commit_after>"""
Test that the installed modules in setup.py are up-to-date.
If this test fails, run
python bin/generate_test_list.py
and
python bin/generate_module_list.py
to generate the up-to-date test and modules list to put in setup.py.
"""
import generate_test_list
import generate_module_list
from get_sympy import path_hack
path_hack()
import setup
module_list = generate_module_list.generate_module_list()
test_list = generate_test_list.generate_test_list()
assert setup.modules == module_list, set(setup.modules).symmetric_difference(set(module_list))
assert setup.tests == test_list, set(setup.tests).symmetric_difference(set(test_list))
print("setup.py modules and tests are OK")
|
|
a77b72358132ac6f8d58fcb72dee7a762cebec87
|
python/fire_rs/monitoring/ui.py
|
python/fire_rs/monitoring/ui.py
|
# Copyright (c) 2018, CNRS-LAAS
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import subprocess
import abc
class NoUI:
def message_dialog(self, message: str):
pass
def warning_dialog(self, message: str):
pass
def error_dialog(self, message: str):
pass
def question_dialog(self, message: str) -> bool:
"""Always returns True"""
return True
class ZenityUI(NoUI):
def message_dialog(self, message: str):
"""Show a dialog window with a 'message' next an image 'type'"""
subprocess.run(["zenity", "--info",
"--text={}".format(message)])
def warning_dialog(self, message: str):
"""Show a dialog window with a 'message' next an image 'type'"""
subprocess.run(["zenity", "--warning",
"--text={}".format(message)])
def error_dialog(self, message: str):
"""Show a dialog window with a 'message' next an image 'type'"""
subprocess.run(["zenity", "--error",
"--text={}".format(message)])
def question_dialog(self, message: str) -> bool:
"""Show a dialog window with a 'message' next an image 'type'"""
if subprocess.run(["zenity", "--question",
"--text={}".format(message)]).returncode == 0:
return True
else:
return False
|
Add Zenity-based user interface for SuperSAOP
|
Add Zenity-based user interface for SuperSAOP
|
Python
|
bsd-2-clause
|
fire-rs-laas/fire-rs-saop,fire-rs-laas/fire-rs-saop,fire-rs-laas/fire-rs-saop,fire-rs-laas/fire-rs-saop
|
Add Zenity-based user interface for SuperSAOP
|
# Copyright (c) 2018, CNRS-LAAS
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import subprocess
import abc
class NoUI:
def message_dialog(self, message: str):
pass
def warning_dialog(self, message: str):
pass
def error_dialog(self, message: str):
pass
def question_dialog(self, message: str) -> bool:
"""Always returns True"""
return True
class ZenityUI(NoUI):
def message_dialog(self, message: str):
"""Show a dialog window with a 'message' next an image 'type'"""
subprocess.run(["zenity", "--info",
"--text={}".format(message)])
def warning_dialog(self, message: str):
"""Show a dialog window with a 'message' next an image 'type'"""
subprocess.run(["zenity", "--warning",
"--text={}".format(message)])
def error_dialog(self, message: str):
"""Show a dialog window with a 'message' next an image 'type'"""
subprocess.run(["zenity", "--error",
"--text={}".format(message)])
def question_dialog(self, message: str) -> bool:
"""Show a dialog window with a 'message' next an image 'type'"""
if subprocess.run(["zenity", "--question",
"--text={}".format(message)]).returncode == 0:
return True
else:
return False
|
<commit_before><commit_msg>Add Zenity-based user interface for SuperSAOP<commit_after>
|
# Copyright (c) 2018, CNRS-LAAS
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import subprocess
import abc
class NoUI:
def message_dialog(self, message: str):
pass
def warning_dialog(self, message: str):
pass
def error_dialog(self, message: str):
pass
def question_dialog(self, message: str) -> bool:
"""Always returns True"""
return True
class ZenityUI(NoUI):
def message_dialog(self, message: str):
"""Show a dialog window with a 'message' next an image 'type'"""
subprocess.run(["zenity", "--info",
"--text={}".format(message)])
def warning_dialog(self, message: str):
"""Show a dialog window with a 'message' next an image 'type'"""
subprocess.run(["zenity", "--warning",
"--text={}".format(message)])
def error_dialog(self, message: str):
"""Show a dialog window with a 'message' next an image 'type'"""
subprocess.run(["zenity", "--error",
"--text={}".format(message)])
def question_dialog(self, message: str) -> bool:
"""Show a dialog window with a 'message' next an image 'type'"""
if subprocess.run(["zenity", "--question",
"--text={}".format(message)]).returncode == 0:
return True
else:
return False
|
Add Zenity-based user interface for SuperSAOP# Copyright (c) 2018, CNRS-LAAS
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import subprocess
import abc
class NoUI:
def message_dialog(self, message: str):
pass
def warning_dialog(self, message: str):
pass
def error_dialog(self, message: str):
pass
def question_dialog(self, message: str) -> bool:
"""Always returns True"""
return True
class ZenityUI(NoUI):
def message_dialog(self, message: str):
"""Show a dialog window with a 'message' next an image 'type'"""
subprocess.run(["zenity", "--info",
"--text={}".format(message)])
def warning_dialog(self, message: str):
"""Show a dialog window with a 'message' next an image 'type'"""
subprocess.run(["zenity", "--warning",
"--text={}".format(message)])
def error_dialog(self, message: str):
"""Show a dialog window with a 'message' next an image 'type'"""
subprocess.run(["zenity", "--error",
"--text={}".format(message)])
def question_dialog(self, message: str) -> bool:
"""Show a dialog window with a 'message' next an image 'type'"""
if subprocess.run(["zenity", "--question",
"--text={}".format(message)]).returncode == 0:
return True
else:
return False
|
<commit_before><commit_msg>Add Zenity-based user interface for SuperSAOP<commit_after># Copyright (c) 2018, CNRS-LAAS
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import subprocess
import abc
class NoUI:
def message_dialog(self, message: str):
pass
def warning_dialog(self, message: str):
pass
def error_dialog(self, message: str):
pass
def question_dialog(self, message: str) -> bool:
"""Always returns True"""
return True
class ZenityUI(NoUI):
def message_dialog(self, message: str):
"""Show a dialog window with a 'message' next an image 'type'"""
subprocess.run(["zenity", "--info",
"--text={}".format(message)])
def warning_dialog(self, message: str):
"""Show a dialog window with a 'message' next an image 'type'"""
subprocess.run(["zenity", "--warning",
"--text={}".format(message)])
def error_dialog(self, message: str):
"""Show a dialog window with a 'message' next an image 'type'"""
subprocess.run(["zenity", "--error",
"--text={}".format(message)])
def question_dialog(self, message: str) -> bool:
"""Show a dialog window with a 'message' next an image 'type'"""
if subprocess.run(["zenity", "--question",
"--text={}".format(message)]).returncode == 0:
return True
else:
return False
|
|
a15aecdc72b135fe55778a28416b1efaca6be727
|
quran_text/tests/test_models.py
|
quran_text/tests/test_models.py
|
from django.test import TestCase
from model_mommy import mommy
from ..models import Ayah
class SuraModelTest(TestCase):
def setUp(self):
self.sura = mommy.make('quran_text.sura', name='Al-Fateha', index=1)
self.ayah = mommy.make('quran_text.ayah', number=1, sura=self.sura,
text='بسم الله الرحمن الرحيم')
def test_str(self):
self.assertEqual('Al-Fateha', str(self.sura))
class AyahModelTest(TestCase):
def setUp(self):
self.sura = mommy.make('quran_text.sura', name='Al-Fateha', index=1)
self.ayah = mommy.make('quran_text.ayah', number=1, sura=self.sura,
text='بسم الله الرحمن الرحيم')
mommy.make('quran_text.ayah', number=2, sura=self.sura,
text='الحمدلله رب العالمين')
def test_str(self):
self.assertEqual('1 - 1', str(self.ayah))
class TestAyahModelManager(TestCase):
def setUp(self):
self.sura = mommy.make('quran_text.sura', name='Al-Fateha', index=1)
self.ayah = mommy.make('quran_text.ayah', number=1, sura=self.sura,
text='بسم الله الرحمن الرحيم')
mommy.make('quran_text.ayah', number=2, sura=self.sura,
text='الحمدلله رب العالمين')
def test_get_sura_text(self):
ayaht = Ayah.objects.get_sura_text(self.sura.pk)
self.assertIn(self.ayah, ayaht)
self.assertEqual(2, ayaht.count())
def test_get_sura_ayah(self):
ayah = Ayah.objects.get_sura_ayah(self.sura.pk,
self.ayah.pk)
self.assertIn(self.ayah, ayah)
def get_sura_ayat_range(self):
ayaht = Ayah.objects.get_sura_ayah(self.sura.pk, 1, 2)
self.assertIn(self.ayah, ayaht)
self.assertEqual(2, ayaht.count())
|
Add tests for Quran Text models
|
Add tests for Quran Text models
|
Python
|
mit
|
EmadMokhtar/tafseer_api
|
Add tests for Quran Text models
|
from django.test import TestCase
from model_mommy import mommy
from ..models import Ayah
class SuraModelTest(TestCase):
def setUp(self):
self.sura = mommy.make('quran_text.sura', name='Al-Fateha', index=1)
self.ayah = mommy.make('quran_text.ayah', number=1, sura=self.sura,
text='بسم الله الرحمن الرحيم')
def test_str(self):
self.assertEqual('Al-Fateha', str(self.sura))
class AyahModelTest(TestCase):
def setUp(self):
self.sura = mommy.make('quran_text.sura', name='Al-Fateha', index=1)
self.ayah = mommy.make('quran_text.ayah', number=1, sura=self.sura,
text='بسم الله الرحمن الرحيم')
mommy.make('quran_text.ayah', number=2, sura=self.sura,
text='الحمدلله رب العالمين')
def test_str(self):
self.assertEqual('1 - 1', str(self.ayah))
class TestAyahModelManager(TestCase):
def setUp(self):
self.sura = mommy.make('quran_text.sura', name='Al-Fateha', index=1)
self.ayah = mommy.make('quran_text.ayah', number=1, sura=self.sura,
text='بسم الله الرحمن الرحيم')
mommy.make('quran_text.ayah', number=2, sura=self.sura,
text='الحمدلله رب العالمين')
def test_get_sura_text(self):
ayaht = Ayah.objects.get_sura_text(self.sura.pk)
self.assertIn(self.ayah, ayaht)
self.assertEqual(2, ayaht.count())
def test_get_sura_ayah(self):
ayah = Ayah.objects.get_sura_ayah(self.sura.pk,
self.ayah.pk)
self.assertIn(self.ayah, ayah)
def get_sura_ayat_range(self):
ayaht = Ayah.objects.get_sura_ayah(self.sura.pk, 1, 2)
self.assertIn(self.ayah, ayaht)
self.assertEqual(2, ayaht.count())
|
<commit_before><commit_msg>Add tests for Quran Text models<commit_after>
|
from django.test import TestCase
from model_mommy import mommy
from ..models import Ayah
class SuraModelTest(TestCase):
def setUp(self):
self.sura = mommy.make('quran_text.sura', name='Al-Fateha', index=1)
self.ayah = mommy.make('quran_text.ayah', number=1, sura=self.sura,
text='بسم الله الرحمن الرحيم')
def test_str(self):
self.assertEqual('Al-Fateha', str(self.sura))
class AyahModelTest(TestCase):
def setUp(self):
self.sura = mommy.make('quran_text.sura', name='Al-Fateha', index=1)
self.ayah = mommy.make('quran_text.ayah', number=1, sura=self.sura,
text='بسم الله الرحمن الرحيم')
mommy.make('quran_text.ayah', number=2, sura=self.sura,
text='الحمدلله رب العالمين')
def test_str(self):
self.assertEqual('1 - 1', str(self.ayah))
class TestAyahModelManager(TestCase):
def setUp(self):
self.sura = mommy.make('quran_text.sura', name='Al-Fateha', index=1)
self.ayah = mommy.make('quran_text.ayah', number=1, sura=self.sura,
text='بسم الله الرحمن الرحيم')
mommy.make('quran_text.ayah', number=2, sura=self.sura,
text='الحمدلله رب العالمين')
def test_get_sura_text(self):
ayaht = Ayah.objects.get_sura_text(self.sura.pk)
self.assertIn(self.ayah, ayaht)
self.assertEqual(2, ayaht.count())
def test_get_sura_ayah(self):
ayah = Ayah.objects.get_sura_ayah(self.sura.pk,
self.ayah.pk)
self.assertIn(self.ayah, ayah)
def get_sura_ayat_range(self):
ayaht = Ayah.objects.get_sura_ayah(self.sura.pk, 1, 2)
self.assertIn(self.ayah, ayaht)
self.assertEqual(2, ayaht.count())
|
Add tests for Quran Text modelsfrom django.test import TestCase
from model_mommy import mommy
from ..models import Ayah
class SuraModelTest(TestCase):
def setUp(self):
self.sura = mommy.make('quran_text.sura', name='Al-Fateha', index=1)
self.ayah = mommy.make('quran_text.ayah', number=1, sura=self.sura,
text='بسم الله الرحمن الرحيم')
def test_str(self):
self.assertEqual('Al-Fateha', str(self.sura))
class AyahModelTest(TestCase):
def setUp(self):
self.sura = mommy.make('quran_text.sura', name='Al-Fateha', index=1)
self.ayah = mommy.make('quran_text.ayah', number=1, sura=self.sura,
text='بسم الله الرحمن الرحيم')
mommy.make('quran_text.ayah', number=2, sura=self.sura,
text='الحمدلله رب العالمين')
def test_str(self):
self.assertEqual('1 - 1', str(self.ayah))
class TestAyahModelManager(TestCase):
def setUp(self):
self.sura = mommy.make('quran_text.sura', name='Al-Fateha', index=1)
self.ayah = mommy.make('quran_text.ayah', number=1, sura=self.sura,
text='بسم الله الرحمن الرحيم')
mommy.make('quran_text.ayah', number=2, sura=self.sura,
text='الحمدلله رب العالمين')
def test_get_sura_text(self):
ayaht = Ayah.objects.get_sura_text(self.sura.pk)
self.assertIn(self.ayah, ayaht)
self.assertEqual(2, ayaht.count())
def test_get_sura_ayah(self):
ayah = Ayah.objects.get_sura_ayah(self.sura.pk,
self.ayah.pk)
self.assertIn(self.ayah, ayah)
def get_sura_ayat_range(self):
ayaht = Ayah.objects.get_sura_ayah(self.sura.pk, 1, 2)
self.assertIn(self.ayah, ayaht)
self.assertEqual(2, ayaht.count())
|
<commit_before><commit_msg>Add tests for Quran Text models<commit_after>from django.test import TestCase
from model_mommy import mommy
from ..models import Ayah
class SuraModelTest(TestCase):
def setUp(self):
self.sura = mommy.make('quran_text.sura', name='Al-Fateha', index=1)
self.ayah = mommy.make('quran_text.ayah', number=1, sura=self.sura,
text='بسم الله الرحمن الرحيم')
def test_str(self):
self.assertEqual('Al-Fateha', str(self.sura))
class AyahModelTest(TestCase):
def setUp(self):
self.sura = mommy.make('quran_text.sura', name='Al-Fateha', index=1)
self.ayah = mommy.make('quran_text.ayah', number=1, sura=self.sura,
text='بسم الله الرحمن الرحيم')
mommy.make('quran_text.ayah', number=2, sura=self.sura,
text='الحمدلله رب العالمين')
def test_str(self):
self.assertEqual('1 - 1', str(self.ayah))
class TestAyahModelManager(TestCase):
def setUp(self):
self.sura = mommy.make('quran_text.sura', name='Al-Fateha', index=1)
self.ayah = mommy.make('quran_text.ayah', number=1, sura=self.sura,
text='بسم الله الرحمن الرحيم')
mommy.make('quran_text.ayah', number=2, sura=self.sura,
text='الحمدلله رب العالمين')
def test_get_sura_text(self):
ayaht = Ayah.objects.get_sura_text(self.sura.pk)
self.assertIn(self.ayah, ayaht)
self.assertEqual(2, ayaht.count())
def test_get_sura_ayah(self):
ayah = Ayah.objects.get_sura_ayah(self.sura.pk,
self.ayah.pk)
self.assertIn(self.ayah, ayah)
def get_sura_ayat_range(self):
ayaht = Ayah.objects.get_sura_ayah(self.sura.pk, 1, 2)
self.assertIn(self.ayah, ayaht)
self.assertEqual(2, ayaht.count())
|
|
704b2fe733ca8a8f5d680c1cf829a124bf3041bd
|
crawler/wikitravel-manual-rename.py
|
crawler/wikitravel-manual-rename.py
|
#!/opt/local/bin/python
import hashlib
import json
import os.path
import sys
import string
for i, line in enumerate(sys.stdin):
(url, title, fileName1) = json.loads(line)
page1 = url[27:]
page2 = page1.replace('(', '%28').replace(')', '%29')
fileName2 = page2.replace('/', '_') + '_' + hashlib.md5(page2).hexdigest()[:8];
suffix = '.google_geocoded'
if os.path.exists(fileName2 + suffix):
if not os.path.exists(fileName1 + suffix):
cmd = 'mv "%s" "%s"' % (fileName2 + suffix, fileName1 + suffix)
print(cmd)
os.system(cmd)
|
Add script to do manual renames.
|
[crawler] Add script to do manual renames.
|
Python
|
apache-2.0
|
TimurTarasenko/omim,milchakov/omim,matsprea/omim,yunikkk/omim,trashkalmar/omim,yunikkk/omim,krasin/omim,mapsme/omim,UdjinM6/omim,programming086/omim,andrewshadura/omim,milchakov/omim,sidorov-panda/omim,syershov/omim,Transtech/omim,goblinr/omim,augmify/omim,Komzpa/omim,mgsergio/omim,mpimenov/omim,ygorshenin/omim,felipebetancur/omim,sidorov-panda/omim,kw217/omim,yunikkk/omim,kw217/omim,Volcanoscar/omim,Endika/omim,jam891/omim,Komzpa/omim,vladon/omim,TimurTarasenko/omim,VladiMihaylenko/omim,Saicheg/omim,goblinr/omim,vng/omim,simon247/omim,Volcanoscar/omim,guard163/omim,felipebetancur/omim,mpimenov/omim,Komzpa/omim,programming086/omim,jam891/omim,lydonchandra/omim,guard163/omim,simon247/omim,augmify/omim,ygorshenin/omim,wersoo/omim,gardster/omim,bykoianko/omim,matsprea/omim,ygorshenin/omim,igrechuhin/omim,matsprea/omim,stangls/omim,sidorov-panda/omim,sidorov-panda/omim,VladiMihaylenko/omim,albertshift/omim,dkorolev/omim,dobriy-eeh/omim,TimurTarasenko/omim,sidorov-panda/omim,victorbriz/omim,kw217/omim,krasin/omim,darina/omim,ygorshenin/omim,matsprea/omim,rokuz/omim,simon247/omim,TimurTarasenko/omim,trashkalmar/omim,syershov/omim,dobriy-eeh/omim,65apps/omim,dobriy-eeh/omim,therearesomewhocallmetim/omim,Saicheg/omim,Transtech/omim,dkorolev/omim,AlexanderMatveenko/omim,felipebetancur/omim,rokuz/omim,augmify/omim,rokuz/omim,simon247/omim,darina/omim,darina/omim,rokuz/omim,dobriy-eeh/omim,krasin/omim,gardster/omim,dobriy-eeh/omim,lydonchandra/omim,edl00k/omim,trashkalmar/omim,goblinr/omim,dobriy-eeh/omim,VladiMihaylenko/omim,mpimenov/omim,milchakov/omim,ygorshenin/omim,programming086/omim,vasilenkomike/omim,mapsme/omim,yunikkk/omim,guard163/omim,mpimenov/omim,wersoo/omim,therearesomewhocallmetim/omim,vasilenkomike/omim,andrewshadura/omim,igrechuhin/omim,darina/omim,gardster/omim,kw217/omim,Endika/omim,65apps/omim,Endika/omim,Saicheg/omim,augmify/omim,darina/omim,vasilenkomike/omim,victorbriz/omim,victorbriz/omim,Zverik/omim,alexzatsepin/omim,Zverik/omim,Transtech/omim,programming086/omim,krasin/omim,guard163/omim,dkorolev/omim,milchakov/omim,darina/omim,programming086/omim,wersoo/omim,mpimenov/omim,felipebetancur/omim,Zverik/omim,UdjinM6/omim,ygorshenin/omim,vng/omim,programming086/omim,edl00k/omim,UdjinM6/omim,AlexanderMatveenko/omim,Volcanoscar/omim,alexzatsepin/omim,dobriy-eeh/omim,milchakov/omim,TimurTarasenko/omim,rokuz/omim,andrewshadura/omim,edl00k/omim,programming086/omim,Transtech/omim,alexzatsepin/omim,trashkalmar/omim,augmify/omim,goblinr/omim,bykoianko/omim,syershov/omim,UdjinM6/omim,wersoo/omim,dkorolev/omim,Zverik/omim,bykoianko/omim,goblinr/omim,mapsme/omim,lydonchandra/omim,matsprea/omim,Saicheg/omim,krasin/omim,dkorolev/omim,guard163/omim,wersoo/omim,albertshift/omim,65apps/omim,TimurTarasenko/omim,syershov/omim,victorbriz/omim,victorbriz/omim,vng/omim,guard163/omim,yunikkk/omim,kw217/omim,Komzpa/omim,65apps/omim,Komzpa/omim,UdjinM6/omim,andrewshadura/omim,victorbriz/omim,UdjinM6/omim,vasilenkomike/omim,lydonchandra/omim,bykoianko/omim,TimurTarasenko/omim,jam891/omim,goblinr/omim,lydonchandra/omim,mpimenov/omim,mapsme/omim,goblinr/omim,Endika/omim,lydonchandra/omim,milchakov/omim,vng/omim,ygorshenin/omim,mgsergio/omim,ygorshenin/omim,felipebetancur/omim,albertshift/omim,mapsme/omim,VladiMihaylenko/omim,igrechuhin/omim,dobriy-eeh/omim,andrewshadura/omim,igrechuhin/omim,rokuz/omim,bykoianko/omim,stangls/omim,therearesomewhocallmetim/omim,65apps/omim,igrechuhin/omim,Endika/omim,vasilenkomike/omim,victorbriz/omim,andrewshadura/omim,TimurTarasenko/omim,dkorolev/omim,krasin/omim,VladiMihay
lenko/omim,jam891/omim,alexzatsepin/omim,felipebetancur/omim,dobriy-eeh/omim,programming086/omim,krasin/omim,sidorov-panda/omim,Zverik/omim,ygorshenin/omim,Saicheg/omim,simon247/omim,trashkalmar/omim,augmify/omim,bykoianko/omim,felipebetancur/omim,jam891/omim,albertshift/omim,yunikkk/omim,darina/omim,Volcanoscar/omim,alexzatsepin/omim,gardster/omim,Zverik/omim,alexzatsepin/omim,mgsergio/omim,mpimenov/omim,krasin/omim,Zverik/omim,Komzpa/omim,mapsme/omim,VladiMihaylenko/omim,therearesomewhocallmetim/omim,Transtech/omim,yunikkk/omim,edl00k/omim,yunikkk/omim,bykoianko/omim,gardster/omim,augmify/omim,vladon/omim,Transtech/omim,vng/omim,simon247/omim,therearesomewhocallmetim/omim,goblinr/omim,mpimenov/omim,mpimenov/omim,alexzatsepin/omim,stangls/omim,milchakov/omim,gardster/omim,Saicheg/omim,guard163/omim,andrewshadura/omim,VladiMihaylenko/omim,goblinr/omim,65apps/omim,andrewshadura/omim,edl00k/omim,mgsergio/omim,stangls/omim,milchakov/omim,rokuz/omim,programming086/omim,mpimenov/omim,Komzpa/omim,Komzpa/omim,mapsme/omim,rokuz/omim,krasin/omim,milchakov/omim,vladon/omim,trashkalmar/omim,VladiMihaylenko/omim,trashkalmar/omim,kw217/omim,edl00k/omim,wersoo/omim,stangls/omim,goblinr/omim,kw217/omim,augmify/omim,UdjinM6/omim,syershov/omim,jam891/omim,jam891/omim,Saicheg/omim,Zverik/omim,matsprea/omim,TimurTarasenko/omim,ygorshenin/omim,Transtech/omim,VladiMihaylenko/omim,AlexanderMatveenko/omim,syershov/omim,AlexanderMatveenko/omim,rokuz/omim,stangls/omim,dkorolev/omim,mpimenov/omim,Zverik/omim,65apps/omim,augmify/omim,krasin/omim,goblinr/omim,Volcanoscar/omim,mgsergio/omim,milchakov/omim,Transtech/omim,igrechuhin/omim,mgsergio/omim,dkorolev/omim,vng/omim,igrechuhin/omim,andrewshadura/omim,dobriy-eeh/omim,syershov/omim,alexzatsepin/omim,Volcanoscar/omim,wersoo/omim,therearesomewhocallmetim/omim,AlexanderMatveenko/omim,Transtech/omim,lydonchandra/omim,UdjinM6/omim,albertshift/omim,Transtech/omim,vng/omim,gardster/omim,VladiMihaylenko/omim,vladon/omim,rokuz/omim,bykoianko/omim,albertshift/omim,bykoianko/omim,syershov/omim,dobriy-eeh/omim,alexzatsepin/omim,matsprea/omim,syershov/omim,victorbriz/omim,simon247/omim,trashkalmar/omim,mpimenov/omim,Saicheg/omim,bykoianko/omim,darina/omim,felipebetancur/omim,vladon/omim,VladiMihaylenko/omim,mgsergio/omim,Endika/omim,mgsergio/omim,darina/omim,gardster/omim,65apps/omim,AlexanderMatveenko/omim,wersoo/omim,stangls/omim,vladon/omim,kw217/omim,mapsme/omim,programming086/omim,augmify/omim,Transtech/omim,syershov/omim,guard163/omim,vng/omim,mgsergio/omim,albertshift/omim,mgsergio/omim,65apps/omim,Endika/omim,igrechuhin/omim,trashkalmar/omim,Zverik/omim,therearesomewhocallmetim/omim,kw217/omim,mgsergio/omim,sidorov-panda/omim,65apps/omim,vladon/omim,bykoianko/omim,Komzpa/omim,andrewshadura/omim,milchakov/omim,AlexanderMatveenko/omim,simon247/omim,Volcanoscar/omim,goblinr/omim,mapsme/omim,rokuz/omim,igrechuhin/omim,rokuz/omim,kw217/omim,trashkalmar/omim,vladon/omim,darina/omim,Volcanoscar/omim,guard163/omim,goblinr/omim,yunikkk/omim,ygorshenin/omim,trashkalmar/omim,AlexanderMatveenko/omim,mgsergio/omim,syershov/omim,vladon/omim,rokuz/omim,dobriy-eeh/omim,albertshift/omim,milchakov/omim,Volcanoscar/omim,jam891/omim,wersoo/omim,edl00k/omim,darina/omim,therearesomewhocallmetim/omim,AlexanderMatveenko/omim,Endika/omim,jam891/omim,therearesomewhocallmetim/omim,albertshift/omim,dkorolev/omim,lydonchandra/omim,felipebetancur/omim,matsprea/omim,Transtech/omim,65apps/omim,syershov/omim,Zverik/omim,wersoo/omim,albertshift/omim,lydonchandra/omim,jam891/omim,mapsme/omim,darina/omim,
gardster/omim,sidorov-panda/omim,simon247/omim,lydonchandra/omim,vasilenkomike/omim,yunikkk/omim,alexzatsepin/omim,bykoianko/omim,edl00k/omim,vasilenkomike/omim,stangls/omim,victorbriz/omim,vasilenkomike/omim,stangls/omim,matsprea/omim,alexzatsepin/omim,Zverik/omim,Volcanoscar/omim,mapsme/omim,Saicheg/omim,bykoianko/omim,guard163/omim,UdjinM6/omim,gardster/omim,alexzatsepin/omim,Komzpa/omim,edl00k/omim,mapsme/omim,vng/omim,VladiMihaylenko/omim,darina/omim,VladiMihaylenko/omim,sidorov-panda/omim,milchakov/omim,sidorov-panda/omim,AlexanderMatveenko/omim,stangls/omim,Zverik/omim,felipebetancur/omim,vng/omim,alexzatsepin/omim,dobriy-eeh/omim,stangls/omim,UdjinM6/omim,matsprea/omim,vasilenkomike/omim,Saicheg/omim,therearesomewhocallmetim/omim,igrechuhin/omim,Endika/omim,edl00k/omim,vasilenkomike/omim,syershov/omim,trashkalmar/omim,yunikkk/omim,mapsme/omim,mpimenov/omim,vladon/omim,Endika/omim,TimurTarasenko/omim,simon247/omim,ygorshenin/omim,victorbriz/omim,dkorolev/omim
|
[crawler] Add script to do manual renames.
|
#!/opt/local/bin/python
import hashlib
import json
import os.path
import sys
import string
for i, line in enumerate(sys.stdin):
(url, title, fileName1) = json.loads(line)
page1 = url[27:]
page2 = page1.replace('(', '%28').replace(')', '%29')
fileName2 = page2.replace('/', '_') + '_' + hashlib.md5(page2).hexdigest()[:8];
suffix = '.google_geocoded'
if os.path.exists(fileName2 + suffix):
if not os.path.exists(fileName1 + suffix):
cmd = 'mv "%s" "%s"' % (fileName2 + suffix, fileName1 + suffix)
print(cmd)
os.system(cmd)
|
<commit_before><commit_msg>[crawler] Add script to do manual renames.<commit_after>
|
#!/opt/local/bin/python
import hashlib
import json
import os.path
import sys
import string
for i, line in enumerate(sys.stdin):
(url, title, fileName1) = json.loads(line)
page1 = url[27:]
page2 = page1.replace('(', '%28').replace(')', '%29')
fileName2 = page2.replace('/', '_') + '_' + hashlib.md5(page2).hexdigest()[:8];
suffix = '.google_geocoded'
if os.path.exists(fileName2 + suffix):
if not os.path.exists(fileName1 + suffix):
cmd = 'mv "%s" "%s"' % (fileName2 + suffix, fileName1 + suffix)
print(cmd)
os.system(cmd)
|
[crawler] Add script to do manual renames.#!/opt/local/bin/python
import hashlib
import json
import os.path
import sys
import string
for i, line in enumerate(sys.stdin):
(url, title, fileName1) = json.loads(line)
page1 = url[27:]
page2 = page1.replace('(', '%28').replace(')', '%29')
fileName2 = page2.replace('/', '_') + '_' + hashlib.md5(page2).hexdigest()[:8];
suffix = '.google_geocoded'
if os.path.exists(fileName2 + suffix):
if not os.path.exists(fileName1 + suffix):
cmd = 'mv "%s" "%s"' % (fileName2 + suffix, fileName1 + suffix)
print(cmd)
os.system(cmd)
|
<commit_before><commit_msg>[crawler] Add script to do manual renames.<commit_after>#!/opt/local/bin/python
import hashlib
import json
import os.path
import sys
import string
for i, line in enumerate(sys.stdin):
(url, title, fileName1) = json.loads(line)
page1 = url[27:]
page2 = page1.replace('(', '%28').replace(')', '%29')
fileName2 = page2.replace('/', '_') + '_' + hashlib.md5(page2).hexdigest()[:8];
suffix = '.google_geocoded'
if os.path.exists(fileName2 + suffix):
if not os.path.exists(fileName1 + suffix):
cmd = 'mv "%s" "%s"' % (fileName2 + suffix, fileName1 + suffix)
print(cmd)
os.system(cmd)
|
|
4b36485fa49da975ce624f287febf02ac967ce03
|
tensorflow/mydatasets.py
|
tensorflow/mydatasets.py
|
import tensorflow as tf
import numpy as np
#import pandas as pd
def getdataset(myobj):
if myobj.dataset == 'mnist':
return getmnist()
if myobj.dataset == 'dailymintemperatures':
return getdailymintemperatures(myobj)
def getmnist():
#load mnist data
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data("/tmp/datasets/mnist.npz")
def create_mnist_dataset(data, labels, batch_size):
def gen():
for image, label in zip(data, labels):
yield image, label
ds = tf.data.Dataset.from_generator(gen, (tf.float32, tf.int32), ((28,28 ), ()))
return ds.repeat().batch(batch_size)
#train and validation dataset with different batch size
train_dataset = create_mnist_dataset(x_train, y_train, 10)
valid_dataset = create_mnist_dataset(x_test, y_test, 20)
print(type(x_train))
print(type(y_train))
print(type(train_dataset))
x_train = x_train.reshape((x_train.shape[0], 784))
x_test = x_test.reshape((x_test.shape[0], 784))
y_train = np.int32(y_train)
y_test = np.int32(y_test)
print(x_train.shape)
print(y_train.shape)
return x_train, y_train, x_test, y_test, 784, 10
def getdailymintemperatures(myobj):
url = 'https://raw.githubusercontent.com/jbrownlee/Datasets/master/daily-min-temperatures.csv'
file_path = tf.keras.utils.get_file("/tmp/daily-min-temperatures.csv", url)
data = np.genfromtxt(file_path, delimiter = ',', skip_header = 1, dtype = {'names': ('date', 'temp'), 'formats': (np.str, np.float)})
data = data['temp']
return data, None, None, None, myobj.size, myobj.classes
|
Add tests for neural net (I106).
|
Add tests for neural net (I106).
|
Python
|
agpl-3.0
|
rroart/stockstat,rroart/stockstat,rroart/stockstat,rroart/stockstat,rroart/stockstat,rroart/stockstat,rroart/stockstat
|
Add tests for neural net (I106).
|
import tensorflow as tf
import numpy as np
#import pandas as pd
def getdataset(myobj):
if myobj.dataset == 'mnist':
return getmnist()
if myobj.dataset == 'dailymintemperatures':
return getdailymintemperatures(myobj)
def getmnist():
#load mnist data
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data("/tmp/datasets/mnist.npz")
def create_mnist_dataset(data, labels, batch_size):
def gen():
for image, label in zip(data, labels):
yield image, label
ds = tf.data.Dataset.from_generator(gen, (tf.float32, tf.int32), ((28,28 ), ()))
return ds.repeat().batch(batch_size)
#train and validation dataset with different batch size
train_dataset = create_mnist_dataset(x_train, y_train, 10)
valid_dataset = create_mnist_dataset(x_test, y_test, 20)
print(type(x_train))
print(type(y_train))
print(type(train_dataset))
x_train = x_train.reshape((x_train.shape[0], 784))
x_test = x_test.reshape((x_test.shape[0], 784))
y_train = np.int32(y_train)
y_test = np.int32(y_test)
print(x_train.shape)
print(y_train.shape)
return x_train, y_train, x_test, y_test, 784, 10
def getdailymintemperatures(myobj):
url = 'https://raw.githubusercontent.com/jbrownlee/Datasets/master/daily-min-temperatures.csv'
file_path = tf.keras.utils.get_file("/tmp/daily-min-temperatures.csv", url)
data = np.genfromtxt(file_path, delimiter = ',', skip_header = 1, dtype = {'names': ('date', 'temp'), 'formats': (np.str, np.float)})
data = data['temp']
return data, None, None, None, myobj.size, myobj.classes
|
<commit_before><commit_msg>Add tests for neural net (I106).<commit_after>
|
import tensorflow as tf
import numpy as np
#import pandas as pd
def getdataset(myobj):
if myobj.dataset == 'mnist':
return getmnist()
if myobj.dataset == 'dailymintemperatures':
return getdailymintemperatures(myobj)
def getmnist():
#load mnist data
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data("/tmp/datasets/mnist.npz")
def create_mnist_dataset(data, labels, batch_size):
def gen():
for image, label in zip(data, labels):
yield image, label
ds = tf.data.Dataset.from_generator(gen, (tf.float32, tf.int32), ((28,28 ), ()))
return ds.repeat().batch(batch_size)
#train and validation dataset with different batch size
train_dataset = create_mnist_dataset(x_train, y_train, 10)
valid_dataset = create_mnist_dataset(x_test, y_test, 20)
print(type(x_train))
print(type(y_train))
print(type(train_dataset))
x_train = x_train.reshape((x_train.shape[0], 784))
x_test = x_test.reshape((x_test.shape[0], 784))
y_train = np.int32(y_train)
y_test = np.int32(y_test)
print(x_train.shape)
print(y_train.shape)
return x_train, y_train, x_test, y_test, 784, 10
def getdailymintemperatures(myobj):
url = 'https://raw.githubusercontent.com/jbrownlee/Datasets/master/daily-min-temperatures.csv'
file_path = tf.keras.utils.get_file("/tmp/daily-min-temperatures.csv", url)
data = np.genfromtxt(file_path, delimiter = ',', skip_header = 1, dtype = {'names': ('date', 'temp'), 'formats': (np.str, np.float)})
data = data['temp']
return data, None, None, None, myobj.size, myobj.classes
|
Add tests for neural net (I106).import tensorflow as tf
import numpy as np
#import pandas as pd
def getdataset(myobj):
if myobj.dataset == 'mnist':
return getmnist()
if myobj.dataset == 'dailymintemperatures':
return getdailymintemperatures(myobj)
def getmnist():
#load mnist data
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data("/tmp/datasets/mnist.npz")
def create_mnist_dataset(data, labels, batch_size):
def gen():
for image, label in zip(data, labels):
yield image, label
ds = tf.data.Dataset.from_generator(gen, (tf.float32, tf.int32), ((28,28 ), ()))
return ds.repeat().batch(batch_size)
#train and validation dataset with different batch size
train_dataset = create_mnist_dataset(x_train, y_train, 10)
valid_dataset = create_mnist_dataset(x_test, y_test, 20)
print(type(x_train))
print(type(y_train))
print(type(train_dataset))
x_train = x_train.reshape((x_train.shape[0], 784))
x_test = x_test.reshape((x_test.shape[0], 784))
y_train = np.int32(y_train)
y_test = np.int32(y_test)
print(x_train.shape)
print(y_train.shape)
return x_train, y_train, x_test, y_test, 784, 10
def getdailymintemperatures(myobj):
url = 'https://raw.githubusercontent.com/jbrownlee/Datasets/master/daily-min-temperatures.csv'
file_path = tf.keras.utils.get_file("/tmp/daily-min-temperatures.csv", url)
data = np.genfromtxt(file_path, delimiter = ',', skip_header = 1, dtype = {'names': ('date', 'temp'), 'formats': (np.str, np.float)})
data = data['temp']
return data, None, None, None, myobj.size, myobj.classes
|
<commit_before><commit_msg>Add tests for neural net (I106).<commit_after>import tensorflow as tf
import numpy as np
#import pandas as pd
def getdataset(myobj):
if myobj.dataset == 'mnist':
return getmnist()
if myobj.dataset == 'dailymintemperatures':
return getdailymintemperatures(myobj)
def getmnist():
#load mnist data
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data("/tmp/datasets/mnist.npz")
def create_mnist_dataset(data, labels, batch_size):
def gen():
for image, label in zip(data, labels):
yield image, label
ds = tf.data.Dataset.from_generator(gen, (tf.float32, tf.int32), ((28,28 ), ()))
return ds.repeat().batch(batch_size)
#train and validation dataset with different batch size
train_dataset = create_mnist_dataset(x_train, y_train, 10)
valid_dataset = create_mnist_dataset(x_test, y_test, 20)
print(type(x_train))
print(type(y_train))
print(type(train_dataset))
x_train = x_train.reshape((x_train.shape[0], 784))
x_test = x_test.reshape((x_test.shape[0], 784))
y_train = np.int32(y_train)
y_test = np.int32(y_test)
print(x_train.shape)
print(y_train.shape)
return x_train, y_train, x_test, y_test, 784, 10
def getdailymintemperatures(myobj):
url = 'https://raw.githubusercontent.com/jbrownlee/Datasets/master/daily-min-temperatures.csv'
file_path = tf.keras.utils.get_file("/tmp/daily-min-temperatures.csv", url)
data = np.genfromtxt(file_path, delimiter = ',', skip_header = 1, dtype = {'names': ('date', 'temp'), 'formats': (np.str, np.float)})
data = data['temp']
return data, None, None, None, myobj.size, myobj.classes
|
|
1dfb79111690a43fbf780119c153e096be9c122e
|
docker/kolla-ansible/kolla_zookeeper.py
|
docker/kolla-ansible/kolla_zookeeper.py
|
#!/usr/bin/python
# Copyright 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import kazoo
@contextlib.contextmanager
def zk_connection(zk_host, zk_port):
zk = kazoo.client.KazooClient(hosts='{}:{}'.format(zk_host, zk_port))
zk.start()
yield zk
zk.stop()
def main():
module = AnsibleModule(
argument_spec=dict(
zk_host=dict(required=True, type='str'),
zk_port=dict(required=True, type='str'),
path=dict(required=True, type='str'),
value=dict(required=False, default=None, type='str')
)
)
try:
zk_host = module.params.pop('zk_host')
zk_port = module.params.pop('zk_port')
path = module.params.pop('path')
value = module.params.pop('value')
changed = False
with zk_connection(zk_host, zk_port) as zk:
try:
zk.get(path)
except kazoo.exceptions.NoNodeError:
if value is None:
zk.create(path)
else:
zk.create(path, value=value.encode())
changed = True
module.exit_json(changed=changed)
except Exception as e:
module.exit_json(failed=True, changed=True, msg=e)
# import module snippets
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
|
Add ZooKeeper support in kolla-ansible
|
Add ZooKeeper support in kolla-ansible
In kolla-mesos, ZK will be used for storing information about
actions done in bootstrap.
Change-Id: Id7067a493b457cf7b73eb007852df7b13f4363b3
Partially-Implements: blueprint zookeeper
|
Python
|
apache-2.0
|
dardelean/kolla-ansible,negronjl/kolla,limamauricio/mykolla,stackforge/kolla,openstack/kolla,dardelean/kolla-ansible,GalenMa/kolla,mrangana/kolla,stackforge/kolla,negronjl/kolla,mrangana/kolla,nihilifer/kolla,stackforge/kolla,nihilifer/kolla,openstack/kolla,mandre/kolla,mandre/kolla,coolsvap/kolla,intel-onp/kolla,negronjl/kolla,GalenMa/kolla,rahulunair/kolla,toby82/kolla,rahulunair/kolla,tonyli71/kolla,mandre/kolla,limamauricio/mykolla,toby82/kolla,coolsvap/kolla,tonyli71/kolla,limamauricio/mykolla,coolsvap/kolla,toby82/kolla,dardelean/kolla-ansible,intel-onp/kolla
|
Add ZooKeeper support in kolla-ansible
In kolla-mesos, ZK will be used for storing information about
actions done in bootstrap.
Change-Id: Id7067a493b457cf7b73eb007852df7b13f4363b3
Partially-Implements: blueprint zookeeper
|
#!/usr/bin/python
# Copyright 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import kazoo
@contextlib.contextmanager
def zk_connection(zk_host, zk_port):
zk = kazoo.client.KazooClient(hosts='{}:{}'.format(zk_host, zk_port))
zk.start()
yield zk
zk.stop()
def main():
module = AnsibleModule(
argument_spec=dict(
zk_host=dict(required=True, type='str'),
zk_port=dict(required=True, type='str'),
path=dict(required=True, type='str'),
value=dict(required=False, default=None, type='str')
)
)
try:
zk_host = module.params.pop('zk_host')
zk_port = module.params.pop('zk_port')
path = module.params.pop('path')
value = module.params.pop('value')
changed = False
with zk_connection(zk_host, zk_port) as zk:
try:
zk.get(path)
except kazoo.exceptions.NoNodeError:
if value is None:
zk.create(path)
else:
zk.create(path, value=value.encode())
changed = True
module.exit_json(changed=changed)
except Exception as e:
module.exit_json(failed=True, changed=True, msg=e)
# import module snippets
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add ZooKeeper support in kolla-ansible
In kolla-mesos, ZK will be used for storing information about
actions done in bootstrap.
Change-Id: Id7067a493b457cf7b73eb007852df7b13f4363b3
Partially-Implements: blueprint zookeeper<commit_after>
|
#!/usr/bin/python
# Copyright 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import kazoo
@contextlib.contextmanager
def zk_connection(zk_host, zk_port):
zk = kazoo.client.KazooClient(hosts='{}:{}'.format(zk_host, zk_port))
zk.start()
yield zk
zk.stop()
def main():
module = AnsibleModule(
argument_spec=dict(
zk_host=dict(required=True, type='str'),
zk_port=dict(required=True, type='str'),
path=dict(required=True, type='str'),
value=dict(required=False, default=None, type='str')
)
)
try:
zk_host = module.params.pop('zk_host')
zk_port = module.params.pop('zk_port')
path = module.params.pop('path')
value = module.params.pop('value')
changed = False
with zk_connection(zk_host, zk_port) as zk:
try:
zk.get(path)
except kazoo.exceptions.NoNodeError:
if value is None:
zk.create(path)
else:
zk.create(path, value=value.encode())
changed = True
module.exit_json(changed=changed)
except Exception as e:
module.exit_json(failed=True, changed=True, msg=e)
# import module snippets
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
|
Add ZooKeeper support in kolla-ansible
In kolla-mesos, ZK will be used for storing information about
actions done in bootstrap.
Change-Id: Id7067a493b457cf7b73eb007852df7b13f4363b3
Partially-Implements: blueprint zookeeper#!/usr/bin/python
# Copyright 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import kazoo
@contextlib.contextmanager
def zk_connection(zk_host, zk_port):
zk = kazoo.client.KazooClient(hosts='{}:{}'.format(zk_host, zk_port))
zk.start()
yield zk
zk.stop()
def main():
module = AnsibleModule(
argument_spec=dict(
zk_host=dict(required=True, type='str'),
zk_port=dict(required=True, type='str'),
path=dict(required=True, type='str'),
value=dict(required=False, default=None, type='str')
)
)
try:
zk_host = module.params.pop('zk_host')
zk_port = module.params.pop('zk_port')
path = module.params.pop('path')
value = module.params.pop('value')
changed = False
with zk_connection(zk_host, zk_port) as zk:
try:
zk.get(path)
except kazoo.exceptions.NoNodeError:
if value is None:
zk.create(path)
else:
zk.create(path, value=value.encode())
changed = True
module.exit_json(changed=changed)
except Exception as e:
module.exit_json(failed=True, changed=True, msg=e)
# import module snippets
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add ZooKeeper support in kolla-ansible
In kolla-mesos, ZK will be used for storing information about
actions done in bootstrap.
Change-Id: Id7067a493b457cf7b73eb007852df7b13f4363b3
Partially-Implements: blueprint zookeeper<commit_after>#!/usr/bin/python
# Copyright 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import kazoo
@contextlib.contextmanager
def zk_connection(zk_host, zk_port):
zk = kazoo.client.KazooClient(hosts='{}:{}'.format(zk_host, zk_port))
zk.start()
yield zk
zk.stop()
def main():
module = AnsibleModule(
argument_spec=dict(
zk_host=dict(required=True, type='str'),
zk_port=dict(required=True, type='str'),
path=dict(required=True, type='str'),
value=dict(required=False, default=None, type='str')
)
)
try:
zk_host = module.params.pop('zk_host')
zk_port = module.params.pop('zk_port')
path = module.params.pop('path')
value = module.params.pop('value')
changed = False
with zk_connection(zk_host, zk_port) as zk:
try:
zk.get(path)
except kazoo.exceptions.NoNodeError:
if value is None:
zk.create(path)
else:
zk.create(path, value=value.encode())
changed = True
module.exit_json(changed=changed)
except Exception as e:
module.exit_json(failed=True, changed=True, msg=e)
# import module snippets
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
|
|
2544eab8770577b3dfe3ae4559965393eab69a45
|
python/tag_img.py
|
python/tag_img.py
|
import requests
import json
# Tag an image based on detected visual content which mean running a CNN on top of it.
# https://pixlab.io/#/cmd?id=tagimg for more info.
# Target Image: Change to any link or switch to POST if you want to upload your image directly, refer to the sample set for more info.
img = 'https://s-media-cache-ak0.pinimg.com/originals/35/d0/f6/35d0f6ee0e40306c41cfd714c625f78e.jpg'
# Your PixLab key
key = 'My_PixLab_Key'
# Censure an image based on its NSFW score
req = requests.get('https://api.pixlab.io/tagimg',params={'img':img,'key':key})
reply = req.json()
if reply['status'] != 200:
print (reply['error'])
else:
total = len(reply['tags']) # Total tags
print ("Total tags: "+str(total))
for tag in reply['tags']:
print("Tag: "+tag['name']+" - Confidence: "+str(tag['confidence']))
|
Tag an image based on detected visual content which mean running a CNN on top of it.
|
Tag an image based on detected visual content which mean running a CNN on top of it.
|
Python
|
bsd-2-clause
|
symisc/pixlab,symisc/pixlab,symisc/pixlab
|
Tag an image based on detected visual content which mean running a CNN on top of it.
|
import requests
import json
# Tag an image based on detected visual content which mean running a CNN on top of it.
# https://pixlab.io/#/cmd?id=tagimg for more info.
# Target Image: Change to any link or switch to POST if you want to upload your image directly, refer to the sample set for more info.
img = 'https://s-media-cache-ak0.pinimg.com/originals/35/d0/f6/35d0f6ee0e40306c41cfd714c625f78e.jpg'
# Your PixLab key
key = 'My_PixLab_Key'
# Censure an image based on its NSFW score
req = requests.get('https://api.pixlab.io/tagimg',params={'img':img,'key':key})
reply = req.json()
if reply['status'] != 200:
print (reply['error'])
else:
total = len(reply['tags']) # Total tags
print ("Total tags: "+str(total))
for tag in reply['tags']:
print("Tag: "+tag['name']+" - Confidence: "+str(tag['confidence']))
|
<commit_before><commit_msg>Tag an image based on detected visual content which mean running a CNN on top of it.<commit_after>
|
import requests
import json
# Tag an image based on detected visual content which mean running a CNN on top of it.
# https://pixlab.io/#/cmd?id=tagimg for more info.
# Target Image: Change to any link or switch to POST if you want to upload your image directly, refer to the sample set for more info.
img = 'https://s-media-cache-ak0.pinimg.com/originals/35/d0/f6/35d0f6ee0e40306c41cfd714c625f78e.jpg'
# Your PixLab key
key = 'My_PixLab_Key'
# Censure an image based on its NSFW score
req = requests.get('https://api.pixlab.io/tagimg',params={'img':img,'key':key})
reply = req.json()
if reply['status'] != 200:
print (reply['error'])
else:
total = len(reply['tags']) # Total tags
print ("Total tags: "+str(total))
for tag in reply['tags']:
print("Tag: "+tag['name']+" - Confidence: "+str(tag['confidence']))
|
Tag an image based on detected visual content which mean running a CNN on top of it.import requests
import json
# Tag an image based on detected visual content which mean running a CNN on top of it.
# https://pixlab.io/#/cmd?id=tagimg for more info.
# Target Image: Change to any link or switch to POST if you want to upload your image directly, refer to the sample set for more info.
img = 'https://s-media-cache-ak0.pinimg.com/originals/35/d0/f6/35d0f6ee0e40306c41cfd714c625f78e.jpg'
# Your PixLab key
key = 'My_PixLab_Key'
# Censure an image based on its NSFW score
req = requests.get('https://api.pixlab.io/tagimg',params={'img':img,'key':key})
reply = req.json()
if reply['status'] != 200:
print (reply['error'])
else:
total = len(reply['tags']) # Total tags
print ("Total tags: "+str(total))
for tag in reply['tags']:
print("Tag: "+tag['name']+" - Confidence: "+str(tag['confidence']))
|
<commit_before><commit_msg>Tag an image based on detected visual content which mean running a CNN on top of it.<commit_after>import requests
import json
# Tag an image based on detected visual content which mean running a CNN on top of it.
# https://pixlab.io/#/cmd?id=tagimg for more info.
# Target Image: Change to any link or switch to POST if you want to upload your image directly, refer to the sample set for more info.
img = 'https://s-media-cache-ak0.pinimg.com/originals/35/d0/f6/35d0f6ee0e40306c41cfd714c625f78e.jpg'
# Your PixLab key
key = 'My_PixLab_Key'
# Censure an image based on its NSFW score
req = requests.get('https://api.pixlab.io/tagimg',params={'img':img,'key':key})
reply = req.json()
if reply['status'] != 200:
print (reply['error'])
else:
total = len(reply['tags']) # Total tags
print ("Total tags: "+str(total))
for tag in reply['tags']:
print("Tag: "+tag['name']+" - Confidence: "+str(tag['confidence']))
|
|
0b6ee2e0f4a7431a120968fb487757fa53e9dc7d
|
server/kcaa/kcsapi_util_test.py
|
server/kcaa/kcsapi_util_test.py
|
#!/usr/bin/env python
import pytest
import kcsapi_util
class TestKCSAPIHandler(object):
def pytest_funcarg__kcsapi_handler(self):
return kcsapi_util.KCSAPIHandler(None, False)
def test_get_kcsapi_responses_ignore_unrelated_request(
self, kcsapi_handler):
entries = [
{
'request': {
'url': 'http://www.example.com/unrelated/url',
}
},
]
assert list(kcsapi_handler.get_kcsapi_responses(entries)) == []
def main():
import doctest
doctest.testmod(kcsapi_util)
import sys
sys.exit(pytest.main(args=[__file__.replace('.pyc', '.py')]))
if __name__ == '__main__':
main()
|
Add a test for kcsapi_util.
|
Add a test for kcsapi_util.
|
Python
|
apache-2.0
|
kcaa/kcaa,kcaa/kcaa,kcaa/kcaa,kcaa/kcaa
|
Add a test for kcsapi_util.
|
#!/usr/bin/env python
import pytest
import kcsapi_util
class TestKCSAPIHandler(object):
def pytest_funcarg__kcsapi_handler(self):
return kcsapi_util.KCSAPIHandler(None, False)
def test_get_kcsapi_responses_ignore_unrelated_request(
self, kcsapi_handler):
entries = [
{
'request': {
'url': 'http://www.example.com/unrelated/url',
}
},
]
assert list(kcsapi_handler.get_kcsapi_responses(entries)) == []
def main():
import doctest
doctest.testmod(kcsapi_util)
import sys
sys.exit(pytest.main(args=[__file__.replace('.pyc', '.py')]))
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add a test for kcsapi_util.<commit_after>
|
#!/usr/bin/env python
import pytest
import kcsapi_util
class TestKCSAPIHandler(object):
def pytest_funcarg__kcsapi_handler(self):
return kcsapi_util.KCSAPIHandler(None, False)
def test_get_kcsapi_responses_ignore_unrelated_request(
self, kcsapi_handler):
entries = [
{
'request': {
'url': 'http://www.example.com/unrelated/url',
}
},
]
assert list(kcsapi_handler.get_kcsapi_responses(entries)) == []
def main():
import doctest
doctest.testmod(kcsapi_util)
import sys
sys.exit(pytest.main(args=[__file__.replace('.pyc', '.py')]))
if __name__ == '__main__':
main()
|
Add a test for kcsapi_util.#!/usr/bin/env python
import pytest
import kcsapi_util
class TestKCSAPIHandler(object):
def pytest_funcarg__kcsapi_handler(self):
return kcsapi_util.KCSAPIHandler(None, False)
def test_get_kcsapi_responses_ignore_unrelated_request(
self, kcsapi_handler):
entries = [
{
'request': {
'url': 'http://www.example.com/unrelated/url',
}
},
]
assert list(kcsapi_handler.get_kcsapi_responses(entries)) == []
def main():
import doctest
doctest.testmod(kcsapi_util)
import sys
sys.exit(pytest.main(args=[__file__.replace('.pyc', '.py')]))
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add a test for kcsapi_util.<commit_after>#!/usr/bin/env python
import pytest
import kcsapi_util
class TestKCSAPIHandler(object):
def pytest_funcarg__kcsapi_handler(self):
return kcsapi_util.KCSAPIHandler(None, False)
def test_get_kcsapi_responses_ignore_unrelated_request(
self, kcsapi_handler):
entries = [
{
'request': {
'url': 'http://www.example.com/unrelated/url',
}
},
]
assert list(kcsapi_handler.get_kcsapi_responses(entries)) == []
def main():
import doctest
doctest.testmod(kcsapi_util)
import sys
sys.exit(pytest.main(args=[__file__.replace('.pyc', '.py')]))
if __name__ == '__main__':
main()
|