# Generated by Django 2.1.11 on 2020-02-18 19:23
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('qa', '0050_auto_20200131_1020'),
]
operations = [
migrations.AddField(
model_name='test',
name='wrap_high',
field=models.FloatField(blank=True, help_text='Maximum value at which test wraps around to minimum value', null=True),
),
migrations.AddField(
model_name='test',
name='wrap_low',
field=models.FloatField(blank=True, help_text='Minimum value at which test wraps around to maximum value', null=True),
),
migrations.AlterField(
model_name='test',
name='type',
field=models.CharField(choices=[('boolean', 'Boolean'), ('simple', 'Simple Numerical'), ('wraparound', 'Wraparound'), ('multchoice', 'Multiple Choice'), ('constant', 'Constant'), ('composite', 'Composite'), ('date', 'Date'), ('datetime', 'Date & Time'), ('string', 'String'), ('scomposite', 'String Composite/JSON'), ('upload', 'File Upload')], default='simple', help_text='Indicate if this test is a Boolean,Simple Numerical,Wraparound,Multiple Choice,Constant,Composite,Date,Date & Time,String,String Composite/Json,File Upload', max_length=10),
),
]
|
class Solution:
def breakPalindrome(self, palindrome: str) -> str:
"""
:type palindrome: str
:rtype: str
"""
for i in range(len(palindrome)):
if palindrome[i] == 'a':
continue
if (i == int(len(palindrome) / 2)):
continue
return palindrome[:i] + 'a' + palindrome[i+1:]
if len(palindrome) == 1:
return ''
elif palindrome[-1] == 'a': # xxxxa
return palindrome[:-1] + 'b'
        return ''  # impossible
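# Minimal usage sketch (not part of the original file): exercises breakPalindrome
# on a few inputs to illustrate the expected behaviour.
if __name__ == "__main__":
    sol = Solution()
    print(sol.breakPalindrome("abccba"))  # "aaccba" - first non-'a' replaced with 'a'
    print(sol.breakPalindrome("aba"))     # "abb"   - middle char skipped, last 'a' -> 'b'
    print(sol.breakPalindrome("a"))       # ""      - a single character cannot be broken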
|
import os
import sublime
from . import GitWindowCommand, git_root
class GitOpenConfigFileCommand(GitWindowCommand):
def run(self):
working_dir = git_root(self.get_working_dir())
config_file = os.path.join(working_dir, '.git/config')
if os.path.exists(config_file):
self.window.open_file(config_file)
else:
sublime.status_message("No config found")
class GitOpenConfigUrlCommand(GitWindowCommand):
def run(self, url_param):
self.run_command(['git', 'config', url_param], self.url_done)
def url_done(self, result):
results = [r for r in result.rstrip().split('\n') if r.startswith("http")]
if len(results):
url = results[0]
            user_end = url.find('@')  # find() returns -1 when no credentials are embedded
if user_end > -1:
# Remove user and pass from url
user_start = url.index('//') + 1
user = url[user_start + 1:user_end + 1]
url = url.replace(user, '')
self.window.run_command('open_url', {"url": url})
else:
sublime.status_message("No url to open")
|
# -*- coding: utf-8 -*-
__all__ = ["logger", "as_tensor_variable", "deprecation_warning", "deprecated"]
import logging
import warnings
from functools import wraps
from aesara_theano_fallback import aesara as theano
logger = logging.getLogger("exoplanet")
def as_tensor_variable(x, dtype="float64", **kwargs):
t = theano.tensor.as_tensor_variable(x, **kwargs)
if dtype is None:
return t
return t.astype(dtype)
def deprecation_warning(msg):
warnings.warn(msg, category=DeprecationWarning, stacklevel=2)
def deprecated(alternate=None): # pragma: no cover
def wrapper(func, alternate=alternate):
msg = "'{0}' is deprecated.".format(func.__name__)
if alternate is not None:
msg += " Use '{0}' instead.".format(alternate)
@wraps(func)
def f(*args, **kwargs):
deprecation_warning(msg)
return func(*args, **kwargs)
return f
return wrapper
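# Usage sketch (illustrative only; 'old_area' and 'new_area' are hypothetical names):
# wrapping a function with @deprecated emits a DeprecationWarning on every call.
if __name__ == "__main__":  # pragma: no cover
    @deprecated(alternate="new_area")
    def old_area(r):
        return 3.14159 * r * r
    warnings.simplefilter("always", DeprecationWarning)
    old_area(2.0)  # warns: 'old_area' is deprecated. Use 'new_area' instead.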
|
"""Begin Imports"""
# Internal imports
from package.package_manager import PackageManager
from package.package_manager import Package
from package.pacman_wrapper import invoke_pacman
# Python stdlib imports
import requests
"""End Imports"""
def send_aur_rpc(arguments):
aur_req = requests.get(f"https://aur.archlinux.org/rpc/?v=5{arguments}")
return (aur_req.status_code, aur_req.json())
def do_aur_search(package_name):
return send_aur_rpc(f"&type=search&arg={package_name}")
def do_aur_lookup(package_names):
return send_aur_rpc(
f"&type=info{''.join(f'&arg[]={pkg}' for pkg in package_names)}"
)
def generate_aur_packages(aur_data):
if aur_data[0] != 200:
return None
aur_json = aur_data[1]["results"]
return (
Package(pkg_json["Name"], pkg_json["Description"], "aur", pkg_json["Version"])
for pkg_json in aur_json
)
class AURManager(PackageManager):
def __init__(self):
super().__init__()
def search(self, package_name):
return generate_aur_packages(do_aur_search(package_name))
def lookup(self, package_names):
return generate_aur_packages(do_aur_lookup(package_names))
def install(self, package_name):
pass
def check_for_updates(self):
print("Checking for updates to AUR packages...")
package_data = invoke_pacman(["-Qm"], want_output=True)
package_splits = (line.split(" ") for line in package_data.splitlines())
packages = dict((split[0], split[1]) for split in package_splits)
aur_packages = self.lookup(pkg for pkg in packages)
for pkg in aur_packages:
local_ver = packages[pkg.get_name()]
if local_ver != pkg.get_version():
# Update available
print("Update for " + pkg.get_name())
del packages[pkg.get_name()]
# Non-AUR packs remain in the package dictionary
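# Illustrative usage sketch (not part of the original module; the package name is hypothetical):
# search the AUR and print matching package names and versions.
if __name__ == "__main__":
    manager = AURManager()
    results = manager.search("some-package-name")  # hypothetical query
    if results is not None:
        for pkg in results:
            print(pkg.get_name(), pkg.get_version())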
|
def delete_duplicates(a_list):
    """This function receives a list and returns a new list based on the
    original list but without its duplicate elements."""
new_list = []
for i in a_list:
        if i not in new_list:
new_list.append(i)
return new_list
def main():
    print "Please, enter a list of things you want:"
raw_list = raw_input("> ")
a_list = raw_list.split()
print "your new list would be this:"
print delete_duplicates(a_list)
if __name__ == '__main__':
main()
|
import unittest
from app.models import User
class TestUser(unittest.TestCase):
"""
Test class to test the behaviour of the User class
"""
def setUp(self):
"""
Set up method that will run before every test
"""
self.new_user = User(
username='nyambura',
name='liz nyambura',
email='nyamburaliz91@mail.com',
password='11223344'
)
def test_no_access_password(self):
"""
Test to check if the password is not accessible
"""
with self.assertRaises(AttributeError):
self.new_user.password
|
"""
This module provides exception classes.
"""
__all__ = [
'Error',
'ApiNotAvailable',
'ApiError',
'HttpFailed',
'ActionFailed',
'NetworkError',
'TimingError',
]
class Error(Exception):
    """Base class for all `aiocqhttp` exceptions."""
pass
class ApiNotAvailable(Error):
    """The OneBot API is not available."""
pass
class ApiError(Error, RuntimeError):
    """An error occurred while calling the OneBot API."""
pass
class HttpFailed(ApiError):
    """The HTTP response status code is not 2xx."""
def __init__(self, status_code: int):
        self.status_code = status_code
        """The HTTP status code."""
def __repr__(self):
return f'<HttpFailed, status_code={self.status_code}>'
def __str__(self):
return self.__repr__()
class ActionFailed(ApiError):
"""
    OneBot received the API request but failed to execute it.
```py
except ActionFailed as e:
print(e)
        # or check the return code
if e.retcode == 12345:
pass
```
"""
def __init__(self, result: dict):
self.result = result
@property
    def retcode(self) -> int:
        """The return code of the OneBot API request."""
return self.result['retcode']
def __repr__(self):
return "<ActionFailed " + ", ".join(
f"{k}={repr(v)}" for k, v in self.result.items()) + ">"
def __str__(self):
return self.__repr__()
class NetworkError(Error, IOError):
    """Network error."""
pass
class TimingError(Error):
    """Timing error."""
pass
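# Minimal usage sketch (not part of the original module): constructing and
# inspecting an ActionFailed error with a hypothetical result payload.
if __name__ == '__main__':
    try:
        raise ActionFailed({'retcode': 100, 'msg': 'permission denied'})
    except ActionFailed as e:
        print(e.retcode)  # 100
        print(e)          # <ActionFailed retcode=100, msg='permission denied'>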
|
import unittest
from event_reporter import EventReporter
import fakeredis
import os
class EventReporterTest(unittest.TestCase):
def setUp(self):
self.conn = fakeredis.FakeStrictRedis()
self.conn.flushdb()
# override with your own UA to verify test results in GA
self.my_ua = os.getenv('UA_ID', 'UA-116198943-3')
self.er = EventReporter(UA=self.my_ua, conn=self.conn)
def test_base(self):
"""
Checks to see that the base EventReporter class loads
"""
pass
@unittest.skip('will fail if env var overrides UA_ID')
def test_args(self):
"""
Checks to see that EventReporter stores UA
"""
self.assertTrue(self.er.UA == 'UA-116198943-3')
def test_store_fetch_dispatch(self):
"""
Checks to see that the EventReporter stores expected data
"""
ar = self.er.store(
'ga',
'event',
'20538abc-a8af-46e0-b292-0999d94468e9',
category='user',
action='action_name',
aip='1',
uip='1.2.3.4',
ds='web',
ua='my-useragent-test')
self.assertTrue(ar == None)
expected = {
'handler': 'ga',
'etype': 'event',
'clientid': '20538abc-a8af-46e0-b292-0999d94468e9',
'ts': 1548546584914,
'args': {
'category': 'user',
'action': 'action_name',
'aip': '1',
'uip': '1.2.3.4',
'ds': 'web',
'ua': 'my-useragent-test'
}
}
r = self.er.fetch()
self.assertTrue(isinstance(r['ts'], int))
# ts varies
del expected['ts']
self.assertDictContainsSubset(expected, r)
# NOTE: live test.
self.assertTrue(self.er.dispatch(r))
# print(f'Data has been sent to {self.my_ua}. Please check real-time stats to confirm correctness.')
def test_store_fetch_oldest_double(self):
"""
Checks to see that the EventReporter fetch_oldest gets expected data
"""
ar = self.er.store(
'ga',
'event',
'20538abc-a8af-46e0-b292-0999d94468e9',
category='user',
action='action_name',
aip='1',
uip='1.2.3.4',
ds='web')
ar2 = self.er.store(
'ga',
'event',
'20538abc-a8af-46e0-b292-0999d94468e9',
category='user',
action='action_name_2',
aip='1',
uip='1.2.3.4',
ds='web')
self.assertTrue(ar == None)
expected = {
'handler': 'ga',
'etype': 'event',
'clientid': '20538abc-a8af-46e0-b292-0999d94468e9',
'ts': 1548546584914,
'args': {
'category': 'user',
'action': 'action_name',
'aip': '1',
'uip': '1.2.3.4',
'ds': 'web'
}
}
r = self.er.fetch_oldest()
self.assertTrue(isinstance(r['ts'], int))
# ts varies
del expected['ts']
self.assertDictContainsSubset(expected, r)
def test_store_fetch_oldest_single(self):
"""
Checks to see that the EventReporter fetch_oldest gets expected data
"""
ar = self.er.store(
'ga',
'event',
'20538abc-a8af-46e0-b292-0999d94468e9',
category='user',
action='action_name',
aip='1',
uip='1.2.3.4',
ds='web')
self.assertTrue(ar == None)
expected = {
'handler': 'ga',
'etype': 'event',
'clientid': '20538abc-a8af-46e0-b292-0999d94468e9',
'ts': 1548546584914,
'args': {
'category': 'user',
'action': 'action_name',
'aip': '1',
'uip': '1.2.3.4',
'ds': 'web'
}
}
r = self.er.fetch_oldest()
self.assertTrue(isinstance(r['ts'], int))
# ts varies
del expected['ts']
self.assertDictContainsSubset(expected, r)
def test_unsafe_store(self):
'''
Verify that e.g. a redis or argument failure throws an exception.
'''
with self.assertRaises(TypeError):
self.er.store(
category='user',
action='action_name',
aip='1',
uip='1.2.3.4',
ds='web')
def test_safe_store_fail(self):
'''
Verify that e.g. a redis or argument failure does not throw an exception.
'''
r = self.er.safe_store(
None,
None,
None,
action='action_name',
aip='1',
uip='1.2.3.4',
ds='web')
self.assertTrue(r == False)
def test_safe_store_success(self):
'''
Verify that e.g. a redis or argument failure does not throw an exception.
'''
r = self.er.safe_store(
'ga',
'event',
'20538abc-a8af-46e0-b292-0999d94468e9',
category='user',
action='action_name',
aip='1',
uip='1.2.3.4',
ds='web')
self.assertTrue(r == None)
def test_safe_store_success_honey(self):
'''
Verify that e.g. a redis or argument failure does not throw an exception.
'''
r = self.er.safe_store(
'honey',
'event',
'20538abc-a8af-46e0-b292-0999d94468e9',
category='user',
action='action_name',
aip='1',
uip='1.2.3.4',
ds='web')
self.assertTrue(r == None)
def test_store_fetch_dispatch_referrer(self):
"""
Checks to see that the EventReporter stores expected data with referrer.
Looks like it's unnecessary to urlencode prior to handing it off.
"""
ar = self.er.store(
'ga',
'event',
'20538abc-a8af-46e0-b292-0999d94468e9',
category='user',
action='action_name',
aip='1',
uip='1.2.3.4',
ds='web',
dr='http://www.test.com')
self.assertTrue(ar == None)
expected = {
'handler': 'ga',
'etype': 'event',
'clientid': '20538abc-a8af-46e0-b292-0999d94468e9',
'ts': 1548546584914,
'args': {
'category': 'user',
'action': 'action_name',
'aip': '1',
'uip': '1.2.3.4',
'ds': 'web',
'dr': 'http://www.test.com'
}
}
r = self.er.fetch()
self.assertTrue(isinstance(r['ts'], int))
# ts varies
del expected['ts']
self.assertDictContainsSubset(expected, r)
# NOTE: live test.
self.assertTrue(self.er.dispatch(r))
@unittest.skip('will fail if env var SLACK_WEBHOOK_TEST_URL is missing')
def test_store_fetch_dispatch_slack(self):
"""
Checks to see that the EventReporter stores expected data for slack.
Then try delivery. Note: requires active slack token.
"""
ar = self.er.store(
'slack',
'event',
'20538abc-a8af-46e0-b292-0999d94468e9',
webhook=os.getenv('SLACK_WEBHOOK_TEST_URL'),
message='my message test')
self.assertTrue(ar == None)
expected = {
'handler': 'slack',
'etype': 'event',
'clientid': '20538abc-a8af-46e0-b292-0999d94468e9',
'ts': 1548546584914,
'args': {
'message': 'my message test'
}
}
r = self.er.fetch()
self.assertTrue(isinstance(r['ts'], int))
# varies
del expected['ts']
# stash
wh = r['args']['webhook']
del r['args']['webhook']
self.assertDictContainsSubset(expected, r)
r['args']['webhook'] = wh
# NOTE: live test.
self.assertTrue(self.er.dispatch(r))
# Make the tests conveniently executable
if __name__ == "__main__":
unittest.main()
|
import firebase_admin
import random
from firebase_admin import credentials
from firebase_admin import firestore
# Use a service account
cred = credentials.Certificate('db-service-account.json')
firebase_admin.initialize_app(cred)
db = firestore.client()
# Settings
collection_name = 'dict-nl'
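# Note: Firestore batched writes are limited to 500 operations, so stay just under that limit.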
batch_size = 499
d = open('nl.txt', 'r')
lines = d.readlines()
batch_index = 0
current_batch_size = 0
batch = db.batch()
for line in lines:
word = line.strip()
word_doc = db.collection(collection_name).document()
batch.set(word_doc, {
u'word': word,
u'length': len(word),
u'random': random.randint(0, 2**32-1)
}
)
current_batch_size = current_batch_size + 1
    if current_batch_size >= batch_size:
        print("Committing batch: ", batch_index)
batch.commit()
batch = db.batch()
current_batch_size = 0
batch_index = batch_index + 1
batch.commit()
|
# Common code
import numpy as np
# Common formula, All Models
def Cost_waiting(f, y, t01, t02, t11, t12, fbar, Pw, epsilon):
# Cost of waiting
if (f >= fbar):
t0 = t02
t1 = t12
else:
t0 = t01
t1 = t11
Cw = Pw * (t0 + t1 * (epsilon/f)) * y
return Cw
# Common formulae, Models III and IV
def cycle_time(f, n, d, y, betav, R, L, Tl):
# Compute the cycle time
tc = (y/f) * betav / (3600.0 * float(n)) + R + 2.0 * L / d * Tl
return tc
def max_freq_theta(n, y, l, L, kv, thetamin):
#Max freq to have theta >= thetamin
ft = l * y /(2.0 * L * thetamin * kv * float(n))
return ft
def penalty_crowding(f, n, y, l, L, kv, xi, rho, thetamin):
# Return cost of crowding
ft = max_freq_theta(n, y, l, L, kv, thetamin)
if f <= ft:
delta = xi + rho * (l * y /(n * f * 2.0 * L * kv)) # Crowding penalty
else:
delta = 1.0
return delta
# Common accounting formulae, All Models
def amortization_factor(discount_rate, technical_life):
# Compute amortization factor
from math import pow
if technical_life > 0:
af = discount_rate/(1.0 - 1.0/pow(1.0 + discount_rate, technical_life))
else:
af = 0.0
return af
def infra_fixed_cost_per_hour(mode, hours_per_year, L, width_m, land_cost_per_hectare, infracost_m, infralife_m, inframaint_m, discount_rate):
# Compute infrastructure fixed cost per equivalent hour of service in $/h
area = L * 1000 * width_m[mode] #(mq)
value_land = area / 10000.0 * land_cost_per_hectare * 1000000.0 #($)
value_line = L * infracost_m[mode] * 1000000.0 #($)
tot_value = (value_land + value_line)
af = amortization_factor(discount_rate, infralife_m[mode])
perhour = tot_value * af / hours_per_year
perhour += inframaint_m[mode]
# print m, value_line * af/hours_per_year, value_land * af/hours_per_year
return perhour
def stop_fixed_cost_per_hour(mode, hours_per_year, stopcost_m, infralife_m, discount_rate):
# Compute stop fixed cost per equivalent hour of service in $/h
value_stop = stopcost_m[mode] * 1000000.0 #($)
af = amortization_factor(discount_rate, infralife_m[mode])
perhour = value_stop * af / hours_per_year
return perhour
def rolling_stock_cost_per_hour(mode, hours_per_year, vehcost_m, vehlife_m, discount_rate, res_value_rs):
# Compute the rolling cost per equivalent hour of service in $/h
af = amortization_factor(discount_rate, vehlife_m[mode])
perhour = vehcost_m[mode] * 1000000.0 * (1.0 - res_value_rs) * af / hours_per_year
return perhour
# Common utility code
def print_some_computational_details(mode_label, opt_gap, heur_gap, number_iterations, tot_timespent):
# Print some computational details
print 'For mode %s the avg optimality gap is %.1f%% and the worst is %.1f%%' % (mode_label, np.average(opt_gap), np.amax(opt_gap))
print ' the median optimality gap is %.1f%% and the 80th percentile is %.1f%%' % (np.percentile(opt_gap, 50), np.percentile(opt_gap, 80))
print ' the avg heuristic gap is %.1f%% and the worst is %.1f%%' % (np.average(heur_gap), np.amax(heur_gap))
print ' the median heuristic gap is %.1f%% and the 80th percentile is %.1f%%' % (np.percentile(heur_gap, 50), np.percentile(heur_gap, 80))
print ' L-BFGS-B algorithm iterated in avg %.1f times, using in total %.2f seconds' \
% (np.average(number_iterations),tot_timespent)
def print_ratios_avg_stop_spacing(min_d_range, Tl_m):
# print a comparison of ratios of avg stop spacing
from math import sqrt
num_modes = len(Tl_m)
for m1 in range(num_modes - 1):
Tl1 = Tl_m[m1]
avg_d1 = np.average(min_d_range[m1])
for m2 in range(m1 + 1, num_modes):
Tl2 = Tl_m[m2]
avg_d2 = np.average(min_d_range[m2])
ratio = avg_d1/avg_d2
ratio_approx = sqrt(Tl1/Tl2)
error = (ratio - ratio_approx) / ratio * 100
            print 'Ratio of avg dist between modes %2d %2d = %.2f approx = %.2f accuracy %.0f%%' \
% (m1, m2, ratio, ratio_approx, error)
def print_itersection_points(y_range, min_avg_tot_cost, mode_label):
# Print points of total cost intersection between modes
from shapely.geometry import LineString
num_modes = len(y_range)
for m1 in range(num_modes - 1):
for m2 in range(m1 + 1, num_modes):
line1 = LineString([(a, b) for a , b in zip(y_range[m1], min_avg_tot_cost[m1])])
line2 = LineString([(a, b) for a , b in zip(y_range[m2], min_avg_tot_cost[m2])])
point = line1.intersection(line2)
if not point.is_empty and point.geom_type == 'Point':
print 'Point of intersection between modes %3s and %3s (%5.0f, %.2f) '\
% (mode_label[m1], mode_label[m2], point.x, point.y)
elif point.geom_type == 'MultiPoint':
print 'Multiple points of intersection between modes %3s and %3s '\
% (mode_label[m1], mode_label[m2])
print ' the first is (%5.0f, %.2f) '\
% (point[0].x, point[0].y)
# Common code for plots
import matplotlib.pyplot as plt
xfigsize = 6
yfigsize = 6
def plot_single(x_range, y_range, x_label, y_label, namefile, linestyle, colors, mode_label):
fig, axes = plt.subplots(1,1, figsize=(xfigsize, yfigsize))
axes.set_xlabel(x_label)
axes.set_ylabel(y_label)
from matplotlib.ticker import MultipleLocator
majorLocator = MultipleLocator(10000)
axes.xaxis.set_major_locator(majorLocator)
#axes.grid()
for m in range(len(x_range)):
axes.plot(x_range[m], y_range[m], ls=linestyle[m], color=colors[m], label=mode_label[m], linewidth=3.0)
#axes.plot(x_range[m], y_range[m], 'o', color=colors[m], label=mode_label[m], linewidth=3.0)
#axes.legend(loc='upper right')
axes.legend(loc='best', framealpha=0.5, prop={'size':18})
plt.savefig(namefile)
plt.close()
def plot_frequency(y_range, min_f_range, f_min_range, x_label, y_label, namefile, linestyle, colors, mode_label):
fig, axes = plt.subplots(1,1, figsize=(xfigsize, yfigsize))
axes.set_xlabel(x_label)
axes.set_ylabel(y_label)
#axes.grid()
for m in range(len(y_range)):
axes.plot(y_range[m], min_f_range[m], ls=linestyle[m], color=colors[m], label=r'$\hat f$ ' + mode_label[m], linewidth=3.0)
m2 = m + 1
if m2 >= len(y_range):
m2 = 0
axes.plot(y_range[m], f_min_range[m], ls=linestyle[m2], color=colors[m], label=r'$f_{min}$ '+ mode_label[m], linewidth=3.0)
#axes.legend(loc='upper right')
axes.legend(loc='best', framealpha=0.5, prop={'size':14})
plt.savefig(namefile)
plt.close()
def plot_frequency_max(y_range, min_f_range, f_max_range, x_label, y_label, namefile, linestyle, colors, mode_label):
fig, axes = plt.subplots(1,1, figsize=(xfigsize, yfigsize))
axes.set_xlabel(x_label)
axes.set_ylabel(y_label)
#axes.grid()
for m in range(len(y_range)):
axes.plot(y_range[m], min_f_range[m], ls=linestyle[m], color=colors[m], label=r'$\hat f$ ' + mode_label[m], linewidth=3.0)
m2 = m + 1
if m2 >= len(y_range):
m2 = 0
axes.plot(y_range[m], f_max_range[m], ls=linestyle[m2], color=colors[m], label=r'$f: \theta = \theta_{min}$ '+ mode_label[m], linewidth=3.0)
#axes.legend(loc='upper right')
axes.legend(loc='best', framealpha=0.5, fontsize=11)
plt.savefig(namefile)
plt.close()
def plot_three_economics(x_range, min_avg_op_cost, min_avg_user_cost, min_avg_tot_cost, name_outputfile0e, linestyle, colors, mode_label):
# A figure with all plots related to the economical aspects
fig, axes = plt.subplots(3,1)
fig.set_size_inches(8,12)
from math import floor
for i in range(3):
axes[i].set_xlabel('Demand (pax/h)')
#axes[i].grid(ls=':', lw=0.5)
for item in ([axes[i].title, axes[i].xaxis.label, axes[i].yaxis.label] +
axes[i].get_xticklabels() + axes[i].get_yticklabels()):
item.set_fontsize(8)
for m in range(len(x_range)):
if i == 0:
axes[i].plot(x_range[m], min_avg_op_cost[m], ls=linestyle[m], lw=3,
color=colors[m], label=mode_label[m])
axes[i].set_ylabel('Operator cost ($/pax)')
axes[i].set_title('a) Operator cost')
elif i == 1:
axes[i].plot(x_range[m], min_avg_user_cost[m], ls=linestyle[m], lw=3,
color=colors[m], label=mode_label[m])
axes[i].set_ylabel('User cost ($/pax)')
axes[i].set_title('b) User cost')
elif i == 2:
axes[i].plot(x_range[m], min_avg_tot_cost[m], ls=linestyle[m], lw=3,
color=colors[m], label=mode_label[m])
axes[i].set_ylabel('Total cost ($/pax)')
axes[i].set_title('c) Total cost')
axes[i].legend(loc='upper right', fancybox=True, framealpha=0.5, fontsize=10)
plt.savefig(name_outputfile0e)
plt.close()
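# Worked example (illustrative values only, not from the original study): the
# amortization factor for a 5% discount rate over a 30-year technical life.
if __name__ == '__main__':
    af_example = amortization_factor(0.05, 30)
    print 'Amortization factor for r=0.05, n=30: %.4f' % af_example  # ~0.0651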
|
"""
Board representation of No Dice Einstein.
"""
COLOR = {
"red": 'R',
"blue": 'B'
}
VALUE = {
1: '1',
2: '2',
3: '3',
4: '4',
5: '5',
6: '6'
}
MOVE = {
"U": "up",
"D": "down",
"L": "left",
"R": "right",
"X": "diagonal"
}
class Piece:
def __init__(self, row, col, color, value):
self.row = row # the piece's Y-coordinate on the board
self.col = col # the piece's X-coordinate on the board
self.color = color
self.value = value
def __str__(self):
return COLOR[self.color] + VALUE[self.value]
class Board:
def __init__(self, num_of_rows, num_of_cols):
self.NUM_OF_ROWS = num_of_rows
self.NUM_OF_COLS = num_of_cols
self.board = []
for _ in range(self.NUM_OF_ROWS):
newRow = [None] * self.NUM_OF_COLS
self.board.append(newRow)
def print_board(self):
print()
for r in range(self.NUM_OF_ROWS):
for c in range(self.NUM_OF_COLS):
print(self.board[r][c], end = " ")
print()
print()
def __str__(self):
s = ""
for r in range(self.NUM_OF_ROWS):
for c in range(self.NUM_OF_COLS):
if self.board[r][c]:
s = s + str(self.board[r][c]) + ' '
else:
s = s + "." + ' '
s = s + '\n'
return s
def addPiece(self, piece):
self.board[piece.row][piece.col] = piece
def movePiece(self, piece, row, col):
oldRow = piece.row
oldCol = piece.col
self.board[row][col] = self.board[piece.row][piece.col]
self.board[oldRow][oldCol] = None
piece.row = row
piece.col = col
def removePiece(self, piece):
self.board[piece.row][piece.col] = None
del piece
def get_piece(self, row, col):
return self.board[row][col]
def getRedPieces(self):
numberofpieces = 0
for r in range(self.NUM_OF_ROWS):
for c in range(self.NUM_OF_COLS):
if self.board[r][c]:
if self.board[r][c].color == "red":
numberofpieces += 1
return numberofpieces
def getBluePieces(self):
numberofpieces = 0
for r in range(self.NUM_OF_ROWS):
for c in range(self.NUM_OF_COLS):
if self.board[r][c]:
if self.board[r][c].color == "blue":
numberofpieces += 1
return numberofpieces
def getColorFromCoords(self, row, col):
if row>=self.NUM_OF_ROWS or row<0 or col>=self.NUM_OF_COLS or col<0:
return None # (checking boundaries!)
if self.board[row][col] != None:
return self.board[row][col].color
return None
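# Quick illustrative sketch (board size and piece placement are arbitrary):
# build a small board, place a piece and move it one square to the right.
if __name__ == "__main__":
    board = Board(5, 5)
    piece = Piece(0, 0, "red", 1)
    board.addPiece(piece)
    board.movePiece(piece, 0, 1)
    print(board)                 # R1 now sits at row 0, column 1
    print(board.getRedPieces())  # 1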
|
"""
Copyright 2018-present Open Networking Foundation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
load("//tools/build/bazel:generate_workspace.bzl", "COMPILE", "TEST", "maven_coordinates")
load("//tools/build/bazel:variables.bzl", "ONOS_GROUP_ID", "ONOS_VERSION")
def dump(obj):
for attr in dir(obj):
print("obj.%s = %r" % (attr, getattr(obj, attr)))
# Implementation of a rule to produce an OSGi feature XML snippet
def _osgi_feature_impl(ctx):
output = ctx.outputs.feature_xml
args = [
"-O",
output.path,
"-n",
ctx.attr.name,
"-v",
ctx.attr.version,
"-t",
ctx.attr.description,
]
inputs = []
for dep in ctx.attr.included_bundles:
args += ["-b", maven_coordinates(dep.label)]
for f in dep.java.outputs.jars:
inputs += [f.class_jar]
for dep in ctx.attr.excluded_bundles:
args += ["-e", maven_coordinates(dep.label)]
for f in dep.java.outputs.jars:
inputs += [f.class_jar]
for f in ctx.attr.required_features:
args += ["-f", f]
args += ["-F" if ctx.attr.generate_file else "-E"]
ctx.actions.run(
inputs = inputs,
outputs = [output],
arguments = args,
progress_message = "Generating feature %s" % ctx.attr.name,
executable = ctx.executable._writer,
)
osgi_feature = rule(
attrs = {
"description": attr.string(),
"version": attr.string(default = ONOS_VERSION),
"required_features": attr.string_list(default = ["onos-api"]),
"included_bundles": attr.label_list(),
"excluded_bundles": attr.label_list(default = []),
"generate_file": attr.bool(default = False),
"_writer": attr.label(
executable = True,
cfg = "host",
allow_files = True,
default = Label("//tools/build/bazel:onos_app_writer"),
),
},
outputs = {
"feature_xml": "feature-%{name}.xml",
},
implementation = _osgi_feature_impl,
)
# OSGi feature XML header & footer constants
FEATURES_HEADER = '''\
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<features xmlns="http://karaf.apache.org/xmlns/features/v1.2.0"
name="onos-%s">
<repository>mvn:org.apache.karaf.features/standard/3.0.8/xml/features</repository>
''' % ONOS_VERSION
FEATURES_FOOTER = "</features>"
# Implementation of a rule to produce an OSGi feature repo XML file
def _osgi_feature_repo_impl(ctx):
output = ctx.outputs.feature_repo_xml
cmd = "(echo '%s';" % FEATURES_HEADER
inputs = []
for dep in ctx.attr.exported_features:
for f in dep.files.to_list():
inputs += [f]
cmd += "cat %s;" % f.path
cmd += "echo '%s') > %s;" % (FEATURES_FOOTER, output.path)
ctx.actions.run_shell(
inputs = inputs,
outputs = [output],
progress_message = "Generating feature repo %s" % ctx.attr.name,
command = cmd,
)
osgi_feature_repo = rule(
attrs = {
"description": attr.string(),
"version": attr.string(default = ONOS_VERSION),
"exported_features": attr.label_list(),
},
outputs = {
"feature_repo_xml": "feature-repo-%{name}.xml",
},
implementation = _osgi_feature_repo_impl,
)
|
import graphene
class ResponseField(graphene.Interface):
"""Response interface"""
is_success = graphene.Boolean(default_value=True)
error_message = graphene.String()
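# Illustrative sketch (hypothetical type name): an ObjectType that implements the
# interface so a payload carries the shared is_success / error_message fields.
class DeleteItemPayload(graphene.ObjectType):
    class Meta:
        interfaces = (ResponseField,)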
|
# An English text needs to be encrypted using the following encryption scheme.
# First, the spaces are removed from the text. Let L be the length of this text.
# Then, the characters are written into a grid, whose rows and columns have the following constraints:
#
#   floor(sqrt(L)) <= rows <= columns <= ceil(sqrt(L)), where floor is the floor function and ceil is the ceil function
# For example, the sentence "if man was meant to stay on the ground god would have given us roots" after removing spaces is 54 characters long, so it is written in the form of a grid with 7 rows and 8 columns.
#
# ifmanwas
# meanttos
# tayonthe
# groundgo
# dwouldha
# vegivenu
# sroots
# Ensure that rows * columns >= L
# If multiple grids satisfy the above conditions, choose the one with the minimum area, i.e. rows * columns is minimal.
# The encoded message is obtained by displaying the characters in a column, inserting a space, and then displaying the next column and inserting a space, and so on. For example, the encoded message for the above rectangle is:
#
# imtgdvs fearwer mayoogo anouuio ntnnlvt wttddes aohghn sseoau
#
# You will be given a message in English with no spaces between the words. The maximum message length can be 81 characters. Print the encoded message.
#
# Here are some more examples:
#
# Sample Input:
#
# haveaniceday
# Sample Output:
#
# hae and via ecy
import sys
import math
s = raw_input().strip()
ceil = math.ceil(math.sqrt(len(s)))
b = ''
for i in range(int(ceil)):
j = i
while j < len(s):
b += s[j]
j += int(ceil)
b += ' '
print b
|
"""
.. See the NOTICE file distributed with this work for additional information
regarding copyright ownership.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import unicode_literals
from rest_framework import generics
from tark.views import DataTableListApi
from tark.utils.schema_utils import SchemaUtils
from release.models import ReleaseSource, ReleaseSet,\
TranscriptReleaseTagRelationship
from release.drf.serializers import ReleaseSourceSerializer,\
ReleaseSetSerializer
from release.drf.filters import ReleaseSetFilterBackend
from rest_framework.pagination import PageNumberPagination
from tark_drf.utils.decorators import setup_eager_loading
from transcript.drf.serializers import TranscriptReleaseTagRelationshipSerializer
# ============For Datatables========
class NotPaginatedSetPagination(PageNumberPagination):
page_size = None
class ReleaseSourceList(generics.ListAPIView):
queryset = ReleaseSource.objects.all()
serializer_class = ReleaseSourceSerializer
class ReleaseSourceDetail(generics.RetrieveAPIView):
queryset = ReleaseSource.objects.all()
serializer_class = ReleaseSourceSerializer
class ReleaseSourceDatatableView(DataTableListApi):
serializer_class = ReleaseSourceSerializer
search_parameters = SchemaUtils.get_field_names(app_name='release', model_name='releasesource', exclude_pk=False)
default_order_by = 3
queryset = ReleaseSource.objects.all()
class ReleaseSetList(generics.ListAPIView):
queryset = ReleaseSet.objects.all()
serializer_class = ReleaseSetSerializer
filter_backends = (ReleaseSetFilterBackend, )
class ReleaseSetDetail(generics.RetrieveAPIView):
queryset = ReleaseSet.objects.all()
serializer_class = ReleaseSetSerializer
class TranscriptReleaseTagRelationshipList(generics.ListAPIView):
queryset = TranscriptReleaseTagRelationship.objects.all()
serializer_class = TranscriptReleaseTagRelationshipSerializer
class TranscriptReleaseTagRelationshipListAll(generics.ListAPIView):
queryset = TranscriptReleaseTagRelationship.objects.all()
serializer_class = TranscriptReleaseTagRelationshipSerializer
pagination_class = None
class ReleaseSetDatatableView(generics.ListAPIView):
serializer_class = ReleaseSetSerializer
pagination_class = NotPaginatedSetPagination
@setup_eager_loading(ReleaseSetSerializer)
def get_queryset(self):
queryset = ReleaseSet.objects.order_by('pk')
return queryset
|
import time
class Timeouts():
def __init__(self):
self.timeouts = dict()
def add(self, command, length):
ctime = time.time()
if command not in self.timeouts or ctime > self.timeouts[command]:
self.timeouts[command] = ctime + length
def is_timeout(self, command):
if command in self.timeouts:
if time.time() > self.timeouts[command]:
return False
else:
return True
return False
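# Illustrative usage sketch: throttle a hypothetical "hello" command to once per 30 seconds.
if __name__ == "__main__":
    timeouts = Timeouts()
    timeouts.add("hello", 30)
    print(timeouts.is_timeout("hello"))  # True - still inside the 30 second window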
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models
# Create your models here.
# Script library
class OpsJobJobScriptInfo(models.Model):
job_name = models.CharField(max_length=32) # job name
script_name = models.CharField(max_length=32) # script_name
script_content = models.TextField(default='') # script_content
drop_status = models.IntegerField(blank=True, null=True, default=0) # drop_status
mTime = models.DateTimeField(db_column='m_time', blank=True, null=True, auto_now=True) # mTime
class Meta:
# managed = False
db_table = 'ops_job_job_script_info'
# Script execution history
class OpsJobScriptHistory(models.Model):
id = models.AutoField(primary_key=True)
job_name = models.CharField(max_length=32) # job name
result = models.TextField(default='') # result
dtEventTime = models.DateTimeField(db_column='dtEventTime', blank=True, null=True, auto_now=True, db_index=True) # dtEventTime
exec_status = models.IntegerField(blank=True, null=True, default=0) #0 exec success, 1 execing
ip_list = models.TextField(default='') #ip_list
class Meta:
# managed = False
db_table = 'ops_job_script_history'
|
from copy import deepcopy
from src.cards import start_card, cards
# Default values
# DO NOT CHANGE THE WIDTH AND HEIGHT, as the entire game is made to render the items to the screen using these values, especially the images
window_width = 1500
window_height = 800
game_width = 1200
game_height = 800
cell_width = 40
cell_height = 40
board = [["*"] * 30, ["*"] * 30, ["*"] * 30, ["*"] * 30, ["*"] * 30, ["*"] * 30, ["*"] * 30, ["*"] * 30] + list(map(lambda row: ["*"] * 13 + row + ["*"] * 13, start_card["board"])) + [["*"] * 30, ["*"] * 30, ["*"] * 30, ["*"] * 30, ["*"] * 30, ["*"] * 30, ["*"] * 30, ["*"] * 30] # game start board, only the start card (ID 1) is present at the center
stock = deepcopy(cards) # remaining cards that can be picked to be displayed at the screen
on_board_cards = [{"id": 1, "top_left": (8, 13), "rotations": 0}] # define which cards have been extracted from the stock and added to the game, and their top left cell coordinates
escalators = start_card["escalators"] # present escalators in the game coordinates
walls = start_card["walls"] # present walls in the game coordinates
pawns = { "purple": [9, 14], "orange": [10, 14], "yellow": [9, 15], "green": [10, 15] } # pawns start coordinates
pawns_on_objects = {"purple": False, "orange": False, "yellow": False, "green": False} # define which pawn is on its object or not
pawns_outside = pawns_on_objects.copy() # define which pawn has successfully escaped from the game or not
current_color = "purple"
selected_colors = ["purple", "purple", "purple"]
debug_mode = False
exit_available = False
telekinesis_times_used = 0
timeout = 3 # timeout is in minutes
lost = False
won = False
|
#!/usr/bin/env python3.7
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import sys
import time
start = time.time()
sys.stdin.reconfigure(errors='surrogateescape')
sys.stdout.reconfigure(errors='surrogateescape')
for line in sys.stdin:
seconds = time.time() - start
sys.stdout.write('%02d:%02d %s' % (seconds // 60, seconds % 60, line))
sys.stdout.flush()
|
#!/usr/bin/env python
from pyrf.devices.thinkrf import WSA
from pyrf.connectors.twisted_async import TwistedConnector
from pyrf.sweep_device import SweepDevice
import sys
import time
import math
from matplotlib.pyplot import plot, figure, axis, xlabel, ylabel, show
import numpy as np
from twisted.internet import reactor, defer
import twisted.python.log
def plot_sweep(fstart, fstop, bins):
# setup my graph
fig = figure(1)
xvalues = np.linspace(fstart, fstop, len(bins))
xlabel("Frequency")
ylabel("Amplitude")
# plot something
plot(xvalues, bins, color='blue')
# show graph
show()
reactor.callLater(2 ** -4, reactor.stop)
def start_sweep(v):
global sd
sd = SweepDevice(dut, plot_sweep)
sd.capture_power_spectrum(0e6, 20000e6, 5e6, {'attenuator': 0 })
# connect to wsa
dut = WSA(connector=TwistedConnector(reactor))
d = dut.connect(sys.argv[1])
d.addCallbacks(start_sweep, twisted.python.log.err)
reactor.run()
print 'context_bytes_received', sd.context_bytes_received
print 'data_bytes_received', sd.data_bytes_received
print 'data_bytes_processed', sd.data_bytes_processed
print 'martian_bytes_discarded', sd.martian_bytes_discarded
print 'past_end_bytes_discarded', sd.past_end_bytes_discarded
print 'fft_calculation_seconds', sd.fft_calculation_seconds
print 'bin_collection_seconds', sd.bin_collection_seconds
|
from enum import Enum, unique
from typing import List
from shotgrid_leecher.utils.functional import try_or
@unique
class QueryStringType(Enum):
STR = str
INT = int
FLOAT = float
@staticmethod
def from_param(type_name: str) -> "QueryStringType":
if not type_name:
return QueryStringType.STR
return try_or(
lambda: QueryStringType[type_name.strip().upper()],
QueryStringType.STR,
)
@unique
class DbName(Enum):
AVALON = "avalon"
INTERMEDIATE = "shotgrid_openpype"
SCHEDULE = "shotgrid_schedule"
@unique
class DbCollection(Enum):
SCHEDULE_PROJECTS = "projects"
SCHEDULE_QUEUE = "queue"
SCHEDULE_LOGS = "logs"
@unique
class AvalonType(Enum):
PROJECT = "project"
ASSET = "asset"
@unique
class ShotgridType(Enum):
PROJECT = "Project"
ASSET = "Asset"
LINKED_ENTITY = "LinkedEntity"
SHOT = "Shot"
EPISODE = "Episode"
SEQUENCE = "Sequence"
GROUP = "Group"
TASK = "Task"
STEP = "Step"
ASSET_TO_SHOT_LINK = "AssetShotConnection"
SHOT_TO_SHOT_LINK = "ShotShotConnection"
ASSET_TO_ASSET_LINK = "AssetAssetConnection"
@staticmethod
def middle_types() -> List["ShotgridType"]:
return [
ShotgridType.GROUP,
ShotgridType.ASSET,
ShotgridType.SHOT,
ShotgridType.EPISODE,
ShotgridType.SEQUENCE,
]
@staticmethod
def middle_names() -> List[str]:
return [x.value for x in ShotgridType.middle_types()]
@unique
class ShotgridEvents(Enum):
NEW_ASSET = "Shotgun_Asset_New"
@unique
class EventTables(Enum):
ASSET_EVENTS = "asset_events"
VERSION_EVENTS = "version_events"
@unique
class EventTypes(Enum):
INITIALIZED = "Initialized"
ASSIGNED = "Assigned"
DONE = "Done"
@unique
class ShotgridEventEntries(Enum):
EVENT_ENTRY = "EventLogEntry"
@unique
class ShotgridField(Enum):
ENTITY = "entity"
ID = "id"
STEP = "step"
CONTENT = "content"
NAME = "name"
SEQUENCE = "sequence"
EPISODE = "episode"
SEQUENCE_EPISODE = "sequence_episode"
CUT_DURATION = "cut_duration"
FRAME_RATE = "frame_rate"
CUT_IN = "cut_in"
CUT_OUT = "cut_out"
HEAD_IN = "head_in"
HEAD_OUT = "head_out"
TAIL_IN = "tail_in"
TAIL_OUT = "tail_out"
CODE = "code"
TYPE = "type"
TASKS = "tasks"
TASK_STATUS = "sg_status_list"
TASK_ASSIGNEES = "task_assignees"
ASSET_TYPE = "asset_type"
SHORT_NAME = "short_name"
ASSETS = "assets"
PARENTS = "parents"
CACHED_DISPLAY_NAME = "cached_display_name"
LINK_QUANTITY = "quantity"
LINK_SHOT_ID = "link_shot_id"
LINK_ASSET_ID = "link_asset_id"
LINK_PARENT_ID = "link_parent_id"
LINK_PARENT_SHOT_ID = "link_parent_shot_id"
def to_db_key(self) -> str:
return str(self.value).replace(".", "_")
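# Illustrative usage sketch of the helpers defined above:
if __name__ == "__main__":
    print(QueryStringType.from_param("int"))      # QueryStringType.INT
    print(QueryStringType.from_param(""))         # QueryStringType.STR (fallback)
    print(ShotgridField.TASK_STATUS.to_db_key())  # sg_status_list
    print(ShotgridType.middle_names())            # ['Group', 'Asset', 'Shot', 'Episode', 'Sequence']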
|
from regression_tests import *
class TestBasic(Test):
"""Related to:
#41: https://github.com/avast/retdec/issues/41
#169: https://github.com/avast/retdec/issues/169
#391: https://github.com/avast/retdec/pull/391
"""
settings=TestSettings(
input='Test.exe'
)
def test(self):
main = self.out_c.funcs['main']
assert main.calls('__readfsdword')
assert self.out_c.contains('__readfsdword\(24\)')
assert not main.calls('abort')
class TestLoadNull(Test):
"""Related to:
#41: https://github.com/avast/retdec/issues/41
#169: https://github.com/avast/retdec/issues/169
#391: https://github.com/avast/retdec/pull/391
Test that load from null is replaced by special intrinsic.
"""
settings=TestSettings(
input='TestNull.exe'
)
def test(self):
main = self.out_c.funcs['main']
assert main.calls('__readNullptrDword')
assert not main.calls('abort')
class TestIssue376(Test):
"""Related to:
#376: https://github.com/avast/retdec/issues/376
"""
settings=TestSettings(
input='96BA2AE23FB2267D993BD018A5BDEEF062BED7C56DD6C37BDDC00EFA65085363',
args='--select-functions=function_4169830c,TModuleEntry'
)
def test(self):
f1 = self.out_c.funcs['function_4169830c']
assert f1.calls('__readfsdword')
assert f1.calls('__writefsdword')
assert f1.calls('_40_FinalizeArray')
assert f1.calls('_40_LStrClr')
assert not f1.calls('abort')
f2 = self.out_c.funcs['TModuleEntry']
assert f2.calls('__readfsdword')
assert f2.calls('__writefsdword')
assert f2.calls('FileSearch')
assert f2.calls('_40_LStrAsg')
assert f2.calls('function_41697aa4')
assert not f2.calls('abort')
class TestIssue347(Test):
"""Related to:
#347: https://github.com/avast/retdec/issues/347
"""
settings=TestSettings(
input='625dc8112bc509236ff5d0255b85cc0b82c9dd1ef27f6320a7394f33ab46800e',
args='--select-functions=function_402ee0,function_402f30,function_403880'
)
def test(self):
f1 = self.out_c.funcs['function_402ee0']
assert f1.calls('__readfsdword')
assert f1.calls('__writefsdword')
assert f1.calls('function_41ce00')
assert not f1.calls('abort')
f2 = self.out_c.funcs['function_402f30']
assert f2.calls('__readfsdword')
assert f2.calls('__writefsdword')
assert f2.calls('function_41ce00')
assert f2.calls('function_41e347')
assert not f2.calls('abort')
f3 = self.out_c.funcs['function_403880']
assert f3.calls('__readfsdword')
assert f3.calls('__writefsdword')
assert f3.calls('function_4154ec')
assert f3.calls('function_416645')
assert f3.calls('function_41654c')
assert not f3.calls('abort')
assert self.out_c.has_string_literal(r'Megafiles (*.meg)|*.meg|All Files (*.*)|*.*||')
|
from __future__ import absolute_import
from .lib import Sedflux3D
|
import os, platform, collections
import socket, subprocess,sys
import threading
from datetime import datetime
class myThread (threading.Thread):
def __init__(self,startLastOctet,endLastOctet):
threading.Thread.__init__(self)
self.startLastOctet = startLastOctet
self.endLastOctet = endLastOctet
def run(self):
runThread(self.startLastOctet,self.endLastOctet)
def getNetwork():
net = raw_input("Enter the Network Address:\t\t ")
netSplit= net.split('.')
a = '.'
firstThreeOctet = netSplit[0]+a+netSplit[1]+a+netSplit[2]+a
startLastOctet = int(raw_input("Enter the beginning of last Octet:\t "))
    endLastOctet = int(raw_input("Enter the end of last Octet:\t\t "))
endLastOctet =endLastOctet+1
dic = collections.OrderedDict()
oper = platform.system()
if (oper=="Windows"):
pingCmd = "ping -n 1 "
elif (oper== "Linux"):
pingCmd = "ping -c 1 "
else :
pingCmd = "ping -c 1 "
return firstThreeOctet, startLastOctet, endLastOctet, dic, pingCmd
def runThread(startLastOctet,endLastOctet):
    #print "Scanning in Progress"
for ip in xrange(startLastOctet,endLastOctet):
addr = firstThreeOctet+str(ip)
pingAddress = pingCmd+addr
response = os.popen(pingAddress)
for line in response.readlines():
#if(line.count("TTL")):
# break
if (line.count("ttl")):
#print addr, "--> Live"
dic[ip]= addr
break
if __name__ == '__main__':
subprocess.call('clear',shell=True)
print "-" * 75
    print "This program searches for live IPs in the last octet, with multiple threads"
print "\tFor example: 192.168.11.xxx - 192.168.11.yyy"
print "-" * 75
firstThreeOctet, startLastOctet, endLastOctet, dic, pingCmd = getNetwork()
t1= datetime.now()
total_ip =endLastOctet-startLastOctet
tn =3 # number of ip handled by one thread
total_thread = total_ip/tn
total_thread=total_thread+1
threads= []
try:
for i in xrange(total_thread):
en = startLastOctet+tn
if(en >endLastOctet):
en =endLastOctet
thread = myThread(startLastOctet,en)
thread.start()
threads.append(thread)
startLastOctet =en
except:
print "Error: unable to start thread"
print "\t Number of Threads active:", threading.activeCount()
for t in threads:
t.join()
print "\tExiting Main Thread"
sortedIPs = collections.OrderedDict(sorted(dic.items()))
for key in sortedIPs:
print "IP address: {} \t --> Live".format(sortedIPs[key])
t2= datetime.now()
total =t2-t1
print "Scanning complete in " , total
|
#
# PySNMP MIB module ChrComAtmVplTpVp-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/ChrComAtmVplTpVp-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 18:19:02 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
OctetString, Integer, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "OctetString", "Integer", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsUnion, ValueSizeConstraint, SingleValueConstraint, ValueRangeConstraint, ConstraintsIntersection = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsUnion", "ValueSizeConstraint", "SingleValueConstraint", "ValueRangeConstraint", "ConstraintsIntersection")
OperStatus, = mibBuilder.importSymbols("CISCO-RHINO-MIB", "OperStatus")
TruthValue, = mibBuilder.importSymbols("ChrTyp-MIB", "TruthValue")
chrComAtmVpl, = mibBuilder.importSymbols("Chromatis-MIB", "chrComAtmVpl")
ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup")
IpAddress, TimeTicks, Unsigned32, Counter32, iso, Gauge32, MibScalar, MibTable, MibTableRow, MibTableColumn, Counter64, Bits, MibIdentifier, ObjectIdentity, NotificationType, ModuleIdentity, Integer32 = mibBuilder.importSymbols("SNMPv2-SMI", "IpAddress", "TimeTicks", "Unsigned32", "Counter32", "iso", "Gauge32", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Counter64", "Bits", "MibIdentifier", "ObjectIdentity", "NotificationType", "ModuleIdentity", "Integer32")
TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString")
chrComAtmVplTpVpTable = MibTable((1, 3, 6, 1, 4, 1, 3695, 1, 9, 1, 2), )
if mibBuilder.loadTexts: chrComAtmVplTpVpTable.setStatus('current')
chrComAtmVplTpVpEntry = MibTableRow((1, 3, 6, 1, 4, 1, 3695, 1, 9, 1, 2, 1), ).setIndexNames((0, "ChrComAtmVplTpVp-MIB", "chrComAtmVplifIndex"), (0, "ChrComAtmVplTpVp-MIB", "chrComAtmVplAtmVplVpi"))
if mibBuilder.loadTexts: chrComAtmVplTpVpEntry.setStatus('current')
chrComAtmVplifIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 3695, 1, 9, 1, 2, 1, 1), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 4294967295))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: chrComAtmVplifIndex.setStatus('current')
chrComAtmVplAtmVplVpi = MibTableColumn((1, 3, 6, 1, 4, 1, 3695, 1, 9, 1, 2, 1, 2), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 4294967295))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: chrComAtmVplAtmVplVpi.setStatus('current')
chrComAtmVplCCSource = MibTableColumn((1, 3, 6, 1, 4, 1, 3695, 1, 9, 1, 2, 1, 3), TruthValue()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: chrComAtmVplCCSource.setStatus('current')
chrComAtmVplCCSink = MibTableColumn((1, 3, 6, 1, 4, 1, 3695, 1, 9, 1, 2, 1, 4), TruthValue()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: chrComAtmVplCCSink.setStatus('current')
chrComAtmVplTPOperStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 3695, 1, 9, 1, 2, 1, 5), OperStatus()).setMaxAccess("readonly")
if mibBuilder.loadTexts: chrComAtmVplTPOperStatus.setStatus('current')
chrComAtmVplAlarmVector = MibTableColumn((1, 3, 6, 1, 4, 1, 3695, 1, 9, 1, 2, 1, 6), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: chrComAtmVplAlarmVector.setStatus('current')
chrComAtmVplAlarmSeverityProfileIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 3695, 1, 9, 1, 2, 1, 7), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 4294967295))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: chrComAtmVplAlarmSeverityProfileIndex.setStatus('current')
mibBuilder.exportSymbols("ChrComAtmVplTpVp-MIB", chrComAtmVplAtmVplVpi=chrComAtmVplAtmVplVpi, chrComAtmVplCCSink=chrComAtmVplCCSink, chrComAtmVplAlarmVector=chrComAtmVplAlarmVector, chrComAtmVplTPOperStatus=chrComAtmVplTPOperStatus, chrComAtmVplTpVpEntry=chrComAtmVplTpVpEntry, chrComAtmVplTpVpTable=chrComAtmVplTpVpTable, chrComAtmVplifIndex=chrComAtmVplifIndex, chrComAtmVplAlarmSeverityProfileIndex=chrComAtmVplAlarmSeverityProfileIndex, chrComAtmVplCCSource=chrComAtmVplCCSource)
|
import numpy as np
import torch
import torch.nn as nn
# import torch.nn.functional as F
from dp.modules.backbones.resnet import ResNetBackbone
from dp.modules.decoders.OrdinalRegression import OrdinalRegressionLayer
from dp.modules.encoders.SceneUnderstandingModule import SceneUnderstandingModule
from dp.modules.losses.ordinal_regression_loss import OrdinalRegressionLoss
class DepthPredModel(nn.Module):
def __init__(
self,
ord_num=90,
gamma=1.0,
beta=80.0,
input_size=(385, 513),
kernel_size=16,
pyramid=[8, 12, 16], # noqa
batch_norm=False,
discretization="SID",
pretrained=True,
):
super().__init__()
assert len(input_size) == 2
assert isinstance(kernel_size, int)
self.ord_num = ord_num
self.gamma = gamma
self.beta = beta
self.discretization = discretization
self.backbone = ResNetBackbone(pretrained=pretrained)
self.SceneUnderstandingModule = SceneUnderstandingModule(
ord_num,
size=input_size,
kernel_size=kernel_size,
pyramid=pyramid,
batch_norm=batch_norm,
)
self.regression_layer = OrdinalRegressionLayer()
self.criterion = OrdinalRegressionLoss(ord_num, beta, discretization)
def optimizer_params(self):
group_params = [
{
"params": filter(lambda p: p.requires_grad, self.backbone.parameters()),
"lr": 1.0,
},
{
"params": filter(
lambda p: p.requires_grad, self.SceneUnderstandingModule.parameters()
),
"lr": 10.0,
},
]
return group_params
def forward(self, image, target=None):
"""
:param image: RGB image, torch.Tensor, Nx3xHxW
:param target: ground truth depth, torch.Tensor, NxHxW
:return: output: if training, return loss, torch.Float,
else return {"target": depth, "prob": prob, "label": label},
depth: predicted depth, torch.Tensor, NxHxW
prob: probability of each label, torch.Tensor, NxCxHxW, C is number of label
label: predicted label, torch.Tensor, NxHxW
"""
N, C, H, W = image.shape
feat = self.backbone(image)
# print(feat)
feat = self.SceneUnderstandingModule(feat)
# print("feat shape:", feat.shape)
# feat = F.interpolate(feat, size=(H, W), mode="bilinear", align_corners=True)
if self.training:
prob = self.regression_layer(feat)
loss = self.criterion(prob, target)
return loss
prob, label = self.regression_layer(feat)
# print("prob shape:", prob.shape, " label shape:", label.shape)
if self.discretization == "SID":
t0 = torch.exp(np.log(self.beta) * label.float() / self.ord_num)
t1 = torch.exp(np.log(self.beta) * (label.float() + 1) / self.ord_num)
else:
t0 = 1.0 + (self.beta - 1.0) * label.float() / self.ord_num
t1 = 1.0 + (self.beta - 1.0) * (label.float() + 1) / self.ord_num
depth = (t0 + t1) / 2 - self.gamma
# print("depth min:", torch.min(depth), " max:", torch.max(depth),
# " label min:", torch.min(label), " max:", torch.max(label))
return {"target": [depth], "prob": [prob], "label": [label]}
|
# The MIT License (MIT)
#
# Copyright (c) 2018 Carter Nelson for Adafruit Industries
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
`ads1015`
====================================================
CircuitPython driver for ADS1015 ADCs.
* Author(s): Carter Nelson
"""
import struct
# pylint: disable=unused-import
from .ads1x15 import ADS1x15, Mode
# Data sample rates
_ADS1015_CONFIG_DR = {
128: 0x0000,
250: 0x0020,
490: 0x0040,
920: 0x0060,
1600: 0x0080,
2400: 0x00A0,
3300: 0x00C0,
}
# Pins
P0 = 0
P1 = 1
P2 = 2
P3 = 3
class ADS1015(ADS1x15):
"""Class for the ADS1015 12 bit ADC."""
@property
def bits(self):
"""The ADC bit resolution."""
return 12
@property
def rates(self):
"""Possible data rate settings."""
r = list(_ADS1015_CONFIG_DR.keys())
r.sort()
return r
@property
def rate_config(self):
"""Rate configuration masks."""
return _ADS1015_CONFIG_DR
def _data_rate_default(self):
return 1600
def _conversion_value(self, raw_adc):
raw_adc = raw_adc.to_bytes(2, "big")
value = struct.unpack(">h", raw_adc)[0]
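        # The ADS1015 returns its 12-bit result left-aligned in a 16-bit word, so drop the 4 unused low bits.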
return value >> 4
|
import csv
from django.core.management.base import BaseCommand
from nationalparks.models import FederalSite
def determine_site_type(name, website):
""" The name (or website) of a Federal Site in this list provides an
indication of what type of site it might be. This extracts that out. """
name_fragments = [
(' BLM', 'BLM'), (' NF', 'NF'), ('National Forest', 'NF'),
(' NWR', 'NWR'), (' NHS', 'NHS'), (' NRA', 'NRA'),
('National Recreation Area', 'NRA'),
('National Wildlife Refuge', 'NWR'), ('Fish and Wildlife', 'NWR')]
for fragment, code in name_fragments:
if fragment in name:
return code
website_fragments = [
('fs.fed.us', 'NF'), ('fs.usda.gov', 'NF'),
('blm.gov', 'BLM'), ('fws.gov', 'NWR'), ('nps.gov', 'NPS')]
for fragment, code in website_fragments:
if fragment in website:
return code
return 'OTH'
def phone_number(pstr):
""" Extract the extension from the phone number if it exists. """
if ';' in pstr:
# In one case we have multiple phone numbers separated by a
# semi-colon. We simply pick the first one. Note this means we're
# "throwing away" the other phone numbers.
pstr = pstr.split(';')[0]
for m in [' x ', 'ext.', 'ext']:
if m in pstr:
phone, extension = pstr.split(m)
return (phone.strip(), extension.strip())
return (pstr.strip(), None)
def process_site(row):
""" Create an entry in the database for a federal site."""
# Some rows in the CSV don't represent sites. This is indicative by them
# missing the city name.
if row[2] != '':
name = row[0]
phone, phone_extension = phone_number(row[1])
city = row[2]
state = row[3]
website = row[4]
annual = row[5] == 'YES'
senior = row[6] == 'YES'
access = row[7] == 'YES'
site_type = determine_site_type(name, website)
sites = FederalSite.objects.filter(name=name, city=city)
if len(sites) > 0:
# If we encounter a duplicate, let's update instead of inserting.
fs = sites[0]
else:
fs = FederalSite()
fs.name = name
fs.site_type = site_type
fs.phone = phone
fs.phone_extension = phone_extension
fs.city = city
fs.state = state
fs.website = website
fs.annual_pass = annual
fs.senior_pass = senior
fs.access_pass = access
fs.save()
def read_pass_list(filename):
with open(filename, 'r', encoding='latin-1') as passcsv:
passreader = csv.reader(passcsv, delimiter=',')
for l in passreader:
process_site(l)
class Command(BaseCommand):
""" Read and import a pass list. """
def add_arguments(self, parser):
parser.add_argument('filename', nargs=1)
def handle(self, *args, **options):
filename = options['filename'][0]
read_pass_list(filename)
|
import pyexcel
sheet = pyexcel.get_sheet(file_name="test.csv")
sheet
|
from copy import deepcopy
import pytest
import random
import bigchaindb
from bigchaindb.core import Bigchain
from contextlib import contextmanager
from bigchaindb.common.crypto import generate_key_pair
from tests.pipelines.stepping import create_stepper
################################################################################
# Test setup code
@contextmanager
def federation(n):
"""Return a list of Bigchain objects and pipeline steppers to represent
a BigchainDB federation
"""
keys = [generate_key_pair() for _ in range(n)]
config_orig = bigchaindb.config
@contextmanager
def make_nodes(i):
"""make_nodes is a recursive context manager. Essentially it is doing:
with f(a[0]) as b0:
with f(a[1]) as b1:
with f(a[2]) as b2:
yield [b0, b1, b2]
with an arbitrary depth. It is also temporarily patching global
configuration to simulate nodes with separate identities.
"""
nonlocal keys
if i == 0:
yield []
else:
config = deepcopy(config_orig)
keys = [keys[-1]] + keys[:-1] # Rotate keys
config['keyring'] = [pub for _, pub in keys[1:]]
config['keypair']['private'] = keys[0][0]
config['keypair']['public'] = keys[0][1]
bigchaindb.config = config
stepper = create_stepper()
with stepper.start():
node = (Bigchain(), stepper)
with make_nodes(i-1) as rest:
yield [node] + rest
with make_nodes(n) as steppers:
bigchaindb.config = config_orig
yield zip(*steppers)
@pytest.fixture
def federation_3():
with federation(3) as f:
yield f
def process_tx(steps):
steps.block_changefeed(timeout=1)
if steps.block_filter_tx():
steps.block_validate_tx()
steps.block_create(timeout=True)
steps.block_write()
steps.block_delete_tx()
def input_single_create(b):
from bigchaindb.common.transaction import Transaction
metadata = {'r': random.random()}
tx = Transaction.create([b.me], [([b.me], 1)], metadata).sign([b.me_private])
b.write_transaction(tx)
return tx
def process_vote(steps, result=None):
steps.vote_changefeed()
steps.vote_validate_block()
steps.vote_ungroup()
steps.vote_validate_tx()
if result is not None:
steps.queues['vote_vote'][0][0] = result
vote = steps.vote_vote()
steps.vote_write_vote()
return vote
################################################################################
# Tests here on down
@pytest.mark.bdb
@pytest.mark.genesis
@pytest.mark.skip_travis_rdb
def test_elect_valid(federation_3):
[bx, (s0, s1, s2)] = federation_3
tx = input_single_create(bx[0])
process_tx(s0)
process_tx(s1)
process_tx(s2)
process_vote(s2, False)
for i in range(3):
assert bx[i].get_transaction(tx.id, True)[1] == 'undecided'
process_vote(s0, True)
for i in range(3):
assert bx[i].get_transaction(tx.id, True)[1] == 'undecided'
process_vote(s1, True)
for i in range(3):
assert bx[i].get_transaction(tx.id, True)[1] == 'valid'
@pytest.mark.bdb
@pytest.mark.genesis
@pytest.mark.skip_travis_rdb
def test_elect_invalid(federation_3):
[bx, (s0, s1, s2)] = federation_3
tx = input_single_create(bx[0])
process_tx(s0)
process_tx(s1)
process_tx(s2)
process_vote(s1, True)
for i in range(3):
assert bx[i].get_transaction(tx.id, True)[1] == 'undecided'
process_vote(s2, False)
for i in range(3):
assert bx[i].get_transaction(tx.id, True)[1] == 'undecided'
process_vote(s0, False)
for i in range(3):
assert bx[i].get_transaction(tx.id, True)[1] is None
@pytest.mark.bdb
@pytest.mark.genesis
@pytest.mark.skip_travis_rdb
def test_elect_sybill(federation_3):
[bx, (s0, s1, s2)] = federation_3
tx = input_single_create(bx[0])
process_tx(s0)
process_tx(s1)
process_tx(s2)
# What we need is some votes from unknown nodes!
# Incorrectly signed votes are ineligible.
for s in [s0, s1, s2]:
s.vote.bigchain.me_private = generate_key_pair()[0]
process_vote(s0, True)
process_vote(s1, True)
process_vote(s2, True)
for i in range(3):
assert bx[i].get_transaction(tx.id, True)[1] == 'undecided'
@pytest.mark.skip()
@pytest.mark.bdb
@pytest.mark.genesis
def test_elect_dos(federation_3):
"""https://github.com/bigchaindb/bigchaindb/issues/1314
Test that a node cannot block another node's opportunity to vote
on a block by writing an incorrectly signed vote
"""
raise NotImplementedError()
@pytest.mark.skip('Revisit when we have block election status cache')
@pytest.mark.bdb
@pytest.mark.genesis
def test_elect_bad_block_voters_list(federation_3):
"""See https://github.com/bigchaindb/bigchaindb/issues/1224"""
[bx, (s0, s1, s2)] = federation_3
b = s0.block.bigchain
# First remove other nodes from node 0 so that it self assigns the tx
b.nodes_except_me = []
tx = input_single_create(b)
# Now create a block voters list which will not match other keyrings
b.nodes_except_me = [bx[1].me]
process_tx(s0)
process_vote(s0)
process_vote(s1)
process_vote(s2)
for i in range(3):
assert bx[i].get_transaction(tx.id, True)[1] == 'invalid'
|
import pandas as pd
import requests
from LeapOfThought.common.file_utils import cached_path
from tqdm import tqdm
# This is mainly for testing and debugging ...
pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 2000)
pd.set_option('display.max_colwidth', 130)
pd.set_option("display.colheader_justify", "left")
class WikiData:
""" A python singleton """
class __impl:
""" Implementation of the singleton interface """
def __init__(self):
self._sparql_url = 'https://query.wikidata.org/sparql'
def run_query(self,query):
r = requests.get(self._sparql_url, params={'format': 'json', 'query': query})
if r.status_code != 200:
return r.content
else:
data = r.json()
query_data = []
for item in data['results']['bindings']:
record = {}
for var in data['head']['vars']:
record[var.replace('Label', '')] = item[var]['value']
query_data.append(record)
df = pd.DataFrame(query_data)
df.drop_duplicates(inplace=True)
ids_to_filter = []
for col in df.columns:
ids_to_filter += list(df[df[col].str.contains('Q[0-9]+')].index)
df = df[~df.index.isin(ids_to_filter)]
return df
def get_capitals(self, min_city_size):
countries_query = """
SELECT DISTINCT ?cityLabel ?countryLabel ?pop ?capitalLabel
WHERE
{
?city wdt:P31/wdt:P279* wd:Q515 ; wdt:P1082 ?pop . FILTER (?pop > %d)
?city wdt:P17 ?country .
?country wdt:P36 ?capital.
SERVICE wikibase:label { bd:serviceParam wikibase:language "en" }
}
LIMIT 100000
""" % (min_city_size)
print(countries_query)
print('querying on sparql wikidata')
r = requests.get(self._sparql_url, params={'format': 'json', 'query': countries_query})
print(r)
data = r.json()
print('creating tabular data')
countries_data = []
for item in data['results']['bindings']:
city = item['cityLabel']['value']
country = item['countryLabel']['value']
capital = item['capitalLabel']['value']
pop = item['pop']['value']
countries_data.append([city, country, capital, pop])
df = pd.DataFrame(countries_data, columns=['city', 'country', 'capital','population'])
df.drop_duplicates(inplace=True)
return df
def get_music_bands(self):
countries_query = """
SELECT DISTINCT ?bandLabel ?inception ?hasPartLabel WHERE {
?band wdt:P31 wd:Q215380 .
?band wdt:P136 ?genre .
?band wdt:P571 ?inception .
?band wdt:P527 ?hasPart .
SERVICE wikibase:label {
bd:serviceParam wikibase:language "[AUTO_LANGUAGE],en" .
}
}
"""
print('querying on sparql wikidata')
r = requests.get(self._sparql_url, params={'format': 'json', 'query': countries_query})
data = r.json()
print('creating tabular data')
countries_data = []
for item in data['results']['bindings']:
band = item['bandLabel']['value']
inception = item['inception']['value']
musician = item['hasPartLabel']['value']
countries_data.append([band, inception, musician])
df = pd.DataFrame(countries_data, columns=['band', 'inception', 'musician'])
df['inception'] = pd.to_datetime(df['inception'], errors='coerce')
df.drop_duplicates(inplace=True)
return df
def get_company_founders(self, min_employee_num):
query = """
SELECT DISTINCT ?founderLabel ?companyLabel ?locationLabel ?employees
WHERE
{
?company wdt:P112 ?founder ; wdt:P1128 ?employees . FILTER (?employees > %d)
?founder wdt:P31 wd:Q5 .
?company wdt:P159 ?location .
SERVICE wikibase:label { bd:serviceParam wikibase:language "en" }
}
LIMIT 10000
""" % (min_employee_num)
return self.run_query(query)
def get_animals(self):
query = """
SELECT DISTINCT ?animalLabel ?mammalsub2Label ?mammalsub1Label
WHERE
{
?animal wdt:P279 ?mammalsub2 .
?mammalsub2 wdt:P279 ?mammalsub1 .
?mammalsub1 wdt:P279 wd:Q729 .
SERVICE wikibase:label { bd:serviceParam wikibase:language "en" }
}
LIMIT 10000
"""
query = """
SELECT DISTINCT ?fishLabel
WHERE
{
?fish wdt:P279 wd:Q3314483 .
SERVICE wikibase:label { bd:serviceParam wikibase:language "en" }
}
LIMIT 10000
"""
return self.run_query(query)
def get_spouses(self):
query = """
SELECT DISTINCT ?personLabel ?spouseLabel
WHERE
{
?spouse wdt:P26 ?person .
?person wdt:P27 wd:Q30 .
SERVICE wikibase:label { bd:serviceParam wikibase:language "en" }
}
LIMIT 10000
"""
return self.run_query(query)
def get_person_place_of_birth(self):
query = """
SELECT DISTINCT ?personLabel ?placeofbirthLabel
WHERE
{
?person wdt:P19 ?placeofbirth .
SERVICE wikibase:label { bd:serviceParam wikibase:language "en" }
}
LIMIT 1
"""
return self.run_query(query)
def get_children(self):
query = """
SELECT DISTINCT ?personLabel ?childLabel ?countryLabel
WHERE
{
?person wdt:P40 ?child .
?person wdt:P27 wd:Q30 .
SERVICE wikibase:label { bd:serviceParam wikibase:language "en" }
}
LIMIT 20000
"""
return self.run_query(query)
def get_olmpics_composition_data(self):
sparql_queries = [{'query': """
SELECT DISTINCT ?bandLabel ?inception ?hasPartLabel WHERE {
?band wdt:P31 wd:Q215380 .
?band wdt:P136 ?genre .
?band wdt:P571 ?inception .
?band wdt:P527 ?hasPart .
SERVICE wikibase:label {
bd:serviceParam wikibase:language "[AUTO_LANGUAGE],en" .
}
}
""",
'object': 'hasPartLabel', 'inner': 'bandLabel', 'answer': 'inception',
'name': 'bands'},
{'query': """
SELECT DISTINCT ?movieLabel ?personLabel ?spouseLabel WHERE {
?movie wdt:P31 wd:Q11424 .
?movie wdt:P161 ?person .
?person wdt:P26 ?spouse .
# ?narrative_location wdt:P625 ?coordinates .
SERVICE wikibase:label { bd:serviceParam wikibase:language "[AUTO_LANGUAGE],en". }
}
LIMIT 10000
""",
'object': 'movieLabel', 'inner': 'personLabel', 'answer': 'spouseLabel',
'name': 'movies'},
{'query': """
SELECT DISTINCT ?personLabel ?companyLabel ?locationLabel
WHERE
{
?company wdt:P112 ?person .
?person wdt:P31 wd:Q5 .
?company wdt:P159 ?location .
SERVICE wikibase:label { bd:serviceParam wikibase:language "en" }
}
LIMIT 10000
""",
'object': 'personLabel', 'inner': 'companyLabel', 'answer': 'locationLabel',
'name': 'companies'}]
all_data = []
print('querying on sparql wikidata')
for query in tqdm(sparql_queries):
task_name = query['name']
print('query: {}'.format(task_name))
r = requests.get(self._sparql_url, params={'format': 'json', 'query': query['query']})
print(r)
data = r.json()
print('creating tabular data')
query_data = []
for item in data['results']['bindings']:
inner = item[query['inner']]['value']
answer = item[query['answer']]['value']
obj = item[query['object']]['value']
query_data.append([inner, answer, obj, task_name])
df = pd.DataFrame(query_data, columns=['inner', 'answer', 'object', 'name'])
# particular handling dates
if query['name'] == 'bands':
df['answer'] = pd.to_datetime(df['answer'], errors='coerce')
df['answer'] = df['answer'].dt.strftime('%Y')
df['answer'] = df['answer'].astype(str)
df.drop_duplicates(inplace=True)
df = df.drop(df[df.object.str.startswith('Q')].index)
all_data.append(df)
df = pd.concat(all_data)
return df
# storage for the instance reference
__instance = None
def __init__(self):
""" Create singleton instance """
# Check whether we already have an instance
if WikiData.__instance is None:
# Create and remember instance
WikiData.__instance = WikiData.__impl()
        # Store instance reference as the only member in the handle
        self.__dict__['_WikiData__instance'] = WikiData.__instance
def __getattr__(self, attr):
""" Delegate access to implementation """
return getattr(self.__instance, attr)
def __setattr__(self, attr, value):
""" Delegate access to implementation """
return setattr(self.__instance, attr, value)
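# Minimal usage sketch (illustrative only; requires network access to the
# Wikidata SPARQL endpoint and may take a while for large result sets):
if __name__ == "__main__":
    wd = WikiData()
    capitals_df = wd.get_capitals(min_city_size=1000000)  # cities above 1M inhabitants
    print(capitals_df.head())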
|
from .empty2d import EmptyWorld2D
|
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: orderbook/orderbook.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from github.com.gogo.protobuf.gogoproto import gogo_pb2 as github_dot_com_dot_gogo_dot_protobuf_dot_gogoproto_dot_gogo__pb2
from github.com.crypto_bank.proto.order import order_pb2 as github_dot_com_dot_crypto__bank_dot_proto_dot_order_dot_order__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='orderbook/orderbook.proto',
package='orderbook',
syntax='proto3',
serialized_pb=_b('\n\x19orderbook/orderbook.proto\x12\torderbook\x1a-github.com/gogo/protobuf/gogoproto/gogo.proto\x1a.github.com/crypto-bank/proto/order/order.proto\"N\n\x05\x45vent\x12\x1d\n\x05order\x18\x01 \x01(\x0b\x32\x0c.order.OrderH\x00\x12\x1d\n\x05trade\x18\x02 \x01(\x0b\x32\x0c.order.TradeH\x00\x42\x07\n\x05\x65vent2\x0b\n\tOrderBookB\x17Z\torderbook\xd0\xe2\x1e\x01\xc8\xe2\x1e\x01\xa8\xe2\x1e\x01\x62\x06proto3')
,
dependencies=[github_dot_com_dot_gogo_dot_protobuf_dot_gogoproto_dot_gogo__pb2.DESCRIPTOR,github_dot_com_dot_crypto__bank_dot_proto_dot_order_dot_order__pb2.DESCRIPTOR,])
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_EVENT = _descriptor.Descriptor(
name='Event',
full_name='orderbook.Event',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='order', full_name='orderbook.Event.order', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='trade', full_name='orderbook.Event.trade', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='event', full_name='orderbook.Event.event',
index=0, containing_type=None, fields=[]),
],
serialized_start=135,
serialized_end=213,
)
_EVENT.fields_by_name['order'].message_type = github_dot_com_dot_crypto__bank_dot_proto_dot_order_dot_order__pb2._ORDER
_EVENT.fields_by_name['trade'].message_type = github_dot_com_dot_crypto__bank_dot_proto_dot_order_dot_order__pb2._TRADE
_EVENT.oneofs_by_name['event'].fields.append(
_EVENT.fields_by_name['order'])
_EVENT.fields_by_name['order'].containing_oneof = _EVENT.oneofs_by_name['event']
_EVENT.oneofs_by_name['event'].fields.append(
_EVENT.fields_by_name['trade'])
_EVENT.fields_by_name['trade'].containing_oneof = _EVENT.oneofs_by_name['event']
DESCRIPTOR.message_types_by_name['Event'] = _EVENT
Event = _reflection.GeneratedProtocolMessageType('Event', (_message.Message,), dict(
DESCRIPTOR = _EVENT,
__module__ = 'orderbook.orderbook_pb2'
# @@protoc_insertion_point(class_scope:orderbook.Event)
))
_sym_db.RegisterMessage(Event)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('Z\torderbook\320\342\036\001\310\342\036\001\250\342\036\001'))
try:
# THESE ELEMENTS WILL BE DEPRECATED.
# Please use the generated *_pb2_grpc.py files instead.
import grpc
from grpc.framework.common import cardinality
from grpc.framework.interfaces.face import utilities as face_utilities
from grpc.beta import implementations as beta_implementations
from grpc.beta import interfaces as beta_interfaces
class OrderBookStub(object):
"""Orders - Streams order book updates.
rpc Orders(OrdersRequest) returns (stream OrderBookUpdate) {};
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
class OrderBookServicer(object):
"""Orders - Streams order book updates.
rpc Orders(OrdersRequest) returns (stream OrderBookUpdate) {};
"""
def add_OrderBookServicer_to_server(servicer, server):
rpc_method_handlers = {
}
generic_handler = grpc.method_handlers_generic_handler(
'orderbook.OrderBook', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
class BetaOrderBookServicer(object):
"""The Beta API is deprecated for 0.15.0 and later.
It is recommended to use the GA API (classes and functions in this
file not marked beta) for all further purposes. This class was generated
only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0."""
"""Orders - Streams order book updates.
rpc Orders(OrdersRequest) returns (stream OrderBookUpdate) {};
"""
class BetaOrderBookStub(object):
"""The Beta API is deprecated for 0.15.0 and later.
It is recommended to use the GA API (classes and functions in this
file not marked beta) for all further purposes. This class was generated
only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0."""
"""Orders - Streams order book updates.
rpc Orders(OrdersRequest) returns (stream OrderBookUpdate) {};
"""
def beta_create_OrderBook_server(servicer, pool=None, pool_size=None, default_timeout=None, maximum_timeout=None):
"""The Beta API is deprecated for 0.15.0 and later.
It is recommended to use the GA API (classes and functions in this
file not marked beta) for all further purposes. This function was
generated only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0"""
request_deserializers = {
}
response_serializers = {
}
method_implementations = {
}
server_options = beta_implementations.server_options(request_deserializers=request_deserializers, response_serializers=response_serializers, thread_pool=pool, thread_pool_size=pool_size, default_timeout=default_timeout, maximum_timeout=maximum_timeout)
return beta_implementations.server(method_implementations, options=server_options)
def beta_create_OrderBook_stub(channel, host=None, metadata_transformer=None, pool=None, pool_size=None):
"""The Beta API is deprecated for 0.15.0 and later.
It is recommended to use the GA API (classes and functions in this
file not marked beta) for all further purposes. This function was
generated only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0"""
request_serializers = {
}
response_deserializers = {
}
cardinalities = {
}
stub_options = beta_implementations.stub_options(host=host, metadata_transformer=metadata_transformer, request_serializers=request_serializers, response_deserializers=response_deserializers, thread_pool=pool, thread_pool_size=pool_size)
return beta_implementations.dynamic_stub(channel, 'orderbook.OrderBook', cardinalities, options=stub_options)
except ImportError:
pass
# @@protoc_insertion_point(module_scope)
|
# Copyright 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from awscli.customizations import utils
def register_rename_config(cli):
cli.register('building-command-table.main', change_name)
def change_name(command_table, session, **kwargs):
"""
Change all existing ``aws config`` commands to ``aws configservice``
commands.
"""
utils.rename_command(command_table, 'config', 'configservice')
|
# !wget https://thispersondoesnotexist.com/image -O 'image.png'
import matplotlib.pyplot as plt
import cv2
# load
image = cv2.imread("image.png")
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
# show
plt.imshow(image)
# show without edge blur
plt.imshow(image, interpolation='nearest')
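# Outside a notebook, an explicit call is needed to actually open the window:
plt.show()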
|
import threading
def create_thread(obj):
obj_thread = threading.Thread(target=obj.run, daemon=True)
obj_thread.start()
    while obj_thread.is_alive():
obj_thread.join(1)
obj_response = obj.get_response()
return obj_response
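# Minimal usage sketch (illustrative only): any object exposing run() and
# get_response() works; this Worker class is made up for the example.
class Worker:
    def __init__(self):
        self._response = None
    def run(self):
        self._response = "done"
    def get_response(self):
        return self._response
if __name__ == "__main__":
    print(create_thread(Worker()))  # prints "done" once the worker thread finishes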
|
import os # NOQA
import sys # NOQA
import re
import fileinput
from utils import Point, parse_line
def printgrid():
for y in range(min_y - 1, max_y + 1):
print ''.join(grid.get(Point(x, y), '.') for x in range(min_x - 1, max_x + 2))
print
grid = {}
min_y = 1e10
max_y = -1e10
min_x = 1e10
max_x = -1e10
for i, line in enumerate(fileinput.input()):
a, x, b, y, z = parse_line(r'(.)=(\d+), (.)=(\d+)..(\d+)', line)
for i in range(y, z + 1):
if a == 'x':
grid[Point(x, i)] = '#'
min_y = min(min_y, i)
max_y = max(max_y, i)
min_x = min(min_x, x)
max_x = max(max_x, x)
else:
grid[Point(i, x)] = '#'
min_y = min(min_y, x)
max_y = max(max_y, x)
min_x = min(min_x, i)
max_x = max(max_x, i)
SPRING = 500
DOWN = Point(0, 1)
LEFT = Point(-1, 0)
RIGHT = Point(1, 0)
sources = set([Point(SPRING, 0)])
while sources:
p = sorted(iter(sources), key=lambda x: x.y)[0]
sources.remove(p)
if p.y > max_y:
continue
source_added = False
if p + DOWN in grid:
# Try going left and right
q = p
left_wall = None
while q + DOWN in grid:
q += LEFT
left_wall = q
if grid.get(q) == '#':
break
else:
sources.add(q)
source_added = True
right_wall = None
q = p
while q + DOWN in grid:
q += RIGHT
right_wall = q
if grid.get(q) == '#':
break
else:
sources.add(q)
source_added = True
for x in range(left_wall.x + 1, right_wall.x):
z = Point(x, p.y)
sources.discard(z)
if grid.get(z) != '#':
grid[z] = '~'
if not source_added:
sources.add(p - DOWN)
else:
grid[p] = '~'
sources.add(p + DOWN)
grid[Point(SPRING, 0)] = '+'
# printgrid()
water_count = [v for k, v in grid.items() if min_y <= k.y <= max_y].count('~')
# There's still a bug with the above algorithm where it produces
# a single "double-stream" near the bottom, so just correct it...
print "Tiles reachable by water:", water_count - 21
lines = []
for y in range(min_y - 1, max_y + 1):
lines.append(''.join(grid.get(Point(x, y), '.') for x in range(min_x - 1, max_x + 2)))
total = 0
for line in lines:
matches = re.findall(r'#((?:~|#)+)#', line)
total += sum(m.count('~') for m in matches)
print "Steady-state water tiles:", total
|
# -*- coding: utf-8 -*-
"""
Solution to Project Euler problem 109 - Darts
Author: Jaime Liew
https://github.com/jaimeliew1/Project_Euler_Solutions
"""
import numpy as np
def run():
target = 100
poss_d = np.concatenate([2 * np.arange(1, 21), [50]])
poss = np.concatenate(
[np.arange(1, 21), 2 * np.arange(1, 21), 3 * np.arange(1, 21), [25, 50]]
)
count = 0
# single dart
for d in poss_d:
if d < target:
count += 1
# double darts
for d1 in poss_d:
for d2 in poss:
if d1 + d2 < target:
count += 1
# triple darts
for d1 in poss_d:
for i, d2 in enumerate(poss):
for d3 in poss[i:]:
if d1 + d2 + d3 < target:
count += 1
return count
if __name__ == "__main__":
print(run())
|
class Files():
completion_error='school_invalid_data.csv'
student_donwload='/student_attendance_allDistricts_'
student_block='/student_attendance_allBlocks_'
student_cluster='/student_attendance_allClusters_'
student_school='/student_attendance_allSchools_'
student_districtwise='/student_attendance_Blocks_of_district_'
student_blockwise='/student_attendance_Clusters_of_block_'
student_clusterwise='/student_attendance_schools_of_cluster_'
teacher_donwload = 'teacher_attendance_allDistricts_overall_'
teacher_block = 'teacher_attendance_allBlocks_overall_'
teacher_cluster = 'teacher_attendance_allClusters_'
teacher_school = 'teacher_attendance_allSchools_'
teacher_districtwise ='teacher_attendance_Blocks_of_district_'
teacher_blockwise ='teacher_attendance_Clusters_of_block_'
teacher_clusterwise ='teacher_attendance_schools_of_cluster_'
sr_block='semester_assessment_test_all_allGrades__allBlocks_'
sr_cluster='semester_assessment_test_all_allGrades__allClusters_'
sr_school='semester_assessment_test_all_allGrades__allSchools_'
sr_district='semester_assessment_test_all_allGrades__allDistricts_'
sr_districtwise='semester_assessment_test_all_allGrades__blocks_of_district_'
sr_blockwise='semester_assessment_test_all_allGrades__clusters_of_block_'
sr_clusterwise='semester_assessment_test_all_allGrades__schools_of_cluster_'
sr_gradewise="semester_assessment_test_all_Grade_"
sr_subjectwise="semester_assessment_test_all_Grade_"
satchart_district = 'semester_assessment_test_heatmap_overall_allDistricts_'
satchart_districtwise ='semester_assessment_test_heatmap_'
satchart_blockwise ='semester_assessment_test_heatmap_'
satchart_clusterwise = 'semester_assessment_test_heatmap_'
satchart_view = "semester_assessment_test_heatmap_"
sat_subject_wise = "semester_assessment_test_heatmap_"
sat_chart_grades = "semester_assessment_test_heatmap_"
exception_block='semester_exception_2nd_sem_allBlocks_'
exception_cluster='semester_exception_2nd_sem_allClusters_'
exception_school='semester_exception_2nd_sem_allSchools_'
exception_district='semester_exception_2nd_sem_allDistricts_'
exception_districtwise='semester_exception_2nd_sem_blocks_of_district_'
exception_blockwise='semester_exception_2nd_sem_clusters_of_block_'
exception_clusterwise='semester_exception_2nd_sem_schools_of_cluster_'
crc_district='crc_report_overall_allDistricts_'
crc_block='crc_report_overall_allBlocks_'
crc_cluster='crc_report_overall_allClusters_'
crc_school='crc_report_overall_allSchools_'
crc_districtwise='crc_report_overall_blocks_of_district_'
crc_blockwise='crc_report_overall_clusters_of_block_'
crc_clusterwise='crc_report_overall_schools_of_cluster_'
composite_block='composite_report_across_metrics_allBlocks_'
composite_cluster='composite_report_across_metrics_allClusters_'
composite_district='composite_report_across_metrics_allDistricts_'
composite_districtwise='composite_report_across_metrics_blocks_of_district_'
composite_blockwise='composite_report_across_metrics_clusters_of_block_'
composite_clusterwise='composite_report_across_metrics_schools_of_cluster_'
scmap_block='infrastructure_access_by_location_allBlocks_'
scmap_cluster='infrastructure_access_by_location_allClusters_'
scmap_school='infrastructure_access_by_location_allSchools_'
scmap_district='infrastructure_access_by_location_allDistricts_'
scmap_districtwise='infrastructure_access_by_location_blocks_of_district_'
scmap_blockwise='infrastructure_access_by_location_clusters_of_block_'
scmap_clusterwise='infrastructure_access_by_location_schools_of_cluster_'
sc_district='composite_report_allDistricts_'
sc_block='composite_report_allBlocks_'
sc_cluster='composite_report_allClusters_'
sc_school='composite_report_allSchools_'
sc_districtwise='composite_report_blocks_of_district_'
sc_blockwise='composite_report_clusters_of_block_'
sc_clusterwise='composite_report_schools_of_cluster_'
diksha_stack_all='Diksha_last_30_days_data_All.csv'
diksha_stack_teacher='Diksha_last_30_days_data_Teacher.csv'
diskha_stack_student='Diksha_last_30_days_data_Student.csv'
diskha_stack_others='Diksha_last_30_days_data_Other.csv'
udise_district='UDISE_report_Infrastructure_Score_allDistricts_'
udise_block='UDISE_report_Infrastructure_Score_allBlocks_'
udise_cluster='UDISE_report_Infrastructure_Score_allClusters_'
udise_school='UDISE_report_Infrastructure_Score_allSchools_'
udise_districtwise='UDISE_report_Infrastructure_Score_blocks_of_district_'
udise_blockwise='UDISE_report_Infrastructure_Score_clusters_of_block_'
udise_clusterwise='UDISE_report_Infrastructure_Score_schools_of_cluster_'
telemtry_block='telemerty_allBlocks_overall_'
telemetry_cluster='telemerty_allClusters_overall_'
telemetry_school='telemerty_allSchools_overall_'
telemetry_overall='telemerty_allDistricts_overall_'
telemetry_last7days='telemerty_allDistricts_last_7_days_'
telemetry_lastday='telemerty_allDistricts_last_day_'
telemetry_lastmonth='telemerty_allDistricts_last_30_days_'
pat_district='periodic_assessment_test_all_allGrades__allDistricts_'
pat_block='periodic_assessment_test_all_allGrades__allBlocks_'
pat_cluster='periodic_assessment_test_all_allGrades__allClusters_'
pat_school='periodic_assessment_test_all_allGrades__allSchools_'
pat_districtwise='periodic_assessment_test_all_allGrades__blocks_of_district_'
pat_blockwise='periodic_assessment_test_all_allGrades__clusters_of_block_'
pat_clusterwise='periodic_assessment_test_all_allGrades__schools_of_cluster_'
pat_gradewise ='periodic_assessment_test_all_grade_'
pat_subjectwise='periodic_assessment_test_all_Grade_'
location_course ="usage_by_course_all_"
location_textbook="usage_by_textbook_all_"
pchart_district ='periodic_assessment_test_heatmap_overall_allDistricts_'
pchart_districtwise='periodic_assessment_test_heatmap_'
pchart_blockwise='periodic_assessment_test_heatmap_'
pchart_clusterwise='periodic_assessment_test_heatmap_'
pchart_view ="periodic_assessment_test_heatmap_"
subject_wise ="periodic_assessment_test_heatmap_"
pchart_grades ="periodic_assessment_test_heatmap_"
patlo_district = 'periodic_assessment_test_loTable_overall_allDistricts_'
patlo_districtwise = 'periodic_assessment_test_loTable_'
patlo_blockwise = 'periodic_assessment_test_loTable_'
patlo_clusterwise = 'periodic_assessment_test_loTable_'
patlo_view = "periodic_assessment_test_loTable_"
patlo_subjectwise = "periodic_assessment_test_loTable_"
patlo_grades = "periodic_assessment_test_loTable_"
student_academic ="student_academic_"
teacher_academic ="teacher_academic_"
tpd_district="TPD_course_progress_allDistrict_overall_"
tpd_lastday="TPD_course_progress_allDistrict_Last_Day_"
tpd_lastweek="TPD_course_progress_allDistrict_Last_7_Day_"
tpd_lastmonth="TPD_course_progress_allDistrict_Last_30_Day_"
tpd_all_districtwise="TPD_course_progress_overall_blocks_of_district_"
tpd_lastday_districtwise="TPD_course_progress_Last_Day_blocks_of_district_"
tpd_lastmonth_districtwise="TPD_course_progress_Last_30_Day_blocks_of_district_"
tpd_lastweek_districtwise="TPD_course_progress_Last_7_Day_blocks_of_district_"
tpd_cluster="TPD_course_progress_overall_clusters_of_block_"
tpd_school ="TPD_course_progress_overall_schools_of_cluster_"
tpd_teacher_district = "TPD_teacher_percentage_allDistrict_overall_"
tpd_teacher_lastday = "TPD_teacher_percentage_allDistrict_Last_Day_"
tpd_teacher_lastmonth = "TPD_teacher_percentage_allDistrict_Last_30_Day_"
tpd_teacher_lastweek = "TPD_teacher_percentage_allDistrict_Last_7_Day_"
tpd_teacher_all_districtwise = "TPD_teacher_percentage_overall_blocks_of_district_"
tpd_teacher_lastday_districtwise = "TPD_teacher_percentage_Last_Day_blocks_of_district_"
tpd_teacher_lastmonth_districtwise = "TPD_teacher_percentage_Last_30_Day_blocks_of_district_"
tpd_teacher_lastweek_districtwise = "TPD_teacher_percentage_Last_7_Day_blocks_of_district_"
tpd_teacher_cluster = "TPD_teacher_percentage_overall_clusters_of_block_"
tpd_teacher_school = "TPD_teacher_percentage_overall_schools_of_cluster_"
d1_name="Ahmedabad"
d2_name="Valsad"
b1_name="City"
b2_name="Amc"
c1_name="Dungari"
c2_name="Amalsad"
s1_name="School No-53 Mahadevnagar"
s2_name="Ameshwarpura Pri. Sch."
d1_id = "2407"
d2_id = "2425"
b1_id = "240705"
b2_id = "240712"
c1_id = "2407120001"
c2_id = "2404030005"
s1_id = "24070500301"
s2_id = "24250603602"
nda =" No Data Available "
ndf = "No data found"
enrollment_overall=""
completion_overall=""
|
#cronjob syntax running on server: 0 8 * * * python3 homework-10-gruen-apirequest.py
#API key: yourapikey
#Place in decimal degrees: NYC = {'Latitude': 40.7142700 , 'Longitude': -74.0059700}
import requests
weather_response = requests.get ('https://api.forecast.io/forecast/yourapikey/40.7142700,-74.0059700')
weather_data = weather_response.json()
daily_weather = weather_data['daily']['data']
#TEMPERATURE is the current temperature
temperature = weather_data['currently']['temperature']
#SUMMARY is what it currently looks like (partly cloudy, etc - it's "summary" in the dictionary). Lowercase, please.
summary = daily_weather[0]['summary']
#HIGH_TEMP is the high temperature for the day.
high_temp = daily_weather[0]['temperatureMax']
#LOW_TEMP is the low temperature for the day.
low_temp = daily_weather[0]['temperatureMin']
#TEMP_FEELING is whether it will be hot, warm, cold, or moderate. You will probably use HIGH_TEMP and your own thoughts and feelings to determine this.
hot_threshold = 80
cold_threshold = 68
if high_temp > hot_threshold:
    temp_feeling = "quite hot"
elif high_temp > cold_threshold:
    temp_feeling = "nicely warm"
else:
    temp_feeling = "pretty chilly"
#RAIN_WARNING is something like "bring your umbrella!" if it is going to rain at some point during the day.
rain_probability = daily_weather[0]['precipProbability']
rain_likelihood = rain_probability * 100
if rain_likelihood < 10:
    rain_warning = "no need for an umbrella, but maybe pack sun glasses."
elif rain_likelihood < 20:
    rain_warning = "no need for an umbrella, but maybe to be on the safe side: don't wear white."
elif rain_likelihood < 50:
    rain_warning = "take an umbrella with you, it's pretty likely to rain."
else:
    rain_warning = "if you don't have to leave the house, maybe stay inside. Rain ahead."
weather_forecast_output = ["Right now it is " + str(temperature) + " degrees out and " + str(summary) +" Today will be " + str(temp_feeling) + " with a high of " + str(high_temp) + " degrees Fahrenheit and a low of " + str(low_temp) + " degrees Fahrenheit. It's going to rain with a probability of " + str(rain_likelihood) + " percent so " + str(rain_warning)]
# with commas instead of + it gave me a line break at each comma, that's why I replaced it.
def date_in_words(date):
months = ['January', 'February', 'March', 'April', 'May', 'June', 'July', 'August', 'September', 'October', 'November', 'December']
import dateutil.parser
date = dateutil.parser.parse(date)
return months[date.month - 1]
def day_in_number(day):
import dateutil.parser
day = dateutil.parser.parse(day)
return day.day
def year_in_number(year):
import dateutil.parser
year = dateutil.parser.parse(year)
return year.year
import time
datestring = time.strftime("%Y-%m-%d-%H-%M")
final_date = "{0} {1} - {2}".format(date_in_words(datestring), day_in_number(datestring), year_in_number(datestring))
specified_subjectline = "8AM weather forecast: " + final_date
import requests
key = 'key-3cc8e00b0f71c52c6c307ff9d43e6846'
#sandbox = 'https://api.mailgun.net/v3/sandboxf0b49f8cb01849278457886ebe247d33.mailgun.org/messages'
recipient = 'gcgruen@gmail.com'
request_url = 'https://api.mailgun.net/v3/sandboxf0b49f8cb01849278457886ebe247d33.mailgun.org/messages'
request = requests.post(request_url, auth=('api', key), data={
'from': 'postmaster@sandboxf0b49f8cb01849278457886ebe247d33.mailgun.org',
'to': recipient,
'subject': specified_subjectline,
'text': weather_forecast_output
})
|
from car import Car
my_new_car = Car('audi', 'a4', 2015)
print(my_new_car.get_descriptive_name())
my_new_car.odometer_reading = 23
my_new_car.read_odometer()
|
version=".92B"
date="7/31/2018"
import sys, os, subprocess, shutil
import pandas
from os import listdir
from os.path import isfile, join
def find_charge_range(input_pin_files): #This function is called to return information on the max an min charges considered among a collection of percolator 'pin' input files
all_max_charge=2
all_min_charge=2
    for each in input_pin_files:
line_ctr=0
with open(folder+"/"+each,'rb') as pin_reader:
peptide_index=0
index_ctr=0
expmass_index=0
specID_index=0
scan_index=0
label_index=0
charge_indicies={}
charges=[]
pin_lines=[]
for eachline in pin_reader:
if line_ctr==0:
header=eachline.split("\t")
for each_item in header:
if each_item == "Peptide":
peptide_index=index_ctr
elif each_item == "SpecId":
specID_index=index_ctr
elif each_item == "ScanNr":
scan_index=index_ctr
elif each_item == "ExpMass":
expmass_index = index_ctr
elif each_item == "Label":
label_index=index_ctr
elif "Charge" in each_item:
charge_indicies[each_item]=index_ctr
charges.append(int(each_item.replace("Charge","")))
index_ctr+=1
else:
break
line_ctr=0
charge_index_max=max(charge_indicies.values())
charge_index_min=min(charge_indicies.values())
charge_max=max(charges)
charge_min=min(charges)
if charge_max>all_max_charge:
all_max_charge=charge_max
if charge_min<all_min_charge:
all_min_charge=charge_min
return all_max_charge,all_min_charge
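#Illustrative, made-up example of the pin header columns this scan looks for (tab-separated):
#  SpecId  Label  ScanNr  ExpMass  ...  Charge2  Charge3  Charge4  ...  Peptide
#With charges 2 through 4 present across the files, find_charge_range returns (4, 2).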
arguments=sys.argv
print arguments
arguments=arguments[1:]
independent_perco=arguments[0]
arguments=arguments[1:]
folder=arguments[0]
arguments=arguments[1:]
exp_group_file=arguments[0]
arguments=arguments[1:]
proton_mass=1.007276
onlyfiles = [ f for f in listdir(folder) if isfile(join(folder,f)) ]
startingdir=os.getcwd()
#cmd = " ".join(arguments)
#print " ".join(arguments)
processes=[]
os.mkdir("output")
#print onlyfiles,"onlyfiles"
group_information = pandas.read_csv(exp_group_file,sep='\t')
run_dict={} # Key is file_idx, value is file_name.mzML
run_dict_reverse={}
group_to_run_dict={} # Key is group, value is [1, 2, 3, 4] list of file_idx belonging to runs in the group...
group_to_file_name={} # key is group, value is ["xxxx.mzML", "xxxx.mzML"]
run_to_group_dict={} #Key is run, value is group for pin file
for index,row in group_information.iterrows():
run_dict[str(row['Crux File Integer'])]=row['Original File Name']+".mzML"
run_dict_reverse[row['Original File Name']+".mzML"]=row['Crux File Integer']
run_to_group_dict[row['Original File Name']+".mzML"]=row['Fractionation Group ID String']
if row['Fractionation Group ID String'] in group_to_run_dict:
group_to_run_dict[row['Fractionation Group ID String']].append(str(row['Crux File Integer']))
else:
group_to_run_dict[row['Fractionation Group ID String']] = [str(row['Crux File Integer'])]
if row['Fractionation Group ID String'] in group_to_file_name:
group_to_file_name[row['Fractionation Group ID String']].append(str(row['Original File Name'])+".mzML")
else:
group_to_file_name[row['Fractionation Group ID String']] = [str(row['Original File Name'])+".mzML"]
#Here we actually run percolator
if independent_perco=="T": #If =="T" then we'll run percolator separately for each file or group
for each in onlyfiles:
#print each
cmd=arguments[:]
cmd.append("--fileroot")
cmd.append(each.split(".")[0])
cmd.append(each)
os.mkdir("output/"+each+"_out")
os.rename(folder+"/"+each,"output/"+each+"_out/"+each)
os.chdir("output/"+each+"_out")
print "---------------------------"
print cmd,"this will run"
print "---------------------------"
processes.append(subprocess.Popen(cmd))
os.chdir(startingdir)
for each in processes:
each.wait()
for each in onlyfiles: #here i need to take the group and filter it into the runs that belong TO THIS GROUP so use the group_to_run_dict to get the fileidx set
clean_name=each.rsplit(".",1)[0]
files_to_filter=[clean_name+'.percolator.decoy.peptides.txt',clean_name+'.percolator.target.peptides.txt',clean_name+'.percolator.target.psms.txt',clean_name+'.percolator.decoy.psms.txt']
os.chdir(startingdir+"/output/"+each.replace(".mzML",".pin")+"_out/crux-output")##
for each_newfile in group_to_file_name[each.replace(".pin","")]:#run_dict_reverse:
print "handling {0} and {1}".format(each,each_newfile)
if each_newfile.replace(".mzML","") != each.replace(".pin",""):
if not os.path.isdir(startingdir+"/output/"+each_newfile.replace(".mzML",".pin")+"_out/"):
os.mkdir(startingdir+"/output/"+each_newfile.replace(".mzML",".pin")+"_out/")
shutil.copytree(startingdir+"/output/"+each.replace(".mzML",".pin")+"_out/crux-output",startingdir+"/output/"+each_newfile.replace(".mzML",".pin")+"_out/crux-output")
os.chdir(startingdir+"/output/"+each_newfile.replace(".mzML",".pin")+"_out/crux-output")
#Now we'll load each of the following files and filter them out, and rename them....
for each_filter_file in files_to_filter:
this_pin_df=pandas.read_csv(each_filter_file,sep='\t')
this_pin_df=this_pin_df[this_pin_df['file_idx']==run_dict_reverse[each_newfile]]
this_pin_df.to_csv(each.rsplit(".",1)[0]+'.'+'.'.join(each_filter_file.rsplit(".",4)[1:]),sep='\t',index=False)
os.chdir(startingdir)#
#cleanup
#for each_filter_file in files_to_filter:
# os.remove(each_filter_file)
#os.chdir(startingdir)
#cleanup
#os.chdir("output/")
#os.system("tar -cvf - combined_out/ 2>/dev/null | pigz -9 -p 24 > combined_perco.tar.gz")
#shutil.rmtree("combined_out/")
os.chdir(startingdir)
else: #This assumes =="F", and we'll run percolator ONCE on the AGGREGATE DATA.
#2. Run crux percolator for the aggregated data
#3. Read in the outputs [psms:[targets, decoys],peptides:[targets,decoys] as a single dataframes, and then split them by file and output them to proper folders]
#4. Copy the XML files and other outputs to each folder for completeness.
all_max_charge,all_min_charge=find_charge_range(onlyfiles)
print "Max charge is {0} and min charge is {1}".format(all_max_charge,all_min_charge)
os.mkdir("output/combined_out")
first_file=True
with open("output/combined_out/combined_input.pin",'wb') as pin_writer:
#print run_dict_reverse,"new"
#print onlyfiles,"old"
for each in onlyfiles:
line_ctr=0
with open(folder+"/"+each,'rb') as pin_reader:
peptide_index=0
index_ctr=0
expmass_index=0
specID_index=0
scan_index=0
label_index=0
charge_indicies={}
charges=[]
pin_lines=[]
for eachline in pin_reader:
if line_ctr==0:
header=eachline.split("\t")
for each_item in header:
if each_item == "Peptide":
peptide_index=index_ctr
elif each_item == "SpecId":
specID_index=index_ctr
elif each_item == "ScanNr":
scan_index=index_ctr
elif each_item == "ExpMass":
expmass_index = index_ctr
elif each_item == "Label":
label_index=index_ctr
elif "Charge" in each_item:
charge_indicies[each_item]=index_ctr
charges.append(int(each_item.replace("Charge","")))
index_ctr+=1
else:
break
line_ctr=0
charge_index_max=max(charge_indicies.values())
charge_index_min=min(charge_indicies.values())
charge_max=max(charges)
charge_min=min(charges)
add_before=[]
add_after=[]
add_header_before=[]
add_header_after=[]
add_direction_before=[]
add_direction_after=[]
if charge_min>all_min_charge:
for i in xrange(all_min_charge,charge_min):
add_before.append("0")
add_header_before.append("Charge{0}".format(i))
add_direction_before.append("0")
if charge_max<all_max_charge:
                    for i in xrange(charge_max+1,all_max_charge+1):
add_after.append("0")
add_header_after.append("Charge{0}".format(i))
add_direction_after.append("0")
pin_reader.seek(0)
for each_line in pin_reader:
if line_ctr <2 and first_file:
if line_ctr==0:
front_half=each_line.split("\t")[:charge_index_min]
charges=each_line.split("\t")[charge_index_min:charge_index_max+1]
back_half=each_line.split("\t")[charge_index_max+1:]
#After this segment, ensure that line is split properly, add charges missing into the charges middle section, and then re-concat them back together and replace each_line
front_half.extend(add_header_before)
front_half.extend(charges)
front_half.extend(add_header_after)
front_half.extend(back_half)
each_line="\t".join(front_half)
else:
front_half=each_line.split("\t")[:charge_index_min]
charges=each_line.split("\t")[charge_index_min:charge_index_max+1]
back_half=each_line.split("\t")[charge_index_max+1:]
#After this segment, ensure that line is split properly, add charges missing into the charges middle section, and then re-concat them back together and replace each_line
front_half.extend(add_direction_before)
front_half.extend(charges)
front_half.extend(add_direction_after)
front_half.extend(back_half)
each_line="\t".join(front_half)
print each_line#################################################
pin_writer.write(each_line)
elif line_ctr >=2:
split_line=each_line.split("\t")
front_half=each_line.split("\t")[:charge_index_min]
charges=each_line.split("\t")[charge_index_min:charge_index_max+1]
back_half=each_line.split("\t")[charge_index_max+1:]
#After this segment, ensure that line is split properly, add charges missing into the charges middle section, and then re-concat them back together and replace each_line
front_half.extend(add_before)
front_half.extend(charges)
front_half.extend(add_after)
front_half.extend(back_half)
each_line="\t".join(front_half)
#each_line="\t".join(split_line)
pin_writer.write(each_line)
if line_ctr == 1 and first_file:
first_file=False #no more headers!
line_ctr+=1
#We only want the header from the first file, top two lines. Everything else we should just write in...
#Then we'll execute percolator on the aggregate data
#Then we'll split it and divy it up between all the different folders per file input
os.mkdir("output/"+each+"_out") # We'll make the folders into which we'll end up sticking the final split outputs
os.rename(folder+"/"+each,"output/"+each+"_out/"+each) #Move each separate pin file into its own folder, too
cmd=arguments[:]
cmd.append("--fileroot")
cmd.append("combined_analysis")
cmd.append("combined_input.pin")
os.chdir("output/combined_out")
print "Running....",cmd
percolator_process=subprocess.Popen(cmd)
percolator_process.wait()
combined_out_folder=os.getcwd()
files_to_filter=['combined_analysis.percolator.decoy.peptides.txt','combined_analysis.percolator.target.peptides.txt','combined_analysis.percolator.target.psms.txt','combined_analysis.percolator.decoy.psms.txt']
for each in run_dict_reverse:
os.chdir(combined_out_folder)
shutil.copytree("crux-output",startingdir+"/output/"+each.replace(".mzML",".pin")+"_out/crux-output")
os.chdir(startingdir+"/output/"+each.replace(".mzML",".pin")+"_out/crux-output")
#Now we'll load each of the following files and filter them out, and rename them....
for each_filter_file in files_to_filter:
print "Reading file {0}".format(each_filter_file)
this_pin_df=pandas.read_csv(each_filter_file,sep='\t')
this_pin_df=this_pin_df[this_pin_df['file_idx']==run_dict_reverse[each.rsplit(".",1)[0]+".mzML"]]
this_pin_df.to_csv(each.rsplit(".",1)[0]+'.'+'.'.join(each_filter_file.rsplit(".",4)[1:]),sep='\t',index=False)
os.remove(each_filter_file)
os.chdir(startingdir)
#cleanup
os.chdir("output/")
os.system("tar -cvf - combined_out/ 2>/dev/null | pigz -9 -p 24 > combined_perco.tar.gz")
shutil.rmtree("combined_out/")
os.chdir(startingdir)
#sys.exit(2)
#First, we'll combine all the pin files together.
#for each in onlyfiles:
for neweach in run_dict_reverse:
each=neweach.replace(".mzML",".pin")
if os.path.exists("output/"+each+"_out/"+each):
pass #it already exists!
else:
#Let's go find the pin data...
#first, what run group is this in?
this_group=run_to_group_dict[neweach]
if not os.path.isdir("output/"+each+"_out/"):
os.mkdir("output/"+each+"_out/")
with open("output/"+each+"_out/"+each,'wb') as filewriter:
with open("output/"+this_group+".pin_out/"+this_group+".pin",'rb') as filereader:
linectr=0
peptide_index=0
index_ctr=0
expmass_index=0
specID_index=0
scan_index=0
label_index=0
charge_indicies={}
pin_lines=[]
for eachline in filereader:
if linectr==0:
header=eachline.split("\t")
for each_item in header:
if each_item == "Peptide":
peptide_index=index_ctr
elif each_item == "SpecId":
specID_index=index_ctr
elif each_item == "ScanNr":
scan_index=index_ctr
elif each_item == "ExpMass":
expmass_index = index_ctr
elif each_item == "Label":
label_index=index_ctr
elif "Charge" in each_item:
charge_indicies[each_item]=index_ctr
index_ctr+=1
#filewriter.write("SpecId\tlabel\tScanNr\tPeptide\tcharge\n")
filewriter.write(eachline)
elif linectr==1:
filewriter.write(eachline)
else:
#print eachline[specID_index]
if eachline.split("\t")[specID_index].split("_")[1]==str(run_dict_reverse[neweach]):
filewriter.write(eachline)
linectr+=1
# os.system("tar -cvf - combined_out/ 2>/dev/null | pigz -9 -p 24 > combined_perco.tar.gz")
for each in onlyfiles:
#if each not in [x.replace(".mzML",".pin") for x in run_dict_reverse.keys()]:
if each.replace(".pin",".mzML") not in run_dict_reverse:
#If it's not there, then we're going to mask the pin file into a tar.gz
os.system("tar -cvf - output/{0}_out/{0} 2>/dev/null | pigz -9 -p 24 > output/{1}.tar.gz".format(each,each.replace(".pin","")))
shutil.rmtree("output/{0}".format(each+"_out"))
#os.mkdir("output/"+each+"_out")
#os.rename(folder+"/"+each,"output/"+each+"_out/"+each)
#os.chdir("output/"+each+"_out")
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
################ To prepare for percolator file output, we're going to read in the percolator pin files
################ Once they're read in, we'll have to generate a unique ID of file_idx+"_"+scan to merge the pin sequence on
################ We're only going to bring in the sequence from the percolator input file.
################ ===== As of crux version 3.1, we will also begin to fix the charge column... We also correct the spectral m/z column
os.chdir(startingdir)
pin_list=[]
spectral_expmz={}
#for each in onlyfiles:
for neweach in run_dict_reverse:
each=neweach.replace(".mzML",".pin")
os.chdir("output/"+each+"_out")
with open("temp.tsv",'wb') as filewriter:
with open(each,'rb') as filereader:
linectr=0
peptide_index=0
index_ctr=0
expmass_index=0
specID_index=0
scan_index=0
label_index=0
charge_indicies={}
pin_lines=[]
for eachline in filereader:
if linectr==0:
header=eachline.split("\t")
for each_item in header:
if each_item == "Peptide":
peptide_index=index_ctr
elif each_item == "SpecId":
specID_index=index_ctr
elif each_item == "ScanNr":
scan_index=index_ctr
elif each_item == "ExpMass":
expmass_index = index_ctr
elif each_item == "Label":
label_index=index_ctr
elif "Charge" in each_item:
charge_indicies[each_item]=index_ctr
index_ctr+=1
filewriter.write("SpecId\tlabel\tScanNr\tPeptide\tcharge\n")
elif linectr==1:
pass
else:
thisline=eachline.split("\t")
peptide = thisline[peptide_index].split(".",1)[1]
peptide = peptide.rsplit(".",1)[0]
this_run=run_dict[thisline[specID_index].rsplit("_",4)[1]].rsplit(".",1)[0]
charge=0
for each_charge in charge_indicies:
if thisline[charge_indicies[each_charge]]=="1":
charge=each_charge.replace("Charge","")
if charge == 0 or charge =="0":
print "We couldnt find a charge for "+str(thisline[specID_index])+" so its defaulting to one! This should raise suspicions!"
charge="1"
if this_run not in spectral_expmz:
spectral_expmz[this_run]={}
#print thisline,"charge is",charge
spectral_expmz[this_run][int(thisline[scan_index])]=str((float(thisline[expmass_index])+((float(charge)-1)*proton_mass))/float(charge))
filewriter.write(thisline[specID_index]+"\t"+thisline[label_index]+"\t"+thisline[scan_index]+"\t"+peptide+"\t"+str(charge)+"\n")
linectr+=1
new_in=pandas.read_csv("temp.tsv",sep='\t')
os.remove("temp.tsv")
#print new_in,"new csv..."
#new_in['label']=new_in['label'].astype(int)
new_in=new_in[new_in['label']==1]
pin_list.append(new_in)
os.chdir(startingdir)
print "Done fixing pin files..."
#target_1_58_58_2
def makeUniqueID(x):
return x['SpecId'].split("_")[1]+"_"+str(x['ScanNr'])
def makeUniqueIDpsms(x):
return str(x['file_idx'])+"_"+str(x['scan'])
pin_megaframe=pandas.concat(pin_list)
unique_names=pin_megaframe.apply(makeUniqueID,axis=1)
pin_megaframe['unique_name']=unique_names
pin_megaframe.drop('SpecId',axis=1,inplace=True)
pin_megaframe.drop('ScanNr',axis=1,inplace=True)
pin_megaframe.drop('label',axis=1,inplace=True)
os.chdir(startingdir)
#for each in onlyfiles:
for neweach in run_dict_reverse:
each=neweach.replace(".mzML",".pin")
os.chdir("output/"+each+"_out")
target_psms=[ f for f in listdir("crux-output/") if (isfile(join("crux-output/",f)) and "target.psms.txt" in f)]
decoy_psms=[ f for f in listdir("crux-output/") if (isfile(join("crux-output/",f)) and "decoy.psms.txt" in f)]
#target_peptides=[ f for f in listdir("crux-output/") if (isfile(join("crux-output/",f)) and "target.peptides.txt" in f)]
#decoy_peptides=[ f for f in listdir("crux-output/") if (isfile(join("crux-output/",f)) and "decoy.peptides.txt" in f)]
#os.chdir("crux-output")
corrected=[]
for eachfile in target_psms:
print "About to read in ",eachfile
df=pandas.read_csv("crux-output/"+eachfile,sep='\t')
df['file_idx']=df['file_idx'].astype(int)
#print df,"this is the dataframe! <--------------------------"
fileset=set(df['file_idx'])
for eachone in fileset:
eachint=int(eachone)
mask=df[df['file_idx'] == eachone]
mask['file']=run_dict[str(eachone)]
#print run_dict[str(eachone)]
corrected.append(mask)
#print mask,"this is mask for",str(eachone)
#print "I was handling ",each
#print corrected,"the frame itself"
corrected_df=pandas.concat(corrected)
corrected_df.drop('charge',axis=1,inplace=True)
unique_names_psms=corrected_df.apply(makeUniqueIDpsms,axis=1)
corrected_df['unique_name']=unique_names_psms
merged_df=corrected_df.merge(pin_megaframe,how='inner',on='unique_name')#
corrected_df=merged_df
corrected_df.drop('unique_name',axis=1,inplace=True)
corrected_df.drop('sequence',axis=1,inplace=True)
corrected_df.rename(columns={'Peptide':'sequence'},inplace=True)
#Fixing precursor spectral m/z for targets
corrected_df['scan']=corrected_df['scan'].astype(int)
for index,each_group in corrected_df.groupby('file'):
corrected_df.loc[corrected_df['file']==index,'spectrum precursor m/z']=corrected_df.loc[corrected_df['file']==index,'scan'].map(spectral_expmz[index.rsplit(".",1)[0]])
#HERE WE CAN MAKE UNIQUE IDS
#AND THEN DO A LEFT JOIN TO TAKE IN THE OLD (good) SEQUENCES
#AND THEN GET RID OF THE BAD SEQUENCES AND REPLACE THEM WITH THE GOOD
#SEQUENCES.
corrected_df.to_csv("crux-output/"+eachfile,sep='\t',index=False)
corrected_decoys=[]
for eachfile in decoy_psms:
df=pandas.read_csv("crux-output/"+eachfile,sep='\t')
df['file_idx']=df['file_idx'].astype(int)
#print df,"this is the dataframe!"
fileset=set(df['file_idx'])
for eachone in fileset:
eachint=int(eachone)
mask=df[df['file_idx'] == eachone]
mask['file']=run_dict[str(eachone)]
corrected_decoys.append(mask)
#print mask,"this is mask for",str(eachone)
        #Fixing precursor spectral m/z for decoys
        corrected_df=pandas.concat(corrected_decoys)
        corrected_df['scan']=corrected_df['scan'].astype(int)
        for index,each_group in corrected_df.groupby('file'):
            corrected_df.loc[corrected_df['file']==index,'spectrum precursor m/z']=corrected_df.loc[corrected_df['file']==index,'scan'].map(spectral_expmz[index.rsplit(".",1)[0]])
        corrected_df.to_csv("crux-output/"+eachfile,sep='\t',index=False)
#this_run=
#onlyfiles = [ f for f in listdir(folder) if (isfile(join(folder,f)) and ".txt" in f)]
os.chdir(startingdir)
|
import json
def error(e, msg="", ex=False) :
print("ERROR {} : {}".format(msg, e))
if ex : exit()
def loads(data, cry=True) :
    result = None
    try :
        result = json.loads(data)
        if cry : print("LOADS SUCCESS.")
    except Exception as e :
        error(e, "LOADS")
    finally :
        return result
def dumps(data, cry=True) :
    result = None
    try :
        result = json.dumps(data)
        if cry : print("DUMPS SUCCESS.")
    except Exception as e :
        error(e, "DUMPS")
    finally :
        return result
def load(cpath, cry=True, ex=False) :
data = None
try :
with open(cpath, 'r') as openfile :
data = json.load(openfile)
openfile.close()
if cry : print("LOAD SUCCESS FROM [ {} ]".format(cpath))
except UnicodeDecodeError as ude :
with open(cpath, 'r', encoding="UTF8") as openfile :
data = json.load(openfile)
openfile.close()
except Exception as e :
error(e, "LOAD JSON FROM [ {} ]".format(cpath), ex)
finally :
return data
def save(cpath, data, cry=True, ex=False) :
try :
with open(cpath, 'w') as openfile :
json.dump(data, openfile)
openfile.close()
if cry : print("SAVE SUCCESS TO [ {} ]".format(cpath))
except Exception as e :
error(e, "SAVE JSON TO [ {} ]".format(cpath), ex)
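# Minimal usage sketch (illustrative only; "example.json" is a made-up path):
if __name__ == "__main__" :
    payload = {"name" : "example", "values" : [1, 2, 3]}
    save("example.json", payload)       # writes the dict to disk
    restored = load("example.json")     # reads it back as a dict
    print(dumps(restored))              # '{"name": "example", "values": [1, 2, 3]}'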
|
import torch.nn as nn
import math
class ConvBlock(nn.Module):
def __init__(self, in_channels, out_channels, retain_activation=True, activation='ReLU'):
super(ConvBlock, self).__init__()
self.block = nn.Sequential(
nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=1, padding=1, bias=False),
nn.BatchNorm2d(out_channels)
)
if retain_activation:
if activation == 'ReLU':
self.block.add_module("ReLU", nn.ReLU(inplace=True))
elif activation == 'LeakyReLU':
self.block.add_module("LeakyReLU", nn.LeakyReLU(0.1))
elif activation == 'Softplus':
self.block.add_module("Softplus", nn.Softplus())
self.block.add_module("MaxPool2d", nn.MaxPool2d(kernel_size=2, stride=2, padding=0))
def forward(self, x):
out = self.block(x)
return out
class ProtoNetEmbedding(nn.Module):
'''
Model as described in the reference paper,
source: https://github.com/jakesnell/prototypical-networks/blob/f0c48808e496989d01db59f86d4449d7aee9ab0c/protonets/models/few_shot.py#L62-L84
'''
def __init__(self, x_dim=3, h_dim=64, z_dim=64, retain_last_activation=True, activation='ReLU', normalize=True):
super(ProtoNetEmbedding, self).__init__()
self.encoder = nn.Sequential(
ConvBlock(x_dim, h_dim, activation=activation),
ConvBlock(h_dim, h_dim, activation=activation),
ConvBlock(h_dim, h_dim, activation=activation),
ConvBlock(h_dim, z_dim, retain_activation=retain_last_activation, activation=activation),
)
self.fc = nn.Linear(1600, 512)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
self.normalize = normalize
def forward(self, x):
x = self.encoder(x)
if self.normalize:
return nn.functional.normalize(self.fc(x.view(x.size(0), -1)), p=2.0, dim=1)
else:
return self.fc(x.view(x.size(0), -1))
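# Minimal shape-check sketch (illustrative only): with the default four ConvBlocks,
# an 84x84 input is pooled down to 5x5x64 = 1600 features, matching the fc layer.
if __name__ == '__main__':
    import torch
    model = ProtoNetEmbedding()
    dummy = torch.randn(2, 3, 84, 84)   # (batch, channels, height, width)
    embeddings = model(dummy)
    print(embeddings.shape)             # expected: torch.Size([2, 512])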
|
import sys
LOCATIONS = "abcdefg"
VALID_DIGITS = {
2: [{"c", "f"}],
3: [{"a", "c", "f"}],
4: [{"b", "c", "d", "f"}],
5: [
{"a", "c", "d", "e", "g"},
{"a", "c", "d", "f", "g"},
{"a", "b", "d", "f", "g"},
],
6: [
{"a", "b", "d", "e", "f", "g"},
{"a", "b", "c", "e", "f", "g"},
{"a", "b", "c", "d", "f", "g"},
],
7: [{"a", "b", "c", "d", "e", "f", "g"}],
}
DIGIT_LOOKUP = {
"abcefg": 0,
"cf": 1,
"acdeg": 2,
"acdfg": 3,
"bcdf": 4,
"abdfg": 5,
"abdefg": 6,
"acf": 7,
"abcdefg": 8,
"abcdfg": 9,
}
def load_data(path):
def process_line(line):
return [x.split(" ") for x in line.strip().split(" | ")]
with open(path) as f:
return [process_line(line) for line in f.readlines()]
def part_1(data):
return sum(len(x) in {2, 3, 4, 7} for _, output in data for x in output)
def check_valid(mapping, numbers):
for num in numbers:
digits = {mapping[d] for d in num if d in mapping}
if not any(digits <= valid for valid in VALID_DIGITS[len(num)]):
return False
return True
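# map_segments below brute-forces the wire-to-segment assignment by backtracking:
# it fixes one scrambled wire at a time and prunes any partial mapping under which
# some observed pattern could no longer form a valid digit of its length.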
def map_segments(numbers):
mapping = {}
def backtrack(i=0):
if i == 7:
return mapping
loc = LOCATIONS[i]
available = set(LOCATIONS) - set(mapping.values())
for candidate in available:
mapping[loc] = candidate
if check_valid(mapping, numbers):
ret = backtrack(i + 1)
if ret is not None:
return ret
del mapping[loc]
return backtrack()
def part_2(data):
total = 0
for numbers, outputs in data:
mapping = map_segments(numbers)
output = 0
for o in outputs:
output = (
output * 10
+ DIGIT_LOOKUP["".join(sorted(mapping[d] for d in o))]
)
total += output
return total
if __name__ == "__main__":
data = load_data(sys.argv[1])
print(f"Part 1: {part_1(data)}")
print(f"Part 2: {part_2(data)}")
|
import cv2
images=cv2.imread('rumahkita3.jpeg')
resize=cv2.resize(images,(824,464))
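# Note: cv2.resize takes the target size as (width, height), so this yields an
# image 824 px wide and 464 px tall.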
cv2.imwrite('rumahkita3-resized.jpeg',resize)
|
#!/usr/bin/python3
import sys
import funcs
import json
import auth
def main():
while True:
args = input( "-> " )
splitters = [x.strip() for x in args.split( ' ' ) ]
print( splitters )
fname = splitters[:-1]
try:
arg = json.loads(splitters[-1])
print( str(funcs.parse(fname, arg)))
except Exception as e:
print( e )
if (__name__=="__main__"):
main()
|
#!/usr/bin/env python
# Copyright (c) 2015-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree.
import tempfile
import unittest
from nuclide_certificates_generator import NuclideCertificatesGenerator
from utils import write_resource_to_file
class NuclideCertificatesGeneratorTest(unittest.TestCase):
def setUp(self):
temp_dir = tempfile.mkdtemp()
NuclideCertificatesGenerator.openssl_cnf = write_resource_to_file('openssl.cnf', temp_dir)
def verify_key_file(self, key_file):
with open(key_file, "r") as f:
text = f.read()
self.assertTrue('BEGIN RSA PRIVATE KEY' in text) # nolint
self.assertTrue('END RSA PRIVATE KEY' in text)
def verify_cert_file(self, cert_file):
with open(cert_file, "r") as f:
text = f.read()
self.assertTrue('BEGIN CERTIFICATE' in text)
self.assertTrue('END CERTIFICATE' in text)
def test_cert_gen(self):
gen = NuclideCertificatesGenerator(tempfile.gettempdir(), 'localhost', 'test')
self.verify_key_file(gen.ca_key)
self.verify_cert_file(gen.ca_cert)
self.verify_key_file(gen.server_key)
self.verify_cert_file(gen.server_cert)
self.verify_key_file(gen.client_key)
self.verify_cert_file(gen.client_cert)
self.assertEquals(
'localhost', NuclideCertificatesGenerator.get_common_name(gen.server_cert))
# Test Subject Alternative Name.
def test_altnames(self):
gen = NuclideCertificatesGenerator(tempfile.gettempdir(), '127.0.0.1', 'test')
text = NuclideCertificatesGenerator.get_text(gen.server_cert)
self.assertTrue('Subject Alternative Name' in text)
self.assertTrue('IP Address:127.0.0.1' in text)
if __name__ == '__main__':
unittest.main()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Implementation of Neighbor Based Node Embeddings (NBNE).
Call function with a networkx graph:
>>> train_model(graph, num_permutations, output_filename)
For details read the paper:
Fast Node Embeddings: Learning Ego-Centric Representations
Tiago Pimentel, Adriano Veloso and Nivio Ziviani
ICLR, 2018
'''
import random
import logging
import gensim
class MySentences(object):
def __init__(self, graph, num_permutations, window_size, min_degree=0):
self.graph = graph
self.min_degree = min_degree
self.num_permutations = num_permutations
self.window_size = window_size
def __iter__(self):
for i in range(self.num_permutations):
for src_id in self.graph.nodes():
                for sentence in self.get_nodes_sentences(src_id):
yield sentence
    def get_nodes_sentences(self, src_id):
src_node = str(src_id)
neighbors = list(self.graph.neighbors(src_id))
if len(neighbors) < self.min_degree:
return
# Get all connected edges
out_nodes = [str(out_id) for out_id in neighbors]
random.shuffle(out_nodes)
for j in range(len(out_nodes))[::self.window_size]:
start_index = min(j, len(out_nodes) - self.window_size)
end_index = start_index + self.window_size
nodes_sentence = [src_node] + out_nodes[start_index: end_index]
yield nodes_sentence
def train_model(graph, num_permutations, output_file=None, embedding_dimension=128, window_size=5, min_count=0, min_degree=0, workers=8):
sentences = MySentences(graph, num_permutations, window_size, min_degree)
model = gensim.models.Word2Vec(sentences, min_count=min_count * num_permutations, size=embedding_dimension, window=window_size, sg=1, workers=workers)
if output_file is not None:
model.wv.save_word2vec_format(output_file)
return model
def build_vocab(graph):
dict_sent = []
for node in graph.nodes():
dict_sent += [[str(node)]]
dict_sentences = gensim.corpora.Dictionary(dict_sent)
return dict_sentences
# Make training verbose
def verbose_training():
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
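# Minimal usage sketch (added illustration, not from the original module). It
# assumes networkx is installed and gensim 3.x, matching the `size=` keyword
# used in train_model above.
if __name__ == '__main__':
    import networkx as nx
    verbose_training()
    demo_graph = nx.karate_club_graph()
    demo_model = train_model(demo_graph, num_permutations=5, output_file=None)
    print(demo_model.wv.most_similar('0', topn=3))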
|
#
# This source file is part of appleseed.
# Visit https://appleseedhq.net/ for additional information and resources.
#
# This software is released under the MIT license.
#
# Copyright (c) 2014-2018 The appleseedhq Organization
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
import os
import sys
import threading
import bpy
import appleseed as asr
from .renderercontroller import FinalRendererController, InteractiveRendererController
from .tilecallbacks import FinalTileCallback
from ..logger import get_logger
from ..translators.preview import PreviewRenderer
from ..translators.scene import SceneTranslator
from ..utils.util import safe_register_class, safe_unregister_class
logger = get_logger()
class RenderThread(threading.Thread):
def __init__(self, renderer):
super(RenderThread, self).__init__()
self.__renderer = renderer
def run(self):
self.__renderer.render()
class SetAppleseedLogLevel(object):
mapping = {'debug': asr.LogMessageCategory.Debug,
'info': asr.LogMessageCategory.Info,
'warning': asr.LogMessageCategory.Warning,
'error': asr.LogMessageCategory.Error,
'fatal': asr.LogMessageCategory.Fatal}
def __init__(self, new_level):
self.__new_level = self.mapping[new_level]
def __enter__(self):
self.__saved_level = asr.global_logger().get_verbosity_level()
asr.global_logger().set_verbosity_level(self.__new_level)
def __exit__(self, type, value, traceback):
asr.global_logger().set_verbosity_level(self.__saved_level)
class RenderAppleseed(bpy.types.RenderEngine):
bl_idname = 'APPLESEED_RENDER'
bl_label = 'appleseed'
bl_use_preview = True
# True if we are doing interactive rendering.
__interactive_session = False
#
# Constructor.
#
def __init__(self):
logger.debug("Creating render engine")
# Common for all rendering modes.
self.__renderer = None
self.__renderer_controller = None
self.__tile_callback = None
self.__render_thread = None
# Interactive rendering.
self.__interactive_scene_translator = None
self.__is_interactive = False
#
# Destructor.
#
def __del__(self):
self.__stop_rendering()
# Sometimes __is_interactive does not exist, not sure why.
try:
if self.__is_interactive:
RenderAppleseed.__interactive_session = False
except:
pass
logger.debug("Deleting render engine")
#
# RenderEngine methods.
#
def render(self, scene):
if self.is_preview:
if bpy.app.background: # Can this happen?
return
# Disable material previews if we are doing an interactive render.
if not RenderAppleseed.__interactive_session:
level = 'error'
with SetAppleseedLogLevel(level):
self.__render_material_preview(scene)
else:
level = scene.appleseed.log_level
with SetAppleseedLogLevel(level):
self.__add_render_passes(scene)
self.__render_final(scene)
def view_update(self, context):
if self.__interactive_scene_translator is None:
self.__start_interactive_render(context)
else:
self.__pause_rendering()
logger.debug("Updating scene")
self.__interactive_scene_translator.update_scene(context.scene, context)
self.__restart_interactive_render()
def view_draw(self, context):
self.__draw_pixels(context)
# Check if view has changed.
view_update, cam_param_update, cam_translate_update = self.__interactive_scene_translator.check_view(context)
if view_update or cam_param_update or cam_translate_update:
self.__pause_rendering()
logger.debug("Updating view")
self.__interactive_scene_translator.update_view(view_update, cam_param_update)
self.__restart_interactive_render()
def update_render_passes(self, scene=None, renderlayer=None):
asr_scene_props = scene.appleseed
if not self.is_preview:
self.register_pass(scene, renderlayer, "Combined", 4, "RGBA", 'COLOR')
if asr_scene_props.diffuse_aov:
self.register_pass(scene, renderlayer, "Diffuse", 4, "RGBA", 'COLOR')
if asr_scene_props.screen_space_velocity_aov:
self.register_pass(scene, renderlayer, "Screen Space Velocity", 3, "RGB", 'COLOR')
if asr_scene_props.direct_diffuse_aov:
self.register_pass(scene, renderlayer, "Direct Diffuse", 4, "RGBA", 'COLOR')
if asr_scene_props.indirect_diffuse_aov:
self.register_pass(scene, renderlayer, "Indirect Diffuse", 4, "RGBA", 'COLOR')
if asr_scene_props.glossy_aov:
self.register_pass(scene, renderlayer, "Glossy", 4, "RGBA", 'COLOR')
if asr_scene_props.direct_glossy_aov:
self.register_pass(scene, renderlayer, "Direct Glossy", 4, "RGBA", 'COLOR')
if asr_scene_props.indirect_glossy_aov:
self.register_pass(scene, renderlayer, "Indirect Glossy", 4, "RGBA", 'COLOR')
if asr_scene_props.albedo_aov:
self.register_pass(scene, renderlayer, "Albedo", 4, "RGBA", 'COLOR')
if asr_scene_props.emission_aov:
self.register_pass(scene, renderlayer, "Emission", 4, "RGBA", 'COLOR')
if asr_scene_props.npr_shading_aov:
self.register_pass(scene, renderlayer, "NPR Shading", 4, "RGBA", 'COLOR')
if asr_scene_props.npr_contour_aov:
self.register_pass(scene, renderlayer, "NPR Contour", 4, "RGBA", 'COLOR')
if asr_scene_props.normal_aov:
self.register_pass(scene, renderlayer, "Normal", 3, "RGB", 'VECTOR')
if asr_scene_props.position_aov:
self.register_pass(scene, renderlayer, "Position", 3, "RGB", 'VECTOR')
if asr_scene_props.uv_aov:
self.register_pass(scene, renderlayer, "UV", 3, "RGB", 'VECTOR')
if asr_scene_props.depth_aov:
self.register_pass(scene, renderlayer, "Z Depth", 1, "Z", 'VALUE')
if asr_scene_props.pixel_time_aov:
self.register_pass(scene, renderlayer, "Pixel Time", 3, "RGB", "VECTOR")
if asr_scene_props.invalid_samples_aov:
self.register_pass(scene, renderlayer, "Invalid Samples", 3, "RGB", "VECTOR")
if asr_scene_props.pixel_sample_count_aov:
self.register_pass(scene, renderlayer, "Pixel Sample Count", 3, "RGB", "VECTOR")
if asr_scene_props.pixel_variation_aov:
self.register_pass(scene, renderlayer, "Pixel Variation", 3, "RGB", "VECTOR")
#
# Internal methods.
#
def __render_material_preview(self, scene):
"""
Export and render the material preview scene.
"""
material_preview_renderer = PreviewRenderer()
material_preview_renderer.translate_preview(scene)
self.__start_final_render(scene, material_preview_renderer.as_project)
def __render_final(self, scene):
"""
Export and render the scene.
"""
scene_translator = SceneTranslator.create_final_render_translator(scene)
self.update_stats("appleseed Rendering: Translating scene", "")
scene_translator.translate_scene()
self.__start_final_render(scene, scene_translator.as_project)
def __start_final_render(self, scene, project):
"""
Start a final render.
"""
# Preconditions.
assert(self.__renderer is None)
assert(self.__renderer_controller is None)
assert(self.__tile_callback is None)
assert(self.__render_thread is None)
self.__tile_callback = FinalTileCallback(self, scene)
self.__renderer_controller = FinalRendererController(self, self.__tile_callback)
self.__renderer = asr.MasterRenderer(project,
project.configurations()['final'].get_inherited_parameters(),
[],
self.__renderer_controller,
self.__tile_callback)
self.__render_thread = RenderThread(self.__renderer)
# While debugging, log to the console. This should be configurable.
log_target = asr.ConsoleLogTarget(sys.stderr)
asr.global_logger().add_target(log_target)
# Start render thread and wait for it to finish.
self.__render_thread.start()
while self.__render_thread.isAlive():
self.__render_thread.join(0.5) # seconds
# Cleanup.
asr.global_logger().remove_target(log_target)
if scene.appleseed.denoise_mode == 'write_outputs':
project.get_frame().write_main_image(os.path.join(scene.appleseed.denoise_output_dir, "output.exr"))
self.__stop_rendering()
def __start_interactive_render(self, context):
"""
Start an interactive rendering session.
"""
# Preconditions.
assert(self.__interactive_scene_translator is None)
assert(self.__renderer is None)
assert(self.__renderer_controller is None)
assert(self.__tile_callback is None)
assert(self.__render_thread is None)
logger.debug("Starting interactive rendering")
self.__is_interactive = True
RenderAppleseed.__interactive_session = True
logger.debug("Translating scene for interactive rendering")
self.__interactive_scene_translator = SceneTranslator.create_interactive_render_translator(context)
self.__interactive_scene_translator.translate_scene()
self.__camera = self.__interactive_scene_translator.camera_translator
project = self.__interactive_scene_translator.as_project
self.__renderer_controller = InteractiveRendererController(self.__camera)
self.__tile_callback = asr.BlenderProgressiveTileCallback(self.tag_redraw)
self.__renderer = asr.MasterRenderer(project,
project.configurations()['interactive'].get_inherited_parameters(),
[],
self.__renderer_controller,
self.__tile_callback)
self.__restart_interactive_render()
def __restart_interactive_render(self):
"""
Restart the interactive renderer.
"""
logger.debug("Start rendering")
self.__renderer_controller.set_status(asr.IRenderControllerStatus.ContinueRendering)
self.__render_thread = RenderThread(self.__renderer)
self.__render_thread.start()
def __pause_rendering(self):
"""
Abort rendering if a render is in progress.
"""
# Signal appleseed to stop rendering.
logger.debug("Pause rendering")
try:
if self.__render_thread:
self.__renderer_controller.set_status(asr.IRenderControllerStatus.AbortRendering)
self.__render_thread.join()
except:
pass
self.__render_thread = None
def __stop_rendering(self):
"""
Abort rendering if a render is in progress and cleanup.
"""
# Signal appleseed to stop rendering.
logger.debug("Abort rendering")
try:
if self.__render_thread:
self.__renderer_controller.set_status(asr.IRenderControllerStatus.AbortRendering)
self.__render_thread.join()
except:
pass
# Cleanup.
self.__render_thread = None
self.__renderer = None
self.__renderer_controller = None
self.__tile_callback = None
def __draw_pixels(self, context):
"""
Draw rendered image in Blender's viewport.
"""
self.bind_display_space_shader(context.scene)
self.__tile_callback.draw_pixels()
self.unbind_display_space_shader()
def __add_render_passes(self, scene):
asr_scene_props = scene.appleseed
if asr_scene_props.screen_space_velocity_aov:
self.add_pass("Screen Space Velocity", 3, "RGB")
if asr_scene_props.diffuse_aov:
self.add_pass("Diffuse", 4, "RGBA")
if asr_scene_props.direct_diffuse_aov:
self.add_pass("Direct Diffuse", 4, "RGBA")
if asr_scene_props.indirect_diffuse_aov:
self.add_pass("Indirect Diffuse", 4, "RGBA")
if asr_scene_props.glossy_aov:
self.add_pass("Glossy", 4, "RGBA")
if asr_scene_props.direct_glossy_aov:
self.add_pass("Direct Glossy", 4, "RGBA")
if asr_scene_props.indirect_glossy_aov:
self.add_pass("Indirect Glossy", 4, "RGBA")
if asr_scene_props.normal_aov:
self.add_pass("Normal", 3, "RGB")
if asr_scene_props.position_aov:
self.add_pass("Position", 3, "RGB")
if asr_scene_props.uv_aov:
self.add_pass("UV", 3, "RGB")
if asr_scene_props.depth_aov:
self.add_pass("Z Depth", 1, "Z")
if asr_scene_props.pixel_time_aov:
self.add_pass("Pixel Time", 3, "RGB")
if asr_scene_props.invalid_samples_aov:
self.add_pass("Invalid Samples", 3, "RGB")
if asr_scene_props.pixel_sample_count_aov:
self.add_pass("Pixel Sample Count", 3, "RGB")
if asr_scene_props.pixel_variation_aov:
self.add_pass("Pixel Variation", 3, "RGB")
if asr_scene_props.albedo_aov:
self.add_pass("Albedo", 4, "RGBA")
if asr_scene_props.emission_aov:
self.add_pass("Emission", 4, "RGBA")
if asr_scene_props.npr_shading_aov:
self.add_pass("NPR Shading", 4, "RGBA")
if asr_scene_props.npr_contour_aov:
self.add_pass("NPR Contour", 4, "RGBA")
def register():
safe_register_class(RenderAppleseed)
def unregister():
safe_unregister_class(RenderAppleseed)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import simplejson as json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
from alipay.aop.api.domain.AccountRecord import AccountRecord
class AlipayUserAccountSearchResponse(AlipayResponse):
def __init__(self):
super(AlipayUserAccountSearchResponse, self).__init__()
self._account_records = None
self._total_pages = None
self._total_results = None
@property
def account_records(self):
return self._account_records
@account_records.setter
def account_records(self, value):
if isinstance(value, list):
self._account_records = list()
for i in value:
if isinstance(i, AccountRecord):
self._account_records.append(i)
else:
self._account_records.append(AccountRecord.from_alipay_dict(i))
@property
def total_pages(self):
return self._total_pages
@total_pages.setter
def total_pages(self, value):
self._total_pages = value
@property
def total_results(self):
return self._total_results
@total_results.setter
def total_results(self, value):
self._total_results = value
def parse_response_content(self, response_content):
response = super(AlipayUserAccountSearchResponse, self).parse_response_content(response_content)
if 'account_records' in response:
self.account_records = response['account_records']
if 'total_pages' in response:
self.total_pages = response['total_pages']
if 'total_results' in response:
self.total_results = response['total_results']
|
from os.path import join, dirname
from setuptools import setup
setup(
name = 'xmppgcm',
packages = ['xmppgcm'], # this must be the same as the name above
version = '0.2.4',
description = 'Client Library for Firebase Cloud Messaging using XMPP',
long_description = open(join(dirname(__file__), 'README.txt')).read(),
install_requires=['sleekxmpp',],
author = 'Winster Jose',
author_email = 'wtjose@gmail.com',
url = 'https://github.com/winster/xmppgcm',
keywords = ['gcm', 'fcm', 'xmpp', 'xmppgcm', 'xmppfcm'], # arbitrary keywords
classifiers = [],
)
|
input = """
p6|p9|p6|not_p22:-p19,p23.
p2|p24|p15:-not p23.
p1|p12|p2|not_p16:-not p23,not p3.
p3|p20|p18:-p1,not p22.
p3|not_p20|p18:-p13,not p22,not p14.
p15|p4|p3|p1:-p12,p5.
p3|p20|p18|p3:-p1,not p22.
not_p2|not_p24|p9:-p1,p12,not p11.
p15|p11|p24|p3.
not_p4:-not p4.
not_p23|p7|p21|p5:-p13,not p3.
p1|p15|p15|not_p18:-not p5,p24.
p12|not_p15|p16|p17:-p6,p1.
p15|p6:-p19,p5,not p3,not p7.
p24|p9|p24|not_p22:-p12,p23.
p21|p22|p11|p25|not_p20:-p17.
p17|p6|not_p19|not_p8.
p5|p11|p25|not_p20:-p17,not p7.
p12|not_p15|p16|p18:-p9,p1.
:-p16,not_p16.
"""
output = """
{p24, p1, p3, not_p4, not_p8}
{p2, p3, not_p4, not_p8}
{p24, p1, p3, not_p4, not_p19}
{p2, p3, not_p4, not_p19}
{p24, p1, p3, not_p4, p16, p6}
{p24, p1, p3, not_p4, not_p15, p6}
{p24, p1, p12, p3, p9, not_p4, p6}
{p24, p1, p12, p3, not_p24, not_p4, p6}
{p24, p1, p12, p3, not_p2, not_p4, p6}
{p2, p3, not_p4, p6}
{p24, p1, p3, not_p4, p17, not_p20}
{p24, p1, p3, not_p4, p17, p25}
{p24, p1, p3, p11, not_p4, p17}
{p2, p3, not_p4, p17, not_p20}
{p2, p3, not_p4, p17, p25}
{p2, p3, p5, not_p4, p17, p21}
{p2, p3, p22, p5, not_p4, p17}
{p15, not_p16, not_p4, not_p8}
{p15, not_p16, not_p4, not_p19}
{p15, not_p16, not_p4, p6}
{p15, not_p16, not_p4, p17, not_p20}
{p15, not_p16, not_p4, p17, p25}
{p15, not_p16, p11, not_p4, p17}
{p15, not_p16, p5, not_p4, p17, p21}
{p15, not_p16, p22, p5, not_p4, p17}
{p2, p15, not_p4, not_p8}
{p15, p12, not_p4, not_p8}
{p15, p1, p18, not_p4, not_p8}
{p15, p1, p20, not_p4, not_p8}
{p2, p15, not_p4, not_p19}
{p15, p12, not_p4, not_p19}
{p15, p1, p18, not_p4, not_p19}
{p15, p1, p20, not_p4, not_p19}
{p2, p15, not_p4, p6}
{p15, p12, not_p4, p6}
{p15, p1, p18, not_p4, p16, p6}
{p15, p1, p18, not_p4, not_p15, p6}
{p15, p1, p20, not_p4, p16, p6}
{p15, p1, p20, not_p4, not_p15, p6}
{p2, p15, not_p4, p17, not_p20}
{p2, p15, not_p4, p17, p25}
{p2, p15, p5, not_p4, p17, p21}
{p2, p15, p22, p5, not_p4, p17}
{p15, p12, not_p4, p17, not_p20}
{p15, p1, p18, not_p4, p17, not_p20}
{p15, p1, p20, not_p4, p17, not_p20}
{p15, p12, not_p4, p17, p25}
{p15, p1, p18, not_p4, p17, p25}
{p15, p1, p20, not_p4, p17, p25}
{p15, p12, p11, not_p4, p17}
{p15, p1, p18, p11, not_p4, p17}
{p15, p1, p20, p11, not_p4, p17}
{p15, p12, p5, not_p4, p17, p21}
{p15, p12, p22, p5, not_p4, p17}
{p15, p1, p22, p5, not_p4, p17}
{p15, p1, p18, p5, not_p4, p17, p21}
{p15, p1, p20, p5, not_p4, p17, p21}
{p24, not_p16, not_p4, not_p18, not_p8}
{p2, p11, not_p4, not_p8}
{p2, p24, not_p4, not_p18, not_p8}
{p24, p12, not_p4, not_p18, not_p8}
{p24, p1, p18, not_p4, not_p8}
{p24, p1, p20, not_p4, not_p8}
{p24, not_p16, not_p4, not_p18, not_p19}
{p2, p11, not_p4, not_p19}
{p2, p24, not_p4, not_p18, not_p19}
{p24, p12, not_p4, not_p18, not_p19}
{p24, p1, p18, not_p4, not_p19}
{p24, p1, p20, not_p4, not_p19}
{p2, p11, not_p4, p6}
{p2, p11, not_p4, p17}
{p24, not_p16, p5, not_p4, p17, p21}
{p24, not_p16, p22, p5, not_p4, p17}
{p24, not_p16, not_p4, not_p18, p6}
{p24, not_p16, not_p4, not_p18, p17, not_p20}
{p24, not_p16, not_p4, not_p18, p17, p25}
{p24, not_p16, p11, not_p4, not_p18, p17}
{p2, p24, p5, not_p4, p17, p21}
{p2, p24, p22, p5, not_p4, p17}
{p2, p24, not_p4, not_p18, p6}
{p2, p24, not_p4, not_p18, p17, not_p20}
{p2, p24, not_p4, not_p18, p17, p25}
{p24, p1, p22, p5, not_p4, p17}
{p24, p1, p18, not_p4, p16, p6}
{p24, p1, p18, not_p4, not_p15, p6}
{p24, p1, p12, p18, p9, not_p4, p6}
{p24, p1, p12, p18, not_p24, not_p4, p6}
{p24, p1, p12, p18, not_p2, not_p4, p6}
{p24, p1, p20, not_p4, p16, p6}
{p24, p1, p20, not_p4, not_p15, p6}
{p24, p1, p12, p20, p9, not_p4, p6}
{p24, p1, p12, p20, not_p24, not_p4, p6}
{p24, p1, p12, p20, not_p2, not_p4, p6}
{p24, p1, p18, not_p4, p17, not_p20}
{p24, p1, p18, not_p4, p17, p25}
{p24, p1, p18, p11, not_p4, p17}
{p24, p1, p18, p5, not_p4, p17, p21}
{p24, p1, p20, not_p4, p17, not_p20}
{p24, p1, p20, not_p4, p17, p25}
{p24, p1, p20, p11, not_p4, p17}
{p24, p1, p20, p5, not_p4, p17, p21}
{p24, p12, p4, p5, p17, p21}
{p24, p12, p22, p4, p5, p17}
{p24, p12, not_p4, not_p18, p6}
{p24, p12, not_p4, not_p18, p17, not_p20}
{p24, p12, not_p4, not_p18, p17, p25}
{p24, p12, p11, not_p4, not_p18, p17}
"""
|
class Solution(object):
def numIslands(self, grid):
"""
:type grid: List[List[str]]
:rtype: int
"""
result = 0
for i in range(len(grid)):
for j in range(len(grid[0])):
if grid[i][j] == '1':
self.dfs(grid,i,j)
result += 1
return result
def dfs(self,grid,i,j):
if i < 0 or i >= len(grid) or j < 0 or j >= len(grid[0]) or grid[i][j] == '0':
return
grid[i][j] = '0'
self.dfs(grid, i + 1, j)
self.dfs(grid, i-1, j)
self.dfs(grid, i, j+1)
self.dfs(grid, i , j-1)
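# Quick illustrative check (added example, not part of the original solution).
# Note that dfs floods each visited island to '0', so the grid is mutated in place.
if __name__ == '__main__':
    demo_grid = [
        ["1", "1", "0", "0"],
        ["1", "0", "0", "1"],
        ["0", "0", "1", "1"],
    ]
    print(Solution().numIslands(demo_grid))  # expected: 2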
|
from flask import Blueprint, render_template, request
from .database import db
from .model import Item
bp = Blueprint('admin',__name__)
@bp.route('/admin', methods=('GET','POST'))
def admin():
if request.method == 'POST':
id = request.form['delete']
Item.query.filter(Item.id==id).delete()
db.session.commit()
items = Item.query.all()
return render_template('admin.html',items = items)
|
# -*- coding: utf-8
from django.core.management.base import BaseCommand
from django.conf import settings
from django.core.exceptions import *
from apps.subject.models import Department, Professor, Lecture, Course, ClassTime, ExamTime
#from otl.apps.timetable.models import ClassTime, ExamTime, Syllabus
#from optparse import make_option
#import argparse
from datetime import time
import sys, getpass, re
#import Sybase
from scholardb_access import execute
import datetime
class Command(BaseCommand):
def add_arguments(self,parser): # from Django 1.10, we don't use OptionParser! So I changed it to ArgumentParser
parser.add_argument('--host', dest='host', help=u'Specifies server address.')
parser.add_argument('--port', dest='port', help=u'Specifies server port.')
parser.add_argument('--user', dest='user', help=u'Specifies user name to log in.')
        parser.add_argument('--password', dest='password', help=u'Specifies password to log in.')
        parser.add_argument('--encoding', dest='encoding', help=u'Specifies character encoding to decode strings from database. (default is cp949)', default='cp949')
parser.add_argument('--exclude-lecture', action='store_true', dest='exclude_lecture', help=u'Don\'t update lecture information when you want to update time information only.', default=False)
parser.add_argument('--year', dest='year', type=int)
parser.add_argument('--semester', dest='semester', type=int)
help = u'Imports KAIST scholar database.'
args = u'--host=143.248.X.Y:PORT --user=USERNAME'
def handle(self, *args, **options):
rx_dept_code = re.compile(ur'([a-zA-Z]+)(\d+)')
host = options.get('host', None)
port = options.get('port', None)
user = options.get('user', None)
password = options.get('password', None)
encoding = options.get('encoding', 'cp949')
exclude_lecture = options.get('exclude_lecture', False)
lecture_count = 0
if options['year']!=None and options['semester']!=None:
next_year = int(options['year'])
next_semester = int(options['semester'])
else:
next_year = settings.CURRENT_YEAR
next_semester = settings.CURRENT_SEMESTER
try:
if password is None:
password = getpass.getpass()
except (KeyboardInterrupt, EOFError):
print
return
if not exclude_lecture:
query = 'SELECT * FROM view_OTL_charge WHERE lecture_year = %d AND lecture_term = %d' % (next_year, next_semester)
professors = execute(host, port, user, password, query)
query = 'SELECT * FROM view_OTL_lecture WHERE lecture_year = %d AND lecture_term = %d ORDER BY dept_id' % (next_year, next_semester)
rows = execute(host, port, user, password, query)
departments = {}
lectures_not_updated = set()
for lecture in Lecture.objects.filter(year=next_year, semester=next_semester):
lectures_not_updated.add(lecture.id)
# Make Staff Professor with ID 830
try:
staff_professor = Professor.objects.get(professor_id=830)
except Professor.DoesNotExist:
staff_professor = Professor.objects.create(professor_id=830)
staff_professor.professor_name = 'Staff'
staff_professor.professor_name_en = 'Staff'
staff_professor.save()
prev_department = None
for row in rows:
myrow = []
for elem in row:
if isinstance(elem, str):
try:
elem = elem.decode(encoding)
except UnicodeDecodeError:
elem = u'%s (???)' % row[20]
print>>sys.stderr, 'ERROR: parsing error on lecture %s' % row[20]
print>>sys.stderr, ' cannot read "%s" in cp949.' % elem
myrow.append(elem)
# Extract department info.
lecture_no = myrow[2]
lecture_code = myrow[20]
lecture_class_no = myrow[3].strip()
department_no = lecture_no[0:2]
department_id = int(myrow[4])
department_code = rx_dept_code.match(lecture_code).group(1)
# Update department info.
if prev_department != department_id:
new_flag = False
try:
department = Department.objects.get(id = department_id)
print 'Updating department: %s' % department
except Department.DoesNotExist:
department = Department(id = department_id)
new_flag = True
print 'Adding department: %s(%d)...' % (department_code, department_id)
department.num_id = department_no
department.code = department_code
department.name = myrow[5]
department.name_en = myrow[6]
department.save()
if new_flag:
departments = Department.objects.filter(code = department_code, visible=True)
for dept in departments:
if dept.id != department.id:
dept.visible = False
dept.save()
prev_department = department_id
# Extract lecture info.
#try:
#print 'Retreiving %s: %s [%s]...' % (lecture_code, myrow[7].encode('utf-8'), lecture_class_no)
#except UnicodeDecodeError:
#print 'Retreiving %s: ??? [%s]...' % (lecture_code, lecture_class_no)
#myrow[7] = u'???'
lecture_key = {
'code': lecture_no,
'year': int(myrow[0]),
'semester': int(myrow[1]),
'deleted': False,
'class_no': lecture_class_no,
}
# Convert the key to a hashable object
lecture_key_hashable = -1
try:
lecture = Lecture.objects.get(**lecture_key)
lecture_key_hashable = lecture.id
print 'Updating existing lecture...'
except Lecture.DoesNotExist:
lecture = Lecture(**lecture_key)
lecture.num_people = 0
print 'Creating new lecture...'
# Update lecture info.
lecture.department = department
lecture.old_code = myrow[20]
lecture.title = myrow[7]
lecture.title_en = myrow[8]
                lecture.type = myrow[10] # course type (Korean)
                lecture.type_en = myrow[11] # course type (English)
                lecture.audience = int(myrow[12]) # target year level
                lecture.limit = myrow[17] # enrollment limit
                lecture.credit = myrow[16] # credits
                lecture.credit_au = myrow[13] # AU
                lecture.num_classes = int(myrow[14]) # lecture hours
                lecture.num_labs = int(myrow[15]) # lab hours
'''
if myrow[19] != None and len(myrow[19]) >= 190:
myrow[19] = myrow[19][:190]
lecture.notice = myrow[19] # 비고
'''
                lecture.is_english = True if myrow[21] == 'Y' else False # whether the lecture is taught in English
lecture.deleted = False
# Course save
try:
course = Course.objects.get(old_code=lecture.old_code)
course.department = department
course.type = lecture.type
course.type_en = lecture.type_en
course.title = lecture.title.split("<")[0].split("[")[0]
course.title_en = lecture.title_en.split("<")[0].split("[")[0]
course.save()
# print "Updating Course ... %s" % course.title
except Course.DoesNotExist:
course = Course()
course.old_code = lecture.old_code
course.department = department
course.type = lecture.type
course.type_en = lecture.type_en
course.title = lecture.title.split("<")[0].split("[")[0]
course.title_en = lecture.title_en.split("<")[0].split("[")[0]
course.grade_average = 0.0
course.load_average= 0.0
course.speech_average = 0.0
course.total_average = 0.0
################################### course total score
'''
course.score_average = 0
course.load_average = 0
course.gain_average = 0
'''
course.save()
# print "Making new Course ... %s" % course.title
lecture.course = course
lecture.save()
lecture_count += 1
# professor save
match_scholar = filter(lambda a: lecture.year == a[0] and lecture.semester == a[1] and lecture.code == a[2] and lecture.class_no.strip() == a[3].strip() and lecture.department_id == a[4], professors)
if len(match_scholar) != 0:
professors_not_updated = set()
for prof in lecture.professor.all():
professors_not_updated.add(prof.id)
for i in match_scholar:
try:
prof_id = i[5]
prof_name = unicode(i[6], 'cp949')
if i[8] is None or i[8]=='':
prof_name_en = ''
else:
prof_name_en = unicode(i[8].strip(),'cp949')
if i[4] is None or i[4]=='':
prof_major = ''
else:
prof_major = i[4]
professor = Professor.objects.get(professor_id=prof_id)
if professor.professor_name != prof_name and prof_id !=830:
professor.professor_name = prof_name
professor.save()
if professor.professor_name_en != prof_name_en and prof_id != 830 and prof_name_en!='':
professor.professor_name_en = prof_name_en
professor.save()
if professor.major != prof_major and prof_id != 830:
professor.major = prof_major
professor.save()
professors_not_updated.remove(professor.id)
except Professor.DoesNotExist:
professor = Professor.objects.create(professor_id=prof_id)
professor.professor_name = prof_name
professor.professor_name_en = prof_name_en
professor.major = prof_major
professor.save()
# print "Making new Professor ... %s" % professor.professor_name
except KeyError:
pass
lecture.professor.add(professor)
if professor.professor_id != 830:
lecture.course.professors.add(professor)
for key in professors_not_updated:
professor = Professor.objects.get(id=key)
lecture.professor.remove(professor)
else:
lecture.professor.add(staff_professor)
try:
lectures_not_updated.remove(lecture_key_hashable)
except KeyError:
pass
# Extract exam-time, class-time info.
print 'Extracting exam time information...'
query = 'SELECT * FROM view_OTL_exam_time WHERE lecture_year = %d AND lecture_term = %d' % (next_year, next_semester)
exam_times = execute(host, port, user, password, query)
print exam_times
ExamTime.objects.filter(lecture__year__exact=next_year, lecture__semester=next_semester).delete()
for row in exam_times:
print row
myrow = []
for elem in row:
if isinstance(elem, str):
try:
elem = elem.decode(encoding)
except UnicodeDecodeError:
elem = u'???'
print>> sys.stderr, 'ERROR: parsing error on lecture. cannot read in cp949.'
myrow.append(elem)
lecture_key = {
'deleted': False,
'code': myrow[2],
'year': int(myrow[0]),
'semester': int(myrow[1]),
'department': Department.objects.filter(id = int(myrow[4]))[0],
'class_no': myrow[3].strip(),
}
try:
lecture = Lecture.objects.get(**lecture_key)
exam_time = ExamTime(lecture=lecture)
exam_time.day = int(myrow[5]) - 1
exam_time.begin = time(hour=myrow[6].hour, minute=myrow[6].minute)
exam_time.end = time(hour=myrow[7].hour, minute=myrow[7].minute)
print 'Updating exam time for %s' % lecture
exam_time.save()
except Lecture.DoesNotExist:
print 'Exam-time for non-existing lecture %s; skip it...' % myrow[2]
# Extract class time.
print 'Extracting class time information...'
query = 'SELECT * FROM view_OTL_time WHERE lecture_year = %d AND lecture_term = %d' % (next_year, next_semester)
class_times = execute(host, port, user, password, query)
# print class_times
ClassTime.objects.filter(lecture__year__exact=next_year, lecture__semester=next_semester).delete()
for row in class_times:
print row
myrow = []
for elem in row:
if isinstance(elem, str):
try:
elem = elem.decode(encoding)
except UnicodeDecodeError:
elem = u'???'
print>> sys.stderr, 'ERROR: parsing error on lecture. cannot read in cp949.'
myrow.append(elem)
lecture_key = {
'deleted': False,
'code': myrow[2],
'year': int(myrow[0]),
'semester': int(myrow[1]),
'department': Department.objects.filter(id=int(myrow[4]))[0],
'class_no': myrow[3].strip(),
}
try:
print (myrow)
lecture = Lecture.objects.get(**lecture_key)
class_time = ClassTime(lecture=lecture)
class_time.day = int(myrow[5]) - 1
class_time.begin = time(hour=myrow[6].hour, minute=myrow[6].minute)
class_time.end = time(hour=myrow[7].hour, minute=myrow[7].minute)
class_time.type = myrow[8]
class_time.building = myrow[9]
class_time.roomNum = myrow[10]
class_time.roomName = myrow[12]
class_time.roomName_en = myrow[13]
try:
class_time.unit_time = int(myrow[11])
except (ValueError, TypeError):
class_time.unit_time = 0
print 'Updating class time for %s' % lecture
class_time.save()
except Lecture.DoesNotExist:
print 'Class-time for non-existing lecture %s; skip it...' % myrow[2]
# Extract Syllabus info.
'''
query = 'SELECT * FROM view_OTL_syllabus WHERE lecture_year = %d AND lecture_term = %d' % (next_year, next_semester)
syllabuses = execute(host, port, user, password, query)
Syllabus.objects.filter(lecture__year__exact=next_year, lecture__semester=next_semester).delete()
for row in syllabuses:
myrow = []
for elem in row:
if isinstance(elem, str):
try:
elem = elem.decode(encoding)
except UnicodeDecodeError:
eleme = u'%s (???)' % row[2]
print>>sys.stderr, 'ERROR: parsing error on lecture %s' % row[2]
print>>sys.stderr, ' cannot read "%s" in cp949.' % elem
myrow.append(elem)
lecture_key = {
'code': myrow[2],
'year': int(myrow[0]),
'semester': int(myrow[1]),
'department': Department.objects.filter(id = int(myrow[4]))[0],
'class_no': myrow[3].strip(),
}
try:
lecture = Lecture.objects.get(**lecture_key)
syllabus = Syllabus(lecture=lecture)
syllabus.professor_info = myrow[5]
syllabus.abstract = myrow[6]
syllabus.evluation = myrow[7]
syllabus.materials = myrow[8]
syllabus.plan = myrow[9]
syllabus.etc = myrow[10]
syllabus.url = myrow[11]
syllabus.attachment = myrow[12]
print 'Updating syllabus information for %s' % lecture
syllabus.save()
except Lecture.DoesNotExist:
print 'Syllabus information for non-existing lecture %s; skip it...' % myrow[2]
'''
if not exclude_lecture:
# Mark deleted lectures to notify users.
print 'Marking deleted lectures...'
for key in lectures_not_updated:
lecture = Lecture.objects.get(id = key)
lecture.deleted = True
# print '%s is marked as deleted...' % lecture
lecture.save()
print '\nTotal number of departments : %d' % Department.objects.count()
print 'Total number of lectures newly added : %d' % lecture_count
|
import settings
import handlers.base_handler
import csv
class CartogramHandler(handlers.base_handler.BaseCartogramHandler):
def get_name(self):
return "Bangladesh"
def get_gen_file(self):
return "{}/bangladesh_processedmap.json".format(settings.CARTOGRAM_DATA_DIR)
def validate_values(self, values):
if len(values) != 8:
return False
for v in values:
if type(v) != float:
return False
return True
def gen_area_data(self, values):
return """1 {} Barisal
2 {} Chittagong
3 {} Dhaka
4 {} Khulna
5 {} Mymensingh
6 {} Rajshahi
7 {} Rangpur
8 {} Sylhet""".format(*values)
def expect_geojson_output(self):
return True
def csv_to_area_string_and_colors(self, csvfile):
return self.order_by_example(csv.reader(csvfile), "Division", 0, 1, 2, 3, ["Barisal","Chittagong","Dhaka","Khulna","Mymensingh","Rajshahi","Rangpur","Sylhet"], [0.0 for i in range(0,8)], {"Barisal":"1","Chittagong":"2","Dhaka":"3","Khulna":"4","Mymensingh":"5","Rajshahi":"6","Rangpur":"7","Sylhet":"8"})
|
#from TestingTemplate import Test
import sys
sys.path.append('../../ExVivo')
import pandas as pd
import numpy as np
from matplotlib import pyplot as plt
def calcStress(disp, force, dims):
    # Cauchy stress: use the dims argument rather than the module-level dims1
    stretch = (disp - disp[0])/dims['length'] + 1
    stress = force/(dims['width']*dims['thickness'])*stretch
    return stress
def calcStrain(disp, dims):
    # Engineering strain
    disp = disp - disp[0] # Zero the displacement for the first measurement
    strain = disp/dims['length'] # engineering strain
    return strain
def _normalizeData(ydata, xdata):
    '''
    Takes two numpy arrays and min-max normalizes each to the [0, 1] range.
    returns:
        numpy arrays (y_norm, x_norm)
    '''
x_norm = (xdata - np.min(xdata))/(np.max(xdata)-np.min(xdata))
y_norm = (ydata - np.min(ydata))/(np.max(ydata)-np.min(ydata))
return y_norm, x_norm
def _setHTM():
    # Placeholder, not implemented yet. How the X_data / Y_data will look once
    # it is in the data structure format:
    #   xdata = DataStructure._ProcessedData[0, :]
    #   ydata = DataStructure._ProcessedData[:, 0]
    pass
#Read in Sample Data
df1 = pd.read_csv("NIH_BAV_Z2LC2_L1.csv", skiprows=[0,1,2,4], header = 'infer')
df2 = pd.read_csv("NIH_BAV_Z2LC2_U1.csv", skiprows=[0,1,2,4], header = 'infer')
dims1 = {'width':2.745,'thickness':1.75,'length':9.98}
dims2 = {'width':2.87,'thickness':1.815,'length':8.67}
#Calculate Stress Strain
stress = calcStress(df1['Disp'],df1['Load'],dims1)
strain = calcStrain(df1['Disp'],dims1)
#Plot unprocessed stress v. strain
plt.plot(strain,stress)
plt.xlabel('Strain')
plt.ylabel('Stress')
plt.title('Test Data')
#Normalize Data and Plot
stress_norm, strain_norm = _normalizeData(stress, strain)
plt.plot(strain_norm, stress_norm)
plt.xlabel('strain_norm')
plt.ylabel('stress_norm')
plt.title('Test Data - Normalized')
plt.show()
|
# Jorge Castanon, October 2015
# Data Scientist @ IBM
# run in terminal sitting on YOUR-PATH-TO-REPO:
# ~/Documents/spark-1.5.1/bin/spark-submit mllib-scripts/cluster-words.py
# Replace this line with:
# /YOUR-SPARK-HOME/bin/spark-submit mllib-scripts/cluster-words.py
import numpy as np
import math
from pyspark.context import SparkContext
from pyspark.mllib.clustering import KMeans
# next 2 lines can be replaced to read from hdfs,
# if the Word2Vec matrix is big
Feat = np.load('mllib-scripts/myW2Vmatrix.npy') # reads model generated by Word2Vec
words = np.load('mllib-scripts/myWordList.npy') # reads list of words
print "\n================================================="
print "Size of the Word2Vec matrix is: ", Feat.shape
print "Number of words in the models: ", words.shape
print "=================================================\n"
## Spark Context
sc = SparkContext('local','cluster-words')
## Read the Word2Vec model
# the next line should be read/stored from hdfs if it is large
Feat = sc.parallelize(Feat)
## K-means clustering with Spark
K = int(math.floor(math.sqrt(float(words.shape[0])/2))) # Number of clusters
# K ~ sqrt(n/2) this is a rule of thumb for choosing K,
# where n is the number of words in the model
# feel free to choose K with a fancier method
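# e.g. a vocabulary of 20,000 words gives K = floor(sqrt(20000 / 2)) = 100
# clusters (illustrative number only, not derived from this repository's data)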
maxiters = 100 # may change depending on the data
clusters = KMeans.train(Feat, k = K, maxIterations = maxiters, runs = 10)
print "\n================================================="
print "Number of clusters used: ", K
print "=================================================\n"
## Getting Cluster Labels for each Word and saving to a numpy file
labels = Feat.map(lambda point: clusters.predict(point)) # add labels to each vector (word)
list_labels = labels.collect()
np.save('mllib-scripts/myClusters.npy',list_labels)
sc.stop()
|
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import stat
import tempfile
import unittest
from hadoop import fs, pseudo_hdfs4
from nose.plugins.attrib import attr
from nose.tools import assert_equal, assert_true
logger = logging.getLogger(__name__)
class LocalSubFileSystemTest(unittest.TestCase):
def setUp(self):
self.root = tempfile.mkdtemp()
self.fs = fs.LocalSubFileSystem(self.root)
def tearDown(self):
if not os.listdir(self.root):
os.rmdir(self.root)
else:
logger.warning("Tests did not clean up after themselves in %s" % self.root)
def test_resolve_path(self):
self.assertEquals(self.root + "/", self.fs._resolve_path("/"))
self.assertEquals(self.root + "/foo", self.fs._resolve_path("/foo"))
self.assertRaises(fs.IllegalPathException, self.fs._resolve_path, "/../foo")
# These are preserved, but that should be ok.
self.assertEquals(self.root + "/bar/../foo", self.fs._resolve_path("/bar/../foo"))
def test_open_and_remove(self):
self.assertRaises(IOError, self.fs.open, "/notfound", "r")
f = self.fs.open("/x", "w")
f.write("Hello world\n")
f.close()
f = self.fs.open("/x")
self.assertEquals("Hello world\n", f.read())
f.close()
self.fs.remove("/x")
def test_rename(self):
# No exceptions means this worked fine.
self.fs.open("/x", "w").close()
self.fs.rename("/x", "/y")
self.fs.remove("/y")
def test_listdir(self):
self.fs.mkdir("/abc")
self.fs.open("/abc/x", "w").close()
self.fs.open("/abc/y", "w").close()
self.assertEquals(["abc"], self.fs.listdir("/"))
self.assertEquals(["x", "y"], sorted(self.fs.listdir("/abc")))
self.fs.remove("/abc/x")
self.fs.remove("/abc/y")
self.fs.rmdir("/abc")
def test_listdir_stats(self):
self.fs.mkdir("/abc")
self.fs.open("/abc/x", "w").close()
self.fs.open("/abc/y", "w").close()
stats = self.fs.listdir_stats("/")
self.assertEquals(["/abc"], [s['path'] for s in stats])
self.assertEquals(["/abc/x", "/abc/y"],
sorted(s['path'] for s in self.fs.listdir_stats("/abc")))
self.fs.remove("/abc/x")
self.fs.remove("/abc/y")
self.fs.rmdir("/abc")
def test_keyword_args(self):
# This shouldn't work!
self.assertRaises(TypeError, self.fs.open, name="/foo", mode="w")
@attr('requires_hadoop')
def test_hdfs_copy():
minicluster = pseudo_hdfs4.shared_cluster()
minifs = minicluster.fs
try:
olduser = minifs.setuser(minifs.superuser)
minifs.chmod('/', 0777)
minifs.setuser(olduser)
data = "I will not make flatuent noises in class\n" * 2000
minifs.create('/copy_test_src', permission=0646, data=data)
minifs.create('/copy_test_dst', data="some initial data")
minifs.copyfile('/copy_test_src', '/copy_test_dst')
actual = minifs.read('/copy_test_dst', 0, len(data) + 100)
assert_equal(data, actual)
sb = minifs.stats('/copy_test_dst')
assert_equal(0646, stat.S_IMODE(sb.mode))
finally:
minifs.do_as_superuser(minifs.rmtree, '/copy_test_src')
minifs.do_as_superuser(minifs.rmtree, '/copy_test_dst')
@attr('requires_hadoop')
def test_hdfs_full_copy():
minicluster = pseudo_hdfs4.shared_cluster()
minifs = minicluster.fs
minifs.setuser('test')
try:
minifs.do_as_superuser(minifs.chmod, '/', 0777)
minifs.mkdir('/copy_test')
minifs.mkdir('/copy_test/src')
minifs.mkdir('/copy_test/dest')
# File to directory copy.
# No guarantees on file permissions at the moment.
data = "I will not make flatuent noises in class\n" * 2000
minifs.create('/copy_test/src/file.txt', permission=0646, data=data)
minifs.copy('/copy_test/src/file.txt', '/copy_test/dest')
assert_true(minifs.exists('/copy_test/dest/file.txt'))
# Directory to directory copy.
# No guarantees on directory permissions at the moment.
minifs.copy('/copy_test/src', '/copy_test/dest', True)
assert_true(minifs.exists('/copy_test/dest/src'))
# Copy directory to file should fail.
try:
minifs.copy('/copy_test/src', '/copy_test/dest/file.txt', True)
except IOError:
pass
except Exception:
raise
finally:
minifs.do_as_superuser(minifs.rmtree, '/copy_test')
@attr('requires_hadoop')
def test_hdfs_copy_from_local():
minicluster = pseudo_hdfs4.shared_cluster()
minifs = minicluster.fs
minifs.setuser('test')
minifs.do_as_superuser(minifs.chmod, '/', 0777)
path = os.path.join(tempfile.gettempdir(), 'copy_test_src')
logging.info(path)
data = "I will not make flatuent noises in class\n" * 2000
f = open(path, 'w')
f.write(data)
f.close()
minifs.copyFromLocal(path, '/copy_test_dst')
actual = minifs.read('/copy_test_dst', 0, len(data) + 100)
assert_equal(data, actual)
if __name__ == "__main__":
logging.basicConfig()
unittest.main()
|
# file : middleware/mymiddleware.py
from django.http import HttpResponse
from django.utils.deprecation import MiddlewareMixin
import re
class MyMiddleWare(MiddlewareMixin):
def process_request(self, request):
print("中间件方法 process_request 被调用")
def process_view(self, request, callback, callback_args, callback_kwargs):
print("中间件方法 process_view 被调用")
def process_response(self, request, response):
print("中间件方法 process_response 被调用")
return response
class MyMiddleWare2(MiddlewareMixin):
def process_request(self, request):
print("中间件方法2 process_request 被调用")
def process_view(self, request, callback, callback_args, callback_kwargs):
print("中间件方法2 process_view 被调用")
def process_response(self, request, response):
print("中间件方法2 process_response 被调用")
return response
class MyMW(MiddlewareMixin):
visit_times = {}
def process_request(self, request):
        # 1. Get the client IP address
        cip = request.META['REMOTE_ADDR']
        # 2. Only rate-limit paths that start with /test
if not re.match(r'^/test', request.path_info):
return
        # Look up the visit count recorded for this IP
times = self.visit_times.get(cip, 0)
if times >= 5:
return HttpResponse('no way!')
        # Each additional visit increments the count by 1
self.visit_times[cip] = times + 1
print('%s visit %s times' % (cip, self.visit_times[cip]))
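# For these classes to run they must be listed in the project's settings
# (module path below is an assumption based on the file comment at the top):
#
# MIDDLEWARE = [
#     ...,
#     'middleware.mymiddleware.MyMiddleWare',
#     'middleware.mymiddleware.MyMiddleWare2',
#     'middleware.mymiddleware.MyMW',
# ]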
|
from . import vocab
from . import tokenizers
from . import batchify
from .vocab import *
__all__ = ['batchify', 'tokenizers'] + vocab.__all__
|
from sqlalchemy import schema
from sqlalchemy.orm import Session
from fastapi import Depends, APIRouter, HTTPException, status
from fastapi.security.oauth2 import OAuth2PasswordRequestForm
from .. import db, models, schemas, utils, oauth2
router = APIRouter(
tags=["Authentication"],
)
@router.post("/login", response_model=schemas.Token)
def login(user_credentials: OAuth2PasswordRequestForm = Depends(), db: Session = Depends(db.get_db)):
user = db.query(models.User).filter(
models.User.email == user_credentials.username).first()
if not user:
raise HTTPException(
status_code=status.HTTP_401_UNAUTHORIZED,
detail="Incorrect email or password",
)
if not utils.verify(user_credentials.password, user.password):
raise HTTPException(
status_code=status.HTTP_401_UNAUTHORIZED,
detail="Incorrect email or password",
)
    access_token = oauth2.create_access_token(data={"user_id": user.id})
    return {"access_token": access_token, "token_type": "bearer"}
|
import tensorflow as tf
from tensorflow.python.framework import ops
_op = tf.load_op_library('local_cluster.so')
def LocalCluster(neighbour_idxs, hierarchy_idxs, row_splits):
'''
.Input("neighbour_idxs: int32") //change to distances!!
.Input("hierarchy_idxs: int32")
.Input("global_idxs: int32")
.Input("row_splits: int32")
.Output("out_row_splits: int32")
.Output("selection_idxs: int32")
.Output("backscatter_idxs: int32");
'''
global_idxs = tf.range(hierarchy_idxs.shape[0],dtype='int32')
rs,sel,ggather = _op.LocalCluster(neighbour_idxs=neighbour_idxs,
hierarchy_idxs=hierarchy_idxs,
global_idxs=global_idxs,
row_splits=row_splits)
return rs,sel,ggather
@ops.RegisterGradient("LocalCluster")
def _LocalClusterGrad(op, asso_grad, is_cgrad,ncondgrad):
return [None, None, None, None, None]
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
import logging
from auxlib.ish import dals
log = logging.getLogger(__name__)
def test_dals():
test_string = """
This little piggy went to the market.
This little piggy stayed home.
This little piggy had roast beef.
"""
assert test_string.count('\n') == 4
assert dals(test_string).count('\n') == 3
def test_dals_keep_space():
test_string = """
This little piggy went to the market.
This little got indented."""
assert test_string.count('\n') == 2
assert dals(test_string).count('\n') == 1
assert dals(test_string).count(' ') == 1
|
from typing import Any, Optional
from great_expectations.core.usage_statistics.anonymizers.base import BaseAnonymizer
from great_expectations.datasource import (
BaseDatasource,
Datasource,
LegacyDatasource,
PandasDatasource,
SimpleSqlalchemyDatasource,
SparkDFDatasource,
SqlAlchemyDatasource,
)
class DatasourceAnonymizer(BaseAnonymizer):
# ordered bottom up in terms of inheritance order
_legacy_ge_classes = [
PandasDatasource,
SqlAlchemyDatasource,
SparkDFDatasource,
LegacyDatasource,
]
# ordered bottom up in terms of inheritance order
_ge_classes = [
SimpleSqlalchemyDatasource,
Datasource,
BaseDatasource,
]
def __init__(
self,
aggregate_anonymizer: "Anonymizer", # noqa: F821
salt: Optional[str] = None,
) -> None:
super().__init__(salt=salt)
self._aggregate_anonymizer = aggregate_anonymizer
def anonymize(self, obj: Optional[object] = None, *args, **kwargs) -> Any:
if obj is not None and isinstance(obj, SimpleSqlalchemyDatasource):
return self._anonymize_simple_sqlalchemy_datasource(*args, **kwargs)
return self._anonymize_datasource_info(*args, **kwargs)
def _anonymize_datasource_info(self, name: str, config: dict) -> dict:
anonymized_info_dict = {}
anonymized_info_dict["anonymized_name"] = self._anonymize_string(name)
# Legacy Datasources (<= v0.12 v2 BatchKwargs API)
if self.get_parent_class_v2_api(config=config) is not None:
self._anonymize_object_info(
anonymized_info_dict=anonymized_info_dict,
object_config=config,
)
# Datasources (>= v0.13 v3 BatchRequest API), and custom v2 BatchKwargs API
elif self.get_parent_class_v3_api(config=config) is not None:
self._anonymize_object_info(
anonymized_info_dict=anonymized_info_dict,
object_config=config,
)
execution_engine_config = config.get("execution_engine")
anonymized_info_dict[
"anonymized_execution_engine"
] = self._anonymize_execution_engine_info(
name=execution_engine_config.get("name", ""),
config=execution_engine_config,
)
data_connector_configs = config.get("data_connectors")
anonymized_info_dict["anonymized_data_connectors"] = [
self._aggregate_anonymizer.anonymize(
name=data_connector_name, config=data_connector_config
)
for data_connector_name, data_connector_config in data_connector_configs.items()
]
return anonymized_info_dict
def _anonymize_simple_sqlalchemy_datasource(self, name: str, config: dict) -> dict:
"""
SimpleSqlalchemyDatasource requires a separate anonymization scheme.
"""
anonymized_info_dict = {}
anonymized_info_dict["anonymized_name"] = self._anonymize_string(name)
if config.get("module_name") is None:
config["module_name"] = "great_expectations.datasource"
self._anonymize_object_info(
anonymized_info_dict=anonymized_info_dict,
object_config=config,
)
# Only and directly provide parent_class of execution engine
anonymized_info_dict["anonymized_execution_engine"] = {
"parent_class": "SqlAlchemyExecutionEngine"
}
# Use the `introspection` and `tables` keys to find data_connectors in SimpleSqlalchemyDatasources
introspection_data_connector_configs = config.get("introspection")
tables_data_connector_configs = config.get("tables")
introspection_data_connector_anonymized_configs = []
if introspection_data_connector_configs is not None:
for (
data_connector_name,
data_connector_config,
) in introspection_data_connector_configs.items():
if data_connector_config.get("class_name") is None:
data_connector_config[
"class_name"
] = "InferredAssetSqlDataConnector"
if data_connector_config.get("module_name") is None:
data_connector_config[
"module_name"
] = "great_expectations.datasource.data_connector"
introspection_data_connector_anonymized_configs.append(
self._aggregate_anonymizer.anonymize(
name=data_connector_name, config=data_connector_config
)
)
tables_data_connector_anonymized_configs = []
if tables_data_connector_configs is not None:
for (
data_connector_name,
data_connector_config,
) in tables_data_connector_configs.items():
if data_connector_config.get("class_name") is None:
data_connector_config[
"class_name"
] = "ConfiguredAssetSqlDataConnector"
if data_connector_config.get("module_name") is None:
data_connector_config[
"module_name"
] = "great_expectations.datasource.data_connector"
tables_data_connector_anonymized_configs.append(
self._aggregate_anonymizer.anonymize(
name=data_connector_name, config=data_connector_config
)
)
anonymized_info_dict["anonymized_data_connectors"] = (
introspection_data_connector_anonymized_configs
+ tables_data_connector_anonymized_configs
)
return anonymized_info_dict
def _anonymize_execution_engine_info(self, name: str, config: dict) -> dict:
anonymized_info_dict = {}
anonymized_info_dict["anonymized_name"] = self._anonymize_string(name)
from great_expectations.data_context.types.base import (
ExecutionEngineConfig,
executionEngineConfigSchema,
)
# Roundtrip through schema validation to remove any illegal fields add/or restore any missing fields.
execution_engine_config: ExecutionEngineConfig = (
executionEngineConfigSchema.load(config)
)
execution_engine_config_dict: dict = executionEngineConfigSchema.dump(
execution_engine_config
)
self._anonymize_object_info(
anonymized_info_dict=anonymized_info_dict,
object_config=execution_engine_config_dict,
)
return anonymized_info_dict
def can_handle(self, obj: Optional[object] = None, **kwargs) -> bool:
return obj is not None and isinstance(obj, BaseDatasource)
@staticmethod
def get_parent_class(config: dict) -> Optional[str]:
return BaseAnonymizer.get_parent_class(
classes_to_check=DatasourceAnonymizer._ge_classes
+ DatasourceAnonymizer._legacy_ge_classes,
object_config=config,
)
@staticmethod
def get_parent_class_v2_api(config: dict) -> Optional[str]:
return BaseAnonymizer.get_parent_class(
classes_to_check=DatasourceAnonymizer._legacy_ge_classes,
object_config=config,
)
@staticmethod
def get_parent_class_v3_api(config: dict) -> Optional[str]:
return BaseAnonymizer.get_parent_class(
classes_to_check=DatasourceAnonymizer._ge_classes,
object_config=config,
)
|
import unittest
import pickle
import numpy as np
import mockredis
from mock import patch
from datasketch.lsh import MinHashLSH
from datasketch.minhash import MinHash
from datasketch.weighted_minhash import WeightedMinHashGenerator
def fake_redis(**kwargs):
redis = mockredis.mock_redis_client(**kwargs)
redis.connection_pool = None
redis.response_callbacks = None
return redis
class TestMinHashLSH(unittest.TestCase):
def test_init(self):
lsh = MinHashLSH(threshold=0.8)
self.assertTrue(lsh.is_empty())
b1, r1 = lsh.b, lsh.r
lsh = MinHashLSH(threshold=0.8, weights=(0.2,0.8))
b2, r2 = lsh.b, lsh.r
self.assertTrue(b1 < b2)
self.assertTrue(r1 > r2)
def test__H(self):
'''
Check _H output consistent bytes length given
the same concatenated hash value size
'''
for l in range(2, 128+1, 16):
lsh = MinHashLSH(num_perm=128)
m = MinHash()
m.update("abcdefg".encode("utf8"))
m.update("1234567".encode("utf8"))
lsh.insert("m", m)
sizes = [len(H) for ht in lsh.hashtables for H in ht]
self.assertTrue(all(sizes[0] == s for s in sizes))
def test_insert(self):
lsh = MinHashLSH(threshold=0.5, num_perm=16)
m1 = MinHash(16)
m1.update("a".encode("utf8"))
m2 = MinHash(16)
m2.update("b".encode("utf8"))
lsh.insert("a", m1)
lsh.insert("b", m2)
for t in lsh.hashtables:
self.assertTrue(len(t) >= 1)
items = []
for H in t:
items.extend(t[H])
self.assertTrue("a" in items)
self.assertTrue("b" in items)
self.assertTrue("a" in lsh)
self.assertTrue("b" in lsh)
for i, H in enumerate(lsh.keys["a"]):
self.assertTrue("a" in lsh.hashtables[i][H])
m3 = MinHash(18)
self.assertRaises(ValueError, lsh.insert, "c", m3)
def test_query(self):
lsh = MinHashLSH(threshold=0.5, num_perm=16)
m1 = MinHash(16)
m1.update("a".encode("utf8"))
m2 = MinHash(16)
m2.update("b".encode("utf8"))
lsh.insert("a", m1)
lsh.insert("b", m2)
result = lsh.query(m1)
self.assertTrue("a" in result)
result = lsh.query(m2)
self.assertTrue("b" in result)
m3 = MinHash(18)
self.assertRaises(ValueError, lsh.query, m3)
def test_query_buffer(self):
lsh = MinHashLSH(threshold=0.5, num_perm=16)
m1 = MinHash(16)
m1.update("a".encode("utf8"))
m2 = MinHash(16)
m2.update("b".encode("utf8"))
lsh.insert("a", m1)
lsh.insert("b", m2)
lsh.add_to_query_buffer(m1)
result = lsh.collect_query_buffer()
self.assertTrue("a" in result)
lsh.add_to_query_buffer(m2)
result = lsh.collect_query_buffer()
self.assertTrue("b" in result)
m3 = MinHash(18)
self.assertRaises(ValueError, lsh.add_to_query_buffer, m3)
def test_remove(self):
lsh = MinHashLSH(threshold=0.5, num_perm=16)
m1 = MinHash(16)
m1.update("a".encode("utf8"))
m2 = MinHash(16)
m2.update("b".encode("utf8"))
lsh.insert("a", m1)
lsh.insert("b", m2)
lsh.remove("a")
self.assertTrue("a" not in lsh.keys)
for table in lsh.hashtables:
for H in table:
self.assertGreater(len(table[H]), 0)
self.assertTrue("a" not in table[H])
self.assertRaises(ValueError, lsh.remove, "c")
def test_pickle(self):
lsh = MinHashLSH(threshold=0.5, num_perm=16)
m1 = MinHash(16)
m1.update("a".encode("utf8"))
m2 = MinHash(16)
m2.update("b".encode("utf8"))
lsh.insert("a", m1)
lsh.insert("b", m2)
lsh2 = pickle.loads(pickle.dumps(lsh))
result = lsh2.query(m1)
self.assertTrue("a" in result)
result = lsh2.query(m2)
self.assertTrue("b" in result)
def test_insert_redis(self):
with patch('redis.Redis', fake_redis) as mock_redis:
lsh = MinHashLSH(threshold=0.5, num_perm=16, storage_config={
'type': 'redis', 'redis': {'host': 'localhost', 'port': 6379}
})
m1 = MinHash(16)
m1.update("a".encode("utf8"))
m2 = MinHash(16)
m2.update("b".encode("utf8"))
lsh.insert("a", m1)
lsh.insert("b", m2)
for t in lsh.hashtables:
self.assertTrue(len(t) >= 1)
items = []
for H in t:
items.extend(t[H])
self.assertTrue(pickle.dumps("a") in items)
self.assertTrue(pickle.dumps("b") in items)
self.assertTrue("a" in lsh)
self.assertTrue("b" in lsh)
for i, H in enumerate(lsh.keys[pickle.dumps("a")]):
self.assertTrue(pickle.dumps("a") in lsh.hashtables[i][H])
m3 = MinHash(18)
self.assertRaises(ValueError, lsh.insert, "c", m3)
def test_query_redis(self):
with patch('redis.Redis', fake_redis) as mock_redis:
lsh = MinHashLSH(threshold=0.5, num_perm=16, storage_config={
'type': 'redis', 'redis': {'host': 'localhost', 'port': 6379}
})
m1 = MinHash(16)
m1.update("a".encode("utf8"))
m2 = MinHash(16)
m2.update("b".encode("utf8"))
lsh.insert("a", m1)
lsh.insert("b", m2)
result = lsh.query(m1)
self.assertTrue("a" in result)
result = lsh.query(m2)
self.assertTrue("b" in result)
m3 = MinHash(18)
self.assertRaises(ValueError, lsh.query, m3)
def test_query_buffer_redis(self):
with patch('redis.Redis', fake_redis) as mock_redis:
lsh = MinHashLSH(threshold=0.5, num_perm=16, storage_config={
'type': 'redis', 'redis': {'host': 'localhost', 'port': 6379}
})
m1 = MinHash(16)
m1.update("a".encode("utf8"))
m2 = MinHash(16)
m2.update("b".encode("utf8"))
lsh.insert("a", m1)
lsh.insert("b", m2)
lsh.query(m1)
lsh.add_to_query_buffer(m1)
result = lsh.collect_query_buffer()
self.assertTrue("a" in result)
lsh.add_to_query_buffer(m2)
result = lsh.collect_query_buffer()
self.assertTrue("b" in result)
m3 = MinHash(18)
self.assertRaises(ValueError, lsh.add_to_query_buffer, m3)
def test_insertion_session(self):
lsh = MinHashLSH(threshold=0.5, num_perm=16)
m1 = MinHash(16)
m1.update("a".encode("utf8"))
m2 = MinHash(16)
m2.update("b".encode("utf8"))
data = [("a", m1), ("b", m2)]
with lsh.insertion_session() as session:
for key, minhash in data:
session.insert(key, minhash)
for t in lsh.hashtables:
self.assertTrue(len(t) >= 1)
items = []
for H in t:
items.extend(t[H])
self.assertTrue("a" in items)
self.assertTrue("b" in items)
self.assertTrue("a" in lsh)
self.assertTrue("b" in lsh)
for i, H in enumerate(lsh.keys["a"]):
self.assertTrue("a" in lsh.hashtables[i][H])
def test_get_counts(self):
lsh = MinHashLSH(threshold=0.5, num_perm=16)
m1 = MinHash(16)
m1.update("a".encode("utf8"))
m2 = MinHash(16)
m2.update("b".encode("utf8"))
lsh.insert("a", m1)
lsh.insert("b", m2)
counts = lsh.get_counts()
self.assertEqual(len(counts), lsh.b)
for table in counts:
self.assertEqual(sum(table.values()), 2)
class TestWeightedMinHashLSH(unittest.TestCase):
def test_init(self):
lsh = MinHashLSH(threshold=0.8)
self.assertTrue(lsh.is_empty())
b1, r1 = lsh.b, lsh.r
lsh = MinHashLSH(threshold=0.8, weights=(0.2,0.8))
b2, r2 = lsh.b, lsh.r
self.assertTrue(b1 < b2)
self.assertTrue(r1 > r2)
def test__H(self):
'''
Check _H output consistent bytes length given
the same concatenated hash value size
'''
mg = WeightedMinHashGenerator(100, sample_size=128)
for l in range(2, mg.sample_size+1, 16):
m = mg.minhash(np.random.randint(1, 99999999, 100))
lsh = MinHashLSH(num_perm=128)
lsh.insert("m", m)
sizes = [len(H) for ht in lsh.hashtables for H in ht]
self.assertTrue(all(sizes[0] == s for s in sizes))
def test_insert(self):
lsh = MinHashLSH(threshold=0.5, num_perm=4)
mg = WeightedMinHashGenerator(10, 4)
m1 = mg.minhash(np.random.uniform(1, 10, 10))
m2 = mg.minhash(np.random.uniform(1, 10, 10))
lsh.insert("a", m1)
lsh.insert("b", m2)
for t in lsh.hashtables:
self.assertTrue(len(t) >= 1)
items = []
for H in t:
items.extend(t[H])
self.assertTrue("a" in items)
self.assertTrue("b" in items)
self.assertTrue("a" in lsh)
self.assertTrue("b" in lsh)
for i, H in enumerate(lsh.keys["a"]):
self.assertTrue("a" in lsh.hashtables[i][H])
mg = WeightedMinHashGenerator(10, 5)
m3 = mg.minhash(np.random.uniform(1, 10, 10))
self.assertRaises(ValueError, lsh.insert, "c", m3)
def test_query(self):
lsh = MinHashLSH(threshold=0.5, num_perm=4)
mg = WeightedMinHashGenerator(10, 4)
m1 = mg.minhash(np.random.uniform(1, 10, 10))
m2 = mg.minhash(np.random.uniform(1, 10, 10))
lsh.insert("a", m1)
lsh.insert("b", m2)
result = lsh.query(m1)
self.assertTrue("a" in result)
result = lsh.query(m2)
self.assertTrue("b" in result)
mg = WeightedMinHashGenerator(10, 5)
m3 = mg.minhash(np.random.uniform(1, 10, 10))
self.assertRaises(ValueError, lsh.query, m3)
def test_remove(self):
lsh = MinHashLSH(threshold=0.5, num_perm=4)
mg = WeightedMinHashGenerator(10, 4)
m1 = mg.minhash(np.random.uniform(1, 10, 10))
m2 = mg.minhash(np.random.uniform(1, 10, 10))
lsh.insert("a", m1)
lsh.insert("b", m2)
lsh.remove("a")
self.assertTrue("a" not in lsh.keys)
for table in lsh.hashtables:
for H in table:
self.assertGreater(len(table[H]), 0)
self.assertTrue("a" not in table[H])
self.assertRaises(ValueError, lsh.remove, "c")
def test_pickle(self):
lsh = MinHashLSH(threshold=0.5, num_perm=4)
mg = WeightedMinHashGenerator(10, 4)
m1 = mg.minhash(np.random.uniform(1, 10, 10))
m2 = mg.minhash(np.random.uniform(1, 10, 10))
lsh.insert("a", m1)
lsh.insert("b", m2)
lsh2 = pickle.loads(pickle.dumps(lsh))
result = lsh2.query(m1)
self.assertTrue("a" in result)
result = lsh2.query(m2)
self.assertTrue("b" in result)
if __name__ == "__main__":
unittest.main()
|
"""
Package providing a module to parse the content from tabbed tree files
"""
__version__ = '0.2.0'
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_django-user-verification
------------
Tests for verification `generators`.
"""
# Third Party Stuff
from django.test import TestCase
from nose.tools import ok_
# Local Stuff
from verification.generators import NumberGenerator
class TestNumberGenerator(TestCase):
def setUp(self):
self.generator = NumberGenerator()
def test_return_number_generator_returns_number(self):
number = self.generator("test")
ok_(number.isdigit())
|
#(c) 2016-2018 by Authors
#This file is a part of Flye program.
#Released under the BSD license (see LICENSE file)
"""
Modifies the repeat graph using the Trestle output
"""
import logging
from itertools import izip, chain
from collections import defaultdict
import flye.utils.fasta_parser as fp
from flye.repeat_graph.graph_alignment import iter_alignments
logger = logging.getLogger()
class Connection:
__slots__ = ("id", "path", "sequence")
def __init__(self, id=None, path=[], sequence=""):
self.id = id
self.path = path
self.sequence = sequence
class RepeatInfo:
__slots__ = ("id", "repeat_path", "all_reads", "in_reads", "out_reads",
"sequences", "multiplicity")
def __init__(self, id, repeat_path, all_reads, in_reads, out_reads,
sequences, multiplicity):
self.id = id
self.repeat_path = repeat_path
self.all_reads = all_reads
self.in_reads = in_reads
self.out_reads = out_reads
self.sequences = sequences
self.multiplicity = multiplicity
def get_simple_repeats(repeat_graph, alignments_file, edge_seqs):
next_path_id = 1
path_ids = {}
repeats_dict = {}
MULT = 2
paths_to_resolve = []
interesting_edges = set()
for path in repeat_graph.get_unbranching_paths():
if not path[0].repetitive or path[0].self_complement:
continue
is_simple = True
inputs = set()
for in_edge in path[0].node_left.in_edges:
inputs.add(in_edge.edge_id)
if in_edge.repetitive:
is_simple = False
outputs = set()
for out_edge in path[-1].node_right.out_edges:
outputs.add(out_edge.edge_id)
if out_edge.repetitive:
is_simple = False
if not is_simple or len(inputs) != MULT or len(outputs) != MULT:
continue
paths_to_resolve.append((path, inputs, outputs))
interesting_edges.update(set(map(lambda e: e.edge_id, path)))
interesting_alignments = []
for read_aln in iter_alignments(alignments_file):
repeat_read = False
for edge_aln in read_aln:
if edge_aln.edge_id in interesting_edges:
repeat_read = True
if repeat_read:
interesting_alignments.append(read_aln)
for path, inputs, outputs in paths_to_resolve:
if path[0].edge_id not in path_ids:
path_ids[path[0].edge_id] = next_path_id
path_ids[-path[-1].edge_id] = -next_path_id
next_path_id += 1
path_id = path_ids[path[0].edge_id]
repeat_edge_ids = set(map(lambda e: e.edge_id, path))
inner_reads = []
input_reads = defaultdict(list)
output_reads = defaultdict(list)
for read_aln in interesting_alignments:
repeat_read = False
for edge_aln in read_aln:
if edge_aln.edge_id in repeat_edge_ids:
repeat_read = True
if not repeat_read:
continue
inner_reads.append(read_aln[0].overlap.cur_id)
for prev_edge, next_edge in izip(read_aln[:-1], read_aln[1:]):
if (prev_edge.edge_id in inputs and
next_edge.edge_id == path[0].edge_id):
input_reads[prev_edge.edge_id].append(prev_edge.overlap.cur_id)
if (prev_edge.edge_id == path[-1].edge_id and
next_edge.edge_id in outputs):
output_reads[next_edge.edge_id].append(next_edge.overlap.cur_id)
if (not len(inner_reads) or len(input_reads) != MULT or
len(output_reads) != MULT):
continue
#add edges sequences:
sequences = {}
for edge in chain(input_reads, output_reads):
seq_id = repeat_graph.edges[edge].edge_sequences[0].edge_seq_name
seq = edge_seqs[seq_id[1:]]
if seq_id[0] == "-":
seq = fp.reverse_complement(seq)
sequences[edge] = seq
template_seq = ""
for edge in path:
seq_id = edge.edge_sequences[0].edge_seq_name
seq = edge_seqs[seq_id[1:]]
if seq_id[0] == "-":
seq = fp.reverse_complement(seq)
template_seq += seq
sequences["template"] = template_seq
#print path_id
#for h, s in sequences.items():
# print h, s[:100]
repeats_dict[path_id] = RepeatInfo(path_id, map(lambda e: e.edge_id, path),
inner_reads, input_reads, output_reads,
sequences, MULT)
return repeats_dict
def dump_repeats(repeats_info, filename):
with open(filename, "w") as f:
for repeat_id, info in repeats_info.iteritems():
f.write("#Repeat {0}\t{1}\n\n".format(repeat_id, info.multiplicity))
f.write("#All reads\t{0}\n".format(len(info.all_reads)))
for read in info.all_reads:
f.write(read + "\n")
f.write("\n")
for in_edge in info.in_reads:
f.write("#Input {0}\t{1}\n".format(in_edge, len(info.in_reads[in_edge])))
for read in info.in_reads[in_edge]:
f.write(read + "\n")
f.write("\n")
for out_edge in info.out_reads:
f.write("#Output {0}\t{1}\n".format(out_edge, len(info.out_reads[out_edge])))
for read in info.out_reads[out_edge]:
f.write(read + "\n")
f.write("\n")
def apply_changes(repeat_graph, trestle_results,
resolved_repeats_fasta):
#repeat_graph.output_dot("before.dot")
connections = _get_connections(trestle_results)
edges_to_remove = set()
for conn in connections:
repeat_graph.separate_path(conn.path, conn.id,
resolved_repeats_fasta[conn.sequence])
edges_to_remove.update(conn.path[1:-1])
for edge_id in edges_to_remove:
edge = repeat_graph.edges[edge_id]
if not edge.self_complement:
repeat_graph.remove_edge(repeat_graph.complement_edge(edge))
repeat_graph.remove_edge(edge)
#repeat_graph.output_dot("after.dot")
def _get_connections(trestle_results):
connections = []
resolved_repeats = set()
with open(trestle_results, "r") as f:
for line in f:
if line.startswith("Repeat"): continue
tokens = line.strip().split()
repeat_id, bridged = int(tokens[0]), tokens[6]
if bridged == "True" and abs(repeat_id) not in resolved_repeats:
resolved_repeats.add(abs(repeat_id))
repeat_path = map(int, tokens[1].split(","))
res_1, res_2 = tokens[10].split(":")
in_1, out_1 = res_1.split(",")
in_2, out_2 = res_2.split(",")
seq_1, seq_2 = tokens[11].split(":")
connection_1 = [int(in_1)] + repeat_path + [int(out_1)]
connection_2 = [int(in_2)] + repeat_path + [int(out_2)]
logger.debug("Repeat {0}: {1}, {2}"
.format(repeat_id, connection_1, connection_2))
new_seq_id = "trestle_resolved_" + str(repeat_id) + "_copy_"
connections.extend([Connection(new_seq_id + "1", connection_1, seq_1),
Connection(new_seq_id + "2", connection_2, seq_2)])
return connections
|
a = 1
b = 2
c = 3
dd = 4
efg = 6666
|
# https://codeforces.com/problemset/problem/1535/A
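# Four skills are read per test case; the bracket pairs players 1-2 and 3-4, and the
# stronger player always wins. The playoff is fair iff the two highest skills meet in
# the final, i.e. the winner (maximum) of each pair is one of the two largest values,
# which is exactly what the check below verifies.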
t = int(input())
cases = [list(map(int, input().split())) for _ in range(t)]
for case in cases:
top = sorted(case, reverse=True)[:2]
if max(case[0], case[1]) in top and max(case[2], case[3]) in top:
print('YES')
else:
print('NO')
|
from PIL import Image
from skimage import measure
import numpy as np
from shapely.geometry import Polygon, MultiPolygon
import json
def create_sub_masks(mask_image):
width, height = mask_image.size
# Initialize a dictionary of sub-masks indexed by RGB colors
sub_masks = {}
for x in range(width):
for y in range(height):
# Get the RGB values of the pixel
pixel = mask_image.getpixel((x, y))[:3]
# If the pixel is not black...
if pixel != (0, 0, 0):
# Check to see if we've created a sub-mask...
pixel_str = str(pixel)
sub_mask = sub_masks.get(pixel_str)
if sub_mask is None:
# Create a sub-mask (one bit per pixel) and add to the dictionary
# Note: we add 1 pixel of padding in each direction
# because the contours module doesn't handle cases
# where pixels bleed to the edge of the image
sub_masks[pixel_str] = Image.new("1", (width + 2, height + 2))
# Set the pixel value to 1 (default is 0), accounting for padding
sub_masks[pixel_str].putpixel((x + 1, y + 1), 1)
return sub_masks
def create_sub_mask_annotation(
sub_mask, image_id, category_id, annotation_id, is_crowd, keypoints=[]
):
# Find contours (boundary lines) around each sub-mask
# Note: there could be multiple contours if the object
# is partially occluded. (E.g. an elephant behind a tree)
contours = measure.find_contours(sub_mask, 0.5, positive_orientation="low")
segmentations = []
polygons = []
for contour in contours:
# Flip from (row, col) representation to (x, y)
# and subtract the padding pixel
for i in range(len(contour)):
row, col = contour[i]
contour[i] = (col - 1, row - 1)
# Make a polygon and simplify it
poly = Polygon(contour)
poly = poly.simplify(1.0, preserve_topology=True)
polygons.append(poly)
segmentation = np.array(poly.exterior.coords).ravel().astype(int).tolist()
segmentations.append(segmentation)
# Combine the polygons to calculate the bounding box and area
multi_poly = MultiPolygon(polygons)
x, y, max_x, max_y = multi_poly.bounds
width = max_x - x
height = max_y - y
bbox = (int(x), int(y), int(width), int(height))
area = multi_poly.area
annotation = {
"id": annotation_id,
"image_id": image_id,
"category_id": category_id,
"segmentation": segmentations,
"area": area,
"bbox": bbox,
"iscrowd": int(bool(is_crowd)),
}
    if keypoints is not None:
        annotation["keypoints"] = keypoints
        # COCO expects an integer count of (x, y, visibility) triplets
        annotation["num_keypoints"] = len(keypoints) // 3
return annotation
# mask_img = Image.open(
# "D:/Dokumente/Bachelorarbeit/Image Generator/Assets/mask.png")
# sub_masks = create_sub_masks(mask_img)
# annotation = ""
# for color, sub_mask in sub_masks.items():
# #category_id = category_ids[image_id][color]
# annotation = create_sub_mask_annotation(sub_mask, 0, 0, 0, False)
# # annotations.append(annotation)
# # annotation_id += 1
# # image_id += 1
# print(annotation)
|
"""
18 / 18 test cases passed.
Runtime: 28 ms
Memory Usage: 14.9 MB
"""
class Solution:
def convertToTitle(self, columnNumber: int) -> str:
ans = []
while columnNumber != 0:
columnNumber -= 1
ans.append(chr(65 + columnNumber % 26))
columnNumber //= 26
return ''.join(ans[::-1])
|
"""
2019.08.19, testing the first nasbench search space.
I should finish this within a weekend and should deploy this as soon as possible.
"""
from copy import copy
from functools import partial
import itertools
import logging
import os
from collections import deque, OrderedDict
import IPython
from search_policies.cnn.search_space.nas_bench.nasbench_api_v2 import NASBench_v2, ModelSpec_v2
from search_policies.cnn.search_space.nas_bench.sampler import obtain_full_model_spec
from search_policies.cnn.search_space.nas_bench.util import change_model_spec
from ..api import CNNSearchSpace
import numpy as np
from .model import NasBenchNet
class NASbenchSearchSpace(CNNSearchSpace):
sample_step_for_evaluation = 2
top_K_complete_evaluate = 200
evaluate_ids = None # stores the pool of evaluate ids. Fixed after initialized.
evaluate_model_spec_ids = None # Current pool of ids, can change from time to time.
def __init__(self, args, full_dataset=False):
"""
Transferring from original.
:param args:
"""
# self.args = args
super(NASbenchSearchSpace, self).__init__(args)
self.topology_fn = NasBenchNet
self.sample_step_for_evaluation = 2 if not args.debug else 30
# read nasbench related configs.
args.model_spec = obtain_full_model_spec(args.num_intermediate_nodes + 2)
v = self.args.num_intermediate_nodes + 2
self.nasbench = NASBench_v2(os.path.join(self.args.data, 'nasbench/nasbench_only108.tfrecord'),
config=f'v{v}_e9_op3', only_hash=not full_dataset)
self.nasbench_hashs, self.nasbench_model_specs = self.nasbench.model_hash_rank(full_spec=True)
self.rank_by_mid = [i for i in range(0, len(self.nasbench_hashs))]
self.available_ops = self.nasbench.available_ops
self.initialize_evaluate_pool()
## This belongs to interaction for now, should be removed later.
@property
def topologies(self):
return self.nasbench_model_specs
@property
def hashs(self):
return self.nasbench_hashs
@property
def num_architectures(self):
return len(self.nasbench_hashs)
def initialize_evaluate_pool(self):
# Process evaluation nodes
self.evaluate_ids = [i for i in range(0, self.num_architectures, self.sample_step_for_evaluation)]
# remove the landmark id from eval_ids
self.evaluate_model_spec_ids = deque(self.evaluate_ids)
if len(self.evaluate_model_spec_ids) > self.top_K_complete_evaluate:
self.evaluate_model_spec_ids = deque(sorted(
np.random.choice(self.evaluate_model_spec_ids, self.top_K_complete_evaluate, replace=False).tolist()))
def evaluate_model_spec_id_pool(self):
if len(self.evaluate_model_spec_ids) > self.top_K_complete_evaluate:
self.evaluate_model_spec_ids = deque(sorted(
np.random.choice(self.evaluate_model_spec_ids, self.top_K_complete_evaluate,
replace=True).tolist()))
return self.evaluate_model_spec_ids
def eval_model_spec_id_append(self, mid):
if mid in self.evaluate_model_spec_ids:
self.evaluate_model_spec_ids.remove(mid)
if len(self.evaluate_model_spec_ids) >= self.top_K_complete_evaluate:
old_arch = self.evaluate_model_spec_ids.pop()
logging.debug("Pop arch {} from pool".format(old_arch))
self.evaluate_model_spec_ids.append(mid)
def eval_model_spec_id_rank(self, ids, perfs):
"""
        Rank the evaluation id pool by performance (best first).
:param ids:
:param perfs:
:return: None
"""
        # rank the ids best-first so that popping from the tail of the deque drops the poorest-performing archs.
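        # Illustrative example: ids=[3, 7, 9], perfs=[0.91, 0.85, 0.95] -> deque([9, 3, 7]),
        # i.e. id 9 (perf 0.95) comes first and id 7 (perf 0.85) sits at the tail.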
old_archs_sorted_indices = np.argsort(perfs)[::-1]
rank_ids = [ids[i] for i in old_archs_sorted_indices]
if len(rank_ids) > self.top_K_complete_evaluate:
rank_ids = rank_ids[:self.top_K_complete_evaluate]
self.evaluate_model_spec_ids = deque(rank_ids)
def random_topology(self):
"""
Naive random sampling method.
:return: id, spec
"""
rand_spec_id = self.random_ids(1)[0]
rand_spec = self.nasbench_model_specs[rand_spec_id]
return rand_spec_id, rand_spec
def random_ids(self, number):
return sorted(np.random.choice(np.arange(0, self.num_architectures), number, replace=False).tolist())
def random_eval_ids(self, number):
"""
        Randomly sample eval ids from the fixed evaluation pool (self.evaluate_ids) rather than from the current pool.
:param number:
:return:
"""
return sorted(np.random.choice(self.evaluate_ids, min(number, len(self.evaluate_ids)),
replace=False).tolist())
def hash_by_id(self, i):
""" return model hash by id """
return self.nasbench_hashs[i]
def topology_by_id(self, i):
""" return topoligy by id """
return self.nasbench_model_specs[i]
def validate_model_indices(self, valid_queue_length, sampling=None):
"""
        Select the model indices to use for the validation step while training the supernet.
        For the time being this is a random selection without any prior knowledge;
        other sampling strategies may be supported later.
:param valid_queue_length:
:return: valid_model_pool
"""
nb_models = self.num_architectures
nb_batch_per_model = max(valid_queue_length // nb_models, 1)
if sampling is None:
valid_model_order = np.random.choice(range(nb_models), nb_models, False)
else:
raise NotImplementedError("not yet supported. to add in future.")
if nb_models > valid_queue_length:
valid_model_order = valid_model_order[:valid_queue_length]
nb_models = valid_queue_length
return nb_batch_per_model, nb_models, valid_model_order
def replace_eval_ids_by_random(self, number):
""" Random a subset and replace the bottom performed architectures. """
replace_number = 0
rand_eval_ids = self.random_eval_ids(number)
for eid in rand_eval_ids:
if eid not in self.evaluate_model_spec_ids:
self.eval_model_spec_id_append(eid)
replace_number += 1
return replace_number
def process_archname_by_id(self, arch):
# arch is mid
return f"{arch}, {self.hashs[arch]}"
def generate_new_arch(self, number):
"""
        Randomly sample `number` architectures and return their model specs (no ids are returned).
:param number:
:return:
"""
archs = []
for _ in range(number):
_, m = self.random_topology()
archs.append(m)
return archs
# for sparse kendall tau
def query_gt_perfs(self, model_ids):
"""
        Return the ground-truth test accuracy for the given model ids.
        :param model_ids: ids of the queried models
        :return: ground-truth performance for each id.
"""
return [self.nasbench.perf_rank[i][1] for i in model_ids]
class NasBenchSearchSpaceLinear(NASbenchSearchSpace):
def __init__(self, args):
super(NasBenchSearchSpaceLinear, self).__init__(args)
# process only linear labels
self.original_model_specs = self.nasbench_model_specs
self.original_hashs = self.nasbench_hashs
self.sample_step_for_evaluation = 1
self.process_nasbench_linear()
def process_nasbench_linear(self):
""" Process nasbench linear search space. This is a much simpler search space. """
# only take the linear architectures. a much simpler space.
full_spec = obtain_full_model_spec(self.args.num_intermediate_nodes)
matrix = np.eye(self.args.num_intermediate_nodes + 2, self.args.num_intermediate_nodes + 2, 1).astype(np.int)
        # we also need to add ranks here.
self.nasbench_hashs = []
self.nasbench_model_specs = []
specs = OrderedDict()
hashs = OrderedDict()
for labeling in itertools.product(*[range(len(self.nasbench.available_ops))
for _ in range(self.args.num_intermediate_nodes)]):
ops = ['input', ] + [self.nasbench.available_ops[i] for i in labeling] + ['output',]
new_spec = ModelSpec_v2(matrix.copy(), copy(ops))
new_hash = new_spec.hash_spec()
_id = self.original_hashs.index(new_hash)
specs[_id] = new_spec
hashs[_id] = new_hash
rank_key = sorted(hashs.keys())
self.nasbench_hashs = [hashs[_id] for _id in rank_key]
self.nasbench_model_specs = [specs[_id] for _id in rank_key]
self.sample_step_for_evaluation = 1
self.initialize_evaluate_pool()
# IPython.embed(header='check this is correct or not')
logging.info("Linear space, totoal architecture number is {}".format(self.num_architectures))
def evaluate_model_spec_id_pool(self):
return self.evaluate_model_spec_ids
class NasBenchSearchSpaceSubsample(NASbenchSearchSpace):
# keep track of original space ids, because new id will be flushed.
rank_id_in_original_nasbench = []
def __init__(self, args):
super(NasBenchSearchSpaceSubsample, self).__init__(args)
self.original_model_specs = self.nasbench_model_specs
self.original_hashs = self.nasbench_hashs
self.sample_step_for_evaluation = 1
self.process_subsample_space()
def process_subsample_space(self):
# raise NotImplementedError('finish later')
sample_num = min(self.args.num_archs_subspace, self.num_architectures)
subspace_ids = sorted([int(a) for a in np.random.choice(
len(self.nasbench_model_specs), sample_num, replace=False)])
self.rank_id_in_original_nasbench = subspace_ids
self.nasbench_hashs = [self.original_hashs[_id] for _id in subspace_ids]
self.nasbench_model_specs = [self.original_model_specs[_id] for _id in subspace_ids]
self.initialize_evaluate_pool()
print("Random subspace with {} architectures: {}".format(self.num_architectures, subspace_ids[:100]))
print("Evaluation architecture pool: {}".format(self.evaluate_model_spec_ids))
def nodes_to_key(nodes):
# always [0, 1, 2, ..., num_intermediate_node]
# nodes = range(len(nodes))
# return ','.join(map(str, nodes))
return len(nodes)
def key_to_nodes(key):
# return [int(a) for a in key.split(',')]
return list(range(key))
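# Note: the "key" for a set of involving nodes is simply its size, e.g.
# nodes_to_key([1, 2, 3]) == 3 and key_to_nodes(3) == [0, 1, 2].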
def model_spec_to_involving_nodes(spec):
matrix = spec.matrix.copy()
active_nodes = np.argwhere(matrix.sum(axis=1)[1:-1] > 0).reshape(-1)
return active_nodes.tolist(), matrix
def permunate_ops_all(n, OPS):
if n == 0:
yield []
elif n == 1:
for o in OPS:
yield [o,]
else:
for o in OPS:
for rest_ops in permunate_ops_all(n-1, OPS):
yield [o,] + rest_ops
def permunate_ops_last_node(n, OPS, default_pos=0):
for o in OPS:
yield [OPS[default_pos], ] * (n-1) + [o,]
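# Illustrative example: permunate_ops_last_node(3, ['op_a', 'op_b']) yields
# ['op_a', 'op_a', 'op_a'] and ['op_a', 'op_a', 'op_b'] -- only the last node's op
# varies while the others stay fixed at OPS[default_pos].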
def permutate_ops_given_topology(matrix, OPS, permutate_last=True):
# print('permutate under topology matrix ', matrix)
node = matrix.shape[0] - 2
if permutate_last:
all_ops = permunate_ops_last_node(node, OPS, default_pos=0)
else:
all_ops = permunate_ops_all(node, OPS)
for ops in all_ops:
ops = ['input', ] + ops + ['output',]
copy_matrix = matrix.copy()
a = ModelSpec_v2(copy_matrix, ops)
if a.valid_spec:
yield a
class NasBenchSearchSpaceFairNasTopology(NASbenchSearchSpace):
def __init__(self, args):
super(NasBenchSearchSpaceFairNasTopology, self).__init__(args)
self.nasbench_involving_nodes = OrderedDict()
for ind, spec in enumerate(self.topologies):
active_nodes, matrix = model_spec_to_involving_nodes(spec)
key = nodes_to_key(active_nodes)
if key in self.nasbench_involving_nodes.keys():
self.nasbench_involving_nodes[key].append(ind)
else:
self.nasbench_involving_nodes[key] = [ind, ]
self.nasbench_topo_sample_probs = []
for k, v in self.nasbench_involving_nodes.items():
logging.debug(f'involving nodes {k} : num arch {len(v)}')
self.nasbench_topo_sample_probs.append(len(v))
self.nasbench_topo_sample_probs = list(reversed(self.nasbench_topo_sample_probs))
def nasbench_sample_matrix_from_list(self, nodes, probs):
"""
        Recursively sample topology matrices from the node list with the given probabilities.
        This cooperates with the FairNAS topology sampler (fair sampling).
        :param nodes: node ids as a list, e.g. [1, ..., num_intermediate_nodes]
        :param probs: probabilities over how many nodes to sample, len(probs) == len(nodes) + 1
        :return: list of sampled matrices, terminated by None
"""
def normalize(probs):
return list(map(lambda x: float(x / sum(probs)), probs))
if len(nodes) == 0:
return [None,]
else:
try:
total = self.args.num_intermediate_nodes
probs = normalize(probs)
num_sample = np.random.choice(np.arange(len(nodes) + 1), 1, p=probs)
sample_nodes = sorted(np.random.choice(nodes, num_sample, replace=False))
rest_nodes = list(set(nodes) - set(sample_nodes))
new_probs = probs[:len(rest_nodes) + 1]
# nasbench matrix including input and output.
topo_matrices_ids = self.nasbench_involving_nodes[nodes_to_key(sample_nodes)]
sample_id = np.random.choice(topo_matrices_ids, 1)[0]
sample_matrix = self.topologies[sample_id].matrix.copy()
if sample_matrix.shape[0] == total + 2:
# terminate the looping.
return [sample_matrix, None]
else:
# Make sample nodes to full matrix spec.
sample_nodes = [0,] + sample_nodes + [total + 1]
# make new_matrix[sample_nodes,:][:, sample_nodes] = sample_matrix
matrix = np.zeros([total + 2, total + 2], dtype=int)
_matrix = matrix[sample_nodes,:]
_matrix[:, sample_nodes] = sample_matrix
matrix[sample_nodes,:] = _matrix
return [matrix,] + self.nasbench_sample_matrix_from_list(rest_nodes, new_probs)
except Exception as e:
logging.error(f'{e}')
IPython.embed(header='Check mistake of nasbench_sample_matrix_from_list')
class NasBenchSearchSpaceICLRInfluenceWS(NasBenchSearchSpaceSubsample):
arch_hash_by_group = {}
arch_ids_by_group = {}
# preprocess this search space.
def process_subsample_space(self):
# composing the linear search space.
# Variates of ops, but topology is sampled from a pool.
nodes = self.args.num_intermediate_nodes
AVAILABLE_OPS = self.nasbench.available_ops
logging.info("Processing NASBench WS influence Search Space ...")
permutate_op_fn = partial(permutate_ops_given_topology,
permutate_last=True)
logging.info("Permutating the last node only? {}".format(
not self.args.nasbench_search_space_ws_influence_full))
subspace_ids = []
subspace_model_specs_dict = {}
# make all possible matrix:
for i in range(nodes):
matrix = np.zeros((nodes + 2, nodes + 2), dtype=np.int)
matrix[nodes, -1] = 1 # connect output to node n-1.
matrix[i, -2] = 1 # connect last node to one of the previous node.
if i > 0:
if i > 1:
matrix[0:i, 1:i + 1] = np.triu(np.ones((i, i), dtype=np.int))
else:
matrix[0, 1] = 1
logging.info(f'Node {i}-{nodes} connection: {matrix}')
self.arch_hash_by_group[i] = []
self.arch_ids_by_group[i] = []
for spec in permutate_op_fn(matrix, AVAILABLE_OPS):
hash = spec.hash_spec()
spec.resume_original()
try:
_id = self.nasbench_hashs.index(hash)
except ValueError as e:
logging.error("Spec is not valid here: {}".format(e))
logging.error(spec)
continue
# subspace_ids.append(_id)
if hash not in subspace_model_specs_dict.keys():
# only keep one spec.
subspace_model_specs_dict[hash] = spec
self.arch_hash_by_group[i].append(hash)
self.arch_ids_by_group[i].append(_id)
subspace_ids.append(_id)
# count = 0
# for i in range(nodes):
# n_g = []
# n_id = []
# # removing the duplicated items
# logging.info(f"Process rank group {i}, original length {len(self.arch_ids_by_group[i])} ... ")
# for _id, h in zip(self.arch_ids_by_group[i], self.arch_hash_by_group):
# if _id not in n_id:
# n_id.append(_id)
# n_g.append(h)
# self.arch_ids_by_group[i] = n_id
# self.arch_hash_by_group[i] = n_g
# assert len(n_id) == len(n_g)
# count += len(n_id)
# logging.info("Length after processing: {}".format(self.arch_ids_by_group[i]))
sort_ids = np.argsort(subspace_ids)
sort_subspace_ids = [subspace_ids[i] for i in sort_ids]
self.nasbench_model_specs_prune = [self.original_model_specs[i] for i in sort_subspace_ids]
self.nasbench_hashs = [self.original_hashs[_id] for _id in sort_subspace_ids]
self.nasbench_model_specs = [subspace_model_specs_dict[h] for h in self.nasbench_hashs]
self.initialize_evaluate_pool()
logging.info("Totally {} architectures: {}".format(self.num_architectures, subspace_ids[:100]))
logging.info("Evaluation architecture pool: {}".format(self.evaluate_model_spec_ids))
|
""""Hacs base setup task."""
# pylint: disable=abstract-method
from __future__ import annotations
from datetime import timedelta
from timeit import default_timer as timer
from homeassistant.core import HomeAssistant
from ..base import HacsBase
from ..enums import HacsStage
from ..mixin import LogMixin
class HacsTask(LogMixin):
"""Hacs task base."""
hass: HomeAssistant
events: list[str] | None = None
schedule: timedelta | None = None
stages: list[HacsStage] | None = None
def __init__(self, hacs: HacsBase, hass: HomeAssistant) -> None:
self.hacs = hacs
self.hass = hass
@property
def slug(self) -> str:
"""Return the check slug."""
return self.__class__.__module__.rsplit(".", maxsplit=1)[-1]
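    # Subclasses provide the actual work as either a synchronous `execute(self)` method
    # (run in an executor job) or an `async_execute(self)` coroutine; execute_task()
    # below dispatches to whichever of the two is defined.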
async def execute_task(self, *_, **__) -> None:
"""Execute the task defined in subclass."""
if self.hacs.system.disabled:
self.log.warning(
"Skipping task %s, HACS is disabled - %s",
self.slug,
self.hacs.system.disabled_reason,
)
return
self.log.info("Executing task: %s", self.slug)
start_time = timer()
try:
if task := getattr(self, "execute", None):
await self.hass.async_add_executor_job(task)
elif task := getattr(self, "async_execute", None):
await task() # pylint: disable=not-callable
except BaseException as exception: # pylint: disable=broad-except
self.log.error("Task %s failed: %s", self.slug, exception)
else:
self.log.debug(
"Task %s took " "%.2f seconds to complete",
self.slug,
timer() - start_time,
)
|
import unittest
import json
from ... import create_app
from ...api.v1.model.sales import Sales
class TestInvalidData(unittest.TestCase):
def setUp(self):
self.test = create_app('testing').test_client()
self.content_type = 'application/json'
payload = {'password': 'admin', 'email_address': 'admin@gmail.com'}
response = self.test.post('api/v1/auth/login',content_type=self.content_type,
data=json.dumps(payload))
data =json.loads(response.get_data().decode('UTF-8'))
token = data['result']
self.headers = {'Authorization':'{}'.format(token)}
def tearDown(self):
self.test = None
self.content_type = None
#test if the user entered a valid json payload
def test_invalid_payload(self):
payload = {'Product_name':'Gucci dress','productId':0,'quantity':10,'xyz':''}
response = self.test.post('api/v1/sales/',content_type=self.content_type,
data=json.dumps(payload),headers=self.headers)
data = json.loads(response.get_data().decode('UTF-8'))
self.assertEqual(response.status_code,406)
self.assertEqual(data,{'result':'Payload is invalid'})
#test if user entered a valid data type
def test_invalid_data_type(self):
payload = {'Product_name': 'Gucci dress', 'productId':0,'quantity':'0', 'price': 100}
response = self.test.post('api/v1/sales/',content_type=self.content_type,
data=json.dumps(payload),headers=self.headers)
data = json.loads(response.get_data().decode('UTF-8'))
self.assertEqual(response.status_code,400)
self.assertEqual(data['message'],'Input payload validation failed')
#test if the quantity is less than 1
def test_min_quantity(self):
payload = {'Product_name': 'Gucci dress', 'productId':0,'quantity':-5, 'price': 100}
response = self.test.post('api/v1/sales/',content_type=self.content_type,
data=json.dumps(payload),headers=self.headers)
data = json.loads(response.get_data().decode('UTF-8'))
self.assertEqual(response.status_code,406)
self.assertEqual(data,{'result':'Product quantity cannot be less than 1'})
#test if the product ID is less than 0
def test_minimum_id(self):
payload = {'Product_name': 'Gucci dress', 'productId':-2,'quantity':90, 'price': 100}
response = self.test.post('api/v1/sales/',content_type=self.content_type,
data=json.dumps(payload),headers=self.headers)
data = json.loads(response.get_data().decode('UTF-8'))
self.assertEqual(response.status_code,406)
self.assertEqual(data, {'result':'productId cannot be less than 0'})
class TestValidData(unittest.TestCase):
def setUp(self):
self.test = create_app('testing').test_client()
self.content_type = 'application/json'
payload = {'role': 'admin', 'password': 'admin', 'email': 'admin@gmail.com'}
response = self.test.post('api/v1/auth/login',content_type=self.content_type,
data=json.dumps(payload))
data =json.loads(response.get_data().decode('UTF-8'))
token = data['result']
self.headers = {'Authorization':'{}'.format(token)}
self.product = {'name': 'Gucci dress', 'quantity': 21, 'category': 'dresses','moq':0,'price':100}
self.test.post('api/v1/products/',content_type=self.content_type,
data=json.dumps(self.product),headers=self.headers)
self.payload = {'quantity':10,'productId':0}
def tearDown(self):
self.test = None
self.content_type = None
self.product = None
self.payload = None
def get_one_sales(self):
response = self.test.post('api/v1/sales/',content_type=self.content_type,
data=json.dumps(self.payload),headers=self.headers)
self.assertEqual(response.status_code,200)
response = self.test.get('api/v1/sales/{}'.format(0),content_type=self.content_type)
self.assertEqual(response.status_code,200)
def test_add_sales(self):
        response = self.test.post('api/v1/sales/',content_type=self.content_type,
data=json.dumps(self.payload),headers=self.headers)
data = json.loads(response.get_data().decode('UTF-8'))
self.assertEqual(data,{'result': 'sales added'})
self.assertEqual(response.status_code,201)
def test_get_all_sales(self):
response = self.test.post('api/v1/sales/',content_type=self.content_type,
data=json.dumps(self.payload),headers=self.headers)
self.assertEqual(response.status_code,201)
response = self.test.get('api/v1/sales/',content_type=self.content_type
,headers=self.headers)
data = json.loads(response.get_data().decode('UTF-8'))
self.assertEqual(response.status_code,200)
if __name__ == '__main__':
unittest.main()
|
from django.utils.formats import localize
from rest_framework.serializers import (
HyperlinkedIdentityField,
SerializerMethodField,
ValidationError,
)
from rest_framework import serializers
from django.contrib.auth import get_user_model
from ...sale.models import (
Terminal,
)
from ...allocate.models import Allocate, AllocatedItem
from ...product.models import (
ProductVariant,
Stock,
)
from decimal import Decimal
from structlog import get_logger
logger = get_logger(__name__)
User = get_user_model()
class TrackSerializer(serializers.ModelSerializer):
class Meta:
model = AllocatedItem
fields = (
'id',
'order',
'sku',
'stock_id',
'allocated_quantity',
'quantity',
'sold',
'unsold',
'unit_cost',
'total_cost',
'product_name',
'product_category',
'tax',
'discount',
)
class ItemsSerializer(serializers.ModelSerializer):
available_stock = SerializerMethodField()
item_pk = SerializerMethodField()
class Meta:
model = AllocatedItem
fields = (
'id',
'order',
'sku',
'stock_id',
'quantity',
'sold',
'unsold',
'allocated_quantity',
'unit_cost',
'total_cost',
'product_name',
'product_category',
'available_stock',
'item_pk',
'tax',
'discount',
)
def get_item_pk(self,obj):
return obj.pk
def get_available_stock(self,obj):
try:
stock = ProductVariant.objects.get(sku=obj.sku)
return stock.get_stock_quantity()
except:
return 0
class CarAllocateListSerializer(serializers.ModelSerializer):
update_url = HyperlinkedIdentityField(view_name='allocate-api:update-allocate')
car_url = HyperlinkedIdentityField(view_name='dashboard:car_transfer_list')
allocated_items = ItemsSerializer(many=True)
allocate_status = SerializerMethodField()
cashier = SerializerMethodField()
car_name = SerializerMethodField()
date = SerializerMethodField()
total_allocated = SerializerMethodField()
total_sold = SerializerMethodField()
car_total_net = SerializerMethodField()
agent_name = SerializerMethodField()
class Meta:
model = Allocate
fields = (
'id',
'user',
'invoice_number',
'total_net',
'sub_total',
'update_url',
'car_url',
'balance',
'terminal',
'amount_paid',
'agent_name',
'car_name',
'mobile',
'customer_name',
'cashier',
'allocate_status',
'total_tax',
'discount_amount',
'due_date',
'date',
'allocated_items',
'total_allocated',
'total_sold',
'car_total_net'
)
def get_car_total_net(self, obj):
return "{:,}".format(Allocate.objects.car_total_net(obj))
def get_total_allocated(self, obj):
return "{:,}".format(Allocate.objects.total_allocated(obj,self.context['date']))
def get_total_sold(self, obj):
return "{:,}".format(Allocate.objects.total_sold(obj))
def get_date(self, obj):
return localize(obj.created)
def get_agent_name(self, obj):
return obj.agent.name
def get_car_name(self, obj):
try:
return str(obj.car.name)+' ('+str(obj.car.number)+')'
except Exception as e:
return ''
def get_allocate_status(self, obj):
if obj.status == 'payment-pending':
return '<span class="badge badge-flat border-warning text-warning-600" > Pending..</span>'
return '<span class ="text-success icon-checkmark-circle" > <i> </i> </span>'
def get_cashier(self, obj):
name = User.objects.get(pk=obj.user.id)
return name.name
class AllocateListSerializer(serializers.ModelSerializer):
update_url = HyperlinkedIdentityField(view_name='allocate-api:update-allocate')
car_allocate_url = HyperlinkedIdentityField(view_name='dashboard:car-allocate-detail')
allocated_items = ItemsSerializer(many=True)
cashier = SerializerMethodField()
total_allocated = SerializerMethodField()
total_sold = SerializerMethodField()
agent_name = SerializerMethodField()
allocate_status = SerializerMethodField()
date = SerializerMethodField()
class Meta:
model = Allocate
fields = (
'id',
'user',
'invoice_number',
'total_net',
'sub_total',
'update_url',
'car_allocate_url',
'balance',
'terminal',
'amount_paid',
'agent',
'agent_name',
'car',
'mobile',
'customer_name',
'cashier',
'status',
'total_tax',
'discount_amount',
'due_date',
'debt',
'total_allocated',
'total_sold',
'allocate_status',
'date',
'allocated_items'
)
def get_date(self, obj):
return localize(obj.created)
def get_allocate_status(self, obj):
if obj.status == 'payment-pending':
return '<span class="badge badge-flat border-warning text-warning-600" > Pending..</span>'
return '<span class ="text-success icon-checkmark-circle" > <i> </i> </span>'
def get_cashier(self, obj):
name = User.objects.get(pk=obj.user.id)
return name.name
def get_total_allocated(self, obj):
return obj.total_allocated()
def get_total_sold(self, obj):
return obj.sold_items()
def get_agent_name(self, obj):
return obj.agent.name
class CreateAllocateSerializer(serializers.ModelSerializer):
update_url = HyperlinkedIdentityField(view_name='allocate-api:update-allocate')
allocated_items = TrackSerializer(many=True)
class Meta:
model = Allocate
fields = (
'id',
'user',
'invoice_number',
'total_net',
'sub_total',
'update_url',
'balance',
'terminal',
'amount_paid',
'agent',
'car',
'mobile',
'customer_name',
'status',
'total_tax',
'allocated_items',
)
def validate_total_net(self,value):
data = self.get_initial()
try:
self.total_net = Decimal(data.get('total_net'))
except:
raise ValidationError('Total Net should be a decimal/integer')
return value
def validate_terminal(self,value):
data = self.get_initial()
self.terminal_id = int(data.get('terminal'))
self.l=[]
terminals = Terminal.objects.all()
for term in terminals:
self.l.append(term.pk)
if not self.terminal_id in self.l:
raise ValidationError('Terminal specified does not exist')
return value
def create(self, validated_data):
try:
total_net = Decimal(validated_data.get('total_net'))
except:
total_net = Decimal(0)
solditems_data = validated_data.pop('allocated_items')
credit = Allocate.objects.create(
user=validated_data.get('user'),
invoice_number=validated_data.get('invoice_number'),
total_net=validated_data.get('total_net'),
sub_total=validated_data.get('sub_total'),
balance=validated_data.get('balance'),
terminal=validated_data.get('terminal'),
amount_paid=validated_data.get('amount_paid'),
agent=validated_data.get('agent'),
car=validated_data.get('car'),
status='payment-pending',
mobile=validated_data.get('mobile'),
debt=validated_data.get('total_net'),
customer_name=validated_data.get('customer_name'))
for solditem_data in solditems_data:
item_temp = AllocatedItem.objects.create(allocate=credit, **solditem_data)
item = item_temp
item_temp.delete()
carry = int(solditem_data['allocated_quantity'])
checker = True
while checker:
stock = Stock.objects.filter(variant__sku=solditem_data['sku']).first()
if stock:
item.id = None
if stock.quantity > 0:
if carry >= stock.quantity:
try:
item.unit_purchase = stock.cost_price.gross
except:
pass
try:
item.total_purchase = stock.cost_price.gross * Decimal(stock.quantity)
except:
pass
item.stock_id = stock.pk
item.allocated_quantity = stock.quantity
item.minimum_price = stock.minimum_price.gross
item.wholesale_override = stock.wholesale_override.gross
item.low_stock_threshold = stock.low_stock_threshold
item.unit_cost = stock.price_override.gross
item.total_cost = stock.price_override.gross * stock.quantity
item.save()
carry -= stock.quantity
stock.delete()
if carry <= 0:
checker = False
else:
# Stock.objects.decrease_stock(stock, carry)
stock.quantity -= carry
stock.save()
try:
item.unit_purchase = stock.cost_price.gross
except:
pass
try:
item.total_purchase = stock.cost_price.gross * Decimal(carry)
except:
pass
item.stock_id = stock.pk
item.allocated_quantity = carry
item.minimum_price = stock.minimum_price.gross
item.wholesale_override = stock.wholesale_override.gross
item.low_stock_threshold = stock.low_stock_threshold
item.unit_cost = stock.price_override.gross
item.total_cost = stock.price_override.gross * carry
item.save()
checker = False
else:
stock.delete()
checker = False
else:
print('stock not found')
checker = False
# try:
# stock = Stock.objects.get(variant__sku=solditem_data['sku'])
# if stock:
# Stock.objects.decrease_stock(stock,solditem_data['allocated_quantity'])
# print stock.quantity
# else:
# print('stock not found')
# except Exception as e:
# logger.error(e)
return credit
class AllocateUpdateSerializer(serializers.ModelSerializer):
allocated_items = TrackSerializer(many=True)
class Meta:
model = Allocate
fields = (
'id',
'invoice_number',
'total_net',
'sub_total',
'balance',
'terminal',
'amount_paid',
'mobile',
'customer_name',
'status',
'total_tax',
'discount_amount',
'debt',
'allocated_items',
)
    def validate_total_net(self, value):
        data = self.get_initial()
        try:
            total_net = Decimal(data.get('total_net'))
        except:
            raise ValidationError('Total Net should be a decimal/integer')
        return value
def validate_debt(self, value):
data = self.get_initial()
try:
debt = Decimal(data.get('debt'))
except:
raise ValidationError('Debt should be a decimal/integer')
return value
    def validate_amount_paid(self, value):
data = self.get_initial()
try:
amount_paid = Decimal(data.get('amount_paid'))
except:
raise ValidationError('Amount paid should be a decimal/integer')
return value
def update(self, instance, validated_data):
terminal = Terminal.objects.get(pk=instance.terminal_id)
for x in validated_data.get('allocated_items'):
# get old stock
old = instance.item_detail(x['stock_id'])
old.sold += x['quantity']
old.quantity = x['quantity']
old.total_cost = x['total_cost']
unsold = old.allocated_quantity - old.sold
old.unsold = unsold
# handle return unsold product to stock
if validated_data.get('status', instance.status) == 'fully-paid':
try:
stock = Stock.objects.get(pk=x['stock_id'])
stock.quantity += unsold
stock.save()
except Exception as e:
if unsold > 0:
variant = ProductVariant.objects.get(sku=x['sku'])
Stock.objects.create(
variant=variant,
price_override=old.unit_cost,
wholesale_override=old.wholesale_override,
minimum_price=old.minimum_price,
low_stock_threshold=old.low_stock_threshold,
cost_price=old.unit_purchase,
quantity=unsold)
print('stock not found. Recreated new stock')
old.save()
terminal.amount += Decimal(validated_data.get('amount_paid', instance.amount_paid))
terminal.save()
instance.debt = instance.debt-validated_data.get('amount_paid', instance.amount_paid)
instance.total_sale += validated_data.get('amount_paid', instance.amount_paid)
instance.amount_paid = validated_data.get('amount_paid', instance.amount_paid)
instance.status = validated_data.get('status', instance.status)
instance.mobile = validated_data.get('mobile', instance.mobile)
instance.customer_name = validated_data.get('customer_name', instance.customer_name)
instance.save()
return instance
|
from glob import glob
import pickle as pkl
import copy
import math
import os
import random
import numpy as np
from tqdm import tqdm
from collections import defaultdict
from config import cfg
if __name__ == '__main__':
preds_list = []
all_paths = list(glob(cfg.cache + '*.pkl'))
for path in all_paths:
if path.endswith('ensemble.pkl'):
continue
print('Loading cache from %s' % path)
with open(path, 'rb') as f:
cache = pkl.load(f)
preds_list.append(cache['pred_entries'])
res = copy.deepcopy(preds_list[0])
for entry_id, res_entry in tqdm(enumerate(res)):
res_entry['rel_scores'] = []
for i in range(len(preds_list[0][entry_id]['rel_scores'])):
# pick by voting
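            # Majority vote across models: group each model's 51-dim score row by its
            # argmax predicate, keep the group with the most votes, and pick one of its
            # score rows at random as the ensembled row for this relation.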
pred2scores_row = defaultdict(list)
for j in range(len(preds_list)):
rel_scores_row = preds_list[j][entry_id]['rel_scores'][i]
assert rel_scores_row.shape == (51,)
pred = rel_scores_row.argmax()
pred2scores_row[pred].append(rel_scores_row)
max_votes = 0
final_scores_row = None
for k, v in pred2scores_row.items():
votes = len(v)
if votes > max_votes:
max_votes = votes
final_scores_row = random.choice(v)
res_entry['rel_scores'].append(final_scores_row)
res_entry['rel_scores'] = np.stack(res_entry['rel_scores'])
# Sort by scores
obj_scores = res_entry['obj_scores']
rel_inds = res_entry['pred_rel_inds']
pred_scores = res_entry['rel_scores']
obj_scores0 = obj_scores[rel_inds[:, 0]]
obj_scores1 = obj_scores[rel_inds[:, 1]]
pred_scores_max = pred_scores[:, 1:].max(1)
rel_scores_argmaxed = pred_scores_max * obj_scores0 * obj_scores1
rel_scores_vs = np.sort(rel_scores_argmaxed)[::-1]
rel_scores_idx = np.argsort(rel_scores_argmaxed)[::-1]
res_entry['pred_rel_inds'] = rel_inds[rel_scores_idx]
res_entry['rel_scores'] = pred_scores[rel_scores_idx]
assert res_entry['rel_scores'].shape == preds_list[0][entry_id]['rel_scores'].shape
out_path = cfg.cache + 'ensemble.pkl'
with open(out_path, 'wb') as f:
pkl.dump(res, f)
print('Results saved at %s' % out_path)
|
from uuid import UUID
from jose import jwt
from pydantic import BaseModel
from settings import Config
class TokenData(BaseModel):
player_uuid: UUID
game_uuid: UUID
def decode_token(token: str) -> TokenData:
config = Config()
return TokenData(
**jwt.decode(
token, config.jwt_token_secret_key, algorithms=config.jwt_token_algorithm
)
)
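# Hypothetical usage sketch (the UUID values are made up); it assumes the token was signed
# with the same secret and algorithm held in settings.Config:
#   config = Config()
#   token = jwt.encode(
#       {"player_uuid": str(uuid4()), "game_uuid": str(uuid4())},
#       config.jwt_token_secret_key,
#       algorithm=config.jwt_token_algorithm,
#   )
#   decode_token(token)  # -> TokenData(player_uuid=UUID(...), game_uuid=UUID(...))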
|
import pkgutil
import os
import sys
from collections import defaultdict
from multiprocessing import Pool
def tree():
"""Tree data structure.
See https://gist.github.com/hrldcpr/2012250
"""
return defaultdict(tree)
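# Usage sketch: nested keys spring into existence on first access, e.g.
#   t = tree()
#   t['users']['alice']['role'] = 'admin'  # intermediate dicts are created automatically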
def get_datapath(package, resource):
"""Rewrite of pkgutil.get_data() that just returns the file path.
Taken from https://stackoverflow.com/a/13773912"""
loader = pkgutil.get_loader(package)
if loader is None or not hasattr(loader, 'get_data'):
return None
mod = sys.modules.get(package) or loader.load_module(package)
if mod is None or not hasattr(mod, '__file__'):
return None
# Modify the resource name to be compatible with the loader.get_data
# signature - an os.path format "filename" starting with the dirname of
# the package's __file__
parts = resource.split('/')
parts.insert(0, os.path.dirname(mod.__file__))
resource_name = os.path.join(*parts)
return resource_name
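# Usage sketch (package and resource names are purely illustrative):
#   get_datapath('mypackage', 'data/defaults.cfg')
#   # -> '<.../site-packages>/mypackage/data/defaults.cfg', or None if it cannot be resolved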
def multithreading_map(func, iterable, threads=1, pool=None):
if threads > 1 or pool is not None:
if pool is None:
pool_instance = Pool(threads)
else:
pool_instance = pool
try:
result = pool_instance.map(func, iterable)
except:
pool_instance.close()
raise
pool_instance.close()
if pool is None:
pool_instance.join()
else:
result = map(func, iterable)
return result
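# Usage sketch: with threads > 1 (or an explicit pool) the work is spread over a
# multiprocessing Pool and a list is returned; with threads == 1 and no pool it falls
# back to the built-in map(), a lazy iterator on Python 3, e.g.
#   list(multithreading_map(lambda x: x * x, range(4)))  # -> [0, 1, 4, 9]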
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-08-08 00:06
from __future__ import unicode_literals
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='ChangeRequest',
fields=[
('sys_id', models.UUIDField(primary_key=True, serialize=False)),
('number', models.CharField(help_text='The Change Order number', max_length=32)),
('title', models.CharField(help_text='Title of the ServiceNow Change Request', max_length=160)),
('description', models.TextField(help_text='Description of the ServiceNow Change Request', validators=[django.core.validators.MaxLengthValidator(4000)])),
('assignment_group_guid', models.UUIDField()),
                ('state', models.CharField(choices=[('1', 'Open'), ('2', 'In Progress'), ('3', 'Complete'), ('4', 'Complete With Errors')], help_text='The current state the change order is in.', max_length=1)),
],
options={
'verbose_name': 'service-now change request',
'verbose_name_plural': 'service-now change requests',
},
),
]
|
ThoughtPrefix = '.'
def isThought(message):
return message and message.startswith(ThoughtPrefix)
def removeThoughtPrefix(message):
if isThought(message):
return message[len(ThoughtPrefix):]
else:
return message
def findAvatarName(id):
info = base.cr.identifyAvatar(id)
return info.getName() if info else ''
|
from rest_framework.generics import CreateAPIView
from rest_framework.renderers import TemplateHTMLRenderer, JSONRenderer
from rest_framework.response import Response
from rest_framework.permissions import IsAuthenticated, AllowAny
from djvue.views import FileUploadView
from .serializers import LoginSerializer, ProfileSerializer, PDFUploadSerializer
class PDFUploadView(FileUploadView):
permission_classes = (AllowAny,)
serializer_class = PDFUploadSerializer
class LoginView(CreateAPIView):
renderer_classes = [TemplateHTMLRenderer, JSONRenderer]
serializer_class = LoginSerializer
# permission_classes = (IsAuthenticated,)
permission_classes = (AllowAny,)
template_name = "login.html"
def get(self, request, *args, **kwargs):
"""
Used only to serve the serializer definition
"""
serializer = self.get_serializer()
data = {"serializer": serializer}
return Response(data)
class ProfileView(CreateAPIView):
renderer_classes = [TemplateHTMLRenderer, JSONRenderer]
serializer_class = ProfileSerializer
permission_classes = (AllowAny,)
template_name = "profile.html"
def get(self, request, *args, **kwargs):
"""
Used only to serve the serializer definition
"""
serializer = self.get_serializer()
data = {"serializer": serializer}
return Response(data)
|
# coding: utf-8
r"""
>>> from django.contrib.comments.models import Comment
>>> from django.contrib.auth.models import User
>>> u = User.objects.create_user('commenttestuser', 'commenttest@example.com', 'testpw')
>>> c = Comment(user=u, comment=u'\xe2')
>>> c
<Comment: commenttestuser: â...>
>>> print c
commenttestuser: â...
"""
|
from config import palette
from colorthief import ColorThief
def runPalette(name, root):
"""
    Prints the most-used colors of an image.
    Args:
        name (str): File name
        root (str): Parent path of the file; the folder in which the file
            is stored
"""
if palette["enable"]:
print(
f"""\nImage Palette: {
ColorThief(f'{root}/{name}').get_palette(
color_count=palette['colorCount']
)
}"""
)
|
from __future__ import absolute_import
from optparse import make_option
import sys
from behave.configuration import options as behave_options
from behave.__main__ import main as behave_main
from django.core.management.base import BaseCommand
from behave_django.environment import monkey_patch_behave
from behave_django.parser import PassThroughOptionParser
from behave_django.runner import (BehaviorDrivenTestRunner,
ExistingDatabaseTestRunner)
def get_command_options():
return (
make_option(
'--use-existing-database',
action='store_true',
default=False,
help="Don't create a test database. USE AT YOUR OWN RISK!",
),
make_option(
'--keepdb',
action='store_true',
default=False,
help="Preserves the test DB between runs.",
),
)
def get_behave_options():
"""Creates options for the behave management command based on behave"""
new_options = []
conflicts = [
'--no-color',
'--version'
]
for fixed, keywords in behave_options:
# TODO: accept short options too
keywords = keywords.copy()
long_option = None
for option in fixed:
if option.startswith("--"):
long_option = option
break
# Do not add conflicting options
if long_option in conflicts:
continue
if long_option:
# type isn't a valid keyword for make_option
if hasattr(keywords.get('type'), '__call__'):
del keywords['type']
# config_help isn't a valid keyword for make_option
if 'config_help' in keywords:
del keywords['config_help']
new_options.append(
make_option(long_option, **keywords)
)
return tuple(new_options)
class Command(BaseCommand):
help = 'Runs behave tests'
option_list = BaseCommand.option_list + get_behave_options() + \
get_command_options()
def handle(self, *args, **options):
behave_args = self.get_behave_args()
# Configure django environment
if options['dry_run'] or options['use_existing_database']:
django_test_runner = ExistingDatabaseTestRunner()
else:
django_test_runner = BehaviorDrivenTestRunner()
django_test_runner.setup_test_environment()
if options['keepdb']:
django_test_runner.keepdb = True
old_config = django_test_runner.setup_databases()
# Run Behave tests
monkey_patch_behave(django_test_runner)
exit_status = behave_main(args=behave_args)
# Teardown django environment
django_test_runner.teardown_databases(old_config)
django_test_runner.teardown_test_environment()
if exit_status != 0:
sys.exit(exit_status)
def get_behave_args(self, argv=sys.argv):
"""Remove command line arguments not accepted by behave."""
# Override option_list to remove the behave arguments
orig_option_list = self.option_list
self.option_list = BaseCommand.option_list + get_command_options()
# Get an OptionParser from django with the new option_list
django_parser = self.create_parser('manage.py', 'behave')
        # Put back the original option_list to minimize side effects
self.option_list = orig_option_list
# Load the option_list generated by django into our custom parser
parser = PassThroughOptionParser()
for option in django_parser.option_list:
if '--help' not in option._long_opts:
parser.add_option(option)
# Our custom parser returns the unrecognized args instead of exploding
options, args = parser.parse_args(argv[2:])
# The unrecognized args are for behave :)
return args
|
from contextlib import contextmanager
import logging
import os
import pg8000
from translators import sql_translator
from translators.sql_translator import NGSI_ISO8601, NGSI_DATETIME, \
NGSI_GEOJSON, NGSI_TEXT, NGSI_STRUCTURED_VALUE, TIME_INDEX, \
METADATA_TABLE_NAME, FIWARE_SERVICEPATH, TENANT_PREFIX
import geocoding.geojson.wktcodec
from geocoding.slf.geotypes import *
import geocoding.slf.wktcodec
from utils.cfgreader import *
# POSTGRES TYPES
PG_JSON_ARRAY = 'jsonb'
# Translation
NGSI_TO_SQL = {
"Array": PG_JSON_ARRAY, # NB array of str in Crate backend!
"Boolean": 'boolean',
NGSI_ISO8601: 'timestamp WITH TIME ZONE',
NGSI_DATETIME: 'timestamp WITH TIME ZONE',
"Integer": 'bigint',
NGSI_GEOJSON: 'geometry',
SlfPoint.ngsi_type(): 'geometry',
SlfLine.ngsi_type(): 'geometry',
SlfPolygon.ngsi_type(): 'geometry',
SlfBox.ngsi_type(): 'geometry',
"Number": 'float',
NGSI_TEXT: 'text',
NGSI_STRUCTURED_VALUE: 'jsonb',
# hyper-table requires a non-null time index
TIME_INDEX: 'timestamp WITH TIME ZONE NOT NULL'
}
class PostgresConnectionData:
def __init__(self, host='0.0.0.0', port=5432, use_ssl=False,
db_name='quantumleap',
db_user='quantumleap', db_pass='*'):
self.host = host
self.port = port
self.use_ssl = use_ssl
self.db_name = db_name
self.db_user = db_user
self.db_pass = db_pass
def read_env(self, env: dict = os.environ):
r = EnvReader(env, log=logging.getLogger(__name__).info)
self.host = r.read(StrVar('POSTGRES_HOST', self.host))
self.port = r.read(IntVar('POSTGRES_PORT', self.port))
self.use_ssl = r.read(BoolVar('POSTGRES_USE_SSL', self.use_ssl))
self.db_name = r.read(StrVar('POSTGRES_DB_NAME', self.db_name))
self.db_user = r.read(StrVar('POSTGRES_DB_USER', self.db_user))
self.db_pass = r.read(StrVar('POSTGRES_DB_PASS', self.db_pass,
mask_value=True))
class PostgresTranslator(sql_translator.SQLTranslator):
NGSI_TO_SQL = NGSI_TO_SQL
def __init__(self, conn_data=PostgresConnectionData()):
super(PostgresTranslator, self).__init__(
conn_data.host, conn_data.port, conn_data.db_name)
self.logger = logging.getLogger(__name__)
self.db_user = conn_data.db_user
self.db_pass = conn_data.db_pass
self.ssl = {} if conn_data.use_ssl else None
self.conn = None
self.cursor = None
def setup(self):
pg8000.paramstyle = "qmark"
self.conn = pg8000.connect(host=self.host, port=self.port, ssl_context=self.ssl,
database=self.db_name,
user=self.db_user, password=self.db_pass)
self.conn.autocommit = True
self.cursor = self.conn.cursor()
def dispose(self):
self.cursor.close()
self.conn.close()
@staticmethod
def _svc_to_schema_name(fiware_service):
if fiware_service:
return '"{}{}"'.format(TENANT_PREFIX, fiware_service.lower())
def _compute_type(self, attr_t, attr):
return NGSI_TO_SQL[attr_t]
def _prepare_data_table(self, table_name, table, fiware_service):
schema = self._svc_to_schema_name(fiware_service)
if schema:
stmt = "create schema if not exists {}".format(schema)
self.cursor.execute(stmt)
# NOTE. Postgres identifiers (like column and table names) become case
# sensitive when quoted like we do below in the CREATE TABLE statement.
columns = ', '.join('"{}" {}'.format(cn.lower(), ct)
for cn, ct in table.items())
stmt = "create table if not exists {} ({})".format(table_name, columns)
self.cursor.execute(stmt)
stmt = "select create_hypertable('{}', '{}', if_not_exists => true)" \
.format(table_name, self.TIME_INDEX_NAME)
self.cursor.execute(stmt)
alt_cols = ', '.join('add column if not exists "{}" {}'
.format(cn.lower(), ct)
for cn, ct in table.items())
stmt = "alter table {} {};".format(table_name, alt_cols)
self.cursor.execute(stmt)
ix_name = '"ix_{}_eid_and_tx"'.format(table_name.replace('"', ''))
stmt = f"create index if not exists {ix_name} " +\
f"on {table_name} (entity_id, {self.TIME_INDEX_NAME} desc)"
self.cursor.execute(stmt)
def _preprocess_values(self, e, table, col_names, fiware_servicepath):
values = []
for cn in col_names:
if cn == 'entity_type':
values.append(e['type'])
elif cn == 'entity_id':
values.append(e['id'])
elif cn == self.TIME_INDEX_NAME:
values.append(e[self.TIME_INDEX_NAME])
elif cn == FIWARE_SERVICEPATH:
values.append(fiware_servicepath or '')
else:
# Normal attributes
try:
mapped_type = table[cn]
ngsi_value = e[cn]['value']
if SlfGeometry.is_ngsi_slf_attr(e[cn]):
ast = SlfGeometry.build_from_ngsi_dict(e[cn])
mapped_value = geocoding.slf.wktcodec.encode_as_wkt(ast)
elif mapped_type == NGSI_TO_SQL[NGSI_GEOJSON]:
mapped_value = geocoding.geojson.wktcodec.encode_as_wkt(
ngsi_value)
elif mapped_type == NGSI_TO_SQL[NGSI_STRUCTURED_VALUE]:
mapped_value = pg8000.PGJsonb(ngsi_value)
elif mapped_type == NGSI_TO_SQL[NGSI_TEXT]:
mapped_value = str(ngsi_value)
elif mapped_type == PG_JSON_ARRAY:
mapped_value = pg8000.PGJsonb(ngsi_value)
else:
mapped_value = ngsi_value
values.append(mapped_value)
except KeyError:
# this entity update does not have a value for the column
# so use None which will be inserted as NULL to the db.
values.append(None)
return values
@staticmethod
def _to_db_ngsi_structured_value(data: dict) -> pg8000.PGJsonb:
return pg8000.PGJsonb(data)
def _should_insert_original_entities(self, insert_error: Exception) -> bool:
return isinstance(insert_error, pg8000.ProgrammingError)
def _create_metadata_table(self):
stmt = "create table if not exists {} " \
"(table_name text primary key, entity_attrs jsonb)"
op = stmt.format(METADATA_TABLE_NAME)
self.cursor.execute(op)
def _store_medatata(self, table_name, persisted_metadata):
stmt = "insert into {} (table_name, entity_attrs) values (?, ?) " \
"on conflict (table_name) " \
"do update set entity_attrs = ?"
stmt = stmt.format(METADATA_TABLE_NAME)
entity_attrs_value = pg8000.PGJsonb(persisted_metadata)
self.cursor.execute(stmt, (table_name, entity_attrs_value,
entity_attrs_value))
def _get_geo_clause(self, geo_query):
        # TODO: implement geo clause
return ""
@contextmanager
def postgres_translator_instance():
conn_data = PostgresConnectionData()
conn_data.read_env()
with PostgresTranslator(conn_data) as trans:
yield trans
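# Hedged usage sketch. Assumes the POSTGRES_* variables are set in the
# environment and that the SQLTranslator base class supplies the context-manager
# protocol and an insert() method (both assumptions about code not shown here):
#
#     with postgres_translator_instance() as trans:
#         trans.insert(entities, fiware_service, fiware_servicepath)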
|
"""The top level package for the Tetris OpenAI Gym Environment."""
from .tetris_env import TetrisEnv
from ._registration import make
from .wrappers import wrap
# define the outward facing API of this module
__all__ = [
TetrisEnv.__name__,
make.__name__,
wrap.__name__,
]
|
# coding: utf-8
"""
UltraCart Rest API V2
UltraCart REST API Version 2
OpenAPI spec version: 2.0.0
Contact: support@ultracart.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class ItemPricing(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'allow_arbitrary_cost': 'bool',
'arbitrary_cost_velocity_code': 'str',
'automatic_pricing_tier_name': 'str',
'automatic_pricing_tier_oid': 'int',
'cogs': 'float',
'cost': 'float',
'currency_code': 'str',
'manufacturer_suggested_retail_price': 'float',
'maximum_arbitrary_cost': 'float',
'minimum_advertised_price': 'float',
'minimum_arbitrary_cost': 'float',
'mix_and_match_group': 'str',
'mix_and_match_group_oid': 'int',
'sale_cost': 'float',
'sale_end': 'str',
'sale_start': 'str',
'tiers': 'list[ItemPricingTier]'
}
attribute_map = {
'allow_arbitrary_cost': 'allow_arbitrary_cost',
'arbitrary_cost_velocity_code': 'arbitrary_cost_velocity_code',
'automatic_pricing_tier_name': 'automatic_pricing_tier_name',
'automatic_pricing_tier_oid': 'automatic_pricing_tier_oid',
'cogs': 'cogs',
'cost': 'cost',
'currency_code': 'currency_code',
'manufacturer_suggested_retail_price': 'manufacturer_suggested_retail_price',
'maximum_arbitrary_cost': 'maximum_arbitrary_cost',
'minimum_advertised_price': 'minimum_advertised_price',
'minimum_arbitrary_cost': 'minimum_arbitrary_cost',
'mix_and_match_group': 'mix_and_match_group',
'mix_and_match_group_oid': 'mix_and_match_group_oid',
'sale_cost': 'sale_cost',
'sale_end': 'sale_end',
'sale_start': 'sale_start',
'tiers': 'tiers'
}
def __init__(self, allow_arbitrary_cost=None, arbitrary_cost_velocity_code=None, automatic_pricing_tier_name=None, automatic_pricing_tier_oid=None, cogs=None, cost=None, currency_code=None, manufacturer_suggested_retail_price=None, maximum_arbitrary_cost=None, minimum_advertised_price=None, minimum_arbitrary_cost=None, mix_and_match_group=None, mix_and_match_group_oid=None, sale_cost=None, sale_end=None, sale_start=None, tiers=None):
"""
ItemPricing - a model defined in Swagger
"""
self._allow_arbitrary_cost = None
self._arbitrary_cost_velocity_code = None
self._automatic_pricing_tier_name = None
self._automatic_pricing_tier_oid = None
self._cogs = None
self._cost = None
self._currency_code = None
self._manufacturer_suggested_retail_price = None
self._maximum_arbitrary_cost = None
self._minimum_advertised_price = None
self._minimum_arbitrary_cost = None
self._mix_and_match_group = None
self._mix_and_match_group_oid = None
self._sale_cost = None
self._sale_end = None
self._sale_start = None
self._tiers = None
self.discriminator = None
if allow_arbitrary_cost is not None:
self.allow_arbitrary_cost = allow_arbitrary_cost
if arbitrary_cost_velocity_code is not None:
self.arbitrary_cost_velocity_code = arbitrary_cost_velocity_code
if automatic_pricing_tier_name is not None:
self.automatic_pricing_tier_name = automatic_pricing_tier_name
if automatic_pricing_tier_oid is not None:
self.automatic_pricing_tier_oid = automatic_pricing_tier_oid
if cogs is not None:
self.cogs = cogs
if cost is not None:
self.cost = cost
if currency_code is not None:
self.currency_code = currency_code
if manufacturer_suggested_retail_price is not None:
self.manufacturer_suggested_retail_price = manufacturer_suggested_retail_price
if maximum_arbitrary_cost is not None:
self.maximum_arbitrary_cost = maximum_arbitrary_cost
if minimum_advertised_price is not None:
self.minimum_advertised_price = minimum_advertised_price
if minimum_arbitrary_cost is not None:
self.minimum_arbitrary_cost = minimum_arbitrary_cost
if mix_and_match_group is not None:
self.mix_and_match_group = mix_and_match_group
if mix_and_match_group_oid is not None:
self.mix_and_match_group_oid = mix_and_match_group_oid
if sale_cost is not None:
self.sale_cost = sale_cost
if sale_end is not None:
self.sale_end = sale_end
if sale_start is not None:
self.sale_start = sale_start
if tiers is not None:
self.tiers = tiers
@property
def allow_arbitrary_cost(self):
"""
Gets the allow_arbitrary_cost of this ItemPricing.
Allow arbitrary cost
:return: The allow_arbitrary_cost of this ItemPricing.
:rtype: bool
"""
return self._allow_arbitrary_cost
@allow_arbitrary_cost.setter
def allow_arbitrary_cost(self, allow_arbitrary_cost):
"""
Sets the allow_arbitrary_cost of this ItemPricing.
Allow arbitrary cost
:param allow_arbitrary_cost: The allow_arbitrary_cost of this ItemPricing.
:type: bool
"""
self._allow_arbitrary_cost = allow_arbitrary_cost
@property
def arbitrary_cost_velocity_code(self):
"""
Gets the arbitrary_cost_velocity_code of this ItemPricing.
Arbitrary cost velocity code
:return: The arbitrary_cost_velocity_code of this ItemPricing.
:rtype: str
"""
return self._arbitrary_cost_velocity_code
@arbitrary_cost_velocity_code.setter
def arbitrary_cost_velocity_code(self, arbitrary_cost_velocity_code):
"""
Sets the arbitrary_cost_velocity_code of this ItemPricing.
Arbitrary cost velocity code
:param arbitrary_cost_velocity_code: The arbitrary_cost_velocity_code of this ItemPricing.
:type: str
"""
if arbitrary_cost_velocity_code is not None and len(arbitrary_cost_velocity_code) > 10000:
raise ValueError("Invalid value for `arbitrary_cost_velocity_code`, length must be less than or equal to `10000`")
self._arbitrary_cost_velocity_code = arbitrary_cost_velocity_code
@property
def automatic_pricing_tier_name(self):
"""
Gets the automatic_pricing_tier_name of this ItemPricing.
Automatic pricing tier name
:return: The automatic_pricing_tier_name of this ItemPricing.
:rtype: str
"""
return self._automatic_pricing_tier_name
@automatic_pricing_tier_name.setter
def automatic_pricing_tier_name(self, automatic_pricing_tier_name):
"""
Sets the automatic_pricing_tier_name of this ItemPricing.
Automatic pricing tier name
:param automatic_pricing_tier_name: The automatic_pricing_tier_name of this ItemPricing.
:type: str
"""
self._automatic_pricing_tier_name = automatic_pricing_tier_name
@property
def automatic_pricing_tier_oid(self):
"""
Gets the automatic_pricing_tier_oid of this ItemPricing.
Automatic pricing tier object identifier
:return: The automatic_pricing_tier_oid of this ItemPricing.
:rtype: int
"""
return self._automatic_pricing_tier_oid
@automatic_pricing_tier_oid.setter
def automatic_pricing_tier_oid(self, automatic_pricing_tier_oid):
"""
Sets the automatic_pricing_tier_oid of this ItemPricing.
Automatic pricing tier object identifier
:param automatic_pricing_tier_oid: The automatic_pricing_tier_oid of this ItemPricing.
:type: int
"""
self._automatic_pricing_tier_oid = automatic_pricing_tier_oid
@property
def cogs(self):
"""
Gets the cogs of this ItemPricing.
Cost of goods sold
:return: The cogs of this ItemPricing.
:rtype: float
"""
return self._cogs
@cogs.setter
def cogs(self, cogs):
"""
Sets the cogs of this ItemPricing.
Cost of goods sold
:param cogs: The cogs of this ItemPricing.
:type: float
"""
self._cogs = cogs
@property
def cost(self):
"""
Gets the cost of this ItemPricing.
Cost
:return: The cost of this ItemPricing.
:rtype: float
"""
return self._cost
@cost.setter
def cost(self, cost):
"""
Sets the cost of this ItemPricing.
Cost
:param cost: The cost of this ItemPricing.
:type: float
"""
self._cost = cost
@property
def currency_code(self):
"""
Gets the currency_code of this ItemPricing.
Currency code
:return: The currency_code of this ItemPricing.
:rtype: str
"""
return self._currency_code
@currency_code.setter
def currency_code(self, currency_code):
"""
Sets the currency_code of this ItemPricing.
Currency code
:param currency_code: The currency_code of this ItemPricing.
:type: str
"""
if currency_code is not None and len(currency_code) > 3:
raise ValueError("Invalid value for `currency_code`, length must be less than or equal to `3`")
self._currency_code = currency_code
@property
def manufacturer_suggested_retail_price(self):
"""
Gets the manufacturer_suggested_retail_price of this ItemPricing.
Manufacturer suggested retail price
:return: The manufacturer_suggested_retail_price of this ItemPricing.
:rtype: float
"""
return self._manufacturer_suggested_retail_price
@manufacturer_suggested_retail_price.setter
def manufacturer_suggested_retail_price(self, manufacturer_suggested_retail_price):
"""
Sets the manufacturer_suggested_retail_price of this ItemPricing.
Manufacturer suggested retail price
:param manufacturer_suggested_retail_price: The manufacturer_suggested_retail_price of this ItemPricing.
:type: float
"""
self._manufacturer_suggested_retail_price = manufacturer_suggested_retail_price
@property
def maximum_arbitrary_cost(self):
"""
Gets the maximum_arbitrary_cost of this ItemPricing.
Maximum arbitrary cost
:return: The maximum_arbitrary_cost of this ItemPricing.
:rtype: float
"""
return self._maximum_arbitrary_cost
@maximum_arbitrary_cost.setter
def maximum_arbitrary_cost(self, maximum_arbitrary_cost):
"""
Sets the maximum_arbitrary_cost of this ItemPricing.
Maximum arbitrary cost
:param maximum_arbitrary_cost: The maximum_arbitrary_cost of this ItemPricing.
:type: float
"""
self._maximum_arbitrary_cost = maximum_arbitrary_cost
@property
def minimum_advertised_price(self):
"""
Gets the minimum_advertised_price of this ItemPricing.
Minimum advertised price
:return: The minimum_advertised_price of this ItemPricing.
:rtype: float
"""
return self._minimum_advertised_price
@minimum_advertised_price.setter
def minimum_advertised_price(self, minimum_advertised_price):
"""
Sets the minimum_advertised_price of this ItemPricing.
Minimum advertised price
:param minimum_advertised_price: The minimum_advertised_price of this ItemPricing.
:type: float
"""
self._minimum_advertised_price = minimum_advertised_price
@property
def minimum_arbitrary_cost(self):
"""
Gets the minimum_arbitrary_cost of this ItemPricing.
Minimum arbitrary cost
:return: The minimum_arbitrary_cost of this ItemPricing.
:rtype: float
"""
return self._minimum_arbitrary_cost
@minimum_arbitrary_cost.setter
def minimum_arbitrary_cost(self, minimum_arbitrary_cost):
"""
Sets the minimum_arbitrary_cost of this ItemPricing.
Minimum arbitrary cost
:param minimum_arbitrary_cost: The minimum_arbitrary_cost of this ItemPricing.
:type: float
"""
self._minimum_arbitrary_cost = minimum_arbitrary_cost
@property
def mix_and_match_group(self):
"""
Gets the mix_and_match_group of this ItemPricing.
Mix and match group
:return: The mix_and_match_group of this ItemPricing.
:rtype: str
"""
return self._mix_and_match_group
@mix_and_match_group.setter
def mix_and_match_group(self, mix_and_match_group):
"""
Sets the mix_and_match_group of this ItemPricing.
Mix and match group
:param mix_and_match_group: The mix_and_match_group of this ItemPricing.
:type: str
"""
self._mix_and_match_group = mix_and_match_group
@property
def mix_and_match_group_oid(self):
"""
Gets the mix_and_match_group_oid of this ItemPricing.
Mix and match group object identifier
:return: The mix_and_match_group_oid of this ItemPricing.
:rtype: int
"""
return self._mix_and_match_group_oid
@mix_and_match_group_oid.setter
def mix_and_match_group_oid(self, mix_and_match_group_oid):
"""
Sets the mix_and_match_group_oid of this ItemPricing.
Mix and match group object identifier
:param mix_and_match_group_oid: The mix_and_match_group_oid of this ItemPricing.
:type: int
"""
self._mix_and_match_group_oid = mix_and_match_group_oid
@property
def sale_cost(self):
"""
Gets the sale_cost of this ItemPricing.
Sale cost
:return: The sale_cost of this ItemPricing.
:rtype: float
"""
return self._sale_cost
@sale_cost.setter
def sale_cost(self, sale_cost):
"""
Sets the sale_cost of this ItemPricing.
Sale cost
:param sale_cost: The sale_cost of this ItemPricing.
:type: float
"""
self._sale_cost = sale_cost
@property
def sale_end(self):
"""
Gets the sale_end of this ItemPricing.
Sale end
:return: The sale_end of this ItemPricing.
:rtype: str
"""
return self._sale_end
@sale_end.setter
def sale_end(self, sale_end):
"""
Sets the sale_end of this ItemPricing.
Sale end
:param sale_end: The sale_end of this ItemPricing.
:type: str
"""
self._sale_end = sale_end
@property
def sale_start(self):
"""
Gets the sale_start of this ItemPricing.
Sale start
:return: The sale_start of this ItemPricing.
:rtype: str
"""
return self._sale_start
@sale_start.setter
def sale_start(self, sale_start):
"""
Sets the sale_start of this ItemPricing.
Sale start
:param sale_start: The sale_start of this ItemPricing.
:type: str
"""
self._sale_start = sale_start
@property
def tiers(self):
"""
Gets the tiers of this ItemPricing.
Tiers
:return: The tiers of this ItemPricing.
:rtype: list[ItemPricingTier]
"""
return self._tiers
@tiers.setter
def tiers(self, tiers):
"""
Sets the tiers of this ItemPricing.
Tiers
:param tiers: The tiers of this ItemPricing.
:type: list[ItemPricingTier]
"""
self._tiers = tiers
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, ItemPricing):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
|
# Copyright 2017 - Nokia
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import sys
import yaml
from vitrage.common.constants import TemplateStatus
from vitrage.common.constants import TemplateTypes as TType
from vitrage.evaluator.template_db.template_repository import \
add_templates_to_db
from vitrage import storage
from vitrage.storage.sqlalchemy import models
class TestConfiguration(object):
def add_db(self):
db_name = "sqlite:///test-%s-%s.db" % (type(self).__name__,
sys.version_info[0])
self.config(group='database', connection=db_name)
self._db = storage.get_connection_from_config()
engine = self._db._engine_facade.get_engine()
models.Base.metadata.drop_all(engine)
models.Base.metadata.create_all(engine)
return self._db
def add_templates(self, templates_dir, templates_type=TType.STANDARD):
        yamls = TestConfiguration.load_yaml_files(templates_dir)
templates = add_templates_to_db(self._db, yamls, templates_type)
for t in templates:
if t.status == TemplateStatus.LOADING:
self._db.templates.update(t.uuid, 'status',
TemplateStatus.ACTIVE)
if t.status == TemplateStatus.DELETING:
self._db.templates.update(t.uuid, 'status',
TemplateStatus.DELETED)
return templates
@staticmethod
def load_yaml_files(path):
if os.path.isdir(path):
file_paths = [path + "/" + fn for fn in os.listdir(path)
if os.path.isfile(path + "/" + fn)]
else:
file_paths = [path]
yamls = []
for file_path in file_paths:
try:
yamls.append(TestConfiguration._load_yaml_file(file_path))
except Exception:
continue
return yamls
@staticmethod
def _load_yaml_file(path):
with open(path, 'r') as stream:
return yaml.load(stream, Loader=yaml.BaseLoader)
|
#
# import tree: please see __init__.py
#
from .stdlib import *
from ..meta.decorator import *
from ..meta.listify import *
from ..meta.meta import *
from ..debug.trace import *
from ..debug.profile import *
from ..debug.debug import *
from ..debug.jupyter import *
|
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from tqdm import tqdm
from epimargin.estimators import analytical_MPVS
from epimargin.etl.commons import download_data
from epimargin.etl.covid19india import (get_time_series, load_all_data,
replace_district_names)
from epimargin.etl.devdatalab import district_migration_matrices
from epimargin.model import Model, ModelUnit, gravity_matrix
from epimargin.plots import plot_simulation_range
from epimargin.policy import simulate_adaptive_control, simulate_lockdown
from epimargin.smoothing import convolution
from epimargin.utils import cwd, days, weeks
def estimate(ts, smoothing):
(state_dates, R, *_) = analytical_MPVS(ts.Hospitalized, smoothing = smoothing)
dates = [sd[1] if isinstance(sd, tuple) else sd for sd in state_dates]
return pd.DataFrame({"date": dates, "R": R}).set_index("date")
def get_model(districts, populations, timeseries, seed = 0):
units = [ModelUnit(
name = district,
population = populations[i],
I0 = timeseries.loc[district].iloc[-1]['Hospitalized'] if not timeseries.loc[district].empty and 'Hospitalized' in timeseries.loc[district].iloc[-1] else 0,
R0 = timeseries.loc[district].iloc[-1]['Recovered'] if not timeseries.loc[district].empty and 'Recovered' in timeseries.loc[district].iloc[-1] else 0,
D0 = timeseries.loc[district].iloc[-1]['Deceased'] if not timeseries.loc[district].empty and 'Deceased' in timeseries.loc[district].iloc[-1] else 0,
) for (i, district) in enumerate(districts)]
return Model(units, random_seed = seed)
def run_policies(migrations, district_names, populations, district_time_series, Rm, Rv, gamma, seed, initial_lockdown = 13*days, total_time = 190*days):
# run various policy scenarios
lockdown = np.zeros(migrations.shape)
# 1. release lockdown 31 May
release = get_model(district_names, populations, district_time_series, seed)
simulate_lockdown(release,
lockdown_period = initial_lockdown + 4*weeks,
total_time = total_time,
RR0_mandatory = Rm, RR0_voluntary = Rv,
lockdown = lockdown.copy(), migrations = migrations)
    # 2. adaptive release starting 31 May
adaptive = get_model(district_names, populations, district_time_series, seed)
simulate_adaptive_control(adaptive, initial_lockdown, total_time, lockdown, migrations, Rm, {district: R * gamma for (district, R) in Rv.items()}, {district: R * gamma for (district, R) in Rm.items()}, evaluation_period=1*weeks)
return (release, adaptive)
if __name__ == "__main__":
root = cwd()
data = root/"data"
figs = root/"figs"
# model details
gamma = 0.2
prevalence = 1
total_time = 90 * days
release_date = pd.to_datetime("July 28, 2020")
lockdown_period = (release_date - pd.to_datetime("today")).days
smoothing = convolution()
states = ["Maharashtra", "Karnataka", "Andhra Pradesh", "Tamil Nadu", "Madhya Pradesh", "Punjab", "Gujarat", "Kerala"]
# use gravity matrix for states after 2001 census
new_state_data_paths = {
"Telangana": (data/"telangana.json", data/"telangana_pop.csv")
}
# define data versions for api files
paths = { "v3": ["raw_data1.csv", "raw_data2.csv"],
"v4": ["raw_data3.csv", "raw_data4.csv",
"raw_data5.csv", "raw_data6.csv",
"raw_data7.csv", "raw_data8.csv",
"raw_data9.csv", "raw_data10.csv", "raw_data11.csv"] }
# download data from india covid 19 api
for target in paths['v3'] + paths['v4']:
download_data(data, target)
# run rolling regressions on historical national case data
dfn = load_all_data(
v3_paths = [data/filepath for filepath in paths['v3']],
v4_paths = [data/filepath for filepath in paths['v4']]
)
data_recency = str(dfn["date_announced"].max()).split()[0]
tsn = get_time_series(dfn)
grn = estimate(tsn, smoothing)
# disaggregate down to states
tss = get_time_series(dfn, 'detected_state').loc[states]
grs = tss.groupby(level=0).apply(lambda ts: estimate(ts, smoothing))
# voluntary and mandatory reproductive numbers
Rvn = np.mean(grn["2020-03-24":"2020-03-31"].R)
Rmn = np.mean(grn["2020-04-01":].R)
Rvs = {s: np.mean(grs.loc[s].loc["2020-03-24":"2020-03-31"].R) if s in grs.index else Rvn for s in states}
Rms = {s: np.mean(grs.loc[s].loc["2020-04-01":].R) if s in grs.index else Rmn for s in states}
# voluntary and mandatory distancing rates
Bvs = {s: R * gamma for (s, R) in Rvs.items()}
Bms = {s: R * gamma for (s, R) in Rms.items()}
migration_matrices = district_migration_matrices(data/"Migration Matrix - 2011 District.csv", states = states)
# load csv mapping 2011 districts to current district names
district_matches = pd.read_csv(data/"india_district_matches.csv")
# seed range
si, sf = 0, 1000
results = {}
for state in states:
if state in new_state_data_paths.keys():
districts, populations, migrations = gravity_matrix(*new_state_data_paths[state])
else:
districts, populations, migrations = migration_matrices[state]
df_state = dfn[dfn['detected_state'] == state]
# replace covid data district names with 2011 district names
dist_map_state = district_matches[district_matches['state'] == state]
df_state_renamed = replace_district_names(df_state, dist_map_state)
# only keep district names that are present in both migration and api data
districts = list(set(districts).intersection(set(df_state_renamed['detected_district'])))
tsd = get_time_series(df_state_renamed, 'detected_district')
grd = tsd.groupby(level=0).apply(lambda ts: estimate(ts, smoothing))
Rv = {district: np.mean(grd.loc[district].loc["2020-03-24":"2020-03-31"].R) if district in grd.index else Rvs[state] for district in districts}
Rm = {district: np.mean(grd.loc[district].loc["2020-04-01":].R) if district in grd.index else Rms[state] for district in districts}
# fill in missing values
for mapping, default in ((Rv, Rvs[state]), (Rm, Rms[state])):
for key in mapping:
if np.isnan(mapping[key]):
mapping[key] = default
# projections = []
# for district in districts:
# try:
# estimate = grd.loc[district].loc[grd.loc[district].R.last_valid_index()]
# projections.append((district, estimate.R, estimate.R + estimate.gradient*7))
# except KeyError:
# projections.append((district, np.NaN, np.NaN))
# pd.DataFrame(projections, columns = ["district", "R", "Rproj"]).to_csv(data/(state + ".csv"))
simulation_results = [
run_policies(migrations, districts, populations, tsd, Rm, Rv, gamma, seed, initial_lockdown = lockdown_period, total_time = total_time)
for seed in tqdm(range(si, sf))
]
results[state] = simulation_results
plot_simulation_range(simulation_results, ["28 July Release", "Adaptive Controls"], get_time_series(df_state).Hospitalized)\
.title(f"{state} Policy Scenarios: Projected Cases over Time")\
.xlabel("Date")\
.ylabel("Number of new cases")\
.size(11, 8)\
.save(figs/f"oped_{state}90.png")
plt.clf()
|
import logging
import sys
from pathlib import Path
from typing import List, MutableMapping, Tuple
FORMAT = "%(levelname)s: %(message)s"
class DocumentLogAdapter(logging.LoggerAdapter):
def __init__(
self, logger: logging.Logger, document: "Simple.document.Document" # type: ignore
) -> None:
super().__init__(logger, {"document": document})
def process(self, msg: str, kwargs: MutableMapping) -> Tuple[str, MutableMapping]:
from .document import Document
cwd = Path.cwd()
doc: Document = self.extra["document"]
relpath = doc.path.relative_to(cwd)
include_stack = self._get_include_stack()
if len(include_stack) > 0:
incstack_str = "\n\t" + "\n\t".join(
"included from " + str(s.relative_to(cwd)) for s in include_stack
)
else:
incstack_str = ""
extra = {"document": doc, "include-stack": include_stack}
if "pos" in kwargs:
(line, col) = kwargs["pos"]
return f"{relpath}:{line}:{col} {msg}{incstack_str}", {
**kwargs,
"extra": extra,
}
else:
return f"{relpath}: {msg}{incstack_str}", {**kwargs, "extra": extra}
def _get_include_stack(self):
from .document import Document
doc: Document = self.extra["document"]
include_stack: List[Path] = []
while (parent := doc.parent) is not None:
include_stack.append(parent.path)
doc = doc.parent
return include_stack
class ColoredFormatter(logging.Formatter):
COLOR_RED = "\033[1;31m"
COLOR_GREEN = "\033[1;32m"
COLOR_YELLOW = "\033[1;33m"
COLOR_MAGENTA = "\033[1;35m"
COLOR_BLUE = "\033[36m"
COLOR_NEUTRAL = "\033[0m"
COLORS = {
"CRITICAL": COLOR_RED,
"ERROR": COLOR_RED,
"WARNING": COLOR_MAGENTA,
"INFO": COLOR_YELLOW,
"DEBUG": COLOR_BLUE,
}
def __init__(self, msg, use_color=True):
logging.Formatter.__init__(self, msg)
self.use_color = use_color
def format(self, record: logging.LogRecord):
levelname = record.levelname
# Prettify the levelname
name = levelname[0].upper() + levelname[1:].lower()
# Colorize the levelname
if self.use_color:
record.levelname = f"{self.COLORS[levelname]}{name}{self.COLOR_NEUTRAL}"
else:
record.levelname = name
return super().format(record)
def create_logger(log_level: int):
logger = logging.getLogger()
logger.setLevel(log_level)
handler = logging.StreamHandler(sys.stderr)
handler.setFormatter(ColoredFormatter(FORMAT))
logger.addHandler(handler)
return logger
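# Hedged usage sketch: build the root logger with the colored formatter. The
# DocumentLogAdapter lines stay commented out because they need a Document
# instance from the sibling module, and the path below is hypothetical.
if __name__ == "__main__":
    log = create_logger(logging.INFO)
    log.info("logger configured")
    # doc = Document(Path("manual/index.txt"))
    # DocumentLogAdapter(log, doc).warning("unknown directive", pos=(12, 4))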
|
import time
def animate_sentence(word_list):
    # Print each word one character at a time, pausing briefly for a typewriter effect.
    for word in word_list:
        for char in word:
            print(char, end='', flush=True)
            time.sleep(0.2)
        print(" ", end='', flush=True)
word_list = ["Welcome", "to", "the", "World", "of", "Code"]
animate_sentence(word_list)
|
import re
from API.models import Project
def get_proposal_from_visit(visit):
    visit_pattern = r'([A-Za-z0-9_]+)(-[0-9]+)'
p = re.fullmatch(visit_pattern, visit)
try:
return p.group(1)
except AttributeError:
return ""
def get_project_from_visit(visit):
proposal = get_proposal_from_visit(visit)
return Project.objects.get(proposal=proposal)
|
"""sc-githooks - Checks on Git commits
Copyright (c) 2021 Scott Lau
Portions Copyright (c) 2021 InnoGames GmbH
Portions Copyright (c) 2021 Emre Hasegeli
"""
from githooks.config import config
from githooks.base_check import BaseCheck, Severity
from githooks.git import Commit
class CommitCheck(BaseCheck):
"""Parent class for all single commit checks"""
commit = None
def prepare(self, obj):
new = super(CommitCheck, self).prepare(obj)
if not new or not isinstance(obj, Commit):
return new
new = new.clone()
new.commit = obj
return new
def __str__(self):
        return '{} on commit {}'.format(type(self).__name__, self.commit)
class CheckCommitMessage(CommitCheck):
"""提交消息(包含标题)检查
检查长度、空行等问题"""
def get_problems(self):
for line_id, line in enumerate(self.commit.get_message_lines()):
if line_id == 0:
continue
elif line_id == 1:
if line.strip():
                    yield Severity.ERROR, 'second line should be blank'
else:
if line.startswith(' ') or line.startswith('>'):
continue
if line:
for problem in self.get_line_problems(line_id + 1, line):
yield problem
def get_line_problems(self, line_number, line):
# if line.rstrip() != line:
# line = line.rstrip()
# yield (
# Severity.ERROR,
        #         'line {}: should not contain trailing whitespace'.format(line_number)
# )
#
# if line.lstrip() != line:
# line = line.lstrip()
# yield (
# Severity.WARNING,
        #         'line {}: contains leading whitespace'.format(line_number)
# )
if len(line) > config.get("commit_check.commit_line_max_length"):
yield (
Severity.WARNING,
                'line {}: longer than {} characters'.format(line_number, config.get("commit_check.commit_line_max_length"))
)
class CheckCommitSummary(CommitCheck):
"""提交标题检查
检查标题的标记、回退等问题"""
commit_tags = {
'BREAKING',
'BUGFIX',
'CLEANUP',
'FEATURE',
'HOTFIX',
'MESS',
'MIGRATE',
'REFACTORING',
'REVIEW',
'SECURITY',
'STYLE',
'TASK',
'TEMP',
'WIP',
'!!',
}
def get_problems(self):
tags, rest = self.commit.parse_tags()
# if rest.startswith('['):
        #     yield Severity.WARNING, 'unterminated commit tags'
# if tags:
# for problem in self.get_commit_tag_problems(tags, rest):
# yield problem
# rest = rest[1:]
#
# if rest.startswith('Revert'):
# for problem in self.get_revert_commit_problems(rest):
# yield problem
# return
for problem in self.get_summary_problems(rest):
yield problem
# def get_revert_commit_problems(self, rest):
# rest = rest[len('Revert'):]
# if not rest.startswith(' "') or not rest.endswith('"'):
    #         yield Severity.WARNING, 'revert commit message is not well formed'
#
# def get_commit_tag_problems(self, tags, rest):
# used_tags = []
# for tag in tags:
# tag_upper = tag.upper()
# if tag != tag_upper:
# yield (
# Severity.ERROR,
    #                 'commit tag [{}] is not uppercase'.format(tag)
# )
# if tag_upper not in CheckCommitSummary.commit_tags:
# yield (
# Severity.WARNING,
    #                 'commit tag [{}] is not on the list {}'.format(
# tag, ', '.join(
# '[{}]'.format(t)
# for t in CheckCommitSummary.commit_tags
# )
# )
# )
# if tag_upper in used_tags:
    #             yield Severity.ERROR, 'duplicate commit tag [{}]'.format(tag)
# used_tags.append(tag_upper)
#
# if not rest.startswith(' '):
    #         yield Severity.WARNING, 'commit tags are not separated from the rest with a space'
def get_summary_problems(self, rest):
if not rest:
            yield Severity.ERROR, 'no commit summary'
return
rest_len = len(rest)
if rest_len > config.get("commit_check.commit_line_max_length"):
yield Severity.ERROR, "提交标题不能超过 {} 个字符".format(config.get("commit_check.commit_line_max_length"))
elif rest_len > config.get("commit_check.commit_summary_max_length"):
yield Severity.WARNING, "提交标题超过了 {} 个字符".format(config.get("commit_check.commit_summary_max_length"))
# if ' ' in rest:
        #     yield Severity.WARNING, 'multiple space characters'
category_index = rest[:24].find(': ')
rest_index = category_index + len(': ')
if category_index >= 0 and len(rest) > rest_index:
# for problem in self.get_category_problems(rest[:category_index]):
# yield problem
rest = rest[rest_index:]
for problem in self.get_title_problems(rest):
yield problem
#
# def get_category_problems(self, category):
# if not category[0].isalpha():
    #         yield Severity.WARNING, 'commit category starts with a non-letter character'
# if category.lower() != category:
    #         yield Severity.WARNING, 'commit category contains uppercase characters'
# if category.rstrip() != category:
    #         yield Severity.WARNING, 'commit category has trailing whitespace'
def get_title_problems(self, rest):
if not rest:
            yield Severity.ERROR, 'no commit title'
return
#
# first_letter = rest[0]
# if not first_letter.isalpha():
        #     yield Severity.WARNING, 'commit title starts with a non-letter character'
# elif first_letter.upper() != first_letter:
        #     yield Severity.WARNING, 'commit title does not start with an uppercase letter'
#
# if rest.endswith('.'):
        #     yield Severity.WARNING, "commit title ends with a '.' character"
#
# first_word = rest.split(' ', 1)[0]
# if first_word.endswith('ed'):
        #     yield Severity.WARNING, 'commit title is written in past tense'
# if first_word.endswith('ing'):
        #     yield Severity.WARNING, 'commit title is written in continuous tense'
class CheckChangedFilePaths(CommitCheck):
"""Check file names and directories on a single commit"""
def get_problems(self):
for changed_file in self.commit.get_changed_files():
extension = changed_file.get_extension()
if (
extension in ('pp', 'py', 'sh') and
changed_file.path != changed_file.path.lower()
):
                yield Severity.WARNING, '{} file name contains uppercase letters'.format(changed_file)
class CheckBinaryFiles(CommitCheck):
"""Check whether binary files exists on a single commit"""
def get_problems(self):
# project_name = self.commit.get_projects()
# projects_name = config.get("commit_check.unrestricted_projects")
# projects = projects_name.split(",")
# if project_name not in projects:
for binary_file in self.commit.get_binary_files():
            yield Severity.WARNING, 'file {} is a binary file'.format(binary_file)
|
# -*- coding: utf-8 -*-
from distutils.core import setup
import py2exe
setup(name="main",
version="1.0",
console=[{"script": "main.py"}]
)
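# Build the frozen console executable with py2exe's standard invocation, run
# from the directory containing this script:
#
#     python setup.py py2exe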
|
from fastapi.testclient import TestClient
from fastapi import status
from tests.test_auth import TestAuth
class TestUser:
def test_post(self, client: TestClient):
payload = {
"username": "testuser",
"password": "12345",
"email": "testuser@example.com",
"first_name": "Test",
"last_name": "User",
"role_id": 1
}
access_token = TestAuth().test_login(client)
response = client.post("/api/users/", json=payload, headers={
'Authorization': f'Bearer {access_token}'
})
assert response.status_code == status.HTTP_201_CREATED
def test_get_all(self, client: TestClient):
access_token = TestAuth().test_login(client)
response = client.get("/api/users", headers={
'Authorization': f'Bearer {access_token}'
})
assert response.status_code == status.HTTP_200_OK
def test_get(self, client: TestClient):
access_token = TestAuth().test_login(client)
response = client.get("/api/users/1", headers={
'Authorization': f'Bearer {access_token}'
})
assert response.status_code == status.HTTP_200_OK
def test_delete(self, client: TestClient):
access_token = TestAuth().test_login(client)
response = client.delete("/api/users/2", headers={
'Authorization': f'Bearer {access_token}'
})
assert response.status_code == status.HTTP_204_NO_CONTENT
|