blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2 values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 213 values | src_encoding stringclasses 30 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 2 10.3M | extension stringclasses 246 values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
0a9a9832a05ba97248db16b48ea4436d5ff6dd46 | 4e1782ae36e9d8db5e063822f4d8d05df13fe16e | /src/fossils/views.py | a510b3130fefb6c2d26e52f3bcc79044d139baae | [] | no_license | Jacob-Simons/poe-flipper | 4c522602ab9587ae202b55f64b9976c4b2537512 | d59741c001ca04dd8b9d3d13fae7ed1c518879ea | refs/heads/main | 2023-08-28T01:59:04.799905 | 2021-11-02T00:58:35 | 2021-11-02T00:58:35 | 400,940,744 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 688 | py | from django.shortcuts import render
from django.http import HttpResponse
import sys
import csv
import os
import pprint
# Create your views here.
def fossils(request):
    """Render the fossil list loaded from the project-level Fossil.csv file.

    Falls back to an error page when the CSV cannot be found.
    """
    # Fossil.csv lives three directories above this views module.
    project_root = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
    csv_path = os.path.normpath(project_root + '/Fossil.csv')
    print(csv_path)
    if not os.path.exists(csv_path):
        return render(request, 'fossils/error.html')
    rows = []
    with open(csv_path, "r") as handle:
        for row in csv.DictReader(handle):
            rows.append(row)
            print(row)
    return render(request, 'fossils/fossils.html', {'fossil_list': rows})
"jacob.simons25@gmail.com"
] | jacob.simons25@gmail.com |
31f6cdecf0a01e44ebee343b76b0140baea5d704 | 7276bbe2b0e979ce2fc902b55e9be6361644ec71 | /voicebase/settings.py | 5f087b63520e09f2d2ba4bcad36651e6ef00e3a6 | [
"MIT"
] | permissive | gdoermann/voicebase | 6d67b29adec15ce7a461b931c7cf17ff98a14a3c | 53cb4735327898a7a284dea3a60ace0b3956a8ec | refs/heads/master | 2021-01-10T11:18:17.485851 | 2016-03-08T00:05:31 | 2016-03-08T00:05:31 | 52,379,275 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,374 | py | import os
from attrdict import AttrDict

# Py2/Py3 compatibility: ConfigParser was renamed to configparser in Python 3.
try:
    import configparser
except ImportError:
    import ConfigParser as configparser

# The "default" config for everyone. You can use this as a base.
DEFAULT_CONF = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'default.conf')

# The /etc/default (machine level) configuration
ETC_CONF = '/etc/default/voicebase.conf'

# Overrides for the user in ${HOME}/.voicebase.conf
# BUG FIX: previously this fell back to the literal string '~/.voicebase.conf'
# when $HOME was unset, a path os.path.exists() never matches.  expanduser()
# resolves the home directory portably (and still returns '~' unexpanded only
# when no home directory can be determined, matching the old degenerate case).
HOME_CONF = os.path.join(os.path.expanduser('~'), '.voicebase.conf')

# Read home last (so it overrides)
all_files = [ETC_CONF, HOME_CONF]
CONFIG_FILES = [DEFAULT_CONF]  # We always read in the default
for f in all_files:
    if os.path.exists(f):
        CONFIG_FILES.append(f)

parser = configparser.ConfigParser()
parser.read(CONFIG_FILES)

# Credentials and endpoint configuration read from the merged config files.
API_KEY = parser.get('authentication', 'API_KEY')
PASSWORD = parser.get('authentication', 'PASSWORD')
BASE_URL = parser.get('default', 'BASE_URL')
API_VERSION = parser.get('default', 'API_VERSION')

# Build URLS.<group>.<endpoint> = "<version>/<base>/<suffix>" from the [api]
# section plus one [api.<group>] section per API group.
URLS = AttrDict()
for key, base_key in parser.items('api'):
    base_url = '/'.join([API_VERSION, base_key])
    section = AttrDict({'base': base_url})
    for url_key, value in parser.items('api.{}'.format(key)):
        section[url_key] = '{}/{}'.format(base_url, value)
    URLS[key] = section

TESTING = AttrDict()
TESTING.RAISE_NOT_IMPLEMENTED = parser.getboolean('testing', 'raise_not_implemented')
"gdoermann@perfectpitchtech.com"
] | gdoermann@perfectpitchtech.com |
784e421a5ea390d9b8c2e1b833fbe70c94b74164 | a0dc9e0dd9f1ca1899bb930f842e015c83fa3f63 | /sal.py | 5a4be396005113a606d064d09890c3b32486b9fd | [] | no_license | saloni-080601/list.py | 4e942c6804e69fbf3343862630a8763a2fd8d13d | 195f8d44b0b433b4cb68533b5b473713efa93d46 | refs/heads/main | 2023-02-27T18:23:19.280321 | 2021-02-08T15:00:13 | 2021-02-08T15:00:13 | 337,108,971 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 56 | py | if(0):
print("pune")
if(1):
print("banglore") | [
"noreply@github.com"
] | saloni-080601.noreply@github.com |
bd1f68fd58bd51b632a26b906bdb6d7668e41a53 | be0d7c68aed692b35d3eb6351162a8590f400b49 | /send_email_attachment.py | 2c825c1e88c4d5318c30785bbc26d6b5d054cae9 | [] | no_license | hostpost114/email11 | fb3a9b5d1ea27a51b4555135b34e33a037d45830 | ef40ec4e181a5298ef11fe4cfb4fa16a02553671 | refs/heads/master | 2021-08-31T22:57:44.979698 | 2017-12-23T08:37:01 | 2017-12-23T08:37:01 | 115,182,555 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,120 | py | from POM.Testcase import *
from email.mime.text import MIMEText
from email.header import Header
from email.mime.multipart import MIMEMultipart
import smtplib
def sendReport(file_path):
    """Email the HTML test report at *file_path* as an attachment.

    The report is base64-encoded, attached under the name ``test.html`` and
    sent through the 163.com SMTP server.  Raises ``OSError`` if the report
    cannot be read and ``smtplib.SMTPException`` on mail failures.
    """
    # FIX: read the report with a context manager; the original called
    # open(...).read() and never closed the file handle.
    with open(file_path, 'rb') as report:
        sendfile = report.read()
    msg = MIMEText(sendfile, 'base64', 'utf-8')
    msg['Content-Type'] = 'application/octet-stream'
    # The recipient sees the attachment as "test.html".
    msg['Content-Disposition'] = 'attachment;filename=test.html'
    msgRoot = MIMEMultipart('related')
    msgRoot.attach(msg)
    msg['Subject'] = Header('自动化测试报告', 'utf-8')
    msg['From'] = '1552904****.com'   # sender address
    msg['To'] = 'lixianghah***.com'   # recipients, ';'-separated
    smtp = smtplib.SMTP('smtp.163.com', 25)
    smtp.set_debuglevel(1)
    # SECURITY NOTE: credentials are hard-coded; they should be moved to
    # environment variables or a config file outside version control.
    smtp.login('1552904****@163.com', 'li****')
    try:
        smtp.sendmail(msg['From'], msg['To'].split(';'), msg.as_string())
    finally:
        # FIX: always close the SMTP session, even if sendmail() raises.
        smtp.quit()
    print('test report has send out!')
def newReport(testReport):
    """Return the full path of the most recent report file in *testReport*.

    Directory entries are sorted in ascending order; because report names
    embed their creation time, the lexicographically last entry is the
    newest one.
    """
    entries = os.listdir(testReport)
    entries.sort()
    latest = os.path.join(testReport, entries[-1])
    print(latest)
    return latest
if __name__=="__main__":
    # Build a suite from the three Touch test cases.
    suit =unittest.TestSuite()
    x=[Touch('test000'),Touch('test001'), Touch('test002')]
    suit.addTests(x)
    # test_dir = 'E:\\unittest\\POM' # directory containing the test cases
    test_report = 'E:\\unittest\\POM' # directory where reports are written
    # suit.addTest(MyTestCase('test_something'),MyTestCase('test_wec'))
    runner = unittest.TextTestRunner(verbosity=2)
    # Run the suite through HTMLTestRunner so the results land in test.html.
    f =open('test.html','wb')
    runner =HTMLTestRunner(stream=f,
                           title=u'测试报告',
                           description=u'测试用例执行情况')
    runner.run(suit)
    new_report = newReport(test_report) # locate the newest report file
    sendReport(new_report) # email the newest report
    # sendReport(newReport(te
    # -st_report))
"noreply@github.com"
] | hostpost114.noreply@github.com |
e8c14d485310f9be29ffa773a8cdba7c76834de0 | 8149086db250c48854ba652859504611ae319623 | /raifaisen/spiders/spider.py | b4b37254e21ca07085ead38b0be7f4bbba0baf81 | [] | no_license | SimeonYS/raifaisen | d92ffbb6810642af915466dd456f4de16c67c86c | 7759de0c68239ee8f46b7b5a2607b06a28e60bfd | refs/heads/main | 2023-03-08T03:51:17.163267 | 2021-02-25T12:45:23 | 2021-02-25T12:45:23 | 342,241,862 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,296 | py | import re
import scrapy
from scrapy.loader import ItemLoader
from ..items import RaifaisenItem
from itemloaders.processors import TakeFirst
pattern = r'(\xa0)?'
class RaifaisenSpider(scrapy.Spider):
	"""Scrapes news articles from the Raiffeisenbank Bulgaria (rbb.bg) news section."""
	name = 'raifaisen'
	start_urls = ['https://www.rbb.bg/bg/za-bankata/novini-analizi/novini/']

	def parse(self, response):
		# Follow every "more" link on the news landing page to a listing page.
		post_links = response.xpath('//p/a[@class="more"]/@href').getall()
		yield from response.follow_all(post_links, self.parse_articles)

	def parse_articles(self, response):
		# Each listing page exposes a grid of individual article links.
		links = response.xpath('//div[@class="column gridModule x6 y12 z4"]/a/@href').getall()
		yield from response.follow_all(links, self.parse_post)

	def parse_post(self, response):
		# Extract article fields; body text is stripped, joined, and cleaned
		# of non-breaking spaces via the module-level `pattern` regex.
		date = response.xpath('//span[@class="news-title-align date"]/text()').get()
		title = response.xpath('//h1[@class="news-title-align"]/text()').get()
		content = response.xpath('//div[@class="text grid x6 y12 z4 "]//text()').getall()
		content = [p.strip() for p in content if p.strip()]
		content = re.sub(pattern, "",' '.join(content))
		item = ItemLoader(item=RaifaisenItem(), response=response)
		item.default_output_processor = TakeFirst()
		item.add_value('title', title)
		item.add_value('link', response.url)
		item.add_value('content', content)
		item.add_value('date', date)
		return item.load_item()
| [
"simeon.simeonov@ADPVT.com"
] | simeon.simeonov@ADPVT.com |
3e2d6fd45a7d19eacb05a80274ca310634c649ee | a25a7f46d5c3b0e90620607ab0288a416d68e37c | /tuplex/python/tests/test_tuples.py | 7d33748085aae6071ef1f963106da9dfb1540d97 | [
"Apache-2.0"
] | permissive | cacoderquan/tuplex | 886ec24a03e12135bb39e960cd247f7526c72917 | 9a919cbaeced536c2a20ba970c14f719a3b6c31e | refs/heads/master | 2023-08-21T06:10:15.902884 | 2021-10-25T16:41:57 | 2021-10-25T16:41:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,952 | py | #!/usr/bin/env python3
#----------------------------------------------------------------------------------------------------------------------#
# #
# Tuplex: Blazing Fast Python Data Science #
# #
# #
# (c) 2017 - 2021, Tuplex team #
# Created by Leonhard Spiegelberg first on 1/1/2021 #
# License: Apache 2.0 #
#----------------------------------------------------------------------------------------------------------------------#
import unittest
from tuplex import *
class TestTuples(unittest.TestCase):
    """Tests for tuple handling in Tuplex pipelines: empty, nested and
    mixed-type tuples, multi-parameter unpacking, slicing, and tuple
    expressions/assignments inside mapped functions."""

    def setUp(self):
        # Shared Context configuration; small memory limits keep tests light.
        self.conf = {"webui.enable" : False, "driverMemory" : "16MB", "partitionSize" : "256KB"}

    def testEmptyTupleI(self):
        c = Context(self.conf)
        res = c.parallelize([1, 2, 4]).map(lambda x: ()).collect()
        assert res == [(), (), ()]

    def testEmptyTupleII(self):
        c = Context(self.conf)
        res = c.parallelize([1, 2, 4]).map(lambda x: ()).collect()
        assert res == [(), (), ()]

    def testNestedEmptyTupleI(self):
        # Empty tuples may appear nested at arbitrary depth.
        c = Context(self.conf)
        res = c.parallelize([('hello', '', (), ('world', ()))]).collect()
        assert res == [('hello', '', (), ('world', ()))]

    def testNestedTuple(self):
        c = Context(self.conf)
        res = c.parallelize([(10, 20), (20, 40)]).map(lambda x: (x, x)).collect()
        assert res == [((10, 20), (10, 20)), ((20, 40), (20, 40))]

    def testTupleMixed(self):
        c = Context(self.conf)
        res = c.parallelize([1, 2, 4]).map(lambda x: (x, x+1, (), x * x)).collect()
        assert res == [(1, 2, (), 1), (2, 3, (), 4), (4, 5, (), 16)]

    def testTupleWithStrings(self):
        c = Context(self.conf)
        res = c.parallelize([(10, 'hello'), (20, 'world')]).map(lambda x: (x, 'test')).collect()
        assert res == [((10, 'hello'), 'test'), ((20, 'world'), 'test')]

    def testTupleMultiParamUnpacking(self):
        # A two-element row may be unpacked into two lambda parameters.
        c = Context(self.conf)
        res = c.parallelize([(10, 20), (40, 50)]).map(lambda a, b: a + b).collect()
        assert res == [30, 90]

    def testTupleMultiParamUnpackingII(self):
        c = Context(self.conf)
        res = c.parallelize([(10, (30, 40), 20), (40, (10, 20), 50)]).map(lambda a, b, c: b).map(lambda a, b: a + b).collect()
        assert res == [70, 30]

    def testTupleMultiLine(self):
        # make sure code extraction works over multiple lines
        c = Context(self.conf)
        res = c.parallelize([1, 3, 5]).map(lambda x: (x,
                                                      x+ 1)) \
            .collect()
        assert res == [(1, 2), (3, 4), (5, 6)]

    def testTupleSlice(self):
        # Exhaustively compare tuple slicing in the pipeline against native
        # Python slicing for positive/negative/oversized starts, stops, steps.
        c = Context(self.conf)
        testsets = [[(1, 2, 3, 4, 5, 6), (4, 5, 6, 7, 10, 11), (-10, -12, 0, -1, 2, 4)],
                    [((), ("hello",), 123, "oh no", (1, 2)), ((), ("goodbye",), 123, "yes", (-10, 2)),
                     ((), ("foobar",), 1443, "no", (100, 0))]]
        funcs = [lambda x: x[-10:], lambda x: x[:-10], lambda x: x[::-10],
                 lambda x: x[-2:], lambda x: x[:-2], lambda x: x[::-2],
                 lambda x: x[3:], lambda x: x[:3], lambda x: x[::3],
                 lambda x: x[1:], lambda x: x[:1], lambda x: x[::1],
                 lambda x: x[10:], lambda x: x[:10], lambda x: x[::10],
                 lambda x: x[-10:10:], lambda x: x[-10::10], lambda x: x[:-10:10],
                 lambda x: x[-10:-2:], lambda x: x[-10::-2], lambda x: x[:-10:-2],
                 lambda x: x[10:-2:], lambda x: x[10::-2], lambda x: x[:10:-2],
                 lambda x: x[-10:2:], lambda x: x[-10::2], lambda x: x[:-10:2],
                 lambda x: x[2:-10:], lambda x: x[2::-10], lambda x: x[:2:-10],
                 lambda x: x[-2:10:], lambda x: x[-2::10], lambda x: x[:-2:10],
                 lambda x: x[2:10:], lambda x: x[2::10], lambda x: x[:2:10],
                 lambda x: x[-2:-1:], lambda x: x[-2::-1], lambda x: x[:-2:-1],
                 lambda x: x[-3:4:], lambda x: x[-3::4], lambda x: x[:-3:4],
                 lambda x: x[-10:10:2], lambda x: x[-10:10:2], lambda x: x[-10:10:2],
                 lambda x: x[1:10:2], lambda x: x[1:10:2], lambda x: x[1:10:2],
                 lambda x: x[1:4:2], lambda x: x[1:4:2], lambda x: x[1:4:2],
                 lambda x: x[4:1:-2], lambda x: x[4:1:-2], lambda x: x[4:1:-2]]
        for testset in testsets:
            for func in funcs:
                res = c.parallelize(testset).map(func).collect()
                assert res == list(map(func, testset))

    def test_tupleExpr(self):
        # Tuple construction (trailing comma), swapping via simultaneous
        # assignment, and unpacking inside mapped functions.
        c = Context(self.conf)
        def f(x):
            return x,
        res = c.parallelize([1, 2, 3]).map(f).collect()
        assert res == [(1,), (2,), (3,)]
        def swapI(a, b):
            return b, a
        res = c.parallelize([('a', 1), ('b', 2)]).map(swapI).collect()
        assert res == [(1, 'a'), (2, 'b')]
        def swapII(x):
            b, a = x
            y = a, b
            return y
        res = c.parallelize([('a', 1), ('b', 2)]).map(swapII).collect()
        assert res == [(1, 'a'), (2, 'b')]
        def swapIII(x):
            a = x[0]
            b = x[1]
            b, a = a, b
            return a, b
        res = c.parallelize([('a', 1), ('b', 2)]).map(swapIII).collect()
        assert res == [(1, 'a'), (2, 'b')]
"leonhard_spiegelberg@brown.edu"
] | leonhard_spiegelberg@brown.edu |
37d2c522d2eca6075e2423a82c941a5473c531b0 | f11e6eb27d3bbdf04a1b0adfae919305b67e9a6b | /venv/Scripts/pip3-script.py | 2323793b139a2faa3094e8e201ae37bc17d2ee97 | [] | no_license | fahadthakur/AlgoLab | f38cac53fcff8773bcb68ed850f34f02e00eb145 | 69570f405a878966b401020e98aba26dc95e6ea8 | refs/heads/master | 2023-04-02T08:53:24.308927 | 2021-03-31T19:07:33 | 2021-03-31T19:07:33 | 348,786,601 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 409 | py | #!C:\Users\fahad\PycharmProjects\Lab\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==19.0.3','console_scripts','pip3'
# Auto-generated setuptools console-script wrapper: resolves and invokes the
# 'pip3' entry point of pip 19.0.3.  Do not edit by hand.
__requires__ = 'pip==19.0.3'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
    # Strip the '-script.py'/'.exe' suffix so argv[0] looks like plain 'pip3'.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('pip==19.0.3', 'console_scripts', 'pip3')()
    )
| [
"fahad.thakur@yahoo.com"
] | fahad.thakur@yahoo.com |
05376f8c4ce3409af02503c818b4339204529bfd | 7fda584e768e9da8ff5fbe39457eae7c3070f617 | /configs/_base_/datasets/fashion_bboxmask_instance_albu.py | 22851d662a197fcc2c54ed4353e5a4d2ff3662aa | [
"Apache-2.0"
] | permissive | jireh-father/mmdetection | 8a680606b567327357a8af487ae61aa98b2c4ed9 | b797e4d5b81c5a3d7d868ee2dc9aa27dbab23e7d | refs/heads/master | 2023-04-18T01:44:17.149179 | 2021-05-07T07:36:59 | 2021-05-07T07:36:59 | 284,387,557 | 0 | 0 | Apache-2.0 | 2020-08-02T04:06:57 | 2020-08-02T04:06:56 | null | UTF-8 | Python | false | false | 7,426 | py | dataset_type = 'FashionDataset'
data_root = 'data/coco/'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
# train_pipeline = [
# dict(type='LoadImageFromFile'),
# dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
# dict(type='Resize', img_scale=(800, 800), keep_ratio=True),
# dict(type='RandomFlip', flip_ratio=0.5),
# dict(type='Normalize', **img_norm_cfg),
# dict(type='Pad', size_divisor=32),
# dict(type='DefaultFormatBundle'),
# dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
# ]
# test_pipeline = [
# dict(type='LoadImageFromFile'),
# dict(
# type='MultiScaleFlipAug',
# img_scale=(800, 800),
# flip=False,
# transforms=[
# dict(type='Resize', keep_ratio=True),
# dict(type='RandomFlip'),
# dict(type='Normalize', **img_norm_cfg),
# dict(type='Pad', size_divisor=32),
# dict(type='ImageToTensor', keys=['img']),
# dict(type='Collect', keys=['img']),
# ])
# ]
#
train_transforms = [
dict(type='ShiftScaleRotate', border_mode=0,
shift_limit=[-0.15, 0.15], scale_limit=[-0.15, 0.15], rotate_limit=[-13, 13], p=0.3),
dict(type='RandomResizedCrop',
height=800,
width=800,
scale=[0.99, 1.0],
ratio=[0.9, 1.1],
interpolation=0,
p=0.5),
dict(type='Resize', height=800, width=800, p=1.0),
# dict(type='OneOf',
# transforms=[
# dict(type='ShiftScaleRotate', border_mode=0,
# shift_limit=[-0.2, 0.2], scale_limit=[-0.2, 0.2], rotate_limit=[-20, 20], p=0.9),
# dict(type='OpticalDistortion', border_mode=0,
# distort_limit=[-0.5, 0.5], shift_limit=[-0.5, 0.5], p=0.9),
# dict(type='GridDistortion', num_steps=5, distort_limit=[-0., 0.3], border_mode=0, p=0.9),
# dict(type='ElasticTransform', border_mode=0, p=0.9),
# dict(type='IAAPerspective', p=0.9),
# ], p=0.01),
# dict(type='Rotate', limit=[-25, 25], border_mode=0, p=0.1),
# dict(type='OneOf',
# transforms=[
# # dict(type='HueSaturationValue',
# # hue_shift_limit=[-20, 20],
# # sat_shift_limit=[-30, 30],
# # val_shift_limit=[-20, 20], p=0.9),
# dict(type='RandomBrightnessContrast',
# brightness_limit=[-0.3, 0.3],
# contrast_limit=[-0.3, 0.3], p=0.5)
# ], p=0.1),
dict(type='RandomBrightnessContrast',
brightness_limit=[-0.3, 0.3],
contrast_limit=[-0.3, 0.3], p=0.5),
# dict(type='OneOf',
# transforms=[
# dict(type='MotionBlur', blur_limit=[3, 7], p=0.9),
# dict(type='Blur', blur_limit=[3, 7], p=0.9),
# dict(type='MedianBlur', blur_limit=[3, 7], p=0.9),
# dict(type='GaussianBlur', blur_limit=[3, 7], p=0.9),
# ], p=0.05),
# dict(type='OneOf',
# transforms=[
# dict(type='RandomGamma', gamma_limit=[30, 150], p=0.9),
# dict(type='RGBShift', p=0.9),
# dict(type='CLAHE', clip_limit=[1, 15], p=0.9),
# dict(type='ChannelShuffle', p=0.9),
# dict(type='In9vertImg', p=0.9),
# ], p=0.1),
# dict(type='OneOf',
# transforms=[
# dict(type='RandomSnow', p=0.9),
# dict(type='RandomRain', p=0.9),
# dict(type='RandomFog', p=0.9),
# dict(type='RandomSunFlare', num_flare_circles_lower=1, num_flare_circles_upper=2, src_radius=110, p=0.9),
# dict(type='RandomShadow', p=0.9),
# ], p=0.1),
dict(type='OneOf',
transforms=[
dict(type='GaussNoise', var_limit=[1, 10], p=0.5),
dict(type='ISONoise', color_shift=[0.01, 0.06], p=0.5),
# dict(type='MultiplicativeNoise', p=0.9),
], p=0.25),
# dict(type='ToSepia', p=0.05),
# dict(type='Solarize', p=0.05),
# dict(type='Equalize', p=0.05),
# dict(type='Posterize', p=0.05),
# dict(type='FancyPCA', p=0.05),
# dict(type='HorizontalFlip', p=0.2),
# dict(type='VerticalFlip', p=0.2),
# dict(type='GridDropout', p=0.05),
# dict(type='ChannelDropout', p=0.05),
dict(type='Cutout', max_h_size=64, p=0.05),
# dict(type='Downscale', scale_min=0.5, scale_max=0.8, p=0.05),
# dict(type='ImageCompression', quality_lower=60, p=0.05),
dict(type='JpegCompression', quality_lower=80, quality_upper=99, p=0.05)
]
val_transforms = [
dict(type='Resize', height=800, width=800, p=1.0)
]
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='Albu',
transforms=train_transforms,
bbox_params=dict(
type='BboxParams',
format='pascal_voc',
label_fields=['gt_labels'],
min_visibility=0.0,
filter_lost_elements=True),
keymap={
'img': 'image',
'gt_masks': 'masks',
'gt_bboxes': 'bboxes'
},
update_pad_shape=False,
skip_img_without_anno=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='DefaultFormatBundle'),
dict(
type='Collect',
keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks'],
# meta_keys=['filename', 'ori_filename', 'ori_shape',
# 'img_shape', 'pad_shape', 'scale_factor', 'img_norm_cfg']
),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(800, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
# classes = ('top',
# 'blouse',
# 't-shirt',
# 'Knitted fabric',
# 'shirt',
# 'bra top',
# 'hood',
# 'blue jeans',
# 'pants',
# 'skirt',
# 'leggings',
# 'jogger pants',
# 'coat',
# 'jacket',
# 'jumper',
# 'padding jacket',
# 'best',
# 'kadigan',
# 'zip up',
# 'dress',
# 'jumpsuit')
data = dict(
samples_per_gpu=3,
workers_per_gpu=3,
train=dict(
type=dataset_type,
ann_file=data_root + 'fashion/train_total.json',
img_prefix=data_root + 'fashion/train_images',
# classes=classes,
pipeline=train_pipeline),
val=dict(
type=dataset_type,
ann_file=data_root + 'fashion/val_split.json',
img_prefix=data_root + 'fashion/train_images',
# classes=classes,
pipeline=test_pipeline),
test=dict(
type=dataset_type,
ann_file=data_root + 'fashion/test_pubilc.json',
img_prefix=data_root + 'fashion/test_images',
# classes=classes,
pipeline=test_pipeline))
evaluation = dict(metric=['bbox', 'segm'])
| [
"seoilgun@gmail.com"
] | seoilgun@gmail.com |
8df8a00d707454c8c50565abe748ef42d38e74dc | 9b0aa059c43741b4f2e0ba18b3e2a70a9485a3d9 | /src/content_ncf/ncf_local_keras_run.py | 1775e02634b10036229e8ea62377ef28e95e0b59 | [] | no_license | ZWP-FlyZ/PaperMethod | bd5f3fcfa1e4eb5286bc7111ea53c400e51c83ef | e385e1f7d64aec4bb4bd9b681fdb69f918871260 | refs/heads/master | 2020-04-14T10:05:51.546114 | 2019-11-05T22:52:03 | 2019-11-05T22:52:03 | 163,777,559 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,178 | py | # -*- coding: utf-8 -*-
'''
Created on 2019年1月15日
@author: zwp
'''
import numpy as np;
import time;
import math;
import os;
from tools import SysCheck;
from content_ncf.ncf_param import NcfTraParm,NcfCreParam;
from content_ncf.ncf_models import simple_ncf,simple_ncf_pp,simple_ncf_bl,simple_ncf_local
from tools.fwrite import fwrite_append
from content_ncf import localtools;
base_path = r'E:/work';
if SysCheck.check()=='l':
base_path='/home/zwp/work'
origin_data = base_path+'/rtdata.txt';
spas = [1,2,3,4];
case = [1,2,3,4,5];
def run(spa,case):
    """Run one local-NCF training experiment for sparseness level *spa* and
    data split *case*; returns the resulting (mae, nmae) pair."""
    # Dataset / cache / result paths derived from sparseness and case number.
    train_path = base_path+'/Dataset/ws/train_n/sparseness%.1f/training%d.txt'%(spa,case);
    test_path = base_path+'/Dataset/ws/test_n/sparseness%.1f/test%d.txt'%(spa,case);
    cache_path = base_path+'/Dataset/ncf_values/spa%.1f_case%d.h5'%(spa,case);
    result_file= './result/ws_local_spa%.1f_case%d.txt'%(spa,case);
    # NOTE(review): `dbug_paht` (sic) is never used below — leftover debug path?
    dbug_paht = 'E:/work/Dataset/wst64/rtdata1.txt';
    loc_classes = base_path+'/Dataset/ws/localinfo/ws_content_classif_out.txt';
    print('开始实验,稀疏度=%.1f,case=%d'%(spa,case));
    # Load training data and the service classification.
    print ('加载训练数据开始');
    now = time.time();
    trdata = np.loadtxt(train_path, dtype=float);
    ser_class = localtools.load_classif(loc_classes);
    classiy_size = len(ser_class);
    n = np.alen(trdata);
    print ('加载训练数据完成,耗时 %.2f秒,数据总条数%d \n'%((time.time() - now),n));
    # Load test data.
    print ('加载测试数据开始');
    tnow = time.time();
    ttrdata = np.loadtxt(test_path, dtype=float);
    tn = np.alen(ttrdata);
    print ('加载测试数据完成,耗时 %.2f秒,数据总条数%d \n'%((time.time() - tnow),tn));
    # Split both datasets by service class.
    print ('分类数据集开始');
    tnow = time.time();
    train_sets = localtools.data_split_class(ser_class, trdata);
    test_sets = localtools.data_split_class(ser_class, ttrdata);
    print ('分类数据集结束,耗时 %.2f秒 \n'%((time.time() - tnow)));
    # Model-creation and training hyper-parameters.
    cp = NcfCreParam();
    tp = NcfTraParm();
    cp.us_shape=(339,5825);  # (users, services) matrix shape
    cp.hid_feat=32;
    cp.hid_units=[32,12];
    cp.drop_p=0
    cp.reg_p=0.000001
    # Build the user-service invocation record matrix R and per-user /
    # per-service means, then binarise R to 0/1 "invoked" indicators.
    R = np.zeros(cp.us_shape);
    u = trdata[:,0].astype(np.int32);
    s = trdata[:,1].astype(np.int32);
    R[u,s]=trdata[:,2];
    umean = np.sum(R,axis=1)/np.count_nonzero(R, axis=1);
    smean = np.sum(R,axis=0)/np.count_nonzero(R, axis=0);
    R[np.where(R>0)]=1.0;
    # print(umean);
    # print(smean);
    # us_invked = [];
    # for cla in ser_class:
    #     hot = np.zeros([cp.us_shape[1]],np.float32);
    #     hot[cla]=1.0;
    #     usi = R*hot;
    #     nonzeroes = np.sqrt(np.count_nonzero(usi, axis=1));
    #     noz = np.divide(1.0,nonzeroes,
    #             out=np.zeros_like(nonzeroes),where=nonzeroes!=0);
    #     noz = np.reshape(noz,[-1,1]);
    #     us_invked.append((usi*noz).astype(np.float32));
    tp.train_data=train_sets;
    tp.test_data=test_sets;
    tp.epoch=15;
    # NOTE(review): `tp.batchsize` is assigned here but `tp.batch_size` is
    # read below for lr_decy_step — likely a typo; confirm NcfTraParm's
    # attribute name.
    tp.batchsize=5;
    '''
    Adagrad lr 0.05 zui hao
    RMSprop lr 0.005
    '''
    tp.learn_rate=[0.045]*classiy_size;
    tp.lr_decy_rate=1.0
    tp.lr_decy_step=int(n/tp.batch_size);
    tp.cache_rec_path=cache_path;
    tp.result_file_path= result_file;
    tp.load_cache_rec=False;
    tp.classif_size = classiy_size;
    tp.us_invked= R;
    tp.umean=umean;
    tp.smean=smean;
    # Train the local NCF model and report MAE / NMAE.
    print ('训练模型开始');
    tnow = time.time();
    model = simple_ncf_local(cp);
    mae,nmae = model.train(tp);
    print ('训练模型结束,耗时 %.2f秒 \n'%((time.time() - tnow)));
    print('实验结束,总耗时 %.2f秒,稀疏度=%.1f\n'%((time.time()-now),spa));
    return mae,nmae;
if __name__ == '__main__':
    # For each sparseness level, average MAE/NMAE over all cases and append
    # the summary line to the result log.
    for sp in spas:
        s = 0;s2=0;cot=0;
        for ca in case:
            for i in range(1):
                mae,nmae = run(sp,ca);
                s+=mae;
                s2+=nmae;
                cot+=1;
        out_s = 'spa=%.1f mae=%.6f nmae=%.6f time=%s'%(sp,s/cot,s2/cot,time.asctime());
        print(out_s);
        fwrite_append('./simple_ncf_local.txt',out_s);
"1258271806@qq.com"
] | 1258271806@qq.com |
7e2f652de9a2956e4af33dbb82fb0ae541e0e190 | 0a900c26f163cbcb592b94a88ced63cd751f6f74 | /projects/cgp_update_cluster/sandbox/payload_dict_test.py | bc00aee3f53172d999d544c7e634a98060d76dd8 | [] | no_license | choco1911/try_py | b56e7818498b848cbb4f79a6f434c00597041f91 | 5ca2960402d28cc30ffbefb420032450a8d3e4a0 | refs/heads/master | 2021-01-12T05:35:49.177898 | 2017-02-09T16:45:17 | 2017-02-09T16:45:17 | 77,140,438 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,756 | py | #!/usr/bin/env python
# Builds a cluster-update POST payload dictionary and prints it.
# (Python 2 script — note the print statement at the bottom.)
# NOTE(review): the field names look like a CommuniGate Pro admin form
# submission — verify against the target endpoint.
ipaddr = '10.10.10.10'
hosts = [ 'mail1110','mail1111', 'mail1112', 'mail1113' ]
fallback_ip = '81.19.78.105'
# Static form fields: ports, cache sizes, log levels and mode switches.
payload = {'FormCharset' : 'utf-8',
           'ControlAddress' : '[{0}]'.format(ipaddr),
           'BackendAddresses': '',
           'FrontendAddresses': '',
           'LogLevel': '3',
           'BalancerGroup': '',
           'DeliveryPort': '25',
           'DeliveryCache': '10',
           'SubmitPort': '25',
           'SubmitCache': '5',
           'POPPort': '110',
           'IMAPPort': '143',
           'HTTPUserPort': '8100',
           'HTTPAdminPort': '8010',
           'XIMSSPort': '11024',
           'XMPPPort': '5222',
           'PWDPort': '106',
           'ACAPPort': '674',
           'LDAPPort': '389',
           'AdminLogLevel': '2',
           'AdminCache': '5',
           'MailboxLogLevel': '5',
           'MailboxCache': '10',
           'SingleImageLogLevel': '3',
           'SubmitMode': 'Locally',
           'SubmitLogLevel': '2',
           'SIPFarmMode': 'Auto',
           'SignalHostMode': 'Auto',
           'LegHostMode': 'Auto',
           'HTTPClientMode': 'Auto',
           'RPOPClientMode': 'Auto'
           }
# One (o<i>, k<i>, v<i>) triple per host: index, hostname, fallback address;
# StaticElem collects the indices as strings.
counter = 0
payload['StaticElem'] = []
for item in hosts:
    payload['StaticElem'].append(str(counter))
    payload['o{0}'.format(counter)] = str(counter)
    payload['k{0}'.format(counter)] = item
    payload['v{0}'.format(counter)] = fallback_ip
    counter += 1
payload['DirectoryCluster'] = '1'
payload['Update'] = 'Update'
# Dump the final payload for inspection.
for key, val in payload.items():
    print key, val
| [
"choco@rambler-co.ru"
] | choco@rambler-co.ru |
c428ecf8b458671cc12801d8204292a9aa603e7a | 67063668c781d5b1fd1c1d69c79a045ed4f10176 | /python/openpyxl/chart/title.py | 9fa3953fc203ac0c01d734738cb4995f4c7977d9 | [
"MIT"
] | permissive | jrockway/tichu-tournament | eacb08b2dd502578fec3780d692648f0d58e90f4 | 6335b8fab89b76c42ac5a078176a500a11f0e4ff | refs/heads/master | 2020-03-09T21:09:31.431690 | 2018-04-19T23:31:44 | 2018-04-19T23:31:44 | 129,002,367 | 0 | 0 | MIT | 2018-04-10T22:31:00 | 2018-04-10T22:31:00 | null | UTF-8 | Python | false | false | 1,927 | py | from __future__ import absolute_import
# Copyright (c) 2010-2016 openpyxl
from openpyxl.compat import basestring
from openpyxl.descriptors.serialisable import Serialisable
from openpyxl.descriptors import (
Typed,
Alias,
)
from openpyxl.descriptors.excel import ExtensionList
from openpyxl.descriptors.nested import NestedBool
from .text import Text, RichTextProperties
from .layout import Layout
from .shapes import GraphicalProperties
from openpyxl.drawing.text import (
Paragraph,
RegularTextRun,
LineBreak
)
class Title(Serialisable):
    """Chart title element (serialised as ``<title>``).

    Holds the title text (``tx``), optional manual layout, overlay flag and
    shape/text formatting properties.
    """

    tagname = "title"

    tx = Typed(expected_type=Text, allow_none=True)
    text = Alias('tx')
    layout = Typed(expected_type=Layout, allow_none=True)
    overlay = NestedBool(allow_none=True)
    spPr = Typed(expected_type=GraphicalProperties, allow_none=True)
    graphicalProperties = Alias('spPr')
    txPr = Typed(expected_type=RichTextProperties, allow_none=True)
    body = Alias('txPr')
    extLst = Typed(expected_type=ExtensionList, allow_none=True)

    # Child elements serialised, in schema order (extLst is not serialised).
    __elements__ = ('tx', 'layout', 'overlay', 'spPr', 'txPr')

    def __init__(self,
                 tx=None,
                 layout=None,
                 overlay=None,
                 spPr=None,
                 txPr=None,
                 extLst=None,
                ):
        # Default to an empty Text container so title.tx is always usable.
        if tx is None:
            tx = Text()
        self.tx = tx
        self.layout = layout
        self.overlay = overlay
        self.spPr = spPr
        self.txPr = txPr
def title_maker(text):
    """Build a Title whose rich text holds one paragraph per line of *text*."""
    result = Title()
    paragraphs = []
    for line in text.split("\n"):
        paragraphs.append(Paragraph(r=RegularTextRun(t=line)))
    result.tx.rich.paragraphs = paragraphs
    return result
class TitleDescriptor(Typed):
    """Typed descriptor that coerces plain strings into Title objects
    on assignment, so ``chart.title = "My title"`` works."""

    expected_type = Title
    allow_none = True

    def __set__(self, instance, value):
        # Strings are wrapped into a Title via title_maker before validation.
        if isinstance(value, basestring):
            value = title_maker(value)
        super(TitleDescriptor, self).__set__(instance, value)
| [
"teytanna@gmail.com"
] | teytanna@gmail.com |
43ad314c5e321a994a1c786c96fbcc21036b6ac7 | 9bc9885e9500083afc2cd6be4ff93ee2eb4fbfbb | /neuropower/apps/designtoolbox/views.py | 5b3530d4d6effaed83a6444d7d86a7299c986994 | [
"MIT"
] | permissive | jokedurnez/neuropower | 50297af01bef55fe2c01355f038a9d184cde493d | ed8c1cf29d447b41dfbfbc7a8345443454e62a96 | refs/heads/master | 2021-01-15T08:36:45.191330 | 2016-11-20T00:56:30 | 2016-11-20T00:56:30 | 51,338,446 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 23,978 | py | from __future__ import unicode_literals
import sys
sys.path = sys.path[1:]
from django.http import HttpResponseRedirect, HttpResponse
from django.shortcuts import render, render_to_response
from django.core.mail import send_mail
from django.conf import settings
from scipy.stats import norm, t
import os
from django.contrib.sessions.backends.db import SessionStore
from utils import get_session_id, probs_and_cons, get_design_steps, weights_html, combine_nested,textify_code
from .forms import DesignMainForm, DesignConsForm, DesignReviewForm, DesignWeightsForm, DesignProbsForm, DesignOptionsForm, DesignRunForm, DesignDownloadForm, ContactForm, DesignNestedForm,DesignNestedConsForm, DesignSureForm, DesignMailForm, DesignCodeForm
from .models import DesignModel
from .tasks import GeneticAlgorithm
import numpy as np
import time
import json
import pandas as pd
import csv
import zipfile
import StringIO
import shutil
import urllib2
from celery import task
from celery.task.control import revoke, inspect
from celery.result import AsyncResult
def end_session(request):
    """End the current session (revoking any running task and deleting its
    database entry) so the user can start a new one."""
    # Get the session ID and database entry
    sid = get_session_id(request)
    try:
        desdata = DesignModel.objects.get(SID=sid)
        # Kill the celery task attached to this session, if any.
        revoke(desdata.taskID, terminate=True, signal='KILL')
    # BUG FIX: `except KeyError or DoesNotExist` evaluated to
    # `except KeyError` only (and `DoesNotExist` was an unresolved bare
    # name), so a missing DesignModel row crashed the view.  Catch both
    # exception types explicitly.
    except (KeyError, DesignModel.DoesNotExist):
        pass
    try:
        request.session.flush()
    except KeyError:
        pass
    try:
        DesignModel.objects.filter(SID=sid).delete()
    except KeyError:
        pass
    return start(request, end_session=True)
def DFAQ(request):
    """Render the design-toolbox FAQ page."""
    return render(request, "design/DFAQ.html", {})
def tutorial(request):
    """Render the design-toolbox tutorial page."""
    return render(request, "design/tutorial.html", {})
def methods(request):
    """Render the design-toolbox methods page."""
    return render(request, "design/methods.html", {})
def package(request):
    """Render the page describing the neurodesign Python package."""
    return render(request, "design/pythonpackage.html", {})
def start(request, end_session=False):
    """Landing page of the design toolbox; also handles the feedback form.

    On a valid POST the feedback is relayed by email through the Mailgun
    HTTP API (via curl) and a thank-you flag is added to the context.
    """
    # Get the template/step status
    template = "design/start.html"
    context = {}

    # Get the session ID and database entry
    sid = get_session_id(request)
    context["steps"] = get_design_steps(template, sid)

    # Contact form: bound on POST, unbound otherwise.
    fbform = ContactForm(request.POST or None)
    context["form"] = fbform

    if request.method == "POST" and fbform.is_valid():
        sender = fbform.cleaned_data['contact_name']
        sendermail = fbform.cleaned_data['contact_email']
        message = fbform.cleaned_data['content']
        key = settings.MAILGUN_KEY

        # SECURITY FIX: the previous implementation concatenated user-supplied
        # form values into a shell string passed to os.system(), allowing
        # shell injection (and breaking on quotes).  Invoke curl with an
        # argument list instead, so no shell interprets the input.
        import subprocess
        subprocess.call([
            "curl", "-s", "--user", key,
            "https://api.mailgun.net/v3/neuropowertools.org/messages",
            "-F", "from={0} <{1}>".format(sender, sendermail),
            "-F", "to=joke.durnez@gmail.com",
            "-F", "subject=design toolbox feedback",
            "-F", "text={0}".format(message),
        ])
        context['thanks'] = True

    return render(request, template, context)
def maininput(request):
    """Main parameter input page.

    GET (or invalid POST): render the form bound to any existing DesignModel
    for this session.  Valid POST: persist the main parameters, create the
    session's working folder and file paths, normalise the optimisation
    weights and resolution, convert the duration to seconds, and redirect to
    either the nested-design page or the contrasts page.
    """
    # Get the template/step status
    template = "design/input.html"
    context = {}
    # Get the session ID and database entry
    sid = get_session_id(request)
    context["steps"] = get_design_steps(template, sid)
    try:
        desdata = DesignModel.objects.get(SID=sid)
    except DesignModel.DoesNotExist:
        desdata = None
    # Define form
    inputform = DesignMainForm(request.POST or None, instance=desdata)
    # NOTE(review): `end_session` here is the module-level view function, so
    # this comparison is always False and the message is never shown.  The
    # flag is a parameter of start(), not of this view -- confirm intent.
    if end_session == True:
        context["message"] = "Session has been successfully reset."
    # If page was not result of POST or not valid: show form with db entries
    # Else: go to next page
    if not request.method == "POST" or not inputform.is_valid():
        context["inputform"] = inputform
        return render(request, template, context)
    else:
        # initial save
        form = inputform.save(commit=False)
        form.shareID = sid
        form.SID = sid
        form.mainpars = True
        # All per-session artifacts (design, metrics, status, generated
        # script) live under MEDIA_ROOT/design_<sid>/.
        form.onsetsfolder = os.path.join(settings.MEDIA_ROOT, "design_"+str(sid))
        form.desfile = os.path.join(form.onsetsfolder, "design.json")
        form.genfile = os.path.join(form.onsetsfolder,"metrics.json")
        form.statusfile = os.path.join(form.onsetsfolder,"status.txt")
        form.codefilename = "GeneticAlgorithm_"+str(sid)+".py"
        form.codefile = os.path.join(form.onsetsfolder, form.codefilename)
        form.save()
        if not os.path.exists(form.onsetsfolder):
            os.mkdir(form.onsetsfolder)
        # if os.path.exists(form.onsetsfolder):
        #     files = os.listdir(form.onsetsfolder)
        #     for f in files:
        #         if os.path.isdir(os.path.join(form.onsetsfolder,f)):
        #             shutil.rmtree(os.path.join(form.onsetsfolder,f))
        #         else:
        #             os.remove(os.path.join(form.onsetsfolder,f))
        # else:
        #     os.mkdir(form.onsetsfolder)
        # get data and change parameters
        desdata = DesignModel.objects.get(SID=sid)
        weightsform = DesignWeightsForm(None, instance=desdata)
        weightsform = weightsform.save(commit=False)
        # Normalise the four optimisation weights to sum to 1.
        W = np.array([desdata.W1, desdata.W2, desdata.W3, desdata.W4])
        if np.sum(W) != 1:
            W = W / np.sum(W)
        weightsform.W = W
        # Force the resolution to divide the TR exactly.
        if not desdata.TR%desdata.resolution == 0:
            resfact = np.ceil(desdata.TR/desdata.resolution)
            weightsform.resolution = desdata.TR/resfact
        # get duration in seconds (duration_unit 2 = minutes, 1 = seconds)
        if desdata.duration_unitfree:
            if desdata.duration_unit == 2:
                weightsform.duration = desdata.duration_unitfree*60
            elif desdata.duration_unit == 1:
                weightsform.duration = desdata.duration_unitfree
        weightsform.save()
        # A nested design needs the number of classes before continuing.
        if desdata.nested and desdata.nest_classes == None:
            context['message'] = "For a nested design, please specify the number of classes."
            context["inputform"] = inputform
            return render(request, "design/input.html", context)
        if desdata.nested:
            return HttpResponseRedirect('../nested/')
        else:
            return HttpResponseRedirect('../consinput/')
def nested(request):
    """Input page for nested designs: collects the class (group) structure
    of the stimuli and validates it before moving on to the contrasts page.

    Redirects back to the main input page when no DesignModel exists yet for
    this session.
    """
    # Get the template/step status
    template = "design/nested.html"
    context = {}
    # Get the session ID and database entry
    sid = get_session_id(request)
    context["steps"] = get_design_steps(template, sid)
    try:
        desdata = DesignModel.objects.get(SID=sid)
    except DesignModel.DoesNotExist:
        return HttpResponseRedirect('../maininput/')
    # Define form.  (An unused DesignMainForm instantiation was removed here:
    # it was created on every request but never read.)
    nestedform = DesignNestedForm(
        request.POST or None, instance=desdata, stim=desdata.S)
    # If page was not a POST, or the form is invalid: show form with db entries
    # Else: validate the structure and go to the next page
    if not request.method == "POST" or not nestedform.is_valid():
        context["nestedform"] = nestedform
        return render(request, template, context)
    else:
        form = nestedform.save(commit=False)
        form.SID = sid
        form.nestpars = True
        form.save()
        # get data and change parameters
        matrices = combine_nested(sid)
        if matrices['empty'] == True:
            context['message'] = "Please fill out all stimuli"
            context["nestedform"] = DesignNestedForm(
                request.POST or None, instance=desdata, stim=desdata.S)
            return render(request, "design/nested.html", context)
        # The structure may not reference more classes than were declared.
        if np.max(matrices['G']) > desdata.nest_classes:
            context['message'] = "There are more classes than was specified in the previous screen."
            context["nestedform"] = DesignNestedForm(
                request.POST or None, instance=desdata, stim=desdata.S)
            return render(request, "design/nested.html", context)
        form.nest_structure = matrices['G']
        form.save()
        return HttpResponseRedirect('../consinput/')
def consinput(request):
    """Contrasts / stimulus-probabilities input page.

    Chooses between the nested and flat contrast forms based on
    ``desdata.nested``; on a valid POST, stores the probability matrix P and
    contrast matrix C and redirects to the review page.
    """
    # Get the template/step status
    template = "design/cons.html"
    context = {}
    # Get the session ID and database entry
    sid = get_session_id(request)
    context["steps"] = get_design_steps(template, sid)
    try:
        desdata = DesignModel.objects.get(SID=sid)
    except DesignModel.DoesNotExist:
        return HttpResponseRedirect('../maininput/')
    # Define form
    # If page was result of POST or not valid: show form with db entries
    # Else: go to next page
    if desdata.nested == True:
        # Build, per class, the list of stimulus labels ('P0'..'P9') that
        # belong to it, from the stored nest_structure class assignments.
        # NOTE: xrange is Python-2-only, consistent with this module's
        # StringIO/urllib2 imports.
        a = np.array(['P0','P1','P2','P3','P4','P5','P6','P7','P8','P9'])
        b = np.array(desdata.nest_structure)
        Pmat = [a[b==(i+1)].tolist() for i in xrange(desdata.nest_classes)]
        consform = DesignNestedConsForm(
            request.POST or None, instance=desdata, stim=desdata.S, cons=desdata.Clen, structure=Pmat, classes=desdata.nest_structure)
    else:
        consform = DesignConsForm(
            request.POST or None, instance=desdata, stim=desdata.S, cons=desdata.Clen)
    if not request.method == "POST":
        context["consform"] = consform
        return render(request, template, context)
    else:
        form = consform.save(commit=False)
        form.SID = sid
        form.conpars = True
        form.save()
        # get data and change parameters
        consform = DesignProbsForm(None, instance=desdata)
        consform = consform.save(commit=False)
        matrices = probs_and_cons(sid)
        if matrices['empty'] == True:
            context['message'] = "Please fill out all probabilities and contrasts"
            context["consform"] = DesignConsForm(
                request.POST or None, instance=desdata, stim=desdata.S, cons=desdata.Clen)
            return render(request, "design/cons.html", context)
        consform.P = matrices['P']
        consform.C = matrices['C']
        # Hard frequency limits need a larger generation (G) and more random
        # designs per generation (I).
        if desdata.HardProb == True:
            consform.G = 200
            consform.I = 100
        consform.save()
        return HttpResponseRedirect('../review/')
def review(request):
    """Summary page shown before launching the genetic algorithm.

    Builds HTML summaries of the probabilities, contrasts and weights, adds
    warnings for configurations that will be slow to optimise, and on POST
    redirects to the runGA page.
    """
    # Get the template/step status
    template = "design/review.html"
    context = {}
    # Get the session ID and database entry
    sid = get_session_id(request)
    context["steps"] = get_design_steps(template, sid)
    try:
        desdata = DesignModel.objects.get(SID=sid)
    except DesignModel.DoesNotExist:
        return HttpResponseRedirect('../maininput/')
    # Define form
    revform = DesignReviewForm(request.POST or None, instance=desdata)
    context["revform"] = revform
    # Set summary variables in context
    matrices = probs_and_cons(sid)
    context["Phtml"] = matrices["Phtml"]
    context["Chtml"] = matrices["Chtml"]
    context["Whtml"] = weights_html(desdata.W)
    context['desdata'] = desdata
    context["message"] = ""
    if desdata.HardProb == True:
        context["message"] = context["message"] + \
            "<br><p><b>Warning:</b> Because of the hard limit on the frequencies, we increased the size of the generation and the number of random designs per generation. This might slow down the optimisation. </p>"
    if desdata.MaxRepeat < 10 and desdata.S == 2:
        context["message"] = context["message"] + "<br><p><b>Warning:</b> With only 2 stimuli, many random designs have repetitions larger than " + \
            str(desdata.MaxRepeat) + \
            ". We increased the number of random designs per generation, but this might slow down the optimisation. </p>"
    # BUG FIX: this condition originally read desdata.Restnum / desdata.Resdur,
    # inconsistent with the RestNum / RestDur attributes used below, so it
    # raised AttributeError whenever the preceding conditions were all true.
    if desdata.S>5 and desdata.L>200 and desdata.ITImax>3 and (desdata.RestNum<30 and desdata.RestDur>30) and desdata.C.shape[0]>5:
        context['message'] = context['message']+"<br><p><b>Warning:</b>This is a long and complex design. Be aware that the optimisation will take a <b>long</b> time.</p>"
    # Duration.  mean/dur are pre-initialised so an unexpected ITImodel value
    # (or neither L nor duration being set) cannot raise NameError below.
    mean = 0.
    dur = 0.
    if desdata.ITImodel == 1:
        context['ITImodel'] = "fixed"
        mean = desdata.ITIfixed
        context['ITI'] = "The ITI's are equal to "+str(mean)+" seconds."
    elif desdata.ITImodel == 2:
        context['ITImodel'] = 'truncated exponential'
        mean = desdata.ITItruncmean
        context['ITI'] = "The ITI's are between "+str(desdata.ITItruncmin)+" and "+str(desdata.ITItruncmax)+" seconds and on average "+str(mean)+" seconds."
    elif desdata.ITImodel == 3:
        context['ITImodel'] = 'uniform'
        mean = (desdata.ITIunifmin+desdata.ITIunifmax)/2.
        context['ITI'] = "The ITI's are between "+str(desdata.ITIunifmin)+" and "+str(desdata.ITIunifmax)+" seconds and on average "+str(mean)+" seconds."
    if desdata.L:
        dur = mean*desdata.L+desdata.RestNum*desdata.RestDur
    elif desdata.duration:
        dur = desdata.duration
    # (Typo fix in the user-facing message: "longer dan" -> "longer than".)
    if dur > 1800:
        context['message'] = context['message'] + "<p><b>Warning:</b> The run you request is longer than 30 minutes. This optimisation will take <b>a long</b> time. You could set the resolution lower, or split the experiment in multiple shorter runs. Or you could grab a coffee and wait a few hours for the optimisation to complete.</p>"
    # If page was result of POST: show summary
    # Else: go to next page
    if not request.method == "POST":
        return render(request, template, context)
    else:
        form = revform.save(commit=False)
        form.SID = sid
        form.save()
        return HttpResponseRedirect('../runGA/')
def options(request):
    """Advanced settings page for the genetic algorithm.

    Renders the options form (bound to the session's DesignModel when one
    exists); on POST saves the options and redirects to the review page.
    """
    # Get the template/step status
    template = "design/options.html"
    context = {}
    # Get the session ID and database entry
    sid = get_session_id(request)
    context["steps"] = get_design_steps(template, sid)
    try:
        desdata = DesignModel.objects.get(SID=sid)
    except DesignModel.DoesNotExist:
        # No design yet for this session: present an unbound form below.
        desdata = None
    # IDIOM FIX: an explicit None sentinel replaces the original fragile
    # `if "desdata" in locals()` check.
    if desdata is not None:
        opsform = DesignOptionsForm(request.POST or None, instance=desdata)
    else:
        opsform = DesignOptionsForm(request.POST or None)
    context["opsform"] = opsform
    # If page was result of POST: show summary
    # Else: go to next page
    if not request.method == "POST":
        return render(request, template, context)
    else:
        form = opsform.save(commit=False)
        form.SID = sid
        form.save()
        return HttpResponseRedirect('../review/')
def runGA(request):
    """Main optimisation page: queue, monitor, stop, and download results of
    the genetic-algorithm Celery task for the current session.

    GET renders the current task status (taskstatus codes: 0 idle, 1 pending,
    2 running, 3 finished/failed) plus any available metrics/design JSON for
    visualisation.  POST handles the form buttons: submit email, Run, Stop,
    confirmation of a re-run, and the two download actions.
    """
    # Get the template/step status
    template = "design/runGA.html"
    context = {}
    # Celery queue occupancy for display; inspect() returns None when no
    # workers respond.
    if not inspect().reserved() == None:
        context['tasks_queued'] = len(list(inspect().reserved().values())[0])
        context['tasks_running'] = float(len(list(inspect().active().values())[0]))/settings.CELERYD_CONCURRENCY
    else:
        context['tasks_queued'] = 0
        context['tasks_running'] = 0
    # Get the session ID
    sid = get_session_id(request)
    context["steps"] = get_design_steps(template, sid)
    # retrieve session information: a ?retrieve=<shareID> link re-attaches a
    # shared design to the current session.
    retrieve_id = request.GET.get('retrieve','')
    if retrieve_id:
        desdata = DesignModel.objects.get(shareID=retrieve_id)
        desdata.SID=sid
        desdata.save()
        context["steps"] = get_design_steps(template, sid)
    try:
        desdata = DesignModel.objects.get(SID=sid)
        context['no_data'] = False
    except DesignModel.DoesNotExist:
        context['no_data']=True
        return render(request, template, context)
    # Do we know email?
    mailform = DesignMailForm(request.POST or None, instance=desdata)
    runform = DesignRunForm(request.POST, instance=desdata)
    if not desdata.email:
        context["mailform"] = mailform
    else:
        context['runform'] = runform
    # check status of job: map the Celery task state onto the model's
    # numeric taskstatus field.
    form = runform.save(commit=False)
    if desdata.taskID:
        task = AsyncResult(desdata.taskID)
        if task.status == "PENDING":
            form.taskstatus = 1
            form.running = 0
        elif task.status == "STARTED":
            form.taskstatus = 2
        elif ((task.status == "RETRY"
               or task.status == "FAILURE"
               or task.status == "SUCCESS")):
            form.taskstatus = 3
            form.running = 0
        else:
            form.taskstatus = 0
            form.running = 0
    else:
        form.taskstatus = 0
        form.running = 0
    form.save()
    # pass results for visualisation (metrics and design JSON written by the
    # worker; a ValueError means the file is mid-write/incomplete).
    if os.path.isfile(desdata.genfile):
        jsonfile = open(desdata.genfile).read()
        try:
            data = json.loads(jsonfile)
            data = json.dumps(data)
            context['optim'] = data
        except ValueError:
            pass
    if os.path.isfile(desdata.desfile):
        jsonfile = open(desdata.desfile).read()
        try:
            data = json.loads(jsonfile)
            data = json.dumps(data)
            context['design'] = data
            context['stim'] = desdata.S
        except ValueError:
            pass
    # show downloadform if results are available
    desdata = DesignModel.objects.get(SID=sid)
    if desdata.taskstatus == 3:
        downform = DesignDownloadForm(
            request.POST or None, instance=desdata)
        context["downform"] = downform
    if desdata.taskstatus>1:
        codeform = DesignCodeForm(
            request.POST or None, instance=desdata)
        context['codeform'] = codeform
    # Responsive loop
    if request.method == "POST":
        someonesure = False
        # if mail is given
        if request.POST.get("Mail") == "Submit":
            if mailform.is_valid():
                email=mailform.cleaned_data['email']
                name=mailform.cleaned_data['name']
                desdata = DesignModel.objects.get(SID=sid)
                runform = DesignRunForm(None, instance=desdata)
                form = runform.save(commit=False)
                form.email = email
                form.name = name
                form.taskID = ""
                form.save()
                desdata = DesignModel.objects.get(SID=sid)
                context['mailform'] = None
                context['runform'] = runform
                return render(request, template, context)
        # If stop is requested
        if request.POST.get("GA") == "Stop":
            if not (desdata.taskstatus == 2 or desdata.taskstatus == 1):
                context['message'] = "You want to stop the optimisation, but nothing is running."
            else:
                revoke(desdata.taskID,terminate=True,signal='KILL')
                desdata = DesignModel.objects.get(SID=sid)
                runform = DesignRunForm(None, instance=desdata)
                form = runform.save(commit=False)
                form.taskstatus = 0
                form.taskID = ""
                form.save()
                context["message"] = "The optimisation has been terminated."
            return render(request, template, context)
        # The user confirmed overwriting a finished run: reset the task state.
        if request.POST.get("Sure") == "I'm sure about this":
            someonesure = True
            desdata = DesignModel.objects.get(SID=sid)
            runform = DesignRunForm(None, instance=desdata)
            form = runform.save(commit=False)
            form.taskstatus = 0
            form.taskID = ""
            form.convergence = False
            form.save()
        # If run is requested
        if request.POST.get("GA") == "Run" or someonesure:
            desdata = DesignModel.objects.get(SID=sid)
            if desdata.taskstatus > 0:
                if desdata.taskstatus == 1:
                    context['message'] = "There is already an optimisation process queued. You can only queue or run one design optimisation at a time."
                elif desdata.taskstatus == 2:
                    context['message'] = "There is already an optimisation process running. You can only queue or run one design optimisation at a time."
                elif desdata.taskstatus == 3:
                    context['sure'] = True
                    sureform = DesignSureForm(
                        request.POST or None, instance=desdata)
                    context['sureform'] = sureform
                return render(request, template, context)
            else:
                # Queue the genetic algorithm as a Celery task.
                desdata = DesignModel.objects.get(SID=sid)
                runform = DesignRunForm(None, instance=desdata)
                res = GeneticAlgorithm.delay(sid)
                form = runform.save(commit=False)
                form.taskID = res.task_id
                form.save()
                desdata = DesignModel.objects.get(SID=sid)
                context['refresh'] = True
                context['status'] = "PENDING"
                context['message'] = "Job succesfully submitted."
                return render(request, template, context)
        # If request = download
        if request.POST.get("Code") == "Download script":
            cmd = textify_code(sid)
            desdata = DesignModel.objects.get(SID=sid)
            resp = HttpResponse(
                cmd
            )
            resp['Content-Disposition'] = 'attachment; filename=%s' % desdata.codefilename
            return resp
        # If request = download
        if request.POST.get("Download") == "Download optimal sequence":
            desdata = DesignModel.objects.get(SID=sid)
            print(desdata.zipfile)
            resp = HttpResponse(
                desdata.zipfile.getvalue(),
                content_type="application/x-zip-compressed"
            )
            resp['Content-Disposition'] = 'attachment; filename=%s' % desdata.zip_filename
            return resp
    else:
        #check_status: the worker writes progress markers to the status file
        # ("Fe" = efficiency pre-run, "Fd" = power pre-run, "optimalisation"
        # = main optimisation).
        desdata = DesignModel.objects.get(SID=sid)
        runform = DesignRunForm(None, instance=desdata)
        form = runform.save(commit=False)
        if os.path.isfile(desdata.statusfile):
            f = open(desdata.statusfile)
            status = f.read()
            f.close()
            if status == "Fe":
                form.running = 2
            elif status == "Fd":
                form.running = 3
            elif status == "optimalisation":
                form.running = 4
        form.save()
        desdata = DesignModel.objects.get(SID=sid)
        context["preruns"] = desdata.preruncycles
        context["runs"] = desdata.cycles
        context["refrun"] = desdata.running
        context['status'] = "NOT RUNNING"
        if desdata.taskstatus==1:
            context['status'] = "PENDING"
        if desdata.taskstatus==2:
            context['status'] = "RUNNING"
        if desdata.preruncycles<1000 or desdata.cycles<1000 or desdata.resolution>0.2:
            context['alert'] = "Please be aware that the number of iterations for the optimisation is low. These values are perfect for trying out the application but the results will be sub-optimal. For a good optimisation, go to the settings and change the number of runs and preruns and the resolution. Some reasonable values are: 10,000 preruns, 10,000 runs and a resolution of 0.1s."
        if desdata.taskstatus==3:
            context['refrun'] = 5
            context['status'] = "STOPPED"
        context["message"] = ""
        if desdata.running == 1:
            context["message"] = "Design optimisation initiated."
        elif desdata.running == 2:
            context["message"] = "Running first pre-run to find maximum efficiency."
        elif desdata.running == 3:
            context["message"] = "Running second pre-run to find maximum power."
        elif desdata.running == 4:
            context["message"] = "Running design optimisation."
        elif desdata.taskstatus == 3 and desdata.convergence:
            context['message'] = 'Design optimisation finished after convergence.'
        elif desdata.taskstatus == 3:
            context['message'] = 'Design optimisation finished, convergence not reached. Consider increasing the number of generations.'
    return render(request, template, context)
def updatepage(request):
    """Render the auto-refresh helper page."""
    template = "design/updatepage.html"
    return render(request, template, {})
| [
"joke.durnez@gmail.com"
] | joke.durnez@gmail.com |
e02603bd18ba6e3032c486b19f7236a2e134ec78 | 472bf8355b2722f24c623200b056546f35577428 | /MIT-intrinsic/intrinsic.py | 3dc1f9c7cf4769bc7c29405f1058571044e19570 | [] | no_license | alexch1/ImageProcessing | 10373a32651bb70b74a86b839941eb02b72285a6 | f1ebde878e63d9a49aab9fd71d296c9039ae34f9 | refs/heads/master | 2021-07-04T16:27:41.439316 | 2021-05-28T09:19:00 | 2021-05-28T18:18:49 | 56,864,246 | 5 | 1 | null | null | null | null | UTF-8 | Python | false | false | 10,519 | py | import itertools
import numpy as np
import os
import png
import sys
import poisson
############################### Data ###########################################
def load_png(fname):
    """Load a PNG file as a float NumPy array scaled by 1/255.

    Color images come back as (h, w, 3); anything else is left as the
    (h, w) row-stacked array returned by PyPNG.
    """
    reader = png.Reader(fname)
    w, h, pngdata, params = reader.read()
    # FIX: itertools.imap is Python-2-only; a list comprehension gives
    # np.vstack a proper sequence on both Python 2 and 3.
    image = np.vstack([np.uint16(row) for row in pngdata])
    if image.size == 3*w*h:
        image = np.reshape(image, (h, w, 3))
    return image.astype(float) / 255.
def load_object_helper(tag, condition):
    """Load an image of a given object as a NumPy array. The values condition may take are:
    'mask', 'original', 'diffuse', 'shading', 'reflectance', 'specular'
    'shading' returns a grayscale image, and all the other options return color images."""
    valid_conditions = ('mask', 'original', 'diffuse', 'shading', 'reflectance', 'specular')
    assert condition in valid_conditions
    obj_dir = os.path.join('data', tag)
    # Every condition maps directly to "<condition>.png" in the object folder.
    filename = os.path.join(obj_dir, '%s.png' % condition)
    image = load_png(filename)
    # The mask alone is thresholded to a boolean array.
    if condition == 'mask':
        return (image > 0)
    return image
# cache for efficiency because PyPNG is pure Python
cache = {}

def load_object(tag, condition):
    """Memoized wrapper around load_object_helper."""
    key = (tag, condition)
    if key not in cache:
        cache[key] = load_object_helper(tag, condition)
    return cache[key]
def load_multiple(tag):
    """Load the images of a given object for all lighting conditions. Returns an
    m x n x 3 x 10 NumPy array, where the third dimension is the color channel and
    the fourth dimension is the image number."""
    obj_dir = os.path.join('data', tag)
    # Use the first image only to size the output array.
    first = load_png(os.path.join(obj_dir, 'light01.png'))
    result = np.zeros(first.shape + (10,))
    for i in range(10):
        fname = os.path.join(obj_dir, 'light%02d.png' % (i+1))
        result[:,:,:,i] = load_png(fname)
    return result
############################# Error metric #####################################
def ssq_error(correct, estimate, mask):
    """Compute the sum-squared-error for an image, where the estimate is
    multiplied by a scalar which minimizes the error. Sums over all pixels
    where mask is True. If the inputs are color, each color channel can be
    rescaled independently."""
    assert correct.ndim == 2
    # Least-squares optimal scale factor; guard against a (near-)zero
    # estimate, for which any scale gives the same error.
    denom = np.sum(estimate**2 * mask)
    if denom > 1e-5:
        alpha = np.sum(correct * estimate * mask) / denom
    else:
        alpha = 0.
    residual = correct - alpha * estimate
    return np.sum(mask * residual**2)
def local_error(correct, estimate, mask, window_size, window_shift):
    """Returns the sum of the local sum-squared-errors, where the estimate may
    be rescaled within each local region to minimize the error. The windows are
    window_size x window_size, and they are spaced by window_shift."""
    M, N = correct.shape[:2]
    ssq = total = 0.
    for i in range(0, M - window_size + 1, window_shift):
        for j in range(0, N - window_size + 1, window_shift):
            correct_curr = correct[i:i+window_size, j:j+window_size]
            estimate_curr = estimate[i:i+window_size, j:j+window_size]
            mask_curr = mask[i:i+window_size, j:j+window_size]
            ssq += ssq_error(correct_curr, estimate_curr, mask_curr)
            total += np.sum(mask_curr * correct_curr**2)
    # FIX: the original `assert -np.isnan(...)` relied on the long-removed
    # numpy behavior where unary minus on a boolean meant logical NOT; on
    # modern numpy it raises TypeError.  `not` expresses the intent directly.
    assert not np.isnan(ssq / total)
    return ssq / total
def score_image(true_shading, true_refl, estimate_shading, estimate_refl, mask, window_size=20):
    """Equal-weighted mean of the local shading and reflectance errors."""
    shift = window_size // 2
    shading_err = local_error(true_shading, estimate_shading, mask, window_size, shift)
    refl_err = local_error(true_refl, estimate_refl, mask, window_size, shift)
    return 0.5 * shading_err + 0.5 * refl_err
################################## Algorithms ##################################
def retinex(image, mask, threshold, L1=False):
    """Grayscale Retinex: gradients of the log image larger than `threshold`
    are treated as reflectance gradients and reintegrated with a Poisson
    solver; the remainder is attributed to shading.

    Returns (shading, reflectance)."""
    image = np.clip(image, 3., np.infty)
    log_image = np.where(mask, np.log(image), 0.)
    i_y, i_x = poisson.get_gradients(log_image)
    # Zero out small gradients: they are assumed to come from shading.
    r_y = np.where(np.abs(i_y) > threshold, i_y, 0.)
    r_x = np.where(np.abs(i_x) > threshold, i_x, 0.)
    solver = poisson.solve_L1 if L1 else poisson.solve
    log_refl = solver(r_y, r_x, mask)
    refl = mask * np.exp(log_refl)
    shading = np.where(mask, image / refl, 0.)
    return shading, refl
def project_gray(i_y):
    """Project per-channel gradients onto the grayscale (equal-channel)
    subspace: every channel is replaced by the across-channel mean.

    Returns an array of the same shape as `i_y`.
    """
    i_y_mean = np.mean(i_y, axis=2)
    # Broadcasting replaces the original explicit per-channel copy loop
    # (and generalizes from hard-coded 3 channels to i_y.shape[2]).
    return np.repeat(i_y_mean[:, :, np.newaxis], i_y.shape[2], axis=2)
def project_chromaticity(i_y):
    """Residual of the gradients after removing the grayscale component."""
    gray_component = project_gray(i_y)
    return i_y - gray_component
def color_retinex(image, mask, threshold_gray, threshold_color, L1=False):
    """Color Retinex: a gradient counts as a reflectance edge when either its
    chromaticity magnitude exceeds `threshold_color` or its grayscale
    component exceeds `threshold_gray`; the grayscale reflectance is then
    reintegrated with a Poisson solver.

    Returns (shading, reflectance), both grayscale.
    """
    image = np.clip(image, 3., np.infty)
    log_image = np.log(image)
    i_y_orig, i_x_orig = poisson.get_gradients(log_image)
    # Split the color log-gradients into grayscale and chromaticity parts.
    i_y_gray, i_y_color = project_gray(i_y_orig), project_chromaticity(i_y_orig)
    i_x_gray, i_x_color = project_gray(i_x_orig), project_chromaticity(i_x_orig)
    # Grayscale gradients used for the actual reintegration.
    image_grayscale = np.mean(image, axis=2)
    image_grayscale = np.clip(image_grayscale, 3., np.infty)
    log_image_grayscale = np.log(image_grayscale)
    i_y, i_x = poisson.get_gradients(log_image_grayscale)
    # `+` on the two boolean arrays acts as an element-wise logical OR.
    norm = np.sqrt(np.sum(i_y_color**2, axis=2))
    i_y_match = (norm > threshold_color) + (np.abs(i_y_gray[:,:,0]) > threshold_gray)
    norm = np.sqrt(np.sum(i_x_color**2, axis=2))
    i_x_match = (norm > threshold_color) + (np.abs(i_x_gray[:,:,0]) > threshold_gray)
    # Keep only the matching gradients as reflectance gradients.
    r_y = np.where(i_y_match, i_y, 0.)
    r_x = np.where(i_x_match, i_x, 0.)
    if L1:
        log_refl = poisson.solve_L1(r_y, r_x, mask)
    else:
        log_refl = poisson.solve(r_y, r_x, mask)
    refl = np.exp(log_refl)
    return image_grayscale / refl, refl
def weiss(image, multi_images, mask, L1=False):
    """Weiss's multi-image algorithm: the reflectance gradient at each pixel
    is the median of the log-gradients across the lighting conditions.

    Returns (shading, reflectance)."""
    multi_images = np.clip(multi_images, 3., np.infty)
    log_multi_images = np.log(multi_images)
    i_y_all, i_x_all = poisson.get_gradients(log_multi_images)
    # Median across the lighting-condition axis.
    r_y = np.median(i_y_all, axis=2)
    r_x = np.median(i_x_all, axis=2)
    solver = poisson.solve_L1 if L1 else poisson.solve
    log_refl = solver(r_y, r_x, mask)
    refl = np.where(mask, np.exp(log_refl), 0.)
    shading = np.where(mask, image / refl, 0.)
    return shading, refl
def weiss_retinex(image, multi_images, mask, threshold, L1=False):
    """Weiss + Retinex hybrid: median log-gradients across the lighting
    conditions, thresholded as in Retinex before reintegration.

    Returns (shading, reflectance)."""
    multi_images = np.clip(multi_images, 3., np.infty)
    log_multi_images = np.log(multi_images)
    i_y_all, i_x_all = poisson.get_gradients(log_multi_images)
    r_y = np.median(i_y_all, axis=2)
    r_x = np.median(i_x_all, axis=2)
    # Suppress small gradients (attributed to shading).
    r_y = np.where(np.abs(r_y) > threshold, r_y, 0.)
    r_x = np.where(np.abs(r_x) > threshold, r_x, 0.)
    solver = poisson.solve_L1 if L1 else poisson.solve
    log_refl = solver(r_y, r_x, mask)
    refl = np.where(mask, np.exp(log_refl), 0.)
    shading = np.where(mask, image / refl, 0.)
    return shading, refl
#################### Wrapper classes for experiments ###########################
class BaselineEstimator:
    """Assume every image is entirely shading or entirely reflectance."""

    def __init__(self, mode, L1=False):
        assert mode in ['refl', 'shading']
        self.mode = mode

    def estimate_shading_refl(self, image, mask, L1=False):
        """Return (shading, reflectance): one is the image itself, the other
        is constant 1 inside the mask."""
        flat = 1. * mask
        if self.mode == 'refl':
            return flat, image
        return image, flat

    @staticmethod
    def get_input(tag):
        """Load the grayscale diffuse image and mask for `tag`."""
        image = np.mean(load_object(tag, 'diffuse'), axis=2)
        mask = load_object(tag, 'mask')
        return image, mask

    @staticmethod
    def param_choices():
        return [{'mode': m} for m in ['shading', 'refl']]
class GrayscaleRetinexEstimator:
    """Wrapper running grayscale Retinex with a fixed gradient threshold."""

    def __init__(self, threshold):
        self.threshold = threshold

    def estimate_shading_refl(self, image, mask, L1=False):
        return retinex(image, mask, self.threshold, L1)

    @staticmethod
    def get_input(tag):
        """Load the grayscale diffuse image and mask for `tag`."""
        image = np.mean(load_object(tag, 'diffuse'), axis=2)
        mask = load_object(tag, 'mask')
        return image, mask

    @staticmethod
    def param_choices():
        # 15 thresholds, logarithmically spaced over [1e-3, 10].
        return [{'threshold': t} for t in np.logspace(-3., 1., 15)]
class ColorRetinexEstimator:
    """Wrapper running color Retinex with separate gray/color thresholds."""

    def __init__(self, threshold_gray, threshold_color, L1=False):
        self.threshold_gray = threshold_gray
        self.threshold_color = threshold_color

    def estimate_shading_refl(self, image, mask, L1=False):
        return color_retinex(image, mask, self.threshold_gray,
                             self.threshold_color, L1)

    @staticmethod
    def get_input(tag):
        """Load the color diffuse image and mask for `tag`."""
        image = load_object(tag, 'diffuse')
        mask = load_object(tag, 'mask')
        return image, mask

    @staticmethod
    def param_choices():
        # 5 x 5 grid over both thresholds.
        grid = np.logspace(-1.5, 0., 5)
        return [{'threshold_gray': tg, 'threshold_color': tc}
                for tg in grid
                for tc in grid]
class WeissEstimator:
    """Wrapper for Weiss's multi-image algorithm (no free parameters)."""

    def estimate_shading_refl(self, image, multi_images, mask, L1=False):
        return weiss(image, multi_images, mask, L1)

    @staticmethod
    def get_input(tag):
        """Load grayscale diffuse image, all lighting conditions, and mask."""
        image = np.mean(load_object(tag, 'diffuse'), axis=2)
        mask = load_object(tag, 'mask')
        multi_images = np.mean(load_multiple(tag), axis=2)
        return image, multi_images, mask

    @staticmethod
    def param_choices():
        # No hyperparameters: a single empty configuration.
        return [{}]
class WeissRetinexEstimator:
    """Wrapper for the Weiss + Retinex hybrid with a gradient threshold."""

    def __init__(self, threshold=0.1, L1=False):
        self.threshold = threshold

    def estimate_shading_refl(self, image, multi_images, mask, L1=False):
        return weiss_retinex(image, multi_images, mask, self.threshold, L1)

    @staticmethod
    def get_input(tag):
        """Load grayscale diffuse image, all lighting conditions, and mask."""
        image = np.mean(load_object(tag, 'diffuse'), axis=2)
        mask = load_object(tag, 'mask')
        multi_images = np.mean(load_multiple(tag), axis=2)
        return image, multi_images, mask

    @staticmethod
    def param_choices():
        # 15 thresholds, logarithmically spaced over [1e-3, 10].
        return [{'threshold': t} for t in np.logspace(-3., 1., 15)]
| [
"chiji.zju@gmail.com"
] | chiji.zju@gmail.com |
20ed33eabf7df2fd4277edf6b17d110ebfd6cf26 | e7208f2e6c03c171b18708991b30829095284d1c | /class7/napalm_ex4.py | f178a29de4811bc03fb52c5b476baf0fb459662c | [
"Apache-2.0"
] | permissive | TechButton/python_course | 409a333fad447c1803d4f483e9bff00bafaf7473 | db288d2978b3d244a0d7d51a79dfbb5afc5dcbe8 | refs/heads/master | 2020-03-30T21:06:28.724938 | 2018-09-19T18:15:49 | 2018-09-19T18:15:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,374 | py | #!/usr/bin/env python
"""
Using NAPALM retrieve get_interfaces from Arista switch2 (pynet_sw2). Find all of the interfaces
that are in an UP-UP state (is_enabled=True, and is_up=True). Print all of these UP-UP interfaces
to standard output.
"""
from __future__ import print_function, unicode_literals
from napalm import get_network_driver
from my_devices import pynet_sw2
def check_up_up(intf_data):
    """Return True if the interface is administratively enabled and
    operationally up (a NAPALM ``get_interfaces`` entry), else False."""
    # A single boolean expression replaces the if/return True/return False
    # pattern; bool() keeps the return type an exact bool.
    return bool(intf_data['is_enabled'] and intf_data['is_up'])
def main():
    """Retrieve get_interfaces and find the up-up interfaces."""
    for a_device in (pynet_sw2,):
        device_type = a_device.pop('device_type')
        driver = get_network_driver(device_type)
        device = driver(**a_device)
        print()
        print(">>>Device open")
        device.open()
        try:
            print("-" * 50)
            hostname = a_device['hostname']
            print("{hostname}:\n".format(hostname=hostname))
            intf_info = device.get_interfaces()
            print()
            print("UP-UP Interfaces: ")
            print("-" * 50)
            # Collect the interfaces that are enabled AND operationally up.
            intf_list = [name for name, data in intf_info.items()
                         if check_up_up(data)]
            # Sort by name
            intf_list.sort()
            for intf in intf_list:
                print(intf)
            print()
        finally:
            # FIX: the original never closed the NAPALM connection; close it
            # even if get_interfaces() raises.
            device.close()


if __name__ == "__main__":
    main()
| [
"ktbyers@twb-tech.com"
] | ktbyers@twb-tech.com |
d7844e8081cdb62e35c0b53dcb1ea87afd876e07 | 749fa5a348def37a142ba15b3665ff1525c50321 | /randomcrop.py | abbe0218a85f029623bf4a99b554fd3492349a03 | [] | no_license | rahmakaichi1/Object_Detection | 163bd5ed19fa6e8ae9704d093c216734142c80a8 | aecd6346cf5baf94e1ecac0c4df42d1c5a254d4e | refs/heads/master | 2022-10-31T04:25:18.713807 | 2019-08-23T12:19:23 | 2019-08-23T12:19:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 670 | py | import random
TRAIN ="/content/Tensorflow_object_detector/workspace/training_OD/images/reseizedImages"
for file_name in os.listdir(TRAIN):
print("Processing %s" % file_name)
im = Image.open(os.path.join(TRAIN, file_name))
size = im.size
print(size)
w = size[0]
h = size[1]
l = random.randint(0,w)
u = random.randint(0, h)
print(l,u)
new_size = (l , u, w + l , h + u)
print(new_size)
region = im.crop(new_size)
#save crops in a specific folder
output_dir = "/content/Tensorflow_object_detector/workspace/training_OD/images/crops"
output_file_name = os.path.join(output_dir, "cropped_" + file_name)
region.save(output_file_name) | [
"rahma.akaichi@ensi-uma.tn"
] | rahma.akaichi@ensi-uma.tn |
a6c28494304e936c6871917367b1b10d2ad4be4a | ee1f83c070c2564c3e3d2f9a9fbd30be5fec79fa | /roach2_production_test/defs_max16071.py | 8ff5b39f8f58d0b58b91b2d4f1d37b71bb8346f9 | [] | no_license | ska-sa/roach2_testing | c7d5dac789fbe5da236140ec502c17f2520d09bd | 79afd1e0c43d559f51316ab71b006f1dd8db584f | refs/heads/master | 2021-06-21T20:32:04.667098 | 2021-02-12T16:08:57 | 2021-02-12T16:08:57 | 4,830,832 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,092 | py | #defs_max16071.py
#I2C command
MON_1_MSB = 0x00
MON_1_LSB = 0x01
MON_2_MSB = 0x02
MON_2_LSB = 0x03
MON_3_MSB = 0x04
MON_3_LSB = 0x05
MON_4_MSB = 0x06
MON_4_LSB = 0x07
MON_5_MSB = 0x08
MON_5_LSB = 0x09
MON_6_MSB = 0x0a
MON_6_LSB = 0x0b
MON_7_MSB = 0x0c
MON_7_LSB = 0x0d
MON_8_MSB = 0x0e
MON_8_LSB = 0x0f
MON_C = 0x18
MON_CSP_MSB = 0x19
MON_CSP_LSB = 0x1a
GPIO_INPUT_STATE = 0x1e
GPIO_CONF_123 = 0x3f
GPIO_CONF_3456 = 0x40
GPIO_CONF_678 = 0x41
ADC_CONF_4321 = 0x43
ADC_CONF_8765 = 0x44
OCPT_CSC1 = 0x47
SW_EN_CONF = 0x73
CH_ARRAY = (MON_1_MSB, MON_1_LSB, MON_2_MSB, MON_2_LSB, MON_3_MSB, MON_3_LSB, MON_4_MSB, MON_4_LSB, MON_5_MSB, MON_5_LSB, MON_6_MSB, MON_6_LSB, MON_7_MSB, MON_7_LSB, MON_8_MSB, MON_8_LSB)
#Current sense gain lookup table
CURR_SNS_GAIN = 6,12,24,48
#Full-scale ADC range lookup table
ADC_RNG = 5.6,2.8,1.4,0.0
#Current sense resistors
SNS_RES_12V = 0.005
SNS_RES_5V0 = 0.002
SNS_RES_3V3 = 0.002
SNS_RES_2V5 = 0.005
SNS_RES_1V8 = 0.002
SNS_RES_1V5 = 0.001
SNS_RES_1V0 = 0.00025
SNS_RES = (SNS_RES_3V3, SNS_RES_2V5, SNS_RES_1V8, SNS_RES_1V5, SNS_RES_1V0)
#INA333 Amplifier gain resistors
GAIN_RES_3V3_REV1 = 200.0
GAIN_RES_2V5_REV1 = 499.0
GAIN_RES_1V8_REV1 = 200.0
GAIN_RES_1V5_REV1 = 100.0
GAIN_RES_1V0_REV1 = 100.0
GAIN_RES_REV1 = (GAIN_RES_3V3_REV1, GAIN_RES_2V5_REV1, GAIN_RES_1V8_REV1, GAIN_RES_1V5_REV1, GAIN_RES_1V0_REV1)
GAIN_RES_3V3_REV2 = 1200.0
GAIN_RES_2V5_REV2 = 768.0
GAIN_RES_1V8_REV2 = 1200.0
GAIN_RES_1V5_REV2 = 1200.0
GAIN_RES_1V0_REV2 = 470.0
GAIN_RES_REV2 = (GAIN_RES_3V3_REV2, GAIN_RES_2V5_REV2, GAIN_RES_1V8_REV2, GAIN_RES_1V5_REV2, GAIN_RES_1V0_REV2)
#12V voltage dividor
V_DIV_12V = (19600 + 10000)/10000.0
#Configuration
#Overcurrent secondary threshold deglitch = 4ms, Watchdog timer boots after
#sequence completes, Early warning is undervoltage, Margin mode disabled, Enabled
SW_EN_CONF_VAL = 0x21
#Overcurrent primary threshold and current-sense gain setting = 25mV & 48V/V,
#CSP full-scale range is 14V, Current sense enabled.
OCPT_CSC1_VAL = 0x0F
#ADC full-scale config, all to 5.6V
ADC_CONF_4321_VAL = 0x00
ADC_CONF_8765_VAL = 0x00
| [
"alec.rust@ska.ac.za"
] | alec.rust@ska.ac.za |
18290b371f497fa771429eaeacde68b0d9a9af76 | e57d7785276053332c633b57f6925c90ad660580 | /sdk/identity/azure-identity/tests/recording_processors.py | 3ddba5cbd7d720352a254f4d3c3d9a3220f6d4c9 | [
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] | permissive | adriananeci/azure-sdk-for-python | 0d560308497616a563b6afecbb494a88535da4c5 | b2bdfe659210998d6d479e73b133b6c51eb2c009 | refs/heads/main | 2023-08-18T11:12:21.271042 | 2021-09-10T18:48:44 | 2021-09-10T18:48:44 | 405,684,423 | 1 | 0 | MIT | 2021-09-12T15:51:51 | 2021-09-12T15:51:50 | null | UTF-8 | Python | false | false | 3,603 | py | # ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
import base64
import binascii
import hashlib
import json
import time
from azure_devtools.scenario_tests import RecordingProcessor
import six
from helpers import FAKE_CLIENT_ID
ID_TOKEN_PII_CLAIMS = ("email", "name", "preferred_username", "unique_name")
SECRET_FIELDS = frozenset(
{
"access_token",
"client_secret",
"code",
"device_code",
"message",
"password",
"refresh_token",
"user_code",
}
)
# managed identity headers are not dangerous to record but redacting them prevents anyone worrying whether they are
SECRET_HEADERS = frozenset(
{
"secret",
"X-IDENTITY-SECRET",
}
)
def set_jwt_claims(jwt, claims):
    """Return *jwt* with each ``(name, value)`` pair in *claims* overwriting
    the payload claim of the same name; claims absent from the payload are
    ignored.

    The header and signature segments pass through unchanged, so the
    signature no longer matches the rewritten payload -- acceptable here
    because the result is only embedded in sanitized test recordings.
    """
    header, encoded_payload, signed = jwt.split(".")
    # Re-add the "=" padding that JWT segments strip before decoding.
    # NOTE(review): JWT segments are base64url (RFC 7515); standard b64decode
    # only differs for payloads containing "-"/"_" characters -- confirm none
    # of the recorded tokens hit that case.
    decoded_payload = base64.b64decode(encoded_payload + "=" * (-len(encoded_payload) % 4))
    # json.loads accepts bytes directly on Python 3, so the old six.ensure_str
    # compatibility shim is unnecessary.
    payload = json.loads(decoded_payload)
    for name, value in claims:
        if name in payload:
            payload[name] = value
    new_payload = json.dumps(payload).encode("utf-8")
    return ".".join((header, base64.b64encode(new_payload).decode("utf-8"), signed))
class RecordingRedactor(RecordingProcessor):
    """Removes authentication secrets from recordings.
    :keyword bool record_unique_values: Defaults to False. Set True for tests requiring unique, consistent fake values.
    """
    def __init__(self, record_unique_values=False):
        super(RecordingRedactor, self).__init__()
        # When True, redacted values carry a short sha256-derived suffix so
        # distinct secrets remain distinguishable (and stable) in recordings.
        self._record_unique_values = record_unique_values
    def process_request(self, request):
        """Strip the request body and redact secret-bearing headers in place."""
        # bodies typically contain secrets and are often formed by msal anyway, i.e. not this library's responsibility
        request.body = None
        for header in SECRET_HEADERS:
            if header in request.headers:
                fake_value = self._get_fake_value(request.headers[header])
                request.headers[header] = fake_value
        return request
    def process_response(self, response):
        """Redact secret fields -- and id_token PII claims -- in a JSON response body."""
        try:
            body = json.loads(response["body"]["string"])
        except (KeyError, ValueError):
            # Not a JSON body (or no body at all) -- nothing to redact.
            return response
        for field in body:
            if field == "id_token":
                # The id_token is a JWT: overwrite only its PII claims so the
                # token still parses during playback.
                scrubbed = set_jwt_claims(body["id_token"], [(claim, "redacted") for claim in ID_TOKEN_PII_CLAIMS])
                body["id_token"] = scrubbed
            elif field in SECRET_FIELDS:
                fake_value = self._get_fake_value(body[field])
                body[field] = fake_value
        response["body"]["string"] = json.dumps(body)
        return response
    def _get_fake_value(self, real_value):
        """Return "redacted", plus a 6-hex-char sha256 tag when unique values were requested."""
        redacted_value = "redacted"
        if self._record_unique_values:
            digest = hashlib.sha256(six.ensure_binary(real_value)).digest()
            redacted_value += six.ensure_str(binascii.hexlify(digest))[:6]
        return redacted_value
class IdTokenProcessor(RecordingProcessor):
    def process_response(self, response):
        """Modifies an id token's claims to pass MSAL validation during playback"""
        try:
            # Push "exp" an hour into the future and point "aud" at the fake
            # test client id so the recorded token still validates on replay.
            body = json.loads(six.ensure_str(response["body"]["string"]))
            new_jwt = set_jwt_claims(body["id_token"], [("exp", int(time.time()) + 3600), ("aud", FAKE_CLIENT_ID)])
            body["id_token"] = new_jwt
            response["body"]["string"] = six.ensure_binary(json.dumps(body))
        except KeyError:
            # Response has no body or no id_token -- leave it untouched.
            pass
        return response
| [
"noreply@github.com"
] | adriananeci.noreply@github.com |
813c49ad69128d73bbf2c507dcc2c9d1d598d3ea | baf3996414315ffb60470c40c7ad797bf4e6897f | /17_boilerplates/dashing-master/examples/datatable_crud_columns_plus_heatmap.py | a3df45758aef973180fe648c9bf311594fe24318 | [
"MIT"
] | permissive | thiago-allue/portfolio | 8fbbecca7ce232567aebe97c19944f444508b7f4 | 0acd8253dc7c5150fef9b2d46eead3db83ca42de | refs/heads/main | 2023-03-15T22:10:21.109707 | 2022-09-14T17:04:35 | 2022-09-14T17:04:35 | 207,919,073 | 0 | 0 | null | 2019-11-13T18:18:23 | 2019-09-11T22:40:46 | Python | UTF-8 | Python | false | false | 2,104 | py | import dash
from dash.dependencies import Input, Output, State
import dash_table
import dash_core_components as dcc
import dash_html_components as html
app = dash.Dash(__name__)
app.layout = html.Div(
[
html.Div(
[
dcc.Input(
id='editing-columns-name',
placeholder='Enter a column name...',
value='',
style={'padding': 10},
),
html.Button('Add Column', id='editing-columns-button', n_clicks=0),
],
style={'height': 50},
),
dash_table.DataTable(
id='editing-columns',
columns=[
{
'name': 'Column {}'.format(i),
'id': 'column-{}'.format(i),
'deletable': True,
'renamable': True,
}
for i in range(1, 5)
],
data=[
{'column-{}'.format(i): (j + (i - 1) * 5) for i in range(1, 5)}
for j in range(5)
],
editable=True,
),
dcc.Graph(id='editing-columns-graph'),
]
)
@app.callback(
Output('editing-columns', 'columns'),
[Input('editing-columns-button', 'n_clicks')],
[State('editing-columns-name', 'value'), State('editing-columns', 'columns')],
)
def update_columns(n_clicks, value, existing_columns):
    """Dash callback: append a user-named column definition once the
    "Add Column" button has been clicked.

    The incoming column list is mutated in place and returned so Dash can
    push it back into the DataTable's ``columns`` property.
    """
    if n_clicks <= 0:
        # Initial page load fires the callback with n_clicks == 0: no-op.
        return existing_columns
    new_column = dict(id=value, name=value, renamable=True, deletable=True)
    existing_columns.append(new_column)
    return existing_columns
@app.callback(
Output('editing-columns-graph', 'figure'),
[Input('editing-columns', 'data'), Input('editing-columns', 'columns')],
)
def display_output(rows, columns):
    """Dash callback: rebuild the heatmap figure from the table's current
    rows and column definitions (cells missing from a row become None)."""
    column_ids = [column['id'] for column in columns]
    column_names = [column['name'] for column in columns]
    z_matrix = []
    for row in rows:
        z_matrix.append([row.get(column_id) for column_id in column_ids])
    heatmap = {'type': 'heatmap', 'z': z_matrix, 'x': column_names}
    return {'data': [heatmap]}
if __name__ == '__main__':
app.run_server(debug=True)
| [
"thiago.allue@yahoo.com"
] | thiago.allue@yahoo.com |
3b78f880a9f1f8064da728b93a85dee7b8a60ec8 | 9593b2662c7a88f21734006c1fe1b860bf662b23 | /libxml2-2.7.7/python/setup.py | c29f97ee1fcf853673f35d523b4fea36ae536733 | [
"LicenseRef-scancode-x11-xconsortium-veillard",
"MIT"
] | permissive | faizalzakaria/GstreamerOMXSigma | aae6ab943c2aa901699efde3abf086cc386e0b8a | ee67face07aa65ad12009fa3f57d4880c231554c | refs/heads/master | 2021-03-12T23:20:03.362298 | 2013-04-29T18:35:58 | 2013-04-29T18:35:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,714 | py | #!/usr/bin/python -u
#
# Setup script for libxml2 and libxslt if found
#
import sys, os
from distutils.core import setup, Extension
# Below ROOT, we expect to find include, include/libxml2, lib and bin.
# On *nix, it is not needed (but should not harm),
# on Windows, it is set by configure.js.
ROOT = r'/users/phai/share/GSTReamer_compilation'
# Thread-enabled libxml2
with_threads = 0
# If this flag is set (windows only),
# a private copy of the dlls are included in the package.
# If this flag is not set, the libxml2 and libxslt
# dlls must be found somewhere in the PATH at runtime.
WITHDLLS = 1 and sys.platform.startswith('win')
def missing(file):
    # Readability probe: 0 when the path exists and is readable, 1 otherwise.
    # (Callers treat the result as a truthy "is it missing?" flag.)
    if os.access(file, os.R_OK):
        return 0
    return 1
try:
HOME = os.environ['HOME']
except:
HOME="C:"
if WITHDLLS:
# libxml dlls (expected in ROOT/bin)
dlls = [ 'iconv.dll','libxml2.dll','libxslt.dll','libexslt.dll' ]
dlls = map(lambda dll: os.path.join(ROOT,'bin',dll),dlls)
# create __init__.py for the libxmlmods package
if not os.path.exists("libxmlmods"):
os.mkdir("libxmlmods")
open("libxmlmods/__init__.py","w").close()
    def altImport(s):
        # Rewrite bare C-extension imports so they resolve from the bundled
        # libxmlmods package (used on Windows when the DLLs ship inside it).
        s = s.replace("import libxml2mod","from libxmlmods import libxml2mod")
        s = s.replace("import libxsltmod","from libxmlmods import libxsltmod")
        return s
if sys.platform.startswith('win'):
libraryPrefix = 'lib'
platformLibs = []
else:
libraryPrefix = ''
platformLibs = ["m","z"]
# those are examined to find
# - libxml2/libxml/tree.h
# - iconv.h
# - libxslt/xsltconfig.h
includes_dir = [
"/usr/include",
"/usr/local/include",
"/opt/include",
os.path.join(ROOT,'include'),
HOME
];
xml_includes=""
for dir in includes_dir:
if not missing(dir + "/libxml2/libxml/tree.h"):
xml_includes=dir + "/libxml2"
break;
if xml_includes == "":
print "failed to find headers for libxml2: update includes_dir"
sys.exit(1)
iconv_includes=""
for dir in includes_dir:
if not missing(dir + "/iconv.h"):
iconv_includes=dir
break;
if iconv_includes == "":
print "failed to find headers for libiconv: update includes_dir"
sys.exit(1)
# those are added in the linker search path for libraries
libdirs = [
os.path.join(ROOT,'lib'),
]
xml_files = ["libxml2-api.xml", "libxml2-python-api.xml",
"libxml.c", "libxml.py", "libxml_wrap.h", "types.c",
"xmlgenerator.py", "README", "TODO", "drv_libxml2.py"]
xslt_files = ["libxslt-api.xml", "libxslt-python-api.xml",
"libxslt.c", "libxsl.py", "libxslt_wrap.h",
"xsltgenerator.py"]
if missing("libxml2-py.c") or missing("libxml2.py"):
try:
try:
import xmlgenerator
except:
import generator
except:
print "failed to find and generate stubs for libxml2, aborting ..."
print sys.exc_type, sys.exc_value
sys.exit(1)
head = open("libxml.py", "r")
generated = open("libxml2class.py", "r")
result = open("libxml2.py", "w")
for line in head.readlines():
if WITHDLLS:
result.write(altImport(line))
else:
result.write(line)
for line in generated.readlines():
result.write(line)
head.close()
generated.close()
result.close()
with_xslt=0
if missing("libxslt-py.c") or missing("libxslt.py"):
if missing("xsltgenerator.py") or missing("libxslt-api.xml"):
print "libxslt stub generator not found, libxslt not built"
else:
try:
import xsltgenerator
except:
print "failed to generate stubs for libxslt, aborting ..."
print sys.exc_type, sys.exc_value
else:
head = open("libxsl.py", "r")
generated = open("libxsltclass.py", "r")
result = open("libxslt.py", "w")
for line in head.readlines():
if WITHDLLS:
result.write(altImport(line))
else:
result.write(line)
for line in generated.readlines():
result.write(line)
head.close()
generated.close()
result.close()
with_xslt=1
else:
with_xslt=1
if with_xslt == 1:
xslt_includes=""
for dir in includes_dir:
if not missing(dir + "/libxslt/xsltconfig.h"):
xslt_includes=dir + "/libxslt"
break;
if xslt_includes == "":
print "failed to find headers for libxslt: update includes_dir"
with_xslt = 0
descr = "libxml2 package"
modules = [ 'libxml2', 'drv_libxml2' ]
if WITHDLLS:
modules.append('libxmlmods.__init__')
c_files = ['libxml2-py.c', 'libxml.c', 'types.c' ]
includes= [xml_includes, iconv_includes]
libs = [libraryPrefix + "xml2"] + platformLibs
macros = []
if with_threads:
macros.append(('_REENTRANT','1'))
if with_xslt == 1:
descr = "libxml2 and libxslt package"
if not sys.platform.startswith('win'):
#
# We are gonna build 2 identical shared libs with merge initializing
# both libxml2mod and libxsltmod
#
c_files = c_files + ['libxslt-py.c', 'libxslt.c']
xslt_c_files = c_files
macros.append(('MERGED_MODULES', '1'))
else:
#
# On windows the MERGED_MODULE option is not needed
# (and does not work)
#
xslt_c_files = ['libxslt-py.c', 'libxslt.c', 'types.c']
libs.insert(0, libraryPrefix + 'exslt')
libs.insert(0, libraryPrefix + 'xslt')
includes.append(xslt_includes)
modules.append('libxslt')
extens=[Extension('libxml2mod', c_files, include_dirs=includes,
library_dirs=libdirs,
libraries=libs, define_macros=macros)]
if with_xslt == 1:
extens.append(Extension('libxsltmod', xslt_c_files, include_dirs=includes,
library_dirs=libdirs,
libraries=libs, define_macros=macros))
if missing("MANIFEST"):
manifest = open("MANIFEST", "w")
manifest.write("setup.py\n")
for file in xml_files:
manifest.write(file + "\n")
if with_xslt == 1:
for file in xslt_files:
manifest.write(file + "\n")
manifest.close()
if WITHDLLS:
ext_package = "libxmlmods"
if sys.version >= "2.2":
base = "lib/site-packages/"
else:
base = ""
data_files = [(base+"libxmlmods",dlls)]
else:
ext_package = None
data_files = []
setup (name = "libxml2-python",
# On *nix, the version number is created from setup.py.in
# On windows, it is set by configure.js
version = "2.7.7",
description = descr,
author = "Daniel Veillard",
author_email = "veillard@redhat.com",
url = "http://xmlsoft.org/python.html",
licence="MIT Licence",
py_modules=modules,
ext_modules=extens,
ext_package=ext_package,
data_files=data_files,
)
sys.exit(0)
| [
"faizal_zakaria@sdesigns.eu"
] | faizal_zakaria@sdesigns.eu |
f5609f19aaa811622b7896b5ee417ab07a4b4ff6 | d7ec67a5ba315103fa6a6bae6dc045f1fecf7add | /normal/PythonAI/chap05/sec04/Ptna/dictionary.py | 187a9058376add81bf6ab7ff4b94534018ece357 | [] | no_license | munezou/PycharmProject | cc62f5e4278ced387233a50647e8197e009cc7b4 | 26126c02cfa0dc4c0db726f2f2cabb162511a5b5 | refs/heads/master | 2023-03-07T23:44:29.106624 | 2023-01-23T16:16:08 | 2023-01-23T16:16:08 | 218,804,126 | 2 | 1 | null | 2023-02-28T23:58:22 | 2019-10-31T15:57:22 | Jupyter Notebook | UTF-8 | Python | false | false | 5,237 | py | import random
import re
class Dictionary:
    """Loads the chatbot's random-response and pattern dictionaries from dics/."""
    def __init__(self):
        self.random = [] # (1)
        # Open the random-response dictionary file
        rfile = open('dics/random.txt', 'r', encoding = 'utf_8')
        # Store each line as an element of a list
        r_lines = rfile.readlines()
        rfile.close()
        # Strip the trailing newline/whitespace from each line and keep the
        # non-empty ones in an instance variable (a list)
        # NOTE(review): this re-binding makes the assignment at (1) redundant.
        self.random = []
        for line in r_lines:
            str = line.rstrip('\n')
            if (str!=''):
                self.random.append(str)
        # Open the pattern dictionary
        pfile = open('dics/pattern.txt', 'r', encoding = 'utf_8')
        # Store each line as an element of a list
        p_lines = pfile.readlines()
        pfile.close()
        # Strip the trailing newline/whitespace and keep the non-empty lines
        # in an instance variable (a list)
        # NOTE(review): the local name `str` shadows the builtin in this method.
        self.new_lines = []
        for line in p_lines:
            str = line.rstrip('\n')
            if (str!=''):
                self.new_lines.append(str)
        # Prepare a list-typed instance variable
        self.pattern = []
        # Split each dictionary line on the tab character:
        #   ptn  the regex pattern
        #   prs  the candidate responses
        # Build a ParseItem (from ptn and prs) and append it to the
        # instance variable pattern (a list)
        for line in self.new_lines:
            ptn, prs = line.split('\t')
            self.pattern.append(ParseItem(ptn, prs))
class ParseItem:
    """One pattern-dictionary entry: a regex pattern plus candidate responses.

    Dictionary line format (after the tab split)::

        [mood##]pattern<TAB>[need##]phrase|[need##]phrase|...

    The optional integer before ``##`` is a mood modifier for the pattern
    and a required-mood threshold for each phrase.
    """

    # group 1: optional "<int>##" prefix, group 2: the bare integer,
    # group 3: the remaining pattern/phrase text.
    # Raw string avoids the invalid-escape DeprecationWarning for \d.
    SEPARATOR = r'^((-?\d+)##)?(.*)$'

    def __init__(self, pattern, phrases):
        """
        @param pattern  regex pattern, optionally prefixed with "<int>##"
        @param phrases  '|'-separated responses, each optionally "<int>##"-prefixed
        """
        m = re.findall(ParseItem.SEPARATOR, pattern)
        # Mood modifier applied when this pattern matches; 0 when absent.
        # Fix: the original left this as a *string* when the prefix was
        # present, despite documenting it as the integer part.
        # NOTE(review): confirm no caller relies on the old str value.
        self.modify = int(m[0][1]) if m[0][1] else 0
        self.pattern = m[0][2]
        # Each phrase becomes {'need': <int threshold>, 'phrase': <text>}.
        # 'need' is now consistently an int (the original mixed int 0 with
        # str for explicit prefixes); suitable() still accepts either.
        self.phrases = []
        for phrase in phrases.split('|'):
            m = re.findall(ParseItem.SEPARATOR, phrase)
            need = int(m[0][1]) if m[0][1] else 0
            self.phrases.append({'need': need, 'phrase': m[0][2]})

    def match(self, str):
        """Return the re.Match of this entry's pattern against *str*, or None."""
        return re.search(self.pattern, str)

    def choice(self, mood):
        """Pick a random phrase whose mood threshold is satisfied by *mood*.

        @param mood  the current mood value
        @return      a phrase string, or None when no phrase qualifies
        """
        choices = [p['phrase'] for p in self.phrases
                   if self.suitable(p['need'], mood)]
        if not choices:
            return None
        return random.choice(choices)

    def suitable(self, need, mood):
        """Return True when *mood* satisfies the *need* threshold.

        need == 0: always suitable; need > 0: mood must exceed it;
        need < 0: mood must be below it.
        @param need  required mood threshold (int or numeric string)
        @param mood  the current mood value
        """
        need = int(need)
        if need == 0:
            return True
        if need > 0:
            return mood > need
        return mood < need
#obj = Dictionary()
| [
"kazumikm0119@pi5.fiberbit.net"
] | kazumikm0119@pi5.fiberbit.net |
d0a81878c0472116130b576d2afe9f6dbd0c320f | 02e7181726dc227654c40c0e6454306c5f8de727 | /function/scope.py | 217f0b376facc690fafbf39886499a0788b9db4d | [] | no_license | arifikhsan/python-dicoding | eeec8d9f1356632c6bd3d055e2fbf04846fd2be0 | 20b5624cb61ef6a9fe16ea7646e0d1a0737513fc | refs/heads/master | 2022-04-21T18:50:21.974986 | 2020-04-23T10:43:23 | 2020-04-23T10:43:23 | 258,090,573 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 169 | py | total = 0
# NOTE(review): this demo deliberately shadows the built-in sum(); fine for
# a scoping tutorial, but avoid the name in real code.
def sum(numone, numtwo):
    # Assigning here creates a *local* `total`; without a `global total`
    # declaration the module-level `total` stays 0.
    total = numone + numtwo
    print('inside function', total)
    return total
sum(2, 3) # 5
print('outside function', total) # 0 -- the module-level total was never modified
| [
"arif.ikhsanudin.developer@gmail.com"
] | arif.ikhsanudin.developer@gmail.com |
3878cf12468ae9da76688ba3c8749159a53c56ca | 3b9ae6d07f422f2ed60ea8295c25ed8b13380b81 | /comprasnet/pages/statuse_auction_detail.py | 25096f708f44fc42272c8ef7780d31f190f85779 | [
"Apache-2.0"
] | permissive | GabrielRocha/python-comprasnet | ffdb6c5036244160af0f2741ba87acc6c616cb0b | 606097b7bb997b19a4040b8a5aaff5efa084a80b | refs/heads/master | 2020-03-17T23:57:56.584293 | 2018-05-22T14:28:28 | 2018-05-22T14:28:28 | 134,070,832 | 0 | 0 | Apache-2.0 | 2018-05-21T22:51:49 | 2018-05-19T14:48:07 | HTML | UTF-8 | Python | false | false | 3,130 | py | from . import BaseDetail, log
from urllib.parse import urlencode
from bs4 import BeautifulSoup
import requests
class StatuseAuctionDetail(BaseDetail):
    """Retrieve auction status detail information from this page:
    http://comprasnet.gov.br/ConsultaLicitacoes/download/download_editais_detalhe.asp"""
    DETAIL_URL = "http://comprasnet.gov.br/ConsultaLicitacoes/download/download_editais_detalhe.asp"
    @property
    def url(self):
        """Full detail-page URL for this auction (DETAIL_URL plus the query string)."""
        return "{}?{}".format(self.DETAIL_URL, urlencode(self.get_params()))
    def get_params(self):
        """Build the query parameters, ordered for a stable query string."""
        # uasg_code / auction_code come from BaseDetail; 'modprp' is hard-coded
        # to 5 -- presumably the procurement modality code, TODO confirm.
        params = {
            'coduasg': self.uasg_code,
            'numprp': self.auction_code,
            'modprp': 5,
        }
        return self._order_dict(params)
    def get_data(self):
        """Fetch the detail page HTML; log and return None on a non-200 reply."""
        response = requests.get(self.DETAIL_URL, params=self.get_params())
        if not response.status_code == requests.codes.ok:
            log.error('error trying to get statuse from auction {}/{}. Status code: {}'.format(
                self.uasg_code, self.auction_code, response.status_code
            ))
            return
        return response.text
    def scrap_data(self):
        """Parse the fetched HTML into a dict of auction codes plus an 'itens'
        list; each item carries its number, title and procurement fields."""
        output = {
            'codigo-da-uasg': int(self.uasg_code),
            'pregao-eletronico': int(self.auction_code),
        }
        data = self.get_data()
        bs_object = BeautifulSoup(data, "html.parser")
        # Items appear in the table that follows the "Itens de ..." heading.
        for span in bs_object('span', class_='tex3b'):
            if 'Itens de ' in span.text:
                items_table = span.find_next('table')
                output['itens'] = []
                for items in items_table.find_all('tr'):
                    item = {}
                    header = items.find('span', class_='tex3b')
                    description = items.find('span', class_='tex3')
                    try:
                        # Header looks like "<number> - <title>[ - ...]".
                        item_number, title = header.text.split(' - ')[:2]
                        item['numero'] = int(item_number)
                        item['titulo'] = title.strip()
                        # The description is split on <br/> into fixed
                        # "label: value" positions -- deliberately brittle;
                        # rows that don't match are logged and skipped below.
                        description = str(description).split('<br/>')
                        description_text = description[0].split('<br/>')
                        description_text = description_text[0].split('<span class="tex3">')[1]
                        diff_treattment = description[1].split(':')
                        item['tratamento-diferenciado'] = diff_treattment[1].strip()
                        item['aplicabilidade-decreto'] = description[2].split(':')[1].strip()
                        item['aplicabilidade-margem-de-preferencia'] = description[3].split(':')[1].strip()
                        item['quantidade'] = int(description[4].split(':')[1].strip())
                        item['unidade-de-fornecimento'] = description[5].split(':')[1].strip('</span>').split()[0]
                        output['itens'].append(item)
                    except (ValueError, IndexError) as e:
                        log.error('error on extract description in "{}". {}'.format(
                            items, self.url))
                        log.exception(e)
        return output
| [
"gabrielrocha@catho.com.br"
] | gabrielrocha@catho.com.br |
3452e9c57e93a0d4be6e4ca790f8aefbddbe116c | 5821d882ef4ee38170fc539a085752d3b554f6b5 | /get_pileups/get_pileup.py | cda6026a8452401a39c3fc065f675a3288baa0e0 | [] | no_license | mchowdh200/exome-copy | f2c8e307de73c0dc4cd034763a7a9c7e06f4a057 | d38808f7234f1121de21e42350b69cb700d9e4ab | refs/heads/master | 2020-04-21T10:13:29.765434 | 2019-06-09T06:50:54 | 2019-06-09T06:50:54 | 169,480,127 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,736 | py | import sys
from collections import Counter
import pysam
import pandas as pd
# Command line: get_pileup.py <regions.bed> <alignments.bam>
REGIONS_BED = sys.argv[1]
BAM_FILE = sys.argv[2]
# Used to filter out info in idxstats (keep only the primary chromosomes)
chromosomes = {
    '1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12', '13',
    '14', '15', '16', '17', '18', '19', '20', '21', '22', 'X', 'Y'
}
# Get the total number of reads/million in the BAM ----------------------------
# line format: chr sequence-length num-mapped-reads num-unmapped-reads
idxstats = pysam.idxstats(BAM_FILE) # pylint: disable=no-member
num_reads = 0
for chrm in idxstats.rstrip().split('\n'):
    chrm = chrm.rstrip().split()
    if chrm[0] in chromosomes:
        num_reads += int(chrm[2])
# Scale to millions so the counts below become reads-per-million
num_reads /= 1e6
# Get pileup sequences --------------------------------------------------------
with open(REGIONS_BED, 'r') as regions, \
    pysam.AlignmentFile(BAM_FILE, 'rb') as samfile:
    data = []
    for region in regions:
        # BED columns: chrom, start, end, plus a genotype label in column 4
        chrom, start, end, genotype = region.rstrip().split()
        # truncate=True limits columns to the requested interval (per pysam docs)
        pileups = samfile.pileup(chrom, int(start), int(end), truncate=True)
        # One Counter per pileup column; per pysam, lowercase letters are
        # reverse-strand calls -- hence the eight A/T/C/G/a/t/c/g columns.
        data.append((chrom, start, end, genotype,
                     pd.DataFrame.from_records(
                         [Counter(column.get_query_sequences())
                          for column in pileups],
                         columns=['A', 'T', 'C', 'G',
                                  'a', 't', 'c', 'g']
                     )))
# Write results ---------------------------------------------------------------
for chrom, start, end, genotype, df in data:
    # Absent bases become 0; normalize by total mapped reads (in millions)
    values = df.fillna(value=0).values/num_reads
    print(chrom, start, end, genotype,
          ','.join(str(n) for n in values.flatten().tolist()),
          sep='\t')
| [
"mchowdh200@gmail.com"
] | mchowdh200@gmail.com |
dfe38908b2e59c3c6ba9bb3b5d297ec542b383a0 | fa9ea72d3ef2cd7473f9b7f171ef017769f4da17 | /src/model_predictions.py | 7d35f2d92ade124b08c84ac4e3eed22c55c7993e | [] | no_license | josiah-d/Predict-Directional-Thought | 9b133363be2ae04975e32146d0b848fcd7cc0443 | 01eced4e9bbb3ef17e60106f14a992f64fd0f7bb | refs/heads/main | 2023-07-10T07:17:12.162012 | 2021-08-12T23:35:19 | 2021-08-12T23:35:19 | 381,900,794 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 718 | py | from glob import glob
import numpy as np
import pandas as pd
from tensorflow.keras.models import load_model
from load_data import master_load
# load data
X_train, X_test, y_train, y_test = master_load()
# get best model
# NOTE(review): picks the lexicographically last file under models/numpy/ --
# assumes the naming scheme sorts the best/latest checkpoint last; confirm.
best_numpy_model = sorted(glob('models/numpy/*.model'))[-1]
model_numpy = load_model(best_numpy_model)
# make predictions and reformat test data
# argmax over the last axis turns probability / one-hot rows into class ids
y_pred = np.argmax(model_numpy.predict(X_test), axis=-1)
y_true = np.argmax(y_test, axis=-1)
# build dataframe to stow results
df = pd.DataFrame({'y_true': y_true, 'y_pred': y_pred})
# shuffle the rows, then drop the leftover original index column
df = df.sample(frac=1).reset_index()
df.drop('index', axis=1, inplace=True)
if __name__ == '__main__':
    # save results
    df.to_csv('best_model_results.csv')
| [
"duhaimejosiah@gmail.com"
] | duhaimejosiah@gmail.com |
c4083bb05417a506a91fa9cbe92257e23bff28cb | a75e326ea02d6f9fd865fd58db4eabd999519343 | /12-authentication/myproject/__init__.py | a25a709f51ebf7ef6f11a02936eb2bf69b0a152d | [] | no_license | maralexv/learn_flask | c392be636f46cc921d5745ee2969a586b065c0e4 | ca99052f4b7c892b8638215ff5967ead40700022 | refs/heads/master | 2022-09-27T19:09:19.726616 | 2020-09-12T09:06:28 | 2020-09-12T09:06:28 | 154,628,546 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 555 | py | import os
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask_migrate import Migrate
from flask_login import LoginManager
login_manager = LoginManager()
app = Flask(__name__)
# Absolute path of this package's directory; the SQLite file lives beside it.
basedir = os.path.abspath(os.path.dirname(__file__))
# SECURITY NOTE(review): the secret key is hard-coded in source -- acceptable
# only for a tutorial; load it from the environment for any real deployment.
app.config ["SECRET_KEY"] = 'S\qr0g8qbi1.63b$&)!.<%/'
app.config ["SQLALCHEMY_DATABASE_URI"] = 'sqlite:///'+os.path.join(basedir, 'data.sqlite')
# Disable SQLAlchemy's modification-tracking overhead (and its warning).
app.config ["SQLALCHEMY_TRACK_MODIFICATIONS"] = False
db = SQLAlchemy(app)
Migrate(app, db)
login_manager.init_app(app)
# Endpoint name that @login_required redirects anonymous users to.
login_manager.login_view = 'login'
| [
"marchenko.alexander@gmail.com"
] | marchenko.alexander@gmail.com |
b48031b06dbb958e5d5ef37d088a4b46f343e105 | 1ea05a473da2fd7de4448428777bc1e0eb026658 | /lib/rf_outcomeclass_upload.py | 2308ab4c833806feaad2e579ca1c6dc592dd8174 | [] | no_license | ebeth-chin/stool_consistency_pub | feea1733c3e9d348f6f138ba89ea77e123bbc5a5 | b2680bb9a8d35a7c6ef92e2a5fd565be9dbe428d | refs/heads/master | 2023-03-14T22:24:53.063504 | 2021-03-24T16:01:41 | 2021-03-24T16:01:41 | 291,159,493 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,845 | py | #!/usr/bin/python3
print('Loading modules...')
import os, sys, getopt, datetime
import warnings
warnings.simplefilter("ignore", category=DeprecationWarning)
warnings.simplefilter(action='ignore', category=FutureWarning)
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import StratifiedKFold, cross_val_score, cross_val_predict, train_test_split, GridSearchCV
from sklearn.pipeline import make_pipeline, Pipeline
from sklearn.preprocessing import OneHotEncoder,StandardScaler
from sklearn.compose import ColumnTransformer
from sklearn.metrics import make_scorer, balanced_accuracy_score,confusion_matrix, classification_report
from pickle import load, dump
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from get_transformer_feature_names import *
#set the working directory
os.chdir('/project/lemay_diet_guthealth/echin/stoolcon/')
#set seed
np.random.seed(0)
# OUTCOME CLASS w/ STANDARD SCALING ONLY#
def main():
    """Tune, fit and evaluate a RandomForest stool-consistency classifier.

    Command line:
        rf_outcomeclass_upload.py <x features path> <y target path> <savepath>

    Side effects: writes OOB predictions/accuracies, the confusion matrix,
    the classification report, CV results/predictions and the pickled best
    estimator into <savepath>.
    """
    # ---- command-line handling --------------------------------------------
    # Three user arguments are required; argv[0] is the script name, so a
    # valid call has len(sys.argv) == 4. (The original "< 3" check let a
    # missing savepath through and crashed later at sys.argv[3].)
    if len(sys.argv) < 4:
        print("Not enough arguments specified\n Usage: rf_outcomeclass_upload.py <x features path> <y target path> <savepath>")
        sys.exit(1)
    else:
        # print command line arguments
        for arg in sys.argv[0:]:
            print(arg)
    # ---- load X features ---------------------------------------------------
    X_path = sys.argv[1]
    print('Loading the X features at {}'.format(X_path))
    X_in = pd.read_csv(X_path, index_col=0, header=0)
    X_in = X_in.sort_index(axis=0)
    # model name (used in output file names) comes from the X features file
    base = os.path.basename(X_path)
    mod_name = os.path.splitext(base)[0]
    # ---- load Y target (a single outcome-class column) ---------------------
    Y_path = sys.argv[2]
    print('Loading Y target at {}'.format(Y_path))
    y_in = pd.read_csv(Y_path, index_col=0, header=0)
    y_in = y_in.sort_index(axis=0)
    # target column name for output file names (the row slice is irrelevant
    # to .columns; kept for parity with the original)
    y_target = y_in[:-1].columns
    # ---- feature typing ----------------------------------------------------
    # Reference lists of known numeric / categorical feature names
    num_feat = pd.read_csv("data/input/numeric_features.csv", delimiter=',', header=0)
    cat_feat = pd.read_csv("data/input/categorical_features.csv", delimiter=',', header=0)
    numerical_features = [col for col in X_in.columns if col in num_feat.values]
    categorical_features = [col for col in X_in.columns if col in cat_feat.values]
    # numeric transformer: standard scaling only
    ss_transformer = Pipeline(steps=[('ss', StandardScaler())])
    # categorical transformer: fit an encoder once to discover the category
    # levels, then build the pipeline with those fixed levels so every CV
    # fold produces the same one-hot columns
    X_cat = X_in[categorical_features]
    enc = OneHotEncoder(handle_unknown="error", sparse=False)
    enc.fit(X_cat)
    enc.transform(X_cat)
    cat_levels = enc.categories_
    categorical_transformer = Pipeline(steps=[
        ('onehot', OneHotEncoder(handle_unknown='error', sparse=False, categories=cat_levels))
    ])
    prep = ColumnTransformer(
        transformers=[
            ('ss', ss_transformer, numerical_features),
            ('cat', categorical_transformer, categorical_features)
        ]
    )
    # ---- model + pipeline --------------------------------------------------
    model = RandomForestClassifier(random_state=0,
                                   oob_score=True,
                                   n_jobs=-1)
    pipeline = Pipeline(steps=[('preprocessor', prep),
                               ('rf', model)])
    # ---- hyperparameter search --------------------------------------------
    param_grid = {
        'rf__max_features': np.arange(0.1, 1, 0.05),
        'rf__max_samples': np.arange(0.1, 1, 0.05),
        'rf__n_estimators': np.arange(100, 1000, 50)
    }
    cv = StratifiedKFold(n_splits=10, shuffle=True, random_state=0)
    scoring = {'accuracy', 'balanced_accuracy'}
    # the model refit on the full dataset optimizes balanced accuracy
    refit = 'balanced_accuracy'
    grid_search = GridSearchCV(estimator=pipeline, param_grid=param_grid, n_jobs=-1, cv=cv,
                               scoring=scoring, refit=refit, return_train_score=True)
    # tune hyperparameters and refit the best model on the entire dataset
    grid_search.fit(X_in, y_in.values.ravel())
    print("Best params:", grid_search.best_params_)
    print("Best Validation Balanced Accuracy Score:", grid_search.best_score_)
    print("Best Validation Vanilla Accuracy Score:", grid_search.cv_results_['mean_test_accuracy'][grid_search.best_index_])
    print("OOB Score (vanilla accuracy):", grid_search.best_estimator_.named_steps['rf'].oob_score_)
    # ---- out-of-bag predictions -------------------------------------------
    savepath = sys.argv[3]
    # class-membership probabilities derived from the OOB votes
    preds = pd.DataFrame(grid_search.best_estimator_.named_steps['rf'].oob_decision_function_)
    # one-hot: mark the highest-probability class in each row with a 1
    preds_binary = preds.eq(preds.where(preds != 0).max(1), axis=0).astype(int)
    preds_binary = preds_binary.rename(columns={0: "hard", 1: "normal", 2: "soft"})
    # collapse the one-hot columns back into a single predicted-class label
    conditions = [
        (preds_binary['hard'] == 1),
        (preds_binary['normal'] == 1),
        (preds_binary['soft'] == 1)]
    choices = ['hard', 'normal', 'soft']
    preds_binary['predicted_class'] = np.select(conditions, choices, default='normal')
    # integer class id straight from argmax (ties resolve to the first max)
    preds_binary['pred_binary_class'] = np.argmax(grid_search.best_estimator_.named_steps['rf'].oob_decision_function_, axis=1)
    # merge probabilities and labels into one frame for saving
    preds_merged = preds.merge(preds_binary, left_index=True, right_index=True)
    preds_merged = preds_merged.rename(columns={0: "hard_prob", 1: "normal_prob", 2: "soft_prob"})
    preds_merged['subjectid'] = y_in.index
    preds_merged = preds_merged.set_index(['subjectid'])
    preds_name = 'oob_predictions_for_{}'.format(y_target[0]) + "_{}".format(mod_name) + '.csv'
    preds_path = savepath + preds_name
    preds_merged.to_csv(preds_path, index=True)
    print("oob balanced accuracy:", balanced_accuracy_score(y_in, preds_binary['pred_binary_class']))
    # ---- OOB accuracy table ------------------------------------------------
    oob_score = []
    oob_bacc = []
    oob_score.append(grid_search.best_estimator_.named_steps['rf'].oob_score_)
    oob_bacc.append(balanced_accuracy_score(y_in, preds_binary['pred_binary_class']))
    bacc_df = pd.DataFrame(oob_bacc)
    bacc_df = bacc_df.rename(columns={0: 'oob_balanced_acc'})
    oob_df = pd.DataFrame(oob_score)
    oob_df = oob_df.rename(columns={0: 'oob_accuracy'})
    oob_accs = pd.concat([oob_df, bacc_df], axis=1)
    oob_name = 'oob_accuracy_for_{}'.format(y_target[0]) + '_{}'.format(mod_name) + '.csv'
    oob_path = savepath + oob_name
    oob_accs.to_csv(oob_path, index=True)
    # ---- confusion matrix --------------------------------------------------
    pred_class = preds_binary[["pred_binary_class"]]
    print("Confusion Matrix:\n\n", confusion_matrix(y_in, pred_class))
    cmat = pd.DataFrame(confusion_matrix(y_in, pred_class))
    cmat_name = 'confusion_matrix_for_{}'.format(y_target[0]) + "_{}".format(mod_name) + '.csv'
    cmat_path = savepath + cmat_name
    cmat.to_csv(cmat_path, index=True)
    # ---- classification report --------------------------------------------
    print("Classification Report:\n\n", classification_report(y_in, pred_class, digits=3))
    crep = pd.DataFrame(classification_report(y_in, pred_class, digits=3, output_dict=True)).T
    crep_name = 'classification_report_for_{}'.format(y_target[0]) + "_{}".format(mod_name) + '.csv'
    crep_path = savepath + crep_name
    crep.to_csv(crep_path, index=True)
    # ---- cross-validation artifacts ---------------------------------------
    results = pd.DataFrame(grid_search.cv_results_)
    name = 'cv_results_for_{}'.format(y_target[0]) + "_{}".format(mod_name) + '.csv'
    path = savepath + name
    results.to_csv(path, index=True)
    # per-sample CV predictions next to the actual labels
    train_pred = cross_val_predict(grid_search.best_estimator_, X_in, y_in.values.ravel(), cv=cv)
    predictions = pd.DataFrame(index=X_in.index)
    predictions['train_pred'] = train_pred
    predictions['train_actual'] = y_in
    pred_name = 'cv_predictions_for_{}'.format(y_target[0]) + "_{}".format(mod_name) + '.csv'
    pred_path = savepath + pred_name
    predictions.to_csv(pred_path, index=True)
    # ---- persist the model -------------------------------------------------
    # Fix: the original printed an undefined `filename` (guaranteed
    # NameError) and never saved the model despite importing pickle.dump.
    filename = savepath + 'best_model_for_{}'.format(y_target[0]) + "_{}".format(mod_name) + '.pkl'
    with open(filename, 'wb') as model_file:
        dump(grid_search.best_estimator_, model_file)
    print("\nResults saved to {}".format(savepath))
    print("\nModel saved to {}".format(filename))
if __name__ == "__main__":
    main()
| [
"elichin@ucdavis.edu"
] | elichin@ucdavis.edu |
312c4d29501dca463c4af031ea3b1f91c69f137c | e0270cb5622e9b0fc4b7104fc979551dd31836ba | /src/mysite/urls.py | dcb8fa008ef319bebb5bcffce0e8edbdeb99604f | [] | no_license | U-g1N/DjangoLearn | 5033c7fa8a02952ff38d5605a4ef4be58699e5ee | 3abec371f1ff4683eb8b60bf6c0467e9926c7f84 | refs/heads/master | 2023-03-26T03:11:25.772115 | 2021-03-26T06:53:09 | 2021-03-26T06:53:09 | 350,376,742 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 835 | py | """mysite URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from personal.views import (
home_screen_view,
)
# URL routing table: the Django admin at /admin/, and the personal app's
# home screen at the site root.
urlpatterns = [
    path('admin/', admin.site.urls),
    path('', home_screen_view),
]
| [
"eujinjoseph@gmail.com"
] | eujinjoseph@gmail.com |
90cc939c6da4c334f63838ab0648cfaf0cf634e0 | b03346953ae3e5852020d12d486ef60aee66f28f | /mywebsite/settings.py | 2a64076747f2a265aa573258e328e30d4c8765c3 | [] | no_license | martaswiatkowska/symmetrical-succotash-mywebsite | 355e0c5ecdb8e289224c4e3f3f92285256e1eed0 | 92bcc80aa6be771158cd97713a5854c4f1ab4387 | refs/heads/master | 2021-06-10T12:52:01.530923 | 2016-09-23T20:27:34 | 2016-09-23T20:27:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,179 | py | """
Django settings for mywebsite project.
Generated by 'django-admin startproject' using Django 1.9.4.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
import twitter_bootstrap

# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

# Less source directories: the project's own styles plus the bundled
# twitter_bootstrap package's less sources.
my_app_less = os.path.join(BASE_DIR, 'my_app', 'static', 'less')
# For apps outside of your project, it's simpler to import them to find their root folders
bootstrap_less = os.path.join(os.path.dirname(twitter_bootstrap.__file__), 'static', 'less')
# Passed to the lessc compiler so @import can resolve both source trees.
PIPELINE_LESS_ARGUMENTS = u'--include-path={}'.format(os.pathsep.join([bootstrap_less, my_app_less]))

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'a)$f61jyd8t-nmt2u%fotao^3mo-4f1tm!%st2^$cgy3im)(a_'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

ALLOWED_HOSTS = []

# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'MyApp',
    'colorful',
    'twitter_bootstrap',
    'pipeline',
    'djangojs'
]

# NOTE(review): MIDDLEWARE_CLASSES is the pre-1.10 setting name; newer Django
# versions use MIDDLEWARE. Fine for the 1.9 target this file was written for.
MIDDLEWARE_CLASSES = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'mywebsite.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [os.path.join(BASE_DIR, 'MyApp/templates')],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'mywebsite.wsgi.application'

# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}

# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]

# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'

TIME_ZONE = 'UTC'

USE_I18N = True

USE_L10N = True

USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'public', 'static')
STATICFILES_STORAGE = 'pipeline.storage.PipelineStorage'
STATICFILES_DIRS = (
    os.path.join(BASE_DIR, "static"),
)

# NOTE(review): this file mixes the new-style PIPELINE dict (django-pipeline
# >= 1.6) with the old-style PIPELINE_CSS / PIPELINE_JS / PIPELINE_COMPILERS
# settings below. Only one style is honored depending on the installed
# django-pipeline version — worth consolidating.
PIPELINE = {
    'PIPELINE_ENABLED': True,
    'JAVASCRIPT': {
        'stats': {
            'source_filenames': (
                'js/jquery.js',
                'js/d3.js',
                'js/collections/*.js',
                'js/application.js',
            ),
            'output_filename': 'js/stats.js',
        }
    }
}

PIPELINE_CSS = {
    'bootstrap': {
        'source_filenames': (
            'twitter_bootstrap/less/bootstrap.less',
        ),
        'output_filename': 'css/b.css',
        'extra_context': {
            'media': 'screen,projection',
        },
    },
}

PIPELINE_JS = {
    'bootstrap': {
        'source_filenames': (
            'twitter_bootstrap/js/transition.js',
            'twitter_bootstrap/js/modal.js',
            'twitter_bootstrap/js/dropdown.js',
            'twitter_bootstrap/js/scrollspy.js',
            'twitter_bootstrap/js/tab.js',
            'twitter_bootstrap/js/tooltip.js',
            'twitter_bootstrap/js/popover.js',
            'twitter_bootstrap/js/alert.js',
            'twitter_bootstrap/js/button.js',
            'twitter_bootstrap/js/collapse.js',
            'twitter_bootstrap/js/carousel.js',
            'twitter_bootstrap/js/affix.js',
        ),
        'output_filename': 'js/b.js',
    },
}

PIPELINE_COMPILERS = (
    'pipeline.compilers.less.LessCompiler',
)
| [
"martaswiatkowska8@gmail.com"
] | martaswiatkowska8@gmail.com |
7c071c9f98c9899543ba25e0b157cfdc92a34d67 | 0bdb8514c76c74d9d52637e4e9bca11c6d9f4a7b | /Unit 2/tuple(元组).py | 1c1bb7ae01310510e0518d573ccc6598da7c0ef2 | [] | no_license | Tespera/Python-study-notes | 24389fa838de69a452e042cb8d52074c8f849c0a | 8602cc49bd922275b97be61206ebf86359502c4f | refs/heads/master | 2021-01-22T20:18:44.555498 | 2017-08-18T11:06:08 | 2017-08-18T11:06:08 | 100,703,876 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 139 | py | # Author:Tespera
# Date:2017.8.15
# 元组:只读列表,不允许修改
# Tuples are immutable ("read-only lists"); the only tuple methods are
# count() and index(), and BOTH require a value argument. The original
# called them with no arguments, which raises TypeError at runtime.
names = ('Tespera', 'VV')
names.count('Tespera')  # -> 1: number of occurrences of the value
names.index('VV')       # -> 1: position of the first occurrence
| [
"noreply@github.com"
] | Tespera.noreply@github.com |
2c1f8a805233cad2aa3e634457c4e7de745bfd4a | 7d7d8f79e8bae80a8c99240b158c6f3d2abbf94d | /website/sitemaps.py | 8a87994a09ed53d0bc73c9c11e80878f5cb62a57 | [
"MIT"
] | permissive | ecss-soton/ecssweb | feeb208a504bc80b9453ba306c51cae6da3718cd | 06ddda86863ddb85e5da39a6f7b7fb29af902b16 | refs/heads/master | 2022-12-16T02:59:45.147472 | 2022-12-11T22:13:04 | 2022-12-11T22:13:04 | 133,257,221 | 4 | 3 | MIT | 2022-12-11T22:13:06 | 2018-05-13T16:58:48 | HTML | UTF-8 | Python | false | false | 1,503 | py | from django.contrib.sitemaps import Sitemap
from django.urls import reverse
from .models import CommitteeRoleMember, Society, Sponsor
class CommitteeSitemap(Sitemap):
    """Sitemap entries for each committee member's detail page."""

    # Committee composition changes once per academic year.
    changefreq = 'yearly'

    def items(self):
        # One sitemap entry per (role, member) assignment.
        return CommitteeRoleMember.objects.all()

    def location(self, item):
        # URL is keyed on the role's codename, not the member.
        return reverse('website:committee-member', kwargs={'role': item.role_codename})
class SocietySitemap(Sitemap):
    """Sitemap entries for each affiliated society's detail page."""

    changefreq = 'yearly'

    def items(self):
        return Society.objects.all()

    def location(self, item):
        return reverse('website:societies-details', kwargs={'society': item.codename})
class SponsorSitemap(Sitemap):
    """Sitemap entries for sponsors, anchored on the sponsors page."""

    changefreq = 'monthly'

    def items(self):
        """One entry per sponsor record."""
        return Sponsor.objects.all()

    def location(self, item):
        """Sponsors share one page; each entry selects one via ?sponsor=<codename>."""
        sponsors_page = reverse('website:sponsors')
        return '{}?sponsor={}'.format(sponsors_page, item.codename)
class StaticViewSitemap(Sitemap):
    """Sitemap entries for the site's fixed (non-database-driven) pages."""

    def items(self):
        # Each entry is a URL name inside the 'website' namespace.
        return [
            'home',
            'committee-overview',
            'societies',
            'sponsors',
            'events',
            'socials',
            'gaming-socials',
            'welfare',
            'sports',
            'football',
            'netball',
            'running',
            'sports-others',
            'about',
            'contact',
            'media-notice',
            'jumpstart-2018',
            'jumpstart-2019',
            'campus-hack-19',
        ]

    def location(self, item):
        return reverse('website:{}'.format(item))
| [
"i@cjxol.com"
] | i@cjxol.com |
ef1a2736eb1d45ecfe4b5293ed8f88eb6ca2148e | b492cb89a41ad805a818e3440fb16e9d045d4fed | /extraction.py | a53f1dfcda5b164b48183e5dec10207c3ec34d3a | [] | no_license | praveendandu/caterpillar-tube-pricing | fd204e91c55edac898b3f76baa812aff1e5c16d3 | 484cd926995e56898c30999c7d2963970f19d93e | refs/heads/master | 2021-01-18T13:51:31.220961 | 2015-09-05T19:48:25 | 2015-09-05T19:48:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 25,998 | py |
import pandas as pd
import numpy as np
from sklearn.preprocessing import LabelEncoder
import csv
import pickle
'''
Extracts variables from the training and test sets and pickles the resulting
data frames for use in modelling.py.
'''
def get_adjacent_tube_assembly_ids(tube_assembly_id):
    '''
    Helper function to calculate the tube assembly ids adjacent
    to tube_assembly_id, e.g. 'TA-02796' -> ('TA-02795', 'TA-02797').
    Used to help calculate min_adjacent_cost, max_adjacent_cost,
    and average_adjacent_cost.
    '''
    # Ids look like 'TA-NNNNN'; strip the 'TA-' prefix to get the number.
    tube_assembly_num = int(tube_assembly_id[3:])
    # str.zfill replaces the original manual while-loop zero padding: it pads
    # to 5 digits and, like the loops, leaves longer numbers untouched.
    tube_assembly_down = str(tube_assembly_num - 1).zfill(5)
    tube_assembly_up = str(tube_assembly_num + 1).zfill(5)
    return ('TA-' + tube_assembly_down, 'TA-' + tube_assembly_up)
def extract(data):
    '''
    Extracts data from train and test. Only calculates features
    that don't require aggregating data across a combined train/test set.
    '''
    # Join the quote rows with per-tube geometry and the bill of materials
    # (component ids + quantities) on tube_assembly_id.
    tube_data = pd.read_csv('competition_data/tube.csv').fillna("")
    data_merged = pd.merge(left = data, right = tube_data, how='inner', on='tube_assembly_id')
    bill_of_materials = pd.read_csv('competition_data/bill_of_materials.csv')
    data_merged = pd.merge(left = data_merged, right = bill_of_materials, how='inner', on='tube_assembly_id')
    # Integer-encode Yes/No style categoricals.
    data_merged['bracket_pricing'] = LabelEncoder().fit_transform(data_merged['bracket_pricing'])
    # quote_date is 'YYYY-MM-DD'; split out year and month features.
    data_merged['quote_year'] = [int(x.split('-')[0]) for x in data_merged['quote_date']]
    data_merged['quote_month'] = [int(x.split('-')[1]) for x in data_merged['quote_date']]
    data_merged['end_a_1x'] = LabelEncoder().fit_transform(data_merged['end_a_1x'])
    data_merged['end_a_2x'] = LabelEncoder().fit_transform(data_merged['end_a_2x'])
    data_merged['end_x_1x'] = LabelEncoder().fit_transform(data_merged['end_x_1x'])
    data_merged['end_x_2x'] = LabelEncoder().fit_transform(data_merged['end_x_2x'])
    # Map each tube end to whether its end form requires forming:
    # 1 = Yes, 0 = No, -1 = no end form ("NONE").
    end_form = pd.read_csv('competition_data/tube_end_form.csv')
    data_merged.loc[data_merged['end_a'] == "NONE", 'end_a_forming'] = -1
    data_merged.loc[data_merged['end_x'] == "NONE", 'end_x_forming'] = -1
    for idx,row in end_form.iterrows():
        if row['forming'] == 'Yes':
            end_forming_value = 1
        if row['forming'] == 'No':
            end_forming_value = 0
        data_merged.loc[data_merged['end_a'] == row['end_form_id'], 'end_a_forming'] = end_forming_value
        data_merged.loc[data_merged['end_x'] == row['end_form_id'], 'end_x_forming'] = end_forming_value
    # Total component count per tube, summed across quantity_1..quantity_8.
    quantity_vars = [x for x in data_merged.columns.values if x[:9] == 'quantity_']
    data_merged[quantity_vars] = data_merged[quantity_vars].fillna(0, axis = 1)
    data_merged['total_quantity_components'] = data_merged[quantity_vars].sum(axis = 1)
    data_merged = data_merged.fillna("")
    '''
    The remainder of this function adds features from each of the component files (i.e.
    csv data files beginning with 'comp_'). Variables common across component files are
    aggregated in various ways, including taking the sum, average, or creating a separate
    indicator variable for each value of a categorical variable.
    '''
    comp_files = ['comp_adaptor.csv', 'comp_boss.csv', 'comp_elbow.csv', 'comp_float.csv', 'comp_hfl.csv', 'comp_nut.csv', 'comp_other.csv', 'comp_sleeve.csv', 'comp_straight.csv', 'comp_tee.csv', 'comp_threaded.csv']
    component_names = ['component_id_' + str(x) for x in range(1, 9)]
    # NOTE(review): concat_final_output is never used below — dead variable.
    concat_final_output = []
    #Build a dictionary with component_id as key, and value a dictionary of variables an values it can take.
    comp_variables = {}
    for comp_filename in comp_files:
        # NOTE(review): comp_file is read but unused; only the DictReader rows
        # below are kept. All values in comp_variables are strings.
        comp_file = pd.read_csv('competition_data/' + comp_filename)
        with open('competition_data/' + comp_filename) as csvfile:
            comp_reader = csv.DictReader(csvfile)
            for row in comp_reader:
                comp_variables[row['component_id']] = row
    #Scans through each tube record in the merged data, examines the components that tube has,
    #and assigns variables aggregated from the component data.
    for idx, row in data_merged.iterrows():
        # Per-row accumulators for every component-derived feature;
        # they keep their zero/default value when the tube has no components.
        weight = 0.0
        names = []
        orientation_yes = 0
        orientation_no = 0
        diameter = -1
        total_nominal_size = 0
        thread_pitch = 0
        adaptor_angle = 0
        total_length = 0
        head_diameter = 0
        corresponding_shell = []
        outside_shape = []
        outside_shape_round = 0
        outside_shape_hex = 0
        outside_shape_na = 0
        total_base_diameter = 0
        total_thread_size = 0
        thread_count = 0
        connection_type_counter = {'B-001':0, 'B-002':0,'B-003':0,'B-004':0,'B-005':0,'B-006':0,'B-007':0,'B-008':0,'B-009':0,'B-010':0,'B-011':0,'B-012':0,'B-013':0, '9999':0}
        connection_type_ids = ["connection_type_id_" + str(x) for x in range(1,5)]
        connection_type_ids.append("connection_type_id")
        nut_and_sleeve_length = 0
        thickness = 0
        mj_plug_class_code = 0
        base_type_flatbottom = 0
        base_type_shoulder = 0
        base_type_saddle = 0
        hex_size = 0
        seat_angle = 0
        thread_size = 0
        type_boss = 0
        type_stud = 0
        end_form_ids = {'A-00' + str(x):0 for x in range(1,8)}
        intended_nut_pitch = 0
        bolt_pattern_wide = 0
        plug_diameter = 0
        hex_nut_size = 0
        height_over_tube = 0
        shoulder_diameter = 0
        overall_length = 0
        extension_length = 0
        intended_nut_thread = 0
        mj_class_code_exists = 0
        unique_feature_yes = 0
        unique_feature_no = 0
        drop_length = 0
        elbow_angle = 0
        bolt_pattern_long = 0
        thread_pitches = {'thread_pitch_' + str(x):0 for x in range(1,5)}
        groove_no = 0
        groove_yes = 0
        num_components = 0
        #Get the component names you'll be working with for the row
        for name in component_names:
            if row[name] != '' and row[name] != '9999':
                names.append(row[name])
            if row[name] != '':
                num_components += 1
        if names:
            for name in names:
                # Each block below aggregates one component variable if the
                # component's file defines it (fields absent from a file
                # simply don't appear in the component's dict).
                if comp_variables[name]['weight'] != 'NA':
                    weight += float(comp_variables[name]['weight'])
                if 'orientation' in comp_variables[name]:
                    if comp_variables[name]['orientation'] == 'Yes':
                        orientation_yes += 1
                    if comp_variables[name]['orientation'] == 'No':
                        orientation_no += 1
                if 'diameter' in comp_variables[name]:
                    if comp_variables[name]['diameter'] != 'NA':
                        # NOTE(review): overwrites rather than accumulates —
                        # only the last component's diameter survives.
                        diameter = float(comp_variables[name]['diameter'])
                if 'nominal_size_1' in comp_variables[name]:
                    #Get nominal sizes (there are 4 in one file and 2 in another)
                    nominal_sizes = [x for x in comp_variables[name] if x[0:13] == 'nominal_size_']
                    # NOTE(review): '=' not '+=' — verify the overwrite across
                    # multiple components is intended.
                    total_nominal_size = sum(float(comp_variables[name][x]) for x in nominal_sizes if comp_variables[name][x] != 'NA' and comp_variables[name][x] != 'See Drawing')
                if 'thread_pitch' in comp_variables[name]:
                    # NOTE(review): kept as a string, unlike other numeric vars.
                    thread_pitch = comp_variables[name]['thread_pitch']
                if 'adaptor_angle' in comp_variables[name]:
                    if comp_variables[name]['adaptor_angle'] != 'NA':
                        adaptor_angle = comp_variables[name]['adaptor_angle']
                if 'length_1' in comp_variables[name]:
                    #Get lengths (there are 4 in one file and 2 in another)
                    lengths = [x for x in comp_variables[name] if x[0:7] == 'length_']
                    total_length = sum(float(comp_variables[name][x]) for x in lengths if comp_variables[name][x] != 'NA')
                if 'head_diameter' in comp_variables[name]:
                    if comp_variables[name]['head_diameter'] != 'NA':
                        head_diameter = comp_variables[name]['head_diameter']
                #There are no tubes with multiple components with corresponding shell
                if 'corresponding_shell' in comp_variables[name]:
                    corresponding_shell.append(comp_variables[name]['corresponding_shell'])
                #There are some tubes with multiple components with corresponding shell
                if 'outside_shape' in comp_variables[name]:
                    if comp_variables[name]['outside_shape'] == "Round":
                        outside_shape_round += 1
                    if comp_variables[name]['outside_shape'] == "NA":
                        outside_shape_na += 1
                    if comp_variables[name]['outside_shape'] == "Hex":
                        outside_shape_hex += 1
                if 'base_diameter' in comp_variables[name]:
                    if comp_variables[name]['base_diameter'] != 'NA':
                        total_base_diameter += float(comp_variables[name]['base_diameter'])
                if 'thread_size' in comp_variables[name]:
                    thread_count += 1
                    if comp_variables[name]['thread_size'][0] == 'M':
                        # Metric thread: convert mm to inches.
                        #http://www.engineeringtoolbox.com/metric-threads-d_777.html
                        total_thread_size += float(comp_variables[name]['thread_size'][1:]) * 0.03937
                    else:
                        total_thread_size += float(comp_variables[name]['thread_size'])
                if 'connection_type_id_1' in comp_variables[name] or 'connection_type_id' in comp_variables[name]:
                    for connection_type in connection_type_ids:
                        if connection_type in comp_variables[name] and comp_variables[name][connection_type] != 'NA':
                            connection_type_counter[comp_variables[name][connection_type]] += 1
                if 'length' in comp_variables[name]:
                    if comp_variables[name]['length'] != '9999':
                        nut_and_sleeve_length += float(comp_variables[name]['length'])
                if 'thickness' in comp_variables[name]:
                    if comp_variables[name]['thickness'] != 'NA':
                        thickness += float(comp_variables[name]['thickness'])
                if 'mj_plug_class_code' in comp_variables[name] and comp_variables[name]['mj_plug_class_code'] != 'NA':
                    mj_plug_class_code += 1
                if 'base_type' in comp_variables[name]:
                    if comp_variables[name]['base_type'] == "Shoulder":
                        base_type_shoulder += 1
                    if comp_variables[name]['base_type'] == "Flat Bottom":
                        base_type_flatbottom += 1
                    if comp_variables[name]['base_type'] == "Saddle":
                        base_type_saddle += 1
                if 'hex_size' in comp_variables[name] and comp_variables[name]['hex_size'] != 'NA':
                    hex_size += float(comp_variables[name]['hex_size'])
                if 'seat_angle' in comp_variables[name] and comp_variables[name]['seat_angle'] != 'NA':
                    seat_angle += float(comp_variables[name]['seat_angle'])
                if 'thread_size_1' in comp_variables[name]:
                    thread_size_varnames = ['thread_size_1', 'thread_size_2', 'thread_size_3', 'thread_size_4']
                    for size in thread_size_varnames:
                        if size in comp_variables[name] and comp_variables[name][size] != 'NA':
                            thread_size += float(comp_variables[name][size])
                if 'type' in comp_variables[name]:
                    if comp_variables[name]['type'] == 'Boss':
                        type_boss += 1
                    elif comp_variables[name]['type'] == 'Stud':
                        type_stud += 1
                if 'end_form_id_1' in comp_variables[name]:
                    end_form_id_names = ['end_form_id_1', 'end_form_id_2', 'end_form_id_3', 'end_form_id_4']
                    for id_name in end_form_id_names:
                        if id_name in comp_variables[name] and comp_variables[name][id_name] != 'NA' and comp_variables[name][id_name] != "9999":
                            end_form_ids[comp_variables[name][id_name]] += 1
                #Skipped "material" variable.
                if 'intended_nut_pitch' in comp_variables[name]:
                    intended_nut_pitch += float(comp_variables[name]['intended_nut_pitch'])
                if 'bolt_pattern_wide' in comp_variables[name] and comp_variables[name]['bolt_pattern_wide'] != "NA":
                    bolt_pattern_wide += float(comp_variables[name]['bolt_pattern_wide'])
                if 'plug_diameter' in comp_variables[name] and comp_variables[name]['plug_diameter'] != "NA":
                    plug_diameter += float(comp_variables[name]['plug_diameter'])
                if 'hex_nut_size' in comp_variables[name] and comp_variables[name]['hex_nut_size'] != "NA":
                    hex_nut_size += float(comp_variables[name]['hex_nut_size'])
                #Skipped "blind_hole" variable
                #Skipped "hose_diameter" variable
                if 'height_over_tube' in comp_variables[name] and comp_variables[name]['height_over_tube'] != "NA":
                    height_over_tube += float(comp_variables[name]['height_over_tube'])
                if 'shoulder_diameter' in comp_variables[name] and comp_variables[name]['shoulder_diameter'] != "NA":
                    shoulder_diameter += float(comp_variables[name]['shoulder_diameter'])
                if 'overall_length' in comp_variables[name] and comp_variables[name]['overall_length'] != "NA":
                    overall_length += float(comp_variables[name]['overall_length'])
                #Skipped "part_name" variable
                if 'extension_length' in comp_variables[name] and comp_variables[name]['extension_length'] != "NA":
                    extension_length += float(comp_variables[name]['extension_length'])
                if 'intended_nut_thread' in comp_variables[name] and comp_variables[name]['intended_nut_thread'] != "NA":
                    intended_nut_thread += float(comp_variables[name]['intended_nut_thread'])
                if 'mj_class_code' in comp_variables[name] and comp_variables[name]['mj_class_code'] != "NA":
                    mj_class_code_exists = 1
                if 'unique_feature' in comp_variables[name] and comp_variables[name]['unique_feature'] != "NA":
                    if comp_variables[name]['unique_feature'] == "Yes":
                        unique_feature_yes += 1
                    if comp_variables[name]['unique_feature'] == "No":
                        unique_feature_no += 1
                if 'drop_length' in comp_variables[name] and comp_variables[name]['drop_length'] != "NA":
                    drop_length += float(comp_variables[name]['drop_length'])
                if 'elbow_angle' in comp_variables[name] and comp_variables[name]['elbow_angle'] != "NA":
                    elbow_angle += float(comp_variables[name]['elbow_angle'])
                #Skipped "coupling class" variable
                if 'bolt_pattern_long' in comp_variables[name] and comp_variables[name]['bolt_pattern_long'] != "NA":
                    bolt_pattern_long += float(comp_variables[name]['bolt_pattern_long'])
                if 'thread_pitch_1' in comp_variables[name]:
                    thread_pitch_names = ['thread_pitch_1', 'thread_pitch_2', 'thread_pitch_3', 'thread_pitch_4']
                    for p_name in thread_pitch_names:
                        if p_name in comp_variables[name] and comp_variables[name][p_name] != 'NA' and comp_variables[name][p_name] != "9999":
                            thread_pitches[p_name] += float(comp_variables[name][p_name])
                if 'groove' in comp_variables[name] and comp_variables[name]['groove'] != "NA":
                    if comp_variables[name]['groove'] == "Yes":
                        groove_yes += 1
                    if comp_variables[name]['groove'] == "No":
                        groove_no += 1
        # Write the aggregated features back onto the row.
        # NOTE(review): DataFrame.set_value is deprecated in modern pandas
        # (use .at); kept as-is for the pandas version this targets.
        data_merged.set_value(idx, 'num_components', num_components)
        data_merged.set_value(idx, 'total_weight', weight)
        data_merged.set_value(idx, 'orentation_yes', orientation_yes)
        data_merged.set_value(idx, 'orientation_no', orientation_no)
        data_merged.set_value(idx, 'diameter', diameter)
        data_merged.set_value(idx, 'total_nominal_size', total_nominal_size)
        data_merged.set_value(idx, 'thread_pitch', thread_pitch)
        data_merged.set_value(idx, 'adaptor_angle', adaptor_angle)
        data_merged.set_value(idx, 'total_length', total_length)
        data_merged.set_value(idx, 'outside_shape_round', outside_shape_round)
        data_merged.set_value(idx, 'outside_shape_na', outside_shape_na)
        data_merged.set_value(idx, 'outside_shape_hex', outside_shape_hex)
        data_merged.set_value(idx, 'total_base_diameter', total_base_diameter)
        if thread_count == 0:
            data_merged.set_value(idx, 'average_thread_size', total_thread_size)
        else:
            data_merged.set_value(idx, 'average_thread_size', total_thread_size/thread_count)
        for connection_type in connection_type_counter:
            data_merged.set_value(idx, connection_type, connection_type_counter[connection_type])
        data_merged.set_value(idx, 'nut_and_sleeve_length', nut_and_sleeve_length)
        data_merged.set_value(idx, 'thickness', thickness)
        data_merged.set_value(idx, 'mj_plug_class_code', mj_plug_class_code)
        data_merged.set_value(idx, 'base_type_shoulder', base_type_shoulder)
        data_merged.set_value(idx, 'base_type_flatbottom', base_type_flatbottom)
        data_merged.set_value(idx, 'base_type_saddle', base_type_saddle)
        data_merged.set_value(idx, 'hex_size', hex_size)
        data_merged.set_value(idx, 'seat_angle', seat_angle)
        data_merged.set_value(idx, 'total_thread_size', thread_size)
        data_merged.set_value(idx, 'type_boss', type_boss)
        data_merged.set_value(idx, 'type_stud', type_stud)
        for i in end_form_ids:
            data_merged.set_value(idx, i, end_form_ids[i])
        data_merged.set_value(idx, 'intended_nut_pitch', intended_nut_pitch)
        data_merged.set_value(idx, 'bolt_pattern_wide', bolt_pattern_wide)
        data_merged.set_value(idx, 'plug_diameter', plug_diameter)
        data_merged.set_value(idx, 'hex_nut_size', hex_nut_size)
        data_merged.set_value(idx, 'height_over_tube', height_over_tube)
        data_merged.set_value(idx, 'shoulder_diameter', shoulder_diameter)
        data_merged.set_value(idx, 'overall_length', overall_length)
        data_merged.set_value(idx, 'extension_length', extension_length)
        data_merged.set_value(idx, 'intended_nut_thread', intended_nut_thread)
        data_merged.set_value(idx, 'mj_class_code_exists', mj_class_code_exists)
        data_merged.set_value(idx, 'unique_feature_yes', unique_feature_yes)
        data_merged.set_value(idx, 'unique_feature_no', unique_feature_no)
        data_merged.set_value(idx, 'drop_length', drop_length)
        data_merged.set_value(idx, 'elbow_angle', elbow_angle)
        # NOTE(review): stores total_base_diameter, not bolt_pattern_long —
        # looks like a copy/paste slip; confirm before relying on this column.
        data_merged.set_value(idx, 'bolt_pattern_long', total_base_diameter)
        for i in thread_pitches:
            data_merged.set_value(idx, i, thread_pitches[i])
        data_merged.set_value(idx, 'groove_yes', groove_yes)
        data_merged.set_value(idx, 'groove_no', groove_no)
    return data_merged
'''
Extracts information that requires aggregating information across
both train and test sets (requires both datasets passed as function
argument).
'''
def extract_train_and_test(train, test):
    """Add features that require aggregating across both train and test.

    Returns the (train, test) pair with the new columns attached.
    NOTE(review): relies on Python-2-only constructs (`from sets import Set`,
    bare `reduce`); port to `set` / `functools.reduce` for Python 3.
    """
    #Create total number of supplier quotes variable
    #that counts the number of distinct tube ids supplied by each supplier
    train_s_tid = train[['supplier', 'tube_assembly_id']]
    test_s_tid = test[['supplier', 'tube_assembly_id']]
    concat_s_tid = pd.concat([train_s_tid, test_s_tid])
    grouped = concat_s_tid.groupby('supplier')
    num_suppliers = grouped.tube_assembly_id.nunique()
    df_num_suppliers = pd.DataFrame(num_suppliers)
    df_num_suppliers.columns = ['total_supplier_quotes']
    train = pd.merge(left = train, right = df_num_suppliers, left_on = 'supplier', how = 'left', right_index = True)
    test = pd.merge(left = test, right = df_num_suppliers, left_on = 'supplier', how = 'left', right_index = True)
    #Label encode categorical variables
    # Fit on train+test together so both get a consistent encoding.
    labels_to_encode = ['component_id_' + str(x) for x in range(1,9)]
    labels_to_encode.append('supplier')
    for label in labels_to_encode:
        lbl = LabelEncoder()
        lbl.fit(pd.concat([train[label], test[label]]))
        train[label] = lbl.transform(train[label])
        test[label] = lbl.transform(test[label])
    component_ids = ['component_id_' + str(x) for x in range(1,9)]
    quantities = ['quantity_' + str(x) for x in range(1,9)]
    #Build a dictionary of components/quantities as keys, and as values have {tube_id_1: {costs: [list_of_costs]}, tube_id_2: ...}
    components_and_quantities = {}
    from sets import Set
    for idx, row in train.iterrows():
        key = tuple(row[x] for x in component_ids + quantities)
        if key in components_and_quantities:
            if row['tube_assembly_id'] in components_and_quantities[key]:
                components_and_quantities[key][row['tube_assembly_id']]['costs'].append(row['cost'])
                components_and_quantities[key][row['tube_assembly_id']]['quantity'].append(row['quantity'])
                components_and_quantities[key][row['tube_assembly_id']]['suppliers'].add(row['supplier'])
            else:
                components_and_quantities[key][row['tube_assembly_id']] = {'costs': [row['cost']], 'quantity': [row['quantity']], 'suppliers': Set([row['supplier']])}
        else:
            components_and_quantities[key] = {row['tube_assembly_id']: {'costs': [row['cost']], 'quantity': [row['quantity']], 'suppliers': Set([row['supplier']])}}
    # Same pass over test, except no cost is available there.
    for idx, row in test.iterrows():
        key = tuple(row[x] for x in component_ids + quantities)
        if key in components_and_quantities:
            if row['tube_assembly_id'] in components_and_quantities[key]:
                components_and_quantities[key][row['tube_assembly_id']]['quantity'].append(row['quantity'])
                components_and_quantities[key][row['tube_assembly_id']]['suppliers'].add(row['supplier'])
            else:
                components_and_quantities[key][row['tube_assembly_id']] = {'costs': [], 'quantity': [row['quantity']], 'suppliers': Set([row['supplier']])}
        else:
            components_and_quantities[key] = {row['tube_assembly_id']: {'costs': [], 'quantity': [row['quantity']], 'suppliers': Set([row['supplier']])}}
    for idx, row in train.iterrows():
        key = tuple(row[x] for x in component_ids + quantities)
        num_tubes_with_same_component_list = len(components_and_quantities[key])
        train.set_value(idx, "num_tubes_with_same_component_list", num_tubes_with_same_component_list)
        num_suppliers_with_same_component_list = len(reduce(lambda x,y: x.union(y), [components_and_quantities[key][tube_id]['suppliers'] for tube_id in components_and_quantities[key]]))
        train.set_value(idx, "num_suppliers_with_same_component_list", num_suppliers_with_same_component_list)
        #Look at the price of a tube with a "nearby" tube name. e.g. you are looking at TA-02796
        #get the min, max, and average cost of TA-02797 or TA-02795 (if they exist with a cost)
        #If they don't exist, just set variable to 0.
        tube_assembly_id = row['tube_assembly_id']
        adjacent_tube_assembly_ids = get_adjacent_tube_assembly_ids(tube_assembly_id)
        min_adjacent_cost = 0
        max_adjacent_cost = 0
        average_adjacent_cost = 0
        for adjacent_tube in adjacent_tube_assembly_ids:
            if adjacent_tube in components_and_quantities[key] and components_and_quantities[key][adjacent_tube]['costs']:
                min_adjacent_cost = min(components_and_quantities[key][adjacent_tube]['costs'])
                max_adjacent_cost = max(components_and_quantities[key][adjacent_tube]['costs'])
                average_adjacent_cost = sum(components_and_quantities[key][adjacent_tube]['costs'])/float(len(components_and_quantities[key][adjacent_tube]['costs']))
                break
        train.set_value(idx, "min_adjacent_cost", min_adjacent_cost)
        train.set_value(idx, "max_adjacent_cost", max_adjacent_cost)
        train.set_value(idx, "average_adjacent_cost", average_adjacent_cost)
    #Perform the same operations as the above for loop on the test data set.
    for idx, row in test.iterrows():
        key = tuple(row[x] for x in component_ids + quantities)
        num_tubes_with_same_component_list = len(components_and_quantities[key])
        test.set_value(idx, "num_tubes_with_same_component_list", num_tubes_with_same_component_list)
        num_suppliers_with_same_component_list = len(reduce(lambda x,y: x.union(y), [components_and_quantities[key][tube_id]['suppliers'] for tube_id in components_and_quantities[key]]))
        test.set_value(idx, "num_suppliers_with_same_component_list", num_suppliers_with_same_component_list)
        tube_assembly_id = row['tube_assembly_id']
        adjacent_tube_assembly_ids = get_adjacent_tube_assembly_ids(tube_assembly_id)
        min_adjacent_cost = 0
        max_adjacent_cost = 0
        average_adjacent_cost = 0
        for adjacent_tube in adjacent_tube_assembly_ids:
            if adjacent_tube in components_and_quantities[key] and components_and_quantities[key][adjacent_tube]['costs']:
                min_adjacent_cost = min(components_and_quantities[key][adjacent_tube]['costs'])
                max_adjacent_cost = max(components_and_quantities[key][adjacent_tube]['costs'])
                average_adjacent_cost = sum(components_and_quantities[key][adjacent_tube]['costs'])/float(len(components_and_quantities[key][adjacent_tube]['costs']))
                break
        test.set_value(idx, "min_adjacent_cost", min_adjacent_cost)
        test.set_value(idx, "max_adjacent_cost", max_adjacent_cost)
        test.set_value(idx, "average_adjacent_cost", average_adjacent_cost)
    # Length of supplier relationship: months since the supplier's earliest
    # quote anywhere in train+test.
    suppliers_and_quote_dates = pd.concat([test[['supplier', 'quote_year', 'quote_month']], train[['supplier', 'quote_year', 'quote_month']]])
    grouped = suppliers_and_quote_dates.groupby('supplier')
    min_years = grouped.aggregate(np.min)
    min_years.columns = ['min_quote_year', 'min_quote_month']
    train = pd.merge(left = train, right = min_years, left_on = 'supplier', how = 'left', right_index = True)
    test = pd.merge(left = test, right = min_years, left_on = 'supplier', how = 'left', right_index = True)
    train['length_of_supplier_relationship'] = (train['quote_year'] - train['min_quote_year']) * 12 + train['quote_month'] - train['min_quote_month']
    test['length_of_supplier_relationship'] = (test['quote_year'] - test['min_quote_year']) * 12 + test['quote_month'] - test['min_quote_month']
    # Min/max quote quantity observed per tube assembly across both sets.
    quantities_to_agg = pd.concat([test[['tube_assembly_id', 'quantity']], train[['tube_assembly_id', 'quantity']]])
    grouped = quantities_to_agg.groupby('tube_assembly_id')
    min_quantity = grouped.aggregate(np.min)
    max_quantity = grouped.aggregate(np.max)
    min_quantity.columns = ['min_quantity']
    max_quantity.columns = ['max_quantity']
    train = pd.merge(left = train, right = min_quantity, left_on = 'tube_assembly_id', how = 'left', right_index = True)
    test = pd.merge(left = test, right = min_quantity, left_on = 'tube_assembly_id', how = 'left', right_index = True)
    train = pd.merge(left = train, right = max_quantity, left_on = 'tube_assembly_id', how = 'left', right_index = True)
    test = pd.merge(left = test, right = max_quantity, left_on = 'tube_assembly_id', how = 'left', right_index = True)
    return (train, test)
if __name__ == '__main__':
    # Load the raw competition data, extract features, and persist both
    # CSV (for inspection) and pickle (for modelling.py) outputs.
    train = pd.read_csv('competition_data/train_set.csv')
    test = pd.read_csv('competition_data/test_set.csv')
    #Want to train model (and develop relevant features) using log(1 + x), as it is
    #better suited for the RMSLE evaluation metric.
    train['cost'] = np.log1p(train['cost'])
    train = extract(train)
    test = extract(test)
    (train, test) = extract_train_and_test(train, test)
    train.to_csv('extracted_train.csv', index=False)
    test.to_csv('extracted_test.csv', index=False)
    #Dump the extracted data for use in modelling.py.
    # Use context managers so the pickle file handles are closed
    # deterministically (the original `pickle.dump(x, open(...))` leaked them).
    with open('extracted_train.pkl', 'wb') as train_pkl:
        pickle.dump(train, train_pkl)
    with open('extracted_test.pkl', 'wb') as test_pkl:
        pickle.dump(test, test_pkl)
| [
"nagelbergmark@gmail.com"
] | nagelbergmark@gmail.com |
4ef2cd2eade642a234ff663367f9bf7859fd3d47 | 1fcc47a5d07fcccd2949ecd7929b7d00a5901efc | /tests/function_pool.py | 9777dfd84171fc5a7863e0a994f92b33147576b9 | [] | no_license | roboptim/roboptim-core-python | 597b4363b6762f8b38ea2e8f664ce19a2912267d | 84141efe1eea991258ba88f5957febfb2f879873 | refs/heads/master | 2020-12-25T17:36:59.979228 | 2018-07-05T19:55:41 | 2018-07-05T19:55:41 | 8,874,275 | 4 | 4 | null | 2018-07-05T19:55:42 | 2013-03-19T08:50:33 | C++ | UTF-8 | Python | false | false | 6,672 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import \
print_function, unicode_literals, absolute_import, division
import unittest
import roboptim.core
import numpy as np
class Engine (roboptim.core.PyDifferentiableFunction):
    """Shared evaluator for n terms of the form x_i^2 + y_i^2.

    Values and the Jacobian are cached in ``self.data`` / ``self.jac`` so the
    individual ``Square`` functions can read them back without recomputing.
    The two counters record how often each evaluation actually ran.
    """

    def __init__ (self, n):
        roboptim.core.PyDifferentiableFunction.__init__ \
            (self, 2*n, n, "Compute xᵢ² + yᵢ² for each function")
        self.n = n                       # number of square terms
        self.data = np.zeros (n)         # cached values, one per term
        self.jac = np.zeros ((n, 2*n))   # cached Jacobian (block diagonal)
        self.compute_counter = 0
        self.jacobian_counter = 0

    def reset (self):
        """Zero the evaluation counters (cached values are kept)."""
        self.compute_counter = 0
        self.jacobian_counter = 0

    def impl_compute (self, result, x):
        # Results are cached in self.data; Square instances read them back.
        self.computeData(x)

    def impl_gradient (self, result, x):
        # Per-row gradients are never requested from the engine directly.
        # BUG FIX: the original did `return NotImplementedError`, which
        # silently returned the exception *class*; it must be raised.
        raise NotImplementedError

    def impl_jacobian (self, result, x):
        self.computeJacobian (x)

    def jacobian (self, x):
        self.computeJacobian (x)

    def computeData (self, x):
        """Evaluate every x_i^2 + y_i^2 term into self.data."""
        self.compute_counter += 1
        for i in range(self.n):
            self.data[i] = x[2*i]**2 + x[2*i+1]**2

    def computeJacobian (self, x):
        """Evaluate the block-diagonal Jacobian into self.jac."""
        self.jacobian_counter += 1
        self.jac.fill (0.)
        for i in range(self.n):
            # Only the two variables feeding square i are non-zero: 2x, 2y.
            for j in range(2*i, 2*(i+1)):
                self.jac[i,j] = 2*x[j]

    def getData (self, idx):
        return self.data[idx]

    def getJac (self, idx, var_idx):
        return self.jac[idx, var_idx]
class Square (roboptim.core.PyDifferentiableFunction):
    """One x² + y² term; values/gradients are read from a shared Engine."""

    def __init__ (self, engine, idx):
        roboptim.core.PyDifferentiableFunction.__init__ \
            (self, engine.inputSize (), 1, "x² + y²")
        self.engine = engine
        self.idx = idx

    def impl_compute (self, result, x):
        # The engine has already cached this term's value.
        result[0] = self.engine.getData (self.idx)

    def impl_gradient (self, result, x, functionId):
        # Only the two variables of this term have non-zero derivatives.
        base = 2 * self.idx
        for offset in (0, 1):
            result[base + offset] = self.engine.getJac (self.idx, base + offset)
class TestFunctionPoolPy(unittest.TestCase):
    """Exercise PyFunctionPool: many Square functions sharing one Engine.

    Each test checks both the numerical results and the Engine's call
    counters, proving the pool evaluates the shared engine exactly once
    per value / Jacobian request.
    """

    def test_engine(self):
        """The Engine alone computes values and Jacobians correctly."""
        engine = Engine (3)
        x = np.array([10., -5., 1., 2., -1., 1.])
        np.testing.assert_almost_equal (engine.data, np.zeros (engine.n))
        engine (x)
        # Entry i must equal x[2i]^2 + x[2i+1]^2.
        np.testing.assert_almost_equal (engine.data,
            [xi**2 + yi**2 for xi,yi in x.reshape(engine.n, 2) ])
        assert engine.compute_counter == 1
        assert engine.jacobian_counter == 0
        engine.reset ()
        np.testing.assert_almost_equal (engine.jac, np.zeros ((engine.n, 2*engine.n)))
        engine.jacobian (x)
        # Expected block-diagonal Jacobian: d(x^2+y^2)/dx = 2x, /dy = 2y.
        jac = np.zeros ((engine.n, 2*engine.n))
        for i in range(engine.n):
            for j in range(2):
                jac[i,2*i+j] = 2. * x[2*i+j]
        np.testing.assert_almost_equal (engine.jac, jac)
        assert engine.compute_counter == 0
        assert engine.jacobian_counter == 1
        engine.reset ()

    def test_pool(self):
        """A pool evaluates all Square functions via a single engine call."""
        n = 3
        engine = Engine (n)
        np.testing.assert_almost_equal (engine.data, np.zeros (engine.n))
        functions = [Square (engine, float(i)) for i in range (n)]
        print(engine)
        pool = roboptim.core.PyFunctionPool (engine, functions, "Dummy pool")
        print(pool)
        x = np.array([10., -5., 1., 2., -1., 1.])
        assert len(x) == 2 * n
        res = pool (x)
        np.testing.assert_almost_equal (engine.data,
            [xi**2 + yi**2 for xi,yi in x.reshape(engine.n, 2) ])
        np.testing.assert_almost_equal (res,
            [xi**2 + yi**2 for xi,yi in x.reshape(engine.n, 2) ])
        # Exactly one engine evaluation for the whole pool.
        assert engine.compute_counter == 1
        assert engine.jacobian_counter == 0
        engine.reset ()
        pool_jac = pool.jacobian (x)
        jac = np.zeros ((engine.n, 2*engine.n))
        for i in range(engine.n):
            for j in range(2):
                jac[i,2*i+j] = 2. * x[2*i+j]
        np.testing.assert_almost_equal (pool_jac, jac)
        assert engine.compute_counter == 0
        assert engine.jacobian_counter == 1
        engine.reset ()

    def test_pool_fd(self):
        """Finite differences over a pool approximate the analytic Jacobian."""
        n = 3
        engine = Engine (n)
        print(engine)
        functions = [Square (engine, float(i)) for i in range (n)]
        pool = roboptim.core.PyFunctionPool (engine, functions, "Dummy FD pool")
        fd_rule = roboptim.core.FiniteDifferenceRule.SIMPLE
        fd_pool = roboptim.core.PyFiniteDifference (pool, rule = fd_rule)
        print(fd_pool)
        x = np.array([10., -5., 1., 2., -1., 1.])
        assert len(x) == 2 * n
        res = fd_pool (x)
        np.testing.assert_almost_equal (engine.data,
            [xi**2 + yi**2 for xi,yi in x.reshape(engine.n, 2) ])
        np.testing.assert_almost_equal (res,
            [xi**2 + yi**2 for xi,yi in x.reshape(engine.n, 2) ])
        assert engine.compute_counter == 1
        assert engine.jacobian_counter == 0
        engine.reset ()
        fd_pool_jac = fd_pool.jacobian (x)
        jac = np.zeros ((engine.n, 2*engine.n))
        for i in range(engine.n):
            for j in range(2):
                jac[i,2*i+j] = 2. * x[2*i+j]
        # Looser tolerance (5 decimals): finite differences are approximate.
        np.testing.assert_almost_equal (fd_pool_jac, jac, 5)
        assert engine.compute_counter == 1 + len(x) # simple rule: (f(x+h)-f(x))/h
        assert engine.jacobian_counter == 0
        engine.reset ()

    def test_pool_parallel(self):
        """Same as test_pool, but evaluating the pool on 2 processes."""
        n = 3
        engine = Engine (n)
        np.testing.assert_almost_equal (engine.data, np.zeros (engine.n))
        functions = [Square (engine, float(i)) for i in range (n)]
        print(engine)
        pool = roboptim.core.PyFunctionPool (engine, functions, name = "Dummy pool",
                                             n_proc = 2)
        print(pool)
        x = np.array([10., -5., 1., 2., -1., 1.])
        assert len(x) == 2 * n
        res = pool (x)
        np.testing.assert_almost_equal (engine.data,
            [xi**2 + yi**2 for xi,yi in x.reshape(engine.n, 2) ])
        np.testing.assert_almost_equal (res,
            [xi**2 + yi**2 for xi,yi in x.reshape(engine.n, 2) ])
        assert engine.compute_counter == 1
        assert engine.jacobian_counter == 0
        engine.reset ()
        pool_jac = pool.jacobian (x)
        jac = np.zeros ((engine.n, 2*engine.n))
        for i in range(engine.n):
            for j in range(2):
                jac[i,2*i+j] = 2. * x[2*i+j]
        np.testing.assert_almost_equal (pool_jac, jac)
        assert engine.compute_counter == 0
        assert engine.jacobian_counter == 1
        engine.reset ()
# Allow running this test module directly (outside a test runner).
if __name__ == '__main__':
    unittest.main()
| [
"chretien@lirmm.fr"
] | chretien@lirmm.fr |
a7268692098b9f516f98b43d5ff9d9bc3845b3e2 | 6ce8f5a1a5af7365a58d73d6f7ba39e897d68db9 | /handler/loghandler.py | edab630330a4dbc3f9f1f8076ef79c9f92395deb | [] | no_license | bellyfat/zapier-integrator | c6adc0879f725dadb2d1e8546d030bb49ed924d8 | 631a94865c3e58100d1d62c6916ff4011cc45181 | refs/heads/master | 2022-02-27T08:00:41.146131 | 2018-06-06T05:46:33 | 2018-06-06T05:46:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,140 | py | import datetime
import logging
import os
def create_logger(jobid):
    """Configure root logging to write DEBUG output to logs/<jobid>.log.

    Creates the logs/ directory on first use. Best-effort: any failure is
    printed and swallowed so logging setup never aborts the caller.
    """
    try:
        if not os.path.exists("logs"):
            os.makedirs("logs")
        filename = "logs/{0}.log".format(str(jobid))
        logging.basicConfig(filename=filename, level=logging.DEBUG)
    except Exception as e:
        # `print(e)` (parenthesized) works on both Python 2 and 3; the
        # original `print e` was Python-2-only syntax.
        print(e)
def debug_log(m):
    """Best-effort DEBUG entry of the form ' <timestamp> :: <m>'; never raises."""
    try:
        stamp = datetime.datetime.now()
        logging.getLogger(__name__).debug(" {0} :: {1}".format(stamp, m))
    except Exception:
        pass
def info_log(m):
    """Best-effort INFO entry of the form ' <timestamp> :: <m>'; never raises."""
    try:
        stamp = datetime.datetime.now()
        logging.getLogger(__name__).info(" {0} :: {1}".format(stamp, m))
    except Exception:
        pass
def error_log(m):
    """Best-effort ERROR entry of the form ' <timestamp> :: <m>'; never raises."""
    try:
        stamp = datetime.datetime.now()
        logging.getLogger(__name__).error(" {0} :: {1}".format(stamp, m))
    except Exception:
        pass
def warning_log(m):
    """Best-effort WARNING entry of the form ' <timestamp> :: <m>'; never raises."""
    try:
        stamp = datetime.datetime.now()
        logging.getLogger(__name__).warning(" {0} :: {1}".format(stamp, m))
    except Exception:
        pass
| [
"sagarcadd@gmail.com"
] | sagarcadd@gmail.com |
1c9ae353e393ba997cf5dae167fce110e3372543 | 482a778972f1eace39e0b75abd60ae244748590d | /Bizmate/diffKeywordsPanel.py | 0d6765dea258a009d5511da499eedd0a0d156ae3 | [] | no_license | ryderautoblr/Automation | a25468bbce63dd9ca461663263078886cd58a56f | 5ed823917d7a6952b9726c1e03617d3db2880a0b | refs/heads/master | 2023-07-25T02:22:23.047437 | 2021-08-23T07:58:36 | 2021-08-23T07:58:36 | 298,778,326 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,643 | py | import wx
import os
import diffKeywords
import createSelectFilePanel
import wx.lib.scrolledpanel as scrolled
class diffKeywordsPanel(scrolled.ScrolledPanel):
    """Scrollable wx panel for comparing an old and a new keywords workbook
    and interactively renaming the new keywords.

    Workflow: pick both files, Run to load them, Start/Next to step through
    each new keyword, Update to record a replacement, Save to write the
    result to "<new file>New.xlsx".
    """

    def __init__(self, parent):
        super(diffKeywordsPanel, self).__init__(parent)
        self.SetupScrolling()
        # File pickers for the new and old keyword workbooks.
        self.selectKeywordsFileNew = createSelectFilePanel.createSelectFilePanel(self,'Select New Keywords Files',0,0)
        self.selectKeywordsFileOld = createSelectFilePanel.createSelectFilePanel(self,'Select Old Keywords Files',0,100)
        self.btnRun = wx.Button(self, label='Run', pos=(10, 200))
        self.btnRun.Bind(wx.EVT_BUTTON, self.on_run)
        self.btnStart = wx.Button(self, label='Start/Next', pos=(100, 200))
        self.btnStart.Bind(wx.EVT_BUTTON, self.on_start)
        wx.StaticText(self,label="Old Keyword",pos=(10, 265))
        wx.StaticText(self,label="New Keyword",pos=(10, 515))
        # Multi-line display for the current keyword; single-line entry
        # field for typing its replacement.
        self.textOldKeyword = wx.TextCtrl(self, pos=(150, 250),size=(800,200), style=wx.TE_MULTILINE)
        self.textNewKeyword = wx.TextCtrl(self, pos=(150, 500),size=(800,45))
        self.btnUpdate = wx.Button(self, label='Update', pos=(10, 550))
        self.btnUpdate.Bind(wx.EVT_BUTTON, self.on_update)
        self.btnSave = wx.Button(self, label='Save', pos=(100, 550))
        self.btnSave.Bind(wx.EVT_BUTTON, self.on_save)
        self.keywordsObj = diffKeywords.diffKeywords()
        self.wordIndex = 0  # index of the keyword currently being edited
        self.disableUpdate()

    def disableUpdate(self):
        """Grey out the stepping/updating buttons until files are loaded."""
        self.btnUpdate.Disable()
        self.btnStart.Disable()

    def enableUpdate(self):
        """Re-enable the stepping/updating buttons."""
        self.btnUpdate.Enable()
        self.btnStart.Enable()

    def disableSelect(self):
        """Lock file selection while an edit session is in progress."""
        self.btnRun.Disable()
        self.selectKeywordsFileOld.disable()
        self.selectKeywordsFileNew.disable()

    def enableSelect(self):
        """Unlock file selection again (after saving)."""
        self.btnRun.Enable()
        self.selectKeywordsFileOld.enable()
        self.selectKeywordsFileNew.enable()

    def on_run(self, event):
        """Load both keyword files and enable stepping."""
        self.keywordsObj.getKeywords(self.selectKeywordsFileNew.getFileName(),self.selectKeywordsFileOld.getFileName())
        self.enableUpdate()

    def on_start(self, event):
        """Show the next new keyword, or a done message when exhausted."""
        data,index = self.keywordsObj.getNewWords()
        self.disableSelect()
        if data:
            self.wordIndex = index
            self.textOldKeyword.SetValue(data)
            self.textNewKeyword.SetValue("")
            self.btnUpdate.Enable()
        else:
            self.textNewKeyword.SetValue("Done! No New Words")
            self.btnUpdate.Disable()

    def on_update(self,event):
        """Record the replacement typed for the current keyword."""
        self.keywordsObj.updateKeyword(self.wordIndex, self.textNewKeyword.GetValue())
        self.btnUpdate.Disable()

    def on_save(self,event):
        """Write the updated keywords next to the new file and unlock selection."""
        self.keywordsObj.writeKeywords(self.selectKeywordsFileNew.getFileName().replace(".xlsx","New.xlsx"))
        self.enableSelect()
| [
"ryderautoblr@gmail.com"
] | ryderautoblr@gmail.com |
ddffb633c5367c4057f3289c255a4bb05080d7c5 | 83b1ca02d707ae3dfea17e0de27293370e5c65c0 | /slides/SourceCodes/dymos_animations/defect_convergence_lgl/dymos_defect_convergence_lgl.py | e6f7b50535fcda73db21b97681aeb8abdf95449e | [] | no_license | OpenMDAO/dymos_training | fc821c093e16360fd87343311219a837e2aaf73b | f8028e7edab630319205f5873b8b4783e214f8cb | refs/heads/master | 2020-12-19T02:11:44.364680 | 2020-01-22T14:29:49 | 2020-01-22T14:29:49 | 235,590,011 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,021 | py | import numpy as np
import matplotlib.pyplot as plt
import openmdao.api as om
def make_defect_convergence_animation():
    """Render one PDF frame per optimizer iteration showing how the 'gam'
    state defects converge (Gauss-Lobatto transcription).

    Reads the recorded driver iterations from the CaseReader SQL file and
    writes frames/frame_NN.pdf for each iteration.
    """
    cr = om.CaseReader('min_time_climb_solution_gauss-lobatto.sql')

    transcription = cr.system_metadata['traj.phases.phase0']['component_options']['transcription']
    gd = transcription.grid_data
    idxs_disc = gd.subset_node_indices['state_disc']
    idxs_col = gd.subset_node_indices['col']

    state = 'gam'

    # Quiver styling shared by both state-rate arrow plots (loop-invariant,
    # so hoisted out of the per-iteration loop).
    s = 3
    angles = 'xy'
    units = 'inches'
    scale_units = 'inches'
    w = 0.03

    for i, case_name in enumerate(cr.list_cases()):
        fig, ax = plt.subplots(1, 1, figsize=(8, 3.5))
        fig.suptitle('Convergence of state defects over iteration history')
        ax.set_xlim(-50, 450)
        ax.set_ylim(-20, 50)
        plt.text(350, 30, f'iteration: {i}')

        case = cr.get_case(case_name)
        case.list_outputs(out_stream=None)

        # Plot the high-density interpolated solution
        ax.plot(case.get_val('traj.phase0.timeseries2.time'),
                case.get_val(f'traj.phase0.timeseries2.states:{state}', units='deg'),
                color='lightgray',
                linestyle=':')

        # Plot the discretization nodes
        ax.plot(case.get_val('traj.phase0.timeseries.time')[idxs_disc, ...],
                case.get_val(f'traj.phase0.timeseries.states:{state}', units='deg')[idxs_disc, ...], 'ko')

        # Plot the collocation nodes
        ax.plot(case.get_val('traj.phase0.timeseries.time')[idxs_col, ...],
                case.get_val(f'traj.phase0.timeseries.states:{state}', units='deg')[idxs_col, ...], 'k^')

        # Plot the evaluated state rates (red arrows)
        dgam_dt = case.get_val(f'traj.phase0.timeseries.state_rates:{state}', units='deg/s')[idxs_col, ...].ravel()
        dt = np.ones_like(dgam_dt)

        ax.quiver(case.get_val('traj.phase0.timeseries.time')[idxs_col, ...],
                  case.get_val(f'traj.phase0.timeseries.states:{state}', units='deg')[idxs_col, ...],
                  dt, dgam_dt,
                  units=units, angles=angles, scale=s,
                  scale_units=scale_units, color='r', width=w)

        # Plot the interpolated state rates (blue arrows)
        dgam_dt = case.get_val(f'traj.phase0.state_interp.staterate_col:{state}', units='deg/s').ravel()

        ax.quiver(case.get_val('traj.phase0.timeseries.time')[idxs_col, ...],
                  case.get_val(f'traj.phase0.timeseries.states:{state}', units='deg')[idxs_col, ...],
                  dt, dgam_dt,
                  units=units, angles=angles, scale=s,
                  scale_units=scale_units, color='b', width=w)

        plt.savefig(f'frames/frame_{i:02d}.pdf')
        # Close each figure explicitly so pyplot does not accumulate one open
        # figure (and its memory) per iteration across the whole run.
        plt.close(fig)
# Generate the animation frames when run as a script.
if __name__ == '__main__':
    make_defect_convergence_animation()
| [
"rfalck@nasa.gov"
] | rfalck@nasa.gov |
53526a1d54ce7da5a974ff67f774e79cecc1b7ff | 00ac75d326dc9981a6efb753a41d2e44427b15bd | /utils/misc.py | e5f76d96b2a3484b03cbc2c61eb492ed486df534 | [
"MIT"
] | permissive | OpenXAIProject/dac | 59d6b85a936dfacae7aeb9f356ba963407670251 | 652776e21b56dcb68839363bb077d5c5ea28d81e | refs/heads/master | 2022-11-21T20:18:59.077438 | 2020-07-27T04:13:24 | 2020-07-27T04:13:24 | 282,783,144 | 21 | 2 | null | null | null | null | UTF-8 | Python | false | false | 714 | py | import imp
import os
import numpy as np
def add_args(args1, args2):
    """Copy every attribute of args2 onto args1 (overwriting duplicates)."""
    args1.__dict__.update(args2.__dict__)
def load_module(filename):
    """Load a Python source file as a module.

    Returns (module, module_name), where module_name is the file's basename
    without extension. Uses importlib instead of the legacy `imp` module,
    which was removed in Python 3.12; like imp.load_source, the loaded
    module is registered in sys.modules.
    """
    # Local imports keep this fix self-contained without touching the
    # file-level `import imp`, which other code may still rely on.
    import importlib.util
    import sys
    module_name = os.path.splitext(os.path.basename(filename))[0]
    spec = importlib.util.spec_from_file_location(module_name, filename)
    module = importlib.util.module_from_spec(spec)
    sys.modules[module_name] = module
    spec.loader.exec_module(module)
    return module, module_name
# adj: N * N
def adj_to_labels(adj):
    """Convert an N x N adjacency/affinity matrix into cluster labels.

    Rows are scanned in order; each not-yet-labeled node starts a new
    cluster k and every node j with adj[i, j] > 0.5 joins it.

    Returns an int64 array of length N with labels 0..k-1.
    """
    N = adj.shape[0]
    # np.long / np.bool were deprecated aliases removed in NumPy 1.24;
    # use the explicit dtypes instead.
    labels = np.zeros(N, dtype=np.int64)
    k = 0
    processed = np.zeros(N, dtype=bool)
    for i in range(N):
        if not processed[i]:
            processed[i] = True
            labels[i] = k
            idxs = adj[i] > 0.5
            labels[idxs] = k
            processed[idxs] = True
            k = k + 1
    return labels
| [
"john@lang.aitricsdev.com"
] | john@lang.aitricsdev.com |
9ae75b4e104f26447c714643ec07059c4de61710 | 902e0bcd7abd0eafb1daf820f5009e632bfe9141 | /educa/settings/local.py | 38d16417176ead2bdff2da71e37841ae010c8c28 | [] | no_license | ihfazhillah/educa-lms | 1ba4aebcfc7b68b6b80c3cacff0eeabb3024344b | e0c4ef46a147cc187297291db5adf78cc7da617d | refs/heads/master | 2020-03-28T09:38:34.998747 | 2018-09-22T16:03:49 | 2018-09-22T16:03:49 | 148,048,405 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 178 | py | from .base import *
DEBUG = True
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
| [
"mihfazhillah@gmail.com"
] | mihfazhillah@gmail.com |
d28101b5fac424b3cc3fcc47e3bf89b2b6de6ae0 | 6a609bc67d6a271c1bd26885ce90b3332995143c | /exercises/string/read_n_characters_given_read4.py | 389d8d3146560a64e411108a2f4d541362426403 | [] | no_license | nahgnaw/data-structure | 1c38b3f7e4953462c5c46310b53912a6e3bced9b | 18ed31a3edf20a3e5a0b7a0b56acca5b98939693 | refs/heads/master | 2020-04-05T18:33:46.321909 | 2016-07-29T21:14:12 | 2016-07-29T21:14:12 | 44,650,911 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,213 | py | # -*- coding: utf-8 -*-
"""
The API: int read4(char *buf) reads 4 characters at a time from a file.
The return value is the actual number of characters read. For example, it returns 3 if there is only 3 characters left in the file.
By using the read4 API, implement the function int read(char *buf, int n) that reads n characters from the file.
Note:
The read function will only be called once for each test case.
"""
# The read4 API is already defined for you.
# @param buf, a list of characters
# @return an integer
# def read4(buf):
class Solution(object):
    def read(self, buf, n):
        """
        :type buf: Destination buffer (List[str])
        :type n: Maximum number of characters to read (int)
        :rtype: The number of characters read (int)
        """
        total = 0
        while n > 0:
            # read4 may only be called once per chunk; leftover characters
            # in the chunk are lost on the next call.
            chunk = [''] * 4
            got = read4(chunk)
            if got == 0:
                return total
            take = min(got, n)
            buf[total:total + take] = chunk[:take]
            total += take
            n -= take
        return total
| [
"wanghan15@gmail.com"
] | wanghan15@gmail.com |
ee0ec04e2d8c04fc9f2ef78728acf8ebd80904ea | 3fdd9e2f5663c6b07420ff0047e20aa1d4dec0e9 | /uim/codec/context/scheme.py | a251974e36cdf82ff8518dff4eee8112df492a74 | [
"Apache-2.0"
] | permissive | erdoukki/universal-ink-library | 39150814e1a11a5f15e7e66491784563e28e6de8 | 689ed90e09e912b8fc9ac249984df43a7b59aa59 | refs/heads/main | 2023-08-05T02:22:11.142863 | 2021-08-20T10:12:06 | 2021-08-20T10:12:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,708 | py | # -*- coding: utf-8 -*-
# Copyright © 2021 Wacom Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class PrecisionScheme(object):
    """
    Contains information for the decimal precision of data in different channels.

    The mask packs one 4-bit precision value per channel group, at the bit
    offsets given by the *_SHIFT_BITS class constants.

    Parameters
    ----------
    mask_value: int
        Mask value which encodes the path precision.
    """
    POSITION_SHIFT_BITS: int = 0
    SIZE_SHIFT_BITS: int = 4
    ROTATION_SHIFT_BITS: int = 8
    SCALE_SHIFT_BITS: int = 12
    OFFSET_SHIFT_BITS: int = 16

    def __init__(self, mask_value: int = 0):
        self.__value = mask_value

    def _nibble(self, shift: int) -> int:
        """Extract the 4-bit precision field stored at *shift* in the mask."""
        return (self.__value >> shift) & 0xF

    @property
    def value(self) -> int:
        """Value that encodes the bits. (`int`)"""
        return self.__value

    @value.setter
    def value(self, value: int):
        self.__value = value

    @property
    def position_precision(self) -> int:
        """Data precision for position (X, Y, Z) channels. (`int`, read-only)"""
        return self._nibble(PrecisionScheme.POSITION_SHIFT_BITS)

    @property
    def size_precision(self) -> int:
        """Data precision for the Size channel. (`int`, read-only)"""
        return self._nibble(PrecisionScheme.SIZE_SHIFT_BITS)

    @property
    def rotation_precision(self) -> int:
        """Data precision for the Rotation channel. (`int`, read-only)"""
        return self._nibble(PrecisionScheme.ROTATION_SHIFT_BITS)

    @property
    def scale_precision(self) -> int:
        """Data precision for the Scale (ScaleX, ScaleY, ScaleZ) channels. (`int`, read-only)"""
        return self._nibble(PrecisionScheme.SCALE_SHIFT_BITS)

    @property
    def offset_precision(self) -> int:
        """Data precision for the Offset (OffsetX, OffsetY, OffsetZ) channels. (`int`, read-only)"""
        return self._nibble(PrecisionScheme.OFFSET_SHIFT_BITS)

    def __repr__(self):
        return (f'Precision scheme. [position:={self.position_precision}, '
                f'size:={self.size_precision}, '
                f'rotation:={self.rotation_precision}, '
                f'scale:={self.scale_precision}, '
                f'offset:={self.offset_precision}]')
| [
"github@wacom.com"
] | github@wacom.com |
e32f03050666068ee86574f9ca6ea27dc0af133b | eb15a3169a6333f6984bd3ff4cd8a56e4ec11d4a | /scripts/metrics.py | 0fd56e6b7e402f64d8b79d45db97388bfe9b050b | [
"BSD-3-Clause",
"BSD-2-Clause"
] | permissive | waggle-sensor/core | 87255a8ddd5fb9efe91543c9ab3be14b925767b8 | 6e328b1a1c4faebc0585e307e9256db25d7c48ab | refs/heads/master | 2020-12-11T05:53:58.741743 | 2019-09-11T00:40:45 | 2019-09-11T00:40:45 | 68,459,432 | 2 | 3 | null | 2019-04-08T22:03:08 | 2016-09-17T15:10:19 | Python | UTF-8 | Python | false | false | 7,972 | py | import subprocess
import os
import re
import time
from contextlib import suppress
from glob import glob
import logging
import configparser
logger = logging.getLogger('metrics')
def read_file(path):
    """Return the full text contents of *path*."""
    logger.debug('read_file %s', path)
    with open(path) as handle:
        return handle.read()
def read_section_keys(config, section):
    """List the option names of *section*, or [] when the section is absent."""
    if section in config:
        return list(config[section])
    return []
def read_config_file(path):
    """Parse *path* as an INI file and return the four known key lists."""
    logger.debug('reading config %s', path)
    parser = configparser.ConfigParser(allow_no_value=True)
    parser.read(path)
    section_names = ('devices', 'network', 'ping', 'services')
    return {name: read_section_keys(parser, name) for name in section_names}
def get_sys_uptime():
    """Whole seconds since boot, read from /proc/uptime."""
    uptime_seconds = read_file('/proc/uptime').split()[0]
    return int(float(uptime_seconds))
def get_dev_exists(path):
    """True when *path* (typically a device node) is present."""
    present = os.path.exists(path)
    return present
def get_net_metrics(iface):
    """Return (rx_bytes, tx_bytes) for *iface*, or (0, 0) when it is absent."""
    stats_dir = os.path.join('/sys/class/net', iface, 'statistics')
    try:
        rx = int(read_file(os.path.join(stats_dir, 'rx_bytes')))
        tx = int(read_file(os.path.join(stats_dir, 'tx_bytes')))
    except FileNotFoundError:
        return 0, 0
    return rx, tx
def get_service_status(service):
    """True when systemd reports *service* as active.

    Returns False both when the unit is inactive/unknown (non-zero exit
    status) and when systemctl itself is unavailable (non-systemd hosts) —
    the original crashed with FileNotFoundError in the latter case.
    """
    try:
        # The output itself is unused; only the exit status matters
        # (the original pointlessly .decode()d and discarded it).
        subprocess.check_output(['systemctl', 'is-active', service])
        return True
    except (subprocess.CalledProcessError, OSError):
        return False
# can also have a log tail process watching all the wagman logs for events
def get_wagman_metrics(config, metrics):
    """Collect Wagman (power manager) health metrics from wagman-client and
    the waggle-wagman-driver journal.

    Each probe is wrapped in suppress(Exception) so a missing tool or an
    unmatched log line skips that group instead of aborting collection.
    """
    # optimization... doesn't bother with query if device missing.
    if not get_dev_exists('/dev/waggle_sysmon'):
        return

    with suppress(Exception):
        metrics['wagman', 'uptime'] = int(subprocess.check_output(['wagman-client', 'up']).decode())

    log = subprocess.check_output([
        'journalctl',                  # scan journal for
        '-u', 'waggle-wagman-driver',  # wagman driver logs
        '--since', '-90',              # from last 90s
        '-b',                          # from this boot only
        '-o', 'cat',                   # in compact form
    ]).decode()

    # Any driver output at all within the window counts as "communicating".
    metrics['wagman', 'comm'] = ':wagman:' in log

    with suppress(Exception):
        # fail counters: node controller / edge processor / coresense
        nc, ep, cs = re.findall(r':fails (\d+) (\d+) (\d+)', log)[-1]
        metrics['wagman_fc', 'nc'] = int(nc)
        metrics['wagman_fc', 'ep'] = int(ep)
        metrics['wagman_fc', 'cs'] = int(cs)

    with suppress(Exception):
        # current-usage readings: wagman itself plus the three subsystems
        wm, nc, ep, cs = re.findall(r':cu (\d+) (\d+) (\d+) (\d+)', log)[-1]
        metrics['wagman_cu', 'wm'] = int(wm)
        metrics['wagman_cu', 'nc'] = int(nc)
        metrics['wagman_cu', 'ep'] = int(ep)
        metrics['wagman_cu', 'cs'] = int(cs)

    with suppress(Exception):
        nc, ep, cs = re.findall(r':enabled (\d+) (\d+) (\d+)', log)[-1]
        # BUG FIX: the regex groups are *strings*, and bool('0') is True, so
        # the original bool(nc) always reported enabled. Convert via int().
        metrics['wagman_enabled', 'nc'] = bool(int(nc))
        metrics['wagman_enabled', 'ep'] = bool(int(ep))
        metrics['wagman_enabled', 'cs'] = bool(int(cs))

    with suppress(Exception):
        ports = re.findall(r':vdc (\d+) (\d+) (\d+) (\d+) (\d+)', log)[-1]
        metrics['wagman_vdc', '0'] = int(ports[0])
        metrics['wagman_vdc', '1'] = int(ports[1])
        metrics['wagman_vdc', '2'] = int(ports[2])
        metrics['wagman_vdc', '3'] = int(ports[3])
        metrics['wagman_vdc', '4'] = int(ports[4])

    with suppress(Exception):
        metrics['wagman_hb', 'nc'] = 'nc heartbeat' in log
        metrics['wagman_hb', 'ep'] = 'gn heartbeat' in log
        metrics['wagman_hb', 'cs'] = 'cs heartbeat' in log

    log = subprocess.check_output([
        'journalctl',                  # scan journal for
        '-u', 'waggle-wagman-driver',  # wagman driver logs
        '--since', '-300',             # from last 5m
        '-b',                          # from this boot only
    ]).decode()

    # maybe we just schedule this service to manage its own sleep / monitoring timer
    # this would actually allow events to be integrated reasonably.
    metrics['wagman_stopping', 'nc'] = re.search(r'wagman:nc stopping', log) is not None
    metrics['wagman_stopping', 'ep'] = re.search(r'wagman:gn stopping', log) is not None
    metrics['wagman_stopping', 'cs'] = re.search(r'wagman:cs stopping', log) is not None

    metrics['wagman_starting', 'nc'] = re.search(r'wagman:nc starting', log) is not None
    metrics['wagman_starting', 'ep'] = re.search(r'wagman:gn starting', log) is not None
    metrics['wagman_starting', 'cs'] = re.search(r'wagman:cs starting', log) is not None

    metrics['wagman_killing', 'nc'] = re.search(r'wagman:nc killing', log) is not None
    metrics['wagman_killing', 'ep'] = re.search(r'wagman:gn killing', log) is not None
    metrics['wagman_killing', 'cs'] = re.search(r'wagman:cs killing', log) is not None
def check_ping(host):
    """True when *host* answers four ICMP echo requests; False on any error."""
    try:
        subprocess.check_output(['ping', '-c', '4', host])
    except Exception:
        return False
    return True
def get_sys_metrics(config, metrics):
    """Fill *metrics* with uptime, wall-clock time, memory and load averages."""
    metrics['uptime'] = get_sys_uptime()
    metrics['time'] = int(time.time())

    meminfo = read_file('/proc/meminfo')
    for key, pattern in (('mem_total', r'MemTotal:\s*(\d+)\s*kB'),
                         ('mem_free', r'MemFree:\s*(\d+)\s*kB')):
        # /proc/meminfo reports kB; store bytes.
        metrics[key] = int(re.search(pattern, meminfo).group(1)) * 1024

    load1, load5, load15 = read_file('/proc/loadavg').split()[:3]
    metrics['loadavg1'] = float(load1)
    metrics['loadavg5'] = float(load5)
    metrics['loadavg15'] = float(load15)
# Friendly device name -> filesystem path whose existence signals the
# device is present (consumed by get_device_metrics).
device_table = {
    'wagman': '/dev/waggle_sysmon',
    'coresense': '/dev/waggle_coresense',
    'modem': '/dev/attwwan',
    'wwan': '/sys/class/net/ppp0',
    'lan': '/sys/class/net/eth0',
    'mic': '/dev/waggle_microphone',
    'samba': '/dev/serial/by-id/usb-03eb_6124-if00',
    'bcam': '/dev/waggle_cam_bottom',
    'tcam': '/dev/waggle_cam_top',
}
def get_device_metrics(config, metrics):
    """Record an up/down flag for every device named in the config."""
    for name in config['devices']:
        path = device_table.get(name)
        if path is None:
            logger.warning('no device "%s"', name)
            continue
        metrics['dev_' + name, 'up'] = get_dev_exists(path)
# Ping-target name -> hostname/IP probed by get_ping_metrics.
ping_table = {
    'beehive': 'beehive',
    'nc': '10.31.81.10',
    'ep': '10.31.81.51',
}
def get_ping_metrics(config, metrics):
    """Record reachability for every ping target named in the config."""
    for name in config['ping']:
        host = ping_table.get(name)
        if host is None:
            logger.warning('no ping host "%s"', name)
            continue
        metrics['ping_' + name, 'up'] = check_ping(host)
# Interface alias -> kernel interface name under /sys/class/net.
network_table = {
    'wwan': 'ppp0',
    'lan': 'eth0',
}
def get_network_metrics(config, metrics):
    """Record rx/tx byte counters for every interface named in the config."""
    for name in config['network']:
        iface = network_table.get(name)
        if iface is None:
            logger.warning('no network interface "%s"', name)
            continue
        rx_bytes, tx_bytes = get_net_metrics(iface)
        metrics['net_' + name, 'rx'] = rx_bytes
        metrics['net_' + name, 'tx'] = tx_bytes
# Service alias -> systemd unit name checked by get_service_metrics.
service_table = {
    'rabbitmq': 'rabbitmq-server',
    'coresense': 'waggle-plugin-coresense',
}
def get_service_metrics(config, metrics):
    """Record a running flag for every service named in the config."""
    for name in config['services']:
        unit = service_table.get(name)
        if unit is None:
            logger.warning('no service "%s"', name)
            continue
        metrics['running', name] = get_service_status(unit)
def get_metrics_for_config(config):
    """Collect every metric group enabled by *config* into a single dict."""
    metrics = {}
    collectors = [get_sys_metrics, get_device_metrics]
    # Wagman probes are expensive; only run them when the device is configured.
    if 'wagman' in config['devices']:
        collectors.append(get_wagman_metrics)
    collectors += [get_ping_metrics, get_network_metrics, get_service_metrics]
    for collect in collectors:
        collect(config, metrics)
    return metrics
def main():
    """Print all collected metrics for this node's metrics configuration."""
    import pprint
    config = read_config_file('/wagglerw/metrics.config')
    pprint.pprint(get_metrics_for_config(config))


if __name__ == '__main__':
    main()
| [
"sean.shahkarami@gmail.com"
] | sean.shahkarami@gmail.com |
19b37a81020fa9bec45465d5817b2d9d5f003214 | 5804556ef8edcd5bfaa4bf519476b895027e0f98 | /Exercise/XLA/3210.py | f5e26335d02bb38e2bf8f35b37567c56e911d4a7 | [] | no_license | LHKode/Image-Processing | dbbcc5d0e2255c8ef611a1c9108c29c393885682 | 6c19546564f4fcd5f185338a5e2756519fef2e0d | refs/heads/master | 2023-06-25T03:57:03.949627 | 2021-07-21T04:27:56 | 2021-07-21T04:27:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,632 | py | import cv2
import numpy as np
import math
from matplotlib import pyplot as plt
# Load the radiograph as a single-channel (grayscale) image (flag 0).
img = cv2.imread('hw4_radiograph_1.jpg',0)
# Earlier Butterworth low-pass experiment, kept commented out for reference:
# sx,sy = image.shape
# hr = np.arange(-(sx)/2,(sx)/2)
# hc = np.arange(-(sy)/2,(sy)/2)
#
# [x,y] = np.meshgrid(hc,hr,sparse=True)
# mg = np.sqrt(x**2+y**2)
# H = 1/(1+(mg/20)**(2*1))
# G = np.fft.fftshift(np.fft.fft2(image))
# Ip = G*H
# Im = np.abs(np.fft.ifft2(np.fft.ifftshift(Ip)))
# cv2.imshow('go',Im)
# cv2.imshow('o',image)
# cv2.waitKey()
def gaussianlp(img, sigma=50):
    """Build a Gaussian low-pass frequency mask matching *img*'s 2-D size.

    Parameters
    ----------
    img : ndarray whose first two dimensions give the mask size
    sigma : standard deviation (cutoff) of the Gaussian, in pixels;
        defaults to 50 to preserve the original behavior

    Returns
    -------
    ndarray of shape img.shape[:2] with values in (0, 1], equal to 1 at
    the centered zero frequency.
    """
    rows, cols = img.shape[:2]
    u = np.arange(-cols / 2, cols / 2)
    v = np.arange(-rows / 2, rows / 2)  # center is (0, 0)
    u, v = np.meshgrid(u, v)
    dist = np.sqrt(u ** 2 + v ** 2)
    # np.exp is the idiomatic, vectorized form of pow(math.e, ...).
    return np.exp(-dist ** 2 / (2.0 * sigma ** 2))
# Build the centered frequency-distance grid for the Gaussian mask.
sx, sy = img.shape[:2]
x = np.arange(-sy / 2, sy / 2)
y = np.arange(-sx / 2, sx / 2)  # center is (0, 0)
x, y = np.meshgrid(x, y)
d = np.sqrt(x ** 2 + y ** 2)
H = gaussianlp(img)
# NOTE(review): the next line recomputes the same mask inline and
# overwrites the gaussianlp() result above.
H = pow(math.e, (-d ** 2 / (2 * (50 ** 2))))  # using sigma = 50
g = np.fft.fftshift(np.fft.fft2(img))  # fft and shift to center
img_apply = g * H  # apply filter
img_gaussian_lp = abs(np.fft.ifft2(np.fft.ifftshift(img_apply)))
img_gaussian_lp = np.uint8(img_gaussian_lp)
cv2.imshow('Orginal',img)
cv2.imshow('Filter',img_gaussian_lp)
cv2.waitKey()
# Alternative global thresholding experiments, kept for reference:
# ret,img_binary = cv2.threshold(img_butterworth_lp,127,255,cv2.THRESH_BINARY)
# ret,img_trunc = cv2.threshold(img_butterworth_lp,127,255,cv2.THRESH_TRUNC)
# ret,img_tozero = cv2.threshold(img_butterworth_lp,127,255,cv2.THRESH_TOZERO)
# ret, img_otsu = cv2.threshold(img_butterworth_lp, 0,255, cv2.THRESH_OTSU)
# Adaptive mean thresholding of the low-pass-filtered image.
img_threshold = cv2.adaptiveThreshold(img_gaussian_lp, 255, cv2.ADAPTIVE_THRESH_MEAN_C,\
    cv2.THRESH_BINARY,11,2)
# img_gaussian = cv2.adaptiveThreshold(img_butterworth_lp, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,\
#     cv2.THRESH_BINARY,11,2)
# K-means segmentation of the filtered image.
# NOTE(review): reshape((-1, 3)) treats a presumably single-channel image
# as 3-column samples — verify this is intended.
Z = img_gaussian_lp.reshape((-1,3))
Z = np.float32(Z)
# NOTE: `iter` shadows the builtin of the same name.
iter = (cv2.TERM_CRITERIA_EPS+cv2.TERM_CRITERIA_MAX_ITER,10,1.0)
# criteria = (iter,10,1.0)
K=8
ret,label,center = cv2.kmeans(Z,K,None,iter,10,cv2.KMEANS_RANDOM_CENTERS)
center=np.uint8(center)
res = center[label.flatten()]
res2 = res.reshape((img_gaussian_lp.shape))
cv2.imshow('KMeans',res2)
cv2.imshow('Mean_C',img_gaussian_lp)
cv2.waitKey()
# Matplotlib comparison figure, kept commented out for reference:
# titles = ['Original', 'butterworth_lp','binary_threshold', 'Kmean (K='+str(K)+', inter ='+str(iter)+')']
# images = [img, img_butterworth_lp, img_binary,res2]
# for i in range(4):
#     plt.subplot(2,3,i+1)
#     plt.imshow(images[i],'gray')
#     plt.title(titles[i])
#     plt.xticks([]),plt.yticks([])
# plt.show()
# cv2.waitKey() | [
"hoangkhanh9119@gmail.com"
] | hoangkhanh9119@gmail.com |
0c71bb0c3348dc7ce28f6758bb9aa0ec1430c8b9 | f69116784fc84a04ee8a8da2260c298097383928 | /IrasutoyaGenerator.py | db4ac3ba3f26810d49469c5ffdd7b8e723928f90 | [] | no_license | KoshikawaShinya/IRASUTOYA_GAN | 56149af652e122d2952543460b38179bf662b103 | e40aeb2d63c3515b8b6137de1202a0717f2be1bc | refs/heads/master | 2022-12-16T10:49:04.863111 | 2020-09-12T13:33:26 | 2020-09-12T13:33:26 | 291,705,015 | 0 | 0 | null | 2020-09-02T05:33:51 | 2020-08-31T12:09:20 | Python | UTF-8 | Python | false | false | 409 | py | import keras as K
import cv2
import matplotlib.pyplot as plt
import numpy as np
# NOTE(review): `seed` is unused because the np.random.seed call below is
# commented out, so each run produces a different latent vector.
seed = 502
z_dim = 100  # dimensionality of the generator's latent input
model = K.models.load_model('saved_model/mizumashi_model.h5')
#np.random.seed(seed=seed)
# Sample one latent vector from a standard normal distribution.
z = np.random.normal(0, 1, (1, z_dim))
img = model.predict(z)
img = np.reshape(img, (64, 64, 3))
print(img.shape)
# Rescale from the generator's [-1, 1] output range to [0, 1] for saving.
img = 0.5 * img + 0.5
plt.imsave('generate_img/irasuto_2.jpg', img)
plt.imshow(img)
plt.show()
| [
"satowimp0319@gmail.com"
] | satowimp0319@gmail.com |
edcfa6a98717e4cd2d68b62eb93dc8abe8293142 | 07667963cdddf3f046b7223e7fa33acd4d36b35c | /P4Python/config.py | 7b5b89fa960a6e88042bc0896dfa36be61eb791e | [] | no_license | p4misc/p4api_examples | a3799f1e44967e1fdce10ee1b30d3183ecd4256a | 3610e6adb0facaa91dafe816a2c879d191f23662 | refs/heads/master | 2022-12-29T02:06:00.057362 | 2020-10-19T14:08:23 | 2020-10-19T14:08:23 | 295,386,271 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 54 | py | P4PORT = "1666"
P4USER = "bruno"
P4CLIENT = "test"
| [
"p4miscjp@gmail.com"
] | p4miscjp@gmail.com |
5116b4a93bc9ae0defde9d176964fb6d903735d1 | 4d63e1189d42abc50e015569364fb1799b53665a | /cubepress/model/util.py | a9e196e92700b5ff73b79ad8791bd96d52cfb916 | [
"MIT"
] | permissive | openspending/cubepress | 7c6a5222150a7c55068a28596e13130213e10850 | 691ef925737cb4abb5889ffbb6adfbf60d94d36f | refs/heads/master | 2020-07-11T10:17:05.295637 | 2015-09-04T09:56:50 | 2015-09-04T09:56:50 | 34,243,877 | 4 | 3 | MIT | 2021-06-18T01:57:39 | 2015-04-20T07:04:16 | Python | UTF-8 | Python | false | false | 222 | py | # coding: utf-8
from normality import slugify
def valid_name(name):
slug = slugify(name, sep='_')
if slug != name or slug == 'row_count':
raise ValueError('Invalid identifier: %s' % name)
return slug
| [
"friedrich@pudo.org"
] | friedrich@pudo.org |
e3472ddbe2154408ef7551072d639ec11aab7534 | 12e84c82f6d92152a362e50468e9674b36f22289 | /Day 29 - Bitwise AND.py | 38e0fd371013191b17b4d1fcdeedd932d070f16c | [] | no_license | swrnv/hackerrank-30daysofcode | 266b47e94d0af341461ac4681f619a8178a29100 | 1f10e1877a7124a37c232dae1d1cadc220edf65d | refs/heads/main | 2023-03-01T06:08:22.858816 | 2021-02-12T07:05:29 | 2021-02-12T07:05:29 | 328,576,269 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 224 | py | import math
import os
import random
import re
import sys
t = int(input().strip())
for a0 in range(t):
n, k = input().strip().split(' ')
n, k = [int(n), int(k)]
print(k-1 if ((k-1) | k) <= n else k-2) | [
"noreply@github.com"
] | swrnv.noreply@github.com |
d4ad6f9520043f4e80c6250d05378364b3fdc160 | db1b3ce8d5dbd94702e8c072686171b4836930e1 | /dictionary/6.py | 634badb85533bc6d23bc69e34a58864bf8babaa4 | [] | no_license | rostonn/pythonW3 | d1e5c4e39a37331437ac5df766d6901f355ee483 | dca530846445c8ae06d75df4787b17a67d837b4f | refs/heads/master | 2021-01-01T04:40:54.965119 | 2016-04-28T04:45:14 | 2016-04-28T04:45:14 | 56,813,443 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 128 | py | # dictionary in the form 1 to n (x, x*x)
def square(n):
ans = {}
for x in range(1,n):
ans[x] = x**2
print(ans)
square(6)
| [
"rostonn@gmail.com"
] | rostonn@gmail.com |
13252a273ef6d09e665eec5b0641068b75eeaaf5 | 32fb6fd06b496b4c9ceabe578dceed265950cc37 | /homework/scripts/populatedb.py | bec2769510952534d229f5054eefa24884311ec1 | [] | no_license | rach/homework | 8167d3930d4313818e306fb0965ffbd6402bf12b | aca450753445caa188675d637300ead443d15525 | refs/heads/master | 2021-01-10T04:50:53.857108 | 2016-01-11T21:13:38 | 2016-01-11T21:13:38 | 49,445,928 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,189 | py | import os
import sys
from cStringIO import StringIO
import requests
import csv
from sqlalchemy import create_engine
from schematics.exceptions import ModelValidationError, ModelConversionError
from homework.core.schemas import ListingImportSchema
from homework.core.services import ListingService
from pyramid.paster import (
setup_logging,
)
from homework.core.models.meta import (
Base,
create_dbsession
)
def usage(argv):
cmd = os.path.basename(argv[0])
print('usage: %s <config_uri>\n'
'(example: "%s development.ini")' % (cmd, cmd))
sys.exit(1)
def csv_row_to_schema(row):
val = {
'pid': row[0],
'street': row[1],
'status': row[2],
'price': row[3],
'bedrooms': row[4],
'bathrooms': row[5],
'sq_ft': row[6],
'coord': (float(row[7]), float(row[8]))
}
return ListingImportSchema(val)
def main(argv=sys.argv):
if len(argv) != 2:
usage(argv)
config_uri = argv[1]
setup_logging(config_uri)
engine = create_engine('postgresql://homework@/homework')
dbsession = create_dbsession(engine)
service = ListingService(dbsession)
r = requests.get('https://s3.amazonaws.com/opendoor-problems/listings.csv')
if not r.ok:
exit(1)
failed, imported = [], 0;
reader = csv.reader(StringIO(r.text))
reader.next() # Skip header
for row in reader:
schema = csv_row_to_schema(row)
try:
schema.validate()
service.create_listing(
pid=schema.pid,
street=schema.street,
status=schema.status,
price=schema.price,
sq_ft=schema.sq_ft,
bedrooms=schema.bedrooms,
bathrooms=schema.bathrooms,
lat=schema.coord[0],
lng=schema.coord[1],
)
imported += 1
except (ModelConversionError, ModelValidationError), e:
print e.messages
failed.append(row[0])
dbsession.commit()
print 'Successfully imported: %s' % imported
print 'Failed: %s' % len(failed)
print 'Failed ids: %s' % failed
| [
"rachid.belaid@gmail.com"
] | rachid.belaid@gmail.com |
531e51e68531c36ba76cd7cfc4cd3c4b1ae01c7e | 5de474f245bb746e188c8d490ed63bf07d06b68b | /mysite/mysite/webapp/views.py | 75299036fe1c2ac6a6d2da09d9a38507c64dd21c | [] | no_license | hitesh-1997/Medical_Chat_Bot | 610ffb3e40fc459e2bb9c31f7ed535bf8482962d | c5aa981a370eb51c54b973ecaccad51919c64cd2 | refs/heads/master | 2022-12-11T03:35:21.812424 | 2020-06-10T07:05:55 | 2020-06-10T07:05:55 | 170,744,515 | 0 | 0 | null | 2022-12-08T01:21:13 | 2019-02-14T19:16:01 | Java | UTF-8 | Python | false | false | 4,778 | py | from django.shortcuts import render
from django.http import HttpResponse
import nltk, re, check, disease
from disease import *
from check import *
import array
from .models import Diagnosis
import pytz
def sentenizer(str): #getting sentences
sent_tokenizer=nltk.data.load('tokenizers/punkt/english.pickle')
sents = sent_tokenizer.tokenize(str)
return sents
def utc_to_local(utc_dt):
local_tz = pytz.timezone("Asia/Kolkata")
local_dt = utc_dt.replace(tzinfo=pytz.utc).astimezone(local_tz)
return local_tz.normalize(local_dt)
def stem(word):
regexp = r'^(.*?)(ing|ly|ed|ious|ies|ive|es|s|ment|ous)?$'
stem, suffix = re.findall(regexp, word)[0]
return stem
def stem2(word):
regexp = r'^(.*?)(s|ous|ness|iness)?$'
stem, suffix = re.findall(regexp, word)[0]
return stem
# Create your views here.
def first_try(request):
initialise()
count = 0
prob = [] #stores problems faced
symc = [] #symc stores all symptoms
cp=0
#cond = raw_input('Enter your current conditions and sufferings if any \n') #input
request = str(request)
print 'first function is called ********************************'
print request
start = request.find('?')
print 'start index is = ',start
end = request.find('#')
print 'end index if = ',end
cond = request[start+1:end]
cond = cond.replace('_',' ')
print 'here condition is ',cond
ind_sen = sentenizer(cond)
print 'ind_sen is ',ind_sen
for num in ind_sen:
cp = cp+1
sym = '' #sym stores symptoms in a sentence
pain = ''
b_part = ''
neg = ''
count=0
ind_wod = nltk.word_tokenize(num)
for t in ind_wod:
t = t.lower()
if(check_neg(t)== True):
neg = 'no'
t1 = stem(t)
t2 = stem2(t)
if(check_sym(t1)== True):
if(sym is not ''):
sym = sym + ' and ' + t
symc.append(t1)
else:
sym = t
symc.append(t1)
if(sym is not ''):
prob.append(neg + ' ' + sym)
print 'Symptoms Observed are\n'
j=0
#print sym_arr
#print 'Symptoms Matching to that of Dengue\n'
'''symc stores all the symptoms after the cleaning part'''
print 'got following symptoms'
print symc
sym_arr = get_sym_arr(symc)
'''sym_arr is an array of 0's and 1's if the symptoms is present sym_arr[symp_number]=1
else it is 0'''
print 'printing sym array /////////////////'
print sym_arr
'''
d = dis_id2(sym_arr)
print '\nThe disease with maximum probability is \n'
print d
'''
print 'h***************************h'
return HttpResponse(str(sym_arr))
def second_try(request):
initialise()
# print request.body
# print request.META.items()
print '*****************************'
print 'enterd second block'
print '*****************************'
regex = re.compile('^HTTP_')
headers=dict((regex.sub('', header), value) for (header, value)
in request.META.items() if header.startswith('HTTP_'))
print ' printing headers '
print ' type of headers ',type(headers)
print headers
print 'headers printed'
print '*************************--------------',headers["HOST"]
request = str(request)
start = request.find('?')
end = request.find('&')
s = request.find('&')
e = request.find('#')
user = request[s+1:e-1]
cond = request[start+1:end]
print 'got user id ------------ //////////// ********'
print user
print cond
sym = []
#sym = array.array('i', (int(t) for t in cond.split(",")))
sym = map(int, cond.split(","))
d, diagnosed = dis_id2(sym)
print d
print 'bool diagnosed ',diagnosed
if diagnosed:
diagnosis = Diagnosis(disease=d, user_id=user)
diagnosis.save()
print 'printing all database results *** -- -- - -- -'
print Diagnosis.objects.all()
#d = disdata[2]
print str(d)
return HttpResponse(str(d))
def disease_history(request):
regex = re.compile('^HTTP_')
headers=dict((regex.sub('', header), value) for (header, value)
in request.META.items() if header.startswith('HTTP_'))
request = str(request)
start = request.find('?')
end = request.find('#')
user = request[start+1:end-1]
print user
diseases = Diagnosis.objects.filter(user_id=user).order_by('-created_at')
print 'ohkay'
output = []
for disease in diseases:
output.append(utc_to_local(disease.created_at).strftime("%H:%M:%S %d-%m-%y") + " " + disease.disease)
return HttpResponse(str(','.join(output)))
| [
"sagtanih@gmail.com"
] | sagtanih@gmail.com |
490d5797206e24f90f6839f8a93f190a99cc405d | 705d95b3dc69d3890a2f0e19128aa8065fc54579 | /smaccounts.py | 849afde901e8b845dac7828e4a83fec392b5b305 | [
"MIT"
] | permissive | emdeechege/Python-pwd-locker | 84ad32f71aa2179a9f6465ce48371bc85928e125 | df88496c9f73097749d70b813f8b47931156fcd6 | refs/heads/master | 2020-03-27T03:10:18.447881 | 2018-08-25T19:12:49 | 2018-08-25T19:12:49 | 145,843,279 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,758 | py | import pyperclip
import string
import random
class User:
'''
Class to create new user accounts and save the same to help in accesssing the pwd locker
'''
users_list = []
def __init__(self,first_name,last_name,password):
'''
Method to define the properties of the object
'''
self.first_name = first_name
self.last_name = last_name
self.password= password
def save_user(self):
'''
save user details method into users_list
'''
User.users_list.append(self)
class Credential:
'''
Class that holds and saves user login details, social media a/c credentials, passwords
'''
# Class Variables
credentials_list =[]
@classmethod
def confirm_user(cls,first_name,password):
'''
Method that checks if the name and password entered match entries in the users_list
'''
active_user = ''
for user in User.users_list:
if (user.first_name == first_name and user.password == password):
active_user = user.first_name
return active_user
def __init__(self,user_name,social_media,account_name,password):
'''
Method defining the properties each object will hold
'''
self.user_name = user_name
self.social_media = social_media
self.account_name = account_name
self.password = password
def save_credentials(self):
'''
Function to save new user credentials
'''
Credential.credentials_list.append(self)
def generate_password():
'''
Function to generate random passwords for social media accounts
'''
pwchar = string.printable
length = int(input('Enter password length desired: '))
gen_pwd= ''
for char in range(length):
gen_pwd += random.choice(pwchar)
return gen_pwd
@classmethod
def display_credentials(cls):
'''
Class method to display the list of saved credentials
'''
return cls.credentials_list
@classmethod
def search_social_media(cls, social_media):
'''
Method that acccepts social media name and returns credentials matching the social media name
'''
for credential in cls.credentials_list:
if credential.social_media == social_media:
return credential
@classmethod
def copy_password(cls,social_media):
'''
Class method that copies a credential's password of a specific social media site after the credential's social media name is entered
'''
collect_pass = Credential.search_social_media(social_media)
return pyperclip.copy(collect_pass.password)
| [
"emdeechege@gmail.com"
] | emdeechege@gmail.com |
17d4fd877f2e4265c6bdd8c7bd5bf8ee32db7691 | ba63604ad476e7e8eee34f6288e18d96a6d19073 | /consolidate_marketplace_erp_reports.py | a9d80976096d33baee49882f3ee54bc488118ae4 | [] | no_license | matherthal/marketplace_report_consolidation | f4e2d1d25cc3040897e545dccf5f43357eb7e9d0 | c10dc02c80828ed6f774ad3769064a3d6f7a0e16 | refs/heads/master | 2022-11-24T03:59:42.146007 | 2020-08-09T04:18:43 | 2020-08-09T04:18:43 | 286,131,074 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,393 | py | #!/usr/bin/env python
# coding: utf-8
from datetime import datetime
import sys
import logging
import logging.config
import warnings
import shutil
import pandas as pd
from pathlib import Path
from services import fup_service, b2w_service, carrefour_service, viavarejo_service, \
colombo_service, zoom_service
logging.config.fileConfig('logging.conf')
warnings.filterwarnings("ignore")
_LOGGER = logging.getLogger()
_MARKETPLACE_SERVICES = {
'b2w': b2w_service,
'carrefour': carrefour_service,
'viavarejo': viavarejo_service,
'colombo': colombo_service,
'zoom': zoom_service
}
_MONTHS = {
1.0: 'JANEIRO',
2.0: 'FEVEREIRO',
3.0: 'MARÇO',
4.0: 'ABRIL',
5.0: 'MAIO',
6.0: 'JUNHO',
7.0: 'JULHO',
8.0: 'AGOSTO',
9.0: 'SETEMBRO',
10.0: 'OUTUBRO',
11.0: 'NOVEMBRO',
12.0: 'DEZEMBRO'
}
def get_max_date_folder(report_path):
report_date = None
for date_folder in report_path.glob('./*'):
# Update report_date folder if it's the first one found or it's a bigger date
if date_folder.is_dir() and (report_date is None or date_folder.name > report_date):
report_date = date_folder.name
return report_date
def export_monthly_consolidated_files(consolidated_df, path, marketplace, report_date,
split_discrepant=True):
path = path / 'resultado'
# Clean up files destination
try:
shutil.rmtree(path)
except:
pass
finally:
path.mkdir(parents=True, exist_ok=True)
# Breakdown data in months by the "Dt Abertura" from FUP report
for month in consolidated_df['Dt Abertura'].dt.month.unique():
month_df = consolidated_df[consolidated_df['Dt Abertura'].dt.month == month]
month_name = _MONTHS[month] if month in _MONTHS else 'ERRO'
if split_discrepant:
is_equal_revenue = month_df['total igual']
if sum(is_equal_revenue) > 0:
destination = \
path / f'{report_date}-{marketplace}-{month_name}-consolidado-IGUAL.xlsx'
month_df[is_equal_revenue].to_excel(destination)
_LOGGER.info(f'CAMINHO DO ARQUIVO CONSOLIDADO GERADO: {destination}')
if sum(is_equal_revenue) < len(month_df):
destination = \
path / f'{report_date}-{marketplace}-{month_name}-consolidado-DIFERENTE.xlsx'
month_df[~is_equal_revenue].to_excel(destination)
_LOGGER.info(f'CAMINHO DO ARQUIVO CONSOLIDADO GERADO: {destination}')
else:
destination = path / f'{report_date}-{marketplace}-{month_name}-consolidado.xlsx'
month_df.to_excel(destination)
_LOGGER.info(f'CAMINHO DO ARQUIVO CONSOLIDADO GERADO: {destination}')
def main(marketplace=None, report_date=None, error_bad_lines=True):
marketplace = marketplace.lower()
report_path = Path(__file__).parent / 'reports' / marketplace
if not report_date:
report_date = get_max_date_folder(report_path)
report_path = report_path / report_date
_LOGGER.info(f'CONSOLIDAÇÃO DE {marketplace} REFERENTE À {report_date}')
_LOGGER.info('OBTENDO ARQUIVOS FUP')
fup_df = fup_service.get_fup_df(report_path, error_bad_lines=error_bad_lines)
_LOGGER.info('PROCESSANDO ARQUIVOS FUP')
gp_fup_df = fup_service.process_fup_df(fup_df)
_LOGGER.info('ESTATÍSTICAS DE ARQUIVO FUP')
fup_service.print_stats(gp_fup_df)
if marketplace not in _MARKETPLACE_SERVICES:
_LOGGER.error('MARKETPLACE DESCONHECIDO')
mp_service = _MARKETPLACE_SERVICES[marketplace]
_LOGGER.info(f'OBTENDO ARQUIVOS {marketplace}')
mp_df = mp_service.get_marketplace_data(report_path)
_LOGGER.info(f'PROCESSANDO ARQUIVOS {marketplace}')
gp_mp_df = mp_service.process_report(mp_df)
_LOGGER.info(f'CONSOLIDANDO ARQUIVOS FUP E {marketplace}')
consolidated_df = mp_service.consolidate(gp_fup_df, gp_mp_df)
_LOGGER.info('EXPORTANDO ARQUIVO CONSOLIDADO PARA EXCEL COM A DATA DE HOJE')
export_monthly_consolidated_files(consolidated_df, report_path, marketplace, report_date)
_LOGGER.info('SCRIPT FINALIZADO COM SUCESSO')
if __name__ == "__main__":
if len(sys.argv) == 1:
print('Consolidação de relatórios de Marketplaces e ERP -- Menu')
options = ', '.join(_MARKETPLACE_SERVICES.keys())
print(f'Digite o nome do marketplace que deseja processar dentre as opçÕes ({options}):')
mp_selected = sys.stdin.readline().strip()
print('(OPCIONAL) Digite a pasta com a data para a qual deseja processar relatórios')
print('(se não digitar nada, será utilizada a última data):')
date_selected = sys.stdin.readline().strip()
print('(OPCIONAL) Digite "ignorar" para ignorar linhas dos relatórios que possuem erros, ou'
' apenas dê enter para prosseguir:')
error_bad_lines = sys.stdin.readline().strip().lower() != 'ignorar'
ans = 'NÃO ' if error_bad_lines else ''
print(f'Foi escolhido {ans}IGNORAR linhas que possuam erros (por ex., número de colunas '
'inválido)')
main(mp_selected, date_selected, error_bad_lines)
sys.stdin.readline()
else:
main(*sys.argv[1:])
| [
"matheuserthal@Matheuss-MacBook-Air.local"
] | matheuserthal@Matheuss-MacBook-Air.local |
d8aa61e21ccc8e7163ef8a9879a8cc2c1ae81989 | faa99ffc930bd0c0b9249171076343654295da05 | /bin/cronmon | 7c3547830fcba25c6d2660635ea7bbb220c9e5d7 | [] | no_license | malev/cronmon | b906afb5c56fe454f7098cd09738abad8700c306 | 270c302d6ffafdc85432f0cbd129b09bbccbb3ae | refs/heads/master | 2021-01-10T07:51:43.592444 | 2015-11-29T00:56:10 | 2015-11-29T00:56:10 | 46,084,016 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 81 | #!/usr/bin/env python
import sys
from cronmon.cli import main
sys.exit(main())
| [
"marcosvanetta@gmail.com"
] | marcosvanetta@gmail.com | |
501c723904671e6b5e1e983c153f84ed87cd268f | 8e693fa8064473628b581605e3a1408f9f4744a2 | /apps/core/views.py | a1f6f29e5dd5f6bbe5dc5c4bad6784ae85cfb863 | [] | no_license | daniel-santhiago-08/google_trends_app | d084096aa3179a565f4e408f483a7f01a38aecba | 4d7219844cda1cfdbcdaa6bc700e8d2933dc65e1 | refs/heads/master | 2021-08-08T00:29:59.870759 | 2020-04-01T17:43:17 | 2020-04-01T17:43:17 | 252,187,116 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 344 | py | from django.shortcuts import render
# Create your views here.
from django.shortcuts import render
from django.contrib.auth.decorators import login_required
# @login_required
def home(request):
# data = {}
# data['usuario'] = request.user
# return render(request, 'core/index.html', data)
return render(request, 'index.html') | [
"daniel.santhiago@thrive-wmccann.com"
] | daniel.santhiago@thrive-wmccann.com |
50dce8ff13c18e0e3acfce5ce9beaeeaeda119e2 | b367c827433cd76d4ae53efd7463621e576510c4 | /python/MakeConf_FinalNovember.py | 13004882c421cec15daa7ac4dc697d1e92abe9ca | [] | no_license | tklijnsma/RegressionTraining | 998ec9dcd9103d810febfd3fccd6b4d7c9e00a3b | bb5738b37aa804f6be51892577a3face71e89cbf | refs/heads/master | 2021-01-24T15:34:49.724573 | 2017-04-13T13:33:39 | 2017-04-13T13:33:39 | 57,199,002 | 0 | 2 | null | 2016-04-27T08:55:41 | 2016-04-27T08:55:41 | null | UTF-8 | Python | false | false | 11,757 | py | #!/usr/bin/env python
"""
Thomas:
"""
########################################
# Imports
########################################
import os
import ROOT
from Config import Config
from time import strftime
import argparse
########################################
# Main
########################################
def Make_conf(Verbose=True):
parser = argparse.ArgumentParser()
parser.add_argument( '--inputrootfile', '-i', type=str, help='Path to root file',
# default='/afs/cern.ch/work/t/tklijnsm/public/CMSSW_8_0_4/src/NTuples/Ntup_Jul22_fullpt_training.root'
default='/afs/cern.ch/work/r/rcoelhol/public/CMSSW_8_0_12/src/NTuples/Ntup_10Nov_ElectronPhoton.root'
)
parser.add_argument(
'--particle', metavar='N', type=str, nargs='+', help='Specify particles',
default=['electron','photon'],choices=['electron','photon']
)
parser.add_argument(
'--region', metavar='N', type=str, nargs='+', help='Specify regions',
default=['EB','EE'],choices=['EE','EB']
)
parser.add_argument(
'-n', '--name', type=str, default='NONE', help='Append a string at the end of the name of this config'
)
parser.add_argument( '--fast', action='store_true', help='Change some BDT options to be faster (but maybe less precise)')
args = parser.parse_args()
datestr = strftime( '%b%d' )
return_configs = []
for region in args.region:
for particle in args.particle:
# Instantiate the Config class which prints a .config file
config = Config()
config.Name = 'Config_' + datestr + '_' + particle + '_' + region
if args.name and args.name!='NONE' : config.Name += '_' + args.name
config.InputFiles = os.path.abspath( args.inputrootfile )
config.Tree = 'een_analyzer/{0}Tree'.format( particle.capitalize() )
########################################
# BDT settings
########################################
if args.fast:
config.Options = [
"MinEvents=300",
"Shrinkage=0.15",
"NTrees=1000",
"MinSignificance=5.0",
"EventWeight=1",
]
config.Name += '_FastOptions'
else:
config.Options = [
"MinEvents=200",
"Shrinkage=0.1",
"NTrees=1000",
"MinSignificance=5.0",
"EventWeight=1",
]
config.Target = "genEnergy / ( scRawEnergy + scPreshowerEnergy )"
# Probably not needed
config.TargetError = "1.253*abs( BDTresponse - genEnergy / ( scRawEnergy + scPreshowerEnergy ) )"
config.HistoConfig = "jobs/dummy_Histo.config"
config.CutEB = "scIsEB"
config.CutEE = "!scIsEB"
if region == 'EB':
config.DoEB = "True"
else:
config.DoEB = "False"
# ======================================
# Sample division - need a part for the ECAL-only training, and a part for the combination
config.CutBase = '1.0'
# These are for the old (regular BDT) EP combination - no longer needed
config.CutComb = '1.0'
config.CutError = '1.0'
# Cut events (otherwise running into CPU limits)
config.CutBase += " && NtupID<4000"
config.CutComb += " && NtupID<4000"
config.CutError += " && NtupID<4000"
########################################
# Order tree branches
########################################
# Agreed list on November 23:
# eval[0] = raw_energy;
# eval[1] = the_sc->etaWidth();
# eval[2] = the_sc->phiWidth();
# eval[3] = full5x5_ess.e5x5/raw_energy;
# eval[4] = ele.hcalOverEcalBc();
# eval[5] = rhoValue_;
# eval[6] = theseed->eta() - the_sc->position().Eta();
# eval[7] = reco::deltaPhi( theseed->phi(),the_sc->position().Phi());
# eval[8] = full5x5_ess.r9;
# eval[9] = full5x5_ess.sigmaIetaIeta;
# eval[10] = full5x5_ess.sigmaIetaIphi;
# eval[11] = full5x5_ess.sigmaIphiIphi;
# eval[12] = full5x5_ess.eMax/full5x5_ess.e5x5;
# eval[13] = full5x5_ess.e2nd/full5x5_ess.e5x5;
# eval[14] = full5x5_ess.eTop/full5x5_ess.e5x5;
# eval[15] = full5x5_ess.eBottom/full5x5_ess.e5x5;
# eval[16] = full5x5_ess.eLeft/full5x5_ess.e5x5;
# eval[17] = full5x5_ess.eRight/full5x5_ess.e5x5;
# eval[18] = EcalClusterToolsT<true>::e2x5Max(*theseed, &*ecalRecHits, topology_)/full5x5_ess.e5x5;
# eval[19] = EcalClusterToolsT<true>::e2x5Left(*theseed, &*ecalRecHits, topology_)/full5x5_ess.e5x5;
# eval[20] = EcalClusterToolsT<true>::e2x5Right(*theseed, &*ecalRecHits, topology_)/full5x5_ess.e5x5;
# eval[21] = EcalClusterToolsT<true>::e2x5Top(*theseed, &*ecalRecHits, topology_)/full5x5_ess.e5x5;
# eval[22] = EcalClusterToolsT<true>::e2x5Bottom(*theseed, &*ecalRecHits, topology_)/full5x5_ess.e5x5;
# eval[23] = N_SATURATEDXTALS;
# eval[24] = std::max(0,numberOfClusters);
# eval[25] = clusterRawEnergy[0]/raw_energy;
# eval[26] = clusterRawEnergy[1]/raw_energy;
# eval[27] = clusterRawEnergy[2]/raw_energy;
# eval[28] = clusterDPhiToSeed[0];
# eval[29] = clusterDPhiToSeed[1];
# eval[30] = clusterDPhiToSeed[2];
# eval[31] = clusterDEtaToSeed[0];
# eval[32] = clusterDEtaToSeed[1];
# eval[33] = clusterDEtaToSeed[2];
# eval[34] = ieta;
# eval[35] = iphi;
# eval[36] = (ieta-signieta)%5;
# eval[37] = (iphi-1)%2;
# eval[38] = (abs(ieta)<=25)*((ieta-signieta)) + (abs(ieta)>25)*((ieta-26*signieta)%20);
# eval[39] = (iphi-1)%20;
# eval[34] = raw_es_energy/raw_energy;
# eval[35] = the_sc->preshowerEnergyPlane1()/raw_energy;
# eval[36] = the_sc->preshowerEnergyPlane2()/raw_energy;
# eval[37] = eeseedid.ix();
# eval[38] = eeseedid.iy();
common_vars = [
# ======================================
# Common variables
'scRawEnergy',
'scEtaWidth',
'scPhiWidth',
'full5x5_e5x5/scRawEnergy',
'hadronicOverEm',
'rhoValue',
'delEtaSeed',
'delPhiSeed',
# ======================================
# Showershape variables
'full5x5_r9',
'full5x5_sigmaIetaIeta',
'full5x5_sigmaIetaIphi',
'full5x5_sigmaIphiIphi',
'full5x5_eMax/full5x5_e5x5',
'full5x5_e2nd/full5x5_e5x5',
'full5x5_eTop/full5x5_e5x5',
'full5x5_eBottom/full5x5_e5x5',
'full5x5_eLeft/full5x5_e5x5',
'full5x5_eRight/full5x5_e5x5',
'full5x5_e2x5Max/full5x5_e5x5',
'full5x5_e2x5Left/full5x5_e5x5',
'full5x5_e2x5Right/full5x5_e5x5',
'full5x5_e2x5Top/full5x5_e5x5',
'full5x5_e2x5Bottom/full5x5_e5x5',
# ======================================
# Saturation variables
'N_SATURATEDXTALS',
# ======================================
# Cluster variables
'N_ECALClusters',
'clusterRawEnergy[0]/scRawEnergy',
'clusterRawEnergy[1]/scRawEnergy',
'clusterRawEnergy[2]/scRawEnergy',
'clusterDPhiToSeed[0]',
'clusterDPhiToSeed[1]',
'clusterDPhiToSeed[2]',
'clusterDEtaToSeed[0]',
'clusterDEtaToSeed[1]',
'clusterDEtaToSeed[2]',
]
# EB specific
config.VariablesEB = common_vars + [
'iEtaCoordinate',
'iPhiCoordinate',
'iEtaMod5',
'iPhiMod2',
'iEtaMod20',
'iPhiMod20',
]
# EE specific
config.VariablesEE = common_vars + [
'iXCoordinate',
'iYCoordinate',
'scPreshowerEnergy/scRawEnergy',
# 'preshowerEnergyPlane1/scRawEnergy', # Disabled as of November 2016 (did not influence regression)
# 'preshowerEnergyPlane2/scRawEnergy',# Disabled as of November 2016 (did not influence regression)
]
if Verbose:
print '\n' + '-'*70
print 'Making config file ' + config.Name + '.config'
print ' Using the following branches for EE:'
print ' ' + '\n '.join( config.VariablesEE )
print ' Using the following branches for EB:'
print ' ' + '\n '.join( config.VariablesEB )
########################################
# Ep combination
########################################
# NOVEMBER 25: NO LONGER NECESSARY TO RUN OLD EP COMBO
config.DoCombine = "False"
config.DoErrors = "False"
########################################
# Output
########################################
# if Verbose:
# # Print all branches as a check
# print "\nAll branches in root file:"
# Read_branches_from_rootfile( physical_path(root_file) , config.Tree )
config.Parse()
# # Test if the config file can be read by ROOT TEnv
# print '\nReading in {0} and trying ROOT.TEnv( ..., 0 ):'.format( out_filename )
# I_TEnv = ROOT.TEnv()
# I_TEnv.ReadFile( out_filename, 0 )
# I_TEnv.Print()
# print 'Exited normally'
# print '='*70
# print
return_configs.append( config )
return return_configs
########################################
# Functions
########################################
def Filter( full_list, sel_list ):
# Functions that FILTERS OUT selection criteria
# Return the full list if sel_list is empty or None
if not sel_list:
return full_list
elif len(sel_list)==0:
return full_list
ret_list = []
for item in full_list:
# Loop over selection criteria; if found, don't add the item to the output list
add_item = True
for sel in sel_list:
if sel in item:
add_item = False
if add_item:
ret_list.append( item )
return ret_list
def Read_branches_from_rootfile( root_file, tree_gDirectory ):
root_fp = ROOT.TFile.Open( root_file )
tree = root_fp.Get( tree_gDirectory )
all_branches = [ i.GetName() for i in tree.GetListOfBranches() ]
print ' ' + '\n '.join(all_branches)
########################################
# End of Main
########################################
if __name__ == "__main__":
Make_conf()
| [
"klthomas@student.ethz.ch"
] | klthomas@student.ethz.ch |
29bc56593eeca0b6f0deb0a25c96a29e0f5b79b8 | 5f226346f8547cf0a797e7925d508aac0c20c6f9 | /experiments/jet/config.py | cff8dfa94b7df7b4229a5d5b46ae0979e2d8e45a | [] | no_license | spiec/reinforcement | d345fb958557755874ea5d4d12e944c29d668ccb | 38cae2059acec755a4bb54391eafb6aa62b47fed | refs/heads/master | 2020-04-28T10:48:43.890585 | 2019-04-13T10:48:16 | 2019-04-13T10:48:16 | 175,215,145 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,051 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# ----------------------------------------------------------------------
# Author: sebastian.piec@
# Modified: 2019, March 11
# ----------------------------------------------------------------------
seed = 999
agent = "output/jetdqn_v1.pt"
training_log = "logs/training.log"
# dqn
n_episodes = 100
# evolutionary agents
pool_size = 120
n_generations = 40
# ----------------------------------------------------------------------
def read_args():
global seed, agent, n_episodes
import argparse
parser = argparse.ArgumentParser(description='')
parser.add_argument("-s", "--seed", type=int, default=seed,
help="random seed")
parser.add_argument("-e", "--episodes", type=int, default=n_episodes,
help="# of episodes")
parser.add_argument("-a", "--agent", type=str, default=agent,
help="agent file")
args = parser.parse_args()
n_episodes = args.episodes
seed = args.seed
agent = args.agent
| [
"sebastian.piec@gmail.com"
] | sebastian.piec@gmail.com |
d72aab054b36b69c32a1d6055d1a12e2f092c571 | 70cd09d24746a5b03c996106272e97294d0d0f7f | /qt4i/device.py | 78a8af3be7cfb2aac2e9266990b3f0f282246c9a | [
"BSD-3-Clause",
"BSD-2-Clause"
] | permissive | getwindow/QT4i | 93160a1199ff60ac5bd8427fe1cd40ad9c59e959 | dccafddf716cfeb5b82adac0f109ea40bc7d62d3 | refs/heads/master | 2020-03-30T07:15:48.671698 | 2016-10-08T03:05:10 | 2016-10-08T03:05:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 23,543 | py | # -*- coding:utf-8 -*-
#
# Tencent is pleased to support the open source community by making QTA available.
# Copyright (C) 2016THL A29 Limited, a Tencent company. All rights reserved.
# Licensed under the BSD 3-Clause License (the "License"); you may not use this
# file except in compliance with the License. You may obtain a copy of the License at
#
# https://opensource.org/licenses/BSD-3-Clause
#
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" basis, WITHOUT WARRANTIES OR CONDITIONS
# OF ANY KIND, either express or implied. See the License for the specific language
# governing permissions and limitations under the License.
#
'''iOS设备模块
'''
# 2014/11/16 cherry 创建
# 2015/10/21 jerry 支持远程多终端
import time
import urllib
import datetime
import threading
import os
import xmlrpclib
import base64
from testbase import logger
from util import EnumDirect, Rectangle
from testbase.util import Singleton
from testbase.conf import settings
from _driver.driverserver import DriverManager
from _driver.driverserver import DriverWrapper
class DeviceManager(object):
'''设备管理类,用于多个设备的申请、查询和管理(此类仅用于Device内部,测试用例中请勿直接使用)
'''
__metaclass__ = Singleton
def __init__(self):
self._index = 0
self._lock = threading.Lock()
self._drivers = {}
@property
def drivers(self):
return self._drivers
def acquire_driver(self):
'''申请本地设备或者协助设备,默认第一个申请的设备为本地设备,后续申请的设备均为协作设备
:rtype: DriverWrapper or None
'''
device = None
driver = None
with self._lock:
self._index += 1
if self._index == 1:
DriverManager().start()
driver = DriverWrapper()
else:
if hasattr(settings, 'QT4I_REMOTE_DRIVERS'):
remote_drivers = settings.QT4I_REMOTE_DRIVERS
index = self._index - 2
if len(remote_drivers) - 1 == index:
driver = DriverWrapper(remote_drivers[index]['ip'])
device = 'debug'
else:
raise RuntimeError("本地没有配置协作设备,请检查settings中的REMOTE_DRIVERS变量")
else:
from _driver._respool import DevicePool
device = DevicePool().acquire(None)
if device:
driver = DriverWrapper(device.host)
else:
raise RuntimeError("协作设备申请失败")
with self._lock:
self._drivers[driver] = device
return driver
def release_driver(self, driver):
'''释放指定的远程协助设备
:param driver: 远程协作设备
:type driver: DriverWrapper
'''
with self._lock:
if driver in self._drivers:
if not hasattr(settings, 'QT4I_REMOTE_DRIVERS'):
if self._drivers[driver]:
from _driver._respool import DevicePool
DevicePool().release(self._drivers[driver])
del self._drivers[driver]
self._index -= 1
def release_drivers(self):
'''释放所有占用的远程协助设备
'''
for driver in self._drivers.keys():
self.release_driver(driver)
class Device(object):
'''iOS设备基类(包含基于设备的UI操作接口)
'''
Encoding = 'UTF-8'
Devices = []
class EnumDeviceType(object):
'''定义设备类型'''
iPhone, iPad, iPod, Unknown = ('iPhone', 'iPad', 'iPod', 'Unknown')
@classmethod
def release_all(cls):
for device in cls.Devices:
device.release()
def __init__(self, device_id=None):
'''Device构造函数
:param device_id: 本地设备的UDID
不指定UDID则获取已连接的真机,无真机连接则使用默认的模拟器
指定Simulator则使用默认的模拟器
指定UDID则使用指定的设备,如果是真机的UDID,真机不存在则异常,如果是模拟器的UDID,对不上号则异常(要注意每一台Mac的模拟器的UDID都不相同)。
:type device_id: str|None
'''
self._device_manager = DeviceManager()
self._driver_wrapper = self._device_manager.acquire_driver()
self._driver = self._driver_wrapper.driver
self._device = None
# -*- -*- -*- -*- -*- -*-
# 第一默认: 第一台连接的真机
if device_id is None:
_real_devices = self._driver.dt.get_real_devices()
if len(_real_devices) > 0:
self._device = _real_devices[0]
# 第二默认: 默认模拟器
if self._device is None and (device_id is None or str(device_id).lower() == 'simulator'):
self._driver.dt.start_simulator()
_simulators = self._driver.dt.get_simulators()
for _simulator in _simulators:
if _simulator['state'] == "Booted":
self._device = _simulator
break
# 第三指定: 指定设备
if self._device is None and device_id is not None and str(device_id).lower() != 'simulator':
_device = self._driver.dt.get_device_by_udid(device_id)
if _device is not None:
self._device = _device
# -*- -*- -*- -*- -*- -*-
if self._device is None: raise Exception('无可用的真机和模拟器: device_udid=%s' % device_id)
# -*- -*- -*- -*- -*- -*-
self._device_udid = self._device['udid']
self._device_name = self._device['name']
self._device_ios = self._device['ios']
self._device_type = self.EnumDeviceType.Unknown
self._device_simulator = self._device['simulator']
self._app_started = False
self._keyboard = Keyboard(self)
Device.Devices.append(self)
logger.info('[%s] Device - Connect - %s - %s (%s)' % (datetime.datetime.fromtimestamp(time.time()), self.name, self.udid, self.ios_version))
@property
def driver(self):
'''设备所在的driver
:rtype: xmlrpclib.ServerProxy
'''
return self._driver
@property
def udid(self):
'''设备的udid
:rtype: str
'''
if isinstance(self._device_udid, basestring):
return self._device_udid.encode(self.Encoding)
@property
def name(self):
'''设备名
:rtype: str
'''
if isinstance(self._device_name, basestring):
return self._device_name.encode(self.Encoding)
@property
def ios_version(self):
'''iOS版本
:rtype: str
'''
if isinstance(self._device_ios, basestring):
return self._device_ios.encode(self.Encoding)
@property
def ios_type(self):
'''iOS设备类型
:rtype: str
'''
if isinstance(self._device_type, basestring) and self._device_type != self.EnumDeviceType.Unknown:
self._device_type = self._driver.imobiledevice.get_device_type_by_udid(self.udid)
return self._device_type.encode(self.Encoding)
@property
def simulator(self):
'''是否模拟器
:rtype: boolean
'''
return self._device_simulator
@property
def rect(self):
'''屏幕大小
:rtype : Rectangle
'''
rect = self._driver.uia.target.get_rect(self._device_udid)
origin = rect['origin']
size = rect['size']
return Rectangle(origin['x'], origin['y'], origin['x'] + size['width'], origin['y'] + size['height'])
@property
def keyboard(self):
'''获取键盘
'''
return self._keyboard
def start_app(self, app_name, app_params, trace_template=None, trace_output=None, retry=5, timeout=55, instruments_timeout=20 * 60):
'''启动APP
:param app_name: APP Bundle ID
:type app_name: str
:param app_params: app启动参数
:type app_params: dict
:param trace_template: 专项使用trace_template路径,或已配置的项
:type trace_template: str
:param trace_output: 专项使用trace_output路径
:type trace_output: str
:param retry: 重试次数(建议大于等于2次)
:type retry: int
:param timeout: 单次启动超时(秒)
:type timeout: int
:param instruments_timeout: 闲置超时(秒)
:type instruments_timeout: int
:return: boolean
'''
if not self._app_started:
if self._driver.ins.start(self._device_udid, self._device_simulator, app_name, app_params, trace_template, trace_output, retry, timeout, instruments_timeout):
self._app_started = True
return self._app_started == True
def _stop_app(self):
'''终止APP
:return: boolean
'''
if self._app_started:
if self._driver.ins.release(self._device_udid):
self._app_started = False
Device.Devices.remove(self)
return self._app_started == False
def release(self):
'''终止APP
'''
self._stop_app()
self._device_manager.release_driver(self._driver_wrapper)
logger.info('[%s] Device - Release - %s - %s (%s)' % (datetime.datetime.fromtimestamp(time.time()), self.name, self.udid, self.ios_version))
# 上层对象如果多处引用,会带来问题
# def __del__(self):
# '''析构
# '''
# self.release()
def _check_app_started(self):
'''检测APP是否已启动
:raises: Exception
'''
if not self._app_started:
raise Exception('app not started')
def screenshot(self, image_path):
'''截屏
:param image_path: 截屏图片的存放路径 (默认存储路径为临时路径,在启动APP时被清空)
:type image_path: str
:return: boolean
'''
result = True
try:
self._check_app_started()
self._driver.ins.capture_screen(self._device_udid, image_path)
except Exception:
result = False
if not self.simulator:
result = self._driver.imobiledevice.get_screenshot_by_udid(image_path, self._device_udid)
if result and self._device_manager.drivers[self._driver_wrapper]:
with open(os.path.realpath(image_path), "wb") as fd:
data = self._driver.get_binary_data(image_path).data
fd.write(base64.decodestring(data))
return os.path.isfile(image_path)
def print_uitree(self):
'''打印界面树
'''
self._check_app_started()
_ui_tree = urllib.unquote(self._driver.uia.target.get_element_tree(self._device_udid))
_ui_tree = _ui_tree.replace('\r', '\\r')
_ui_tree = _ui_tree.replace('\n', '\\n')
_ui_tree = _ui_tree.replace('\\', '\\\\')
_ui_tree = eval(_ui_tree)
def _print(_tree, _spaces='', _indent='|---'):
_line = _spaces + '{ ' + ', '.join([
'classname: "%s"' % _tree['classname'],
'label: %s' % ('"%s"' % _tree['label'] if _tree['label'] else 'null'),
'name: %s' % ('"%s"' % _tree['name' ] if _tree['name' ] else 'null'),
'value: %s' % ('"%s"' % _tree['value'] if _tree['value'] else 'null'),
'visible: %s' % ('true' if _tree['visible'] else 'false'),
'enabled: %s' % ('true' if _tree['enabled'] else 'false'),
'valid: %s' % ('true' if _tree['valid'] else 'false'),
'focus: %s' % ('true' if _tree['focus'] else 'false'),
'rect: %s' % _tree['rect']]) + ' }'
print _line
for _child in _tree['children']: _print(_child, _spaces + _indent, _indent)
_print(_ui_tree)
def click(self, x=0.5, y=0.5):
'''点击屏幕
:param x: 横向坐标(从左向右,屏幕百分比)
:type x: float
:param y: 纵向坐标(从上向下,屏幕百分比)
:type y: float
'''
self._check_app_started()
self._driver.uia.target.tap(self._device_udid, {'x':x, 'y':y})
def long_click(self, x, y, duration=3):
'''长按屏幕
:param x: 横向坐标(从左向右计算,绝对坐标值)
:type x: float
:param y: 纵向坐标(从上向下计算,绝对坐标值)
:type y: float
:param duration: 持续时间(秒)
:type duration: float
'''
self._check_app_started()
self._driver.uia.target.tap_with_options(self._device_udid, {'x':x, 'y':y}, {'duration':duration})
def double_click(self, x, y):
'''双击屏幕
:param x: 横向坐标(从左向右计算,屏幕百分比)
:type x: float
:param y: 纵向坐标(从上向下计算,屏幕百分比)
:type y: float
'''
self._check_app_started()
x = x * self.rect.width
y = y * self.rect.height
self._driver.uia.target.double_tap(self._device_udid, {'x':x, 'y':y})
def _drag_from_to_for_duration(self, from_point, to_point, duration=0.5, repeat=1, interval=0):
'''全屏拖拽
:attention: 如有过场动画,需要等待动画完毕
:param from_point : 起始坐标偏移百分比
:type from_point : dict : { x: 0.5, y: 0.8 }
:param to_point : 结束坐标偏移百分比
:type to_point : dict : { x: 0.5, y: 0.1 }
:param duration : 持续时间(秒)
:type duration : float
:param repeat : 重复该操作
:type repeat : int
:param interval : 重复该操作的间隙时间(秒)
:type interval : float
'''
self._check_app_started()
self._driver.uia.target.drag_from_to_for_duration(self._device_udid, from_point, to_point, duration, repeat, interval)
def drag(self, from_x=0.9, from_y=0.5, to_x=0.1, to_y=0.5, duration=0.5, repeat=1, interval=0):
'''回避屏幕边缘,全屏拖拽(默认在屏幕中央从右向左拖拽)
:param from_x: 起点 x偏移百分比(从左至右为0.0至1.0)
:type from_x: float
:param from_y: 起点 y偏移百分比(从上至下为0.0至1.0)
:type from_y: float
:param to_x: 终点 x偏移百分比(从左至右为0.0至1.0)
:type to_x: float
:param to_y: 终点 y偏移百分比(从上至下为0.0至1.0)
:type to_y: float
:param duration: 持续时间(秒)
:type duration: float
:param repeat: 重复该操作
:type repeat: int
:param interval: 重复该操作的间隙时间(秒)
:type interval: float
'''
self._drag_from_to_for_duration({'x': from_x, 'y': from_y}, {'x': to_x, 'y': to_y}, duration, repeat, interval)
def drag2(self, direct=EnumDirect.Left):
'''回避屏幕边缘,全屏在屏幕中央拖拽
:param direct: 拖拽的方向
:type direct: EnumDirect.Left|EnumDirect.Right|EnumDirect.Up|EnumDirect.Down
'''
if direct == EnumDirect.Left : self._driver.uia.target.drag_right_to_left(self._device_udid)
if direct == EnumDirect.Right : self._driver.uia.target.drag_left_to_right(self._device_udid)
if direct == EnumDirect.Up : self._driver.uia.target.drag_down_to_up(self._device_udid)
if direct == EnumDirect.Down : self._driver.uia.target.drag_up_to_down(self._device_udid)
def _flick_from_to(self, from_point, to_point, repeat=1, interval=0):
'''回避屏幕边缘,全屏滑动/拂去
:attention: 如有过场动画,需要等待动画完毕
:param from_point : 起始坐标偏移百分比
:type from_point : dict : { x: 0.5, y: 0.8 }
:param to_point : 结束坐标偏移百分比
:type to_point : dict : { x: 0.5, y: 0.1 }
:param repeat : 重复该操作
:type repeat : int
:param interval : 重复该操作的间隙时间(秒)
:type interval : float
'''
self._check_app_started()
self._driver.uia.target.flick_from_to(self._device_udid, from_point, to_point, repeat, interval)
def flick(self, from_x=0.9, from_y=0.5, to_x=0.1, to_y=0.5, repeat=1, interval=0):
'''回避屏幕边缘,全屏滑动/拂去(默认从右向左滑动/拂去)
该接口比drag的滑动速度快,如果滚动距离大,建议用此接口
:param from_x: 起点 x偏移百分比(从左至右为0.0至1.0)
:type from_x: float
:param from_y: 起点 y偏移百分比(从上至下为0.0至1.0)
:type from_y: float
:param to_x: 终点 x偏移百分比(从左至右为0.0至1.0)
:type to_x: float
:param to_y: 终点 y偏移百分比(从上至下为0.0至1.0)
:type to_y: float
'''
self._flick_from_to({'x': from_x, 'y': from_y}, {'x': to_x, 'y': to_y}, repeat, interval)
def flick2(self, direct=EnumDirect.Left):
'''回避屏幕边缘,全屏在屏幕中央滑动/拂去
:param direct: 滑动/拂去的方向
:type direct: EnumDirect.Left|EnumDirect.Right|EnumDirect.Up|EnumDirect.Down
'''
if direct == EnumDirect.Left : self._driver.uia.target.flick_right_to_left(self._device_udid)
if direct == EnumDirect.Right : self._driver.uia.target.flick_left_to_right(self._device_udid)
if direct == EnumDirect.Up : self._driver.uia.target.flick_down_to_up(self._device_udid)
if direct == EnumDirect.Down : self._driver.uia.target.flick_up_to_down(self._device_udid)
def deactivate_app_ror_duration(self, seconds=3):
'''将App置于后台一定时间
:param seconds: 秒
:type seconds: int
:return: boolean
'''
return self._driver.uia.target.deactivate_app_ror_duration(self._device_udid, seconds)
def install(self, app_path):
'''安装应用程序
:param app_path: ipa或app安装包的路径(注意:真机和模拟器的安装包互不兼容)
:type app_path: str
:return: boolean
'''
return self._driver.dt.install(app_path, self._device_udid)
def uninstall(self, bundle_id):
'''卸载应用程序
:param bundle_id: APP的bundle_id,例如:com.tencent.qq.dailybuild.test
:type bundle_id: str
:return: boolean
'''
return self._driver.dt.uninstall(bundle_id, self._device_udid)
def get_crash_log(self, procname):
'''获取指定进程的最新的crash日志
:param proc_name: app的进程名,可通过xcode查看
:type proc_name: str
:return: string or None - crash日志路径
'''
if self.simulator:
return None
crash_log_path = self._driver.imobiledevice.get_crash_log(procname, self._device_udid)
if self._device_manager.drivers[self._driver_wrapper]:
with open(os.path.realpath(crash_log_path), "wb") as fd:
data = self._driver.get_binary_data(crash_log_path).data
fd.write(base64.decodestring(data))
return crash_log_path if crash_log_path and os.path.isfile(crash_log_path) else None
def copy_to_local(self, bundle_id, remotepath, localpath='/tmp',is_dir = False):
'''拷贝手机中sandbox指定目录的文件到Mac本地
:param bundle_id: app的bundle id
:type bundle_id: str
:param remotepath: sandbox上的目录或者文件,例如:/Library/Caches
:type remotepath: str
:param localpath: 本地的目录
:type localpath: str
:param is_dir: remotepath是否为目录,默认为单个文件
:type is_dir: boolean
:return: list or None
'''
if self.simulator:
return None
files = self._driver.imobiledevice.copy_to_local(bundle_id, remotepath, localpath, self._device_udid, is_dir)
if self._device_manager.drivers[self._driver_wrapper]:
for f in files:
with open(os.path.realpath(f), "wb") as fd:
data = self._driver.get_binary_data(f).data
fd.write(base64.decodestring(data))
return files if files and os.path.isfile(files[0]) else None
def copy_to_remote(self, bundle_id, localpath, remotepath):
'''拷贝Mac本地文件到手机中sandbox的指定目录地
:param bundle_id: app的bundle id
:type bundle_id: str
:param localpath: Mac上的文件路径
:type localpath: str
:param remotepath: iPhone上的目录或者文件,例如:/Documents/
:type remotepath: str
:return: boolean
'''
if self.simulator:
return False
if self._device_manager.drivers[self._driver_wrapper]:
with open(localpath, "rb") as fd:
binary_data = xmlrpclib.Binary(fd.read())
self._driver.send_binary_data(binary_data, localpath)
return self._driver.imobiledevice.copy_to_remote(bundle_id, localpath, remotepath, self._device_udid)
def remove_file(self, bundle_id, app_files):
'''删除手机上app中的文件或者目录(主要用于app的日志或者缓存的清理)
:param bundle_id: app的bundle id
:type bundle_id: str
:param app_files: sandbox上的目录或者文件,例如:/Library/Caches/QQINI/
:type app_files: str
'''
if self.simulator:
return
self._driver.imobiledevice.remove(bundle_id, app_files, self._device_udid)
def reboot(self):
'''重启手机
'''
if self.simulator:
return
self._driver.imobiledevice.reboot(self._device_udid)
class Keyboard(object):
'''键盘
'''
def __init__(self, device):
self._device = device
self._driver = device.driver
def send_keys(self, keys):
'''键盘输入
:param keys: 要输入的字符串
:type keys: str
'''
self._driver.uia.keyboard.sent_keys(self._device.udid, keys)
# -*- -*- -*- -*- -*- -*- -*- -*- -*- -*- -*- -*- -*- -*- -*- -*-
if __name__ == '__main__':
device_manager = DeviceManager()
driver_wrapper = device_manager.acquire_driver()
res = driver_wrapper.driver.is_working
print 'driver is working: %s' % res
| [
"ericjwjiang@ericjwjiang-mac-mini3.local"
] | ericjwjiang@ericjwjiang-mac-mini3.local |
cd3dffcdae0e94d30f71d7c7e996d7d7a0e186e4 | 6676fc11ac6bcfce184914014c23540b9908c1e9 | /crops/migrations/0001_initial.py | 14e779b055c1bc77d365d1f14011433e02da4313 | [] | no_license | RomitSingh1999/Agriculture | 2a17c83c196ecb68bc2369a7a7f64008c49fe877 | ad3484c1e2915971de7edb46b3d234dd1e0ef800 | refs/heads/master | 2023-03-15T10:25:25.982243 | 2021-03-03T20:35:43 | 2021-03-03T20:35:43 | 303,091,108 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 424 | py | # Generated by Django 3.1.2 on 2020-10-10 12:35
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Data',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
],
),
]
| [
"romitsingh19@gmail.com"
] | romitsingh19@gmail.com |
f2ab9b66168fa1a8605e19f7ae2df6bb3570e8f6 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_136/2018.py | 287d7cf286ece6439372d5a253acd9a4a5feb04d | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 578 | py | def compute(C,F,X):
best, farms = X/2, 0
if X <= C:
return best
dt, dc, drate = 0, 0, 2
while 1:
delta = C/drate
dt = dt + delta
dc= dc + drate*delta - C
drate += F
new_time = dt + (X-dc)/drate
if new_time >= best:
break
best = new_time
return best
f=open("input")
T=int(f.readline())
for case in range(T):
C,F,X = map(float, f.readline().split())
#print(C,F,X)
rtn = compute(C,F,X)
print("Case #%d: %s"%(case+1, rtn))
f.close()
| [
"miliar1732@gmail.com"
] | miliar1732@gmail.com |
732e83cac1d21b258d8593edec7ce7e655d9c009 | dc743ec50a12acdd03c53576e25f23af37fe573e | /metadata-wrangling/BIBFRAME/Converter/APP-WIP/Django-based/BIBFRAME_Converter/Webapp/models.py | cf8ae1a105017ed2f5284b57ab02ac3530c0e873 | [] | no_license | ualbertalib/metadata | 860c103145bac86cfc1ef857f73b114b6a5ed4ce | ab125986975c688284c0c5da2fd2e2a812988d46 | refs/heads/master | 2023-07-19T20:29:09.542577 | 2023-02-07T19:01:34 | 2023-02-07T19:01:34 | 37,545,489 | 20 | 7 | null | 2023-07-06T21:19:44 | 2015-06-16T17:33:22 | null | UTF-8 | Python | false | false | 5,854 | py | from django.db import models
class Document(models.Model):
description = models.CharField(max_length=255, blank=True)
OID = models.CharField(max_length=355, blank=True)
old_id = models.CharField(max_length=25, blank=True)
name = models.CharField(max_length=255, blank=True)
uploaded_at = models.DateTimeField(auto_now_add=True)
file_format = models.CharField(max_length=5, default='.xml',)
file_type = models.CharField(max_length=15, default='BIBFRAME Data',)
class Marc_Document(models.Model):
description = models.CharField(max_length=255, blank=True)
document = models.FileField(upload_to='./MARC')
uploaded_at = models.DateTimeField(auto_now_add=True)
file_format = models.CharField(max_length=5, default='.mrc',)
file_type = models.CharField(max_length=15, default='MARC Data',)
class Bib_Document(models.Model):
description = models.CharField(max_length=255, blank=True)
document = models.FileField(upload_to='./BIBFRAME')
uploaded_at = models.DateTimeField(auto_now_add=True)
file_format = models.CharField(max_length=5, default='.xml',)
file_type = models.CharField(max_length=15, default='BIBFRAME Data',)
class Processing(models.Model):
description = models.CharField(max_length=355, blank=True)
name = models.CharField(max_length=355, blank=True)
uploaded_at = models.DateTimeField(auto_now_add=False)
file_format = models.CharField(max_length=5,default='.xml')
file_type = models.CharField(max_length=155,default='BIBFRAME Data')
start_time = models.DateTimeField(auto_now_add=True)
apis = models.CharField(max_length=355,blank=True)
files = models.CharField(max_length=999,default='nofileshasbeenuploadedyet')
status = models.CharField(max_length=50, default='not started')
class Meta:
unique_together = ["name", "uploaded_at", "file_type", "description", "apis"]
class P_progress(models.Model):
pid = models.ForeignKey(Processing, on_delete=models.CASCADE)
stage = models.CharField(max_length=255, default="MARC_to_MARC-XML")
all_names = models.CharField(max_length=25, default="1")
all_titles = models.CharField(max_length=25, default="1")
all_MARC = models.CharField(max_length=25, default="1")
p_names = models.CharField(max_length=25, default="N/A")
c_names = models.CharField(max_length=25, default="N/A")
name_index = models.CharField(max_length=25, default="0")
title_index = models.CharField(max_length=25, default="0")
M_to_B_index = models.CharField(max_length=25, default="0")
master_file = models.CharField(max_length=255, blank=True)
search_api_LC = models.CharField(max_length=255, default=0)
search_api_LCS = models.CharField(max_length=255, default=0)
search_api_VF = models.CharField(max_length=255, default=0)
search_api_VFP = models.CharField(max_length=255, default=0)
search_api_VFC = models.CharField(max_length=255, default=0)
def as_marc(self):
pd = self.pid.id
itemType = self.pid.file_type
if itemType == 'MARC Data':
return dict(
process_ID=pd,
stage=self.stage,
all_titles=self.all_titles,
all_names=self.all_names,
p_names=self.p_names,
c_names=self.c_names,
name_index=self.name_index,
title_index=self.title_index,
M_to_B_index=self.M_to_B_index,
name_percent="{0:.2f}".format(round((int(self.name_index)/int(self.all_names))*100,2)),
title_percent="{0:.2f}".format(round((int(self.title_index)/int(self.all_titles))*100,2)),
M_to_B_percent="{0:.2f}".format(round((int(self.M_to_B_index)/int(self.all_MARC))*100,2)))
def as_bib(self):
pd = self.pid.id
itemType = self.pid.file_type
if itemType == 'BIBFRAME Data':
return dict(
process_ID=pd,
stage=self.stage,
all_titles=self.all_titles,
all_names=self.all_names,
p_names=self.p_names,
c_names=self.c_names,
name_index=self.name_index,
title_index=self.title_index,
name_percent="{0:.2f}".format(round((int(self.name_index)/int(self.all_names))*100,2)),
title_percent="{0:.2f}".format(round((int(self.title_index)/int(self.all_titles))*100,2)))
class Progress_archive(models.Model):
process_ID = models.CharField(max_length=255, blank=True)
description = models.CharField(max_length=255, blank=True)
name = models.CharField(max_length=255, blank=True)
uploaded_at = models.DateTimeField(auto_now_add=False)
file_format = models.CharField(max_length=5,default='.xml')
file_type = models.CharField(max_length=155,default='BIBFRAME Data')
start_time = models.DateTimeField(auto_now_add=True)
apis = models.CharField(max_length=50, blank=True)
status = models.CharField(max_length=50, default='not started')
stage = models.CharField(max_length=255, default="MARC_to_MARC-XML")
all_names = models.CharField(max_length=25, default="1")
all_titles = models.CharField(max_length=25, default="1")
all_MARC = models.CharField(max_length=25, default="1")
p_names = models.CharField(max_length=25, default="N/A")
c_names = models.CharField(max_length=25, default="N/A")
name_index = models.CharField(max_length=25, default="100")
title_index = models.CharField(max_length=25, default="100")
M_to_B_index = models.CharField(max_length=25, default="100")
master_file = models.CharField(max_length=255, blank=True)
search_api_LC = models.CharField(max_length=255, default=0)
search_api_LCS = models.CharField(max_length=255, default=0)
search_api_VF = models.CharField(max_length=255, default=0)
search_api_VFP = models.CharField(max_length=255, default=0)
search_api_VFC = models.CharField(max_length=255, default=0)
| [
"danoosh@ualberta.ca"
] | danoosh@ualberta.ca |
50a9918d05eb0cd59d8f02be86cb3cd4074df66e | 1f0b83c7233c2405ac189b79d75a8aefb6427a18 | /Chapter_05/Quiz/Problem1.py | 330d3ee859394a6f7e0a05ecabd6fffdb5537420 | [] | no_license | k2khan/CS1300 | 255a55904f84c161df3d556ff9b15efbaf94803b | 14c4459f4f526cc526b624e8927659d8616bcd30 | refs/heads/main | 2023-07-28T22:28:12.872569 | 2021-09-25T21:15:20 | 2021-09-25T21:15:20 | 410,383,077 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 394 | py | from cs1graphics import *
from time import sleep
paper = Canvas(500, 500)
red = Square(100)
red.setFillColor('red')
red.moveTo(50, 50)
red.setBorderWidth(0)
paper.add(red)
L = Layer()
paper.add(L)
rectangle = Rectangle(100, 400)
rectangle.setFillColor('blue')
L.add(rectangle)
rectangle.moveTo(50, 300)
L.adjustReference(100, 100)
for i in range(1, 100):
L.rotate(-1)
sleep(0.033)
| [
"hamad.khan@slu.edu"
] | hamad.khan@slu.edu |
c0922f5313231e1e685bb96fce78601a8eeb7fac | e4911a7848ffa2aa2095d5f9d7d67648f9560ab2 | /detailsapp/urls.py | 4db012876ab89c3254622cd928d19c79fe0e0219 | [] | no_license | KajuShrivastava/modelforms | dd07533aff46d6d19049dfc24b3880066ceff020 | 724fd07a6bf81196c881f64777ec13d413ac107f | refs/heads/master | 2020-05-15T18:06:45.226050 | 2019-04-20T14:52:01 | 2019-04-20T14:52:01 | 182,418,086 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 215 | py | from django.urls import path
from django.contrib import admin
from detailsapp.views import userDetails
urlpatterns = [
path('userdetails/',userDetails),
path('display/', userDetails),
path('', userDetails),
]
| [
"kajushrivastava27@gmail.com"
] | kajushrivastava27@gmail.com |
752dbd0c59651336f805ab8222d75267bcaff9d8 | 90e7dac05dd45fe3288158f80a21ded8f6ef912b | /Object Recognition.py | a54665428d1a07ea45981676eee4a3ee3ea25bef | [] | no_license | Akolada/Object-Recognition-in-Images-with-Convolutional-Neural-Network-on-CIFAR10 | 600a20c6d730424c0fab211e07514bcdbfaf40e4 | fd6d7b9b66c1aa58f85ba215a73a5254b03ad8bb | refs/heads/master | 2020-07-05T03:59:45.809382 | 2018-10-02T19:32:48 | 2018-10-02T19:32:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,937 | py |
# coding: utf-8
# #### Renjie Wei
#
# Train a deep convolution network on a GPU with PyTorch for the CIFAR10 dataset. The convolution network should use (A) dropout, (B) trained with RMSprop or ADAM, and (C) data augmentation. For 10% extra credit, compare dropout test accuracy (i) using the heuristic prediction rule and (ii) Monte Carlo simulation. For full credit, the model should achieve 80-90% Test Accuracy. Submit via Compass (1) the code and (2) a paragraph (in a PDF document) which reports the results and briefly describes the model architecture. Due September 28 at 5:00 PM.
#
# ###### (A)The accuracy is 85.58 %
# ###### (B)Using dropout/ data augmentation/ trained with ADAM optimizer
# ###### (C)The convolution network is as follows:
#
# ###### PytorchDeepConv
# ###### (0): Conv2d(3, 64, kernel_size=(4, 4), stride=(1, 1), padding=(2, 2))
# ###### (1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
# ###### (2): ReLU()
#
#
# ###### (0): Conv2d(64, 64, kernel_size=(4, 4), stride=(1, 1), padding=(2, 2))
# ###### (1): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
# ###### (2): ReLU()
# ###### (3): Dropout(p=0.4)
#
# ###### (0): Conv2d(64, 64, kernel_size=(4, 4), stride=(1, 1), padding=(2, 2))
# ###### (1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
# ###### (2): ReLU()
#
# ###### (0): Conv2d(64, 64, kernel_size=(4, 4), stride=(1, 1), padding=(2, 2))
# ###### (1): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
# ###### (2): ReLU()
# ###### (3): Dropout(p=0.4)
#
# ###### (0): Conv2d(64, 64, kernel_size=(4, 4), stride=(1, 1), padding=(2, 2))
# ###### (1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
# ###### (2): ReLU()
#
# ###### (0): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1))
# ###### (1): ReLU()
# ###### (2): Dropout(p=0.4)
#
# ###### (0): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1))
# ###### (1): ReLU()
# ###### (2): Dropout(p=0.4)
#
# ###### (0): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1))
# ###### (1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
# ###### (2): ReLU()
# ###### (3): Dropout(p=0.4)
#
# ###### (0): Linear(in_features=1024, out_features=500, bias=True)
# ###### (1): BatchNorm1d(500, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
# ###### (2): ReLU()
# ###### (3): Dropout(p=0.5)
# ###### (4): Linear(in_features=500, out_features=10, bias=True)
#
#
#
# In[ ]:
import numpy as np
import torch
import torchvision
import torch.nn as nn
from torch.autograd import Variable
import torchvision.transforms as transforms
import time
#import h5py
# In[ ]:
# Hyper parameters
num_epochs = 60
hidden_sizes = 500
input_channels = 3
num_classes = 10
batch_size = 100
learning_rate = 0.001
# In[ ]:
torch.cuda.is_available()
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
# In[ ]:
# Download and construct CIFAR-10 dataset
transform = transforms.Compose(
[transforms.RandomHorizontalFlip(0.5),
transforms.RandomAffine(degrees=15, translate=(0.1,0.1)),
transforms.RandomResizedCrop(size=32, scale=(0.8, 1.0)),
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
train_dataset = torchvision.datasets.CIFAR10(root='./data',
train=True,
transform=transform,
download=True)
test_dataset = torchvision.datasets.CIFAR10(root='./data',
train=False,
transform=transform,
download=True)
# Data loader
train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
batch_size=batch_size,
shuffle=True)
test_loader = torch.utils.data.DataLoader(dataset=test_dataset,
batch_size=batch_size,
shuffle=False)
#classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
# In[ ]:
class PytorchDeepConv(nn.Module):
def __init__(self, input_channels, num_classes=10):
super(PytorchDeepConv, self).__init__()
#Layer 1
self.layer1 = nn.Sequential(
nn.Conv2d(in_channels=input_channels, out_channels=64, kernel_size=4, stride=1, padding=2),
nn.BatchNorm2d(64),
nn.ReLU())
# Layer 2
self.layer2 = nn.Sequential(
nn.Conv2d(in_channels=64, out_channels=64, kernel_size=4, stride=1, padding=2),
nn.MaxPool2d(kernel_size=2, stride=2),
nn.ReLU(),
nn.Dropout(0.4))
# Layer 3
self.layer3 = nn.Sequential(
nn.Conv2d(in_channels=64, out_channels=64, kernel_size=4, stride=1, padding=2),
nn.BatchNorm2d(64),
nn.ReLU())
# Layer 4
self.layer4 = nn.Sequential(
nn.Conv2d(in_channels=64, out_channels=64, kernel_size=4, stride=1, padding=2),
nn.MaxPool2d(kernel_size=2, stride=2),
nn.ReLU(),
nn.Dropout(0.4))
# Layer 5
self.layer5 = nn.Sequential(
nn.Conv2d(in_channels=64, out_channels=64, kernel_size=4, stride=1, padding=2),
nn.BatchNorm2d(64),
nn.ReLU())
# Layer 6
self.layer6 = nn.Sequential(
nn.Conv2d(in_channels=64, out_channels=64, kernel_size=3, stride=1, padding=0),
nn.ReLU(),
nn.Dropout(0.4))
# Layer 7
self.layer7 = nn.Sequential(
nn.Conv2d(in_channels=64, out_channels=64, kernel_size=3, stride=1, padding=0),
nn.ReLU(),
nn.Dropout(0.4))
#Layer 8
self.layer8 = nn.Sequential(
nn.Conv2d(in_channels=64, out_channels=64, kernel_size=3, stride=1, padding=0),
nn.BatchNorm2d(64),
nn.ReLU(),
nn.Dropout(0.4))
# Fully Connected
self.layer9 = nn.Sequential(
nn.Linear(64*4*4, 500),
nn.BatchNorm1d(500),
nn.ReLU(),
nn.Dropout(0.5),
nn.Linear(500, num_classes))
def forward(self, x):
out = self.layer1(x)
out = self.layer2(out)
out = self.layer3(out)
out = self.layer4(out)
out = self.layer5(out)
out = self.layer6(out)
out = self.layer7(out)
out = self.layer8(out)
out = out.view(out.size(0), -1)
# Linear function (readout)
out = self.layer9(out)
return out
# In[ ]:
model = PytorchDeepConv(input_channels, num_classes).to(device)
#print(model)
# In[ ]:
#Define loss and optimizer
# Use Adam as the optimizer
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
# In[ ]:
iter = 0
accuracies = []
for epoch in range(num_epochs):
if(epoch > 6):
for group in optimizer.param_groups:
for p in group['params']:
state = optimizer.state[p]
if(state['step'] >= 1024):
state['step'] = 1000
for i, (images, labels) in enumerate(train_loader):
images, labels = images.to(device), labels.to(device)
# Clear gradients w.r.t parameters
optimizer.zero_grad()
# Forward pass to get output/logits
outputs = model(images)
# Calculate Loss: Softmax --> cross entropy loss
loss = criterion(outputs, labels)
# Getting gradients w.r.t paramters
loss.backward()
# Updating parameters
optimizer.step()
iter += 1
if iter % 500 == 0:
# Calculate Accuracy
correct = 0
total = 0
# Iterate through test dataset
for images, labels in test_loader:
images, labels = images.to(device), labels.to(device)
# Forward pass only to get logits/output
outputs = model(images)
# Get predictions from the maximum value
_, predicted = torch.max(outputs.data, 1)
# Total number of labels
total += labels.size(0)
correct += (predicted == labels).sum().item()
accuracy = 100 * correct / total
accuracies.append(accuracy)
# Print Loss
print('Iteration: {}. Loss: {}. Accuracy: {}'.format(iter, loss.data[0], accuracy))
| [
"noreply@github.com"
] | Akolada.noreply@github.com |
77cf063326d302d2a56bdbd85c41b98cdf1ad82d | d961c8712b2c08517a307c928e2f97583bfc42aa | /bin/wheel3 | 7729c664c5c0b041a24cf4abef8bba18e513a57e | [] | no_license | josedlg/aws-django | a301316507b78f1ed68ae817f8efd7e02713a298 | aaf78b455b17f2f18e2ab1e33e5bb7a72c7ab283 | refs/heads/master | 2023-06-26T00:18:44.394773 | 2021-07-31T01:04:57 | 2021-07-31T01:04:57 | 391,082,552 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 248 | #!/Users/josedelaguarda/Developer/django-docker/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from wheel.cli import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"josedelaguarda@outlook.com"
] | josedelaguarda@outlook.com | |
e524c0884f4220d9139bffe55506485c502f0f9a | 67e620023911906f832aff452a99ac96433e44ef | /lib.py | be84269d8b5836fbc8922335e10f9dbc649e87ff | [
"MIT"
] | permissive | armel/RRFDisplay | bbb609c49ecff189444cdecb313f96baae31cb3c | 2ace16c0a19b9475c2b5913d9a319babe420abfb | refs/heads/master | 2023-01-28T15:34:33.215337 | 2020-09-12T20:49:51 | 2020-09-12T20:49:51 | 164,368,280 | 0 | 0 | MIT | 2020-05-13T00:16:49 | 2019-01-07T02:35:18 | Python | UTF-8 | Python | false | false | 16,423 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
'''
RRFDisplay version Raspberry Pi 3B et Orange Pi Zero
Learn more about RRF on https://f5nlg.wordpress.com
Check video about RRFDisplay on https://www.youtube.com/watch?v=rVW8xczVpEo
73 & 88 de F4HWN Armel
'''
import os
import requests
import sys
import getopt
import json
import urllib3
import calendar
urllib3.disable_warnings()
import settings as s
from math import cos, asin, sqrt, ceil
from lxml import etree
from datetime import datetime, timedelta
# Usage
def usage():
    """Print the RRFDisplay command-line help text to stdout."""
    help_lines = [
        'Usage : RRFDisplay.py [options ...]',
        '',
        '--help this help',
        '',
        'Interface settings :',
        ' --interface set interface (default=i2c, choose between [i2c, spi])',
        ' --i2c-port set i2c port (default=0)',
        ' --i2c-address set i2c address (default=0x3C)',
        '',
        'Display settings :',
        ' --display set display (default=sh1106, choose between [sh1106, ssd1306, ssd1327, ssd1351, st7735])',
        ' --display-width set display width (default=128)',
        ' --display-height set display height (default=64)',
        ' --display-theme set display theme (default=theme.cfg)',
        '',
        'Follow settings :',
        ' --follow set room (default=RRF, choose between [RRF, TECHNIQUE, INTERNATIONAL, LOCAL, BAVARDAGE, FON]) or callsign to follow',
        ' --refresh set refresh (default=1, in second)',
        '',
        'WGS84 settings :',
        ' --latitude set latitude (default=48.8483808, format WGS84)',
        ' --longitude set longitude (default=2.2704347, format WGS84)',
        '',
        '88 & 73 from F4HWN Armel',
    ]
    # One print of the joined lines emits exactly the same bytes as the
    # original sequence of per-line print() calls.
    print('\n'.join(help_lines))
# Calculate uptime with a microtime
def calc_uptime(n):
    """Format an uptime given in seconds as 'DD d, HH:MM'.

    Bug fix: the original used true division (``/``), which under
    Python 3 (this file's shebang) produced floats, yielding strings
    like '1.0418... d' instead of zero-padded integers.  Integer
    division restores the intended output.

    :param n: uptime in seconds (anything convertible with int()).
    :return: zero-padded 'DD d, HH:MM' string.
    """
    n = int(n)
    days, n = divmod(n, 24 * 3600)
    hours, n = divmod(n, 3600)
    minutes = n // 60
    # The original only padded values < 10, which is exactly what the
    # '{:02d}' format specifier does.
    return '{:02d} d, {:02d}:{:02d}'.format(days, hours, minutes)
# Save stats to get most active link
def save_stat(history, call):
    """Increment the activity counter for *call* in *history* (in place).

    :param history: dict mapping callsign -> hit count; mutated in place.
    :param call: callsign to count.
    :return: the same *history* dict, for chaining.
    """
    history[call] = history.get(call, 0) + 1
    return history
# Wake up screen
def wake_up_screen(device, display, wake_up):
    """Fade the display contrast up or down depending on *wake_up*.

    :param device: driver exposing a contrast(level) method.
    :param display: display model string (substring-matched).
    :param wake_up: current state; True means "awake, so dim it now".
    :return: the new state -- False after dimming, True after brightening.
    """
    # (sleep level, wake level) per display family; first match wins,
    # unknown models fall back to (4, 150).
    contrast_levels = [
        ('sh1106', (4, 150)),
        ('ssd1306', (4, 150)),
        ('ssd1327', (4, 15)),
        ('ssd1351', (50, 150)),
    ]
    level_sleep, level_wake_up = 4, 150
    for model, levels in contrast_levels:
        if model in display:
            level_sleep, level_wake_up = levels
            break

    if wake_up is True:
        # Ramp the contrast down towards the sleep level (no transmitter).
        for level in range(level_wake_up, level_sleep, -1):
            device.contrast(level)
        return False

    # Ramp the contrast up towards the wake level (transmitter active).
    for level in range(level_sleep, level_wake_up):
        device.contrast(level)
    return True
# Calc interpolation
def interpolation(value, in_min, in_max, out_min, out_max):
    """Linearly map *value* from [in_min, in_max] onto [out_min, out_max].

    The result is truncated to int; a degenerate (zero-width) input
    range yields 0 instead of dividing by zero.
    """
    span = in_max - in_min
    if span == 0:
        return 0
    scaled = (value - in_min) * (out_max - out_min) / span
    return int(scaled + out_min)
# Get system info
def system_info(value):
    """Return host statistics by shelling out to Linux system tools.

    Supported keys: 'temp', 'freq', 'mem', 'disk', 'load', 'up', 'ip',
    'arch', 'kernel'.  'mem' and 'disk' return a (used, total) pair of
    strings; every other key returns a single string.
    """
    if value == 'temp':
        # CPU temperature; some kernels report milli-degrees, hence /1000.
        # NOTE(review): "/=" yields a float under Python 3 (e.g. '47.0').
        tmp = int(os.popen('cat /sys/class/thermal/thermal_zone0/temp').readline())
        if tmp > 1000:
            tmp /= 1000
        return str(tmp)
    elif value == 'freq':
        # Current CPU frequency, scaled from kHz when large.
        tmp = int(os.popen('cat /sys/devices/system/cpu/cpu0/cpufreq/scaling_cur_freq').readline())
        if tmp > 1000:
            tmp /= 1000
        return str(tmp)
    elif value == 'mem':
        # Parse the "Mem:" line (second line) of 'free -h'.
        tmp = list(os.popen('free -h'))
        tmp = tmp[1].strip()
        tmp = tmp.split()
        mem = tmp[1]
        #mem_total = int(tmp[1][:-1])
        #mem_use = int(tmp[2][:-1])
        # Strip the two-character unit suffix before converting.
        # NOTE(review): int() assumes whole-number sizes; 'free -h' can print
        # decimals like '1.9Gi' -- confirm on the target systems.
        mem_total = int(tmp[1][:-2])
        mem_use = int(tmp[2][:-2])
        return str(int((float(mem_use) / float(mem_total)) * 100)), str(mem)
    elif value == 'disk':
        # Used percentage and total size of the SD-card partition from df.
        tmp = list(os.popen('df -h /dev/mmcblk0p1'))
        tmp = tmp[1].strip()
        tmp = tmp.split()
        disk_total = tmp[1]
        disk_use = tmp[4]
        return str(disk_use), str(disk_total)
    elif value == 'load':
        # Last three fields of w's header line: 1/5/15-minute load averages.
        tmp = list(os.popen('w | head -n 1'))
        tmp = tmp[0].strip()
        tmp = tmp.split()
        return str(tmp[-3]) + ' ' + str(tmp[-2]) + ' ' + str(tmp[-1])
    elif value == 'up':
        # Pull the numbers out of 'uptime -p' and zero-pad as 'DD d, HH:MM'.
        tmp = list(os.popen('uptime -p'))
        tmp = tmp[0].strip()
        tmp = [int(s) for s in tmp.split() if s.isdigit()]
        if len(tmp) == 3:
            day = tmp[0]
            hour = tmp[1]
            minute = tmp[2]
        elif len(tmp) == 2:
            day = 0
            hour = tmp[0]
            minute = tmp[1]
        else:
            day = 0
            hour = 0
            minute = tmp[0]
        tmp = ''
        if day < 10:
            tmp += '0'
        tmp += str(day)
        tmp += ' d, '
        if hour < 10:
            tmp += '0'
        tmp += str(hour)
        tmp += ':'
        if minute < 10:
            tmp += '0'
        tmp += str(minute)
        return str(tmp)
    elif value == 'ip':
        # First address reported by 'hostname -I'.
        tmp = list(os.popen('hostname -I'))
        tmp = tmp[0].strip()
        tmp = tmp.split()
        tmp = tmp[0]
        return str(tmp)
    elif value == 'arch':
        # 'sunxi' in the kernel banner identifies an Orange Pi board.
        tmp = os.popen('uname -a').readline()
        if 'sunxi' in tmp:
            tmp = 'Orange Pi'
        else:
            tmp = 'Raspberry Pi'
        return str(tmp)
    elif value == 'kernel':
        tmp = os.popen('uname -r').readline()
        return str(tmp)
# Compute distance
def calc_distance(call, latitude_1, longitude_1):
    """Haversine distance in km from (latitude_1, longitude_1) to the
    operator position configured in settings.

    Distances under 100 km keep one decimal; larger ones are rounded up
    to a whole km.  Returns False when no operator position is set.
    NOTE(review): the "unset" test is lat + lon == 0, which would also
    reject a real position lying on that diagonal -- confirm intent.
    """
    my_latitude = float(s.call_latitude)
    my_longitude = float(s.call_longitude)

    if (my_latitude + my_longitude) == 0:
        return False

    deg2rad = 0.017453292519943295  # approximation of pi / 180
    hav = 0.5 - cos((my_latitude - latitude_1) * deg2rad) / 2 \
        + cos(latitude_1 * deg2rad) * cos(my_latitude * deg2rad) \
        * (1 - cos((my_longitude - longitude_1) * deg2rad)) / 2
    distance = 12742 * asin(sqrt(hav))  # 12742 km = Earth's diameter

    if distance < 100:
        return round(distance, 1)
    return int(ceil(distance))
# Convert second to time
def convert_second_to_time(time):
    """Render a duration in seconds as 'MM:SS', or 'HH:MM:SS' when >= 1 h."""
    hours, remainder = divmod(time, 3600)
    minutes, seconds = divmod(remainder, 60)
    if hours == 0:
        return '{:0>2d}:{:0>2d}'.format(int(minutes), int(seconds))
    return '{:0>2d}:{:0>2d}:{:0>2d}'.format(int(hours), int(minutes), int(seconds))
# Convert time to second
def convert_time_to_second(time):
    """Parse 'MM:SS' (or 'HH:MM:SS' when longer than 5 chars) into seconds."""
    weights = [3600, 60, 1] if len(time) > 5 else [60, 1]
    parts = [int(part) for part in time.split(':')]
    return sum(weight * part for weight, part in zip(weights, parts))
# Convert time to second
def convert_time_to_string(time):
    """Turn 'HH:MM:SS' (or 'MM:SS') into a 'HHh MMm SSs' label."""
    if len(time) == 5:
        time = '00:' + time  # normalise 'MM:SS' to 'HH:MM:SS'
    labelled = time.replace(':', 'h ', 1)
    labelled = labelled.replace(':', 'm ', 1)
    return labelled + 's'
# Convert time utc to time local
def utc_to_local(utc_dt):
utc_dt = datetime.strptime(utc_dt, '%Y-%m-%d %H:%M:%S')
timestamp = calendar.timegm(utc_dt.timetuple())
local_dt = datetime.fromtimestamp(timestamp)
assert utc_dt.resolution >= timedelta(microseconds=1)
return local_dt.replace(microsecond=utc_dt.microsecond)
# Sanitize call
def sanitize_call(call):
    """Strip characters that are not allowed in a callsign."""
    forbidden = '\\\'!@#$"()[]'
    return ''.join(ch for ch in call if ch not in forbidden)
# Scan
def scan(call):
    """Look for *call* in the currently followed room first, then the others.

    :param call: callsign to search for in the room status pages.
    :return: the room name where the callsign appears, or False on a
             network error or when it is nowhere to be found.
    """
    try:
        r = requests.get(s.room[s.room_current]['api'], verify=False, timeout=10)
        page = r.content.decode('utf-8')
        if call in page:
            return s.room_current
    except requests.exceptions.ConnectionError as errc:
        return False
    except requests.exceptions.Timeout as errt:
        return False
    else:
        # Not in the current room: try every other room.
        for q in ['RRF', 'TECHNIQUE', 'INTERNATIONAL', 'LOCAL', 'BAVARDAGE', 'FON']:
            # Bug fix: the original compared q against the s.room dict
            # ("q != s.room", always true), so the current room was
            # pointlessly scanned a second time; compare the room name.
            if q != s.room_current:
                try:
                    r = requests.get(s.room[q]['api'], verify=False, timeout=10)
                    page = r.content.decode('utf-8')
                    if call in page:
                        return q
                except requests.exceptions.ConnectionError as errc:
                    return False
                except requests.exceptions.Timeout as errt:
                    return False
    return False
# Get solar propagation
def get_solar():
    """Refresh s.solar_value from the hamqsl solar/propagation XML feed.

    The raw feed is cached in s.solar_file and re-fetched at most once
    per hour (or when the in-memory values are empty).  On any network
    or parse failure the previous values are kept.  Always returns True.
    """
    solar_data = ''

    # Cache-expiry threshold: one hour ago.
    now = datetime.now() - timedelta(minutes=60)
    today = format(now, "%Y-%m-%d %H:%M:%S")

    # Check file
    if os.path.isfile(s.solar_file):
        modify = datetime.fromtimestamp(os.path.getmtime(s.solar_file)).strftime("%Y-%m-%d %H:%M:%S")

    if not os.path.isfile(s.solar_file) or today > modify or len(s.solar_value) == 0:  # if necessary update file
        # Request HTTP on hamqsl
        try:
            r = requests.get(s.solar_url, verify=False, timeout=1)
            solar_data = etree.fromstring(r.content)
            # Bug fix: the original opened the cache in text mode, wrote the
            # bytes of r.content (TypeError under Python 3) and used
            # "f.close" without parentheses, never closing the handle.
            with open(s.solar_file, 'wb') as f:
                f.write(r.content)
        except:
            pass

    if solar_data != '':  # If valid stream
        s.solar_value.clear()

        # Page 1
        for value in solar_data.xpath('/solar/solardata/updated'):
            s.solar_value['Updated'] = value.text.strip()
        # Re-format the feed's 'DD Mon YYYY HHMM' stamp as 'DD-MM, HH:MM'.
        tmp = s.solar_value['Updated'].split(' ')
        tmp = tmp[0] + ' ' + tmp[1] + ' ' + tmp[2] + ' ' + tmp[3]
        tmp = datetime.strptime(tmp, '%d %b %Y %H%M')
        s.solar_value['Updated'] = tmp.strftime("%d-%m, %H:%M")
        for value in solar_data.xpath('/solar/solardata/solarflux'):
            s.solar_value['Solar Flux'] = value.text.strip()
        for value in solar_data.xpath('/solar/solardata/aindex'):
            s.solar_value['A-Index'] = value.text.strip()
        for value in solar_data.xpath('/solar/solardata/kindex'):
            s.solar_value['K-Index'] = value.text.strip()
        for value in solar_data.xpath('/solar/solardata/sunspots'):
            s.solar_value['Sun Spots'] = value.text.strip()
        for value in solar_data.xpath('/solar/solardata/xray'):
            s.solar_value['X-Ray'] = value.text.strip()
        for value in solar_data.xpath('/solar/solardata/protonflux'):
            s.solar_value['Ptn Flux'] = value.text.strip()
        # ('electonflux' below is the feed's own tag spelling -- sic.)
        for value in solar_data.xpath('/solar/solardata/electonflux'):
            s.solar_value['Elc Flux'] = value.text.strip()
        for value in solar_data.xpath('/solar/solardata/magneticfield'):
            s.solar_value['Mag (BZ)'] = value.text.strip()
        for value in solar_data.xpath('/solar/solardata/solarwind'):
            s.solar_value['Solar Wind'] = value.text.strip()

        # Page 2
        for value in solar_data.xpath('/solar/solardata/calculatedconditions/band[@name="80m-40m" and @time="day"]'):
            s.solar_value['80m-40m Day'] = value.text.strip()
        for value in solar_data.xpath('/solar/solardata/calculatedconditions/band[@name="30m-20m" and @time="day"]'):
            s.solar_value['30m-20m Day'] = value.text.strip()
        for value in solar_data.xpath('/solar/solardata/calculatedconditions/band[@name="17m-15m" and @time="day"]'):
            s.solar_value['17m-15m Day'] = value.text.strip()
        for value in solar_data.xpath('/solar/solardata/calculatedconditions/band[@name="12m-10m" and @time="day"]'):
            s.solar_value['12m-10m Day'] = value.text.strip()
        for value in solar_data.xpath('/solar/solardata/calculatedconditions/band[@name="80m-40m" and @time="night"]'):
            s.solar_value['80m-40m Night'] = value.text.strip()
        for value in solar_data.xpath('/solar/solardata/calculatedconditions/band[@name="30m-20m" and @time="night"]'):
            s.solar_value['30m-20m Night'] = value.text.strip()
        for value in solar_data.xpath('/solar/solardata/calculatedconditions/band[@name="17m-15m" and @time="night"]'):
            s.solar_value['17m-15m Night'] = value.text.strip()
        for value in solar_data.xpath('/solar/solardata/calculatedconditions/band[@name="12m-10m" and @time="night"]'):
            s.solar_value['12m-10m Night'] = value.text.strip()
        for value in solar_data.xpath('/solar/solardata/geomagfield'):
            s.solar_value['Geomag Field'] = value.text.strip()
        s.solar_value['Geomag Field'] = s.solar_value['Geomag Field'].title()
        for value in solar_data.xpath('/solar/solardata/signalnoise'):
            s.solar_value['Signal Noise'] = value.text.strip()

        # Page 3
        for value in solar_data.xpath('/solar/solardata/calculatedvhfconditions/phenomenon[@name="vhf-aurora" and @location="northern_hemi"]'):
            s.solar_value['VHF Aurora'] = value.text.strip()
        s.solar_value['VHF Aurora'] = s.solar_value['VHF Aurora'].replace('Band ', '')
        for value in solar_data.xpath('/solar/solardata/calculatedvhfconditions/phenomenon[@name="E-Skip" and @location="europe"]'):
            s.solar_value['E-Skip EU 2m'] = value.text.strip()
        s.solar_value['E-Skip EU 2m'] = s.solar_value['E-Skip EU 2m'].replace('Band ', '')
        for value in solar_data.xpath('/solar/solardata/calculatedvhfconditions/phenomenon[@name="E-Skip" and @location="europe_4m"]'):
            s.solar_value['E-Skip EU 4m'] = value.text.strip()
        s.solar_value['E-Skip EU 4m'] = s.solar_value['E-Skip EU 4m'].replace('Band ', '')
        for value in solar_data.xpath('/solar/solardata/calculatedvhfconditions/phenomenon[@name="E-Skip" and @location="europe_6m"]'):
            s.solar_value['E-Skip EU 6m'] = value.text.strip()
        s.solar_value['E-Skip EU 6m'] = s.solar_value['E-Skip EU 6m'].replace('Band ', '')
        for value in solar_data.xpath('/solar/solardata/calculatedvhfconditions/phenomenon[@name="E-Skip" and @location="north_america"]'):
            s.solar_value['E-Skip NA 2m'] = value.text.strip()
        s.solar_value['E-Skip NA 2m'] = s.solar_value['E-Skip NA 2m'].replace('Band ', '')

    return True
# Get cluster
def get_cluster():
    """Refresh s.cluster_value with DX-cluster spots for the current band.

    Spots are cached in s.cluster_file and re-fetched at most once every
    four minutes (or when the in-memory values are empty).  At most 10
    spots are kept; on bands listed in s.cluster_exclude, spots on the
    excluded frequencies are filtered out.  Always returns True.
    """
    cluster_data = ''

    # Cache-expiry threshold: four minutes ago.
    now = datetime.now() - timedelta(minutes=4)
    today = format(now, "%Y-%m-%d %H:%M:%S")

    # Check file
    if os.path.isfile(s.cluster_file):
        modify = datetime.fromtimestamp(os.path.getmtime(s.cluster_file)).strftime("%Y-%m-%d %H:%M:%S")

    if not os.path.isfile(s.cluster_file) or today > modify or len(s.cluster_value) == 0:  # if necessary update file
        # The band to query can be overridden through a small text file.
        if os.path.isfile(s.cluster_band_file):
            with open(s.cluster_band_file, 'r') as f:
                band = f.read().strip()
        else:
            band = s.cluster_band

        # Request HTTP on hamqsl
        try:
            r = requests.get(s.cluster_url + band, verify=False, timeout=1)
            cluster_data = r.json()
            # Bug fix: the original used "f.close" without parentheses
            # (handle never closed) and wrote bytes to a text-mode file.
            with open(s.cluster_file, 'wb') as f:
                f.write(r.content)
        except:
            pass

    if cluster_data != '':
        s.cluster_value.clear()
        limit = len(cluster_data)
        indice = 0
        # NOTE(review): indice advances even when a spot is filtered out,
        # leaving gaps in the dict keys -- preserved from the original.
        for item in range(0, limit):
            if band in s.cluster_exclude:
                if str(int(float(cluster_data[item]['freq']))) not in s.cluster_exclude[band]:
                    s.cluster_value[indice] = cluster_data[item]['call'] + ' ' + cluster_data[item]['freq'] + ' ' + cluster_data[item]['dxcall'] + ' ' + str(utc_to_local(cluster_data[item]['time']))
            else:
                # Bug fix: this branch referenced the misspelled name
                # "luster_data", raising NameError for any band without
                # an exclusion list.
                s.cluster_value[indice] = cluster_data[item]['call'] + ' ' + cluster_data[item]['freq'] + ' ' + cluster_data[item]['dxcall'] + ' ' + str(utc_to_local(cluster_data[item]['time']))
            indice += 1
            if indice == 10:
                break

    return True
"armel.fauveau@globalis-ms.com"
] | armel.fauveau@globalis-ms.com |
9e46e31002326fd0a61378c7813ce1d84306ef03 | df15d06ed6f9c599b287c59b9a16fda258168395 | /Modulo1/Aula06/10 05 2021/Aula06_Ex6.py | f2e2f32c7f6ac00448af622f618d59d1bfcd7d43 | [] | no_license | GHEPT/VSCode_Python | 9ff2178cdd2c33100dc728acd66fd1d8fe44c16c | a10ee0144cdd666d797c3e0a828a7e4307bfd5dd | refs/heads/master | 2023-06-02T04:05:54.930779 | 2021-06-20T19:39:03 | 2021-06-20T19:39:03 | 366,545,920 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 756 | py | # Escreva uma função que, dado um númeronotarepresentando a nota de um estudante, converte o valor de nota para um conceito (A, B, C, D, E e F).
""" Nota / Conceito
>= 9.0 / A
>= 8.0 / B
>= 7.0 / C
>= 6.0 / D
>= 5.0 / E
<= 4.0 / F """
def conceito():
    """Print and return the letter grade for the module-level nota ``n``.

    Bug fix: the original stored ``print()``'s return value back into the
    global ``n`` and returned it, so the function always returned None
    (and clobbered the nota).  It now returns the grade letter, or None
    for an invalid nota, and leaves ``n`` untouched.
    """
    if n >= 9:
        grade = 'A'
    elif n >= 8:
        grade = 'B'
    elif n >= 7:
        grade = 'C'
    elif n >= 6:
        grade = 'D'
    elif n >= 5:
        grade = 'E'
    elif n >= 0 and n <= 4:
        grade = 'F'
    else:
        # Negative notas, or the 4 < n < 5 gap the spec leaves undefined.
        print('Você digitou uma nota inválida')
        return None
    print(grade)
    return grade
# Script entry: read the nota from stdin and print its conceito.
n = float(input('Digite sua nota: '))
conceito()
| [
"teodoro.edu@hotmail.com"
] | teodoro.edu@hotmail.com |
62d4261440930ed7f608719d5a33a283b7353a98 | 4fd217c6e58958e2fb8db410bc224cb6c345d2b3 | /9.3.2020/basic2FAILED.py | 0a0540465b976a33e52078804256e44f39324872 | [] | no_license | ElvarThorS/Forritun | 75334314795124739e133acbff4a0a561b1a2419 | 6609a9134d7a5139e5d3f679bfebb1cea4c5278f | refs/heads/master | 2022-12-18T14:28:12.324213 | 2020-09-17T09:12:56 | 2020-09-17T09:12:56 | 292,543,118 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 591 | py | #Algorithm
#1 make loop that loops n time
#2 make variables and get the sum of them
# NOTE(review): this attempt is known-broken (the file is named "FAILED"):
#  - the third branch prints and updates sequence2, duplicating the second
#    branch instead of handling sequence3;
#  - when none of the three strict "<" comparisons holds, count is never
#    incremented and the while loop spins forever.
n = int(input("Enter the length of the sequence: ")) # Do not change this line
count = 0
sequence1 = 1
sequence2 = 2
sequence3 = 3
while count < n:
    if sequence1 < sequence2:
        # Print the currently smallest of the pair, then replace it with
        # the sum of the other two terms.
        print(sequence1)
        sequence1 = sequence3 + sequence2
        count +=1
    elif sequence2 < sequence3:
        print(sequence2)
        sequence2 = sequence1 + sequence3
        count +=1
    elif sequence3 < sequence1:
        # NOTE(review): presumably meant to print/update sequence3 here.
        print(sequence2)
        sequence2 = sequence1 + sequence3
        count +=1
| [
"elvar-thor@hotmail.com"
] | elvar-thor@hotmail.com |
1a2a632760618a11fee58892f42121405d99e7ef | 4ce365fb7a342d417f35150df9e0819c24659843 | /setup.py | a671d1a6e9649118ba9cdbfe0caf2d6c27eb846a | [
"MIT"
] | permissive | lttb/morphy | fd7fb226f23b574f0a691eec75de979885b0e81e | c51a6fdb6ca4563d65f3b4d5bf42d24f5d4c1d26 | refs/heads/master | 2023-08-20T01:04:34.661607 | 2018-06-03T07:36:28 | 2018-06-03T07:36:28 | 135,484,358 | 0 | 0 | MIT | 2018-06-03T04:06:52 | 2018-05-30T18:47:21 | Python | UTF-8 | Python | false | false | 600 | py | from setuptools import find_packages, setup
# Distribution metadata for the morphey package.
setup(
    name='morphey',  # NOTE(review): package name 'morphey' vs repo url 'morphy' -- confirm
    packages=find_packages(),
    version='0.0.1',
    description='Morphological Analysis for Russian based on RNNs',
    author='Kenzhaev Artur',
    author_email='kenzhaev.artur@gmail.com',
    url='https://github.com/lttb/morphy',
    # Runtime dependencies (minimum supported versions).
    install_requires=[
        'keras>=2.1.6',
        'numpy>=1.14.0',
        'pymorphy2>=0.8',
        'russian-tagsets==0.6',
        'scikit-learn>=0.19.1',
        'scipy>=1.0.0',
        'tensorflow>=1.8.0',
    ],
    # NOTE(review): linters/formatters in setup_requires are pulled in at
    # build time for every consumer -- usually these belong in a dev extra.
    setup_requires=['flake8', 'flake8-quotes', 'pyre-check', 'yapf']
)
| [
"kenzhaev.artur@gmail.com"
] | kenzhaev.artur@gmail.com |
a3e4eb81b9d08904866c9f0b06e09ec2e52886b7 | 0b5f8f07825b0998699cea047f552e5459d6bd9f | /Inheritance.py | b6def1e068ab619b046e0bc3874a2a017785043c | [] | no_license | AvanthaDS/PyLearn_v_1_0 | 67f069e43a52da9bf283d125960f05af8cacf53a | dc4aa5473c7ade6947bb6fbb1d3da6cb46e43423 | HEAD | 2016-09-01T09:27:23.942470 | 2015-10-13T13:12:15 | 2015-10-13T13:12:15 | 44,176,357 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 435 | py | __author__ = 'Avantha'
class Parent:
    """Base class that provides the family (last) name."""

    def print_last_name(self):
        print('De Silva')
class Child(Parent):
    """Subclass demonstrating method overriding."""

    def print_first_name(self):
        print('Avantha')

    def print_last_name(self):
        # Overrides Parent.print_last_name: without this override the
        # inherited 'De Silva' would be printed instead.
        print('Pilippu Heva')
# Demo: both calls dispatch to Child's methods (the override wins).
avantha = Child()
avantha.print_first_name()
avantha.print_last_name()
| [
"avantha.desilva@gmail.com"
] | avantha.desilva@gmail.com |
7ec550710acac36ed347ba40826dbe4fe067766a | 5330918e825f8d373d3907962ba28215182389c3 | /CMGTools/TTHAnalysis/python/analyzers/ttHJetMETSkimmer.py | b521ed5ab1924dd489cef6fe53836e8bffcdd800 | [] | no_license | perrozzi/cmg-cmssw | 31103a7179222c7aa94f65e83d090a5cf2748e27 | 1f4cfd936da3a6ca78f25959a41620925c4907ca | refs/heads/CMG_PAT_V5_18_from-CMSSW_5_3_22 | 2021-01-16T23:15:58.556441 | 2017-05-11T22:43:15 | 2017-05-11T22:43:15 | 13,272,641 | 1 | 0 | null | 2017-05-11T22:43:16 | 2013-10-02T14:05:21 | C++ | UTF-8 | Python | false | false | 2,700 | py |
from CMGTools.RootTools.fwlite.Analyzer import Analyzer
from CMGTools.RootTools.fwlite.Event import Event
from CMGTools.RootTools.statistics.Counter import Counter, Counters
from CMGTools.RootTools.fwlite.AutoHandle import AutoHandle
class ttHJetMETSkimmer( Analyzer ):
    """Event skimmer: keeps an event only if it passes the configured
    leading-jet pT thresholds, extra-jet veto, MET, HT, MHT and b-tagged
    jet multiplicity cuts.  All cut values come from cfg_ana."""
    def __init__(self, cfg_ana, cfg_comp, looperName ):
        super(ttHJetMETSkimmer,self).__init__(cfg_ana,cfg_comp,looperName)
        # nBJet[2] may hold a python expression over 'jet' used to pre-select
        # jets before the b-tag count; an empty string accepts every jet.
        # NOTE(review): eval() executes the configuration string verbatim.
        self.bjetCut = eval("lambda jet : "+self.cfg_ana.nBJet[2]) if self.cfg_ana.nBJet[2] != "" else (lambda jet : True);
    def declareHandles(self):
        # No extra input collections beyond those of the base Analyzer.
        super(ttHJetMETSkimmer, self).declareHandles()
    def beginLoop(self):
        super(ttHJetMETSkimmer,self).beginLoop()
        # Book one cut-flow counter entry per selection step.
        self.counters.addCounter('events')
        count = self.counters.counter('events')
        count.register('all events')
        count.register('pass jetPtCuts')
        count.register('pass jetVeto')
        count.register('pass met')
        count.register('pass HT')
        count.register('pass MHT')
        count.register('pass nBJet')
        count.register('accepted events')
    def process(self, iEvent, event):
        """Return True to keep the event, False to drop it.

        Cut order: jet pT thresholds, jet veto, MET, HT, MHT, nBJet.
        """
        self.readCollections( iEvent )
        self.counters.counter('events').inc('all events')
        jets = getattr(event, self.cfg_ana.jets)
        # The i-th leading jet must exceed the i-th configured pT threshold.
        for i,ptCut in enumerate(self.cfg_ana.jetPtCuts):
            if len(jets) <= i or jets[i].pt() <= ptCut:
                return False
        self.counters.counter('events').inc('pass jetPtCuts')
        # Veto events with any additional jet above jetVetoPt (<= 0 disables).
        if float(self.cfg_ana.jetVetoPt) > 0:
            if len(jets) > len(self.cfg_ana.jetPtCuts):
                for j in jets[len(self.cfg_ana.jetPtCuts):]:
                    if j.pt() > self.cfg_ana.jetVetoPt:
                        return False
        self.counters.counter('events').inc('pass jetVeto')
        # Missing transverse energy cut (<= 0 disables).
        if float(self.cfg_ana.metCut) > 0 and event.met.pt() <= self.cfg_ana.metCut:
            return False
        self.counters.counter('events').inc('pass met')
        # htCut / mhtCut are (event attribute name, threshold) pairs.
        if float(self.cfg_ana.htCut[1]) > 0 and getattr(event, self.cfg_ana.htCut[0]) <= self.cfg_ana.htCut[1]:
            return False
        self.counters.counter('events').inc('pass HT')
        if float(self.cfg_ana.mhtCut[1]) > 0 and getattr(event, self.cfg_ana.mhtCut[0]) <= self.cfg_ana.mhtCut[1]:
            return False
        self.counters.counter('events').inc('pass MHT')
        # Count jets passing the b-tag working point nBJet[0] plus the
        # optional extra selection; require at least nBJet[1] of them.
        nB = 0;
        for j in jets:
            if j.btagWP(self.cfg_ana.nBJet[0]) and self.bjetCut(j):
                nB += 1;
        if nB < int(self.cfg_ana.nBJet[1]):
            return False
        self.counters.counter('events').inc('pass nBJet')
        self.counters.counter('events').inc('accepted events')
        return True
| [
"gpetruc@gmail.com"
] | gpetruc@gmail.com |
9caaadd925c85e67b492ab5d14e24fbaf3818be7 | 7a28e230dbe243b16a98f55bbd5530461bea9cc2 | /desafio038.py | 57d8d6dc9b4a1829e3867b77fa16bab49cd882e2 | [] | no_license | GabrielSBova/Scripts-Python | 7a6e7fc0f92e4308dabafc35ef4199d702139f90 | b118502ab5208147e3f107ff3d0fe8253d72839b | refs/heads/master | 2021-02-16T13:58:15.114061 | 2020-03-04T22:04:57 | 2020-03-04T22:04:57 | 245,013,234 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 334 | py | num1 = int(input('Digite o primeiro valor: '))
num2 = int(input('Digite o segundo valor: '))
if num1 > num2:
print('O número {} é maior que o número {}'.format(num1, num2))
elif num2 > num1:
print('O número {} é maior que o número {}'.format(num2, num1))
else:
print('Não existe valor maior, os dois são iguais!')
| [
"gabrielbova101@gmail.com"
] | gabrielbova101@gmail.com |
b4852f5e3507c034a051112ab5d20768d558f8b9 | d3efc82dfa61fb82e47c82d52c838b38b076084c | /utils/Autocase_Result/Quote18/HQ_18_052.py | 499f849471c77e6e8e758530e4b2c898b9ce420f | [] | no_license | nantongzyg/xtp_test | 58ce9f328f62a3ea5904e6ed907a169ef2df9258 | ca9ab5cee03d7a2f457a95fb0f4762013caa5f9f | refs/heads/master | 2022-11-30T08:57:45.345460 | 2020-07-30T01:43:30 | 2020-07-30T01:43:30 | 280,388,441 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,281 | py | #!/usr/bin/python
# -*- encoding: utf-8 -*-
import time
import sys
sys.path.append("/home/yhl2/workspace/xtp_test/xtp/api")
from xtp_test_case import *
sys.path.append("/home/yhl2/workspace/xtp_test/service")
from log import *
class HQ_18_052(xtp_test_case):
    """Regression case HQ_18_052: subscribing an order book with an invalid
    ticker must come back with error 11200003 ('unknown security').

    (This module targets Python 2 -- note the print statement below.)
    """
    def subOrderBook(self, Api, stk_info, case_name, rs_expect):
        print Api.GetApiVersion()
        def on_order_book(data, error, last):
            # Callback invoked asynchronously by the quote API; compare the
            # reported error against the expectation.
            self.print_msg(case_name, rs_expect, error)
        Api.setSubOrderBookHandle(on_order_book)
        Api.SubscribeOrderBook(stk_info)
        # Give the asynchronous callback time to arrive.
        time.sleep(1)
    def print_msg(self, case_name, rs_expect, error):
        # Log the pass/fail verdict, then assert on the actual error payload.
        if rs_expect == error:
            logger.warning('{0}测试正确!'.format(case_name))
        else:
            logger.error('{0}测试错误!'.format(case_name))
        self.assertEqual(error, rs_expect)
    def test_HQ_18_052(self):
        pyname = 'HQ_18_052'
        client_id = 6
        Api = XTPQuoteApi(client_id)
        Api.Login()
        # A ticker made of punctuation: deliberately not a valid security.
        stk_info = {'ticker': '!@#¥%……&×()<>?', 'exchange_id': 1}
        self.subOrderBook(Api, stk_info, pyname,
                          {'error_id': 11200003, 'error_msg': 'unknown security'}) # 1
        Api.Logout()
Api.Logout()
# Script entry point: run this case with the unittest runner.
if __name__=='__main__':
    unittest.main()
| [
"418033945@qq.com"
] | 418033945@qq.com |
885b4dd2a9ce98642e4a22da85726bac9cf54dd4 | ba559d93fdd7dadf830199c60b25d247709d66a5 | /apps/tests/models.py | 713d2ad484627a6601d40cd8691d7753239169b9 | [] | no_license | Harut1991/django_rest | aa4b1a273ce2efa1d80ebcccf1b7df714f431f9c | c7b26e7d9dd981f1221c7bd3bd67bafa84cb6cac | refs/heads/master | 2022-12-09T19:23:06.469555 | 2018-09-05T18:15:42 | 2018-09-05T18:15:42 | 145,605,164 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,638 | py |
from django.contrib.auth.models import User
from django.db import models
from .upload_path import get_image_path
class Test(models.Model):
    # Display name of the test.
    name = models.CharField(max_length=200)

    @property
    def get_test_questions(self):
        """List of {'id', 'question_text'} dicts for this test's questions."""
        questions = Question.objects.filter(test=self)
        return [
            {'id': question.id, 'question_text': question.question_text}
            for question in questions
        ]

    def __str__(self):
        return self.name

    class Meta:
        verbose_name_plural = 'Test'
class Question(models.Model):
    question_text = models.CharField(max_length=200)
    # Owning test; questions disappear with their test.
    test = models.ForeignKey(Test, on_delete=models.CASCADE)

    @property
    def get_question_answers(self):
        """List of {'id', 'answer_text', 'correct_answer'} dicts."""
        answers = Answer.objects.filter(question=self)
        return [
            {
                'id': answer.id,
                'answer_text': answer.answer_text,
                'correct_answer': answer.correct_answer,
            }
            for answer in answers
        ]

    @property
    def get_test_detail(self):
        """Summary dict of the owning test."""
        return {'id': self.test.id, 'name': self.test.name}

    def __str__(self):
        return self.question_text

    class Meta:
        verbose_name_plural = 'Question'
class Answer(models.Model):
    answer_text = models.CharField(max_length=200)
    # Owning question; answers disappear with their question.
    question = models.ForeignKey(Question, on_delete=models.CASCADE)
    correct_answer = models.BooleanField(default=False)

    @property
    def get_question_details(self):
        """Nested summary of the owning question and its test."""
        question = self.question
        return {
            'id': question.id,
            'question_text': question.question_text,
            'test': {
                'id': question.test.id,
                'name': question.test.name,
            },
        }

    def __str__(self):
        return self.answer_text

    class Meta:
        verbose_name_plural = 'Answer'
class UserAnswer(models.Model):
    # The user who picked the answer, and the answer picked.
    user = models.ForeignKey(User, on_delete=models.CASCADE)
    answer = models.ForeignKey(Answer, on_delete=models.CASCADE)

    @property
    def get_answer_detail(self):
        """Summary dict of the chosen answer."""
        answer = self.answer
        return {
            'id': answer.id,
            'answer_text': answer.answer_text,
            'correct_answer': answer.correct_answer,
        }

    def __str__(self):
        return self.answer.answer_text

    class Meta:
        verbose_name_plural = 'UserAnswer'
class UserPhoto(models.Model):
    user = models.ForeignKey(User, on_delete=models.CASCADE)
    image = models.ImageField(
        upload_to=get_image_path,
        null=False,
    )

    def __str__(self):
        # Bug fix: __str__ must return a str; the original returned the
        # ImageField's FieldFile object, raising TypeError whenever the
        # object was rendered (admin, shell, templates).
        return str(self.image)

    class Meta:
        verbose_name_plural = 'UserPhoto'
| [
"51ad41ad"
] | 51ad41ad |
9a8596e0a1a602c87d7fab6dc03002053d691441 | 1e9b3c4a2c8dee627ed03a28adf55aff66311f41 | /src/simple_multiple_call.py | 672b583cf6a421966316972f43787651a24d3a58 | [] | no_license | roholazandie/rpc_py | 54bfbd68c6be88c0ef33a195a2299d7d78fa2a10 | 8778539e95c702a1a8b6a0125889dc0a7c0f8553 | refs/heads/master | 2023-06-22T03:51:24.878605 | 2020-08-17T18:59:56 | 2020-08-17T18:59:56 | 277,616,148 | 0 | 0 | null | 2023-06-12T21:28:54 | 2020-07-06T18:14:51 | HTML | UTF-8 | Python | false | false | 382 | py | #import xmlrpclib
from xmlrpc.client import ServerProxy
# Proxy for the XML-RPC service expected on localhost:3000; each call below
# performs a network round trip and prints the returned similarity score.
server = ServerProxy('http://localhost:3000')
print(server.get_semantic_similarity("I want a MacBook.", "computers"))
print(server.get_semantic_similarity("I want a MacBook.", "music"))
print(server.get_semantic_similarity("You like Elvis!", "computers"))
print(server.get_semantic_similarity("You like Elvis!", "music"))
| [
"hilbert.cantor@gmail.com"
] | hilbert.cantor@gmail.com |
f1a3abec445a17ae572b06f876161e28bdaf4609 | fecc174119d6174fa60ffaf8c3614a1afa588686 | /oversight/image_source.py | 0127f5e8ffb282cde2ce7353f19fca5152c7f3f4 | [
"Apache-2.0"
] | permissive | hebenon/oversight | 50ffc612ac6911539c8723020dc91946c5d76913 | 83983a7010124bdadfc2d836134049a87928a33e | refs/heads/master | 2021-01-12T12:47:38.294356 | 2017-11-14T10:45:05 | 2017-11-14T10:45:05 | 69,296,502 | 19 | 2 | null | 2017-05-29T06:44:00 | 2016-09-26T22:01:36 | Python | UTF-8 | Python | false | false | 3,277 | py | # Copyright 2016 Ben Carson. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
__author__ = 'bcarson'
import requests
import io
import logging
import random
import time
from datetime import datetime
from threading import Timer
from PIL import Image
from signals import image
logger = logging.getLogger('root')
class ImageSource(object):
    """
    ImageSource will generate a stream of image events.

    It periodically connects to a URL and downloads an image to generate
    each event.  Each downloaded frame is resized to a fixed resolution
    and emitted via the ``image`` signal, tagged with the source name and
    a UTC timestamp.
    """

    def __init__(self, download_url, username=None, password=None, tag=None, download_frequency=2.0, output_width=1000, output_height=565):
        self.download_url = download_url

        if username is not None:
            self.authorisation = (username, password)
        else:
            self.authorisation = None

        self.tag = tag

        # Size of images to work with
        self.output_width = output_width
        self.output_height = output_height

        self.download_frequency = download_frequency
        # Stagger the first download by a random fraction of the period so
        # several sources do not all fire at the same instant.
        Timer(self.download_frequency * random.random(), self.get_image).start()

    def get_image(self):
        """Download one frame, resize and emit it, then re-arm the timer."""
        start = time.time()

        downloaded_image = None
        resized_image = None

        try:
            request = requests.get(self.download_url, auth=self.authorisation)

            # Bug fix: the original tested "status_code is 200", relying on
            # CPython's small-int caching; use equality.
            if request.status_code == 200:
                downloaded_image = io.BytesIO(request.content)
        # Bug fix: "except X, e" is Python-2-only syntax; "as e" works on
        # both Python 2.6+ and Python 3.
        except requests.ConnectionError as e:
            logger.error("Connection Error: %s", e)
        except requests.HTTPError as e:
            logger.error("HTTP Error: %s", e)

        if downloaded_image is not None:
            try:
                resized_image = self.get_resized_image(downloaded_image)
            except IOError as e:
                logger.error("Failed to resize image: %s", e)

        if resized_image is not None:
            image.send(self, timestamp=datetime.utcnow(), image=resized_image, source=self.tag)

        # Schedule the next download, compensating for the time spent here.
        next_time = max(self.download_frequency - (time.time() - start), 0)
        Timer(next_time, self.get_image).start()

    def get_resized_image(self, image_input):
        """
        Given a raw image from an image source, resize it to a standard size.
        Doing this results in more consistent results against the training set.

        :param image_input: A buffer with the raw image data.
        :return: Resized image data in jpeg format.
        """
        image = Image.open(image_input)
        resized_image = image.resize((self.output_width, self.output_height))

        output_buffer = io.BytesIO()
        resized_image.save(output_buffer, 'jpeg')

        return output_buffer.getvalue()
| [
"ben.carson@bigpond.com"
] | ben.carson@bigpond.com |
a60040da35be00c6a0f2ceb681037234ab48f192 | b445a970a5e645162f5b0841893327619efb794d | /15-exception/raise.py | 4ff885aeaae0811f876c311577855d1f152c0080 | [] | no_license | eduohe/PythonCodeSnippets | 340f676d3642d7f060bc45fee367f53e926149eb | 0cf2e8c2042c1b296de9c653b910a0596e75fca1 | refs/heads/master | 2020-12-25T11:16:23.023767 | 2015-09-22T21:24:37 | 2015-09-22T21:24:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 485 | py | #!/usr/bin/python3
def main():
    """Print the contents of 'exemplo.txt2' line by line via ler().

    The '.txt2' extension fails ler()'s endswith('.txt') check, so this
    demo path raises -- and then reports -- ValueError.
    """
    try:
        for linha in ler('exemplo.txt2'):
            print(linha, end = '')
    except IOError as e:
        print("arquivo nao encontrado", e)
    except ValueError as e:
        print("Tipo de arquivo invalido", e)
def ler(nomearquivo):
    """Return the lines of *nomearquivo*; only '.txt' files are accepted.

    Bug fixes: the original always opened the hard-coded 'exemplo.txt'
    instead of the requested file, and never closed the handle.

    :param nomearquivo: path of the file to read.
    :return: list of lines (with trailing newlines).
    :raises ValueError: when the extension is not '.txt'.
    """
    if nomearquivo.endswith('.txt'):
        with open(nomearquivo) as arq:
            return arq.readlines()
    raise ValueError('A extensao do arquivo deve ser .txt')
# Script entry point.
if __name__ == "__main__" : main()
"eduohe@hotmail.com"
] | eduohe@hotmail.com |
36b71e247ee6644a2e21394eb8364b1a8745d375 | 0a16ec91f6f41be823a9f165a10120cd22cd5aa9 | /FoodDBManager.py | 81464a0e018537e84011f1a765a96588429ad780 | [] | no_license | evgenyslab/NaopiEN | bd6e5847b76b87f506dac4e0012b0db76686f3b5 | ac93ae9ed075babccef85d514f64a24b582c9089 | refs/heads/master | 2020-04-06T04:07:20.777671 | 2017-03-07T16:02:20 | 2017-03-07T16:02:20 | 83,042,198 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,075 | py | import FileUtilitiy
import sqlite3
import json
import os.path
class FoodDBManager:
    """Thin wrapper around the sqlite food-items database.

    On construction the foodItems table is dropped and rebuilt from the
    JSON description file.  (This module targets Python 2 -- note the
    print statements below.)
    """
    def __init__(self):
        # Path uses Windows separators; fall back to '/' when not found.
        dbDir = "ProgramDataFiles\\foodDB.db"
        if os.path.isfile(dbDir):
            self.conn = sqlite3.connect(dbDir)
        else:
            self.conn = sqlite3.connect(dbDir.replace('\\', '/'))
        self.c = self.conn.cursor()
        foodItemsFile = "ProgramDataFiles\\foodItemsJSON.txt"
        foodItemsJSON = FileUtilitiy.readLinesToJSON(foodItemsFile)
        self.c.execute("select name from sqlite_master where type='table'")
        # print self.c.fetchall()
        self.dbName = "foodItems"
        # Rebuild the table from scratch on every start-up.
        # NOTE(review): DROP TABLE raises if the table does not exist yet --
        # confirm a fresh database never occurs, or use DROP TABLE IF EXISTS.
        sqlStr = "DROP TABLE " + self.dbName
        self.c.execute(sqlStr)
        sqlStr = "CREATE TABLE " + self.dbName + " "
        sqlStr += "(id, name, hasPoultry, hasGluten, calories, buyFrom, mealType, hasFish)"
        self.c.execute(sqlStr)
        for food in foodItemsJSON:
            # Values are spliced into the SQL string directly (not
            # parameterised); acceptable only for the trusted local file.
            sqlStr = "INSERT INTO " + self.dbName + " VALUES "
            sqlStr += "('" + str(food['id']) + "', '" + str(food['name']) + "', '" + str(food['hasPoultry'])
            sqlStr += "', '" + str(food['hasGluten']) + "', '" + str(food['calories'])
            sqlStr += "', '" + str(food['buyFrom']) + "', '" + str(food['mealType'])
            sqlStr += "', '" + str(food['hasFish']) + "')"
            self.c.execute(sqlStr)
        self.conn.commit()
    def showDB(self):
        #DB: data base
        # Debug helper: dump the column names, then every row as a dict.
        sqlStr = "PRAGMA table_info('" + self.dbName + "')"
        self.c.execute(sqlStr)
        dbRows = self.c.fetchall()
        for row in dbRows:
            print self.dict_factory(row)['name'], " | ",
        print
        sqlStr = "SELECT * FROM " + self.dbName
        self.c.execute(sqlStr)
        dbRows = self.c.fetchall()
        for row in dbRows:
            print self.dict_factory(row)
    def selectDBWhere(self, mealType = "Lunch", canEatPoultry = True, canEatGluten = True, canEatFish = True,
                      strictIngredients = False):
        # Build a WHERE clause from the dietary restrictions.
        # NOTE(review): the default mealType is capitalised "Lunch" but the
        # strictIngredients checks compare lowercase "breakfast"/"lunch"/
        # "dinner" -- confirm which casing callers actually pass.
        sqlStr = "SELECT * FROM " + self.dbName + " "
        sqlStr += "WHERE mealType='" + mealType + "' "
        if (not canEatGluten) or (strictIngredients and (mealType == "breakfast" or mealType == "lunch")):
            sqlStr += "AND hasGluten='" + str(canEatGluten) + "' "
        if (not canEatPoultry) or (strictIngredients and mealType == "lunch"):
            sqlStr += "AND hasPoultry='" + str(canEatPoultry) + "' "
        if (not canEatFish) or (strictIngredients and mealType == "dinner"):
            sqlStr += "AND hasFish='" + str(canEatFish) + "' "
        print sqlStr
        self.c.execute(sqlStr)
        dbRows = self.c.fetchall()
        return dbRows
    def dict_factory(self, row):
        # Map a result tuple onto {column name: value} using the cursor's
        # description from the most recent query.
        d = {}
        for idx, col in enumerate(self.c.description):
            d[col[0]] = row[idx]
        return d
#
# fDB = FoodDBManager()
# fDB.showDB()
# print
#
# dbRows = fDB.selectDBWhere(True, False)
# for row in dbRows:
# print fDB.dict_factory(row)
| [
"e.nuger@gmail.com"
] | e.nuger@gmail.com |
73a22fafc4797640bac521956e27568e4d8ea0fd | f8ac61a68c128d9019a2e78e9f3620502a87d220 | /server/commandoperator.py | 0f71917e9f5c0df53a5da990b6f919ef420072cf | [
"MIT"
] | permissive | griehsler/waterIt | a18f0923f3716cc788fc6659ddcf6b6f71331e8a | a7794e0f8de984b9e8ba6398e93350d6e9264432 | refs/heads/master | 2021-06-08T06:29:02.655806 | 2018-12-17T21:50:33 | 2018-12-17T21:50:33 | 147,120,679 | 0 | 0 | MIT | 2021-05-06T19:24:30 | 2018-09-02T20:48:31 | C++ | UTF-8 | Python | false | false | 2,620 | py | import paho.mqtt.client as mqtt
from datetime import datetime
import pytz
import json
from pony.orm import db_session
import sys
from model import *
with open('config.json') as config_file:
1
configuration = json.load(config_file)
incomingTopic = configuration["incomingTopic"]
outgoingTopic = configuration["outgoingTopic"]
humidityThreshold = configuration["humidityThreshold"]
pumpDuration = configuration["pumpSeconds"] * 1000
sleepDuration = configuration["sleepMinutes"] * 60000
db.bind(provider='sqlite', filename=configuration["database"], create_db=True)
db.generate_mapping(create_tables=True)
def on_connect(client, userdata, flags, rc):
if rc != 0:
print("Connection failed with result code "+str(rc))
exit(1)
client.subscribe(incomingTopic)
def on_message(client, userdata, msg):
command = json.loads(msg.payload)
#alert("received: " + str(command))
name = command["name"]
if name == "humidityLevel":
humidity = command["humidity"]
now = datetime.now(pytz.utc)
with db_session:
HumidityMeasure(value=humidity,
timestamp=now)
alert("received humidity measure: {}".format(humidity))
if humidity < humidityThreshold:
alert("triggering pump")
sendCommandAdv("feedWater", "duration", pumpDuration)
Event(kind="pump", timestamp=now)
sendCommand("sendWaterLevel")
else:
sendCommandAdv("sleep", "duration", sleepDuration)
sendCommand("sendHumidityLevel", True)
elif name == "waterLevel":
isLow = bool(command["isLow"])
if isLow:
alert("Water tank is low!")
sendCommandAdv("sleep", "duration", sleepDuration)
sendCommand("sendHumidityLevel", True)
else:
alert("Unrecognized command:")
alert(command)
def sendMessage(message, retain):
client.publish(outgoingTopic, message, 1, retain)
def sendCommand(name, retain=False):
sendMessage('{"name":"' + name + '"}', retain)
def sendCommandAdv(name, paramName, paramValue, retain=False):
message = '"name":"{}","{}":{}'.format(name, paramName, str(paramValue))
sendMessage('{' + message + '}', retain)
def alert(message):
print(message)
client = mqtt.Client()
client.on_connect = on_connect
client.on_message = on_message
client.username_pw_set(configuration["username"], configuration["password"])
client.connect(configuration["host"], configuration["port"])
sendCommand("sendHumidityLevel", True)
client.loop_forever()
| [
"ronald.griehsler@gmail.com"
] | ronald.griehsler@gmail.com |
acff2b7d7644b0a3ae1403ef41c3a2a3b37be1a0 | 7126face9fda4828bb74b7569b81b56da8e8d00a | /vishtml.py | a57f00d0d6a0ad070fadd5f7564539d814405c94 | [] | no_license | eineEule/WebstatusMonitor | 66914c73352e185614d164dcd78df10016a822d8 | 305800f000b680fde083cc143962f054940ff292 | refs/heads/master | 2021-08-31T11:05:55.321672 | 2017-12-21T04:48:24 | 2017-12-21T04:48:24 | 114,700,982 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,865 | py | #!/usr/bin/env python
import cgi
def htmlHead():
print("""Content-type: text/html\n\n
<!DOCTYPE html>
<html>
<head>
<meta name="viewport" content="initial-scale=1, maximum-scale=5">
<link href="https://maxcdn.bootstrapcdn.com/bootstrap/4.0.0-beta.2/css/bootstrap.min.css" rel="stylesheet" integrity="sha384-PsH8R72JQ3SOdhVi3uxftmaW6Vc51MKb0q5P2rRUpPvrszuE4W1povHYgTpBfshb" crossorigin="anonymous">
<title>Site Monitor</title>
</head>
<body class="bg-dark">""")
def htmlEnd():
print("""</body>
</html>""")
def opencontainer():
print("""</br><div class="container"><font color="white"><h1>Site List</h1></font>""")
def closecontainer():
print("</div>")
def htmlform():
print("""</br><form class="text-white" method="post" action="sites.cgi">
<input class="form-control form-control-lg" type="text" name="listurl" placeholder="(i.e. https://www.google.com)" autocomplete="off" autofocus></br>
</form>""")
def errHead():
print("""Content-type: text/html\n\n
<!DOCTYPE html>
<html>
<head>
<meta name="viewport" content="initial-scale=1, maximum-scale=5">
<link href="https://maxcdn.bootstrapcdn.com/bootstrap/4.0.0-beta.2/css/bootstrap.min.css" rel="stylesheet" integrity="sha384-PsH8R72JQ3SOdhVi3uxftmaW6Vc51MKb0q5P2rRUpPvrszuE4W1povHYgTpBfshb" crossorigin="anonymous">
<style> .container{ background-color: white; } </style>
<title>Site Monitor</title>
</head>
<body class="bg-dark">""")
def openerrcon():
print("""</br><div class="container"><h1>Error Log</h1>""")
def openstatcon():
print("""</br><div class="container"><font color="white"><h1>Site Status</h1></font>""")
def openconconf():
print("""</br><div class="container">""")
| [
"noreply@github.com"
] | eineEule.noreply@github.com |
e970ddae739ae9ee0b4d46519705a661d8daa2da | 794075998ca79c106cdfa145c508a2564414f9e7 | /flask-nlp-api/database.py | d973edf14ae47efcfd42330e4fccd5aa49599443 | [
"MIT"
] | permissive | dr-jgsmith/flask-nlp-api | 814ab8cfb95ab20fd378642a17e5e3e4bdeb8851 | bc47594a0b200271ff71d1637ae76338d3b3912c | refs/heads/master | 2020-06-01T07:15:29.648678 | 2019-06-07T06:44:25 | 2019-06-07T06:44:25 | 190,694,684 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 501 | py | from sqlalchemy.orm import relationship
from extensions import db
relationship = relationship
def ReferenceCol(tablename, nullable=False, pk_name='id', **kwargs):
"""Column that adds primary key foreign key reference.
Usage: ::
category_id = ReferenceCol('category')
category = relationship('Category', backref='categories')
"""
return db.Column(
db.ForeignKey("{0}.{1}".format(tablename, pk_name)),
nullable=nullable, **kwargs) # pragma: no cover | [
"justin.smith@directharvest.io"
] | justin.smith@directharvest.io |
7c074e1e0329621355d20a6c40b15862511b0da2 | 3c000380cbb7e8deb6abf9c6f3e29e8e89784830 | /venv/Lib/site-packages/cobra/modelimpl/eqptcapacity/l3totalusagecaphist1year.py | cf62a802c79ad7330fcb6f152acf04c26193451e | [] | no_license | bkhoward/aciDOM | 91b0406f00da7aac413a81c8db2129b4bfc5497b | f2674456ecb19cf7299ef0c5a0887560b8b315d0 | refs/heads/master | 2023-03-27T23:37:02.836904 | 2021-03-26T22:07:54 | 2021-03-26T22:07:54 | 351,855,399 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 17,472 | py | # coding=UTF-8
# **********************************************************************
# Copyright (c) 2013-2020 Cisco Systems, Inc. All rights reserved
# written by zen warriors, do not modify!
# **********************************************************************
from cobra.mit.meta import ClassMeta
from cobra.mit.meta import StatsClassMeta
from cobra.mit.meta import CounterMeta
from cobra.mit.meta import PropMeta
from cobra.mit.meta import Category
from cobra.mit.meta import SourceRelationMeta
from cobra.mit.meta import NamedSourceRelationMeta
from cobra.mit.meta import TargetRelationMeta
from cobra.mit.meta import DeploymentPathMeta, DeploymentCategory
from cobra.model.category import MoCategory, PropCategory, CounterCategory
from cobra.mit.mo import Mo
# ##################################################
class L3TotalUsageCapHist1year(Mo):
"""
Mo doc not defined in techpub!!!
"""
meta = StatsClassMeta("cobra.model.eqptcapacity.L3TotalUsageCapHist1year", "Layer3 total entries max capacity")
counter = CounterMeta("v6TotalEpCap", CounterCategory.GAUGE, "count", "Total v6 Endpoints capacity")
counter._propRefs[PropCategory.IMPLICIT_MIN] = "v6TotalEpCapMin"
counter._propRefs[PropCategory.IMPLICIT_MAX] = "v6TotalEpCapMax"
counter._propRefs[PropCategory.IMPLICIT_AVG] = "v6TotalEpCapAvg"
counter._propRefs[PropCategory.IMPLICIT_SUSPECT] = "v6TotalEpCapSpct"
counter._propRefs[PropCategory.IMPLICIT_THRESHOLDED] = "v6TotalEpCapThr"
counter._propRefs[PropCategory.IMPLICIT_TREND] = "v6TotalEpCapTr"
meta._counters.append(counter)
counter = CounterMeta("v4TotalEpCap", CounterCategory.GAUGE, "count", "Total v4 Endpoints capacity")
counter._propRefs[PropCategory.IMPLICIT_MIN] = "v4TotalEpCapMin"
counter._propRefs[PropCategory.IMPLICIT_MAX] = "v4TotalEpCapMax"
counter._propRefs[PropCategory.IMPLICIT_AVG] = "v4TotalEpCapAvg"
counter._propRefs[PropCategory.IMPLICIT_SUSPECT] = "v4TotalEpCapSpct"
counter._propRefs[PropCategory.IMPLICIT_THRESHOLDED] = "v4TotalEpCapThr"
counter._propRefs[PropCategory.IMPLICIT_TREND] = "v4TotalEpCapTr"
meta._counters.append(counter)
meta.moClassName = "eqptcapacityL3TotalUsageCapHist1year"
meta.rnFormat = "HDeqptcapacityL3TotalUsageCap1year-%(index)s"
meta.category = MoCategory.STATS_HISTORY
meta.label = "historical Layer3 total entries max capacity stats in 1 year"
meta.writeAccessMask = 0x1
meta.readAccessMask = 0x1
meta.isDomainable = False
meta.isReadOnly = True
meta.isConfigurable = False
meta.isDeletable = False
meta.isContextRoot = True
meta.parentClasses.add("cobra.model.eqptcapacity.Entity")
meta.superClasses.add("cobra.model.eqptcapacity.L3TotalUsageCapHist")
meta.superClasses.add("cobra.model.stats.Item")
meta.superClasses.add("cobra.model.stats.Hist")
meta.rnPrefixes = [
('HDeqptcapacityL3TotalUsageCap1year-', True),
]
prop = PropMeta("str", "childAction", "childAction", 4, PropCategory.CHILD_ACTION)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("deleteAll", "deleteall", 16384)
prop._addConstant("deleteNonPresent", "deletenonpresent", 8192)
prop._addConstant("ignore", "ignore", 4096)
meta.props.add("childAction", prop)
prop = PropMeta("str", "cnt", "cnt", 16212, PropCategory.REGULAR)
prop.label = "Number of Collections During this Interval"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("cnt", prop)
prop = PropMeta("str", "dn", "dn", 1, PropCategory.DN)
prop.label = "None"
prop.isDn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("dn", prop)
prop = PropMeta("str", "index", "index", 36416, PropCategory.REGULAR)
prop.label = "History Index"
prop.isConfig = True
prop.isAdmin = True
prop.isCreateOnly = True
prop.isNaming = True
meta.props.add("index", prop)
prop = PropMeta("str", "lastCollOffset", "lastCollOffset", 111, PropCategory.REGULAR)
prop.label = "Collection Length"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("lastCollOffset", prop)
prop = PropMeta("str", "modTs", "modTs", 7, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "never"
prop._addConstant("never", "never", 0)
meta.props.add("modTs", prop)
prop = PropMeta("str", "repIntvEnd", "repIntvEnd", 110, PropCategory.REGULAR)
prop.label = "Reporting End Time"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("repIntvEnd", prop)
prop = PropMeta("str", "repIntvStart", "repIntvStart", 109, PropCategory.REGULAR)
prop.label = "Reporting Start Time"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("repIntvStart", prop)
prop = PropMeta("str", "rn", "rn", 2, PropCategory.RN)
prop.label = "None"
prop.isRn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("rn", prop)
prop = PropMeta("str", "status", "status", 3, PropCategory.STATUS)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("created", "created", 2)
prop._addConstant("deleted", "deleted", 8)
prop._addConstant("modified", "modified", 4)
meta.props.add("status", prop)
prop = PropMeta("str", "v4TotalEpCapAvg", "v4TotalEpCapAvg", 36720, PropCategory.IMPLICIT_AVG)
prop.label = "Total v4 Endpoints capacity average value"
prop.isOper = True
prop.isStats = True
meta.props.add("v4TotalEpCapAvg", prop)
prop = PropMeta("str", "v4TotalEpCapMax", "v4TotalEpCapMax", 36719, PropCategory.IMPLICIT_MAX)
prop.label = "Total v4 Endpoints capacity maximum value"
prop.isOper = True
prop.isStats = True
meta.props.add("v4TotalEpCapMax", prop)
prop = PropMeta("str", "v4TotalEpCapMin", "v4TotalEpCapMin", 36718, PropCategory.IMPLICIT_MIN)
prop.label = "Total v4 Endpoints capacity minimum value"
prop.isOper = True
prop.isStats = True
meta.props.add("v4TotalEpCapMin", prop)
prop = PropMeta("str", "v4TotalEpCapSpct", "v4TotalEpCapSpct", 36721, PropCategory.IMPLICIT_SUSPECT)
prop.label = "Total v4 Endpoints capacity suspect count"
prop.isOper = True
prop.isStats = True
meta.props.add("v4TotalEpCapSpct", prop)
prop = PropMeta("str", "v4TotalEpCapThr", "v4TotalEpCapThr", 36722, PropCategory.IMPLICIT_THRESHOLDED)
prop.label = "Total v4 Endpoints capacity thresholded flags"
prop.isOper = True
prop.isStats = True
prop.defaultValue = 0
prop.defaultValueStr = "unspecified"
prop._addConstant("avgCrit", "avg-severity-critical", 2199023255552)
prop._addConstant("avgHigh", "avg-crossed-high-threshold", 68719476736)
prop._addConstant("avgLow", "avg-crossed-low-threshold", 137438953472)
prop._addConstant("avgMajor", "avg-severity-major", 1099511627776)
prop._addConstant("avgMinor", "avg-severity-minor", 549755813888)
prop._addConstant("avgRecovering", "avg-recovering", 34359738368)
prop._addConstant("avgWarn", "avg-severity-warning", 274877906944)
prop._addConstant("cumulativeCrit", "cumulative-severity-critical", 8192)
prop._addConstant("cumulativeHigh", "cumulative-crossed-high-threshold", 256)
prop._addConstant("cumulativeLow", "cumulative-crossed-low-threshold", 512)
prop._addConstant("cumulativeMajor", "cumulative-severity-major", 4096)
prop._addConstant("cumulativeMinor", "cumulative-severity-minor", 2048)
prop._addConstant("cumulativeRecovering", "cumulative-recovering", 128)
prop._addConstant("cumulativeWarn", "cumulative-severity-warning", 1024)
prop._addConstant("lastReadingCrit", "lastreading-severity-critical", 64)
prop._addConstant("lastReadingHigh", "lastreading-crossed-high-threshold", 2)
prop._addConstant("lastReadingLow", "lastreading-crossed-low-threshold", 4)
prop._addConstant("lastReadingMajor", "lastreading-severity-major", 32)
prop._addConstant("lastReadingMinor", "lastreading-severity-minor", 16)
prop._addConstant("lastReadingRecovering", "lastreading-recovering", 1)
prop._addConstant("lastReadingWarn", "lastreading-severity-warning", 8)
prop._addConstant("maxCrit", "max-severity-critical", 17179869184)
prop._addConstant("maxHigh", "max-crossed-high-threshold", 536870912)
prop._addConstant("maxLow", "max-crossed-low-threshold", 1073741824)
prop._addConstant("maxMajor", "max-severity-major", 8589934592)
prop._addConstant("maxMinor", "max-severity-minor", 4294967296)
prop._addConstant("maxRecovering", "max-recovering", 268435456)
prop._addConstant("maxWarn", "max-severity-warning", 2147483648)
prop._addConstant("minCrit", "min-severity-critical", 134217728)
prop._addConstant("minHigh", "min-crossed-high-threshold", 4194304)
prop._addConstant("minLow", "min-crossed-low-threshold", 8388608)
prop._addConstant("minMajor", "min-severity-major", 67108864)
prop._addConstant("minMinor", "min-severity-minor", 33554432)
prop._addConstant("minRecovering", "min-recovering", 2097152)
prop._addConstant("minWarn", "min-severity-warning", 16777216)
prop._addConstant("periodicCrit", "periodic-severity-critical", 1048576)
prop._addConstant("periodicHigh", "periodic-crossed-high-threshold", 32768)
prop._addConstant("periodicLow", "periodic-crossed-low-threshold", 65536)
prop._addConstant("periodicMajor", "periodic-severity-major", 524288)
prop._addConstant("periodicMinor", "periodic-severity-minor", 262144)
prop._addConstant("periodicRecovering", "periodic-recovering", 16384)
prop._addConstant("periodicWarn", "periodic-severity-warning", 131072)
prop._addConstant("rateCrit", "rate-severity-critical", 36028797018963968)
prop._addConstant("rateHigh", "rate-crossed-high-threshold", 1125899906842624)
prop._addConstant("rateLow", "rate-crossed-low-threshold", 2251799813685248)
prop._addConstant("rateMajor", "rate-severity-major", 18014398509481984)
prop._addConstant("rateMinor", "rate-severity-minor", 9007199254740992)
prop._addConstant("rateRecovering", "rate-recovering", 562949953421312)
prop._addConstant("rateWarn", "rate-severity-warning", 4503599627370496)
prop._addConstant("trendCrit", "trend-severity-critical", 281474976710656)
prop._addConstant("trendHigh", "trend-crossed-high-threshold", 8796093022208)
prop._addConstant("trendLow", "trend-crossed-low-threshold", 17592186044416)
prop._addConstant("trendMajor", "trend-severity-major", 140737488355328)
prop._addConstant("trendMinor", "trend-severity-minor", 70368744177664)
prop._addConstant("trendRecovering", "trend-recovering", 4398046511104)
prop._addConstant("trendWarn", "trend-severity-warning", 35184372088832)
prop._addConstant("unspecified", None, 0)
meta.props.add("v4TotalEpCapThr", prop)
prop = PropMeta("str", "v4TotalEpCapTr", "v4TotalEpCapTr", 36723, PropCategory.IMPLICIT_TREND)
prop.label = "Total v4 Endpoints capacity trend"
prop.isOper = True
prop.isStats = True
meta.props.add("v4TotalEpCapTr", prop)
prop = PropMeta("str", "v6TotalEpCapAvg", "v6TotalEpCapAvg", 36741, PropCategory.IMPLICIT_AVG)
prop.label = "Total v6 Endpoints capacity average value"
prop.isOper = True
prop.isStats = True
meta.props.add("v6TotalEpCapAvg", prop)
prop = PropMeta("str", "v6TotalEpCapMax", "v6TotalEpCapMax", 36740, PropCategory.IMPLICIT_MAX)
prop.label = "Total v6 Endpoints capacity maximum value"
prop.isOper = True
prop.isStats = True
meta.props.add("v6TotalEpCapMax", prop)
prop = PropMeta("str", "v6TotalEpCapMin", "v6TotalEpCapMin", 36739, PropCategory.IMPLICIT_MIN)
prop.label = "Total v6 Endpoints capacity minimum value"
prop.isOper = True
prop.isStats = True
meta.props.add("v6TotalEpCapMin", prop)
prop = PropMeta("str", "v6TotalEpCapSpct", "v6TotalEpCapSpct", 36742, PropCategory.IMPLICIT_SUSPECT)
prop.label = "Total v6 Endpoints capacity suspect count"
prop.isOper = True
prop.isStats = True
meta.props.add("v6TotalEpCapSpct", prop)
prop = PropMeta("str", "v6TotalEpCapThr", "v6TotalEpCapThr", 36743, PropCategory.IMPLICIT_THRESHOLDED)
prop.label = "Total v6 Endpoints capacity thresholded flags"
prop.isOper = True
prop.isStats = True
prop.defaultValue = 0
prop.defaultValueStr = "unspecified"
prop._addConstant("avgCrit", "avg-severity-critical", 2199023255552)
prop._addConstant("avgHigh", "avg-crossed-high-threshold", 68719476736)
prop._addConstant("avgLow", "avg-crossed-low-threshold", 137438953472)
prop._addConstant("avgMajor", "avg-severity-major", 1099511627776)
prop._addConstant("avgMinor", "avg-severity-minor", 549755813888)
prop._addConstant("avgRecovering", "avg-recovering", 34359738368)
prop._addConstant("avgWarn", "avg-severity-warning", 274877906944)
prop._addConstant("cumulativeCrit", "cumulative-severity-critical", 8192)
prop._addConstant("cumulativeHigh", "cumulative-crossed-high-threshold", 256)
prop._addConstant("cumulativeLow", "cumulative-crossed-low-threshold", 512)
prop._addConstant("cumulativeMajor", "cumulative-severity-major", 4096)
prop._addConstant("cumulativeMinor", "cumulative-severity-minor", 2048)
prop._addConstant("cumulativeRecovering", "cumulative-recovering", 128)
prop._addConstant("cumulativeWarn", "cumulative-severity-warning", 1024)
prop._addConstant("lastReadingCrit", "lastreading-severity-critical", 64)
prop._addConstant("lastReadingHigh", "lastreading-crossed-high-threshold", 2)
prop._addConstant("lastReadingLow", "lastreading-crossed-low-threshold", 4)
prop._addConstant("lastReadingMajor", "lastreading-severity-major", 32)
prop._addConstant("lastReadingMinor", "lastreading-severity-minor", 16)
prop._addConstant("lastReadingRecovering", "lastreading-recovering", 1)
prop._addConstant("lastReadingWarn", "lastreading-severity-warning", 8)
prop._addConstant("maxCrit", "max-severity-critical", 17179869184)
prop._addConstant("maxHigh", "max-crossed-high-threshold", 536870912)
prop._addConstant("maxLow", "max-crossed-low-threshold", 1073741824)
prop._addConstant("maxMajor", "max-severity-major", 8589934592)
prop._addConstant("maxMinor", "max-severity-minor", 4294967296)
prop._addConstant("maxRecovering", "max-recovering", 268435456)
prop._addConstant("maxWarn", "max-severity-warning", 2147483648)
prop._addConstant("minCrit", "min-severity-critical", 134217728)
prop._addConstant("minHigh", "min-crossed-high-threshold", 4194304)
prop._addConstant("minLow", "min-crossed-low-threshold", 8388608)
prop._addConstant("minMajor", "min-severity-major", 67108864)
prop._addConstant("minMinor", "min-severity-minor", 33554432)
prop._addConstant("minRecovering", "min-recovering", 2097152)
prop._addConstant("minWarn", "min-severity-warning", 16777216)
prop._addConstant("periodicCrit", "periodic-severity-critical", 1048576)
prop._addConstant("periodicHigh", "periodic-crossed-high-threshold", 32768)
prop._addConstant("periodicLow", "periodic-crossed-low-threshold", 65536)
prop._addConstant("periodicMajor", "periodic-severity-major", 524288)
prop._addConstant("periodicMinor", "periodic-severity-minor", 262144)
prop._addConstant("periodicRecovering", "periodic-recovering", 16384)
prop._addConstant("periodicWarn", "periodic-severity-warning", 131072)
prop._addConstant("rateCrit", "rate-severity-critical", 36028797018963968)
prop._addConstant("rateHigh", "rate-crossed-high-threshold", 1125899906842624)
prop._addConstant("rateLow", "rate-crossed-low-threshold", 2251799813685248)
prop._addConstant("rateMajor", "rate-severity-major", 18014398509481984)
prop._addConstant("rateMinor", "rate-severity-minor", 9007199254740992)
prop._addConstant("rateRecovering", "rate-recovering", 562949953421312)
prop._addConstant("rateWarn", "rate-severity-warning", 4503599627370496)
prop._addConstant("trendCrit", "trend-severity-critical", 281474976710656)
prop._addConstant("trendHigh", "trend-crossed-high-threshold", 8796093022208)
prop._addConstant("trendLow", "trend-crossed-low-threshold", 17592186044416)
prop._addConstant("trendMajor", "trend-severity-major", 140737488355328)
prop._addConstant("trendMinor", "trend-severity-minor", 70368744177664)
prop._addConstant("trendRecovering", "trend-recovering", 4398046511104)
prop._addConstant("trendWarn", "trend-severity-warning", 35184372088832)
prop._addConstant("unspecified", None, 0)
meta.props.add("v6TotalEpCapThr", prop)
prop = PropMeta("str", "v6TotalEpCapTr", "v6TotalEpCapTr", 36744, PropCategory.IMPLICIT_TREND)
prop.label = "Total v6 Endpoints capacity trend"
prop.isOper = True
prop.isStats = True
meta.props.add("v6TotalEpCapTr", prop)
meta.namingProps.append(getattr(meta.props, "index"))
def __init__(self, parentMoOrDn, index, markDirty=True, **creationProps):
namingVals = [index]
Mo.__init__(self, parentMoOrDn, markDirty, *namingVals, **creationProps)
# End of package file
# ##################################################
| [
"bkhoward@live.com"
] | bkhoward@live.com |
f1a6a1f4c9d69dd7b043f7865a0f320005c90e3b | b135e52f29bc394ec4c81b0e52bcb38c34b62229 | /optim/acclip.py | e62dfdfcc70087b9d2a37fe20ce8a650b8c4dbd1 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | SAITPublic/MLPerf_Training_v1.1 | 2c563046459f9db2152e66c2c3e01b7d75dd675b | 3f00b82dcaa1c42078c547e0f2ed4aecbcad3277 | refs/heads/master | 2023-09-04T10:30:53.619177 | 2021-10-22T14:25:21 | 2021-10-22T15:06:14 | 420,127,562 | 8 | 1 | null | null | null | null | UTF-8 | Python | false | false | 6,278 | py | import math
import torch
from torch.optim.optimizer import Optimizer
def centralized_gradient(x,use_gc=True,gc_conv_only=False):
if use_gc:
if gc_conv_only:
if len(list(x.size()))>3:
x.add_(-x.mean(dim = tuple(range(1,len(list(x.size())))), keepdim = True))
else:
if len(list(x.size()))>1:
x.add_(-x.mean(dim = tuple(range(1,len(list(x.size())))), keepdim = True))
return x
class ACClip(Optimizer):
r"""Implements AdamP algorithm.
It has been proposed in `Slowing Down the Weight Norm Increase in
Momentum-based Optimizers`__
Arguments:
params: iterable of parameters to optimize or dicts defining
parameter groups
lr: learning rate (default: 1e-3)
betas: coefficients used for computing
running averages of gradient and its square (default: (0.9, 0.999))
eps: term added to the denominator to improve
numerical stability (default: 1e-8)
weight_decay: weight decay (L2 penalty) (default: 0)
delta: threhold that determines whether a set of parameters is scale
invariant or not (default: 0.1)
wd_ratio: relative weight decay applied on scale-invariant parameters
compared to that applied on scale-variant parameters (default: 0.1)
nesterov: enables Nesterov momentum (default: False)
Note:
No Reference code
Cosine similarity from AdamP
"""
def __init__(self, params, lr=1e-4, betas=(0.9, 0.99), eps=1e-5, weight_decay=1e-5, fixed_decay=False,
clip_grad_norm=True, max_grad_norm=1.0, alpha=1.0, mod=1) -> None:
defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, alpha=alpha, mod=mod)
self._fixed_decay = fixed_decay
self._clip_grad_norm = clip_grad_norm
self.max_grad_norm = max_grad_norm
if not 0.0 <= lr:
raise ValueError("Invalid learning rate: {}".format(lr))
if not 0.0 <= eps:
raise ValueError("Invalid epsilon value: {}".format(eps))
if not 0.0 <= betas[0] < 1.0:
raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
if not 0.0 <= betas[1] < 1.0:
raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))
if not 1.0 <= alpha <= 2.0:
raise ValueError("Invalid alpha parameter: {}".format(alpha))
super(ACClip, self).__init__(params, defaults)
@staticmethod
def _channel_view(x):
return x.view(x.size(0), -1)
@staticmethod
def _layer_view(x):
return x.view(1, -1)
@staticmethod
def _cosine_similarity(x, y, eps, view_func):
x = view_func(x)
y = view_func(y)
x_norm = x.norm(dim=1).add_(eps)
y_norm = y.norm(dim=1).add_(eps)
dot = (x * y).sum(dim=1)
return dot.abs() / x_norm / y_norm
def step(self, closure=None):
r"""Performs a single optimization step.
Arguments:
closure: A closure that reevaluates the model and returns the loss.
"""
loss = None
if closure is not None:
loss = closure()
combined_scale = 1.0
if self._clip_grad_norm and self.max_grad_norm > 0:
parameters = []
for group in self.param_groups:
parameters += [p for p in group['params'] if p.grad is not None]
global_grad_norm = torch.norm(torch.stack([torch.norm(p.grad.detach(), 2) for p in parameters]), 2)
if math.isfinite(global_grad_norm) and global_grad_norm > self.max_grad_norm:
combined_scale = (global_grad_norm + 1e-6) / self.max_grad_norm
for group in self.param_groups:
if 'step' in group:
group['step'] += 1
else:
group['step'] = 1
for p in group['params']:
if p.grad is None:
continue
grad = p.grad.data
if self._clip_grad_norm:
grad.div_(combined_scale)
beta1, beta2 = group['betas']
state = self.state[p]
# State initialization
if len(state) == 0:
state['step'] = 0
state['exp_avg'] = torch.zeros_like(p.data, memory_format=torch.preserve_format)
# the clipping value, i.e., \tao_0^{\alpha}
state['tau'] = torch.zeros_like(p.data, memory_format=torch.preserve_format)
# Second-order momentum, v_t
state['exp_avg_sq'] = torch.zeros_like(p.data, memory_format=torch.preserve_format)
exp_avg, exp_avg_sq, tau = state['exp_avg'], state['exp_avg_sq'], state['tau']
alpha = group['alpha']
state['step'] += 1
bias_correction1 = 1 - beta1 ** state['step']
bias_correction2 = 1 - beta2 ** state['step']
exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
tau.mul_(beta2).add_(grad.abs().pow(alpha), alpha=1 - beta2) # alpha = 1
exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
# step_size = group['lr']
step_size = group['lr'] / bias_correction1
# truncate large gradient - ACClip
denom = tau.pow(1 / alpha).div(exp_avg.abs().add(group['eps'])).clamp(min=0.0, max=1.0)
# Adaptive Learning Rates : Work like Adam
if group['mod'] == 1:
# denom.div_(exp_avg_sq.mul(beta2).sqrt().add(group['eps']))
denom.div_(exp_avg_sq.div(bias_correction2).mul(beta2).sqrt().add(group['eps']))
update = denom * exp_avg
# # gradient centralization
# update = centralized_gradient(update, use_gc=True, gc_conv_only=False)
# Update
p.data.add_(update, alpha=-step_size)
if not self._fixed_decay:
p.data.mul_(1.0 - group['lr'] * group['weight_decay'])
else:
p.data.mul_(1.0 - group['weight_decay'])
return loss
| [
"k.myeong-woo@samsung.com"
] | k.myeong-woo@samsung.com |
56bc0a3e71fd7a5ae2d279be5e15269ec243a16e | 13088ad7e539f84a79a4f49102222e6dda095317 | /utilities/Requests.py | cb5fdfe9315f9bc7758da8c0faa6b9dc25ecf181 | [] | no_license | pareekajay2/parking | 289ce27cc08ecd98dff0fc12787656029d78aa17 | b2b84534081b7a8343d694a7c82a67f5a728afd2 | refs/heads/master | 2023-03-30T04:49:32.542081 | 2021-03-28T07:39:18 | 2021-03-28T07:39:18 | 352,268,951 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 371 | py | class Method:
CreateParkingLot = "Create_parking_lot"
Parking = "Park"
SlotNumbersForDriverOfAge = "Slot_numbers_for_driver_of_age"
SlotNumberForCarWithNumber = "Slot_number_for_car_with_number"
Leave = "Leave"
VehicleRegistrationNumberForDriverAge = "Vehicle_registration_number_for_driver_of_age"
class Variables:
DriverAge = "driver_age"
| [
"pareekajay2@gmail.com"
] | pareekajay2@gmail.com |
4dbac38863702a9b83b29da422de7bdd84555f0e | a8b88720b1e1cb78ab6196cc29a2fc9534e65b29 | /resources/ui_stack_page.py | 14292e51edfc6c1fb7b1e9b723338c8b1f545002 | [] | no_license | RonChu-01/package_demo | 51726ab4cd115c5f6f144973f735ec30b9d8a900 | 7dde3ab21bc29ab810f64b5fd64c69299ba0d90d | refs/heads/master | 2020-05-23T21:08:25.535620 | 2019-08-15T03:33:11 | 2019-08-15T03:33:11 | 186,945,090 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,701 | py | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'ui_stack_page.ui'
#
# Created by: PyQt5 UI code generator 5.12.1
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
from SlidingStackedWidget import SlidingStackedWidget
class Stack_Ui_Form(object):
    """Auto-generated (pyuic5) UI definition for a four-page sliding stack.

    NOTE(review): this class was generated from ``ui_stack_page.ui`` by the
    PyQt5 UI code generator (see the header warning); hand edits will be lost
    on regeneration, so changes should normally be made in Qt Designer.
    """
    def setupUi(self, Form):
        """Build the widget tree on ``Form``: one SlidingStackedWidget with
        four pages (empty hint, packaging form, channel selection, and a
        tabbed un/packing status page with its own inner sliding stack)."""
        Form.setObjectName("Form")
        Form.resize(873, 505)
        self.stackedWidget = SlidingStackedWidget(Form)
        self.stackedWidget.setGeometry(QtCore.QRect(10, 0, 860, 500))
        self.stackedWidget.setMinimumSize(QtCore.QSize(860, 500))
        self.stackedWidget.setObjectName("stackedWidget")
        # --- page 1: hint shown when there is nothing to package ---
        self.page = QtWidgets.QWidget()
        self.page.setObjectName("page")
        self.layoutWidget = QtWidgets.QWidget(self.page)
        self.layoutWidget.setGeometry(QtCore.QRect(300, 60, 248, 14))
        self.layoutWidget.setObjectName("layoutWidget")
        self.horizontalLayout = QtWidgets.QHBoxLayout(self.layoutWidget)
        self.horizontalLayout.setContentsMargins(0, 0, 0, 0)
        self.horizontalLayout.setSpacing(30)
        self.horizontalLayout.setObjectName("horizontalLayout")
        self.label_point_1 = QtWidgets.QLabel(self.layoutWidget)
        self.label_point_1.setObjectName("label_point_1")
        self.horizontalLayout.addWidget(self.label_point_1)
        self.label_point_2 = QtWidgets.QLabel(self.layoutWidget)
        self.label_point_2.setObjectName("label_point_2")
        self.horizontalLayout.addWidget(self.label_point_2)
        self.stackedWidget.addWidget(self.page)
        # --- page 2: packaging form (parent package, paths, signing cert) ---
        self.page_2 = QtWidgets.QWidget()
        self.page_2.setObjectName("page_2")
        self.gridLayoutWidget = QtWidgets.QWidget(self.page_2)
        self.gridLayoutWidget.setGeometry(QtCore.QRect(50, 110, 771, 251))
        self.gridLayoutWidget.setObjectName("gridLayoutWidget")
        self.gridLayout = QtWidgets.QGridLayout(self.gridLayoutWidget)
        self.gridLayout.setContentsMargins(10, 10, 10, 10)
        self.gridLayout.setHorizontalSpacing(20)
        self.gridLayout.setVerticalSpacing(30)
        self.gridLayout.setObjectName("gridLayout")
        self.pushButton_dl_pk = QtWidgets.QPushButton(self.gridLayoutWidget)
        self.pushButton_dl_pk.setObjectName("pushButton_dl_pk")
        self.gridLayout.addWidget(self.pushButton_dl_pk, 1, 4, 1, 1)
        self.label_game_status_2 = QtWidgets.QLabel(self.gridLayoutWidget)
        self.label_game_status_2.setObjectName("label_game_status_2")
        self.gridLayout.addWidget(self.label_game_status_2, 0, 2, 1, 1)
        self.label_pk_addr_point = QtWidgets.QLabel(self.gridLayoutWidget)
        self.label_pk_addr_point.setText("")
        self.label_pk_addr_point.setPixmap(QtGui.QPixmap(":/img/star.png"))
        self.label_pk_addr_point.setAlignment(QtCore.Qt.AlignCenter)
        self.label_pk_addr_point.setObjectName("label_pk_addr_point")
        self.gridLayout.addWidget(self.label_pk_addr_point, 1, 1, 1, 1)
        self.lineEdit_output_addr = QtWidgets.QLineEdit(self.gridLayoutWidget)
        self.lineEdit_output_addr.setObjectName("lineEdit_output_addr")
        self.gridLayout.addWidget(self.lineEdit_output_addr, 2, 2, 1, 1)
        self.label_output_name = QtWidgets.QLabel(self.gridLayoutWidget)
        self.label_output_name.setAlignment(QtCore.Qt.AlignCenter)
        self.label_output_name.setObjectName("label_output_name")
        self.gridLayout.addWidget(self.label_output_name, 4, 0, 1, 1)
        self.lineEdit_pk_addr = QtWidgets.QLineEdit(self.gridLayoutWidget)
        self.lineEdit_pk_addr.setObjectName("lineEdit_pk_addr")
        self.gridLayout.addWidget(self.lineEdit_pk_addr, 1, 2, 1, 1)
        self.label_pk_addr = QtWidgets.QLabel(self.gridLayoutWidget)
        self.label_pk_addr.setAlignment(QtCore.Qt.AlignCenter)
        self.label_pk_addr.setObjectName("label_pk_addr")
        self.gridLayout.addWidget(self.label_pk_addr, 1, 0, 1, 1)
        self.pushButton_chose_pk = QtWidgets.QPushButton(self.gridLayoutWidget)
        self.pushButton_chose_pk.setObjectName("pushButton_chose_pk")
        self.gridLayout.addWidget(self.pushButton_chose_pk, 1, 3, 1, 1)
        self.label_game_status = QtWidgets.QLabel(self.gridLayoutWidget)
        self.label_game_status.setAlignment(QtCore.Qt.AlignCenter)
        self.label_game_status.setObjectName("label_game_status")
        self.gridLayout.addWidget(self.label_game_status, 0, 0, 1, 1)
        self.lineEdit_sign = QtWidgets.QLineEdit(self.gridLayoutWidget)
        self.lineEdit_sign.setObjectName("lineEdit_sign")
        self.gridLayout.addWidget(self.lineEdit_sign, 3, 2, 1, 1)
        self.pushButton_output_addr = QtWidgets.QPushButton(self.gridLayoutWidget)
        self.pushButton_output_addr.setObjectName("pushButton_output_addr")
        self.gridLayout.addWidget(self.pushButton_output_addr, 2, 3, 1, 1)
        self.label_sign = QtWidgets.QLabel(self.gridLayoutWidget)
        self.label_sign.setAlignment(QtCore.Qt.AlignCenter)
        self.label_sign.setObjectName("label_sign")
        self.gridLayout.addWidget(self.label_sign, 3, 0, 1, 1)
        self.pushButton_sign = QtWidgets.QPushButton(self.gridLayoutWidget)
        self.pushButton_sign.setObjectName("pushButton_sign")
        self.gridLayout.addWidget(self.pushButton_sign, 3, 3, 1, 1)
        self.label_output_addr = QtWidgets.QLabel(self.gridLayoutWidget)
        self.label_output_addr.setAlignment(QtCore.Qt.AlignCenter)
        self.label_output_addr.setObjectName("label_output_addr")
        self.gridLayout.addWidget(self.label_output_addr, 2, 0, 1, 1)
        self.lineEdit_output_name = QtWidgets.QLineEdit(self.gridLayoutWidget)
        self.lineEdit_output_name.setObjectName("lineEdit_output_name")
        self.gridLayout.addWidget(self.lineEdit_output_name, 4, 2, 1, 1)
        self.stackedWidget.addWidget(self.page_2)
        # --- page 3: channel selection (status legend, list, select-all) ---
        self.page_3 = QtWidgets.QWidget()
        self.page_3.setObjectName("page_3")
        self.layoutWidget1 = QtWidgets.QWidget(self.page_3)
        self.layoutWidget1.setGeometry(QtCore.QRect(20, 20, 812, 468))
        self.layoutWidget1.setObjectName("layoutWidget1")
        self.verticalLayout_2 = QtWidgets.QVBoxLayout(self.layoutWidget1)
        self.verticalLayout_2.setContentsMargins(0, 0, 0, 0)
        self.verticalLayout_2.setObjectName("verticalLayout_2")
        self.widget_channel_tab = QtWidgets.QWidget(self.layoutWidget1)
        self.widget_channel_tab.setMinimumSize(QtCore.QSize(800, 50))
        self.widget_channel_tab.setObjectName("widget_channel_tab")
        self.label_select_1 = QtWidgets.QLabel(self.widget_channel_tab)
        self.label_select_1.setGeometry(QtCore.QRect(307, 18, 16, 16))
        self.label_select_1.setText("")
        self.label_select_1.setPixmap(QtGui.QPixmap(":/img/yixuan.png"))
        self.label_select_1.setObjectName("label_select_1")
        self.label_select_2 = QtWidgets.QLabel(self.widget_channel_tab)
        self.label_select_2.setGeometry(QtCore.QRect(329, 18, 96, 16))
        self.label_select_2.setObjectName("label_select_2")
        self.label_select_3 = QtWidgets.QLabel(self.widget_channel_tab)
        self.label_select_3.setGeometry(QtCore.QRect(431, 18, 16, 16))
        self.label_select_3.setObjectName("label_select_3")
        self.label_not_available_1 = QtWidgets.QLabel(self.widget_channel_tab)
        self.label_not_available_1.setGeometry(QtCore.QRect(140, 20, 16, 16))
        self.label_not_available_1.setText("")
        self.label_not_available_1.setPixmap(QtGui.QPixmap(":/img/weipeizhi.png"))
        self.label_not_available_1.setObjectName("label_not_available_1")
        self.label_not_available_2 = QtWidgets.QLabel(self.widget_channel_tab)
        self.label_not_available_2.setGeometry(QtCore.QRect(162, 20, 72, 16))
        self.label_not_available_2.setObjectName("label_not_available_2")
        self.label_not_available_3 = QtWidgets.QLabel(self.widget_channel_tab)
        self.label_not_available_3.setGeometry(QtCore.QRect(240, 20, 18, 16))
        font = QtGui.QFont()
        font.setUnderline(True)
        self.label_not_available_3.setFont(font)
        self.label_not_available_3.setObjectName("label_not_available_3")
        self.label_available_1 = QtWidgets.QLabel(self.widget_channel_tab)
        self.label_available_1.setGeometry(QtCore.QRect(23, 20, 16, 16))
        self.label_available_1.setText("")
        self.label_available_1.setPixmap(QtGui.QPixmap(":/img/kexuan.png"))
        self.label_available_1.setObjectName("label_available_1")
        self.label_available_2 = QtWidgets.QLabel(self.widget_channel_tab)
        self.label_available_2.setGeometry(QtCore.QRect(45, 20, 31, 16))
        self.label_available_2.setObjectName("label_available_2")
        self.label_available_3 = QtWidgets.QLabel(self.widget_channel_tab)
        self.label_available_3.setGeometry(QtCore.QRect(87, 20, 16, 16))
        self.label_available_3.setObjectName("label_available_3")
        self.verticalLayout_2.addWidget(self.widget_channel_tab)
        self.horizontalLayout_6 = QtWidgets.QHBoxLayout()
        self.horizontalLayout_6.setObjectName("horizontalLayout_6")
        self.verticalLayout = QtWidgets.QVBoxLayout()
        self.verticalLayout.setObjectName("verticalLayout")
        self.widget_channel_list = QtWidgets.QWidget(self.layoutWidget1)
        self.widget_channel_list.setMinimumSize(QtCore.QSize(240, 360))
        self.widget_channel_list.setObjectName("widget_channel_list")
        self.verticalLayout.addWidget(self.widget_channel_list)
        self.widget_select_all = QtWidgets.QWidget(self.layoutWidget1)
        self.widget_select_all.setMinimumSize(QtCore.QSize(240, 40))
        self.widget_select_all.setObjectName("widget_select_all")
        self.checkBox = QtWidgets.QCheckBox(self.widget_select_all)
        self.checkBox.setGeometry(QtCore.QRect(20, 10, 71, 21))
        self.checkBox.setObjectName("checkBox")
        self.verticalLayout.addWidget(self.widget_select_all)
        self.horizontalLayout_6.addLayout(self.verticalLayout)
        self.widget_select_channel_list = QtWidgets.QWidget(self.layoutWidget1)
        self.widget_select_channel_list.setMinimumSize(QtCore.QSize(560, 400))
        self.widget_select_channel_list.setObjectName("widget_select_channel_list")
        self.horizontalLayout_6.addWidget(self.widget_select_channel_list)
        self.verticalLayout_2.addLayout(self.horizontalLayout_6)
        self.stackedWidget.addWidget(self.page_3)
        # --- page 4: build-status tabs over an inner sliding stack ---
        self.page_4 = QtWidgets.QWidget()
        self.page_4.setObjectName("page_4")
        self.widget = QtWidgets.QWidget(self.page_4)
        self.widget.setGeometry(QtCore.QRect(30, 30, 802, 438))
        self.widget.setObjectName("widget")
        self.verticalLayout_3 = QtWidgets.QVBoxLayout(self.widget)
        self.verticalLayout_3.setContentsMargins(0, 0, 0, 0)
        self.verticalLayout_3.setObjectName("verticalLayout_3")
        self.widget_unpack_tab = QtWidgets.QWidget(self.widget)
        self.widget_unpack_tab.setMinimumSize(QtCore.QSize(800, 40))
        self.widget_unpack_tab.setObjectName("widget_unpack_tab")
        self.pushButton_unpack = QtWidgets.QPushButton(self.widget_unpack_tab)
        self.pushButton_unpack.setGeometry(QtCore.QRect(10, 10, 90, 21))
        icon = QtGui.QIcon()
        icon.addPixmap(QtGui.QPixmap(":/img/chubaozhong.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.pushButton_unpack.setIcon(icon)
        self.pushButton_unpack.setObjectName("pushButton_unpack")
        self.pushButton_success = QtWidgets.QPushButton(self.widget_unpack_tab)
        self.pushButton_success.setGeometry(QtCore.QRect(120, 10, 90, 21))
        icon1 = QtGui.QIcon()
        icon1.addPixmap(QtGui.QPixmap(":/img/成功.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.pushButton_success.setIcon(icon1)
        self.pushButton_success.setObjectName("pushButton_success")
        self.pushButton_test = QtWidgets.QPushButton(self.widget_unpack_tab)
        self.pushButton_test.setGeometry(QtCore.QRect(230, 10, 90, 21))
        icon2 = QtGui.QIcon()
        icon2.addPixmap(QtGui.QPixmap(":/img/07-大拇指.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.pushButton_test.setIcon(icon2)
        self.pushButton_test.setObjectName("pushButton_test")
        self.pushButton_fail = QtWidgets.QPushButton(self.widget_unpack_tab)
        self.pushButton_fail.setGeometry(QtCore.QRect(340, 10, 90, 21))
        icon3 = QtGui.QIcon()
        icon3.addPixmap(QtGui.QPixmap(":/img/weipeizhi.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.pushButton_fail.setIcon(icon3)
        self.pushButton_fail.setObjectName("pushButton_fail")
        self.verticalLayout_3.addWidget(self.widget_unpack_tab)
        self.stackedWidget_2 = SlidingStackedWidget(self.widget)
        self.stackedWidget_2.setMinimumSize(QtCore.QSize(800, 390))
        self.stackedWidget_2.setObjectName("stackedWidget_2")
        self.page_6 = QtWidgets.QWidget()
        self.page_6.setObjectName("page_6")
        self.stackedWidget_2.addWidget(self.page_6)
        self.page_7 = QtWidgets.QWidget()
        self.page_7.setObjectName("page_7")
        self.stackedWidget_2.addWidget(self.page_7)
        self.page_8 = QtWidgets.QWidget()
        self.page_8.setObjectName("page_8")
        self.stackedWidget_2.addWidget(self.page_8)
        self.page_9 = QtWidgets.QWidget()
        self.page_9.setObjectName("page_9")
        self.stackedWidget_2.addWidget(self.page_9)
        self.verticalLayout_3.addWidget(self.stackedWidget_2)
        self.stackedWidget.addWidget(self.page_4)
        self.retranslateUi(Form)
        # both stacks start on their first page
        self.stackedWidget.setCurrentIndex(0)
        self.stackedWidget_2.setCurrentIndex(0)
        QtCore.QMetaObject.connectSlotsByName(Form)
    def retranslateUi(self, Form):
        """Install translatable (Chinese) UI strings; called from setupUi and
        again by Qt whenever the application language changes."""
        _translate = QtCore.QCoreApplication.translate
        Form.setWindowTitle(_translate("Form", "Form"))
        self.label_point_1.setText(_translate("Form", "您当前没有可以打包的游戏点击"))
        self.label_point_2.setText(_translate("Form", "创建游戏"))
        self.pushButton_dl_pk.setText(_translate("Form", "下载母包"))
        self.label_game_status_2.setText(_translate("Form", "已上线"))
        self.label_output_name.setText(_translate("Form", "输出文件名"))
        self.label_pk_addr.setText(_translate("Form", "母包地址"))
        self.pushButton_chose_pk.setText(_translate("Form", "选择母包"))
        self.label_game_status.setText(_translate("Form", "游戏状态"))
        self.pushButton_output_addr.setText(_translate("Form", "存放路径"))
        self.label_sign.setText(_translate("Form", "签名证书"))
        self.pushButton_sign.setText(_translate("Form", "签名证书"))
        self.label_output_addr.setText(_translate("Form", "输出路径"))
        self.label_select_2.setText(_translate("Form", "已选择打包渠道:"))
        self.label_select_3.setText(_translate("Form", "0"))
        self.label_not_available_2.setText(_translate("Form", "未配置渠道:"))
        self.label_not_available_3.setText(_translate("Form", "926"))
        self.label_available_2.setText(_translate("Form", "可用:"))
        self.label_available_3.setText(_translate("Form", "0"))
        self.checkBox.setText(_translate("Form", "全选"))
        self.pushButton_unpack.setText(_translate("Form", "出包中(0)"))
        self.pushButton_success.setText(_translate("Form", "成功(0)"))
        self.pushButton_test.setText(_translate("Form", "已测试(0)"))
        self.pushButton_fail.setText(_translate("Form", "失败(0)"))
import stack_page_rc
| [
"chuyong@3k.com"
] | chuyong@3k.com |
b2a2559809d92eb818eeb0fd01d727dc5c2d82ea | f876bb72242efab800eb9c24877b00e1c792b1a0 | /Python/data_structure/test.py | 38f282e19d3fe0f95dce89e483f93367c83a0f1e | [] | no_license | weilin-zhang/MyProject_siat | da5634a409cf1ce125120ba28b4bb22ccff5dc4d | cb4abf41bb3492e642d5a3d3eabf99ea2c6bce16 | refs/heads/master | 2020-03-18T20:44:40.605068 | 2018-10-17T12:10:54 | 2018-10-17T12:10:54 | 135,235,555 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,581 | py | # try:
# a = input()
# except IOError:
# print("error:缺少输入")
# else:
# print(a)
# recursively compute N! (factorial)
def recursive_mix(n):
    """Return n! (the factorial of n), computed recursively.

    Bug fix: the original base case was ``n == 2 -> 1``, which dropped the
    factor 2 (recursive_mix(5) returned 60 instead of 120) and recursed
    forever for n < 2.  The base case is now ``n <= 1 -> 1`` (0! == 1! == 1).
    """
    if n <= 1:
        return 1
    return n * recursive_mix(n - 1)
# decimal to binary conversion
def recursive_conversion(n):
    """Recursively print the binary digits of ``n``, one digit per line,
    most-significant digit first (prints nothing for n == 0)."""
    if n:
        recursive_conversion(int(n / 2))  # emit higher-order bits first
        print(n % 2)
# return n%2
# recursively print a number's digits in reverse order
def recursive_back(n):
    """Recursively print the decimal digits of ``n`` in reverse order
    (least-significant digit first), one digit per line; prints nothing
    for n == 0."""
    if n:
        print(n % 10)  # emit the lowest digit before recursing
        recursive_back(int(n / 10))
# print(recursive_mix(5))
# recursive_conversion(23)
# recursive_back(1234)
## reverse a string using a stack
def func(str):
    """Reverse a string by pushing its characters onto a list used as a
    stack and popping them off.

    Bug fix: the original popped from ``result`` (a str, which has no
    ``pop`` method) instead of ``lis``, raising AttributeError on any
    non-empty input.  The parameter name ``str`` (shadowing the builtin)
    is kept for interface compatibility.
    """
    lis = list(str)
    result = ''
    while lis:
        result += lis.pop()
    return result
def towSum(num, target):
    """Two-sum: return ``[i, j]`` (i < j) such that
    ``num[i] + num[j] == target``, using a single pass with a dict of
    previously seen values; returns None when no such pair exists."""
    seen = {}
    for idx, value in enumerate(num):
        complement = target - value
        if complement in seen:
            return [seen[complement], idx]
        seen[value] = idx
# print(towSum([4,2,1,6,7],5))
def withoutRep(s):
    """Return the length of the longest substring of ``s`` that contains
    no repeating characters (string-window variant).

    Bug fixes versus the original:
    - the final window was never counted, so any string with no repeated
      character (e.g. 'abc') returned 0;
    - on a repeat, ``s.index(s[i])`` searched the WHOLE string instead of
      the current window, so an earlier occurrence outside the window
      misplaced the window start (e.g. 'dvdf' gave the wrong answer).
    """
    window = ''   # current repeat-free substring
    maxlen = 0
    for ch in s:
        if ch in window:
            maxlen = max(maxlen, len(window))
            # drop everything up to and including the previous occurrence
            window = window[window.index(ch) + 1:]
        window += ch
    # the last window is a candidate too
    return max(maxlen, len(window))
def withoutRep1(s):
    """Longest substring without repeating characters, via a sliding
    window: ``last_seen`` maps each character to its most recent index and
    ``window_start`` marks the left edge of the current repeat-free window."""
    last_seen = {}
    window_start = 0
    best = 0
    for idx, ch in enumerate(s):
        # a repeat inside the window moves the left edge past it
        if ch in last_seen and window_start <= last_seen[ch]:
            window_start = last_seen[ch] + 1
        best = max(best, idx - window_start + 1)
        last_seen[ch] = idx
    return best
def withoutRep2(s):
    """Longest substring without repeating characters, brute force: grow a
    fresh repeat-free run from each start position, breaking out early once
    the remaining suffix cannot beat the best run found so far."""
    best = 0
    for start in range(len(s)):
        if len(s) - start <= best:
            break  # remaining suffix is too short to improve on best
        seen = ''
        j = start
        while j < len(s) and s[j] not in seen:
            seen += s[j]
            j += 1
        if best < len(seen):
            best = len(seen)
    return best
print(withoutRep1('pwwkew'))
'''
Sliding window:
maintain a dict and add elements into it one by one;
if a duplicate element a appears, update the dict and set left to dic[a] + 1
'''
def withoutRept(s):
    """Length of the longest repeat-free substring of ``s``.

    ``positions`` records the most recent index of every character seen;
    ``left`` is the start of the current window.  When the character at
    ``right`` already occurs inside the window, the window start jumps just
    past its previous occurrence.
    """
    positions = {}
    left = 0
    longest = 0
    for right in range(len(s)):
        ch = s[right]
        if ch in positions and left <= positions[ch]:
            left = positions[ch] + 1
        longest = max(longest, right - left + 1)
        positions[ch] = right
    return longest
# print(withoutRep1('abba'))
s = 'aaabbcccaa'
def tranStr(s):
    """Run-length encode ``s``: every maximal run of a repeated character
    becomes '<run length><character>', e.g. 'aaabbcccaa' -> '3a2b3c2a'.

    Bug fixes versus the original:
    - the trailing run was never flushed ('aaabbcccaa' produced '3a2b3c',
      losing the final '2a');
    - the run character was taken from ``s[i - 1]``, which is wrong for
      runs of length one and reads ``s[-1]`` (the LAST character) when the
      first run ends at index 0 (e.g. tranStr('ab') produced '1b').
    """
    if not s:
        return ''
    encoded = ''
    run_char = s[0]
    run_len = 0
    for ch in s:
        if ch == run_char:
            run_len += 1
        else:
            # run ended: flush it and start counting the new character
            encoded += str(run_len) + run_char
            run_char = ch
            run_len = 1
    # flush the final run
    return encoded + str(run_len) + run_char
print(tranStr(s))
| [
"willzhang003@gmail.com"
] | willzhang003@gmail.com |
c5709606fb924f3f474a7c77011029c101624e0b | c6e5f5c2a695249a306a053a0d0cd4cf4dc6de66 | /analyse_metres_from_location.py | 6c33b479cc69a67c429d9fd2873c586b027a157a | [
"Apache-2.0"
] | permissive | AdamLeonSmith/privkittools | 8befa2f0a8dc408c8b95c0fd12a70c037a3a33ea | 294c107ddaeda5017fcc7e7b87dd54ddc504e081 | refs/heads/master | 2022-05-28T02:03:45.346237 | 2020-05-02T15:55:53 | 2020-05-02T15:55:53 | 256,949,985 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,335 | py | #!/usr/bin/python3
import time
import json
from datetime import datetime, date, timedelta
import math
#Enter your latitude here: http://www.csgnetwork.com/degreelenllavcalc.html
# then copy the Length of a Degree of Latittude/Longitude in Metres
# Metres per degree of latitude/longitude at the location of interest
# (precomputed with the calculator linked above for a specific latitude).
lod_lat = 111061.57849146728
lod_long = 83627.11630547515
# NOTE(review): these inverse factors are never used below — dead variables.
lat_per_metre = 1/lod_lat
long_per_metre = 1/lod_long
# Load the exported location history; the file is JSON after newlines are
# stripped.  Each point is expected to carry 'latitude', 'longitude' and
# 'time' (epoch milliseconds) keys, as read in the loop below.
with open('/home/adam/Desktop/1587822189712.json', encoding='utf-8-sig') as json_file:
    data = json.loads(json_file.read().replace('\n', ''))
# Target location and an 8-hour time window (±4h) around the target time.
tgt_location = {}
tgt_location['latitude'] = 41.39808000
tgt_location['longitude'] = 2.1582888
tgt_time = datetime.fromtimestamp(1587794451000/1000)
start_time = tgt_time - timedelta(hours=4)
end_time = tgt_time + timedelta(hours=4)
print("Start of window: " + str(start_time))
print("End of window: " + str(end_time))
# NOTE(review): 'hits' is never incremented — dead variable.
hits = 0
for pt in data:
    # Convert the lat/long deltas to metres (flat-earth approximation,
    # valid only near the latitude the lod_* constants were computed for)
    # and take the Euclidean distance.
    long_diff = abs(tgt_location['longitude'] - pt['longitude'])
    lat_diff = abs(tgt_location['latitude'] - pt['latitude'])
    _long_diff = long_diff * lod_long
    _lat_diff = lat_diff * lod_lat
    dist = math.sqrt(_long_diff * _long_diff + _lat_diff * _lat_diff)
    # Report points within 20 m of the target that fall inside the window.
    if dist < 20:
        dt = datetime.fromtimestamp(pt['time'] / 1000.0)
        if dt > start_time and dt < end_time:
            print("Point of concern identified... " + str(dist) +"m & " + str(dt - tgt_time))
| [
"adam@wearedragonfly.co"
] | adam@wearedragonfly.co |
7c8f9528eabc81860f28438eda853c4c6f951b0b | fd0d8b010d45f959f0660afb192c7349e266a329 | /competitive/AtCoder/ABC197/B.py | 8545cea720cd0360b76c986456a16709998efda5 | [
"MIT"
] | permissive | pn11/benkyokai | b650f5957545fdefbea7773aaae3f61f210f69ce | 9ebdc46b529e76b7196add26dbc1e62ad48e72b0 | refs/heads/master | 2023-01-28T01:38:29.566561 | 2021-10-03T04:20:14 | 2021-10-03T04:20:14 | 127,143,471 | 0 | 0 | MIT | 2023-01-07T07:19:05 | 2018-03-28T13:20:51 | Jupyter Notebook | UTF-8 | Python | false | false | 580 | py | import numpy as np
H, W, X, Y = [int(x) for x in input().split()]
X -= 1
Y -= 1
arr = np.zeros(shape=(H, W))
for y in range(H):
s = input()
for x in range(W):
arr[y, x] = 1 if s[x] == '#' else 0
ans = 1 # 自分自身
a = arr[X, :]
cuma = np.cumsum(a)
for y in range(W):
if y == Y:
continue
if a[y] == 1:
continue
if cuma[y] == cuma[Y]:
ans += 1
b = arr[:, Y]
cumb = np.cumsum(b)
for x in range(H):
if x == X:
continue
if b[x] == 1:
continue
if cumb[x] == cumb[X]:
ans += 1
print(ans)
| [
"pn11@users.noreply.github.com"
] | pn11@users.noreply.github.com |
96737a8d8e3b94d006ce461609ca5978d7aa72cf | 9aeb6b803977b98cf9f325cb942beb54209224a1 | /webapp/order/models.py | fc5698bc956641778d6c62113bba9fb400e3fc3b | [] | no_license | DmPolikarpov/book_store | 65ca1097039561fb5d36fefb161491e78e897b2f | cb9dea58a6ba3116575fd19eecd4036591510f1c | refs/heads/master | 2022-12-11T06:52:04.283211 | 2019-05-14T17:18:21 | 2019-05-14T17:18:21 | 176,566,598 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 680 | py | from webapp.db import db
from datetime import datetime
from sqlalchemy.orm import relationship
class Order(db.Model):
    """Order model: links a user to a book, with JSON payloads for the
    order details and the delivery information."""
    # surrogate primary key
    id = db.Column(db.Integer, primary_key=True)
    # purchaser and purchased book
    user_id = db.Column(db.Integer, db.ForeignKey('user.id'))
    book_id = db.Column(db.Integer, db.ForeignKey('book.id'))
    # creation timestamp, set automatically on insert
    order_date = db.Column(db.DateTime, default=datetime.now)
    # free-form JSON: order line details and delivery data (both required)
    detail = db.Column(db.JSON, nullable=False)
    deliver = db.Column(db.JSON, nullable=False)
    # relationships back to the owning user and the ordered book
    users = db.relationship('User', backref='order')
    books = db.relationship('Book', backref='order')
    def __repr__(self):
        return f'Order {self.id} {self.order_date}'
"mazaxist2@mail.ru"
] | mazaxist2@mail.ru |
b61d898faa6c848078f2879f12b9b30bb37e259c | 2e6c1dbe9e11426ffa65adc12723ddc4a986e5e9 | /day2/mysys.py | e55e86e06a90022fd72dfd996c41e05a61c66889 | [] | no_license | 525642022/learn_pyhton_basic | 40fb48b85c7120465c790dc82668d83fa2bd6d5d | be1c7499ade2ae1f27440441e7472b1b6a902430 | refs/heads/master | 2020-03-26T04:48:00.681424 | 2018-08-13T05:39:04 | 2018-08-13T05:39:04 | 144,523,133 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 212 | py | # 作者 ljc
import sys
import os
# 打印环境变量
print(sys.path)
# 打印当前文件名
print(sys.argv)
a = 1
b = 2
c = 3
d = a if a > b else c
msg = "理解成"
print(msg)
print(msg.encode().decode())
| [
"525642022@qq.com"
] | 525642022@qq.com |
e7baf0aff54ebd9ea52d81571eb26176fcfbae42 | a5a0f90bb6f19aeb3c43d80532d61c0f99ad79a1 | /prayer/blocks.py | aafcf4a1595d81cf264cafbab838df3de045e22c | [] | no_license | AlbianWarp/ReBabel | 3c19153681448c9f700250049defe9e5fa019b4e | e3a39fac0b26c990eee6f55ca753fc6eb90fc80d | refs/heads/master | 2020-06-03T12:09:21.646662 | 2020-03-27T16:29:35 | 2020-03-27T16:29:35 | 191,563,002 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 8,830 | py | import zlib
from collections import MutableSequence
class Block:
def __init__(self, data=None):
if data is None:
self.type = "NONE"
self.name = ""
self.data = b""
else:
self._set_block_data(data=data)
@property
def block_data(self):
return self._get_block_data(compress_data=False)
@block_data.setter
def block_data(self, block_data):
self._set_block_data(data=block_data)
@property
def zblock_data(self):
return self._get_block_data(compress_data=True)
@zblock_data.setter
def zblock_data(self, block_data):
self._set_block_data(data=block_data)
def _set_block_data(self, data):
self._block_data = data
# The first 4 Byte contain the type of the Block
self.type = data[:4].decode("latin-1")
# the following 128 Byte, contain the Name of the Block in latin-1
# padded with 'NUL' '\0'
self.name = data[4:132].decode("latin-1").rstrip("\0")
# then there is a 32 bit Integer, that states the compressed
# size/length of the data
data_length = int.from_bytes(data[132:136], byteorder="little", signed=False)
# right after that, there is another 32 bit Integer that states the
# uncompressed size/length of the data.
uncompressed_data_length = int.from_bytes(
data[136:140], byteorder="little", signed=False
)
# then there is an 32 Bit Integer containing either a one or a zero, 1
# = block data is compressed, 0 = block data is uncompressed
if (
int.from_bytes(data[140:144], byteorder="little") == 1
and data_length != uncompressed_data_length
):
self.compressed = True
else:
self.compressed = False
# The Compressed and decompressed Data can each be found at offset 144
# + the Length of the "self.data_length" Variable
data_raw = data[144 : 144 + data_length]
if self.compressed:
self.data = zlib.decompress(data_raw)
else:
self.data = data_raw
def _get_block_data(self, compress_data=False):
"""This Function should return All the Block Data in the correct Block Format, containing, the name, type and what not :D"""
data_block = self.data
uncompressed_length = len(data_block)
if compress_data:
data_block = zlib.compress(data_block)
compress_data_bit = 1
else:
compress_data_bit = 0
data = bytes(self.type, encoding="latin-1")
data += bytes(self.name, encoding="latin-1").ljust(128, b"\0")
data += len(data_block).to_bytes(length=4, byteorder="little")
data += uncompressed_length.to_bytes(length=4, byteorder="little")
data += compress_data_bit.to_bytes(length=4, byteorder="little")
data += data_block
return data
class TagBlock(Block):
def __init__(self, data):
"""docstring :D"""
Block.__init__(self, data)
@staticmethod
def create_tag_block(block_type, block_name, named_variables):
tmp_tag_block = TagBlock(Block().block_data)
tmp_tag_block.type = block_type
tmp_tag_block.name = block_name
tmp_tag_block.number_of_integer_variables = 0
tmp_tag_block.number_of_string_varaibles = 0
for variable in named_variables:
if type(variable[1] == int):
number_of_integer_variables = +1
tmp_tag_block.named_variables.append(variable)
elif type(variable[1 == str]):
number_of_string_varaibles = +1
tmp_tag_block.named_variables.append(variable)
return tmp_tag_block
def _get_named_integer_variables(self, data, count):
#
# Each named Integer Variable consists of 3 Parts:
# - a 32bit Integer Variable that states the length of the name 'key_length'
# - n Bytes containing said Name, where n is the length specified in the Integer beforhand 'key'
# - a 32bit Integer containing the 'value' of the Named Integer
# +------------------+-------------------+--------------+
# | 4B Int len(KEY) | nB KEY in LATIN-1 | 4B Int Value |
# +------------------+-------------------+--------------+
#
if count != 0:
key_length = int.from_bytes(data[:4], byteorder="little")
key = data[4 : 4 + key_length].decode("latin-1")
value = int.from_bytes(
data[4 + key_length : 8 + key_length], byteorder="little"
)
self.named_variables.append((key, value))
self._get_named_integer_variables(
data=data[8 + key_length :], count=count - 1
)
else:
self.str_data = data
def _get_named_string_variables(self, data, count):
#
# Each named String Variable consists of 4 Parts:
# - a 32bit Integer Variable that states the length of the name 'key_length'
# - n Bytes containing said name, where n is the length specified in the Integer beforhand 'key'
# - a 32bit Integer Variable that states the length of the value 'value_length'
# - n Bytes containing said 'value', where n is the length specified in the Integer beforhand 'value_length'
# +-----------------+-------------------+-------------------+---------------------+
# | 4B Int len(KEY) | nB KEY in LATIN-1 | 4B Int len(Value) | nB Value in LATIN-1 |
# +-----------------+-------------------+-------------------+---------------------+
#
if count != 0:
key_length = int.from_bytes(data[:4], byteorder="little")
key = data[4 : 4 + key_length].decode("latin-1")
value_length = int.from_bytes(
data[4 + key_length : 8 + key_length], byteorder="little"
)
value = data[8 + key_length : 8 + key_length + value_length].decode(
"latin-1"
)
self.named_variables.append((key, value))
self._get_named_string_variables(
data=data[8 + key_length + value_length :], count=count - 1
)
else:
self.str_data = data
@property
def data(self):
# if self.named_variables.data_changed:
if True:
ints = list()
strings = list()
for variable in self.named_variables:
if type(variable[1]) == int:
ints.append(variable)
elif type(variable[1] == str):
strings.append(variable)
tmp_ints = bytes(len(ints).to_bytes(length=4, byteorder="little"))
for variable in ints:
tmp_ints += len(bytes(variable[0], encoding="latin-1")).to_bytes(
length=4, byteorder="little"
)
tmp_ints += bytes(variable[0], encoding="latin-1")
tmp_ints += variable[1].to_bytes(length=4, byteorder="little")
tmp_strings = bytes(len(strings).to_bytes(length=4, byteorder="little"))
for variable in strings:
tmp_strings += len(bytes(variable[0], encoding="latin-1")).to_bytes(
length=4, byteorder="little"
)
tmp_strings += bytes(variable[0], encoding="latin-1")
tmp_strings += len(bytes(variable[1], encoding="latin-1")).to_bytes(
length=4, byteorder="little"
)
tmp_strings += bytes(variable[1], encoding="latin-1")
self._data = tmp_ints + tmp_strings
# self.named_variables.data_changed = False
return self._data
@data.setter
def data(self, data):
self._data = data
self.named_variables = list()
# Integers
self.number_of_integer_variables = int.from_bytes(data[:4], byteorder="little")
self._get_named_integer_variables(
data=data[4:], count=self.number_of_integer_variables
)
# Strings
self.number_of_string_varaibles = int.from_bytes(
self.str_data[:4], byteorder="little"
)
self._get_named_string_variables(
data=self.str_data[4:], count=self.number_of_string_varaibles
)
class TagBlockVariableList(MutableSequence):
def __init__(self, data=None):
self.data_changed = False
super(TagBlockVariableList, self).__init__()
if not (data is None):
self._list = list(data)
else:
self._list = list()
def append(self, val):
list_idx = len(self._list)
self.data_changed = True
self.insert(list_idx, val)
| [
"KeyboardInterrupt@KeyboardInterrupt.com"
] | KeyboardInterrupt@KeyboardInterrupt.com |
4c6b48556148eb4ff0afb1feb79bb39b4b8880d7 | 30b3e64625079dc74372973447068a72031b71e7 | /SEMANA I/introductorio_2017/python/gcd/gcd.py | 0afed0f79d22de96c8270005369d286ed3aef41a | [] | no_license | AndreyArguedas/Paradigmas-de-programacion | 1567f8241fa9602d76750c9492950594868e327c | bedf535349df9cf996a1a4928ac1bd9d826a52f2 | refs/heads/master | 2021-01-01T18:04:23.705512 | 2017-11-16T06:26:39 | 2017-11-16T06:26:39 | 98,235,482 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 466 | py | import sys
def gcd(a, b) :
if a == 0 : return b;
if b == 0 : return a;
while a != b :
if a > b :
a = a - b
else :
b = b - a
return a
def main():
a, b = (0, 0)
if len(sys.argv) >= 2 :
a = int(sys.argv[1])
if len(sys.argv) >= 3 :
b = int(sys.argv[2])
print("gcd(%d, %d)= %d" % (a, b, gcd(a, b))) # Estilo 2.x
print(f"gcd({a}, {b})= {gcd(a, b)}") # Estilo 3.x
#
main()
| [
"andrey.arguedas.espinoza@est.una.ac.cr"
] | andrey.arguedas.espinoza@est.una.ac.cr |
19c612c0ab1c8d493215af1861ac5ed57794ff03 | 017785d3e5cbd85460bed3f6513d3a7cc1cc8511 | /summarizer/tf-idf.py | 86d73f83373f645bd54cdb1f3b9d3cbb3703f829 | [] | no_license | abhinavagrawal1995/rollup | b952fb68f25438409986250ecf781f4eefd757ea | b480a383b0c13b8e8da84e484daaca323e992728 | refs/heads/master | 2020-04-09T04:54:30.850633 | 2019-08-12T17:37:15 | 2019-08-12T17:37:15 | 160,042,839 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,013 | py | from __future__ import division
import string
import math
#in Scikit-Learn
from sklearn.feature_extraction.text import TfidfVectorizer
def getFileData(fname):
arr = []
f = open(fname, "r")
for line in f:
arr.append(line)
return arr
def jaccard_similarity(query, document):
intersection = set(query).intersection(set(document))
union = set(query).union(set(document))
return len(intersection)/len(union)
def term_frequency(term, tokenized_document):
return tokenized_document.count(term)
def sublinear_term_frequency(term, tokenized_document):
count = tokenized_document.count(term)
if count == 0:
return 0
return 1 + math.log(count)
def augmented_term_frequency(term, tokenized_document):
max_count = max([term_frequency(t, tokenized_document) for t in tokenized_document])
return (0.5 + ((0.5 * term_frequency(term, tokenized_document))/max_count))
def inverse_document_frequencies(tokenized_documents):
idf_values = {}
all_tokens_set = set([item for sublist in tokenized_documents for item in sublist])
for tkn in all_tokens_set:
contains_token = map(lambda doc: tkn in doc, tokenized_documents)
idf_values[tkn] = 1 + math.log(len(tokenized_documents)/(sum(contains_token)))
return idf_values
def tfidf(documents):
tokenized_documents = [tokenize(d) for d in documents]
idf = inverse_document_frequencies(tokenized_documents)
tfidf_documents = []
for document in tokenized_documents:
doc_tfidf = []
for term in idf.keys():
tf = sublinear_term_frequency(term, document)
doc_tfidf.append(tf * idf[term])
tfidf_documents.append(doc_tfidf)
return tfidf_documents
########### END BLOG POST 1 #############
def cosine_similarity(vector1, vector2):
dot_product = sum(p*q for p,q in zip(vector1, vector2))
magnitude = math.sqrt(sum([val**2 for val in vector1])) * math.sqrt(sum([val**2 for val in vector2]))
if not magnitude:
return 0
return dot_product/magnitude
fname = "data.txt"
tokenize = lambda doc: doc.lower().split(" ")
all_documents = getFileData(fname)
sklearn_tfidf = TfidfVectorizer(norm='l2',min_df=0, use_idf=True, smooth_idf=False, sublinear_tf=True, tokenizer=tokenize)
sklearn_representation = sklearn_tfidf.fit_transform(all_documents)
tfidf_representation = tfidf(all_documents)
our_tfidf_comparisons = []
for count_0, doc_0 in enumerate(tfidf_representation):
for count_1, doc_1 in enumerate(tfidf_representation):
our_tfidf_comparisons.append((cosine_similarity(doc_0, doc_1), count_0, count_1))
skl_tfidf_comparisons = []
for count_0, doc_0 in enumerate(sklearn_representation.toarray()):
for count_1, doc_1 in enumerate(sklearn_representation.toarray()):
skl_tfidf_comparisons.append((cosine_similarity(doc_0, doc_1), count_0, count_1))
# for x in zip(sorted(our_tfidf_comparisons, reverse = True), sorted(skl_tfidf_comparisons, reverse = True)):
# print(x) | [
"abhinavagrawal1995@gmail.com"
] | abhinavagrawal1995@gmail.com |
b543756fb0b2b2c9fe0b4e538ea633c08917f216 | 1eef6887714889b345faef9a558b0ddb2d0ae0cd | /templateEnumMessageApp/views.py | ba05e3ac7119fb586de5ab9d96e2b90cf1c0d3e5 | [] | no_license | rick-liyue-huang/Django-First-App | 798bf5e65d23899f5e8017833292df870a02a8c3 | 3cb3252be984f10462c5c932c7c4d5eb2248fea7 | refs/heads/master | 2022-10-16T00:50:16.516652 | 2020-06-05T06:10:20 | 2020-06-05T06:10:20 | 264,623,494 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 786 | py |
from django.views.generic import View
# from django.http import HttpResponse
from django.shortcuts import render
from .const import MessageType
class TemplateEnumMessage(View):
    """Render ``message.html`` with a user-supplied message of a given type.

    The URL captures ``message_type`` (the name of a ``MessageType`` enum
    member); the message text itself comes from the ``message`` query
    parameter.
    """

    TEMPLATE = 'message.html'

    def get(self, request, message_type):
        """Validate the type and text, then render the page.

        On an unknown type or an empty message, renders the same template
        with an ``error`` context entry instead.
        """
        data = {}
        try:
            # Enum lookup by member name raises KeyError (and nothing else)
            # for unknown names, so catch exactly that instead of a blanket
            # Exception that could mask unrelated bugs.
            message_type_obj = MessageType[message_type]
        except KeyError as e:
            data['error'] = 'no such message type {}'.format(e)
            return render(request, self.TEMPLATE, data)
        message = request.GET.get('message', '')
        if not message:
            data['error'] = 'message can not be empty'
            return render(request, self.TEMPLATE, data)
        data['message'] = message
        data['message_type'] = message_type_obj
        return render(request, self.TEMPLATE, data)
"rick.liyue.huang@gmail.com"
] | rick.liyue.huang@gmail.com |
7d331351c1107a3a09fe4f2cf9e40465612f468b | be496d11a15cfcdc52a1d9e9a2d6a0a81756c612 | /backend/supplies/migrations/0006_merge_20190624_1447.py | 0aacbf83b74bb4d680b976996abaf1ef0b6d678a | [
"MIT"
] | permissive | nokia-wroclaw/innovativeproject-inventory-of-supplies | e96ef8d8b846c6e29723bbe2403e48b3b9c7af7e | 4a976079ba876d05f2899592fc22a2a1a8c62688 | refs/heads/master | 2022-12-08T21:23:14.883780 | 2020-12-01T08:32:50 | 2020-12-01T08:32:50 | 173,175,506 | 1 | 6 | MIT | 2022-12-08T17:45:44 | 2019-02-28T19:35:31 | JavaScript | UTF-8 | Python | false | false | 271 | py | # Generated by Django 2.1.7 on 2019-06-24 14:47
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated merge migration: reconciles the two parallel migration
    # branches (0003 and 0005) of the ``supplies`` app. It intentionally
    # performs no schema changes of its own.
    dependencies = [
        ('supplies', '0003_supply_deleted'),
        ('supplies', '0005_auto_20190514_2036'),
    ]
    operations = [
    ]
| [
"rafal.kaminski@nokia.com"
] | rafal.kaminski@nokia.com |
44cb2907841d9204c1d8dc6fb1ec7364a0e26996 | d4a90c0ec3c1029396da7e9975587a0391a02871 | /backend/comments/models.py | e8da99e56bb7ef20410342a7c63b66cddfe487ce | [] | no_license | kingu-crimson/twitter-clone | 1227a1430aed1a498a591dc4f21f8c5cc17b6e90 | 62e8d598451c5460ea2f152822b19b36f6951b5f | refs/heads/master | 2023-02-28T02:24:58.914193 | 2021-02-06T18:55:43 | 2021-02-06T18:55:43 | 332,479,003 | 0 | 2 | null | 2021-02-05T01:39:53 | 2021-01-24T15:06:27 | JavaScript | UTF-8 | Python | false | false | 630 | py | from django.db import models
from tweets.models import Tweet
from users.models import UserAccount
# Create your models here.
class Comment(models.Model):
    """A user's comment on a tweet.

    Links the authoring user and the target tweet; ``image`` holds an
    optional image reference as plain text (may be empty).
    """
    # NOTE(review): despite the ``_id`` suffix these are full ForeignKey
    # relations, not raw id columns (Django exposes the raw column as
    # ``user_id_id`` etc.) — consider renaming to ``user``/``tweet``.
    user_id = models.ForeignKey(UserAccount, on_delete=models.CASCADE, related_name='user_comments')
    tweet_id = models.ForeignKey(Tweet, on_delete=models.CASCADE, related_name='comments')
    content = models.CharField(max_length=5000)
    image = models.CharField(max_length=1000, default='', blank=True)
    created_at = models.DateTimeField(auto_now_add=True)
    def __str__(self):
        # Human-readable summary: "<user name> <tweet text> <comment text>".
        return "%s %s %s" % (self.user_id.name, self.tweet_id.content, self.content)
"mohanbali94@gmail.com"
] | mohanbali94@gmail.com |
cc126b63521c102c5ac49fbea493bc0bec64bece | 35576000f335cd3996e9f27f10ef02c55d5ec311 | /server.py | bd83043567d69e3f356bbb533d7b1cd4cbd26ade | [] | no_license | allisonkcraig/native-js-slideshow | 36fafbd9fb7e14bd06df8eca0812e5a4f4841674 | 1a0b708cf2dff1cae1d99dea8c3fb217d4f49fd6 | refs/heads/master | 2020-05-20T11:50:46.781768 | 2015-09-23T20:21:01 | 2015-09-23T20:21:01 | 42,488,263 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,089 | py | from flask import Flask, render_template
import os
from instagram import client
import jinja2
# Flask application object for the cat-slideshow site.
app = Flask(__name__)
#configure the Instagram API using environmental variables
# Credentials come from the environment so they are never committed to source.
instaConfig = {
    'client_id':os.environ.get('INSTA_CLIENT_ID'),
    'client_secret':os.environ.get('INSTA_CLIENT_SECRET'),
    'redirect_uri':os.environ.get('REDIRECT_URI')
}
# Instantiate instagram
# Client-credential API handle; no per-user OAuth login flow is needed here.
igapi = client.InstagramAPI(**instaConfig)
@app.route('/')
def index():
    """Return the index page with the cat slideshow.

    Fetches the 30 most recent public #catsofinstagram posts and passes
    them to the Jinja template.
    """
    # Below is a repo from a fellow Hackbrighter that I used as an example of how to use the python-instagram module.
    # This module allows me to make an API call wihtout needing a user to log in with OAuth
    # https://github.com/GstarGface/hide-and-cheek-design-lab/blob/master/design_lab.py
    # Renamed the second unpacking target from ``next`` (which shadowed the
    # builtin) to ``next_page``; it is the pagination cursor and is unused.
    tagged_media, next_page = igapi.tag_recent_media(count=30, tag_name='catsofinstagram')
    # Store images and data in a dictionary to pass through Jinja
    imageData = {
        'tagged' : tagged_media,
    }
    return render_template("index.html", **imageData)
if __name__ == "__main__":
    # Run the development server only when executed directly (not on import).
    app.run(debug=False)
"allison@craig.net"
] | allison@craig.net |
4160b403383a69983605517e47e579cc789b69d5 | 690a3b6506cf2fa8bb43f976614a3748afa2087a | /Algorithm/BEAKJOON/IM대비/2628_종이자르기.py | 9eab05c0c62f2a37c7f13f8fff16000fd2293b3a | [] | no_license | jes5918/TIL | d1429c14cecd94620a188ba592d8da981446417b | e2f66f7e8e77a12a5d5f265f161de43d1059af42 | refs/heads/master | 2023-08-24T22:00:33.512302 | 2021-01-12T07:42:25 | 2021-01-12T07:42:25 | 280,310,413 | 6 | 0 | null | 2021-09-22T19:37:34 | 2020-07-17T02:47:58 | Python | UTF-8 | Python | false | false | 450 | py | import sys
M, N = map(int, sys.stdin.readline().split())
K = int(sys.stdin.readline())
row = [0]
col = [0]
res = []
for _ in range(K):
method, num = map(int, sys.stdin.readline().split())
if method:
col.append(num)
else:
row.append(num)
col.append(M)
row.append(N)
col.sort()
row.sort()
for i in range(1, len(row)):
for j in range(1, len(col)):
res.append((row[i]-row[i-1])*(col[j]-col[j-1]))
print(max(res))
| [
"wjsdmltntn@gmail.com"
] | wjsdmltntn@gmail.com |
de0a5b63a015d3cb8d5629f2ee14a4b9a88ef4c7 | d737fa49e2a7af29bdbe5a892bce2bc7807a567c | /software/qt_examples/src/pyqt-official/itemviews/spreadsheet/spreadsheet_rc.py | dbb98931b9eec41ed2d9ce83657f0cc20eba536e | [
"MIT",
"CC-BY-NC-SA-4.0",
"GPL-1.0-or-later",
"GPL-3.0-only"
] | permissive | TG-Techie/CASPER | ec47dfbfd6c3a668739ff4d707572e0b853518b4 | 2575d3d35e7dbbd7f78110864e659e582c6f3c2e | refs/heads/master | 2020-12-19T12:43:53.825964 | 2020-01-23T17:24:04 | 2020-01-23T17:24:04 | 235,736,872 | 0 | 1 | MIT | 2020-01-23T17:09:19 | 2020-01-23T06:29:10 | Python | UTF-8 | Python | false | false | 1,690 | py | # -*- coding: utf-8 -*-
# Resource object code
#
# Created: Wed May 15 17:17:52 2013
# by: The Resource Compiler for PyQt (Qt v5.0.2)
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore
# Embedded PNG payload (":/images/interview.png"), emitted by pyrcc.
# Do not edit by hand: the file header warns all changes will be lost.
qt_resource_data = b"\
\x00\x00\x00\xae\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x10\x00\x00\x00\x10\x04\x03\x00\x00\x00\xed\xdd\xe2\x52\
\x00\x00\x00\x0f\x50\x4c\x54\x45\xc0\xc0\xc0\xc0\xc0\xc0\xc0\xc0\
\xc0\xff\xff\xff\x00\x00\x00\x63\x34\x8b\x60\x00\x00\x00\x03\x74\
\x52\x4e\x53\x00\x01\x02\x0d\x63\x94\xb3\x00\x00\x00\x4b\x49\x44\
\x41\x54\x78\x5e\x3d\x8a\xc1\x0d\xc0\x30\x0c\x02\x1d\x89\x01\xba\
\x8b\x3d\x40\x54\xb3\xff\x4c\x05\xa7\x0a\x0f\x74\xe6\x1c\x41\xf2\
\x89\x58\x81\xcc\x7c\x0d\x2d\xa8\x50\x06\x96\xc0\x6a\x63\x9f\xa9\
\xda\x12\xec\xd2\xa8\xa5\x40\x03\x5c\x56\x06\xfc\x6a\xfe\x47\x0d\
\xb8\x2e\x50\x39\xde\xf1\x65\xf8\x00\x49\xd8\x14\x02\x64\xfa\x65\
\x99\x00\x00\x00\x00\x49\x45\x4e\x44\xae\x42\x60\x82\
"
# Resource tree names ("images", "interview.png") in Qt's packed format.
qt_resource_name = b"\
\x00\x06\
\x07\x03\x7d\xc3\
\x00\x69\
\x00\x6d\x00\x61\x00\x67\x00\x65\x00\x73\
\x00\x0d\
\x0f\x7f\xc5\x07\
\x00\x69\
\x00\x6e\x00\x74\x00\x65\x00\x72\x00\x76\x00\x69\x00\x65\x00\x77\x00\x2e\x00\x70\x00\x6e\x00\x67\
"
# Directory/offset table tying names to the data blob above.
qt_resource_struct = b"\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x01\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x02\
\x00\x00\x00\x12\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\
"
def qInitResources():
    # Register the embedded blob with Qt's resource system.
    QtCore.qRegisterResourceData(0x01, qt_resource_struct, qt_resource_name, qt_resource_data)
def qCleanupResources():
    # Counterpart of qInitResources(): removes the registered blob.
    QtCore.qUnregisterResourceData(0x01, qt_resource_struct, qt_resource_name, qt_resource_data)
# Registration happens as a side effect of importing this module.
qInitResources()
| [
"TGTechie01@gmail.com"
] | TGTechie01@gmail.com |
b102858ebb1f6eaeb466cde5026c9d514200742c | e69098785d34a36fa23be13d487b8717205291a3 | /src/controller.py | de21d1861e93de1e82640cdb48e8eb1723090f6e | [] | no_license | srodrigo23/monitor-server | 81dfbe3c99ad851671717e796fc291f955a8a165 | 0832f3c9095a6daca9165326d1f7e148c575e05b | refs/heads/main | 2023-08-27T17:21:10.631394 | 2021-09-30T21:36:29 | 2021-09-30T21:36:29 | 393,668,750 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,793 | py | from view.main_view import MainView
from settings import Settings
from video_reader import VideoReader
from launcher import launch_server
from launcher import launch_camera
import time
import os, signal
import socket
class Controller:
    """Mediates between the UI (view), settings, the video reader and the
    external camera/server processes."""

    def __init__(self):
        self.view = None  # main view; injected later via set_view()
        self.settings = Settings()
        self.num_cams = self.settings.get_num_cams()
        # The reader starts immediately so the frame queues serve the
        # placeholder video until real cameras connect.
        self.video_reader = VideoReader(self.settings.get_empty_video(), self.num_cams)
        self.video_reader.start()  # to change
        self.server_process = None
        self.server_connected = False

    def kill_process(self, pid):
        """
        Method to kill server process turning off the server.

        NOTE(review): signals ``pid + 1``, not ``pid`` — presumably the
        launcher's actual child differs from the recorded pid by one.
        Fragile assumption; confirm against launcher.launch_server().
        """
        os.kill(pid + 1, signal.SIGTERM)

    def connect_to_server(self):
        """
        Method to connect to the server over TCP, retrying until it accepts.

        Shows an error dialog on the view when no server process was launched.
        """
        if self.server_process:
            while not self.server_connected:
                try:
                    self.socket = socket.socket(
                        socket.AF_INET, socket.SOCK_STREAM)
                    self.socket.connect((self.settings.get_host_address(), self.settings.get_port()))
                    time.sleep(2.0)
                except socket.error as e:
                    # Fixed: the original called the undefined name
                    # ``print_log`` here, raising NameError on any connection
                    # failure. Also pause briefly so the retry loop does not
                    # busy-spin while the server is still booting.
                    print(f'Connection don\'t reached {str(e)}')
                    time.sleep(0.5)
                else:
                    self.server_connected = True
                    print('connected')
                    # TODO listen messages
        else:
            self.view.server_panel.show_error_message("Error connecting", "Imposible to connect to the server")

    def launch_camera_process(self, video_name):
        """
        Method to run a camera process and return process id to show in the screen
        """
        sys_path = self.settings.get_system_camera_path()
        videos_folder_path = self.settings.get_videos_path()
        self.camera_process = launch_camera(sys_path,
            os.path.join(videos_folder_path, video_name))
        return self.camera_process.pid

    def launch_server_process(self):
        """
        Method to run a server and return process id to show in the screen
        """
        sys_path = self.settings.get_system_server_path()
        self.server_process = launch_server(sys_path)
        return self.server_process.pid

    def get_frame(self, id_camera):
        """Return the most recent frame queued for camera *id_camera*."""
        return self.video_reader.get_frame_from_queue(id_camera)

    def get_videos(self):
        """Return the configured list of available video files."""
        return self.settings.get_videos()

    def set_view(self, view):
        """Attach the main view so the controller can drive it."""
        self.view = view

    def get_time(self):
        """Return the current wall-clock time formatted as 'HH:MM:SS'."""
        return time.strftime('%H:%M:%S')

    def get_num_cams(self):
        """Return the configured number of cameras."""
        return self.num_cams
| [
"rodrigosergio93@gmail.com"
] | rodrigosergio93@gmail.com |
36533c180a2ec9b995e9d793f8ba329dcf6cffcf | d53b7b59471d7240327c80eebd0d5cfe9fd3e4bc | /Crash Course/ch11-Testing/test_name_function.py | 0ca89904afeb74624055becc2f1694602a56d945 | [] | no_license | jpc0016/Python-Examples | 5b2ed8b799dd6492d664eb7714cfe4d97b486dc5 | b93c3cd5e1c1b91079db2281137a8451f2566885 | refs/heads/master | 2021-06-16T12:20:48.439503 | 2021-04-04T15:24:32 | 2021-04-04T15:24:32 | 187,428,168 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,934 | py | # CH11 Example
#
# test_name_function.py
#
# Import unittest module to run test cases on get_formatted_name()
import unittest
from name_function import get_formatted_name
# Create class that contains series of tests for get_formatted_name(). Can be
# named anything. Must inherit from unittest.TestCase class in module unittest.
class NameTestCase(unittest.TestCase):
    """Tests for name_function.py"""
    def test_first_last_name(self):
        """Do names like 'Janis Joplin' work?"""
        formatted_name = get_formatted_name('janis', 'joplin')
        # Asserting that formatted_name equals 'Janis Joplin'
        self.assertEqual(formatted_name, 'Janis Joplin')
    # Test for middle name functionality
    def test_first_last_middle_name(self):
        """Do names like 'Wolfgang Amadeus Mozart' work?"""
        # Middle name is passed third; get_formatted_name is expected to slot
        # it between first and last in the title-cased result.
        formatted_name = get_formatted_name('wolfgang', 'mozart', 'amadeus')
        self.assertEqual(formatted_name, 'Wolfgang Amadeus Mozart')
# Discovers and runs the test methods above when this file is executed.
unittest.main()
# Output for first and last name only:
# .
# ----------------------------------------------------------------------
# Ran 1 test in 0.000s
#
# OK
# Changing the function in name_function.py to accept a middle name causes the test
# case to fail. Output is below:
# E
# ======================================================================
# ERROR: test_first_last_name (__main__.NameTestCase)
# Do names like 'Janis Joplin' work?
# ----------------------------------------------------------------------
# Traceback (most recent call last):
# File "test_name_function.py", line 16, in test_first_last_name
# formatted_name = get_formatted_name('janis', 'joplin')
# TypeError: get_formatted_name() missing 1 required positional argument: 'last'
#
# ----------------------------------------------------------------------
# Ran 1 test in 0.000s
#
# FAILED (errors=1)
| [
"jcox@battlestation.com"
] | jcox@battlestation.com |
1e106f2c8e6c57eaa478d34ce7183f5c95bbc13b | 3471b23756df18fb291de98a6a6571d6a8f6ae0f | /ipacdashboard/backendDash/social2/urls.py | e54312ca901281969f5462b56ddb499d22182809 | [] | no_license | yadavshashwat/ipacprojects | adef57c75d85b1fda3ee97dc27f0988fadf291b3 | dde58cbadfd1291a284546ed26229fd2ce183c5b | refs/heads/master | 2022-12-26T02:47:00.570514 | 2020-10-17T15:33:54 | 2020-10-17T15:33:54 | 268,454,578 | 0 | 1 | null | 2020-09-17T21:50:24 | 2020-06-01T07:31:50 | PHP | UTF-8 | Python | false | false | 440 | py | from django.conf.urls import patterns, include, url
from django.conf import settings
# URL routes for the social2 app.
# NOTE(review): string view paths ('social2.views.temp') are the old
# Django <= 1.9 convention; newer Django requires passing the imported
# view callables instead.
urlpatterns=[
    url(r'temp/$', 'social2.views.temp', name="temp"),
    url(r'login/$', 'social2.views.login', name="login"),
    url(r'post_user_token/$', 'social2.views.xhr_user_token', name="xhr_user_token"),
    url(r'post_page_data/$', 'social2.views.xhr_page_data', name="xhr_page_data"),
    url(r'pages/$', 'social2.views.page', name='page'),
]
| [
"y.shashwat@gmail.com"
] | y.shashwat@gmail.com |
2ddfb12cb3b096683e18865806a83cbe395abc6e | d0a1ddf0403b1b25f094902477897f3dcc282f8e | /quotebot.py | 90f9db11090a9db4b0cd9c0151aee356c267a331 | [] | no_license | cameronkc/discord-bot | b205b0657e82c1fe7624175aca5ae9d8ee359c63 | c7f37dba486af18ef0bb9bb13071c63928dd8e66 | refs/heads/main | 2023-08-07T12:48:11.308255 | 2021-09-20T22:36:22 | 2021-09-20T22:36:22 | 408,619,079 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,842 | py | import discord
from discord import message
from discord.ext import commands
import random
import os
#initialize bot
#Set command prefix
# All commands are invoked with a leading '.'; '.bothelp' lists them.
bot = commands.Bot(command_prefix = '.', description='.bothelp for commands')
#create event
@bot.event
async def on_ready():
    """Gateway connection established: set the bot's presence and log it."""
    await bot.change_presence(status=discord.Status.idle, activity=discord.Game('your mom xD'))
    print("Bot ready...")
#test command
@bot.command()
async def ping(ctx):
    """Reply with the bot's current gateway latency (in seconds)."""
    # Forward the latency value straight to the invoking channel.
    await ctx.send(bot.latency)
@bot.event
#print cooldown error
async def on_command_error(ctx, error):
    """Global command-error hook: tells the user how long a cooldown lasts.

    NOTE(review): every other error type is silently ignored here, which can
    hide real bugs in command callbacks — consider re-raising unknown errors.
    """
    if isinstance(error, commands.CommandOnCooldown):
        msg = "**I'm on cooldown**, please wait {:.2f}s daddy :)".format(error.retry_after)
        await ctx.send(msg)
@bot.command()
#command list
async def bothelp(ctx):
    """Send the list of available commands to the channel."""
    # Single multi-line message; adjacent string literals are concatenated.
    await ctx.send(
        "Here are a list of my commands:"
        "\n"
        "\n**.quote** - generate a random quote"
        "\n**.addquote** - add your own quote"
        "\n**.lastquote** - print the last added quote"
        "\n**.randomnumber** - print a random number"
        "\n**.rps** - play a game of rock, paper, scissors against me"
    )
@bot.command()
#set quote command
async def addquote(ctx):
    """Prompt for a quote and append it to quotes.txt.

    Bug fix: the original called ``text_file.close`` without parentheses, so
    the file handle was never explicitly closed; a ``with`` block now
    guarantees the write is flushed and the handle released.
    """
    await ctx.send("Please enter your quote in the following format - 'quote.... - author'")
    # Wait for the next incoming message and persist its content.
    msg = await bot.wait_for("message")
    with open("quotes.txt", "a") as text_file:
        text_file.write(f"\n{msg.content}")
    await ctx.send("Quote Added!")
@bot.command()
#retrieve last added
async def lastquote(ctx):
    """Send the most recently added quote from quotes.txt."""
    # ``with`` guarantees the handle is closed even if sending raises;
    # the original left the file open in that case.
    with open("quotes.txt", "r") as f:
        lines = f.readlines()
    await ctx.send(lines[-1])
@bot.command()
async def randomnumber(ctx):
    """Send a random integer between 0 and 100 (inclusive)."""
    value = random.randint(0, 100)
    await ctx.send(f"The random number is {value}")
# Legal moves for rock-paper-scissors.
moves = ['rock', 'paper', 'scissors']

#rock paper scissors
@bot.command()
async def rps(ctx):
    """Play one round of rock-paper-scissors against the bot.

    Fixes over the original: 'quit' was compared against the Message object
    instead of its text (so it never matched); any unrecognised input fell
    through to "you win"; the unused ``game`` flag is removed.
    """
    await ctx.send("Game Started: Please type 'rock', 'paper', or 'scissors'")
    player = await bot.wait_for("message")
    choice = player.content
    if choice == 'quit':
        await ctx.send("Quitting game, see you next time!")
        return
    if choice not in moves:
        await ctx.send("I don't know that move - please type 'rock', 'paper', or 'scissors'")
        return
    computer = random.choice(moves)
    # Map each move to the move it beats; used to detect a player loss.
    beats = {'scissors': 'paper', 'paper': 'rock', 'rock': 'scissors'}
    if computer == choice:
        await ctx.send(f"The bot chose {computer}, it's a tie!")
    elif beats[computer] == choice:
        await ctx.send(f"The bot chose {computer}, you lost!")
    else:
        await ctx.send(f"The bot chose {computer}, you win!")
#set cooldown: at most one use per user every 3 seconds
@bot.command()
@commands.cooldown(1, 3, commands.BucketType.user)
#quote command
async def quote(ctx):
    """Send a random quote from quotes.txt."""
    # ``with`` closes the file even if sending raises; the original leaked
    # the handle in that case.
    with open("quotes.txt", "r") as f:
        lines = f.readlines()
    await ctx.send(random.choice(lines))
#with open('badwords.txt', 'r') as f:
# global badwords
# words = f.read()
# badwords = words.split()
#
#with open('greetings.txt', 'r') as f:
# global greetings
# words = f.read()
# greetings = words.split()
#@bot.event
#async def on_message(message):
# if message.author == bot.user or message.author.bot:
# return
#
# for word in badwords:
# if word in message.content:
# await message.reply(f"Hey {message.author.mention}, don't fucking use that language you fat nasty cunt", mention_author=True)
# for word in greetings:
# if word in message.content:
# await message.reply(f"Hello {message.author.mention}, ur so sexy ;)", mention_author=True)
# await bot.process_commands(message)
#run event
# SECURITY FIX: the bot token was hard-coded (and therefore leaked) in source
# control; a committed token must be revoked and regenerated. Read the
# replacement token from the environment instead of embedding it.
bot.run(os.environ["DISCORD_BOT_TOKEN"])
| [
"cameronleekcmo@gmail.com"
] | cameronleekcmo@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.