blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
281
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
57
| license_type
stringclasses 2
values | repo_name
stringlengths 6
116
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 313
values | visit_date
timestamp[us] | revision_date
timestamp[us] | committer_date
timestamp[us] | github_id
int64 18.2k
668M
⌀ | star_events_count
int64 0
102k
| fork_events_count
int64 0
38.2k
| gha_license_id
stringclasses 17
values | gha_event_created_at
timestamp[us] | gha_created_at
timestamp[us] | gha_language
stringclasses 107
values | src_encoding
stringclasses 20
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 4
6.02M
| extension
stringclasses 78
values | content
stringlengths 2
6.02M
| authors
listlengths 1
1
| author
stringlengths 0
175
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
c144c9ebf7ac40827af104a5950dc340e65e4004
|
83d947dd8683ed447b6bdb9d15683109ca0195bc
|
/git_sew/ui/cli/containers/App.py
|
b38e83e444cd4ff51285d171725911c2c7266b75
|
[
"MIT"
] |
permissive
|
fcurella/git-sew
|
dda6b84a3b522bb1fc5982bfa610b174159cb691
|
920bc26125a127e257be3e37a9bf10cb90aa5368
|
refs/heads/master
| 2020-07-23T14:51:39.476225
| 2019-09-09T22:56:11
| 2019-09-10T15:45:11
| 207,599,791
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 475
|
py
|
import urwid
from urwid_pydux import ConnectedComponent
from git_sew.ui.cli.components.gitlogs import Footer, Loading
class App(ConnectedComponent):
    """Top-level CLI container: shows the widget stored in the Redux-style store."""

    def map_state_to_props(self, state, own_props):
        # Expose only the widget kept under the "box" key of the store.
        return {"body": state["box"]}

    def render_component(self, props):
        # Until a body widget arrives, show the Loading placeholder.
        body = Loading() if props["body"] is None else props["body"]
        frame = urwid.Frame(body, footer=Footer())
        return urwid.Padding(frame, left=2, right=2)
|
[
"flavio.curella@gmail.com"
] |
flavio.curella@gmail.com
|
921bcd8a926822b2b2751dc1a9451a069fca8ce1
|
b8b3d78501b372ea3b721a286f22208069dbb5ff
|
/fexp/cache/cache_multiple_files.py
|
5601ef84cfc8364993d6a8817ea8b0d29fc88f25
|
[] |
no_license
|
vitoralbiero/drl_action_unit_detection
|
0ddce49c10b27925c712ef367f405650be58f5bd
|
d8eb9a35e74483ce581b637f761f343607f255bf
|
refs/heads/master
| 2020-05-02T08:58:13.639089
| 2019-06-13T17:16:52
| 2019-06-13T17:16:52
| 177,856,625
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 762
|
py
|
from .file_cache_manager import FileCacheManager
import numpy as np
from os import makedirs, path
class CacheMultipleFiles(FileCacheManager):
    """File cache that stores each entry as its own .npy file in one directory."""

    # Default directory; overwritten per instance in __init__.
    _directory_path = ''

    def __init__(self, directory_path):
        """Remember *directory_path* and create the directory if missing.

        Uses makedirs(..., exist_ok=True) to avoid the check-then-create
        race of the previous path.exists() + makedirs() pair.
        """
        self._directory_path = directory_path
        makedirs(directory_path, exist_ok=True)

    def contains(self, key):
        """Return True if an entry for *key* exists on disk."""
        key = self._file_path(key)
        return path.exists(key)

    def get(self, key):
        """Load and return the cached numpy array for *key*."""
        key = self._file_path(key)
        return np.load(key)

    def add(self, key, value):
        """Store numpy array *value* under *key* (pickling disabled for safety)."""
        key = self._file_path(key)
        np.save(key, value, allow_pickle=False)

    def _file_path(self, key):
        """Map *key* to its .npy file path inside the cache directory."""
        file_name = key + '.npy'
        return path.join(self._directory_path, file_name)
|
[
"vitor_albiero@outlook.com"
] |
vitor_albiero@outlook.com
|
56ddc0dc1295c7e6d63d54f21fd0051d0310496f
|
db2898a94bafdba44b6d7639116a5794df13cf97
|
/lists/urls.py
|
39146046b0b9d855e86df929153a63b9f831f76e
|
[] |
no_license
|
martinnobis/tddwp-book
|
8287c3a9dc1d6c90c09887e0a6695e0afce679bb
|
4a35ba75e133d222321370c4b8955b6d472f6b42
|
refs/heads/master
| 2021-09-05T13:33:34.932540
| 2018-01-28T08:16:20
| 2018-01-28T08:16:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,066
|
py
|
"""superlists URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from lists import views
# Routes for the lists app; all of these are mounted under /lists/ by the
# project URLconf (e.g. /lists/new, /lists/3/).
urlpatterns = [
    # MN: home_page is a function in views.py
    # All of these start with a /lists/ (e.g. /lists/new)
    url(r'^new$', views.new_list, name='new_list'),
    url(r'^(\d+)/$', views.view_list, name='view_list'),  # MN: (\d+) captures the numeric list id, passed to the view as a positional arg
    url(r'^users/(.+)/$', views.my_lists, name='my_lists'),
]
|
[
"martin.nobis@gmail.com"
] |
martin.nobis@gmail.com
|
46b665075a25c4fcc68e25e503e45bad689ec5ea
|
db38fcc061cad5201e7c281d66cb3cedd937a183
|
/tail.py
|
34a331cbedca5d877c03750bc9a2f247210243cc
|
[] |
no_license
|
Krishnasaianumandla/Dissection-comments
|
305d3547692268ec3e6aa71ad1cd2b0773068fd9
|
33f86dabac065440a124d6865d574610ab1e7ccb
|
refs/heads/main
| 2023-08-10T17:05:12.910575
| 2021-09-14T14:46:24
| 2021-09-14T14:46:24
| 406,406,802
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,163
|
py
|
"""Implementing the tail shell command in python."""
"""
Imports the sys library so that we can use the fuctions in it
"""
import sys
"""
from lib.helper module imports tail and readfile libraries
"""
from lib.helper import tail, readfile
"""
Value None is assigned to the variable TEXT of NoneType
"""
TEXT = None
"""
Number of command line arguments in the list is decremented by 1 and the value is
assigned to ARG_CNT variable
"""
ARG_CNT = len(sys.argv) - 1
"""
sys.stdin.read function reads lines from the console and assignes
them to TEXT variable if ARG_CNT = 0
"""
if ARG_CNT == 0:
TEXT = sys.stdin.read()
"""
Second argument on the command line is assigned to filename variable,
readfile function takes filename as argument and return value is
assigned to TEXT variable if ARG_CNT = 1
"""
if ARG_CNT == 1:
filename = sys.argv[1]
TEXT = readfile(filename)
"""
Prints the string argument in the print function if ARG_CNT is greater than 1
"""
if ARG_CNT > 1:
print("Usage: tail.py <file>")
"""
Calls the tail function with TEXT variable as argument and return value is printed
"""
print(tail(TEXT))
|
[
"noreply@github.com"
] |
noreply@github.com
|
745f90f519853d1de410ac75ee637f5d3b14f3a6
|
070b693744e7e73634c19b1ee5bc9e06f9fb852a
|
/python/problem-tree/maximum_width_of_binary_tree.py
|
a18203e5b1c59a32be6b1e9d83fef22553353874
|
[] |
no_license
|
rheehot/practice
|
a7a4ce177e8cb129192a60ba596745eec9a7d19e
|
aa0355d3879e61cf43a4333a6446f3d377ed5580
|
refs/heads/master
| 2021-04-15T22:04:34.484285
| 2020-03-20T17:20:00
| 2020-03-20T17:20:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,855
|
py
|
# https://leetcode.com/problems/maximum-width-of-binary-tree
# https://leetcode.com/problems/maximum-width-of-binary-tree/solution
from TreeNode import TreeNode
class Solution:
    """Attempts at LeetCode 662 (maximum width of a binary tree).

    The first two methods are kept as failed attempts ("Wrong Answer");
    widthOfBinaryTree is the accepted one.  All three still contain
    leftover debug print() calls.
    """

    # Wrong Answer
    def widthOfBinaryTree0(self, root):
        # BFS keeping the raw node list per level and trimming leading /
        # trailing None entries before measuring.
        if root is None:
            return 0
        print(root)
        width, nodes, curDepth, q = 0, [], 0, [(0, root)]
        while q:
            depth, node = q.pop(0)
            if depth != curDepth:
                # Entered a new level: measure the previous one first.
                curDepth = depth
                while nodes[0] is None:
                    nodes.pop(0)
                while nodes[-1] is None:
                    nodes.pop()
                width = max(width, len(nodes))
                nodes = [node]
            else:
                nodes.append(node)
            if node:
                if node.left or node.right:
                    q.append((depth + 1, node.left))
                    q.append((depth + 1, node.right))
        print(nodes)
        # Measure the final level the loop never closed.
        while nodes[0] is None:
            nodes.pop(0)
        while nodes[-1] is None:
            nodes.pop()
        width = max(width, len(nodes))
        return width

    # Wrong Answer
    def widthOfBinaryTree1(self, root):
        # Heap-style position numbering: left child 2*pos, right 2*pos + 1.
        if root is None:
            return 0
        if root.left is None and root.right is None:
            return 1

        def getWidth(width, minW, maxW):
            # Width of the level spanning positions [minW, maxW].
            if minW == maxW:
                return max(width, 1)
            return max(width, maxW - minW + 1)

        width, curDepth, minW, maxW, q = 0, 0, 0, 0, [(0, 1, root)]
        while q:
            depth, pos, node = q.pop(0)
            print(depth, pos, node.val)
            if curDepth != depth:
                width = getWidth(width, minW, maxW)
                curDepth, minW, maxW = depth, pos, pos
            else:
                maxW = pos
            if node.left:
                q.append((depth + 1, pos * 2, node.left))
            if node.right:
                q.append((depth + 1, pos * 2 + 1, node.right))
        width = getWidth(width, minW, maxW)
        return width

    # runtime; 40ms, 100.00%
    # memory; 13MB, 100.00%
    def widthOfBinaryTree(self, root):
        # Accepted: record min/max heap position per depth, answer is the
        # largest span over all depths.
        if root is None:
            return 0
        nodesDict, prevDepth, q = {}, -1, [(0, 1, root)]
        while q:
            depth, pos, node = q.pop(0)
            if prevDepth != depth:
                prevDepth = depth
                nodesDict[depth] = [pos, pos]
            else:
                nodesDict[depth][1] = pos
            if node.left:
                q.append((depth + 1, pos * 2, node.left))
            if node.right:
                q.append((depth + 1, pos * 2 + 1, node.right))
        print(nodesDict)
        return max([maxPos - minPos + 1 for minPos, maxPos in nodesDict.values()])
# Ad-hoc test harness: build sample trees and compare the accepted
# solution's answer against the expected width for each.
s = Solution()
root1 = TreeNode(1)
root1.left = TreeNode(3)
root1.right = TreeNode(2)
root1.left.left = TreeNode(5)
root1.left.right = TreeNode(3)
root1.right.right = TreeNode(9)
root2 = TreeNode(1)
root2.left = TreeNode(3)
root2.left.left = TreeNode(5)
root2.left.right = TreeNode(3)
root3 = TreeNode(1)
root3.left = TreeNode(3)
root3.right = TreeNode(2)
root3.left.left = TreeNode(5)
# root4: sparse tree whose widest level spans 8 heap positions.
root4 = TreeNode(1)
root4.left = TreeNode(1)
root4.right = TreeNode(1)
root4.left.left = TreeNode(1)
root4.right.right = TreeNode(1)
root4.left.left.left = TreeNode(1)
root4.right.right.right = TreeNode(1)
root5 = TreeNode(1)
root6 = TreeNode(1)
root6.left = TreeNode(2)
root7 = TreeNode(1)
root7.left = TreeNode(3)
root7.right = TreeNode(2)
root7.left.left = TreeNode(5)
# (tree, expected width) pairs.
data = [(root1, 4),
        (root2, 2),
        (root3, 2),
        (root4, 8),
        (root5, 1),
        (root6, 1),
        (root7, 2),
        ]
for root, expected in data:
    real = s.widthOfBinaryTree(root)
    print('{}, expected {}, real {}, result {}'.format(root, expected, real, expected == real))
|
[
"agapelover4u@yahoo.co.kr"
] |
agapelover4u@yahoo.co.kr
|
ec70ce257331cb0ed165f2de96058639be4d4c96
|
6cb2f739aaf10feee77ad335de342146c10bfb5d
|
/pong_deep_rl.py
|
66ba0312716cd3ae068ecc4dc41203a543256027
|
[] |
no_license
|
Ultraleow/the-unbeatable-pong
|
b4c3af260a1431850610d94b4dd7a257ab53316b
|
89b7ac5d632986ade2fc2a5a6caa231ed29bb34b
|
refs/heads/main
| 2023-02-03T03:03:57.711572
| 2020-12-19T11:52:54
| 2020-12-19T11:52:54
| 323,026,707
| 5
| 0
| null | 2020-12-20T08:29:01
| 2020-12-20T08:29:00
| null |
UTF-8
|
Python
| false
| false
| 3,385
|
py
|
import gym
from tensorflow.keras.layers import Dense
from tensorflow.keras.models import Sequential
import numpy as np
UP_ACTION = 2
DOWN_ACTION = 3
env = gym.make("Pong-v0")
def create_model():
    """Build the policy network: 6400-pixel input -> 200 ReLU units -> sigmoid P(up)."""
    model = Sequential()
    model.add(Dense(units=200, input_dim=80 * 80, activation='relu', kernel_initializer='glorot_uniform'))
    model.add(Dense(units=1, activation='sigmoid', kernel_initializer='RandomNormal'))
    # Binary crossentropy matches the single-sigmoid "probability of UP" output.
    model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
    return model
def preprocessing(I):
    """Prepro 210x160x3 uint8 frame into 6400 (80x80) 1D float vector."""
    I = I[35:195]  # crop the playing field
    I = I[::2, ::2, 0]  # downsample by factor of 2, keep a single channel
    I[I == 144] = 0  # erase background (background type 1)
    I[I == 109] = 0  # erase background (background type 2)
    # I[I != 0] = 1  # everything else (paddles, ball) just set to 1
    I = I / 255
    # BUG FIX: the np.float alias was deprecated in NumPy 1.20 and later
    # removed; the builtin float (== float64) is the correct spelling.
    return I.astype(float).ravel()  # flattens
def discount_rewards(r, gamma):
    """Take 1D float array of rewards and compute discounted, normalized reward.

    :param r: per-step rewards (list or array)
    :param gamma: discount factor in [0, 1)
    :return: float64 array, zero-mean and unit-variance
    """
    # BUG FIX: force a float dtype.  With an integer reward list the old
    # np.array(r)/np.zeros_like(r) pair produced int arrays, truncating the
    # discounted values and making the in-place float normalization below fail.
    r = np.asarray(r, dtype=np.float64)
    discounted_r = np.zeros_like(r)
    running_add = 0.0
    # we go from last reward to first one so we don't have to do exponentiations
    for t in reversed(range(0, r.size)):
        if r[t] != 0:
            running_add = 0.0  # if the game ended (in Pong), reset the reward sum
        # Horner's method: accumulate the discounted sum in one pass.
        running_add = running_add * gamma + r[t]
        discounted_r[t] = running_add
    discounted_r -= np.mean(discounted_r)  # normalizing the result
    discounted_r /= np.std(discounted_r)  # idem
    return discounted_r
def main():
    """Policy-gradient training loop for Pong (runs forever; Ctrl-C to stop)."""
    model = create_model()
    observation = env.reset()
    prev_input = None
    gamma = 0.99  # discount factor for discount_rewards
    x_train, y_train, rewards = [], [], []
    reward_sum = 0
    episode_nb = 0
    while (True):
        # preprocess the observation, set input as difference between images
        cur_input = preprocessing(observation)
        x = cur_input - prev_input if prev_input is not None else np.zeros(80 * 80)
        prev_input = cur_input
        # forward the policy network and sample action according to the proba distribution
        proba = model.predict(np.expand_dims(x, axis=1).T)
        action = UP_ACTION if np.random.uniform() < proba else DOWN_ACTION
        y = 1 if action == 2 else 0  # 0 and 1 are our labels
        # log the input and label to train later
        x_train.append(x)
        y_train.append(y)
        # do one step in our environment
        observation, reward, done, info = env.step(action)
        env.render()
        rewards.append(reward)
        reward_sum += reward
        # end of an episode
        if done:
            print('At the end of episode', episode_nb, 'the total reward was :', reward_sum)
            # increment episode number
            episode_nb += 1
            # training: discounted rewards weight each sample's loss
            model.fit(x=np.vstack(x_train), y=np.vstack(y_train), verbose=1,
                      sample_weight=discount_rewards(rewards, gamma))
            # Reinitialization
            x_train, y_train, rewards = [], [], []
            observation = env.reset()
            reward_sum = 0
            prev_input = None


if __name__ == '__main__':
    main()
|
[
"nghaosiong98@gmail.com"
] |
nghaosiong98@gmail.com
|
c64515dd8b96864dfa14c9209203695e3785e7f5
|
b72c42ba5edfb4f2ac743dab42575d8940c44e7d
|
/Slide_5.py
|
beee47a9bd6fc6f7bc673fe650329a6579301bf7
|
[] |
no_license
|
DimanLux/PYTHON100
|
67b6c368ee1a0dc7f8650c7f11806aee75650199
|
e519385c300190308ec643aad6a9ed8f402afaee
|
refs/heads/master
| 2020-04-19T13:35:44.540257
| 2019-02-03T12:31:45
| 2019-02-03T12:31:45
| 168,221,689
| 0
| 0
| null | 2019-01-29T20:15:21
| 2019-01-29T20:15:21
| null |
UTF-8
|
Python
| false
| false
| 288
|
py
|
def math_1(x, y, z, f):
    """Evaluate the composite arithmetic expression from slide 5 and return it."""
    numerator = ((x * (y - x)) / z) + x + ((f + z) / (f ** y)) - ((z - f) / z)
    denominator = ((z + f) / z ** y) - f
    return numerator / denominator
if __name__ == "__main__":
print("output1 = " + str(math_1(5, 2.3, 2, 7.8)))
print("output2 = " + str(math_1(1234, 37872, 1231, 12314)))
|
[
"dm_0585@mail.ru"
] |
dm_0585@mail.ru
|
dde29523d724243a2dd12fdf0f4dccb9869f5742
|
3a5694978c42269129ab956bf8d44602709bd51f
|
/backend/auth/tests/test_urls.py
|
ce34fc922a3090d5a1121ff0fca1fd61d5763ea2
|
[] |
no_license
|
JimmeeX/react-native-django
|
f11dfd2425b6a4ad2bf98ff92f21c38a211090c9
|
6460bda8565ef244544ea2a8c900eebb1c6dab17
|
refs/heads/master
| 2023-08-14T06:58:25.010265
| 2020-11-18T09:19:45
| 2020-11-18T09:19:45
| 313,874,641
| 0
| 0
| null | 2021-09-22T19:40:12
| 2020-11-18T08:50:09
|
TypeScript
|
UTF-8
|
Python
| false
| false
| 355
|
py
|
from django.test import SimpleTestCase
class TestUrls(SimpleTestCase):
    """Placeholder URL tests from the tutorial (earlier cases left commented out)."""

    # def test_list_url_is_resolved(self):
    #     self.assertEquals(1, 2, msg="Some message?")
    # def test_failed_case(self):
    #     self.assertEquals(1, 2, msg="Test failed case")
    def test_success_case(self):
        # BUG FIX: assertEquals is a deprecated alias removed in Python 3.12;
        # assertEqual is the supported spelling.
        self.assertEqual(2, 2, msg="Test success case")
|
[
"jameslin3118@gmail.com"
] |
jameslin3118@gmail.com
|
79041edf09726eb92000831b1ec4ce4f540981a1
|
b112367ff243a616bd41596ba50a51c215b977f2
|
/taobao_scrapper_extreme.py
|
84a003ef6f199ed7523b05473d149ac0c3610842
|
[
"MIT"
] |
permissive
|
marcozzxx810/TaobaoWebscrapper
|
cabd3da70befd0fc5be55f4444b2598e81614ad0
|
c7c50731478d733023e09336c724aef9a3342687
|
refs/heads/master
| 2021-02-15T10:53:47.046573
| 2020-03-04T13:06:48
| 2020-03-04T13:06:48
| 244,891,566
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,222
|
py
|
import os
import re
import json
import time
import random
import requests
import pandas as pd
from retrying import retry
import openpyxl
from login import TaoBaoLogin
requests.packages.urllib3.disable_warnings()
req_session = requests.Session()
GOODS_EXCEL_PATH = 'taobao_goods.xlsx'
DATE=time.strftime("%d-%b-%Y", time.gmtime())
class GoodsSpider:
    """Scrape Taobao search results for one query and append them to an Excel workbook.

    Relies on module-level state: `req_session` (logged-in requests session),
    `GOODS_EXCEL_PATH` (workbook path) and `DATE` (sheet-name prefix).
    """

    def __init__(self, q):
        """Log in through TaoBaoLogin and remember the search query *q*."""
        self.q = q
        self.timeout = 15
        self.goods_list = []
        tbl = TaoBaoLogin(req_session)
        tbl.login()

    @retry(stop_max_attempt_number=3)
    def spider_goods(self, page):
        """Fetch one result page (44 items per page) and save it to Excel.

        :param page: taobao page (0-based)
        :raises RuntimeError: when the embedded g_page_config JSON is missing
        """
        s = page * 44
        search_url = f'https://s.taobao.com/search?initiative_id=tbindexz_20170306&ie=utf8&spm=a21bo.2017.201856-taobao-item.2&sourceId=tb.index&search_type=item&ssid=s5-e&commend=all&imgfile=&q={self.q}&suggest=history_1&_input_charset=utf-8&wq=biyunt&suggest_query=biyunt&source=suggest&bcoffset=4&p4ppushleft=%2C48&s={s}&data-key=s&data-value={s + 44}'
        # not using coz i dont have proper (proxies kept for reference, unused)
        proxies = {'http': '118.24.172.149:1080',
                   'https': '60.205.202.3:3128'
                   }
        headers = {
            'referer': 'https://www.taobao.com/',
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36'
        }
        response = req_session.get(search_url, headers=headers,
                                   verify=False, timeout=self.timeout)
        # print(response.text)
        # The result data lives in an inline `g_page_config = {...};` script.
        goods_match = re.search(r'g_page_config = (.*?)}};', response.text)
        if not goods_match:
            print('提取页面中的数据失败!')
            print(response.text)
            raise RuntimeError
        goods_str = goods_match.group(1) + '}}'
        goods_list = self._get_goods_info(goods_str)
        self._save_excel(goods_list)

    def _get_goods_info(self, goods_str):
        """
        analysis json file and grab the data we needed
        :param goods_str: JSON string extracted from the search page
        :return: list of dicts with title/price/location/sales/comment_url
        """
        goods_json = json.loads(goods_str)
        goods_items = goods_json['mods']['itemlist']['data']['auctions']
        goods_list = []
        for goods_item in goods_items:
            goods = {'title': goods_item['raw_title'],
                     'price': goods_item['view_price'],
                     'location': goods_item['item_loc'],
                     'sales': goods_item['view_sales'],
                     'comment_url': goods_item['comment_url']}
            goods_list.append(goods)
        return goods_list

    def _save_excel(self, goods_list):
        """
        save json to excel, appending to the sheet named DATE + query

        NOTE(review): DataFrame.append and ExcelWriter.save were removed in
        pandas 2.0; this code requires the older pandas it was written for.
        :param goods_list: goods data
        :return:
        """
        if os.path.exists(GOODS_EXCEL_PATH):
            df = pd.read_excel(GOODS_EXCEL_PATH)
            df = df.append(goods_list)
        else:
            df = pd.DataFrame(goods_list)
        if not os.path.exists(GOODS_EXCEL_PATH):
            # Fresh workbook: a plain writer is enough.
            writer = pd.ExcelWriter(GOODS_EXCEL_PATH)
            df.to_excel(excel_writer=writer, columns=['title', 'price', 'location', 'sales', 'comment_url'], index=False,
                        encoding='utf-8', sheet_name=DATE + self.q)
            writer.save()
            writer.close()
        else:
            # Existing workbook: attach to it so other sheets survive.
            excel = openpyxl.load_workbook(GOODS_EXCEL_PATH)
            writer = pd.ExcelWriter(GOODS_EXCEL_PATH, engine='openpyxl')
            writer.book = excel
            writer.sheets = dict((ws.title, ws) for ws in excel.worksheets)
            df.to_excel(writer, columns=['title', 'price', 'location', 'sales', 'comment_url'], index=False,
                        encoding='utf-8', sheet_name=DATE + self.q)
            writer.save()
            writer.close()

    def patch_spider_goods(self):
        """
        patch goods slowly: scrape two pages, asking before rewriting an
        existing sheet for this query/date
        :return:
        """
        print(DATE + self.q)
        if os.path.exists(GOODS_EXCEL_PATH):
            excel_file = openpyxl.load_workbook(GOODS_EXCEL_PATH)
            # print(excel_file.sheetnames())
            if (DATE + self.q) in excel_file.sheetnames:
                re_write_flage = str(input("TYPE LETTER T to rewrite, TYPE LETTER F to cancel: "))
                # BUG FIX: `is 'F'` compared object identity and only worked by
                # accident of string interning (CPython >= 3.8 warns about it);
                # equality (==) is the correct text comparison.
                if re_write_flage == 'F':
                    print("Script is terminated")
                    excel_file.close()
                    os._exit(0)
                elif re_write_flage == 'T':
                    print("Script is going to rewrite")
                    excel_file.remove(excel_file[DATE + self.q])
                    excel_file.create_sheet(DATE + self.q)
                    excel_file.save(GOODS_EXCEL_PATH)
                    excel_file.close()
        for i in range(0, 2):
            print('第%d页' % (i + 1))
            self.spider_goods(i)
            time.sleep(random.randint(10, 15))
if __name__ == '__main__':
    # Scrape two queries back to back, pausing 30-60s between them to
    # reduce the chance of being rate limited.
    gs = GoodsSpider('Xiaomi10')
    gs.patch_spider_goods()
    time.sleep(random.randint(30, 60))
    gs = GoodsSpider('Xiaomi10Pro')
    gs.patch_spider_goods()
|
[
"marcowingwing@gmail.com"
] |
marcowingwing@gmail.com
|
767a5c36ac1b28c136a2addb2cd1ae4cd12a0895
|
6e9872b3f9d1bfb62f5958ddedeff26b1a852189
|
/Prg1/server.py
|
b32d5177178abe0d10287b08ae62e3a8122427e4
|
[] |
no_license
|
rat17sri/Python-Socket-Programming
|
4f8a985e164a1785668c694bf4206f6aa84832fb
|
d1dd89bd9be7e8cd8114a9e28eb1ad227afd43f9
|
refs/heads/master
| 2020-04-07T20:46:03.741368
| 2019-01-16T17:14:01
| 2019-01-16T17:14:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,426
|
py
|
# NOTE(review): Python 2 syntax (print statements) — this script will not run
# under Python 3 without converting print to a function call.
import socket
# import the socket library
s = socket.socket()
# Created the object 's' of function socket() of socket library
print "Socket Succesfully Created"
# Denotion for successful object creation
port = 12345
# using a random port > 1023 as port number ranges from 0 - 65535 and 0 - 1023 are reserved so a specfic port has to be choosen above the range.
host = '127.0.0.1'
# address of the client computer for local server
s.bind((host, port))
# It binds address and port number to socket and it denotes that the server is now ready to accept the request from client
print "Socket binded to %s " %(port)
# Denotion of successful binding of port and host and printing the port number as well.
s.listen(5)
# Server is in listening mode i.e. the server is ready to be in the communication with the client.
print "socket is listening"
# Denotion for the server is in listen mode
while True:
    c , addr = s.accept()
    # When the client requests for connection to the server then the server accept the request using function s.accept().
    # This function returns tuple c and addr where c is the new socket object used for sending-receiving data on the connection and addr is the address bound to the socket on client side connection.
    print "Got connection from" , addr
    # Prints the host name and port number
    c.send("Hello World")
    # Sending data to the client
    c.close()
    # Close the connection from the client
|
[
"noreply@github.com"
] |
noreply@github.com
|
8c98c7208b6b1c9f418875ff9aac783f045b81f4
|
2937ae94f4312b2bf6be024be9d1c03c6cff0523
|
/python/black-white-captcha.py
|
3db96762c6c7e95dce1f23cdfba6a80532a1f203
|
[
"MIT"
] |
permissive
|
niraj-lal-rahi/scrapper-laravel-python
|
52d05ec5e8426a8f690a52fce9fcb1525ac9d88e
|
7cc24feda17548bbcfa89449ea7a2387dd958979
|
refs/heads/master
| 2023-01-07T12:57:22.095388
| 2020-11-12T17:20:06
| 2020-11-12T17:20:06
| 291,505,029
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,332
|
py
|
# import requests
# api_key = "364d902ab388957"
# payload = {'apikey':api_key , "OCREngine": 2}
# f_path = "screenshot.png"
# with open(f_path, 'rb') as f:
# j = requests.post('https://api.ocr.space/parse/image', files={f_path: f}, data=payload).json()
# if j['ParsedResults']:
# print(j['ParsedResults'][0]['ParsedText'])
# print("end")
# import requests
# api_key = "364d902ab388957"
# img_url = "https://i.stack.imgur.com/022oK.jpg"
# url = f"https://api.ocr.space/parse/imageurl?apikey={api_key}&url={img_url}&OCREngine=2"
# j = requests.get(url).json()
# if j['ParsedResults']:
# print(j['ParsedResults'][0]['ParsedText'])
from bs4 import BeautifulSoup
import requests
# lists
urls=[]
# function created
def scrape(site):
    """Recursively collect and print same-site links reachable from *site*.

    Visited URLs accumulate in the module-level `urls` list, which also
    serves as the recursion's already-seen guard.
    """
    # getting the request from url
    r = requests.get(site)
    # converting the text
    s = BeautifulSoup(r.text, "html.parser")
    for i in s.find_all("a"):
        href = i.attrs['href']
        if href.startswith("/"):
            # BUG FIX: the original did `site = site + href`, mutating `site`
            # so every later href in the loop was appended onto the previous
            # compound URL.  Build each candidate from the unmodified base.
            full_url = site + href
            if full_url not in urls:
                urls.append(full_url)
                print(full_url)
                # recurse into the newly discovered page
                scrape(full_url)
# main function
if __name__ =="__main__":
# website to be scrape
site="http://example.webscraping.com//"
# calling function
scrape(site)
|
[
"krniraj007@gmail.com"
] |
krniraj007@gmail.com
|
28a5b9492df7e5c77f28f783a5b250602b9bcf95
|
2dd0999515779e710dd27fc68e7541e30f8d549d
|
/venv/bin/http
|
13ebdff3cc3a76a10c746af79bf55264d87facc9
|
[] |
no_license
|
iahnglish/exjun-flasky
|
bc1b364a68218a86472d5d3dc69d103f12b955a1
|
0227300edd9c3787131901f25e9c95c742f2e67f
|
refs/heads/master
| 2021-01-20T14:15:48.037530
| 2017-05-08T02:33:47
| 2017-05-08T02:33:47
| 90,577,713
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 255
|
#!/Users/exjun/Desktop/Python/myproject/venv/bin/python
# -*- coding: utf-8 -*-
# Console-script shim generated for the `http` command (HTTPie) inside a venv.
import re
import sys
from httpie.__main__ import main
if __name__ == '__main__':
    # Strip a trailing "-script.py(w)"/".exe" suffix from argv[0] so the tool
    # reports a clean program name on every platform.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
|
[
"iahnglish@gmail.com"
] |
iahnglish@gmail.com
|
|
6b6866ccaff5a37f66771ccb14633bec1c330e5a
|
03bf7975b4fe6abf0ac20dffdb22a69ffc16fbb0
|
/wikipedia/__init__.py
|
8e4106956c471a2e232b199a28de6b29aeb7a7d6
|
[] |
no_license
|
AbcSxyZ/radio-parser
|
6ab178878b715070f02db519e1b0de0f542386a0
|
3953b30b20bca5d25a1a56c13c4cf7b25b9b0418
|
refs/heads/master
| 2023-05-26T05:13:50.414932
| 2023-05-23T08:39:21
| 2023-05-23T08:39:21
| 273,698,217
| 0
| 0
| null | 2023-05-23T08:39:22
| 2020-06-20T11:51:44
|
Python
|
UTF-8
|
Python
| false
| false
| 411
|
py
|
import logging

# File receiving all "wiki" logger output.
LOG_FILE = "wikipedia.log"


def setup_logger():
    """Configure and return the "wiki" logger writing DEBUG records to LOG_FILE."""
    logger = logging.getLogger("wiki")
    logger.setLevel(logging.DEBUG)
    # BUG FIX: getLogger returns a process-wide singleton, so calling
    # setup_logger() more than once used to attach an extra FileHandler each
    # time and duplicate every log line.  Only configure handlers once.
    if not logger.handlers:
        # Setup log format
        format_log = logging.Formatter("%(levelname)s:%(process)s: %(message)s")
        # Setup log file
        log_file = logging.FileHandler(LOG_FILE)
        log_file.setFormatter(format_log)
        logger.addHandler(log_file)
    return logger


setup_logger()
|
[
"rossi.sim@outlook.com"
] |
rossi.sim@outlook.com
|
cf034327ad61b8216fbb305045967563cb7bb817
|
22e9348f150bb9ce9a0764fe42ba3dcf11832021
|
/env/lib/python2.7/abc.py
|
9a1dec7df3fd00273a6c54648a2b7d273bd9b1ea
|
[] |
no_license
|
chinmay60/Activity-Tracking
|
06d8dc2944e4f6b4266aaa25198718e2ab3b4e8c
|
304233d00910f310cd59bc133f593b3593ae4f1f
|
refs/heads/master
| 2023-02-09T22:54:50.113561
| 2021-01-05T23:32:54
| 2021-01-05T23:32:54
| 302,783,949
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 97
|
py
|
/usr/local/Cellar/python@2/2.7.15_3/Frameworks/Python.framework/Versions/2.7/lib/python2.7/abc.py
|
[
"chinmayvinchurkar24@gmail.com"
] |
chinmayvinchurkar24@gmail.com
|
17f859589c603a22117367624010559c8063f80b
|
f6a8d93c0b764f84b9e90eaf4415ab09d8060ec8
|
/Functions/orders.py
|
7fef95d38a0f0303db054de91a1ee188f9750e62
|
[] |
no_license
|
DimoDimchev/SoftUni-Python-Fundamentals
|
90c92f6e8128b62954c4f9c32b01ff4fbb405a02
|
970360dd6ffd54b852946a37d81b5b16248871ec
|
refs/heads/main
| 2023-03-18T17:44:11.856197
| 2021-03-06T12:00:32
| 2021-03-06T12:00:32
| 329,729,960
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 478
|
py
|
def order(product, times_ordered):
    """Return the total price for *times_ordered* units of *product*.

    Unknown products contribute nothing, so their total is 0.
    """
    unit_prices = {"coffee": 1.50, "water": 1.00, "coke": 1.40, "snacks": 2.00}
    total = 0
    # Accumulate per unit (same summation order as the original loop).
    for _ in range(times_ordered):
        total += unit_prices.get(product, 0)
    return total
# Read the product name and quantity from stdin, then print the total
# formatted to two decimal places.
product_input = input()
number = int(input())
print('{0:.2f}'.format(order(product_input, number)))
|
[
"noreply@github.com"
] |
noreply@github.com
|
eaee1a4301e5e38b6c950cf1d1f1dd928ffb4550
|
ad4c4788a98351e9239007891865254e6b9ac8bc
|
/blog_project/blog/migrations/0002_auto_20200905_0915.py
|
56fdfbf6f263e94e637934f3eabce43c73f49b33
|
[] |
no_license
|
rajarawal/blog
|
172a37362b1b7accb9939e73bbf3925cbb59c7d5
|
3dd7ff68cd879b3d788f2f940b920f7d58b13847
|
refs/heads/master
| 2022-12-16T18:01:34.037417
| 2020-09-09T08:25:45
| 2020-09-09T08:25:45
| 294,036,769
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 482
|
py
|
# Generated by Django 3.1 on 2020-09-05 03:45
import datetime
from django.db import migrations, models
from django.utils.timezone import utc
class Migration(migrations.Migration):
    # Alters Post.publish to a default frozen at generation time
    # (2020-09-05 03:45:17 UTC): Django captured datetime.now() when the
    # migration was auto-generated rather than a callable.

    dependencies = [
        ('blog', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='post',
            name='publish',
            field=models.DateTimeField(default=datetime.datetime(2020, 9, 5, 3, 45, 17, 296061, tzinfo=utc)),
        ),
    ]
|
[
"rajarawal@outlook.com"
] |
rajarawal@outlook.com
|
d07fd4eba40be4e6d660ea3cad36659836d2690b
|
65f3167355b658954928fe38514135766e2ab1d1
|
/website/migrations/0024_members_firstname.py
|
8687ea8634681d30923a1433663658211d05f953
|
[] |
no_license
|
jeet15/delhiortho
|
fa669c44aac3e4767f7d2f668f41e45fc00b529b
|
4925ba66847c5289c41f9cb775865b345a0bec0d
|
refs/heads/master
| 2023-07-31T01:16:51.837231
| 2020-07-27T12:08:58
| 2020-07-27T12:08:58
| 278,882,975
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 434
|
py
|
# Generated by Django 3.0.8 on 2020-07-26 08:34
from django.db import migrations, models
class Migration(migrations.Migration):
    # Adds Members.firstname.  NOTE(review): default=True on a CharField is
    # suspicious (it would be stored as the string "True"); it is only used
    # once to fill existing rows, since preserve_default=False discards it.

    dependencies = [
        ('website', '0023_members_email'),
    ]

    operations = [
        migrations.AddField(
            model_name='members',
            name='firstname',
            field=models.CharField(default=True, max_length=255),
            preserve_default=False,
        ),
    ]
|
[
"gurjeetsingh1506@gmail.com"
] |
gurjeetsingh1506@gmail.com
|
99518c9b227d4932dd9ce324f21f34c350249ea3
|
8e29c4109010a0e002b17a13adbeb4534f8589e1
|
/old/podesta-urlscrape.mt2.py
|
c8f197cd126008a318b3f405e65376db269d0a5a
|
[
"CC0-1.0"
] |
permissive
|
vs49688/dnc-downloader
|
264385a92d69b98e392a6ee99d0823bf5f70768b
|
4f5bcec544457cd2095ea6c132cc7edfbec5eece
|
refs/heads/master
| 2021-01-19T03:26:44.022691
| 2020-12-09T14:24:33
| 2020-12-13T14:34:53
| 64,003,093
| 13
| 9
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,786
|
py
|
#!/usr/bin/env python3
# SPDX-License-Identifier: CC0-1.0
##
# Scrape the actual PDF url from
# https://wikileaks.org/clinton-emails/emailid/{id}
#
# Stored results in clintonurls.json. This is idempotent, just
# restart it if it fails.
#
# At the end, it will spit out an aria2 input file to download
# everything.
##
import os.path
import urllib.parse
import json
import concurrent.futures
import traceback
import threading
import time
import wikileaks
import shlex
import sqlite3
db = sqlite3.Connection('wikileaks.db', check_same_thread=False)
done = False
mutex = threading.Lock()
def get_url_proc(id: int, pool: concurrent.futures.Executor):
    """Resolve the PDF location for email *id* and insert it into the database.

    Retries forever on HTTP errors; any other exception aborts this id.
    The shared sqlite connection is serialized through the module mutex.
    """
    while True:
        try:
            info = wikileaks.head_podesta_eml(id)
            break
        except urllib.error.HTTPError as e:
            # NOTE(review): only `urllib.parse` is imported at module top;
            # referencing urllib.error here relies on it having been loaded
            # as a side effect (presumably by the wikileaks module) — confirm.
            print(f'{id}: Caught exception {e}, retrying')
            #pool.submit(get_url_proc, id, pool)
            #return
        except:
            traceback.print_exc()
            return
    print(f'{id}: {info}')
    mutex.acquire()
    try:
        cur = db.cursor()
        cur.execute('INSERT INTO podesta_emails(id, path, url) VALUES (?, ?, ?)', (
            info.id, info.path, info.url
        ))
    except:
        traceback.print_exc()
        return
    finally:
        mutex.release()
# Build the set of email ids not yet present in the database so that reruns
# only fetch what is still missing (this is what makes the script idempotent).
needed = set()
cur = db.cursor()
try:
    for i in range(1, wikileaks.COUNT_PODESTA + 1):
        if not cur.execute('SELECT id FROM podesta_emails WHERE id = ?', (i,)).fetchone():
            needed.add(i)
finally:
    cur.close()
def checkpoint():
    """Commit the shared sqlite connection every ~10 seconds until `done` is set."""
    while not done:
        time.sleep(10)
        print('~10 seconds passed, checkpointing')
        # Serialize the commit against worker inserts on the same connection.
        mutex.acquire()
        try:
            db.commit()
        finally:
            mutex.release()
# Start the periodic-commit thread, fan the id lookups out over 15 worker
# threads, then signal the checkpointer to stop and close the database.
cpthread = threading.Thread(target=checkpoint, daemon=False)
cpthread.start()
with concurrent.futures.ThreadPoolExecutor(max_workers=15) as pool:
    for i in needed:
        pool.submit(get_url_proc, i, pool)
done = True
cpthread.join()
db.close()
|
[
"zane@zanevaniperen.com"
] |
zane@zanevaniperen.com
|
b5a93fc345ba8f35ee02a1700dd57d7007a40ee6
|
24b46a2abaf51140a8c5eef27b8ce1d2e0653e40
|
/scraper.py
|
46376af48165053b1483a56ba8298199ff903d4a
|
[] |
no_license
|
bbq12340/kakao
|
8491232fd56ed41d737499d1336c1294a3c7f69b
|
0b14e2a6c39d77ef61f01413cbe07ae59f7084cd
|
refs/heads/main
| 2023-03-21T05:45:51.936842
| 2021-03-15T00:24:32
| 2021-03-15T00:24:32
| 347,788,300
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,217
|
py
|
import requests
import pandas as pd
import time
def functionA(query, p):
    """Query the Kakao mobile map JSON endpoint; return the place list for page *p*.

    Returns an empty list on any non-200 response.
    """
    url = 'https://m.map.kakao.com/actions/searchJson'
    payload = {
        "type": "PLACE",
        "q": query,
        "wxEnc": "LVSOTP",
        "wyEnc": "QNLTTMN",
        "pageNo": p,
        "sort": "0"
    }
    my_headers = {
        "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/88.0.4324.192 Safari/537.36",
        "Host": "m.map.kakao.com",
        "Referer": f"https://m.map.kakao.com/actions/searchView?q={query}&wxEnc=LVSOTP&wyEnc=QNLTTMN"
    }
    r = requests.get(url, params=payload, headers=my_headers)
    return r.json()["placeList"] if r.status_code == 200 else []
def request_kakao(query, p):
    """GET one page of Daum map search results for *query*; return the raw response."""
    my_headers = {
        "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/88.0.4324.192 Safari/537.36",
        "Host": "search.map.daum.net",
        "Referer": "https://map.kakao.com/"
    }
    payload = {
        "q": query,
        "msFlag": "S",
        "page": p,
        "sort": "0"
    }
    return requests.get('https://search.map.daum.net/mapsearch/map.daum',
                        params=payload, headers=my_headers)
def extract_kakao(query, p):
    """Fetch page ``p`` of results for ``query`` and normalize each place.

    Returns a list of dicts keyed by the Korean column labels used in the
    CSV written by ``start_scraping``; empty on a non-200 response or a
    payload without places.
    """
    result = []
    r = request_kakao(query, p)
    if r.status_code == 200:
        # FIX: .get("place") can return None when the payload lacks the key;
        # fall back to an empty list instead of raising TypeError below.
        place_list = r.json().get("place") or []
        for place in place_list:
            # Collapse the 5-level category hierarchy into one comma-joined
            # string, trimming trailing empty levels.
            cat = (",").join(
                [place[f"cate_name_depth{i}"] for i in range(1, 6)]).rstrip(",")
            data = {
                "업체명": place["name"],            # business name
                "별점": place["rating_average"],    # star rating
                "방문뷰": place["rating_count"],    # visit-review count
                "블로그리뷰": place["reviewCount"],  # blog-review count
                "업종": cat,                        # category path
                "전화번호": place["tel"],           # phone number
                "지번": place["address"],           # lot-number address
                "도로명": place["new_address"]      # road-name address
            }
            result.append(data)
    return result
def start_scraping(query, target=0, delay=0):
    """Scrape up to ``target`` places for ``query`` into ``<query>.csv``.

    :param query: search term (str; encoded to UTF-8 for the API).
    :param target: number of places to fetch; 0 means "ask the API for the
        total count and fetch everything".
    :param delay: seconds to sleep between page requests (rate limiting).
    """
    l = 0
    p = 1
    encoded_query = query.encode("utf-8")
    if target == 0:
        r = request_kakao(encoded_query, 1)
        target = int(r.json().get("place_totalcount"))
        print(target)
    while l < target:
        data = extract_kakao(encoded_query, p)
        print(p)
        if len(data) == 0:
            # API returned nothing before we reached the target — stop.
            print("무슨 일이지 시발?")
            break
        p = p + 1
        # BUG FIX: the original forced English column names onto rows keyed
        # by Korean labels, producing an all-NaN CSV. Let pandas derive the
        # columns from the row dicts themselves (extract_kakao's labels).
        df = pd.DataFrame(data)
        df.to_csv(f"{query}.csv", mode="a", encoding="utf-8-sig",
                  header=False, index=False)
        l = l + len(data)
        time.sleep(delay)
        if l >= target:
            print("끝!")
            break
if __name__ == '__main__':
    # start_scraping("마포구 맛집", target=0, delay=1)
    # Ad-hoc probe: dump raw JSON for page 34 of a Gangnam restaurant search.
    r = request_kakao("강남구 맛집".encode("utf-8"), 34)
    print(r.text)
|
[
"bbq12340@hotmail.com"
] |
bbq12340@hotmail.com
|
16ddd801db805b58c91e02ddca2c4973ad471372
|
d56fac1a53753396e8dfd43bc9061a13444b2c87
|
/Hades_1/dlg_coeficientes/controlador/cnt_coeficientes.py
|
1947daaca6c3df7fd60c842ab6fe078382d5ac25
|
[] |
no_license
|
PedroBiel/Hades
|
5c7a7f04e7e1521038ff0d296d41784cadebc3ad
|
d36839995c269574d36afb0dc28e21db3502dd6c
|
refs/heads/main
| 2023-06-23T17:42:50.230412
| 2021-07-27T10:02:09
| 2021-07-27T10:02:09
| 389,928,637
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,342
|
py
|
# -*- coding: utf-8 -*-
"""
Controlador de coeficientes
Created on Wed Oct 28 14:49 2020
__author__ = Pedro Biel
__version__ = 0.0.0
__email__ = pbiel@taimweser.com
"""
from dlg_coeficientes.datos.datos_coeficientes import Unicos
from dlg_coeficientes.modelo.mdl_tablemodeleditable import PandasModelEditable
class CntCoeficientes:
    """Controller for the amplification coefficients and the rounding step."""

    def __init__(self, ventana):
        """Keep a reference to the main window this controller drives."""
        self.v = ventana

    def crea_coeficientes(self):
        """Build ``df_coeficientes`` from the unique nodes of ``df_apoyos``,
        wrap it in an editable model and show it in the coefficients dialog.
        """
        self.v.status_bar('Creando los coeficientes de mayoración y de redondeo.')
        if self.compara_dataframes_apoyos():
            # Supports unchanged: only build on the very first call so any
            # user edits to the coefficients are preserved.
            if self.v.df_coeficientes.empty:
                self.v.df_coeficientes = self.get_coeficientes()
        else:
            # Supports changed: rebuild, then snapshot df_apoyos so the next
            # call compares equal and keeps the user's coefficient edits.
            self.v.df_coeficientes = self.get_coeficientes()
            self.v.df_apoyos_prev = self.v.df_apoyos.copy()
        model = self.get_modelo(self.v.df_coeficientes)
        self.v.call_dialogo_coeficientes(self.v.df_coeficientes, model)
        self.v.status_bar('Coeficientes de mayoración y de redondeo creados.')

    def compara_dataframes_apoyos(self):
        """Return True when ``df_apoyos`` equals the stored previous snapshot."""
        return self.v.df_apoyos.equals(self.v.df_apoyos_prev)

    def dataframe_coeficientes(self):
        """Derive the DataFrame of unique support values from ``df_grupos``."""
        try:
            df_unicos = Unicos(self.v.pd, self.v.df_grupos).get_df()
        except Exception as e:
            print('Exception en CntCoeficientes.dataframe_coeficientes():', e)
        # NOTE: on failure df_unicos is unbound and the return raises
        # NameError — identical to the original control flow.
        return df_unicos

    def get_coeficientes(self):
        """Getter for the coefficients DataFrame."""
        return self.dataframe_coeficientes()

    def get_modelo(self, df):
        """Wrap ``df`` in an editable table model for the dialog."""
        return PandasModelEditable(df)
|
[
"noreply@github.com"
] |
noreply@github.com
|
3f5447789a83847dcf555f556eaf2067a532731c
|
c06c2c4e084dad8191cbb6fb02227f7b05ba86e7
|
/chat/extras/output/output_adapter.py
|
33c6cd8c7c3458943de637f070c279efad3b0687
|
[] |
no_license
|
thiagorocha06/chatbot
|
053c525d0c6d037570851411618f3cb1186b32b4
|
2d22a355926c50d9b389d3db883f435950b47a77
|
refs/heads/master
| 2020-03-24T14:31:59.134462
| 2018-07-29T14:42:55
| 2018-07-29T14:42:55
| 142,770,645
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 660
|
py
|
from chat.extras.adapters import Adapter
class OutputAdapter(Adapter):
    """
    Base output adapter: passes responses through unchanged.

    Subclass and override ``process_response`` to deliver the bot's
    response somewhere else, such as an API endpoint.
    """

    def process_response(self, statement, session_id=None):
        """
        Hook invoked with the statement the chat bot produced.

        :param statement: The statement that the chat bot has produced in
            response to some input.
        :param session_id: The unique id of the current chat session.
        :returns: The response statement; this base implementation returns
            it untouched.
        """
        response = statement
        return response
|
[
"thiagorocha06@gmail.com"
] |
thiagorocha06@gmail.com
|
5b9498a265068ebc80293b329f26e0f90eb756c8
|
c0816431d4a00b8cab6a5c264261b5e94aefc0a1
|
/ui.py
|
556dc4c07fc3b0dfd455d71bd13fa9b1ff3e1b32
|
[] |
no_license
|
antonl/pyDAQmx
|
4345a375d2b5c5779aacc53b7cda7d8bda5911f0
|
072140e57860f509887ddb9f28691d407ec89e16
|
refs/heads/master
| 2021-01-22T03:08:38.660412
| 2013-06-14T19:38:37
| 2013-06-14T19:38:37
| 9,877,515
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,319
|
py
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file '2chan_scope_ui.ui'
#
# Created: Tue Jun 11 19:23:20 2013
# by: PyQt4 UI code generator 4.9.4
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
    _fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
    # PyQt built with API v2 (or Python 3) has no QString; identity works
    # because plain str is accepted everywhere QString was.
    _fromUtf8 = lambda s: s
class Ui_MainWindow(object):
    # pyuic4-generated UI class (from 2chan_scope_ui.ui). Regenerate from the
    # .ui file instead of editing by hand — manual edits are lost on rebuild.
    def setupUi(self, MainWindow):
        # Build the widget tree: a pyqtgraph plot on top, and a "Settings"
        # panel underneath (plot type, offsets, gains, controls).
        MainWindow.setObjectName(_fromUtf8("MainWindow"))
        MainWindow.setWindowModality(QtCore.Qt.ApplicationModal)
        MainWindow.resize(800, 600)
        MainWindow.setUnifiedTitleAndToolBarOnMac(False)
        self.centralwidget = QtGui.QWidget(MainWindow)
        self.centralwidget.setObjectName(_fromUtf8("centralwidget"))
        self.verticalLayout = QtGui.QVBoxLayout(self.centralwidget)
        self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
        # Main oscilloscope display (pyqtgraph PlotWidget).
        self.graphicsView = PlotWidget(self.centralwidget)
        self.graphicsView.setObjectName(_fromUtf8("graphicsView"))
        self.verticalLayout.addWidget(self.graphicsView)
        # "Settings" group box hosting all control sub-groups in a grid.
        self.groupBox = QtGui.QGroupBox(self.centralwidget)
        self.groupBox.setMinimumSize(QtCore.QSize(0, 100))
        self.groupBox.setObjectName(_fromUtf8("groupBox"))
        self.gridLayout = QtGui.QGridLayout(self.groupBox)
        self.gridLayout.setObjectName(_fromUtf8("gridLayout"))
        # --- "Plot Type" group: mutually exclusive radio buttons ---
        self.groupBox_2 = QtGui.QGroupBox(self.groupBox)
        self.groupBox_2.setObjectName(_fromUtf8("groupBox_2"))
        self.verticalLayout_2 = QtGui.QVBoxLayout(self.groupBox_2)
        self.verticalLayout_2.setObjectName(_fromUtf8("verticalLayout_2"))
        self.stacked_plot = QtGui.QRadioButton(self.groupBox_2)
        self.stacked_plot.setObjectName(_fromUtf8("stacked_plot"))
        self.buttonGroup = QtGui.QButtonGroup(MainWindow)
        self.buttonGroup.setObjectName(_fromUtf8("buttonGroup"))
        self.buttonGroup.addButton(self.stacked_plot)
        self.verticalLayout_2.addWidget(self.stacked_plot)
        self.xy_plot = QtGui.QRadioButton(self.groupBox_2)
        self.xy_plot.setObjectName(_fromUtf8("xy_plot"))
        self.buttonGroup.addButton(self.xy_plot)
        self.verticalLayout_2.addWidget(self.xy_plot)
        self.gridLayout.addWidget(self.groupBox_2, 0, 2, 1, 1)
        # --- "Vertical Offsets" group: I/Q offset spin boxes ---
        self.groupBox_3 = QtGui.QGroupBox(self.groupBox)
        sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Preferred)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.groupBox_3.sizePolicy().hasHeightForWidth())
        self.groupBox_3.setSizePolicy(sizePolicy)
        self.groupBox_3.setMinimumSize(QtCore.QSize(0, 0))
        self.groupBox_3.setObjectName(_fromUtf8("groupBox_3"))
        self.gridLayout_2 = QtGui.QGridLayout(self.groupBox_3)
        self.gridLayout_2.setObjectName(_fromUtf8("gridLayout_2"))
        self.label = QtGui.QLabel(self.groupBox_3)
        self.label.setMargin(8)
        self.label.setObjectName(_fromUtf8("label"))
        self.gridLayout_2.addWidget(self.label, 0, 0, 1, 1)
        self.label_2 = QtGui.QLabel(self.groupBox_3)
        self.label_2.setMargin(8)
        self.label_2.setObjectName(_fromUtf8("label_2"))
        self.gridLayout_2.addWidget(self.label_2, 1, 0, 1, 1)
        self.iOffsetBox = QtGui.QDoubleSpinBox(self.groupBox_3)
        self.iOffsetBox.setMinimumSize(QtCore.QSize(100, 0))
        self.iOffsetBox.setButtonSymbols(QtGui.QAbstractSpinBox.PlusMinus)
        self.iOffsetBox.setMinimum(-100.0)
        self.iOffsetBox.setMaximum(100.0)
        self.iOffsetBox.setObjectName(_fromUtf8("iOffsetBox"))
        self.gridLayout_2.addWidget(self.iOffsetBox, 0, 1, 1, 1)
        self.qOffsetBox = QtGui.QDoubleSpinBox(self.groupBox_3)
        self.qOffsetBox.setMinimumSize(QtCore.QSize(100, 0))
        self.qOffsetBox.setMinimum(-100.0)
        self.qOffsetBox.setMaximum(100.0)
        self.qOffsetBox.setObjectName(_fromUtf8("qOffsetBox"))
        self.gridLayout_2.addWidget(self.qOffsetBox, 1, 1, 1, 1)
        self.gridLayout.addWidget(self.groupBox_3, 0, 3, 1, 3)
        # --- "Controls" group: the start/pause button ---
        self.groupBox_4 = QtGui.QGroupBox(self.groupBox)
        self.groupBox_4.setObjectName(_fromUtf8("groupBox_4"))
        self.verticalLayout_3 = QtGui.QVBoxLayout(self.groupBox_4)
        self.verticalLayout_3.setObjectName(_fromUtf8("verticalLayout_3"))
        self.startButton = QtGui.QPushButton(self.groupBox_4)
        self.startButton.setObjectName(_fromUtf8("startButton"))
        self.verticalLayout_3.addWidget(self.startButton)
        self.gridLayout.addWidget(self.groupBox_4, 0, 7, 1, 1)
        # --- "Gains" group: I/Q gain spin boxes ---
        self.groupBox_5 = QtGui.QGroupBox(self.groupBox)
        self.groupBox_5.setObjectName(_fromUtf8("groupBox_5"))
        self.gridLayout_3 = QtGui.QGridLayout(self.groupBox_5)
        self.gridLayout_3.setObjectName(_fromUtf8("gridLayout_3"))
        self.qGainBox = QtGui.QDoubleSpinBox(self.groupBox_5)
        self.qGainBox.setMinimumSize(QtCore.QSize(100, 0))
        self.qGainBox.setObjectName(_fromUtf8("qGainBox"))
        self.gridLayout_3.addWidget(self.qGainBox, 1, 1, 1, 1)
        self.iGainBox = QtGui.QDoubleSpinBox(self.groupBox_5)
        self.iGainBox.setMinimumSize(QtCore.QSize(100, 0))
        self.iGainBox.setObjectName(_fromUtf8("iGainBox"))
        self.gridLayout_3.addWidget(self.iGainBox, 0, 1, 1, 1)
        self.label_3 = QtGui.QLabel(self.groupBox_5)
        self.label_3.setMargin(8)
        self.label_3.setObjectName(_fromUtf8("label_3"))
        self.gridLayout_3.addWidget(self.label_3, 0, 0, 1, 1)
        self.label_4 = QtGui.QLabel(self.groupBox_5)
        self.label_4.setMargin(8)
        self.label_4.setObjectName(_fromUtf8("label_4"))
        self.gridLayout_3.addWidget(self.label_4, 1, 0, 1, 1)
        self.gridLayout.addWidget(self.groupBox_5, 0, 6, 1, 1)
        # Spacer pushes the control groups to the left of the grid.
        spacerItem = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
        self.gridLayout.addItem(spacerItem, 0, 8, 1, 1)
        self.verticalLayout.addWidget(self.groupBox)
        MainWindow.setCentralWidget(self.centralwidget)
        # Menu bar and status bar.
        self.menubar = QtGui.QMenuBar(MainWindow)
        self.menubar.setGeometry(QtCore.QRect(0, 0, 800, 22))
        self.menubar.setObjectName(_fromUtf8("menubar"))
        MainWindow.setMenuBar(self.menubar)
        self.statusbar = QtGui.QStatusBar(MainWindow)
        self.statusbar.setObjectName(_fromUtf8("statusbar"))
        MainWindow.setStatusBar(self.statusbar)
        # Apply the translatable labels, then auto-connect slots by name.
        self.retranslateUi(MainWindow)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)

    def retranslateUi(self, MainWindow):
        # Set all user-visible text; called once from setupUi and again on
        # any runtime language change.
        MainWindow.setWindowTitle(QtGui.QApplication.translate("MainWindow", "2 Chan Scope", None, QtGui.QApplication.UnicodeUTF8))
        self.groupBox.setTitle(QtGui.QApplication.translate("MainWindow", "Settings", None, QtGui.QApplication.UnicodeUTF8))
        self.groupBox_2.setTitle(QtGui.QApplication.translate("MainWindow", "Plot Type", None, QtGui.QApplication.UnicodeUTF8))
        self.stacked_plot.setText(QtGui.QApplication.translate("MainWindow", "Stacked Plot", None, QtGui.QApplication.UnicodeUTF8))
        self.xy_plot.setText(QtGui.QApplication.translate("MainWindow", "XY Plot", None, QtGui.QApplication.UnicodeUTF8))
        self.groupBox_3.setTitle(QtGui.QApplication.translate("MainWindow", "Vertical Offsets", None, QtGui.QApplication.UnicodeUTF8))
        self.label.setText(QtGui.QApplication.translate("MainWindow", "I offset", None, QtGui.QApplication.UnicodeUTF8))
        self.label_2.setText(QtGui.QApplication.translate("MainWindow", "Q offset", None, QtGui.QApplication.UnicodeUTF8))
        self.groupBox_4.setTitle(QtGui.QApplication.translate("MainWindow", "Controls", None, QtGui.QApplication.UnicodeUTF8))
        self.startButton.setText(QtGui.QApplication.translate("MainWindow", "Pause", None, QtGui.QApplication.UnicodeUTF8))
        self.groupBox_5.setTitle(QtGui.QApplication.translate("MainWindow", "Gains", None, QtGui.QApplication.UnicodeUTF8))
        self.label_3.setText(QtGui.QApplication.translate("MainWindow", "I Gain", None, QtGui.QApplication.UnicodeUTF8))
        self.label_4.setText(QtGui.QApplication.translate("MainWindow", "Q Gain", None, QtGui.QApplication.UnicodeUTF8))
from pyqtgraph import PlotWidget
|
[
"anton.loukianov@gmail.com"
] |
anton.loukianov@gmail.com
|
ce3a31a37fad7413fec2ae08f830708f46ff664b
|
acb8e84e3b9c987fcab341f799f41d5a5ec4d587
|
/langs/3/iju.py
|
64d6a1b3ccf14cabc615879040affc0ee38aa5ae
|
[] |
no_license
|
G4te-Keep3r/HowdyHackers
|
46bfad63eafe5ac515da363e1c75fa6f4b9bca32
|
fb6d391aaecb60ab5c4650d4ae2ddd599fd85db2
|
refs/heads/master
| 2020-08-01T12:08:10.782018
| 2016-11-13T20:45:50
| 2016-11-13T20:45:50
| 73,624,224
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 486
|
py
|
import sys
def printFunction(lineRemaining):
    # Print handler for the toy 'iJU' language (Python 2 source).
    # NOTE(review): this compares the first/last whole *tokens* to '"', not
    # the first/last characters — so only lines whose quotes are separated
    # by whitespace (e.g.  iJU " hello world " ) print anything; confirm
    # that is the intended syntax.
    if lineRemaining[0] == '"' and lineRemaining[-1] == '"':
        if len(lineRemaining) > 2:
            #data to print
            # Strip the quote tokens and re-join the payload words.
            lineRemaining = lineRemaining[1:-1]
            print ' '.join(lineRemaining)
        else:
            # Bare quotes: emit an empty line.
            print
def main(fileName):
    # Interpret the file line by line: every line must begin with the
    # keyword 'iJU'; the remaining tokens go to printFunction.
    # NOTE(review): a blank line makes data[0] raise IndexError — confirm
    # whether blank lines should be skipped instead.
    with open(fileName) as f:
        for line in f:
            data = line.split()
            if data[0] == 'iJU':
                printFunction(data[1:])
            else:
                # Any other leading token aborts interpretation.
                print 'ERROR'
                return
if __name__ == '__main__':
    # Usage: python iju.py <source-file>
    main(sys.argv[1])
|
[
"juliettaylorswift@gmail.com"
] |
juliettaylorswift@gmail.com
|
c5d8272bd09d323e174bede2869a37459b35b2d8
|
d11d111ea18c64f7f63769e96a3bfe0c8d5105b3
|
/manage.py
|
6f34b7c7204240e85ff6a4dfb5c4ae71eb730f82
|
[] |
no_license
|
PR-Juyoung/GJAI_web_stu
|
6d509d6c41810f7ee08fbe4a705a68b41382783c
|
34b31690773b20a4c73689022e0a2608e520f0cf
|
refs/heads/master
| 2023-08-11T05:35:23.776388
| 2021-09-14T07:07:38
| 2021-09-14T07:07:38
| 381,273,082
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 672
|
py
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Entry point for Django's command-line utility.

    Points DJANGO_SETTINGS_MODULE at the local settings module (unless the
    environment already set it) and hands ``sys.argv`` to Django's command
    dispatcher.
    """
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'GJAI_web_2.settings.local')
    try:
        from django.core.management import execute_from_command_line as run_cli
    except ImportError as exc:
        # Re-raise with a friendlier hint, chaining the original cause.
        message = (
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        )
        raise ImportError(message) from exc
    run_cli(sys.argv)


if __name__ == '__main__':
    main()
|
[
"9862wndud@gmail.com"
] |
9862wndud@gmail.com
|
437a0dd39544857d6737b5b81f1e3bc8c3e87655
|
24dfdb37c79f62cd54689f71f003730e1c341b06
|
/basic/openVC/face_recog.py
|
c6006cfeab95544dfe668ed29f47d99a5cc45734
|
[] |
no_license
|
pois2000/project
|
b324340d5bbb06d081c7de0695c7a79c62337027
|
d45bd387155f0e225c6e5b234bb88db7f528c8c0
|
refs/heads/master
| 2022-05-27T23:48:28.756806
| 2022-05-01T02:20:11
| 2022-05-01T02:20:11
| 93,464,662
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,221
|
py
|
import numpy as np
import cv2
import sys, getopt
import os

# from cv2.cv import *
#-----------------------------------------------------------------------------
# Load and configure Haar Cascade Classifiers
#-----------------------------------------------------------------------------
# location of OpenCV Haar Cascade Classifiers:
baseCascadePath = r'D:\opencv\data\haarcascades'
# BUG FIX: the original concatenated directory + file name with no path
# separator ('...haarcascadeshaarcascade_...'); os.path.join inserts it.
faceCascadeFilePath = os.path.join(baseCascadePath, 'haarcascade_frontalface_default.xml')
noseCascadeFilePath = os.path.join(baseCascadePath, 'haarcascade_mcs_nose.xml')
# build our cv2 Cascade Classifiers
faceCascade = cv2.CascadeClassifier(faceCascadeFilePath)
noseCascade = cv2.CascadeClassifier(noseCascadeFilePath)

#-----------------------------------------------------------------------------
# Load and configure mustache (.png with alpha transparency)
#-----------------------------------------------------------------------------
# -1 keeps the alpha channel when reading the overlay image.
imgMustache = cv2.imread('mustache.png', -1)
# Alpha channel -> mask of mustache pixels, plus its inverse for the background.
orig_mask = imgMustache[:, :, 3]
orig_mask_inv = cv2.bitwise_not(orig_mask)
# Keep only BGR, and remember the native size for proportional resizing.
imgMustache = imgMustache[:, :, 0:3]
origMustacheHeight, origMustacheWidth = imgMustache.shape[:2]

#-----------------------------------------------------------------------------
# Main program loop
#-----------------------------------------------------------------------------
# collect video input from first webcam on system
video_capture = cv2.VideoCapture(0)
while True:
    # Capture video feed
    ret, frame = video_capture.read()
    # Haar cascades operate on greyscale images.
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    # Detect faces in the frame.
    # FIX: cv2.CV_HAAR_SCALE_IMAGE does not exist in the cv2 namespace (it
    # lived in cv2.cv in OpenCV 2.x); the OpenCV 3+ constant is below.
    faces = faceCascade.detectMultiScale(
        gray,
        scaleFactor=1.1,
        minNeighbors=5,
        minSize=(30, 30),
        flags=cv2.CASCADE_SCALE_IMAGE
    )
    # Iterate over each face found
    for (x, y, w, h) in faces:
        # Restrict further detection/compositing to the face region.
        roi_gray = gray[y:y + h, x:x + w]
        roi_color = frame[y:y + h, x:x + w]
        # Detect a nose within the region bounded by each face (the ROI)
        nose = noseCascade.detectMultiScale(roi_gray)
        for (nx, ny, nw, nh) in nose:
            # The mustache should be three times the width of the nose.
            # FIX: use integer (floor) division throughout so the computed
            # coordinates stay valid slice indices under Python 3.
            mustacheWidth = 3 * nw
            mustacheHeight = mustacheWidth * origMustacheHeight // origMustacheWidth
            # Center the mustache on the bottom of the nose.
            x1 = nx - (mustacheWidth // 4)
            x2 = nx + nw + (mustacheWidth // 4)
            y1 = ny + nh - (mustacheHeight // 2)
            y2 = ny + nh + (mustacheHeight // 2)
            # Clip the overlay rectangle to the face ROI.
            if x1 < 0:
                x1 = 0
            if y1 < 0:
                y1 = 0
            if x2 > w:
                x2 = w
            if y2 > h:
                y2 = h
            # Re-calculate the overlay size after clipping.
            mustacheWidth = x2 - x1
            mustacheHeight = y2 - y1
            # Resize the overlay image and both masks to the clipped size.
            mustache = cv2.resize(imgMustache, (mustacheWidth, mustacheHeight), interpolation=cv2.INTER_AREA)
            mask = cv2.resize(orig_mask, (mustacheWidth, mustacheHeight), interpolation=cv2.INTER_AREA)
            mask_inv = cv2.resize(orig_mask_inv, (mustacheWidth, mustacheHeight), interpolation=cv2.INTER_AREA)
            # Composite: background where the mask is empty, mustache elsewhere.
            roi = roi_color[y1:y2, x1:x2]
            roi_bg = cv2.bitwise_and(roi, roi, mask=mask_inv)
            roi_fg = cv2.bitwise_and(mustache, mustache, mask=mask)
            dst = cv2.add(roi_bg, roi_fg)
            # Write the composite back into the frame (roi_color is a view).
            roi_color[y1:y2, x1:x2] = dst
            # Only decorate the first detected nose per face.
            break
    # Display the resulting frame
    cv2.imshow('Video', frame)
    # press 'q' to exit
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

# When everything is done, release the capture
video_capture.release()
cv2.destroyAllWindows()
|
[
"pois2000@gmail.com"
] |
pois2000@gmail.com
|
39fe66b6f1dcefaec65de082d6af8a0c15789557
|
e77a7cc1ed343a85662f0ad3c448a350ab776261
|
/data_structures/array/number_of_1_in_sorted_array.py
|
79689872fbb5d35fdec0a24168779d5ce80f4454
|
[
"MIT"
] |
permissive
|
M4cs/python-ds
|
9dcecab10291be6a274130c42450319dc112ac46
|
434c127ea4c49eb8d6bf65c71ff6ee10361d994e
|
refs/heads/master
| 2020-08-10T03:40:22.340529
| 2019-10-10T17:52:28
| 2019-10-10T17:52:28
| 214,247,733
| 2
| 0
|
MIT
| 2019-10-10T17:43:31
| 2019-10-10T17:43:30
| null |
UTF-8
|
Python
| false
| false
| 390
|
py
|
# The array is sorted in decreasing order
def count(arr):
    """Return the number of 1s in a binary array sorted in decreasing order.

    Binary-searches for the position of the last 1, so it runs in O(log n).
    Returns 0 for an empty or all-zero array.
    """
    start = 0
    end = len(arr) - 1
    while start <= end:
        mid = (start + end) // 2
        # BUG FIX: check "mid is the last index" BEFORE touching arr[mid+1];
        # the original evaluated arr[mid + 1] first (IndexError on the last
        # element) and compared against an undefined name `high`.
        if arr[mid] == 1 and (mid == len(arr) - 1 or arr[mid + 1] == 0):
            # arr[mid] is the last 1, so there are mid + 1 ones in total.
            return mid + 1
        if arr[mid] == 1:
            # Last 1 lies to the right.
            start = mid + 1
        else:
            # arr[mid] is 0: last 1 (if any) lies to the left.
            end = mid - 1
    return 0
# Smoke test: an all-zero array should report zero 1s.
arr = [0,0,0,0]
print(count(arr))
|
[
"prabhupant09@gmail.com"
] |
prabhupant09@gmail.com
|
781f27873e2ef899b1fc8c15ae9877ecbaee512f
|
ab3272f4e8a0e949e057d184ba38b998edae0a79
|
/search_indexes.py
|
965019f0fc6015f086f80fd8fee659e3f0470039
|
[] |
no_license
|
leety/ssrd_people
|
3e2e1db7956575d4b52f9dcf9af5943d8d8f4d42
|
72927477a4bb3b1ec3bd0a8789a3d2f04456e7ee
|
refs/heads/master
| 2021-01-22T05:42:41.327247
| 2017-02-12T01:04:26
| 2017-02-12T01:04:26
| 81,693,583
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 879
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.conf import settings
from aldryn_search.utils import get_index_base, strip_tags
from .models import Person
class PeopleIndex(get_index_base()):
    """Haystack search index for Person, limited to active translations."""

    # Projects can opt out of indexing via the SSRD_PEOPLE_SEARCH setting.
    haystack_use_for_indexing = getattr(settings, "SSRD_PEOPLE_SEARCH", True)
    INDEX_TITLE = True

    def get_model(self):
        """Index Person objects."""
        return Person

    def get_title(self, obj):
        """Use the person's name as the document title."""
        return obj.name

    def get_description(self, obj):
        """Use the person's free-text description."""
        return obj.description

    def get_index_kwargs(self, language):
        """Restrict indexed translations to the requested language."""
        return {'translations__language_code': language}

    def get_index_queryset(self, language):
        """Only index people with an active translation in ``language``."""
        model = self.get_model()
        queryset = model.objects.active_translations(language_code=language)
        return queryset.translated(language)

    def get_search_data(self, obj, language, request):
        """Strip markup and surrounding whitespace for the search body."""
        description = self.get_description(obj)
        return strip_tags(description).strip()
|
[
"hello.tien@gmail.com"
] |
hello.tien@gmail.com
|
600272f0befe1b90825cf2bcace14aee5a25c29d
|
4511e58821e50bcecd371ebdec5fa26fd21f670b
|
/app.py
|
c885ed6092a6df57153e6b89eeaead8319705f57
|
[] |
no_license
|
alvin158/car-prices-ml
|
161617283d8316f0c9d88fbb9377c00c246c3c3c
|
2476327415ff76a27fafcd93d2e57a1e59a18654
|
refs/heads/main
| 2023-07-09T23:02:39.632635
| 2021-08-10T03:16:56
| 2021-08-10T03:16:56
| 394,509,628
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 988
|
py
|
import numpy as np
from flask import Flask, request, jsonify, render_template
import pickle
app = Flask(__name__)
# Load the trained model once at import time.
# FIX: the original `pickle.load(open(...))` leaked the file handle; the
# context manager guarantees it is closed.
with open('model.pkl', 'rb') as model_file:
    model = pickle.load(model_file)
@app.route('/')
def home():
    # Landing page with the input form.
    return render_template('index.html')
@app.route('/predict', methods=['POST'])
def predict():
    '''
    Render the predicted selling price on the HTML GUI.

    Reads every form field as an int (in form order), feeds them to the
    model as a single feature row, and rounds the prediction to a whole
    dollar amount.
    '''
    int_features = [int(x) for x in request.form.values()]
    # Model expects a 2-D array: one row of features.
    final_features = [np.array(int_features)]
    prediction = model.predict(final_features)
    output = round(prediction[0])
    return render_template('index.html', prediction_text='Recommended selling price should be ${}'.format(output))
@app.route('/predict_api', methods=['POST'])
def predict_api():
    '''
    JSON endpoint for direct API calls through a request.

    NOTE(review): feature order follows the JSON object's value order, so
    callers must send keys in training-column order — confirm with clients.
    '''
    data = request.get_json(force=True)
    prediction = model.predict([np.array(list(data.values()))])
    output = round(prediction[0])
    return jsonify(output)
if __name__ == "__main__":
    # Development server only; use a production WSGI server for deployment.
    app.run(debug=True)
|
[
"zhitat93@hotmail.com"
] |
zhitat93@hotmail.com
|
6c64b3c39f941e15df6d0cd922abb53fb30147e6
|
31c6439a8e1d9f7f7a1a854197305c2da9c33a00
|
/prediction_code/sentiment_rnn_pred.py
|
fbdfc56ae927421d4bd6188656a50f146e3498af
|
[] |
no_license
|
ShehriyarShariq/AI-Project
|
f3abe0a15d581c8df4e6acbe7c8a41eea430670f
|
b1bcaa1756ac5931862a7f3262b8c807a8e4a3ee
|
refs/heads/main
| 2023-05-14T15:49:53.587208
| 2021-06-10T19:32:20
| 2021-06-10T19:32:20
| 375,682,259
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,212
|
py
|
import pandas as pd
import numpy as np
import keras
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Flatten
from keras.layers import LSTM
from keras.layers.embeddings import Embedding
from keras.layers import Bidirectional
from keras.preprocessing import sequence
from keras.layers import Dropout
import h5py
import utility_functions as uf
from keras.models import model_from_json
from keras.models import load_model
import json
from nltk.tokenize import RegexpTokenizer
weight_path = '../model/best_model.hdf5'
# Restore the trained sentiment model (architecture + weights) from disk.
prd_model = load_model(weight_path)
prd_model.summary()
# FIX: close the vocabulary file deterministically instead of leaking the
# handle opened inline in the json.load() call.
with open("../Data/word_idx.txt") as word_idx_file:
    word_idx = json.load(word_idx_file)
def get_sentiment_DL(prd_model, data, word_idx):
    """Score the sentiment of ``data`` with the trained RNN.

    :param prd_model: trained Keras model producing 10 output bands.
    :param data: raw text to score.
    :param word_idx: vocabulary mapping lower-cased word -> index; unknown
        words map to 0.
    :returns: tuple ``(weighted_score, single_score)``, both in [0, 1]:
        ``single_score`` is the argmax band / 10; ``weighted_score`` is the
        weighted mean of the three most likely bands / 10.
    """
    import re  # local import keeps the module's public dependencies unchanged

    # Split the sentence into word tokens, dropping punctuation; equivalent
    # to nltk's RegexpTokenizer(r'\w+').tokenize(data) but stdlib-only.
    data_sample_list = re.findall(r'\w+', data)

    # Map each word to its vocabulary index (0 for out-of-vocabulary words).
    data_index_np = np.array(
        [word_idx[word.lower()] if word.lower() in word_idx else 0
         for word in data_sample_list])

    # Pad (or truncate) to the fixed sequence length the model was trained
    # on. FIX: the original crashed on inputs longer than 56 tokens.
    data_index_np = data_index_np[:56]
    padded_array = np.zeros(56)
    padded_array[:data_index_np.shape[0]] = data_index_np
    live_list_np = np.asarray([padded_array.astype(int)])

    # BUG FIX: the original called the undefined name `trained_model` here
    # instead of the `prd_model` parameter.
    score = prd_model.predict(live_list_np, batch_size=1, verbose=0)

    # Single band: most likely of the 10 bands, scaled into [0, 1].
    single_score = np.round(np.argmax(score) / 10, decimals=2)

    # Weighted score of the top 3 bands.
    top_3_index = np.argsort(score)[0][-3:]
    top_3_scores = score[0][top_3_index]
    top_3_weights = top_3_scores / np.sum(top_3_scores)
    single_score_dot = np.round(np.dot(top_3_index, top_3_weights) / 10, decimals=2)

    return single_score_dot, single_score
text_data = "I love Artificial Intelligence!!"
# Deep Learning
# Smoke test: score a sample sentence with the restored model.
sentiment_score = get_sentiment_DL(prd_model, text_data, word_idx)
print(str(sentiment_score) + " for \"" + text_data + "\"")
|
[
"shehriyarshariq@gmail.com"
] |
shehriyarshariq@gmail.com
|
7b57c6b8f00aee7146f0fe59c37715e1d98abd23
|
360558c34098ef95077e70a318cda7cb3895c6d9
|
/tests/test_observable/test_windowwithtimeorcount.py
|
266b08416b4ddd813c1b2536d83e66bbad25aa6f
|
[
"Apache-2.0"
] |
permissive
|
AlexMost/RxPY
|
8bcccf04fb5a0bab171aaec897e909ab8098b117
|
05cb14c72806dc41e243789c05f498dede11cebd
|
refs/heads/master
| 2021-01-15T07:53:20.515781
| 2016-03-04T04:53:10
| 2016-03-04T04:53:10
| 53,108,280
| 0
| 1
| null | 2016-03-04T04:50:00
| 2016-03-04T04:49:59
| null |
UTF-8
|
Python
| false
| false
| 3,316
|
py
|
import unittest
from datetime import timedelta
from rx import Observable
from rx.testing import TestScheduler, ReactiveTest, is_prime, MockDisposable
from rx.disposables import Disposable, SerialDisposable
# Module-level shorthands for the ReactiveTest message/subscription factories
# used throughout the test class below.
on_next = ReactiveTest.on_next
on_completed = ReactiveTest.on_completed
on_error = ReactiveTest.on_error
subscribe = ReactiveTest.subscribe
subscribed = ReactiveTest.subscribed
disposed = ReactiveTest.disposed
created = ReactiveTest.created
class TestWindowWithTime(unittest.TestCase):
    """Behavioral tests for Observable.window_with_time_or_count."""

    @staticmethod
    def _tagged_windows(xs, scheduler):
        """Window xs by (70 ticks | 3 items), tag each element with its
        window index as "<window> <value>", and merge back into one stream."""
        def projection(w, i):
            def inner_proj(x):
                return "%s %s" % (i, x)
            return w.map(inner_proj)
        return xs.window_with_time_or_count(70, 3, scheduler).map(projection).merge_observable()

    def test_window_with_time_or_count_basic(self):
        scheduler = TestScheduler()
        xs = scheduler.create_hot_observable(
            on_next(205, 1), on_next(210, 2), on_next(240, 3), on_next(280, 4),
            on_next(320, 5), on_next(350, 6), on_next(370, 7), on_next(420, 8),
            on_next(470, 9), on_completed(600))
        results = scheduler.start(lambda: self._tagged_windows(xs, scheduler))
        results.messages.assert_equal(
            on_next(205, "0 1"), on_next(210, "0 2"), on_next(240, "0 3"),
            on_next(280, "1 4"), on_next(320, "2 5"), on_next(350, "2 6"),
            on_next(370, "2 7"), on_next(420, "3 8"), on_next(470, "4 9"),
            on_completed(600))
        xs.subscriptions.assert_equal(subscribe(200, 600))

    def test_window_with_time_or_count_error(self):
        ex = 'ex'
        scheduler = TestScheduler()
        xs = scheduler.create_hot_observable(
            on_next(205, 1), on_next(210, 2), on_next(240, 3), on_next(280, 4),
            on_next(320, 5), on_next(350, 6), on_next(370, 7), on_next(420, 8),
            on_next(470, 9), on_error(600, ex))
        results = scheduler.start(lambda: self._tagged_windows(xs, scheduler))
        results.messages.assert_equal(
            on_next(205, "0 1"), on_next(210, "0 2"), on_next(240, "0 3"),
            on_next(280, "1 4"), on_next(320, "2 5"), on_next(350, "2 6"),
            on_next(370, "2 7"), on_next(420, "3 8"), on_next(470, "4 9"),
            on_error(600, ex))
        xs.subscriptions.assert_equal(subscribe(200, 600))

    def test_window_with_time_or_count_disposed(self):
        scheduler = TestScheduler()
        xs = scheduler.create_hot_observable(
            on_next(205, 1), on_next(210, 2), on_next(240, 3), on_next(280, 4),
            on_next(320, 5), on_next(350, 6), on_next(370, 7), on_next(420, 8),
            on_next(470, 9), on_completed(600))
        results = scheduler.start(lambda: self._tagged_windows(xs, scheduler),
                                  disposed=370)
        results.messages.assert_equal(
            on_next(205, "0 1"), on_next(210, "0 2"), on_next(240, "0 3"),
            on_next(280, "1 4"), on_next(320, "2 5"), on_next(350, "2 6"),
            on_next(370, "2 7"))
        xs.subscriptions.assert_equal(subscribe(200, 370))
|
[
"dag@brattli.net"
] |
dag@brattli.net
|
88316ac5704d66e90cf23a4f9bf3cc8f3441e3df
|
50948d4cb10dcb1cc9bc0355918478fb2841322a
|
/sdk/cognitiveservices/azure-cognitiveservices-formrecognizer/azure/cognitiveservices/formrecognizer/form_recognizer_client.py
|
f080824c5befddc8650458e653398bd431232f5c
|
[
"MIT"
] |
permissive
|
xiafu-msft/azure-sdk-for-python
|
de9cd680b39962702b629a8e94726bb4ab261594
|
4d9560cfd519ee60667f3cc2f5295a58c18625db
|
refs/heads/master
| 2023-08-12T20:36:24.284497
| 2019-05-22T00:55:16
| 2019-05-22T00:55:16
| 187,986,993
| 1
| 0
|
MIT
| 2020-10-02T01:17:02
| 2019-05-22T07:33:46
|
Python
|
UTF-8
|
Python
| false
| false
| 17,235
|
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.service_client import SDKClient
from msrest import Configuration, Serializer, Deserializer
from .version import VERSION
from msrest.pipeline import ClientRawResponse
from . import models
class FormRecognizerClientConfiguration(Configuration):
    # AutoRest-generated configuration class — regenerate rather than edit.
    """Configuration for FormRecognizerClient
    Note that all parameters used to create this instance are saved as instance
    attributes.
    :param endpoint: Supported Cognitive Services endpoints (protocol and
     hostname, for example: https://westus2.api.cognitive.microsoft.com).
    :type endpoint: str
    :param credentials: Subscription credentials which uniquely identify
     client subscription.
    :type credentials: None
    """

    def __init__(
            self, endpoint, credentials):

        # Fail fast on missing required parameters.
        if endpoint is None:
            raise ValueError("Parameter 'endpoint' must not be None.")
        if credentials is None:
            raise ValueError("Parameter 'credentials' must not be None.")
        # {Endpoint} is substituted per-request from self.endpoint by msrest.
        base_url = '{Endpoint}/formrecognizer/v1.0-preview'

        super(FormRecognizerClientConfiguration, self).__init__(base_url)

        self.add_user_agent('azure-cognitiveservices-formrecognizer/{}'.format(VERSION))

        self.endpoint = endpoint
        self.credentials = credentials
class FormRecognizerClient(SDKClient):
    """Extracts information from forms and images into structured data based on a model created by a set of representative training forms.

    NOTE: this class is generated by AutoRest; hand edits will be lost on
    regeneration. Every operation follows the same pattern: build URL /
    params / headers, send via the msrest pipeline, raise
    ErrorResponseException on any non-success status, then deserialize.

    :ivar config: Configuration for client.
    :vartype config: FormRecognizerClientConfiguration

    :param endpoint: Supported Cognitive Services endpoints (protocol and
     hostname, for example: https://westus2.api.cognitive.microsoft.com).
    :type endpoint: str
    :param credentials: Subscription credentials which uniquely identify
     client subscription.
    :type credentials: None
    """

    def __init__(
            self, endpoint, credentials):

        self.config = FormRecognizerClientConfiguration(endpoint, credentials)
        super(FormRecognizerClient, self).__init__(self.config.credentials, self.config)

        # Collect every model class from the generated `models` module so the
        # (de)serializers can resolve type names like 'TrainResult' at runtime.
        client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
        self.api_version = '1.0-preview'
        self._serialize = Serializer(client_models)
        self._deserialize = Deserializer(client_models)

    def train_custom_model(
            self, source, custom_headers=None, raw=False, **operation_config):
        """Train Model.

        The train request must include a source parameter that is either an
        externally accessible Azure Storage blob container Uri (preferably a
        Shared Access Signature Uri) or valid path to a data folder in a
        locally mounted drive. When local paths are specified, they must follow
        the Linux/Unix path format and be an absolute path rooted to the input
        mount configuration
        setting value e.g., if '{Mounts:Input}' configuration setting value is
        '/input' then a valid source path would be '/input/contosodataset'. All
        data to be trained are expected to be under the source. Models are
        trained using documents that are of the following content type -
        'application/pdf', 'image/jpeg' and 'image/png'."
        Other content is ignored when training a model.

        :param source: Get or set source path.
        :type source: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: TrainResult or ClientRawResponse if raw=true
        :rtype: ~azure.cognitiveservices.formrecognizer.models.TrainResult or
         ~msrest.pipeline.ClientRawResponse
        :raises:
         :class:`ErrorResponseException<azure.cognitiveservices.formrecognizer.models.ErrorResponseException>`
        """
        train_request = models.TrainRequest(source=source)

        # Construct URL
        url = self.train_custom_model.metadata['url']
        path_format_arguments = {
            'Endpoint': self._serialize.url("self.config.endpoint", self.config.endpoint, 'str', skip_quote=True)
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}

        # Construct headers
        header_parameters = {}
        header_parameters['Accept'] = 'application/json'
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if custom_headers:
            header_parameters.update(custom_headers)

        # Construct body (serialized from the typed TrainRequest model)
        body_content = self._serialize.body(train_request, 'TrainRequest')

        # Construct and send request
        request = self._client.post(url, query_parameters, header_parameters, body_content)
        response = self._client.send(request, stream=False, **operation_config)

        if response.status_code not in [200]:
            raise models.ErrorResponseException(self._deserialize, response)

        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('TrainResult', response)

        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response

        return deserialized
    # Route template consumed by the URL construction above.
    train_custom_model.metadata = {'url': '/custom/train'}

    def get_extracted_keys(
            self, id, custom_headers=None, raw=False, **operation_config):
        """Get Keys.

        Use the API to retrieve the keys that were
        extracted by the specified model.

        :param id: Model identifier.
        :type id: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: KeysResult or ClientRawResponse if raw=true
        :rtype: ~azure.cognitiveservices.formrecognizer.models.KeysResult or
         ~msrest.pipeline.ClientRawResponse
        :raises:
         :class:`ErrorResponseException<azure.cognitiveservices.formrecognizer.models.ErrorResponseException>`
        """
        # Construct URL
        url = self.get_extracted_keys.metadata['url']
        path_format_arguments = {
            'Endpoint': self._serialize.url("self.config.endpoint", self.config.endpoint, 'str', skip_quote=True),
            'id': self._serialize.url("id", id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}

        # Construct headers
        header_parameters = {}
        header_parameters['Accept'] = 'application/json'
        if custom_headers:
            header_parameters.update(custom_headers)

        # Construct and send request
        request = self._client.get(url, query_parameters, header_parameters)
        response = self._client.send(request, stream=False, **operation_config)

        if response.status_code not in [200]:
            raise models.ErrorResponseException(self._deserialize, response)

        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('KeysResult', response)

        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response

        return deserialized
    get_extracted_keys.metadata = {'url': '/custom/models/{id}/keys'}

    def get_custom_models(
            self, custom_headers=None, raw=False, **operation_config):
        """Get Models.

        Get information about all trained models.

        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: ModelsResult or ClientRawResponse if raw=true
        :rtype: ~azure.cognitiveservices.formrecognizer.models.ModelsResult or
         ~msrest.pipeline.ClientRawResponse
        :raises:
         :class:`ErrorResponseException<azure.cognitiveservices.formrecognizer.models.ErrorResponseException>`
        """
        # Construct URL
        url = self.get_custom_models.metadata['url']
        path_format_arguments = {
            'Endpoint': self._serialize.url("self.config.endpoint", self.config.endpoint, 'str', skip_quote=True)
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}

        # Construct headers
        header_parameters = {}
        header_parameters['Accept'] = 'application/json'
        if custom_headers:
            header_parameters.update(custom_headers)

        # Construct and send request
        request = self._client.get(url, query_parameters, header_parameters)
        response = self._client.send(request, stream=False, **operation_config)

        if response.status_code not in [200]:
            raise models.ErrorResponseException(self._deserialize, response)

        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('ModelsResult', response)

        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response

        return deserialized
    get_custom_models.metadata = {'url': '/custom/models'}

    def get_custom_model(
            self, id, custom_headers=None, raw=False, **operation_config):
        """Get Model.

        Get information about a model.

        :param id: Model identifier.
        :type id: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: ModelResult or ClientRawResponse if raw=true
        :rtype: ~azure.cognitiveservices.formrecognizer.models.ModelResult or
         ~msrest.pipeline.ClientRawResponse
        :raises:
         :class:`ErrorResponseException<azure.cognitiveservices.formrecognizer.models.ErrorResponseException>`
        """
        # Construct URL
        url = self.get_custom_model.metadata['url']
        path_format_arguments = {
            'Endpoint': self._serialize.url("self.config.endpoint", self.config.endpoint, 'str', skip_quote=True),
            'id': self._serialize.url("id", id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}

        # Construct headers
        header_parameters = {}
        header_parameters['Accept'] = 'application/json'
        if custom_headers:
            header_parameters.update(custom_headers)

        # Construct and send request
        request = self._client.get(url, query_parameters, header_parameters)
        response = self._client.send(request, stream=False, **operation_config)

        if response.status_code not in [200]:
            raise models.ErrorResponseException(self._deserialize, response)

        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('ModelResult', response)

        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response

        return deserialized
    get_custom_model.metadata = {'url': '/custom/models/{id}'}

    def delete_custom_model(
            self, id, custom_headers=None, raw=False, **operation_config):
        """Delete Model.

        Delete model artifacts.

        :param id: The identifier of the model to delete.
        :type id: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: None or ClientRawResponse if raw=true
        :rtype: None or ~msrest.pipeline.ClientRawResponse
        :raises:
         :class:`ErrorResponseException<azure.cognitiveservices.formrecognizer.models.ErrorResponseException>`
        """
        # Construct URL
        url = self.delete_custom_model.metadata['url']
        path_format_arguments = {
            'Endpoint': self._serialize.url("self.config.endpoint", self.config.endpoint, 'str', skip_quote=True),
            'id': self._serialize.url("id", id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}

        # Construct headers
        header_parameters = {}
        if custom_headers:
            header_parameters.update(custom_headers)

        # Construct and send request
        request = self._client.delete(url, query_parameters, header_parameters)
        response = self._client.send(request, stream=False, **operation_config)

        # Delete returns 204 No Content on success; there is no body.
        if response.status_code not in [204]:
            raise models.ErrorResponseException(self._deserialize, response)

        if raw:
            client_raw_response = ClientRawResponse(None, response)
            return client_raw_response
    delete_custom_model.metadata = {'url': '/custom/models/{id}'}

    def analyze_with_custom_model(
            self, id, form_stream, keys=None, custom_headers=None, raw=False, **operation_config):
        """Analyze Form.

        The document to analyze must be of a supported content type -
        'application/pdf', 'image/jpeg' or 'image/png'. The response contains
        not just the extracted information of the analyzed form but also
        information about content that was not extracted along with a reason.

        :param id: Model Identifier to analyze the document with.
        :type id: str
        :param form_stream: A pdf document or image (jpg,png) file to analyze.
        :type form_stream: Generator
        :param keys: An optional list of known keys to extract the values for.
        :type keys: list[str]
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: AnalyzeResult or ClientRawResponse if raw=true
        :rtype: ~azure.cognitiveservices.formrecognizer.models.AnalyzeResult
         or ~msrest.pipeline.ClientRawResponse
        :raises:
         :class:`ErrorResponseException<azure.cognitiveservices.formrecognizer.models.ErrorResponseException>`
        """
        # Construct URL
        url = self.analyze_with_custom_model.metadata['url']
        path_format_arguments = {
            'Endpoint': self._serialize.url("self.config.endpoint", self.config.endpoint, 'str', skip_quote=True),
            'id': self._serialize.url("id", id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}
        if keys is not None:
            # keys are sent as a single comma-separated query value
            query_parameters['keys'] = self._serialize.query("keys", keys, '[str]', div=',')

        # Construct headers
        header_parameters = {}
        header_parameters['Accept'] = 'application/json'
        header_parameters['Content-Type'] = 'multipart/form-data'
        if custom_headers:
            header_parameters.update(custom_headers)

        # Construct form data (the document bytes go in the multipart body)
        form_data_content = {
            'form_stream': form_stream,
        }

        # Construct and send request
        request = self._client.post(url, query_parameters, header_parameters, form_content=form_data_content)
        response = self._client.send(request, stream=False, **operation_config)

        if response.status_code not in [200]:
            raise models.ErrorResponseException(self._deserialize, response)

        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('AnalyzeResult', response)

        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response

        return deserialized
    analyze_with_custom_model.metadata = {'url': '/custom/models/{id}/analyze'}
|
[
"laurent.mazuel@gmail.com"
] |
laurent.mazuel@gmail.com
|
e2c63fd44222cfa6dd178d152e811377be48d2ef
|
25873da962b0acdcf2c46b60695866d29008c11d
|
/test/programrtest/aiml_tests/learn_tests/test_learn_aiml.py
|
face1b7f8aacdcafc02f4548da2466da762b9c4a
|
[] |
no_license
|
LombeC/program-r
|
79f81fa82a617f053ccde1115af3344369b1cfa5
|
a7eb6820696a2e5314d29f8d82aaad45a0dc0362
|
refs/heads/master
| 2022-12-01T14:40:40.208360
| 2020-08-10T21:10:30
| 2020-08-10T21:10:30
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,303
|
py
|
import unittest
import os
from programr.context import ClientContext
from programrtest.aiml_tests.client import TestClient
class LearnTestClient(TestClient):
    """Test client whose brain loads its AIML files from this test's directory."""

    def __init__(self):
        super(LearnTestClient, self).__init__()

    def load_configuration(self, arguments):
        """Load the base configuration, then point the AIML file list here."""
        super(LearnTestClient, self).load_configuration(arguments)
        aiml_files = self.configuration.client_configuration.brain_config[0].brain_config[0].files.aiml_files
        aiml_files._files = [os.path.dirname(__file__)]
class LearnAIMLTests(unittest.TestCase):
    """Exercises the AIML learn behaviour: taught facts must be recallable."""

    def setUp(self):
        client = LearnTestClient()
        self._client_context = client.create_client_context("testid")

    def _ask(self, question, expected):
        """Ask one question and assert the bot's exact reply.

        Factored out of the test methods, which previously repeated the same
        ask / assertIsNotNone / assertEqual triple twelve times.
        """
        response = self._client_context.bot.ask_question(self._client_context, question)
        self.assertIsNotNone(response)
        self.assertEqual(response, expected)

    def test_learn(self):
        self._ask("MY NAME IS FRED", "OK, I will remember your name is FRED")
        self._ask("WHAT IS MY NAME", "YOUR NAME IS FRED")

    def test_learn_x_is_y(self):
        # Teach three independent facts, then recall each one.
        self._ask("LEARN THE SUN IS HOT", "OK, I will remember THE SUN is HOT")
        self._ask("LEARN THE SKY IS BLUE", "OK, I will remember THE SKY is BLUE")
        self._ask("LEARN THE MOON IS GREY", "OK, I will remember THE MOON is GREY")
        self._ask("WHAT IS THE SUN", "HOT")
        self._ask("WHAT IS THE SKY", "BLUE")
        self._ask("WHAT IS THE MOON", "GREY")
|
[
"hilbert.cantor@gmail.com"
] |
hilbert.cantor@gmail.com
|
3738edf52791abeab271273732441f1ff83dee01
|
6d6c4a6ddda550ceac91408dcec54143e36d0db9
|
/src/model/test.py
|
15eb202b007ca339ed69c541e18a60c6eb45e7d9
|
[
"WTFPL"
] |
permissive
|
smzhao/fpage
|
60c4c84bec9c2c1804308e02fc65f86d0a2eb12f
|
0dec9546b1a1ba74e4f688626b1e66fe5439cef1
|
refs/heads/master
| 2021-01-19T18:59:09.678463
| 2015-05-17T12:48:39
| 2015-05-17T12:48:39
| 35,864,157
| 1
| 0
| null | 2015-05-19T06:43:55
| 2015-05-19T06:43:55
| null |
UTF-8
|
Python
| false
| false
| 264
|
py
|
# coding:utf-8
from model import BaseModel
from sqlalchemy import Column, Integer, String, Float, ForeignKey, Boolean
class Test(BaseModel):
    """Minimal example ORM model mapped to the 'test' table."""
    __tablename__ = 'test'
    # Auto-incrementing surrogate primary key.
    id = Column(Integer, primary_key=True, autoincrement=True)
    # Free-form text payload; length is unbounded at the schema level.
    test = Column(String)
|
[
"fy0748@gmail.com"
] |
fy0748@gmail.com
|
f024156d78d09ee8e5f5f354df9ed6ad177785c7
|
2d14f56f474db7b91b8ca9bf56a67568b3af2e4c
|
/mdp/agents/per_agent.py
|
a82cda77fa4ebf246fb1c9d03234fa4cf0e14845
|
[] |
no_license
|
movefast/non-uniform-sampling
|
5c6876f791055a7523a35e48ff78603a5f96c4c9
|
d2f83281f7fe18832c294608a4474a9e605101d1
|
refs/heads/main
| 2023-04-16T09:05:27.917838
| 2021-04-21T02:58:47
| 2021-04-21T02:58:47
| 315,916,514
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 11,489
|
py
|
import agent
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from mdp import autograd_hacks
from mdp.prioritized_memory import Memory
from mdp.replay_buffer import Transition
# Shared loss used by the agents below.
criterion = torch.nn.MSELoss()
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# NOTE(review): this line overrides the CUDA selection above and forces CPU
# for every run — confirm it is intentional before enabling GPU experiments.
device = torch.device("cpu")
class SimpleNN(nn.Module):
    """Two-layer bias-free MLP mapping a state feature to action values.

    A commented-out single linear layer is kept as an alternative.
    NOTE: the attribute is named ``tanh`` but actually holds a ReLU; the
    name is preserved for compatibility with existing checkpoints/usages.
    """

    def __init__(self, input_size, output_size):
        super(SimpleNN, self).__init__()
        hidden_size = input_size // 2 + 1
        self.tanh = nn.ReLU()
        # 2-layer nn
        self.i2h = nn.Linear(input_size, hidden_size, bias=False)
        self.h2o = nn.Linear(hidden_size, output_size, bias=False)
        # linear
        # self.i2o = nn.Linear(input_size, output_size, bias=False)

    def forward(self, x):
        # 2-layer nn: input -> hidden -> ReLU -> output
        hidden = self.tanh(self.i2h(x))
        return self.h2o(hidden)
        # linear
        # return self.i2o(x)
class LinearAgent(agent.BaseAgent):
    """Epsilon-greedy Q-learning agent backed by a small neural network and a
    prioritized experience-replay buffer (``Memory``).

    States arrive as ``(index, is_door)`` tuples and are one-hot encoded.
    NOTE(review): ``self.per_power`` is read in ``batch_train`` but never
    assigned in this class — presumably set by a subclass or the experiment
    harness; verify before running this class directly.
    """

    def agent_init(self, agent_init_info):
        """Setup for the agent called when the experiment first starts.

        Args:
            agent_init_info (dict), the parameters used to initialize the agent. The dictionary contains:
            {
                num_states (int): The number of states,
                num_actions (int): The number of actions,
                epsilon (float): The epsilon parameter for exploration,
                step_size (float): The step-size,
                discount (float): The discount factor,
            }
        """
        # Store the parameters provided in agent_init_info.
        self.num_actions = agent_init_info["num_actions"]
        self.num_states = agent_init_info["num_states"]
        self.epsilon = agent_init_info["epsilon"]
        self.step_size = agent_init_info["step_size"]
        # Gradient steps taken per call to batch_train.
        self.num_meta_update = agent_init_info["num_meta_update"]
        self.discount = agent_init_info["discount"]
        self.rand_generator = np.random.RandomState(agent_init_info["seed"])
        self.batch_size = agent_init_info.get("batch_size", 10)
        self.buffer_size = agent_init_info.get("buffer_size", 1000)
        # PER hyper-parameters: alpha shapes priorities, beta the IS weights.
        self.buffer_alpha = agent_init_info["buffer_alpha"]
        self.buffer_beta = agent_init_info["buffer_beta"]
        self.beta_increment = agent_init_info.get("beta_increment", 0.001)
        # Whether to apply importance-sampling correction to the loss.
        self.correction = agent_init_info["correction"]
        # recency_bias=True inserts new transitions with the buffer's current
        # max priority instead of their TD error.
        self.recency_bias = agent_init_info.get("recency_bias", True)
        # use_grad_norm=True prioritizes by per-sample gradient norm
        # (via autograd_hacks) instead of TD error.
        self.use_grad_norm = agent_init_info.get("grad_norm", False)
        self.p = agent_init_info.get("p", 0.1)
        self.nn = SimpleNN(self.num_states, self.num_actions).to(device)
        self.weights_init(self.nn)
        # Install hooks so per-sample gradients (param.grad1) are available.
        autograd_hacks.add_hooks(self.nn)
        self.target_nn = SimpleNN(self.num_states, self.num_actions).to(device)
        self.update_target()
        self.optimizer = torch.optim.Adam(self.nn.parameters(), lr=self.step_size)
        self.buffer = Memory(self.buffer_size, self.buffer_alpha, self.buffer_beta, self.beta_increment, self.p)
        self.tau = 0.5
        self.updates = 0
        # Running count of how often each state appeared in sampled batches.
        self.sampled_state = np.zeros(self.num_states)

    def weights_init(self, m):
        """Xavier-initialize the weights of every Linear submodule.

        NOTE: torch.nn.init.xavier_uniform is deprecated in favour of
        xavier_uniform_; kept as-is here.
        """
        classname = m.__class__.__name__
        if classname.find('Linear') != -1:
            torch.nn.init.xavier_uniform(m.weight)

    def get_state_feature(self, state):
        """One-hot encode a ``(state_index, is_door)`` tuple.

        Returns a (1, num_states) float tensor on ``device``; the is_door
        flag is discarded by this agent.
        """
        state, is_door = state
        state = np.eye(self.num_states)[state]
        state = torch.Tensor(state).to(device)[None, ...]
        return state

    def agent_start(self, state):
        """The first method called when the episode starts, called after
        the environment starts.

        Args:
            state (int): the state from the
                environment's evn_start function.
        Returns:
            action (int): the first action the agent takes.
        """
        # Choose action using epsilon greedy.
        # Per-episode scratch; appears unused elsewhere in this class.
        self.is_door = None
        self.feature = None
        state = self.get_state_feature(state)
        with torch.no_grad():
            current_q = self.nn(state)
        current_q.squeeze_()
        if self.rand_generator.rand() < self.epsilon:
            action = self.rand_generator.randint(self.num_actions)
        else:
            action = self.argmax(current_q)
        # Remember Q(s, a) so the TD error can be formed on the next step.
        self.prev_action_value = current_q[action]
        self.prev_state = state
        self.prev_action = action
        self.steps = 0
        return action

    def agent_step(self, reward, state):
        """A step taken by the agent.

        Args:
            reward (float): the reward received for taking the last action taken
            state (int): the state from the
                environment's step based on where the agent ended up after the
                last step.
        Returns:
            action (int): the action the agent is taking.
        """
        # Choose action using epsilon greedy.
        state = self.get_state_feature(state)
        with torch.no_grad():
            current_q = self.nn(state)
        current_q.squeeze_()
        if self.rand_generator.rand() < self.epsilon:
            action = self.rand_generator.randint(self.num_actions)
        else:
            action = self.argmax(current_q)
        # template from fpp_new
        # error = torch.abs(self.prev_action_value - reward - self.discount * target_q.max(1)[0]).item()
        # TD error |Q(s,a) - (r + gamma * max_a' Q(s',a'))| used as priority.
        error = torch.abs(self.prev_action_value - reward - self.discount * current_q.max()).item()
        if self.recency_bias:
            # New transitions get the buffer's current max priority.
            self.buffer.add(self.buffer.maxp(), self.prev_state, self.prev_action, state, action, reward, self.discount)
        else:
            self.buffer.add(error, self.prev_state, self.prev_action, state, action, reward, self.discount)
        self.prev_action_value = current_q[action]
        self.prev_state = state
        self.prev_action = action
        self.steps += 1
        if len(self.buffer) > self.batch_size:
            self.batch_train()
        return action

    def agent_end(self, reward, state, append_buffer=True):
        """Run when the agent terminates.

        Args:
            reward (float): the reward the agent received for entering the
                terminal state.
        """
        state = self.get_state_feature(state)
        if append_buffer:
            # Terminal transition: discount 0 so no bootstrapping from s'.
            if self.recency_bias:
                self.buffer.add(self.buffer.maxp(), self.prev_state, self.prev_action, state, 0, reward, 0)
            else:
                error = torch.abs(self.prev_action_value - reward).item()
                self.buffer.add(error, self.prev_state, self.prev_action, state, 0, reward, 0)
        if len(self.buffer) > self.batch_size:
            self.batch_train()

    def batch_train(self):
        """Sample prioritized minibatches and take num_meta_update SGD steps.

        Targets are SARSA-style: r + gamma * Q(s', a') with the action a'
        actually taken (see the gather on new_action_batch). After each step
        the sampled transitions' priorities are refreshed with either the new
        TD error or (if use_grad_norm) the per-sample gradient norm.
        """
        self.updates += 1
        self.nn.train()
        for _ in range(self.num_meta_update):
            transitions, idxs, is_weight = self.buffer.sample(self.batch_size)
            # Transpose list-of-Transition into Transition-of-lists.
            batch = Transition(*zip(*transitions))
            state_batch = torch.cat(batch.state)
            action_batch = torch.LongTensor(batch.action).view(-1, 1).to(device)
            new_state_batch = torch.cat(batch.new_state)
            new_action_batch = torch.LongTensor(batch.new_action).view(-1, 1).to(device)
            reward_batch = torch.FloatTensor(batch.reward).to(device)
            discount_batch = torch.FloatTensor(batch.discount).to(device)
            # Track which states get sampled (diagnostic only).
            self.sampled_state += state_batch.sum(0).detach().cpu().numpy()
            current_q = self.nn(state_batch)
            q_learning_action_values = current_q.gather(1, action_batch)
            with torch.no_grad():
                # ***
                # new_q = self.target_nn(new_state_batch)
                # Bootstraps from the online network, not the target network.
                new_q = self.nn(new_state_batch)
                # max_q = new_q.max(1)[0]
                # max_q = new_q.mean(1)[0]
                max_q = new_q.gather(1, new_action_batch).squeeze_()
                target = reward_batch
                target += discount_batch * max_q
                target = target.view(-1, 1)
            # if self.updates < 1000:
            #     target = reward_batch.view(-1,1)
            # 1) correct with is weight
            if self.correction:
                temp = F.mse_loss(q_learning_action_values, target, reduction='none')
                # Importance-sampling-weighted sum of per-sample losses.
                loss = torch.Tensor(is_weight).to(device) @ temp
            # 2) no is correction
            else:
                loss = criterion(q_learning_action_values, target)
            if self.use_grad_norm:
                errors = [0] * self.batch_size
                self.optimizer.zero_grad()
                loss.backward()
                # Per-sample gradients become available as param.grad1.
                autograd_hacks.compute_grad1(self.nn)
                for param in self.nn.parameters():
                    for i in range(self.batch_size):
                        errors[i] += param.grad1[i].norm(2).item() ** 2
                    param.grad.data.clamp_(-1, 1)
                self.optimizer.step()
                autograd_hacks.clear_backprops(self.nn)
                # if self.updates % 100 == 0:
                #     self.update()
                # Priority = per-sample gradient L2 norm ** per_power.
                for i in range(self.batch_size):
                    self.buffer.update(idxs[i], np.power(errors[i]**.5, self.per_power))
                # errors = torch.abs((q_learning_action_values - target).squeeze_(dim=-1))
                # for i in range(self.batch_size):
                #     # compare the loss term with grad norm
                #     self.optimizer.zero_grad()
                #     errors[i].backward(retain_graph=True)
                #     self.buffer.update(idxs[i], np.power(self.get_grad_norm(), self.per_power))
                # self.optimizer.zero_grad()
                # loss.backward()
                # for param in self.nn.parameters():
                #     param.grad.data.clamp_(-1, 1)
                # self.optimizer.step()
            else:
                with torch.no_grad():
                    # Priority = |TD error| ** per_power.
                    errors = torch.abs((q_learning_action_values - target).squeeze_(dim=-1))
                    for i in range(self.batch_size):
                        self.buffer.update(idxs[i], np.power(errors[i].item(), self.per_power))
                self.optimizer.zero_grad()
                loss.backward()
                for param in self.nn.parameters():
                    param.grad.data.clamp_(-1, 1)
                self.optimizer.step()
                # if self.updates % 100 == 0:
                #     self.update()

    def update(self):
        """Polyak (soft) target-network update with mixing factor tau.

        Currently only reachable from commented-out call sites above.
        """
        # target network update
        for target_param, param in zip(self.target_nn.parameters(), self.nn.parameters()):
            target_param.data.copy_(self.tau * param + (1 - self.tau) * target_param)

    def get_grad_norm(self):
        """Return the global L2 norm of the online network's gradients."""
        total_norm = 0
        for p in self.nn.parameters():
            # import pdb; pdb.set_trace()
            param_norm = p.grad.data.norm(2)
            total_norm += param_norm.item() ** 2
        total_norm = total_norm ** (1. / 2)
        return total_norm

    def update_target(self):
        """Hard-copy the online network's weights into the target network."""
        self.target_nn.load_state_dict(self.nn.state_dict())
# old importance weight update code backup
# with torch.no_grad():
# errors = torch.abs((q_learning_action_values - target).squeeze_(dim=-1))
# for i in range(self.batch_size):
# compare the loss term with grad norm
# self.optimizer.zero_grad()
# errors[i].backward(retain_graph=True)
# print(self.get_grad_norm(), errors[i].item())
# self.buffer.update(idxs[i], np.power(self.get_grad_norm(), self.per_power))
# self.buffer.update(idxs[i], np.power(errors[i].item(), self.per_power))
|
[
"liderek269@gmail.com"
] |
liderek269@gmail.com
|
601d53a303e1b4fddbb8255b991385379a649658
|
5e6976a4531a13bd6fc31523bd55439852a2c487
|
/pycore_P2chain_graph.py
|
44246de0cab77fae9f629f4036412da9fbbe216e
|
[] |
no_license
|
colinetzel/Pycore-Experiments
|
05a93c143ee2ce386dd08f186783ed3207b52dd5
|
47dd0418e4893c42dbf97d023b31bd7ceb604954
|
refs/heads/master
| 2021-05-06T05:26:08.293207
| 2017-12-22T06:12:26
| 2017-12-22T06:12:26
| 115,082,965
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,063
|
py
|
#!/usr/bin/python
# Python version 2.7.12
import argparse

import pycore_experiment_functions as functions

# ---------------------------------------------------------------------------
# Command-line interface
# ---------------------------------------------------------------------------
parser = argparse.ArgumentParser()
parser.add_argument("-o", type=str, default=None,
                    help="Output filename.")
parser.add_argument("-csv", type=str, default="data.csv",
                    help="Csv filename for raw output.")
parser.add_argument("-x", type=str, default="bandwidth",
                    choices=["bandwidth", "delay", "jitter", "loss", "duplicate"],
                    help="Value to plot on x-axis")  # add arg for ALL linkconfig values
parser.add_argument("-i", type=float, default=1,
                    help="Interval between x-values (-1 is special log scale and selected by default)")
parser.add_argument("--length", type=int, default=10,
                    help="Experiment length (number of points)")
parser.add_argument("--bandwidth", type=float, default=1,
                    help="Constant/Initial bandwidth value for run (Default 1 Mb/s).")
parser.add_argument("--delay", type=float, default=0,
                    help="Constant/Initial round-trip delay value for run.")
parser.add_argument("--jitter", type=float, default=0,
                    help="Constant/Initial jitter value for run.")
parser.add_argument("--loss", type=float, default=0,
                    help="Constant/Initial round-trip loss value for run.")
parser.add_argument("--duplicate", type=float, default=0,
                    help="Constant/Initial round-trip duplicate value for run.")
parser.add_argument("--numRuns", type=int, default=1,
                    help="Number of runs to average together for each data point.")
parser.add_argument("--server", action='store_true',
                    help="Use server logging instead of client logging.")

logPath = ""  # Set this to a folder of your choice

args = parser.parse_args()
outfile = args.o
csv = args.csv
xType = args.x
interval = args.i
initBandwidth = args.bandwidth
initJitter = args.jitter
numRuns = args.numRuns
useServer = args.server

# CLI values are round-trip figures; halve them so each of the two links
# carries half.
initDelay = args.delay / 2.0
initLoss = args.loss / 2.0
initDuplicate = args.duplicate / 2.0

# Sentinel interval values select one of two preset x-value sequences.
logScale = (interval == -1)
lossSpecial = (interval == -2)
if logScale:
    experimentLength = 10
elif lossSpecial:
    experimentLength = 8
else:
    experimentLength = args.length

# Build the sequence of x-axis values for the sweep.
if logScale:  # logscale of bandwidth currently
    xVals = [0.1, 0.2, 0.5, 1, 2, 5, 10, 20, 50, 100]  # test
elif lossSpecial:
    xVals = [0.0, 0.05, 0.10, 0.25, 0.5, 1.0, 2.5, 5.0]  # rt loss is twice link loss
elif xType == "bandwidth":
    xVals = [interval * i for i in range(1, experimentLength + 1)]
elif xType in ("delay", "loss", "duplicate"):
    # Round-trip quantities are split across two links, hence the /2.
    xVals = [interval * i / 2.0 for i in range(experimentLength)]
else:
    xVals = [interval * i for i in range(experimentLength)]


def main():
    """Run the sweep with the chosen logging mode, then plot the results."""
    run = functions.runExperiment_serverLog if useServer else functions.runExperiment_clientLog
    run(logPath, csv, xType, initBandwidth, initDelay, initJitter,
        initLoss, initDuplicate, xVals, numRuns)
    functions.plotExperiment(xType, logPath, [csv], xVals, numRuns,
                             experimentLength, outfile, logScale, lossSpecial, useServer)


main()
|
[
"colin.etzel@gmail.com"
] |
colin.etzel@gmail.com
|
bb840b73e198b70d675d30961a0b4435d02ae9a6
|
3e7ea6726692d17172ac897a331f53a587ee2869
|
/Source/Debug/printArr.py
|
22728890733383b21e5046c7a7933e9788291336
|
[] |
no_license
|
hyqiu/ensae_bigdatatools
|
2f57cf7442ff273d69d15fbe647ae917ae58e6ac
|
8d5dae01b105410decff155e864e10d86e9a6258
|
refs/heads/master
| 2021-01-21T14:24:16.054689
| 2017-06-24T05:12:59
| 2017-06-24T05:12:59
| 95,276,148
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 704
|
py
|
import sys
import csv
sys.path.insert(0,'Source/MyFuncs/')
from initArrs import *
from initDirs import *
#m = 5
#SSArr,YYArr,SYArr,GArr = initArrs(m,3,m)
#filePath = 'Data/Arr/Tmp/arr'
def printArrs(SSArr, YYArr, SYArr, GArr, filePath, k):
    """Append all four work arrays to the debug dump at filePath."""
    labeled = (('SSArr', SSArr), ('YYArr', YYArr), ('SYArr', SYArr), ('GArr', GArr))
    for name, arr in labeled:
        printArr(arr, name, filePath, k)
def printArr(arr, arrName, filePath, k):
    """Append a labeled dump of one array to the debug log at filePath.

    Writes a separator line, the iteration number, the array name, a blank
    line, then one line per row of ``arr``, followed by a trailing blank
    line — byte-identical to the output of the old ``print >> f`` version.

    Args:
        arr: iterable of rows; each row is rendered with str().
        arrName: label identifying the array in the log.
        filePath: path of the log file (opened in append mode).
        k: iteration counter to record.
    """
    # Fixes two defects of the original: the file handle was never closed
    # (leak), and Python 2's ``print >> f`` statement made the module fail
    # to even parse under Python 3. write() works identically on both.
    with open(filePath, 'a') as f:
        f.write('-----------------------------------------------------\n')
        f.write('Iteration: ' + str(k) + '\n')
        f.write('Arr: ' + arrName + '\n')
        f.write('\n\n')
        for row in arr:
            f.write(str(row) + '\n')
        f.write('\n\n')
#printArrs(filePath,3)
|
[
"noreply@github.com"
] |
noreply@github.com
|
6fae2188362785f3847d5a721139ac0cff21f2b8
|
a41805fb84fe334166dd3256faaa9488ddc043ff
|
/GUIsubcomponents/plotwindow.py
|
9544d492a842add9112bd16fd82f8857fa3a76ad
|
[
"MIT"
] |
permissive
|
nicholasareynolds/gamut
|
8d8dde261084c2af1c1b4e79ba8c46343f94d13a
|
7119558389f4229b8462e3774237186167402064
|
refs/heads/master
| 2021-07-04T05:18:35.172030
| 2017-09-24T20:26:08
| 2017-09-24T20:26:08
| 104,009,537
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,662
|
py
|
###############################################################################
#
# gamut
# Copyright (C) 2017, Nicholas A. Reynolds
#
# Full License Available in LICENSE file at
# https://github.com/nicholasareynolds/gamut
#
###############################################################################
from PyQt5 import QtCore, QtGui, QtWidgets
import matplotlib
# Select the Qt5 backend before any pyplot import so figures can be
# embedded in the PyQt5 windows below; shrink the default font for the
# small embedded canvases.
matplotlib.rcParams['backend'] = "Qt5Agg"
matplotlib.rcParams['font.size'] = 6
matplotlib.use("Qt5Agg")
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.figure import Figure
class PlotWindow(QtWidgets.QMainWindow):
"""
PlotWindow is a child window to gamut that hosts plot canvases for the
PDF/CDF and the Probability Plots
"""
def __init__(self,
parent,
dist_obj,
dist_name,
plot_type="pplot"):
super().__init__(parent=parent)
self.dist_obj = dist_obj
self.plot_type = plot_type
if self.plot_type == "pplot":
self.plot_canvas = ProbabilityPlot(self,
dist_obj=dist_obj)
elif self.plot_type == "pdfcdf":
self.plot_canvas = PDFCDFPlot(self,
dist_obj=dist_obj)
self.windowWidget = QtWidgets.QWidget(self)
self.setWindowTitle(dist_name)
self.initUI()
def initUI(self):
# --- Declare Items to go in window ---
# Save Button (Save File Dialog)
saveButton_plot = QtWidgets.QPushButton()
saveButton_plot.setText("Save Plot")
# action
saveButton_plot.clicked.connect(self.savePlot)
# Spacers
spacerItem1 = QtWidgets.QSpacerItem(40, 20,
QtWidgets.QSizePolicy.Expanding,
QtWidgets.QSizePolicy.Minimum)
spacerItem2 = QtWidgets.QSpacerItem(40, 20,
QtWidgets.QSizePolicy.Expanding,
QtWidgets.QSizePolicy.Minimum)
# --- Assemble ---
horizontalLayout = QtWidgets.QHBoxLayout()
horizontalLayout.addItem(spacerItem1)
horizontalLayout.addWidget(saveButton_plot)
horizontalLayout.addItem(spacerItem2)
verticalLayout = QtWidgets.QVBoxLayout(self.windowWidget)
verticalLayout.addWidget(self.plot_canvas)
verticalLayout.addLayout(horizontalLayout)
self.windowWidget.setFocus()
self.setCentralWidget(self.windowWidget)
self.show()
def savePlot(self, *args):
"""Save a *.png of the present probability plot"""
fpath = QtWidgets.QFileDialog.getSaveFileName(self.windowWidget,
"Specify destination",
'',
"Portable Networks Graphic (*.png)")[0]
if fpath:
try:
import matplotlib.pyplot as plt
axes = plt.axes()
if self.plot_type == "pplot":
self.dist_obj.create_pplot(axes)
elif self.plot_type == "pdfcdf":
self.dist_obj.plot_pdfcdf(axes)
plt.savefig(fpath, dpi=600)
plt.close()
except:
pass
class PlotCanvas(FigureCanvas):
"""
Surfaces/axes onto which a matplotlib plot is made, and which can be
embedded in a GUI child-window
PlotCanvas is a master class
"""
def __init__(self,
parent=None,
dist_obj=None):
self.dist_obj=dist_obj
fig = Figure()
self.axes = fig.add_subplot(111)
FigureCanvas.__init__(self, fig)
self.setParent(parent)
FigureCanvas.setSizePolicy(self,
QtWidgets.QSizePolicy.Expanding,
QtWidgets.QSizePolicy.Expanding)
FigureCanvas.updateGeometry(self)
self._plot()
self.draw()
class ProbabilityPlot(PlotCanvas):
def _plot(self):
"""
Draw probability plot on provided axes
"""
self.dist_obj.create_pplot(self.axes)
self.draw()
class PDFCDFPlot(PlotCanvas):
def _plot(self):
"""
Call on distribution object to draw combined PDF/CDF on class axes
"""
self.dist_obj.plot_pdfcdf(self.axes)
self.draw()
|
[
"nicholas.a.reynolds@gmail.com"
] |
nicholas.a.reynolds@gmail.com
|
84c635a8c29640c045018265155d9a6e38831710
|
b9a89db051f79b3ca74c328022e8884fbb81d41a
|
/dump_cdorked_config.py
|
c1ad5fb62a28c23e6f6af9d11d929bf2869fa2ae
|
[] |
no_license
|
petdance/scraps
|
ea1f8fab6d0e3597c315b4daa0dce8ee42bd6123
|
b6fa9b71a2329dbdce5a991b1a5c886e283ad258
|
refs/heads/main
| 2023-03-08T22:38:32.522190
| 2023-03-03T16:20:11
| 2023-03-03T16:20:11
| 4,495,364
| 5
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,080
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# This script dumps the content of a shared memory block
# used by Linux/Cdorked.A into a file named httpd_cdorked_config.bin
# when the machine is infected.
#
# Some of the data is encrypted. If your server is infected and you
# would like to help, please send the httpd_cdorked_config.bin
# to our lab for analysis. Thanks!
#
# Marc-Etienne M.Léveillé, leveille at eset.com
#
#
# From http://www.welivesecurity.com/2013/04/26/linuxcdorked-new-apache-backdoor-in-the-wild-serves-blackhole/
from ctypes import *
SHM_SIZE = 6118512
SHM_KEY = 63599
OUTFILE="httpd_cdorked_config.bin"
try:
rt = CDLL('librt.so')
except:
rt = CDLL('librt.so.1')
shmget = rt.shmget
shmget.argtypes = [c_int, c_size_t, c_int]
shmget.restype = c_int
shmat = rt.shmat
shmat.argtypes = [c_int, POINTER(c_void_p), c_int]
shmat.restype = c_void_p
shmid = shmget(SHM_KEY, SHM_SIZE, 0o666)
if shmid < 0:
print "System not infected"
else:
addr = shmat(shmid, None, 0)
f = file(OUTFILE, 'wb')
f.write(string_at(addr,SHM_SIZE))
f.close()
|
[
"andy@petdance.com"
] |
andy@petdance.com
|
cf8a2f4f26fa6630e3ba72060653137675c03cbd
|
2ce65623088ba96ce57501ecbb27bb9468d56456
|
/cms/migrations/0001_initial.py
|
16ef974392f0658dca4506ae341c546e6e077924
|
[] |
no_license
|
mediastandardstrust/valueaddednews
|
15ff67f62a857a8c634b80d20f632d60bfd98791
|
dcef97d99f9245c7b9510a54f9274481cba52d93
|
refs/heads/master
| 2021-01-18T00:23:04.369109
| 2011-01-18T14:12:24
| 2011-01-18T14:12:24
| 747,487
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 725
|
py
|
from south.db import db
from django.db import models
from cms.models import *
class Migration:
def forwards(self, orm):
"Write your forwards migration here"
def backwards(self, orm):
"Write your backwards migration here"
models = {
'cms.page': {
'content': ('models.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('models.AutoField', [], {'primary_key': 'True'}),
'published': ('models.BooleanField', [], {'default': 'False'}),
'slug': ('models.CharField', [], {'max_length': '255'}),
'title': ('models.CharField', [], {'max_length': '255'})
}
}
complete_apps = ['cms']
|
[
"mark@markng.co.uk"
] |
mark@markng.co.uk
|
37423e07b41d1f3ab5e6bb839b8d4732d4a9d304
|
7fd0c4608e32c53fea935ac63cacf66e1a0c971d
|
/Canonical_Monojet/VectorModel/DMsimp_s_spin1_1750_500_800/parameters.py
|
91afdccfe54a967aad9a343a3a55bf02e074ce3e
|
[] |
no_license
|
Quantumapple/MadGraph5_cards
|
285f8a303b04b9745abfc83f5ea4fb06a2922fc9
|
3db368ada01f59bace11b48eab2f58ab40ba29f2
|
refs/heads/master
| 2020-05-02T20:43:23.791641
| 2020-01-17T16:10:46
| 2020-01-17T16:10:46
| 178,199,838
| 0
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 18,298
|
py
|
# This file was automatically created by FeynRules 2.3.7
# Mathematica version: 9.0 for Linux x86 (64-bit) (November 20, 2012)
# Date: Mon 24 Aug 2015 13:37:17
from object_library import all_parameters, Parameter
from function_library import complexconjugate, re, im, csc, sec, acsc, asec, cot
# This is a default parameter object representing 0.
ZERO = Parameter(name = 'ZERO',
nature = 'internal',
type = 'real',
value = '0.0',
texname = '0')
# This is a default parameter object representing the renormalization scale (MU_R).
MU_R = Parameter(name = 'MU_R',
nature = 'external',
type = 'real',
value = 91.188,
texname = '\\text{\\mu_r}',
lhablock = 'LOOP',
lhacode = [1])
# User-defined parameters.
cabi = Parameter(name = 'cabi',
nature = 'external',
type = 'real',
value = 0.227736,
texname = '\\theta _c',
lhablock = 'CKMBLOCK',
lhacode = [ 1 ])
gVXc = Parameter(name = 'gVXc',
nature = 'external',
type = 'real',
value = 0.,
texname = 'g_{\\text{VXc}}',
lhablock = 'DMINPUTS',
lhacode = [ 1 ])
gVXd = Parameter(name = 'gVXd',
nature = 'external',
type = 'real',
value = 0.9999999,
texname = 'g_{\\text{VXd}}',
lhablock = 'DMINPUTS',
lhacode = [ 2 ])
gAXd = Parameter(name = 'gAXd',
nature = 'external',
type = 'real',
value = 1e-99,
texname = 'g_{\\text{AXd}}',
lhablock = 'DMINPUTS',
lhacode = [ 3 ])
gVd11 = Parameter(name = 'gVd11',
nature = 'external',
type = 'real',
value = 0.25,
texname = 'g_{\\text{Vd11}}',
lhablock = 'DMINPUTS',
lhacode = [ 4 ])
gVu11 = Parameter(name = 'gVu11',
nature = 'external',
type = 'real',
value = 0.25,
texname = 'g_{\\text{Vu11}}',
lhablock = 'DMINPUTS',
lhacode = [ 5 ])
gVd22 = Parameter(name = 'gVd22',
nature = 'external',
type = 'real',
value = 0.25,
texname = 'g_{\\text{Vd22}}',
lhablock = 'DMINPUTS',
lhacode = [ 6 ])
gVu22 = Parameter(name = 'gVu22',
nature = 'external',
type = 'real',
value = 0.25,
texname = 'g_{\\text{Vu22}}',
lhablock = 'DMINPUTS',
lhacode = [ 7 ])
gVd33 = Parameter(name = 'gVd33',
nature = 'external',
type = 'real',
value = 0.25,
texname = 'g_{\\text{Vd33}}',
lhablock = 'DMINPUTS',
lhacode = [ 8 ])
gVu33 = Parameter(name = 'gVu33',
nature = 'external',
type = 'real',
value = 0.25,
texname = 'g_{\\text{Vu33}}',
lhablock = 'DMINPUTS',
lhacode = [ 9 ])
gAd11 = Parameter(name = 'gAd11',
nature = 'external',
type = 'real',
value = 1e-99,
texname = 'g_{\\text{Ad11}}',
lhablock = 'DMINPUTS',
lhacode = [ 10 ])
gAu11 = Parameter(name = 'gAu11',
nature = 'external',
type = 'real',
value = 1e-99,
texname = 'g_{\\text{Au11}}',
lhablock = 'DMINPUTS',
lhacode = [ 11 ])
gAd22 = Parameter(name = 'gAd22',
nature = 'external',
type = 'real',
value = 1e-99,
texname = 'g_{\\text{Ad22}}',
lhablock = 'DMINPUTS',
lhacode = [ 12 ])
gAu22 = Parameter(name = 'gAu22',
nature = 'external',
type = 'real',
value = 1e-99,
texname = 'g_{\\text{Au22}}',
lhablock = 'DMINPUTS',
lhacode = [ 13 ])
gAd33 = Parameter(name = 'gAd33',
nature = 'external',
type = 'real',
value = 1e-99,
texname = 'g_{\\text{Ad33}}',
lhablock = 'DMINPUTS',
lhacode = [ 14 ])
gAu33 = Parameter(name = 'gAu33',
nature = 'external',
type = 'real',
value = 1e-99,
texname = 'g_{\\text{Au33}}',
lhablock = 'DMINPUTS',
lhacode = [ 15 ])
gVh = Parameter(name = 'gVh',
nature = 'external',
type = 'real',
value = 0.,
texname = 'g_{\\text{Vh}}',
lhablock = 'DMINPUTS',
lhacode = [ 16 ])
aEWM1 = Parameter(name = 'aEWM1',
nature = 'external',
type = 'real',
value = 127.9,
texname = '\\text{aEWM1}',
lhablock = 'SMINPUTS',
lhacode = [ 1 ])
Gf = Parameter(name = 'Gf',
nature = 'external',
type = 'real',
value = 0.0000116637,
texname = 'G_f',
lhablock = 'SMINPUTS',
lhacode = [ 2 ])
aS = Parameter(name = 'aS',
nature = 'external',
type = 'real',
value = 0.1184,
texname = '\\alpha _s',
lhablock = 'SMINPUTS',
lhacode = [ 3 ])
ymt = Parameter(name = 'ymt',
nature = 'external',
type = 'real',
value = 172,
texname = '\\text{ymt}',
lhablock = 'YUKAWA',
lhacode = [ 6 ])
ymtau = Parameter(name = 'ymtau',
nature = 'external',
type = 'real',
value = 1.777,
texname = '\\text{ymtau}',
lhablock = 'YUKAWA',
lhacode = [ 15 ])
MZ = Parameter(name = 'MZ',
nature = 'external',
type = 'real',
value = 91.1876,
texname = '\\text{MZ}',
lhablock = 'MASS',
lhacode = [ 23 ])
MTA = Parameter(name = 'MTA',
nature = 'external',
type = 'real',
value = 1.777,
texname = '\\text{MTA}',
lhablock = 'MASS',
lhacode = [ 15 ])
MT = Parameter(name = 'MT',
nature = 'external',
type = 'real',
value = 172,
texname = '\\text{MT}',
lhablock = 'MASS',
lhacode = [ 6 ])
MH = Parameter(name = 'MH',
nature = 'external',
type = 'real',
value = 125,
texname = '\\text{MH}',
lhablock = 'MASS',
lhacode = [ 25 ])
MXr = Parameter(name = 'MXr',
nature = 'external',
type = 'real',
value = 10.,
texname = '\\text{MXr}',
lhablock = 'MASS',
lhacode = [ 5000001 ])
MXc = Parameter(name = 'MXc',
nature = 'external',
type = 'real',
value = 10.,
texname = '\\text{MXc}',
lhablock = 'MASS',
lhacode = [ 51 ])
MXd = Parameter(name = 'MXd',
nature = 'external',
type = 'real',
value = 500.0,
texname = '\\text{MXd}',
lhablock = 'MASS',
lhacode = [ 18 ])
MY1 = Parameter(name = 'MY1',
nature = 'external',
type = 'real',
value = 1750,
texname = '\\text{MY1}',
lhablock = 'MASS',
lhacode = [ 55 ])
WZ = Parameter(name = 'WZ',
nature = 'external',
type = 'real',
value = 2.4952,
texname = '\\text{WZ}',
lhablock = 'DECAY',
lhacode = [ 23 ])
WW = Parameter(name = 'WW',
nature = 'external',
type = 'real',
value = 2.085,
texname = '\\text{WW}',
lhablock = 'DECAY',
lhacode = [ 24 ])
WT = Parameter(name = 'WT',
nature = 'external',
type = 'real',
value = 1.50833649,
texname = '\\text{WT}',
lhablock = 'DECAY',
lhacode = [ 6 ])
WH = Parameter(name = 'WH',
nature = 'external',
type = 'real',
value = 0.00407,
texname = '\\text{WH}',
lhablock = 'DECAY',
lhacode = [ 25 ])
#WY1 = Parameter(name = 'WY1',
# nature = 'external',
# type = 'real',
# value = 10.,
# texname = '\\text{WY1}',
# lhablock = 'DECAY',
# lhacode = [ 55 ])
CKM1x1 = Parameter(name = 'CKM1x1',
nature = 'internal',
type = 'complex',
value = 'cmath.cos(cabi)',
texname = '\\text{CKM1x1}')
CKM1x2 = Parameter(name = 'CKM1x2',
nature = 'internal',
type = 'complex',
value = 'cmath.sin(cabi)',
texname = '\\text{CKM1x2}')
CKM2x1 = Parameter(name = 'CKM2x1',
nature = 'internal',
type = 'complex',
value = '-cmath.sin(cabi)',
texname = '\\text{CKM2x1}')
CKM2x2 = Parameter(name = 'CKM2x2',
nature = 'internal',
type = 'complex',
value = 'cmath.cos(cabi)',
texname = '\\text{CKM2x2}')
aEW = Parameter(name = 'aEW',
nature = 'internal',
type = 'real',
value = '1/aEWM1',
texname = '\\alpha _{\\text{EW}}')
G = Parameter(name = 'G',
nature = 'internal',
type = 'real',
value = '2*cmath.sqrt(aS)*cmath.sqrt(cmath.pi)',
texname = 'G')
MW = Parameter(name = 'MW',
nature = 'internal',
type = 'real',
value = 'cmath.sqrt(MZ**2/2. + cmath.sqrt(MZ**4/4. - (aEW*cmath.pi*MZ**2)/(Gf*cmath.sqrt(2))))',
texname = 'M_W')
ee = Parameter(name = 'ee',
nature = 'internal',
type = 'real',
value = '2*cmath.sqrt(aEW)*cmath.sqrt(cmath.pi)',
texname = 'e')
sw2 = Parameter(name = 'sw2',
nature = 'internal',
type = 'real',
value = '1 - MW**2/MZ**2',
texname = '\\text{sw2}')
cw = Parameter(name = 'cw',
nature = 'internal',
type = 'real',
value = 'cmath.sqrt(1 - sw2)',
texname = 'c_w')
sw = Parameter(name = 'sw',
nature = 'internal',
type = 'real',
value = 'cmath.sqrt(sw2)',
texname = 's_w')
g1 = Parameter(name = 'g1',
nature = 'internal',
type = 'real',
value = 'ee/cw',
texname = 'g_1')
gw = Parameter(name = 'gw',
nature = 'internal',
type = 'real',
value = 'ee/sw',
texname = 'g_w')
vev = Parameter(name = 'vev',
nature = 'internal',
type = 'real',
value = '(2*MW*sw)/ee',
texname = '\\text{vev}')
lam = Parameter(name = 'lam',
nature = 'internal',
type = 'real',
value = 'MH**2/(2.*vev**2)',
texname = '\\text{lam}')
yt = Parameter(name = 'yt',
nature = 'internal',
type = 'real',
value = '(ymt*cmath.sqrt(2))/vev',
texname = '\\text{yt}')
ytau = Parameter(name = 'ytau',
nature = 'internal',
type = 'real',
value = '(ymtau*cmath.sqrt(2))/vev',
texname = '\\text{ytau}')
muH = Parameter(name = 'muH',
nature = 'internal',
type = 'real',
value = 'cmath.sqrt(lam*vev**2)',
texname = '\\mu')
I2a33 = Parameter(name = 'I2a33',
nature = 'internal',
type = 'complex',
value = 'yt',
texname = '\\text{I2a33}')
I3a33 = Parameter(name = 'I3a33',
nature = 'internal',
type = 'complex',
value = 'yt',
texname = '\\text{I3a33}')
MFU = Parameter(name = 'MFU',
nature = 'internal',
type = 'real',
value = '0.002550',
texname = '\\text{MFU}')
MFC = Parameter(name = 'MFC',
nature = 'internal',
type = 'real',
value = '1.27',
texname = '\\text{MFC}')
MFD = Parameter(name = 'MFD',
nature = 'internal',
type = 'real',
value = '0.00504',
texname = '\\text{MFD}')
MFS = Parameter(name = 'MFS',
nature = 'internal',
type = 'real',
value = '0.101',
texname = '\\text{MFS}')
MFB = Parameter(name = 'MFB',
nature = 'internal',
type = 'real',
value = '4.7',
texname = '\\text{MFB}')
# vector, 1411.0535
WVuu = Parameter(name = 'WVuu',
nature = 'internal',
type = 'real',
value = '((gVd11**2)*(MY1**2 + 2*MFU**2)/(12*MY1*cmath.pi))*cmath.sqrt(1-(4*MFU**2/MY1**2))',
texname = '\\text{WVuu}')
WVcc = Parameter(name = 'WVcc',
nature = 'internal',
type = 'real',
value = '((gVd22**2)*(MY1**2 + 2*MFC**2)/(12*MY1*cmath.pi))*cmath.sqrt(1-(4*MFC**2/MY1**2))',
texname = '\\text{WVcc}')
WVtt = Parameter(name = 'WVtt',
nature = 'internal',
type = 'real',
value = '((gVd33**2)*(MY1**2 + 2*MT**2)/(12*MY1*cmath.pi))*cmath.sqrt(max(1-(4*MT**2/MY1**2),0.01))',
texname = '\\text{WVtt}')
WVdd = Parameter(name = 'WVdd',
nature = 'internal',
type = 'real',
value = '((gVd11**2)*(MY1**2 + 2*MFD**2)/(12*MY1*cmath.pi))*cmath.sqrt(1-(4*MFD**2/MY1**2))',
texname = '\\text{WVdd}')
WVss = Parameter(name = 'WVss',
nature = 'internal',
type = 'real',
value = '((gVd22**2)*(MY1**2 + 2*MFS**2)/(12*MY1*cmath.pi))*cmath.sqrt(1-(4*MFS**2/MY1**2))',
texname = '\\text{WVss}')
WVbb = Parameter(name = 'WVbb',
nature = 'internal',
type = 'real',
value = '((gVd33**2)*(MY1**2 + 2*MFB**2)/(12*MY1*cmath.pi))*cmath.sqrt(1-(4*MFB**2/MY1**2))',
texname = '\\text{WVbb}')
WVDM = Parameter(name = 'WVDM',
nature = 'internal',
type = 'real',
value = '((gVXd**2)*(MY1**2 + 2*MXd**2)/(12*MY1*cmath.pi))*cmath.sqrt(max(1-(4*MXd**2/MY1**2),0.01))',
texname = '\\text{WVDM}')
# axial, 1411.0535
WAuu = Parameter(name = 'WAuu',
nature = 'internal',
type = 'real',
value = '((gAd11**2)*(MY1**2 - 4*MFU**2)/(12*MY1*cmath.pi))*cmath.sqrt(1-(4*MFU**2/MY1**2))',
texname = '\\text{WAuu}')
WAcc = Parameter(name = 'WAcc',
nature = 'internal',
type = 'real',
value = '((gAd22**2)*(MY1**2 - 4*MFC**2)/(12*MY1*cmath.pi))*cmath.sqrt(1-(4*MFC**2/MY1**2))',
texname = '\\text{WAcc}')
WAtt = Parameter(name = 'WAtt',
nature = 'internal',
type = 'real',
value = '((gAd33**2)*(MY1**2 - 4*MT**2)/(12*MY1*cmath.pi))*cmath.sqrt(max(1-(4*MT**2/MY1**2),0.01))',
texname = '\\text{WAtt}')
WAdd = Parameter(name = 'WAdd',
nature = 'internal',
type = 'real',
value = '((gAd11**2)*(MY1**2 - 4*MFD**2)/(12*MY1*cmath.pi))*cmath.sqrt(1-(4*MFD**2/MY1**2))',
texname = '\\text{WAdd}')
WAss= Parameter(name = 'WAss',
nature = 'internal',
type = 'real',
value = '((gAd22**2)*(MY1**2 - 4*MFS**2)/(12*MY1*cmath.pi))*cmath.sqrt(1-(4*MFS**2/MY1**2))',
texname = '\\text{WAss}')
WAbb= Parameter(name = 'WAbb',
nature = 'internal',
type = 'real',
value = '((gAd33**2)*(MY1**2 - 4*MFB**2)/(12*MY1*cmath.pi))*cmath.sqrt(1-(4*MFB**2/MY1**2))',
texname = '\\text{WAbb}')
WADM = Parameter(name = 'WADM',
nature = 'internal',
type = 'real',
value = '((gAXd**2)*(MY1**2 - 4*MXd**2)/(12*MY1*cmath.pi))*cmath.sqrt(max(1-(4*MXd**2/MY1**2),0.01))',
texname = '\\text{WADM}')
sumY1 = Parameter(name = 'sumY1',
nature = 'internal',
type = 'real',
value = 'WVDM + WADM + 3*(WVuu+WVcc+WVtt+WVdd+WVss+WVbb+WAuu+WAcc+WAtt+WAdd+WAss+WAbb)',
texname = '\\text{sumZpV}')
WY1 = Parameter(name = 'WY1',
nature = 'internal',
type = 'real',
value = 'sumY1',
texname = '\\text{WY1}',
lhablock = 'DECAY',
lhacode = [ 55 ])
|
[
"jongho.lee@cern.ch"
] |
jongho.lee@cern.ch
|
09b211e8ea77f71438cebbd750d645acca661a75
|
56710ab40655d576895ca72e25d51d20d4f96ead
|
/tools/speeddial
|
c1396e849838e14c9b5a07448864eb3dd17873ed
|
[
"Apache-2.0"
] |
permissive
|
veskuh/qmldialer
|
bc35345a484d9d7d84ee4632c7044f6cca36ad40
|
627b2e9dca575f5bf4f690dfb9387c4d7fcc798c
|
refs/heads/master
| 2016-09-06T03:55:55.403498
| 2012-01-06T08:40:56
| 2012-01-06T08:40:56
| 2,970,737
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,272
|
#! /usr/bin/env python
#
# Copyright (c) 2011, Tom Swindell.
#
# This program is licensed under the terms and conditions of the
# Apache License, version 2.0. The full text of the Apache License is at
# http://www.apache.org/licenses/LICENSE-2.0
#
import os, sys
def display_usage():
sys.stderr.write("Usage: " + sys.argv[0] + " <get|set> <preset (1-8)> [value]\n\n")
if __name__ == '__main__':
if len(sys.argv) < 2:
sys.stderr.write("You must specify a command!\n")
display_usage()
sys.exit(1)
if len(sys.argv) < 3:
sys.stderr.write("You must specify a prefix!\n")
display_usage()
sys.exit(1)
command = sys.argv[1]
preset = int(sys.argv[2])
if preset < 1 or preset > 8:
sys.stderr.write("Preset has to be between 1 and 8!\n")
display_usage()
sys.exit(1)
if command == 'get':
os.system('gconftool-2 -g /apps/dialer/speeddial/' + str(preset))
elif command == 'set':
if len(sys.argv) < 4:
sys.stderr.write("You must supply a value for preset!\n")
display_usage()
sys.exit(1)
os.system('gconftool-2 -t string -s /apps/dialer/speeddial/' + str(preset) + ' ' + sys.argv[3])
else:
sys.stderr.write("Unrecognised command: " + command + "\n")
display_usage()
sys.exit(1)
|
[
"robin+git@viroteck.net"
] |
robin+git@viroteck.net
|
|
8f2d60b47c697cdc3e16a53a2e2201aeea2bc35f
|
2a55222f2c4e002b398ecc9523adbda5c3f28796
|
/mooc/mooc/settings.py
|
e65c7bfcfeec702c3973f4475d3df9053a74f742
|
[] |
no_license
|
zhenghaogithub/web
|
1b04de1f853edbb02fe9b1fc61d9d2ae46a5745c
|
bb471ce5338a1a805ccd72e6b3a389c96a0641a6
|
refs/heads/master
| 2021-07-10T06:54:59.626019
| 2020-03-04T12:58:11
| 2020-03-04T12:58:11
| 244,344,055
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,172
|
py
|
"""
Django settings for mooc project.
Generated by 'django-admin startproject' using Django 2.0.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
import os
import sys
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
#设置后台xadmin的路径
sys.path.insert(1,os.path.join(BASE_DIR,'extra_apps'))
sys.path.insert(0,os.path.join(BASE_DIR,'apps'))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'o)yl-_g$@eeu2g%1i03c461sn0lg&^e*q1bmgjn^awxkq0e3^9'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
#xAdmin替换默认的admin
'xadmin',
'crispy_forms',
#DjangoUeditor富文本编辑器
'DjangoUeditor',
#Django的验证码
'captcha',
#Django的分页器
'pure_pagination',
'users',
'organization',
'courses',
'operation',
]
AUTH_USER_MODEL = 'users.UserProfile'
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mooc.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR,'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mooc.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'mooc',
'USER': 'root',
'PASSWORD': 'zhenghao123',
'HOST': 'localhost',
'PORT': '3306',
}
}
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
# LANGUAGE_CODE = 'en-us'
LANGUAGE_CODE = 'zh-hans' #语言改为中文
# TIME_ZONE = 'UTC'
TIME_ZONE = 'Asia/Shanghai' #时间改为中国上海
USE_I18N = True
USE_L10N = True
# USE_TZ = True
USE_TZ = False #改为False,让django使用本地时间
#格式化后台内容的显示时间
USE_L10N = False
DATETIME_FORMAT = 'Y-m-d H:i:s'
DATE_FORMAT = 'Y-m-d'
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'mooc', 'static')#用于生产环境
MEDIA_URL = '/Media/'
MEDIA_ROOT = os.path.join(BASE_DIR,'Media')
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'static'),
os.path.join(BASE_DIR, 'Media'),
)
|
[
"zhenghao20070826@163.com"
] |
zhenghao20070826@163.com
|
6e4dd4e629e9a48bb151508f9ec6c2120f4cb676
|
ce3964c7195de67e07818b08a43286f7ec9fec3e
|
/angle_peaks.py
|
3f18f3fab246542885ea6329ac9dc15a38b0f1c8
|
[] |
no_license
|
zhuligs/physics
|
82b601c856f12817c0cfedb17394b7b6ce6b843c
|
7cbac1be7904612fd65b66b34edef453aac77973
|
refs/heads/master
| 2021-05-28T07:39:19.822692
| 2013-06-05T04:53:08
| 2013-06-05T04:53:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,031
|
py
|
#!/usr/bin/env python
"""
Create a data set of nn_dist peak distances vs rs & P, for a given neighbor
"""
import os, sys, commands, glob
# RS is a list of all the names of the rs directories
global RS
RS = commands.getoutput('ls -1 | grep "1\." | grep -v c').split()
def main():
# Open the output file
out = open('angle_EV.dat','w')
out.write('# rs, <angle>, P(GPa)\n')
for rs in RS:
# Get pressure
try:
P = commands.getoutput("tail -2 "+rs+"/analysis/pressure.blocker | head -1 | awk '{print $4}'").strip()
except:
P = '--------'
# Get location of peak
try:
EV = float(commands.getoutput("expectation_value.py "+rs+"/analysis/CO2_angles.dat 1 2").split()[-1])
except:
EV = '--------'
# Write to the output file
if '--' in P or '--' in str(EV):
out.write('#')
out.write(rs+' '+str(EV)+' '+P+'\n')
out.close()
if __name__ == '__main__':
main()
|
[
"boates@gmail.com"
] |
boates@gmail.com
|
37c6df9686c851389868af110179898a2a55def7
|
8775aac665c4011cc743d737c12342e1b08d8f41
|
/config/hosts.py
|
3766ccf2d2f934ea128736f30b19f3dc8166cf79
|
[] |
no_license
|
kongp3/sys_deploy
|
734dfa3815c93305eca77f5d3f9488968c90ef6f
|
8cd750c4df3f3f64515e3b0051038569d6e8bce2
|
refs/heads/master
| 2020-04-09T06:53:01.340569
| 2018-12-03T04:13:22
| 2018-12-03T04:13:22
| 160,131,093
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 176
|
py
|
# -*- coding: utf-8 -*-
from config import *
SERVER_HOSTS = [
SERVER1_USER + '@' + SERVER1_IP,
SERVER2_USER + '@' + SERVER2_IP,
SERVER3_USER + '@' + SERVER3_IP,
]
|
[
"kongp3@outlook"
] |
kongp3@outlook
|
9a39839275986cbd5e2c06251ec6ba300a70ba09
|
76b0a9dd3b238ce98d7322521750cf3169d2972e
|
/DLC/pupil_opto_stim_probes.py
|
6840ed6559ae2adcdcfeb821c91c9cf993d6cf96
|
[] |
no_license
|
mschart/IBL-Serotonin
|
15943eea9e94d7e97816f82539bb97c8b8cdc6b5
|
19da8b9ddc65d174e5851dd0c94f8ef51681b634
|
refs/heads/master
| 2023-07-26T23:00:17.166115
| 2021-08-31T17:02:15
| 2021-08-31T17:02:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,108
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jan 13 11:42:01 2021
@author: guido
"""
import numpy as np
from os.path import join
import pandas as pd
import matplotlib.pyplot as plt
from scipy.stats import zscore
import seaborn as sns
from serotonin_functions import (load_trials, butter_filter, paths, px_to_mm, pupil_features)
from oneibl.one import ONE
one = ONE()
# Settings
TIME_BINS = np.arange(-1, 3, 0.1)
BIN_SIZE = 0.1
_, fig_path, save_path = paths()
fig_path = join(fig_path, 'opto-pupil')
subjects = pd.read_csv(join('..', 'subjects.csv'))
#subjects = subjects[subjects['subject'] == 'ZFM-01867'].reset_index(drop=True)
results_df = pd.DataFrame()
for i, nickname in enumerate(subjects['subject']):
print(f'Processing {nickname}..')
# Query sessions
if subjects.loc[i, 'date_range_probes'] == 'all':
eids = one.search(subject=nickname, task_protocol='_iblrig_tasks_opto_biasedChoiceWorld')
elif subjects.loc[i, 'date_range_probes'] == 'none':
continue
else:
eids = one.search(subject=nickname, task_protocol='_iblrig_tasks_opto_biasedChoiceWorld',
date_range=[subjects.loc[i, 'date_range_probes'][:10],
subjects.loc[i, 'date_range_probes'][11:]])
# Loop over sessions
pupil_size = pd.DataFrame()
for j, eid in enumerate(eids):
print(f'Processing session {j+1} of {len(eids)}')
# Load in trials and video data
try:
trials = load_trials(eid, laser_stimulation=True, one=one)
except:
print('could not load trials')
if trials is None:
continue
if 'laser_stimulation' not in trials.columns.values:
continue
if 'laser_probability' not in trials.columns.values:
trials['laser_probability'] = trials['laser_stimulation']
video_dlc, video_times = one.load(eid, dataset_types=['camera.dlc', 'camera.times'])
if video_dlc is None:
continue
# Assume frames were dropped at the end
if video_times.shape[0] > video_dlc.shape[0]:
video_times = video_times[:video_dlc.shape[0]]
else:
video_dlc = video_dlc[:video_times.shape[0]]
# Get pupil size
video_dlc = px_to_mm(video_dlc)
x, y, diameter = pupil_features(video_dlc)
# Remove blinks
likelihood = np.mean(np.vstack((video_dlc['pupil_top_r_likelihood'],
video_dlc['pupil_bottom_r_likelihood'],
video_dlc['pupil_left_r_likelihood'],
video_dlc['pupil_right_r_likelihood'])), axis=0)
diameter = diameter[likelihood > 0.8]
video_times = video_times[likelihood > 0.8]
# Remove outliers
video_times = video_times[diameter < 10]
diameter = diameter[diameter < 10]
# Low pass filter trace
fs = 1 / ((video_times[-1] - video_times[0]) / video_times.shape[0])
diameter_filt = butter_filter(diameter, lowpass_freq=0.5, order=1, fs=int(fs))
diameter_zscore = zscore(diameter_filt)
# Get trial triggered pupil diameter
for t, trial_start in enumerate(trials['goCue_times']):
this_diameter = np.array([np.nan] * TIME_BINS.shape[0])
for b, time_bin in enumerate(TIME_BINS):
this_diameter[b] = np.mean(diameter_zscore[
(video_times > (trial_start + time_bin) - (BIN_SIZE / 2))
& (video_times < (trial_start + time_bin) + (BIN_SIZE / 2))])
pupil_size = pupil_size.append(pd.DataFrame(data={
'diameter': this_diameter, 'eid': eid, 'subject': nickname, 'trial': t,
'sert': subjects.loc[i, 'sert-cre'], 'laser': trials.loc[t, 'laser_stimulation'],
'laser_prob': trials.loc[t, 'laser_probability'],
'time': TIME_BINS}))
# Plot this animal
if pupil_size.shape[0] > 0:
f, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 5), sharey=True, dpi=300)
lineplt = sns.lineplot(x='time', y='diameter', hue='laser', data=pupil_size,
palette='colorblind', ci=68, ax=ax1)
ax1.set(title='%s, sert: %d, only probes sessions' % (nickname, subjects.loc[i, 'sert-cre']),
ylabel='z-scored pupil diameter', xlabel='Time relative to trial start(s)')
handles, labels = ax1.get_legend_handles_labels()
ax1.legend(handles=handles, labels=['No stim', 'Stim'], frameon=False)
plt.tight_layout()
sns.despine(trim=True)
plt.savefig(join(fig_path, f'{nickname}_pupil_opto_probes'))
# Add to overall dataframe
results_df = results_df.append(pupil_size[pupil_size['laser'] == 0].groupby(['time', 'laser']).mean())
results_df = results_df.append(pupil_size[pupil_size['laser'] == 1].groupby(['time', 'laser']).mean())
results_df['nickname'] = nickname
results_df.to_pickle(join(save_path, 'pupil_opto_probes.p'))
|
[
"guido.meijer@research.fchampalimaud.org"
] |
guido.meijer@research.fchampalimaud.org
|
cde20b6740ecff83f68335eb557cfb8d10af5ada
|
a6ce5f659a521b90c3a2ae07e52229c225d7a5e1
|
/news/migrations/0006_auto_20160605_0131.py
|
7337e39f976614b7be794061c2de60a443235e2e
|
[] |
no_license
|
rksksm/big-talk-india
|
bb7f5b57e788cf7ff9d18cc8a56f7431701f6a72
|
e28c432448ae247d8c2eff2d9864df0af1238257
|
refs/heads/master
| 2020-04-02T05:28:33.885097
| 2017-02-27T08:43:19
| 2017-02-27T08:43:19
| 65,835,006
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 668
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2016-06-04 20:01
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('news', '0005_auto_20160605_0013'),
]
operations = [
migrations.AddField(
model_name='card',
name='image_name',
field=models.TextField(default=''),
preserve_default=False,
),
migrations.AddField(
model_name='slides',
name='image_name',
field=models.TextField(default=''),
preserve_default=False,
),
]
|
[
"rksharma@bpl.netlink.com"
] |
rksharma@bpl.netlink.com
|
e92bb73592c96ad648bf9afbe5a0bd00d08745d8
|
7a3e9d88b21ef7e4b73d0632e08546d65a9df2ca
|
/modules/templates/BRCMS/RLP/anonymize.py
|
2928089c67d9122fb9f441fc5212a41cd94167e5
|
[
"MIT"
] |
permissive
|
nursix/drkcm
|
64eeb8ead30784d379d64a0ba2bc2c93bcafb8ca
|
7ec4b959d009daf26d5ca6ce91dd9c3c0bd978d6
|
refs/heads/master
| 2023-09-04T10:07:52.596460
| 2023-09-04T00:43:45
| 2023-09-04T00:43:45
| 97,222,001
| 3
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,399
|
py
|
"""
Anonymization Rules for BRCMS/RLP
License: MIT
"""
from uuid import uuid4
from gluon import current
def rlpcm_person_anonymize():
""" Rules to anonymize a case file """
auth = current.auth
s3db = current.s3db
ANONYMOUS = "-"
# Standard anonymizers
from s3db.pr import pr_address_anonymise as anonymous_address, \
pr_person_obscure_dob as obscure_dob
# Helper to produce an anonymous ID (pe_label)
anonymous_id = lambda record_id, f, v: "NN%s" % uuid4().hex[-8:].upper()
anonymous_code = lambda record_id, f, v: uuid4().hex
# Case Activity Default Closure
activity_closed = s3db.br_case_activity_default_status(closing=True)
# General rule for attachments
documents = ("doc_document", {
"key": "doc_id",
"match": "doc_id",
"fields": {"name": ("set", ANONYMOUS),
"file": "remove",
"url": "remove",
"comments": "remove",
},
"delete": True,
})
# Rule for direct offers (from the offerer perspective)
direct_offers = ("br_direct_offer", {
"key": "offer_id",
"match": "id",
"delete": True,
})
# Rules for user accounts
account = ("auth_user", {
"key": "id",
"match": "user_id",
"fields": {"id": auth.s3_anonymise_roles,
"first_name": ("set", "-"),
"last_name": "remove",
"email": anonymous_code,
"organisation_id": "remove",
"password": auth.s3_anonymise_password,
"deleted": ("set", True),
},
})
# Rules
rules = [
# Rules to remove PID from person record and case file
{"name": "default",
"title": "Names, IDs, Reference Numbers, Contact Information, Addresses",
"fields": {"first_name": ("set", ANONYMOUS),
"last_name": ("set", ANONYMOUS),
"pe_label": anonymous_id,
"date_of_birth": obscure_dob,
"comments": "remove",
},
"cascade": [("br_case", {
"key": "person_id",
"match": "id",
"fields": {"comments": "remove",
},
"cascade": [documents,
],
}),
("pr_contact", {
"key": "pe_id",
"match": "pe_id",
"fields": {"contact_description": "remove",
"value": ("set", ""),
"comments": "remove",
},
"delete": True,
}),
("pr_contact_emergency", {
"key": "pe_id",
"match": "pe_id",
"fields": {"name": ("set", ANONYMOUS),
"relationship": "remove",
"phone": "remove",
"comments": "remove",
},
"delete": True,
}),
("pr_address", {
"key": "pe_id",
"match": "pe_id",
"fields": {"location_id": anonymous_address,
"comments": "remove",
},
}),
("pr_person_details", {
"key": "person_id",
"match": "id",
"fields": {"education": "remove",
"occupation": "remove",
},
}),
("pr_image", {
"key": "pe_id",
"match": "pe_id",
"fields": {"image": "remove",
"url": "remove",
"description": "remove",
},
"delete": True,
}),
("hrm_human_resource", {
"key": "person_id",
"match": "id",
"fields": {"status": ("set", 2),
"site_id": "remove",
"comments": "remove",
},
}),
],
},
# Rules to remove PID from activities and offers
{"name": "activities",
"title": "Needs Reports and Offers of Assistance",
"cascade": [("br_case_activity", {
"key": "person_id",
"match": "id",
"fields": {"location_id": anonymous_address,
"subject": ("set", ANONYMOUS),
"need_details": "remove",
"activity_details": "remove",
"outcome": "remove",
"comments": "remove",
"status_id": ("set", activity_closed),
},
"cascade": [documents,
],
}),
("br_assistance_offer", {
"key": "pe_id",
"match": "pe_id",
"fields": {"name": ("set", ANONYMOUS),
"description": "remove",
"capacity": "remove",
"location_id": anonymous_address,
"contact_name": "remove",
"contact_phone": "remove",
"contact_email": "remove",
"availability": ("set", "RTD"),
"comments": "remove",
},
"cascade": [direct_offers,
],
}),
],
},
# Rules to unlink and remove user account
{"name": "account",
"title": "User Account",
"cascade": [("pr_person_user", {
"key": "pe_id",
"match": "pe_id",
"cascade": [account,
],
"delete": True,
}),
],
},
]
return rules
|
[
"dominic@nursix.org"
] |
dominic@nursix.org
|
a9ffbe9fc364a586186dc4426bb3537894ca629f
|
139827a1d5ec231929af02398e8331cf5286431f
|
/pizza/urls.py
|
d4dc5cc85c645d37a672808d205f5eb194f41a98
|
[] |
no_license
|
proredkar31/pizza-site
|
79cf22fbf153b9b208e4221b342231d29aadefbc
|
c438f301a2175c96456c90d57579f70e3bec81d0
|
refs/heads/main
| 2023-04-24T15:05:31.555675
| 2021-05-12T06:06:45
| 2021-05-12T06:06:45
| 366,412,783
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 395
|
py
|
from django.urls import path
from . import views
urlpatterns = [
path('', views.home,name='pizza-home'),
path('create/', views.createPizza,name='pizza-create'),
path("delete/<str:delete_id>/", views.deletePizza,name='pizza-delete'),
path('update/<str:update_id>/', views.updatePizza,name='pizza-update'),
path('detail/<str:view_id>/', views.viewPizza,name='pizza-view'),
]
|
[
"proredkar31@gmail.com"
] |
proredkar31@gmail.com
|
315b4c0ac084b1d91f21ffdca3cc60a33bcb72f8
|
e027e663d850f84493c48aae04567b5f06558bc8
|
/gerenciamento_pet/app/views/funcionario_views.py
|
e769de31748e1946287191110e170c9f7a5ef89c
|
[] |
no_license
|
isaias0rt0n/django-sistema-de-gerenciamento-de-clinicas-pet
|
11e805db4d5db4c8d8de46f96e2b08d75a139c8c
|
a2c15b083febcd1802cbd1388cc288472b7fe744
|
refs/heads/main
| 2023-03-12T07:26:05.232748
| 2021-02-27T23:00:30
| 2021-02-27T23:00:30
| 336,663,155
| 0
| 0
| null | 2021-02-26T14:15:10
| 2021-02-07T00:06:17
|
Python
|
UTF-8
|
Python
| false
| false
| 1,495
|
py
|
from django.contrib.auth.decorators import user_passes_test
from django.contrib.auth.hashers import make_password
from django.shortcuts import render, redirect
from ..entidades import funcionario
from ..forms.funcionario_forms import FuncionarioForm
from ..services import funcionario_service
@user_passes_test(lambda u: u.cargo == 2)
def listar_funcionarios(request):
funcionarios = funcionario_service.listar_funcionarios()
return render(request, 'funcionarios/lista_funcionarios.html', {'funcionarios': funcionarios})
@user_passes_test(lambda u: u.cargo == 2)
def inserir_funcionario(request):
if request.method == "POST":
form_funcionario = FuncionarioForm(request.POST)
if form_funcionario.is_valid():
nome = form_funcionario.cleaned_data["nome"]
nascimento = form_funcionario.cleaned_data["nascimento"]
cargo = form_funcionario.cleaned_data["cargo"]
username = form_funcionario.cleaned_data["username"]
password = make_password(form_funcionario.cleaned_data["password1"])
funcionario_novo = funcionario.Funcionario(nome=nome, nascimento=nascimento, cargo=cargo, usarname=username, password=password)
funcionario_service.cadastrar_funcionarios(funcionario_novo)
return redirect('listar_funcionarios')
else:
form_funcionario = FuncionarioForm()
return render(request, 'funcionarios/form_funcionario.html', {'form_funcionario': form_funcionario})
|
[
"isaiasorton@gmail.com"
] |
isaiasorton@gmail.com
|
f12ce4028eef8a875d3961103e02377c34e07746
|
7a1a65b0cda41ea204fad4848934db143ebf199a
|
/automatedprocesses_firststage/adsym_core_last60_test.py
|
c36727a8b0dc54c680f0dc9be9f8cf1ac23510a5
|
[] |
no_license
|
bpopovich44/ReaperSec
|
4b015e448ed5ce23316bd9b9e33966373daea9c0
|
22acba4d84313e62dbbf95cf2a5465283a6491b0
|
refs/heads/master
| 2021-05-02T18:26:11.875122
| 2019-06-22T15:02:09
| 2019-06-22T15:02:09
| 120,664,056
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,164
|
py
|
#!/usr/bin/python2.7
import json
from mysql.connector import MySQLConnection, Error
from python_dbconfig import read_db_config
import aol_api_R_test
def connect():
# """Gets AOL Data and writes them to a MySQL table"""
db = "mysql_sl"
api = "adsym"
# Connect To DB:
db_config = read_db_config(db)
try:
print('Connecting to database...')
conn = MySQLConnection(**db_config)
if conn.is_connected():
print('connection established.')
cursor = conn.cursor()
sql = "DROP TABLE IF EXISTS adsym_core_last60"
cursor.execute(sql)
sql = "CREATE TABLE adsym_core_last60 (date varchar(50), inventory_source varchar(255), ad_opportunities bigint, \
market_opportunities bigint, ad_attempts bigint, ad_impressions bigint, ad_errors bigint, ad_revenue decimal(15, 5), \
aol_cost decimal(15, 5), epiphany_gross_revenue decimal(15, 5), adsym_revenue decimal(15, 5), total_clicks int, \
iab_viewability_measurable_ad_impressions bigint, iab_viewable_ad_impressions bigint, platform int)"
cursor.execute(sql)
# calls get_access_token function and starts script
logintoken = aol_api_R_test.get_access_token(api)
print(logintoken)
result = aol_api_R_test.run_existing_report(logintoken, "161186")
#print(result)
info = json.loads(result)
#print(info)
for x in json.loads(result)['data']:
date = x['row'][0]
inventory_source = x['row'][1]
ad_opportunities = x['row'][2]
market_opportunities = x['row'][3]
ad_attempts = x['row'][4]
ad_impressions = x['row'][5]
ad_errors = x['row'][6]
ad_revenue = x['row'][7]
aol_cost = x['row'][7]
epiphany_gross_revenue = x['row'][7]
adsym_revenue = x['row'][7]
total_clicks = x['row'][8]
iab_viewability_measurable_ad_impressions = "0"
iab_viewable_ad_impressions = "0"
platform = '4'
list = (date, inventory_source, ad_opportunities, market_opportunities, ad_attempts, ad_impressions, \
ad_errors, ad_revenue, aol_cost, epiphany_gross_revenue, adsym_revenue, total_clicks, \
iab_viewability_measurable_ad_impressions, iab_viewable_ad_impressions, platform)
#print(list)
sql = """INSERT INTO adsym_core_last60 VALUES ("%s", "%s", "%s", "%s", "%s", "%s", "%s", "%s", "%s"*.20, "%s"*.56, \
"%s"*.24, "%s", "%s", "%s", "%s")""" % (date, inventory_source, ad_opportunities, market_opportunities, \
ad_attempts, ad_impressions, ad_errors, ad_revenue, aol_cost, epiphany_gross_revenue, adsym_revenue, \
total_clicks, iab_viewability_measurable_ad_impressions, iab_viewable_ad_impressions, platform)
cursor.execute(sql)
cursor.execute('commit')
else:
print('connection failed.')
except Error as error:
print(error)
finally:
conn.close()
print('Connection closed.')
if __name__ == '__main__':
connect()
|
[
"bpopovich4@gmail.com"
] |
bpopovich4@gmail.com
|
a69df7f43308fc5480efdd170214dcdb43a9bc12
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03288/s994428906.py
|
8f675834b3b195d3bece521191e038e01a6a4385
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 158
|
py
|
def main():
n = int(input())
if n < 1200:
print("ABC")
elif 1200 <= n < 2800:
print("ARC")
else:
print("AGC")
main()
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
fdf35068580c8dbfab42b28746ee4ea466f32f86
|
ce89a3a1e0fe4b5241c70ba7a3b8cb3bc5714901
|
/src/web/nonebot_hk_reporter/platform/monster_siren.py
|
1ad8bef2ec72b22b36ff964bbe5ff625b4a6a5c9
|
[
"MIT"
] |
permissive
|
FzWjScJ/CoolQBot
|
63d49480606e6a062adfea29d07dc7923be9b5f7
|
c02f0a103cfda9d3e52f6301fe57afc2bc738fe0
|
refs/heads/master
| 2023-07-16T17:54:03.503980
| 2021-08-29T09:43:33
| 2021-08-29T09:43:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,271
|
py
|
from typing import Any
import httpx
from .platform import NewMessage, NoTargetMixin
from ..types import RawPost
from ..post import Post
class MonsterSiren(NewMessage, NoTargetMixin):
categories = {}
platform_name = 'monster-siren'
enable_tag = False
enabled = True
is_common = False
schedule_type = 'interval'
schedule_kw = {'seconds': 30}
name = '塞壬唱片官网新闻'
@staticmethod
async def get_target_name(_) -> str:
return '塞壬唱片新闻'
async def get_sub_list(self, _) -> list[RawPost]:
async with httpx.AsyncClient() as client:
raw_data = await client.get(
'https://monster-siren.hypergryph.com/api/news')
return raw_data.json()['data']['list']
def get_id(self, post: RawPost) -> Any:
return post['cid']
def get_date(self, _) -> None:
return None
async def parse(self, raw_post: RawPost) -> Post:
url = f'https://monster-siren.hypergryph.com/info/{raw_post["cid"]}'
return Post('monster-siren',
text=raw_post['title'],
url=url,
target_name="塞壬唱片新闻",
compress=True,
override_use_pic=False)
|
[
"noreply@github.com"
] |
noreply@github.com
|
c71470942db723c32638b7467d14c3df64db74a9
|
4b1789cb5dc76bab956cf6d167a005274c7e5751
|
/variant1.py
|
62c21c99294dd94377a3f4bba05e95e263d1a337
|
[] |
no_license
|
seka17/genome
|
59eeec2d8ac24817ecc718f7ff59a13ae1c3980c
|
45b842390e1cd2a2f06614bb620797ea604a4386
|
refs/heads/master
| 2021-01-10T04:09:01.283206
| 2016-03-29T23:51:21
| 2016-03-29T23:51:21
| 54,679,829
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,811
|
py
|
# -*- coding: utf-8 -*-
from math import log, e
from gmpy2 import mpfr
from gmpy2 import log, exp
from time import clock
import numpy as np
big_number = 300
def s(low, high):
t1 = clock()
a = to_sum(low, high)
print ("Time:", clock() - t1)
def to_sum(low, high):
# t1 = clock()
if low > high:
return 0.0
res = np.sum(np.log(np.arange(low, high + 1)))
# print ("Time:", clock() - t1)
return res
def to_sum2(low, high):
t1 = clock()
if low > high:
return 0.0
res = np.sum(np.log(np.arange(low, high + 1)))
print ("Time:", clock() - t1)
return res
def p_line(k, L, n, u):
t1 = clock()
summary = 0.0
tmin = k - 1
tmax = L - (n - k) - u
st = to_sum(L - n + 1, L)
st1 = to_sum(n - k, n)
st2 = ln_factorial(k - 1)
stat = exp(st1 - st - st2)
f = np.vectorize(variant, otypes=[np.float])
exp1 = np.vectorize(my_exp)
summary = np.sum(exp1(f(np.arange(tmin, tmax+1),u,k,n,L)))
# for t in xrange(tmin, tmax + 1):
# summary += exp(variant(t, u, k, n, L) - static + static1 - static2)
print ("P_line time:", clock()-t1)
return summary*stat
def my_exp(n):
return mpfr(e)**n
def variant(t, u, k, n, l):
# t1 = clock()
# a1 = to_sum(t - k + 2, t)
# print clock()-t1
# t1 = clock()
# a2 = to_sum(l - t - u - n + k + 1, l - t - u - 1)
# print clock()-t1
# t1 = clock()
# print clock()-t1
# res = to_sum(t - k + 2, t) + to_sum(l - t - u - n + k + 1, l - t - u - 1)
return to_sum(t - k + 2, t) + to_sum(l - t - u - n + k + 1, l - t - u - 1)
def ln_factorial(n):
# res = 0
# if n >= big_number:
# res = n* (np.log(n) - 1)
# else:
# res = np.sum(np.log(np.arange(1, n + 1)))
return np.sum(np.log(np.arange(1, n + 1)))
|
[
"ksi.seka@gmail.com"
] |
ksi.seka@gmail.com
|
022da20ca188bc1daaa3e234c57906a77887354a
|
e03a8219e9cf46603d88bb431d1598064b94794b
|
/dataset_loader/tfrecord_reader.py
|
4213de13af882a47ab6fc00403701b2bce2a5297
|
[
"MIT"
] |
permissive
|
pmorerio/useful_snippets
|
1f7541ab5b59b3bafcbd45ba9e4672ffec01c2d4
|
b2ec35d04ab8641d2fbc0445eb6776e9570849ba
|
refs/heads/master
| 2021-01-02T09:49:10.902426
| 2019-10-23T14:18:09
| 2019-10-23T14:18:09
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,298
|
py
|
import tensorflow as tf
import os
import time
import glob
SPLIT = 'train'
list_file = glob.glob1('/data/datasets/stl10/', '*{}*.tfrecords'.format(SPLIT))
list_file = [os.path.join('/data/datasets/stl10', i) for i in list_file]
raw_image_dataset = tf.data.TFRecordDataset(list_file)
def _parse_example(example_proto):
image_feature_description = {
'height': tf.io.FixedLenFeature([], tf.int64),
'width': tf.io.FixedLenFeature([], tf.int64),
'depth': tf.io.FixedLenFeature([], tf.int64),
'label': tf.io.FixedLenFeature([], tf.int64),
'image_raw': tf.io.FixedLenFeature([], tf.string),
'image_path': tf.io.VarLenFeature(tf.string),
}
# Parse the input tf.Example proto using the dictionary above.
example = tf.io.parse_single_example(example_proto, image_feature_description)
image = tf.cast(tf.io.decode_jpeg(example['image_raw']), tf.uint8)
label = tf.cast(example['label'], tf.int64)
return {
'image': image,
'label': label
}
raw_example = raw_image_dataset.map(_parse_example)
for e in range(5):
count = 1
t0 = time.time()
for example in raw_example:
print('{}/{} - {}'.format(e + 1, 5, count))
count += 1
print('Time for epoch: {}'.format(time.time() - t0))
|
[
"paolor82@gmail.com"
] |
paolor82@gmail.com
|
d0b528903a9a1e72d759138a3f5ab4c43d124a28
|
494b763f2613d4447bc0013100705a0b852523c0
|
/cnn/answer/M1_cp32_3_m_d512.py
|
f18fed9ba897d5083554c7a56ca3c5934c11fd9c
|
[] |
no_license
|
DL-DeepLearning/Neural-Network
|
dc4a2dd5efb1b4ef1a3480a1df6896c191ae487f
|
3160c4af78dba6bd39552bb19f09a699aaab8e9e
|
refs/heads/master
| 2021-06-17T05:16:22.583816
| 2017-06-07T01:21:39
| 2017-06-07T01:21:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,704
|
py
|
# libraries & packages
import numpy
import math
import sys
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Convolution2D, MaxPooling2D
from keras.optimizers import SGD
from keras.utils import np_utils
from os import listdir
from os.path import isfile, join
# this function is provided from the official site
def unpickle(file):
import cPickle
fo = open(file, 'rb')
dict = cPickle.load(fo)
fo.close()
return dict
# from PIL import Image
# def ndarray2image (arr_data, image_fn):
# img = Image.fromarray(arr_data, 'RGB')
# img.save(image_fn)
from scipy.misc import imsave
def ndarray2image (arr_data, image_fn):
imsave(image_fn, arr_data)
# set dataset path
dataset_path = '../cifar_10/'
# define the information of images which can be obtained from official website
height, width, dim = 32, 32, 3
classes = 10
''' read training data '''
# get the file names which start with "data_batch" (training data)
train_fns = [fn for fn in listdir(dataset_path) if isfile(join(dataset_path, fn)) & fn.startswith("data_batch")]
# list sorting
train_fns.sort()
# make a glace about the training data
fn = train_fns[0]
raw_data = unpickle(dataset_path + fn)
# type of raw data
type(raw_data)
# <type 'dict'>
# check keys of training data
raw_data_keys = raw_data.keys()
# output ['data', 'labels', 'batch_label', 'filenames']
# check dimensions of ['data']
raw_data['data'].shape
# (10000, 3072)
# concatenate pixel (px) data into one ndarray [img_px_values]
# concatenate label data into one ndarray [img_lab]
img_px_values = 0
img_lab = 0
for fn in train_fns:
raw_data = unpickle(dataset_path + fn)
if fn == train_fns[0]:
img_px_values = raw_data['data']
img_lab = raw_data['labels']
else:
img_px_values = numpy.vstack((img_px_values, raw_data['data']))
img_lab = numpy.hstack((img_lab, raw_data['labels']))
print img_px_values
print img_lab
c = raw_input("...")
# convert 1d-ndarray (0:3072) to 3d-ndarray(32,32,3)
X_train = numpy.asarray([numpy.dstack((r[0:(width*height)].reshape(height,width),
r[(width*height):(2*width*height)].reshape(height,width),
r[(2*width*height):(3*width*height)].reshape(height,width)
)) for r in img_px_values])
Y_train = np_utils.to_categorical(numpy.array(img_lab), classes)
# check is same or not!
# lab_eql = numpy.array_equal([(numpy.argmax(r)) for r in Y_train], numpy.array(img_lab))
# draw one image from the pixel data
ndarray2image(X_train[0],"test_image.png")
# print the dimension of training data
print 'X_train shape:', X_train.shape
print 'Y_train shape:', Y_train.shape
''' read testing data '''
# get the file names which start with "test_batch" (testing data)
test_fns = [fn for fn in listdir(dataset_path) if isfile(join(dataset_path, fn)) & fn.startswith("test_batch")]
# read testing data
raw_data = unpickle(dataset_path + fn)
# type of raw data
type(raw_data)
# check keys of testing data
raw_data_keys = raw_data.keys()
# ['data', 'labels', 'batch_label', 'filenames']
img_px_values = raw_data['data']
# check dimensions of data
print "dim(data)", numpy.array(img_px_values).shape
# dim(data) (10000, 3072)
img_lab = raw_data['labels']
# check dimensions of labels
print "dim(labels)",numpy.array(img_lab).shape
# dim(data) (10000,)
X_test = numpy.asarray([numpy.dstack((r[0:(width*height)].reshape(height,width),
r[(width*height):(2*width*height)].reshape(height,width),
r[(2*width*height):(3*width*height)].reshape(height,width)
)) for r in img_px_values])
Y_test = np_utils.to_categorical(numpy.array(raw_data['labels']), classes)
# scale image data to range [0, 1]
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train /= 255.0
X_test /= 255.0
# print the dimension of training data
print 'X_test shape:', X_test.shape
print 'Y_test shape:', Y_test.shape
# normalize inputs from 0-255 to 0.0-1.0
'''CNN model'''
model = Sequential()
model.add(Convolution2D(32, 3, 3, border_mode='same', input_shape=X_train[0].shape))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.2))
model.add(Flatten())
model.add(Dense(512))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(classes))
model.add(Activation('softmax'))
'''setting optimizer'''
learning_rate = 0.01
learning_decay = 0.01/32
sgd = SGD(lr=learning_rate, decay=learning_decay, momentum=0.9, nesterov=True)
model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])
# check parameters of every layers
model.summary()
''' training'''
batch_size = 128
epoch = 32
# validation data comes from training data
# model.fit(X_train, Y_train, batch_size=batch_size,
# nb_epoch=epoch, validation_split=0.1, shuffle=True)
# validation data comes from testing data
fit_log = model.fit(X_train, Y_train, batch_size=batch_size,
nb_epoch=epoch, validation_data=(X_test, Y_test), shuffle=True)
'''saving training history'''
import csv
history_fn = 'cp32_3_m_d512.csv'
with open(history_fn, 'wb') as csv_file:
w = csv.writer(csv_file)
temp = numpy.array(fit_log.history.values())
w.writerow(fit_log.history.keys())
for i in range(temp.shape[1]):
w.writerow(temp[:,i])
'''saving model'''
from keras.models import load_model
model.save('cp32_3_m_d512.h5')
del model
'''loading model'''
model = load_model('cp32_3_m_d512.h5')
'''prediction'''
pred = model.predict_classes(X_test, batch_size, verbose=0)
ans = [numpy.argmax(r) for r in Y_test]
# caculate accuracy rate of testing data
acc_rate = sum(pred-ans == 0)/float(pred.shape[0])
print "Accuracy rate:", acc_rate
|
[
"teinhonglo@gmail.com"
] |
teinhonglo@gmail.com
|
88743421203b00b54d21f449bdbbc3fddf47d0a0
|
faea85c8583771933ffc9c2807aacb59c7bd96e6
|
/python/pencilnew/visu/internal/MinorSymLogLocator.py
|
1e83f3c925453c62d8eeb6f112a86c81dcdb0538
|
[] |
no_license
|
JosephMouallem/pencil_code
|
1dc68377ecdbda3bd3dd56731593ddb9b0e35404
|
624b742369c09d65bc20fdef25d2201cab7f758d
|
refs/heads/master
| 2023-03-25T09:12:02.647416
| 2021-03-22T02:30:54
| 2021-03-22T02:30:54
| 350,038,447
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,328
|
py
|
##
## symlog tick helper
from matplotlib.ticker import Locator
class MinorSymLogLocator(Locator):
"""
Dynamically find minor tick positions based on the positions of
major ticks for a symlog scaling.
"""
def __init__(self, linthresh):
"""
Ticks will be placed between the major ticks.
The placement is linear for x between -linthresh and linthresh,
otherwise its logarithmically
"""
self.linthresh = linthresh
def __call__(self):
import numpy as np
'Return the locations of the ticks'
majorlocs = self.axis.get_majorticklocs()
majorlocs = np.append(majorlocs, majorlocs[-1]*10.)
majorlocs = np.append(majorlocs[0]*0.1, majorlocs)
# iterate through minor locs
minorlocs = []
# handle the lowest part
for i in xrange(1, len(majorlocs)):
majorstep = majorlocs[i] - majorlocs[i-1]
if abs(majorlocs[i-1] + majorstep/2) < self.linthresh:
ndivs = 10
else:
ndivs = 9
minorstep = majorstep / ndivs
locs = np.arange(majorlocs[i-1], majorlocs[i], minorstep)[1:]
minorlocs.extend(locs)
return self.raise_if_exceeds(np.array(minorlocs))
def tick_values(self, vmin, vmax):
raise NotImplementedError('Cannot get tick locations for a '
'%s type.' % type(self))
|
[
"j3mouall@uwaterloo.ca"
] |
j3mouall@uwaterloo.ca
|
8435fd79cd5b950c38d198c5c94e5af387273be4
|
c29cebe6509972e95f3ae8172a38a5893a651ec3
|
/test_app/models.py
|
56b9a0019a49f7732223a9da3d1b906342773289
|
[] |
no_license
|
masa-rock/test210515
|
8c3605ed7c41063f5818ba787cf599880eec4cfa
|
8286c554b723e1c022732e28a051b3cf8c0f8b56
|
refs/heads/master
| 2023-06-10T22:41:10.212053
| 2021-06-27T14:56:10
| 2021-06-27T14:56:10
| 367,621,109
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 11,803
|
py
|
from django.db import models
from django.urls import reverse_lazy
class Realsimulation_v1(models.Model):
id = models.IntegerField(primary_key=True)
Date = models.DateTimeField()
Open = models.FloatField(null=True,blank=True)
High = models.FloatField(null=True,blank=True)
Low = models.FloatField(null=True,blank=True)
Close = models.FloatField(null=True,blank=True)
Volume = models.IntegerField(null=True,blank=True)
Currency = models.CharField(max_length=255)
code = models.CharField(max_length=255)
rsi = models.FloatField(null=True,blank=True)
days50 = models.FloatField(null=True,blank=True)
days150 = models.FloatField(null=True,blank=True)
days200 = models.FloatField(null=True,blank=True)
weeks20 = models.FloatField(null=True,blank=True)
stdev = models.FloatField(null=True,blank=True)
BB = models.FloatField(null=True,blank=True)
buy = models.CharField(max_length=255)
sell = models.CharField(max_length=255)
version = models.CharField(max_length=255)
class Fanda(models.Model):
id = models.IntegerField(primary_key=True)
code = models.CharField(max_length=255)
name = models.CharField(max_length=255)
y_k_0 = models.CharField(max_length=255)
y_u_0 = models.IntegerField(null=True,blank=True)
y_er_0 = models.IntegerField(null=True,blank=True)
y_kr_0 = models.IntegerField(null=True,blank=True)
y_e_0 = models.IntegerField(null=True,blank=True)
y_k_1 = models.CharField(max_length=255)
y_u_1 = models.IntegerField(null=True,blank=True)
y_er_1 = models.IntegerField(null=True,blank=True)
y_kr_1 = models.IntegerField(null=True,blank=True)
y_e_1 = models.IntegerField(null=True,blank=True)
y_k_2 = models.CharField(max_length=255)
y_u_2 = models.IntegerField(null=True,blank=True)
y_er_2 = models.IntegerField(null=True,blank=True)
y_kr_2 = models.IntegerField(null=True,blank=True)
y_e_2 = models.IntegerField(null=True,blank=True)
y_k_3 = models.CharField(max_length=255)
y_u_3 = models.IntegerField(null=True,blank=True)
y_er_3 = models.IntegerField(null=True,blank=True)
y_kr_3 = models.IntegerField(null=True,blank=True)
y_e_3 = models.IntegerField(null=True,blank=True)
y_k_4 = models.CharField(max_length=255)
y_u_4 = models.IntegerField(null=True,blank=True)
y_er_4 = models.IntegerField(null=True,blank=True)
y_kr_4 = models.IntegerField(null=True,blank=True)
y_e_4 = models.IntegerField(null=True,blank=True)
y_k_5 = models.CharField(max_length=255)
y_u_5 = models.IntegerField(null=True,blank=True)
y_er_5 = models.IntegerField(null=True,blank=True)
y_kr_5 = models.IntegerField(null=True,blank=True)
y_e_5 = models.IntegerField(null=True,blank=True)
y_k_6 = models.CharField(max_length=255)
y_u_6 = models.IntegerField(null=True,blank=True)
y_er_6 = models.IntegerField(null=True,blank=True)
y_kr_6 = models.IntegerField(null=True,blank=True)
y_e_6 = models.IntegerField(null=True,blank=True)
y_k_7 = models.CharField(max_length=255)
y_u_7 = models.IntegerField(null=True,blank=True)
y_er_7 = models.IntegerField(null=True,blank=True)
y_kr_7 = models.IntegerField(null=True,blank=True)
y_e_7 = models.IntegerField(null=True,blank=True)
y_k_8 = models.CharField(max_length=255)
y_u_8 = models.IntegerField(null=True,blank=True)
y_er_8 = models.IntegerField(null=True,blank=True)
y_kr_8 = models.IntegerField(null=True,blank=True)
y_e_8 = models.IntegerField(null=True,blank=True)
y_k_9 = models.CharField(max_length=255)
y_u_9 = models.IntegerField(null=True,blank=True)
y_er_9 = models.IntegerField(null=True,blank=True)
y_kr_9 = models.IntegerField(null=True,blank=True)
y_e_9 = models.IntegerField(null=True,blank=True)
y_k_10 = models.CharField(max_length=255)
y_u_10 = models.IntegerField(null=True,blank=True)
y_er_10 = models.IntegerField(null=True,blank=True)
y_kr_10 = models.IntegerField(null=True,blank=True)
y_e_10 = models.IntegerField(null=True,blank=True)
y_k_11 = models.CharField(max_length=255)
y_u_11 = models.IntegerField(null=True,blank=True)
y_er_11 = models.IntegerField(null=True,blank=True)
y_kr_11 = models.IntegerField(null=True,blank=True)
y_e_11 = models.IntegerField(null=True,blank=True)
s_k_0 = models.CharField(max_length=255)
s_u_0 = models.IntegerField(null=True,blank=True)
s_er_0 = models.IntegerField(null=True,blank=True)
s_kr_0 = models.IntegerField(null=True,blank=True)
s_e_0 = models.IntegerField(null=True,blank=True)
s_k_1 = models.CharField(max_length=255)
s_u_1 = models.IntegerField(null=True,blank=True)
s_er_1 = models.IntegerField(null=True,blank=True)
s_kr_1 = models.IntegerField(null=True,blank=True)
s_e_1 = models.IntegerField(null=True,blank=True)
s_k_2 = models.CharField(max_length=255)
s_u_2 = models.IntegerField(null=True,blank=True)
s_er_2 = models.IntegerField(null=True,blank=True)
s_kr_2 = models.IntegerField(null=True,blank=True)
s_e_2 = models.IntegerField(null=True,blank=True)
s_k_3 = models.CharField(max_length=255)
s_u_3 = models.IntegerField(null=True,blank=True)
s_er_3 = models.IntegerField(null=True,blank=True)
s_kr_3 = models.IntegerField(null=True,blank=True)
s_e_3 = models.IntegerField(null=True,blank=True)
s_k_4 = models.CharField(max_length=255)
s_u_4 = models.IntegerField(null=True,blank=True)
s_er_4 = models.IntegerField(null=True,blank=True)
s_kr_4 = models.IntegerField(null=True,blank=True)
s_e_4 = models.IntegerField(null=True,blank=True)
s_k_5 = models.CharField(max_length=255)
s_u_5 = models.IntegerField(null=True,blank=True)
s_er_5 = models.IntegerField(null=True,blank=True)
s_kr_5 = models.IntegerField(null=True,blank=True)
s_e_5 = models.IntegerField(null=True,blank=True)
s_k_6 = models.CharField(max_length=255)
s_u_6 = models.IntegerField(null=True,blank=True)
s_er_6 = models.IntegerField(null=True,blank=True)
s_kr_6 = models.IntegerField(null=True,blank=True)
s_e_6 = models.IntegerField(null=True,blank=True)
s_k_7 = models.CharField(max_length=255)
s_u_7 = models.IntegerField(null=True,blank=True)
s_er_7 = models.IntegerField(null=True,blank=True)
s_kr_7 = models.IntegerField(null=True,blank=True)
s_e_7 = models.IntegerField(null=True,blank=True)
s_k_8 = models.CharField(max_length=255)
s_u_8 = models.IntegerField(null=True,blank=True)
s_er_8 = models.IntegerField(null=True,blank=True)
s_kr_8 = models.IntegerField(null=True,blank=True)
s_e_8 = models.IntegerField(null=True,blank=True)
s_k_9 = models.CharField(max_length=255)
s_u_9 = models.IntegerField(null=True,blank=True)
s_er_9 = models.IntegerField(null=True,blank=True)
s_kr_9 = models.IntegerField(null=True,blank=True)
s_e_9 = models.IntegerField(null=True,blank=True)
s_k_10 = models.CharField(max_length=255)
s_u_10 = models.IntegerField(null=True,blank=True)
s_er_10 = models.IntegerField(null=True,blank=True)
s_kr_10 = models.IntegerField(null=True,blank=True)
s_e_10 = models.IntegerField(null=True,blank=True)
s_k_11 = models.CharField(max_length=255)
s_u_11 = models.IntegerField(null=True,blank=True)
s_er_11 = models.IntegerField(null=True,blank=True)
s_kr_11 = models.IntegerField(null=True,blank=True)
    # Tail of a generated run of per-slot statistics columns (slots 11-19
    # visible here).  Pattern per slot N: s_k_N = CharField key, and four
    # optional integer metrics s_u_N / s_er_N / s_kr_N / s_e_N.
    # NOTE(review): the meaning of the u/er/kr/e suffixes is not visible in
    # this section of the file — confirm before relying on them.
    s_e_11 = models.IntegerField(null=True,blank=True)
    s_k_12 = models.CharField(max_length=255)
    s_u_12 = models.IntegerField(null=True,blank=True)
    s_er_12 = models.IntegerField(null=True,blank=True)
    s_kr_12 = models.IntegerField(null=True,blank=True)
    s_e_12 = models.IntegerField(null=True,blank=True)
    s_k_13 = models.CharField(max_length=255)
    s_u_13 = models.IntegerField(null=True,blank=True)
    s_er_13 = models.IntegerField(null=True,blank=True)
    s_kr_13 = models.IntegerField(null=True,blank=True)
    s_e_13 = models.IntegerField(null=True,blank=True)
    s_k_14 = models.CharField(max_length=255)
    s_u_14 = models.IntegerField(null=True,blank=True)
    s_er_14 = models.IntegerField(null=True,blank=True)
    s_kr_14 = models.IntegerField(null=True,blank=True)
    s_e_14 = models.IntegerField(null=True,blank=True)
    s_k_15 = models.CharField(max_length=255)
    s_u_15 = models.IntegerField(null=True,blank=True)
    s_er_15 = models.IntegerField(null=True,blank=True)
    s_kr_15 = models.IntegerField(null=True,blank=True)
    s_e_15 = models.IntegerField(null=True,blank=True)
    s_k_16 = models.CharField(max_length=255)
    s_u_16 = models.IntegerField(null=True,blank=True)
    s_er_16 = models.IntegerField(null=True,blank=True)
    s_kr_16 = models.IntegerField(null=True,blank=True)
    s_e_16 = models.IntegerField(null=True,blank=True)
    s_k_17 = models.CharField(max_length=255)
    s_u_17 = models.IntegerField(null=True,blank=True)
    s_er_17 = models.IntegerField(null=True,blank=True)
    s_kr_17 = models.IntegerField(null=True,blank=True)
    s_e_17 = models.IntegerField(null=True,blank=True)
    s_k_18 = models.CharField(max_length=255)
    s_u_18 = models.IntegerField(null=True,blank=True)
    s_er_18 = models.IntegerField(null=True,blank=True)
    s_kr_18 = models.IntegerField(null=True,blank=True)
    s_e_18 = models.IntegerField(null=True,blank=True)
    s_k_19 = models.CharField(max_length=255)
    s_u_19 = models.IntegerField(null=True,blank=True)
    s_er_19 = models.IntegerField(null=True,blank=True)
    s_kr_19 = models.IntegerField(null=True,blank=True)
    s_e_19 = models.IntegerField(null=True,blank=True)
    def __str__(self) :
        # Display the instance by its `code` field — presumably a CharField
        # declared earlier in this class (not visible here); confirm.
        return self.code
class Realsimulation_result_v1(models.Model):
    # One buy/sell round-trip produced by a trading-simulation run (v1).
    # NOTE(review): field names are capitalized (non-PEP8); renaming them
    # would require a schema migration, so they are documented as-is.
    Code = models.CharField(max_length=255)            # instrument / ticker code
    Buy_date = models.DateTimeField()
    Buy_cost = models.FloatField(null=True,blank=True)
    # Presumably a 50-day vs 200-day moving-average signal — confirm upstream.
    Day50_day200 = models.FloatField(null=True,blank=True)
    Sell_date = models.DateTimeField()
    Sell_cost = models.FloatField(null=True,blank=True)
    Profit_and_lost =models.FloatField(null=True,blank=True)
    Sell_pattern = models.CharField(max_length=255)    # label of the rule that triggered the sell
    Version = models.CharField(max_length=255)
    No = models.IntegerField(null=True,blank=True)
    Id = models.IntegerField(primary_key=True)         # manually assigned PK (no auto id column)
    def __str__(self) :
        # str() guard because Code is rendered directly in admin/listings.
        return str(self.Code)
class Category(models.Model):
    # A post category; every post references exactly one (see Post.category).
    name = models.CharField(max_length=255,blank=False,null=False,unique=True)  # unique, required label
    def __str__(self):
        return self.name
class Tag(models.Model):
    # A free-form label; posts may carry any number of tags (see Post.tags).
    name = models.CharField(max_length=255,blank=False,null=False,unique=True)  # unique, required label
    def __str__(self):
        return self.name
class Post(models.Model):
    # A blog entry: timestamps, title/body, one category, optional tags.
    created = models.DateTimeField(auto_now_add=True,editable=False,blank=False,null=False)  # set once on insert
    updated = models.DateTimeField(auto_now=True,editable=False,blank=False,null=False)      # refreshed on every save
    title = models.CharField(max_length=255,blank=False,null=False)
    body = models.TextField(blank=True,null=False)
    category = models.ForeignKey(Category,on_delete=models.CASCADE)  # deleting a category deletes its posts
    tags = models.ManyToManyField(Tag,blank=True)
    def __str__(self):
        return self.title
    def get_absolute_url(self):
        # reverse_lazy(urlconf, args=[id]) is the Python-side equivalent of
        # the template tag {% url "urlconf" id %}.
        return reverse_lazy("test_app:detail",args=[self.id])
|
[
"yoshimuramasato@yoshimuramasatonoMacBook-Air.local"
] |
yoshimuramasato@yoshimuramasatonoMacBook-Air.local
|
036b5f311d7c71f462bc035c75e1f709fda7d0c1
|
c9ddbdb5678ba6e1c5c7e64adf2802ca16df778c
|
/cases/synthetic/sieve-big-582.py
|
53920cabcfe9a30a69d9844b54243579ff6513d3
|
[] |
no_license
|
Virtlink/ccbench-chocopy
|
c3f7f6af6349aff6503196f727ef89f210a1eac8
|
c7efae43bf32696ee2b2ee781bdfe4f7730dec3f
|
refs/heads/main
| 2023-04-07T15:07:12.464038
| 2022-02-03T15:42:39
| 2022-02-03T15:42:39
| 451,969,776
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 31,755
|
py
|
# A resizable list of integers
class Vector(object):
    # Resizable int list for a ChocoPy-style benchmark: a fixed-size backing
    # list that grows one slot at a time; only indices < size are live.
    items: [int] = None   # backing storage; slots beyond `size` are padding zeros
    size: int = 0         # number of live elements
    def __init__(self:"Vector"):
        self.items = [0]
    # Returns current capacity
    def capacity(self:"Vector") -> int:
        return len(self.items)
    # Increases capacity of vector by one element
    def increase_capacity(self:"Vector") -> int:
        self.items = self.items + [0]
        return self.capacity()
    # Appends one item to end of vector
    def append(self:"Vector", item: int) -> object:
        if self.size == self.capacity():
            self.increase_capacity()
        self.items[self.size] = item
        self.size = self.size + 1
    # Appends many items to end of vector
    def append_all(self:"Vector", new_items: [int]) -> object:
        item:int = 0
        for item in new_items:
            self.append(item)
    # Removes an item from the middle of vector
    def remove_at(self:"Vector", idx: int) -> object:
        # NOTE(review): any idx >= 0 decrements size, even when idx is past
        # the last element — confirm this is the intended benchmark behaviour.
        if idx < 0:
            return
        while idx < self.size - 1:
            self.items[idx] = self.items[idx + 1]
            idx = idx + 1
        self.size = self.size - 1
    # Retrieves an item at a given index
    def get(self:"Vector", idx: int) -> int:
        return self.items[idx]
    # Retrieves the current size of the vector
    def length(self:"Vector") -> int:
        return self.size
# A resizable list of integers
class Vector2(object):
    # Generated benchmark clone of Vector.  The *2-suffixed fields and
    # methods duplicate the base behaviour; their extra parameters/fields
    # are unused padding — every variant operates on `items`/`size` only.
    items: [int] = None
    items2: [int] = None
    size: int = 0
    size2: int = 0
    def __init__(self:"Vector2"):
        self.items = [0]
    # Returns current capacity
    def capacity(self:"Vector2") -> int:
        return len(self.items)
    # Returns current capacity
    def capacity2(self:"Vector2") -> int:
        return len(self.items)
    # Increases capacity of vector by one element
    def increase_capacity(self:"Vector2") -> int:
        self.items = self.items + [0]
        # FIX: was `return $Exp.capacity()` — an unfilled generator
        # placeholder (a syntax error); every sibling method and class
        # uses `self.capacity()` here.
        return self.capacity()
    # Increases capacity of vector by one element
    def increase_capacity2(self:"Vector2") -> int:
        self.items = self.items + [0]
        return self.capacity()
    # Appends one item to end of vector
    def append(self:"Vector2", item: int) -> object:
        if self.size == self.capacity():
            self.increase_capacity()
        self.items[self.size] = item
        self.size = self.size + 1
    # Appends one item to end of vector
    def append2(self:"Vector2", item: int, item2: int) -> object:
        if self.size == self.capacity():
            self.increase_capacity()
        self.items[self.size] = item
        self.size = self.size + 1
    # Appends many items to end of vector
    def append_all(self:"Vector2", new_items: [int]) -> object:
        item:int = 0
        for item in new_items:
            self.append(item)
    # Appends many items to end of vector
    def append_all2(self:"Vector2", new_items: [int], new_items2: [int]) -> object:
        item:int = 0
        item2:int = 0
        for item in new_items:
            self.append(item)
    # Removes an item from the middle of vector
    def remove_at(self:"Vector2", idx: int) -> object:
        if idx < 0:
            return
        while idx < self.size - 1:
            self.items[idx] = self.items[idx + 1]
            idx = idx + 1
        self.size = self.size - 1
    # Removes an item from the middle of vector
    def remove_at2(self:"Vector2", idx: int, idx2: int) -> object:
        if idx < 0:
            return
        while idx < self.size - 1:
            self.items[idx] = self.items[idx + 1]
            idx = idx + 1
        self.size = self.size - 1
    # Retrieves an item at a given index
    def get(self:"Vector2", idx: int) -> int:
        return self.items[idx]
    # Retrieves an item at a given index
    def get2(self:"Vector2", idx: int, idx2: int) -> int:
        return self.items[idx]
    # Retrieves the current size of the vector
    def length(self:"Vector2") -> int:
        return self.size
    # Retrieves the current size of the vector
    def length2(self:"Vector2") -> int:
        return self.size
# A resizable list of integers
class Vector3(object):
    # Generated benchmark clone of Vector.  The *2/*3-suffixed fields and
    # methods duplicate the base behaviour; their extra parameters/fields
    # are unused padding — every variant operates on `items`/`size` only.
    items: [int] = None
    items2: [int] = None
    items3: [int] = None
    size: int = 0
    size2: int = 0
    size3: int = 0
    def __init__(self:"Vector3"):
        self.items = [0]
    # Returns current capacity
    def capacity(self:"Vector3") -> int:
        return len(self.items)
    # Returns current capacity
    def capacity2(self:"Vector3") -> int:
        return len(self.items)
    # Returns current capacity
    def capacity3(self:"Vector3") -> int:
        return len(self.items)
    # Increases capacity of vector by one element
    def increase_capacity(self:"Vector3") -> int:
        self.items = self.items + [0]
        return self.capacity()
    # Increases capacity of vector by one element
    def increase_capacity2(self:"Vector3") -> int:
        self.items = self.items + [0]
        return self.capacity()
    # Increases capacity of vector by one element
    def increase_capacity3(self:"Vector3") -> int:
        self.items = self.items + [0]
        return self.capacity()
    # Appends one item to end of vector
    def append(self:"Vector3", item: int) -> object:
        if self.size == self.capacity():
            self.increase_capacity()
        self.items[self.size] = item
        self.size = self.size + 1
    # Appends one item to end of vector
    def append2(self:"Vector3", item: int, item2: int) -> object:
        if self.size == self.capacity():
            self.increase_capacity()
        self.items[self.size] = item
        self.size = self.size + 1
    # Appends one item to end of vector
    def append3(self:"Vector3", item: int, item2: int, item3: int) -> object:
        if self.size == self.capacity():
            self.increase_capacity()
        self.items[self.size] = item
        self.size = self.size + 1
    # Appends many items to end of vector
    def append_all(self:"Vector3", new_items: [int]) -> object:
        item:int = 0
        for item in new_items:
            self.append(item)
    # Appends many items to end of vector
    def append_all2(self:"Vector3", new_items: [int], new_items2: [int]) -> object:
        item:int = 0
        item2:int = 0
        for item in new_items:
            self.append(item)
    # Appends many items to end of vector
    def append_all3(self:"Vector3", new_items: [int], new_items2: [int], new_items3: [int]) -> object:
        item:int = 0
        item2:int = 0
        item3:int = 0
        for item in new_items:
            self.append(item)
    # Removes an item from the middle of vector
    def remove_at(self:"Vector3", idx: int) -> object:
        if idx < 0:
            return
        while idx < self.size - 1:
            self.items[idx] = self.items[idx + 1]
            idx = idx + 1
        self.size = self.size - 1
    # Removes an item from the middle of vector
    def remove_at2(self:"Vector3", idx: int, idx2: int) -> object:
        if idx < 0:
            return
        while idx < self.size - 1:
            self.items[idx] = self.items[idx + 1]
            idx = idx + 1
        self.size = self.size - 1
    # Removes an item from the middle of vector
    def remove_at3(self:"Vector3", idx: int, idx2: int, idx3: int) -> object:
        if idx < 0:
            return
        while idx < self.size - 1:
            self.items[idx] = self.items[idx + 1]
            idx = idx + 1
        self.size = self.size - 1
    # Retrieves an item at a given index
    def get(self:"Vector3", idx: int) -> int:
        return self.items[idx]
    # Retrieves an item at a given index
    def get2(self:"Vector3", idx: int, idx2: int) -> int:
        return self.items[idx]
    # Retrieves an item at a given index
    def get3(self:"Vector3", idx: int, idx2: int, idx3: int) -> int:
        return self.items[idx]
    # Retrieves the current size of the vector
    def length(self:"Vector3") -> int:
        return self.size
    # Retrieves the current size of the vector
    def length2(self:"Vector3") -> int:
        return self.size
    # Retrieves the current size of the vector
    def length3(self:"Vector3") -> int:
        return self.size
# A resizable list of integers
class Vector4(object):
    # Generated benchmark clone of Vector.  The *2/*3/*4-suffixed fields and
    # methods duplicate the base behaviour; their extra parameters/fields
    # are unused padding — every variant operates on `items`/`size` only.
    items: [int] = None
    items2: [int] = None
    items3: [int] = None
    items4: [int] = None
    size: int = 0
    size2: int = 0
    size3: int = 0
    size4: int = 0
    def __init__(self:"Vector4"):
        self.items = [0]
    # Returns current capacity
    def capacity(self:"Vector4") -> int:
        return len(self.items)
    # Returns current capacity
    def capacity2(self:"Vector4") -> int:
        return len(self.items)
    # Returns current capacity
    def capacity3(self:"Vector4") -> int:
        return len(self.items)
    # Returns current capacity
    def capacity4(self:"Vector4") -> int:
        return len(self.items)
    # Increases capacity of vector by one element
    def increase_capacity(self:"Vector4") -> int:
        self.items = self.items + [0]
        return self.capacity()
    # Increases capacity of vector by one element
    def increase_capacity2(self:"Vector4") -> int:
        self.items = self.items + [0]
        return self.capacity()
    # Increases capacity of vector by one element
    def increase_capacity3(self:"Vector4") -> int:
        self.items = self.items + [0]
        return self.capacity()
    # Increases capacity of vector by one element
    def increase_capacity4(self:"Vector4") -> int:
        self.items = self.items + [0]
        return self.capacity()
    # Appends one item to end of vector
    def append(self:"Vector4", item: int) -> object:
        if self.size == self.capacity():
            self.increase_capacity()
        self.items[self.size] = item
        self.size = self.size + 1
    # Appends one item to end of vector
    def append2(self:"Vector4", item: int, item2: int) -> object:
        if self.size == self.capacity():
            self.increase_capacity()
        self.items[self.size] = item
        self.size = self.size + 1
    # Appends one item to end of vector
    def append3(self:"Vector4", item: int, item2: int, item3: int) -> object:
        if self.size == self.capacity():
            self.increase_capacity()
        self.items[self.size] = item
        self.size = self.size + 1
    # Appends one item to end of vector
    def append4(self:"Vector4", item: int, item2: int, item3: int, item4: int) -> object:
        if self.size == self.capacity():
            self.increase_capacity()
        self.items[self.size] = item
        self.size = self.size + 1
    # Appends many items to end of vector
    def append_all(self:"Vector4", new_items: [int]) -> object:
        item:int = 0
        for item in new_items:
            self.append(item)
    # Appends many items to end of vector
    def append_all2(self:"Vector4", new_items: [int], new_items2: [int]) -> object:
        item:int = 0
        item2:int = 0
        for item in new_items:
            self.append(item)
    # Appends many items to end of vector
    def append_all3(self:"Vector4", new_items: [int], new_items2: [int], new_items3: [int]) -> object:
        item:int = 0
        item2:int = 0
        item3:int = 0
        for item in new_items:
            self.append(item)
    # Appends many items to end of vector
    def append_all4(self:"Vector4", new_items: [int], new_items2: [int], new_items3: [int], new_items4: [int]) -> object:
        item:int = 0
        item2:int = 0
        item3:int = 0
        item4:int = 0
        for item in new_items:
            self.append(item)
    # Removes an item from the middle of vector
    def remove_at(self:"Vector4", idx: int) -> object:
        if idx < 0:
            return
        while idx < self.size - 1:
            self.items[idx] = self.items[idx + 1]
            idx = idx + 1
        self.size = self.size - 1
    # Removes an item from the middle of vector
    def remove_at2(self:"Vector4", idx: int, idx2: int) -> object:
        if idx < 0:
            return
        while idx < self.size - 1:
            self.items[idx] = self.items[idx + 1]
            idx = idx + 1
        self.size = self.size - 1
    # Removes an item from the middle of vector
    def remove_at3(self:"Vector4", idx: int, idx2: int, idx3: int) -> object:
        if idx < 0:
            return
        while idx < self.size - 1:
            self.items[idx] = self.items[idx + 1]
            idx = idx + 1
        self.size = self.size - 1
    # Removes an item from the middle of vector
    def remove_at4(self:"Vector4", idx: int, idx2: int, idx3: int, idx4: int) -> object:
        if idx < 0:
            return
        while idx < self.size - 1:
            self.items[idx] = self.items[idx + 1]
            idx = idx + 1
        self.size = self.size - 1
    # Retrieves an item at a given index
    def get(self:"Vector4", idx: int) -> int:
        return self.items[idx]
    # Retrieves an item at a given index
    def get2(self:"Vector4", idx: int, idx2: int) -> int:
        return self.items[idx]
    # Retrieves an item at a given index
    def get3(self:"Vector4", idx: int, idx2: int, idx3: int) -> int:
        return self.items[idx]
    # Retrieves an item at a given index
    def get4(self:"Vector4", idx: int, idx2: int, idx3: int, idx4: int) -> int:
        return self.items[idx]
    # Retrieves the current size of the vector
    def length(self:"Vector4") -> int:
        return self.size
    # Retrieves the current size of the vector
    def length2(self:"Vector4") -> int:
        return self.size
    # Retrieves the current size of the vector
    def length3(self:"Vector4") -> int:
        return self.size
    # Retrieves the current size of the vector
    def length4(self:"Vector4") -> int:
        return self.size
# A resizable list of integers
class Vector5(object):
    # Generated benchmark clone of Vector.  The *2..*5-suffixed fields and
    # methods duplicate the base behaviour; their extra parameters/fields
    # are unused padding — every variant operates on `items`/`size` only.
    items: [int] = None
    items2: [int] = None
    items3: [int] = None
    items4: [int] = None
    items5: [int] = None
    size: int = 0
    size2: int = 0
    size3: int = 0
    size4: int = 0
    size5: int = 0
    def __init__(self:"Vector5"):
        self.items = [0]
    # Returns current capacity
    def capacity(self:"Vector5") -> int:
        return len(self.items)
    # Returns current capacity
    def capacity2(self:"Vector5") -> int:
        return len(self.items)
    # Returns current capacity
    def capacity3(self:"Vector5") -> int:
        return len(self.items)
    # Returns current capacity
    def capacity4(self:"Vector5") -> int:
        return len(self.items)
    # Returns current capacity
    def capacity5(self:"Vector5") -> int:
        return len(self.items)
    # Increases capacity of vector by one element
    def increase_capacity(self:"Vector5") -> int:
        self.items = self.items + [0]
        return self.capacity()
    # Increases capacity of vector by one element
    def increase_capacity2(self:"Vector5") -> int:
        self.items = self.items + [0]
        return self.capacity()
    # Increases capacity of vector by one element
    def increase_capacity3(self:"Vector5") -> int:
        self.items = self.items + [0]
        return self.capacity()
    # Increases capacity of vector by one element
    def increase_capacity4(self:"Vector5") -> int:
        self.items = self.items + [0]
        return self.capacity()
    # Increases capacity of vector by one element
    def increase_capacity5(self:"Vector5") -> int:
        self.items = self.items + [0]
        return self.capacity()
    # Appends one item to end of vector
    def append(self:"Vector5", item: int) -> object:
        if self.size == self.capacity():
            self.increase_capacity()
        self.items[self.size] = item
        self.size = self.size + 1
    # Appends one item to end of vector
    def append2(self:"Vector5", item: int, item2: int) -> object:
        if self.size == self.capacity():
            self.increase_capacity()
        self.items[self.size] = item
        self.size = self.size + 1
    # Appends one item to end of vector
    def append3(self:"Vector5", item: int, item2: int, item3: int) -> object:
        if self.size == self.capacity():
            self.increase_capacity()
        self.items[self.size] = item
        self.size = self.size + 1
    # Appends one item to end of vector
    def append4(self:"Vector5", item: int, item2: int, item3: int, item4: int) -> object:
        if self.size == self.capacity():
            self.increase_capacity()
        self.items[self.size] = item
        self.size = self.size + 1
    # Appends one item to end of vector
    def append5(self:"Vector5", item: int, item2: int, item3: int, item4: int, item5: int) -> object:
        if self.size == self.capacity():
            self.increase_capacity()
        self.items[self.size] = item
        self.size = self.size + 1
    # Appends many items to end of vector
    def append_all(self:"Vector5", new_items: [int]) -> object:
        item:int = 0
        for item in new_items:
            self.append(item)
    # Appends many items to end of vector
    def append_all2(self:"Vector5", new_items: [int], new_items2: [int]) -> object:
        item:int = 0
        item2:int = 0
        for item in new_items:
            self.append(item)
    # Appends many items to end of vector
    def append_all3(self:"Vector5", new_items: [int], new_items2: [int], new_items3: [int]) -> object:
        item:int = 0
        item2:int = 0
        item3:int = 0
        for item in new_items:
            self.append(item)
    # Appends many items to end of vector
    def append_all4(self:"Vector5", new_items: [int], new_items2: [int], new_items3: [int], new_items4: [int]) -> object:
        item:int = 0
        item2:int = 0
        item3:int = 0
        item4:int = 0
        for item in new_items:
            self.append(item)
    # Appends many items to end of vector
    def append_all5(self:"Vector5", new_items: [int], new_items2: [int], new_items3: [int], new_items4: [int], new_items5: [int]) -> object:
        item:int = 0
        item2:int = 0
        item3:int = 0
        item4:int = 0
        item5:int = 0
        for item in new_items:
            self.append(item)
    # Removes an item from the middle of vector
    def remove_at(self:"Vector5", idx: int) -> object:
        if idx < 0:
            return
        while idx < self.size - 1:
            self.items[idx] = self.items[idx + 1]
            idx = idx + 1
        self.size = self.size - 1
    # Removes an item from the middle of vector
    def remove_at2(self:"Vector5", idx: int, idx2: int) -> object:
        if idx < 0:
            return
        while idx < self.size - 1:
            self.items[idx] = self.items[idx + 1]
            idx = idx + 1
        self.size = self.size - 1
    # Removes an item from the middle of vector
    def remove_at3(self:"Vector5", idx: int, idx2: int, idx3: int) -> object:
        if idx < 0:
            return
        while idx < self.size - 1:
            self.items[idx] = self.items[idx + 1]
            idx = idx + 1
        self.size = self.size - 1
    # Removes an item from the middle of vector
    def remove_at4(self:"Vector5", idx: int, idx2: int, idx3: int, idx4: int) -> object:
        if idx < 0:
            return
        while idx < self.size - 1:
            self.items[idx] = self.items[idx + 1]
            idx = idx + 1
        self.size = self.size - 1
    # Removes an item from the middle of vector
    def remove_at5(self:"Vector5", idx: int, idx2: int, idx3: int, idx4: int, idx5: int) -> object:
        if idx < 0:
            return
        while idx < self.size - 1:
            self.items[idx] = self.items[idx + 1]
            idx = idx + 1
        self.size = self.size - 1
    # Retrieves an item at a given index
    def get(self:"Vector5", idx: int) -> int:
        return self.items[idx]
    # Retrieves an item at a given index
    def get2(self:"Vector5", idx: int, idx2: int) -> int:
        return self.items[idx]
    # Retrieves an item at a given index
    def get3(self:"Vector5", idx: int, idx2: int, idx3: int) -> int:
        return self.items[idx]
    # Retrieves an item at a given index
    def get4(self:"Vector5", idx: int, idx2: int, idx3: int, idx4: int) -> int:
        return self.items[idx]
    # Retrieves an item at a given index
    def get5(self:"Vector5", idx: int, idx2: int, idx3: int, idx4: int, idx5: int) -> int:
        return self.items[idx]
    # Retrieves the current size of the vector
    def length(self:"Vector5") -> int:
        return self.size
    # Retrieves the current size of the vector
    def length2(self:"Vector5") -> int:
        return self.size
    # Retrieves the current size of the vector
    def length3(self:"Vector5") -> int:
        return self.size
    # Retrieves the current size of the vector
    def length4(self:"Vector5") -> int:
        return self.size
    # Retrieves the current size of the vector
    def length5(self:"Vector5") -> int:
        return self.size
# A faster (but more memory-consuming) implementation of vector
class DoublingVector(Vector):
    # Vector variant that doubles its backing list until `doubling_limit`,
    # then grows one slot at a time.  Doubling concatenates the list with
    # itself, so padding slots hold copies of live values — harmless, since
    # only indices < size are ever read.
    doubling_limit:int = 1000
    # Overriding to do fewer resizes
    def increase_capacity(self:"DoublingVector") -> int:
        if (self.capacity() <= self.doubling_limit // 2):
            self.items = self.items + self.items
        else:
            # If doubling limit has been reached, fall back to
            # standard capacity increases
            self.items = self.items + [0]
        return self.capacity()
# A faster (but more memory-consuming) implementation of vector
class DoublingVector2(Vector):
    # Generated clone of DoublingVector (note: still subclasses Vector).
    # The *2-suffixed field/method duplicate the base behaviour and both
    # methods read `doubling_limit` only.
    doubling_limit:int = 1000
    doubling_limit2:int = 1000
    # Overriding to do fewer resizes
    def increase_capacity(self:"DoublingVector2") -> int:
        if (self.capacity() <= self.doubling_limit // 2):
            self.items = self.items + self.items
        else:
            # If doubling limit has been reached, fall back to
            # standard capacity increases
            self.items = self.items + [0]
        return self.capacity()
    # Overriding to do fewer resizes
    def increase_capacity2(self:"DoublingVector2") -> int:
        if (self.capacity() <= self.doubling_limit // 2):
            self.items = self.items + self.items
        else:
            # If doubling limit has been reached, fall back to
            # standard capacity increases
            self.items = self.items + [0]
        return self.capacity()
# A faster (but more memory-consuming) implementation of vector
class DoublingVector3(Vector):
    # Generated clone of DoublingVector (note: still subclasses Vector).
    # The *2/*3-suffixed fields/methods duplicate the base behaviour and all
    # methods read `doubling_limit` only.
    doubling_limit:int = 1000
    doubling_limit2:int = 1000
    doubling_limit3:int = 1000
    # Overriding to do fewer resizes
    def increase_capacity(self:"DoublingVector3") -> int:
        if (self.capacity() <= self.doubling_limit // 2):
            self.items = self.items + self.items
        else:
            # If doubling limit has been reached, fall back to
            # standard capacity increases
            self.items = self.items + [0]
        return self.capacity()
    # Overriding to do fewer resizes
    def increase_capacity2(self:"DoublingVector3") -> int:
        if (self.capacity() <= self.doubling_limit // 2):
            self.items = self.items + self.items
        else:
            # If doubling limit has been reached, fall back to
            # standard capacity increases
            self.items = self.items + [0]
        return self.capacity()
    # Overriding to do fewer resizes
    def increase_capacity3(self:"DoublingVector3") -> int:
        if (self.capacity() <= self.doubling_limit // 2):
            self.items = self.items + self.items
        else:
            # If doubling limit has been reached, fall back to
            # standard capacity increases
            self.items = self.items + [0]
        return self.capacity()
# A faster (but more memory-consuming) implementation of vector
class DoublingVector4(Vector):
    # Generated clone of DoublingVector (note: still subclasses Vector).
    # The *2..*4-suffixed fields/methods duplicate the base behaviour and
    # all methods read `doubling_limit` only.
    doubling_limit:int = 1000
    doubling_limit2:int = 1000
    doubling_limit3:int = 1000
    doubling_limit4:int = 1000
    # Overriding to do fewer resizes
    def increase_capacity(self:"DoublingVector4") -> int:
        if (self.capacity() <= self.doubling_limit // 2):
            self.items = self.items + self.items
        else:
            # If doubling limit has been reached, fall back to
            # standard capacity increases
            self.items = self.items + [0]
        return self.capacity()
    # Overriding to do fewer resizes
    def increase_capacity2(self:"DoublingVector4") -> int:
        if (self.capacity() <= self.doubling_limit // 2):
            self.items = self.items + self.items
        else:
            # If doubling limit has been reached, fall back to
            # standard capacity increases
            self.items = self.items + [0]
        return self.capacity()
    # Overriding to do fewer resizes
    def increase_capacity3(self:"DoublingVector4") -> int:
        if (self.capacity() <= self.doubling_limit // 2):
            self.items = self.items + self.items
        else:
            # If doubling limit has been reached, fall back to
            # standard capacity increases
            self.items = self.items + [0]
        return self.capacity()
    # Overriding to do fewer resizes
    def increase_capacity4(self:"DoublingVector4") -> int:
        if (self.capacity() <= self.doubling_limit // 2):
            self.items = self.items + self.items
        else:
            # If doubling limit has been reached, fall back to
            # standard capacity increases
            self.items = self.items + [0]
        return self.capacity()
# A faster (but more memory-consuming) implementation of vector
class DoublingVector5(Vector):
    # Generated clone of DoublingVector (note: still subclasses Vector).
    # The *2..*5-suffixed fields/methods duplicate the base behaviour and
    # all methods read `doubling_limit` only.
    doubling_limit:int = 1000
    doubling_limit2:int = 1000
    doubling_limit3:int = 1000
    doubling_limit4:int = 1000
    doubling_limit5:int = 1000
    # Overriding to do fewer resizes
    def increase_capacity(self:"DoublingVector5") -> int:
        if (self.capacity() <= self.doubling_limit // 2):
            self.items = self.items + self.items
        else:
            # If doubling limit has been reached, fall back to
            # standard capacity increases
            self.items = self.items + [0]
        return self.capacity()
    # Overriding to do fewer resizes
    def increase_capacity2(self:"DoublingVector5") -> int:
        if (self.capacity() <= self.doubling_limit // 2):
            self.items = self.items + self.items
        else:
            # If doubling limit has been reached, fall back to
            # standard capacity increases
            self.items = self.items + [0]
        return self.capacity()
    # Overriding to do fewer resizes
    def increase_capacity3(self:"DoublingVector5") -> int:
        if (self.capacity() <= self.doubling_limit // 2):
            self.items = self.items + self.items
        else:
            # If doubling limit has been reached, fall back to
            # standard capacity increases
            self.items = self.items + [0]
        return self.capacity()
    # Overriding to do fewer resizes
    def increase_capacity4(self:"DoublingVector5") -> int:
        if (self.capacity() <= self.doubling_limit // 2):
            self.items = self.items + self.items
        else:
            # If doubling limit has been reached, fall back to
            # standard capacity increases
            self.items = self.items + [0]
        return self.capacity()
    # Overriding to do fewer resizes
    def increase_capacity5(self:"DoublingVector5") -> int:
        if (self.capacity() <= self.doubling_limit // 2):
            self.items = self.items + self.items
        else:
            # If doubling limit has been reached, fall back to
            # standard capacity increases
            self.items = self.items + [0]
        return self.capacity()
# Makes a vector in the range [i, j)
def vrange(i:int, j:int) -> Vector:
    # Build a DoublingVector holding i, i+1, ..., j-1 (empty when i >= j).
    v:Vector = None
    v = DoublingVector()
    while i < j:
        v.append(i)
        i = i + 1
    return v
def vrange2(i:int, j:int, i2:int, j2:int) -> Vector:
    # Generated clone of vrange; i2/j2 and v2 are unused padding.
    v:Vector = None
    v2:Vector = None
    v = DoublingVector()
    while i < j:
        v.append(i)
        i = i + 1
    return v
def vrange3(i:int, j:int, i2:int, j2:int, i3:int, j3:int) -> Vector:
    # Generated clone of vrange; the extra parameters and locals are unused.
    v:Vector = None
    v2:Vector = None
    v3:Vector = None
    v = DoublingVector()
    while i < j:
        v.append(i)
        i = i + 1
    return v
def vrange4(i:int, j:int, i2:int, j2:int, i3:int, j3:int, i4:int, j4:int) -> Vector:
    # Generated clone of vrange; the extra parameters and locals are unused.
    v:Vector = None
    v2:Vector = None
    v3:Vector = None
    v4:Vector = None
    v = DoublingVector()
    while i < j:
        v.append(i)
        i = i + 1
    return v
def vrange5(i:int, j:int, i2:int, j2:int, i3:int, j3:int, i4:int, j4:int, i5:int, j5:int) -> Vector:
    # Generated clone of vrange; the extra parameters and locals are unused.
    v:Vector = None
    v2:Vector = None
    v3:Vector = None
    v4:Vector = None
    v5:Vector = None
    v = DoublingVector()
    while i < j:
        v.append(i)
        i = i + 1
    return v
# Sieve of Eratosthenes (not really)
def sieve(v:Vector) -> object:
    # In-place filter: for each surviving element k (left to right), remove
    # every later element divisible by k.  Seeded with vrange(2, n) this
    # leaves exactly the primes below n (quadratic, not a true sieve).
    i:int = 0
    j:int = 0
    k:int = 0
    while i < v.length():
        k = v.get(i)
        j = i + 1
        while j < v.length():
            if v.get(j) % k == 0:
                v.remove_at(j)
            else:
                j = j + 1
        i = i + 1
def sieve2(v:Vector, v2:Vector) -> object:
    # Generated clone of sieve; v2 and the *2-suffixed locals are unused.
    i:int = 0
    i2:int = 0
    j:int = 0
    j2:int = 0
    k:int = 0
    k2:int = 0
    while i < v.length():
        k = v.get(i)
        j = i + 1
        while j < v.length():
            if v.get(j) % k == 0:
                v.remove_at(j)
            else:
                j = j + 1
        i = i + 1
def sieve3(v:Vector, v2:Vector, v3:Vector) -> object:
    # Generated clone of sieve; the extra parameters and locals are unused.
    i:int = 0
    i2:int = 0
    i3:int = 0
    j:int = 0
    j2:int = 0
    j3:int = 0
    k:int = 0
    k2:int = 0
    k3:int = 0
    while i < v.length():
        k = v.get(i)
        j = i + 1
        while j < v.length():
            if v.get(j) % k == 0:
                v.remove_at(j)
            else:
                j = j + 1
        i = i + 1
def sieve4(v:Vector, v2:Vector, v3:Vector, v4:Vector) -> object:
    # Generated clone of sieve; the extra parameters and locals are unused.
    i:int = 0
    i2:int = 0
    i3:int = 0
    i4:int = 0
    j:int = 0
    j2:int = 0
    j3:int = 0
    j4:int = 0
    k:int = 0
    k2:int = 0
    k3:int = 0
    k4:int = 0
    while i < v.length():
        k = v.get(i)
        j = i + 1
        while j < v.length():
            if v.get(j) % k == 0:
                v.remove_at(j)
            else:
                j = j + 1
        i = i + 1
def sieve5(v:Vector, v2:Vector, v3:Vector, v4:Vector, v5:Vector) -> object:
    # Generated clone of sieve; the extra parameters and locals are unused.
    i:int = 0
    i2:int = 0
    i3:int = 0
    i4:int = 0
    i5:int = 0
    j:int = 0
    j2:int = 0
    j3:int = 0
    j4:int = 0
    j5:int = 0
    k:int = 0
    k2:int = 0
    k3:int = 0
    k4:int = 0
    k5:int = 0
    while i < v.length():
        k = v.get(i)
        j = i + 1
        while j < v.length():
            if v.get(j) % k == 0:
                v.remove_at(j)
            else:
                j = j + 1
        i = i + 1
# Input parameter
n:int = 50
n2:int = 50
n3:int = 50
n4:int = 50
n5:int = 50
# Data
v:Vector = None
v2:Vector = None
v3:Vector = None
v4:Vector = None
v5:Vector = None
i:int = 0
i2:int = 0
i3:int = 0
i4:int = 0
i5:int = 0
# Crunch
# Only v/n/i drive the output; v2..v5 are built as extra benchmark
# workload and never sieved or printed.
v = vrange(2, n)
v2 = vrange(2, n)
v3 = vrange(2, n)
v4 = vrange(2, n)
v5 = vrange(2, n)
sieve(v)
# Print
# Emits the primes below n, one per line.
while i < v.length():
    print(v.get(i))
    i = i + 1
|
[
"647530+Virtlink@users.noreply.github.com"
] |
647530+Virtlink@users.noreply.github.com
|
09800dd699671fa5488fc29517a420cb80ffda21
|
4d2eeb1077a23694bce737269616d3dedf7de34c
|
/기본 수학 2/2581번 소수.py
|
1115dfe5002cabcd6cb52af2d0533a14a4557cba
|
[] |
no_license
|
askges20/baekjoon_code
|
ae7d7a3d44ccecaafef0b5b43c7978974b5bd5db
|
46d2a3d305f60df9607a48c4660d63c71d835bba
|
refs/heads/main
| 2023-06-04T02:31:13.720700
| 2021-06-20T05:09:50
| 2021-06-20T05:09:50
| 354,603,064
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 460
|
py
|
def isPrime(num):
    """Return True iff num is a prime number.

    Trial division up to sqrt(num).  The original counted every divisor of
    num with a full 1..num scan (O(num) per call and no early exit);
    divisor pairs make checking factors up to the square root sufficient,
    with identical results for every integer input.
    """
    if num < 2:
        # 0, 1 and negatives are not prime (matches the original's output).
        return False
    k = 2
    while k * k <= num:
        if num % k == 0:
            return False
        k += 1
    return True
# Read the inclusive range [lo, hi] from stdin, then print the sum of the
# primes in it and the smallest such prime, or -1 when there is none.
# FIX: the original bound the range to `min`/`max` and the accumulator to
# `sum`, shadowing three builtins; also dropped a redundant int(j) cast.
lo = int(input())
hi = int(input())
prime_sum = 0
min_prime = 0      # 0 means "no prime seen yet" (primes are >= 2)
for candidate in range(lo, hi + 1):
    if isPrime(candidate):
        prime_sum += candidate
        if min_prime == 0:
            min_prime = candidate
if prime_sum != 0:
    print(prime_sum)
    print(min_prime)
else:
    print(-1)
|
[
"noreply@github.com"
] |
noreply@github.com
|
e2777af9f639044887deda42904df167a66306de
|
d0b609e12fbcc58df13d834478018b2cf989be77
|
/web/app/routes.py
|
f48cc3a3952c7aa4f5c33e244969ae76a6f64367
|
[] |
no_license
|
TimurAshrafzyanov/PythonWebProject
|
b5a17b358213d9ae22e14df3c681506c0a4a50a1
|
363c58a2e380243f9e7c5103a467d1b9eeab185f
|
refs/heads/master
| 2022-07-26T07:22:27.592654
| 2020-05-20T14:35:37
| 2020-05-20T14:35:37
| 262,716,860
| 0
| 0
| null | 2020-05-20T14:35:39
| 2020-05-10T05:11:16
|
Python
|
UTF-8
|
Python
| false
| false
| 1,029
|
py
|
from flask import render_template, redirect, request
from app import app
from app.forms import StartForm
from app.functions import get_information
@app.route('/', methods=['GET', 'POST'])
def start():
    # Landing page: renders StartForm; a valid submit forwards the user to
    # the city-choice page.
    form = StartForm()
    if form.validate_on_submit():
        return redirect('/choise')
    return render_template('start.html', form=form)
@app.route('/choise', methods=['GET', 'POST'])
def choose():
    # City-selection page; on POST the chosen city is forwarded as a query
    # parameter to /cities.
    # NOTE(review): 'choise' is a typo for 'choice', but the route path and
    # template name use it consistently — renaming would touch templates
    # and redirects, so it is flagged rather than changed.
    if request.method == 'POST':
        city = request.form['city']
        return redirect('/cities?city={}'.format(city))
    return render_template('choise.html')
@app.route('/cities')
def city_searching():
    # Result page for the requested city (defaults to 'kazan' when the
    # query parameter is missing or empty).
    current_city = request.args.get('city')
    if not current_city:
        current_city = 'kazan'
    # get_information lives in app.functions; on failure it sets
    # args['is_error'] and args['error'], on success the weather keys below.
    args = get_information(current_city)
    if args['is_error']:
        return render_template('final.html', error=True, message=args['error'])
    return render_template('final.html', error=False, name=args['name'],
                           temp=args['temperature'], pres=args['pressure'], hum=args['humidity'])
|
[
"Timur-atm@mail.ru"
] |
Timur-atm@mail.ru
|
0f06963dc4565fec53882f9f5c4bd9329b4f3d85
|
da1c56016a69b68fdb9010130a650778c363b4fa
|
/arrays/merge_sorted_arrays.py
|
3dea6cecbe1c118e7b6f0577b3d66ebd297d8696
|
[] |
no_license
|
sandeepbaldawa/Programming-Concepts-Python
|
519ab60fc9ca5bf2f52e098bab0885218a1d9411
|
178a1d26443066f469987467bda0390f5422561e
|
refs/heads/master
| 2023-03-06T21:35:14.585039
| 2023-02-26T04:38:44
| 2023-02-26T04:38:44
| 40,800,329
| 12
| 5
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 673
|
py
|
class Solution(object):
    def merge(self, nums1, m, nums2, n):
        """
        Merge nums2 into nums1 in place (LeetCode 88).

        :type nums1: List[int]  -- has length >= m + n; first m entries are sorted data
        :type m: int
        :type nums2: List[int]  -- n sorted entries
        :type n: int
        :rtype: void Do not return anything, modify nums1 in-place instead.
        """
        idx1, idx2 = m - 1, n - 1
        # Fill nums1 from the back so unread entries are never overwritten.
        while idx1 >= 0 and idx2 >= 0:
            if nums1[idx1] > nums2[idx2]:
                nums1[idx1 + idx2 + 1] = nums1[idx1]
                idx1 -= 1
            else:
                nums1[idx1 + idx2 + 1] = nums2[idx2]
                idx2 -= 1
        # BUG FIX: leftover nums2 entries belong at indices 0..idx2.  The
        # original wrote them to nums1[idx1], but idx1 is -1 here (nums1
        # exhausted), which clobbered the last element via negative indexing.
        while idx2 >= 0:
            nums1[idx2] = nums2[idx2]
            idx2 -= 1
|
[
"noreply@github.com"
] |
noreply@github.com
|
942bad6052ac0e1168ff2fd57652246ca6e3a2fd
|
3416464630bc3322dd677001811de1a6884c7dd0
|
/others/q14_longest_common_prefix/__init__.py
|
2e8fe4c2d56cc68aee767b789c99e32124d7ef6d
|
[] |
no_license
|
ttomchy/LeetCodeInAction
|
f10403189faa9fb21e6a952972d291dc04a01ff8
|
14a56b5eca8d292c823a028b196fe0c780a57e10
|
refs/heads/master
| 2023-03-29T22:10:04.324056
| 2021-03-25T13:37:01
| 2021-03-25T13:37:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 149
|
py
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
FileName: __init__.py
Description: Package marker for the longest-common-prefix problem (Q14).
Author: Barry Chow
Date: 2020/12/4 10:45 PM
Version: 0.1
"""
|
[
"zhouenguo@163.com"
] |
zhouenguo@163.com
|
d56f4a1195c7653d1f55388c4416fa51b7b127c5
|
278d81ef98513edb8f9a0a4407936ba7db01b1e3
|
/scripts/quarel.py
|
5555415872a786273fba5b886a6c5ff23093d9b5
|
[] |
no_license
|
SaurabhGodse/Major_Project
|
5e121c3fb26d9e980669ea71d6619199f584c83a
|
5127d1d83a4c73310a17b2b1eb4f849d07f2230d
|
refs/heads/master
| 2022-11-14T00:28:09.771364
| 2020-07-12T10:25:54
| 2020-07-12T10:25:54
| 277,986,528
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,644
|
py
|
import json
import nltk
# import string
from nltk import *
import pandas as pd
# nltk.download('wordnet')
from nltk.corpus import wordnet
import pickle
# Penn Treebank POS tags used to pick out verbs ('VB' is listed twice in
# the original) and adjectives/adverbs.
verbs = ['VB', 'VBG', 'VBD', 'VBN', 'VB', 'VBP', 'VBZ']
adjectives = ['JJ', 'JJR', 'JJS', 'RB', 'RBR', 'RBS']
lemmatizer = WordNetLemmatizer()
# keys = pd.read_csv("dictionary_keys.csv")
# Load the glossary (one term per line) into a deduplicated set of
# stripped strings; each term is echoed to stdout as it is read.
f = open("glossary.txt", "r")
key_set = set()
for key in f:
    print(key)
    key_set.add(key.strip())
print(key_set)
# for key in keys.iloc[:, 0]:
#     key_set.add(key)
# print(key_set)
def filt(subtree):
    """Predicate for Tree.subtrees(): keep only 'NP'-labelled subtrees."""
    return "NP" == subtree.label()
def alphaconv(s):
    """Return *s* with every non-alphabetic character removed."""
    return "".join(filter(str.isalpha, s))
def tokenize_lemmitize(text, lemmitized_text, name, definition, word):
    """POS-tag *text* and append the cleaned lemma of every token.

    Each whitespace token is lemmatized with a POS hint — ``'v'`` for the
    verb tags in ``verbs``, ``'a'`` for the adjective/adverb tags in
    ``adjectives``, and WordNet's noun default otherwise — then stripped
    of non-alphabetic characters with ``alphaconv`` and appended to the
    caller-supplied list *lemmitized_text* (mutated in place; nothing is
    returned).

    *name*, *definition* and *word* are unused by the live code; they fed
    debugging output that has been removed, and are kept only so existing
    call sites keep working.
    """
    for token, tag in pos_tag(text.split()):
        if tag in verbs:
            lemma = lemmatizer.lemmatize(token, pos='v')
        elif tag in adjectives:
            lemma = lemmatizer.lemmatize(token, pos='a')
        else:
            lemma = lemmatizer.lemmatize(token)
        lemmitized_text.append(alphaconv(lemma))
# return lemmitized_text
# def filtsubtree(y):
# return y.label() in ['VB', 'VBG', 'VBD', 'VBN', 'VB', 'VBP', 'VBZ']
# f1 = open("PhyConcepts.txt", 'w')
# Build train_set: question number -> stringified set of glossary terms that
# appear among the lemmatized WordNet lemmas/glosses of the verbs and
# adjectives chunked out of each question.
train_set = dict()
file_list = ["quarel-v1-train.json"]
entity_set_list = []
#fres = open("new_result.txt", "w")
for file in file_list:
    with open(file, 'r') as f:
        entity_list = []
        line_number = 1
        # One JSON object (one question) per line.
        for line in f:
            data = json.loads(line)
            text = data['question']
            sentence = text.split(".")
            result = ""
            lemmitized_text = []
            # Debug echo of question 3 only.
            if line_number == 3:
                print(text)
            # Chunk each sentence with a verb-phrase-style grammar, then
            # mine WordNet synsets of the verbs/adjectives inside each chunk.
            for text in sentence:
                taggedText = pos_tag(text.split())
                grammar = "NP: {(<JJ>*<RB>*)*(<VB>|<VBG>|<VBD>|<VBN>|<VB>|<VBP>|<VBZ>)+(<IN>*<DT>*<PRP>*<JJR>*<JJ>*<NN>*<RP>*<RBR>*<RBS>*<TO>*<RB>*)+}"
                cp = nltk.RegexpParser(grammar)
                res = cp.parse(taggedText)
                for subtree in res.subtrees(filter = filt): # Generate all subtrees
                    for a, b in subtree.leaves():
                        if b in verbs or b in adjectives:
                            syns = wordnet.synsets(a)
                            for i in syns:
                                name = i.name()
                                definition = i.definition()
                                # Lemmatize both the synset's lemma (text
                                # before the first '.') and its gloss.
                                tokenize_lemmitize(name.split(".")[0], lemmitized_text, name, definition, a)
                                tokenize_lemmitize(definition, lemmitized_text, name, definition, a)
            # Keep only the lemmas that also occur in the glossary key_set.
            curr_res = str(key_set.intersection(set(lemmitized_text)))
            print(line_number)
            train_set[line_number] = curr_res
            line_number += 1
print(train_set)
pickle.dump(train_set, open("train_set", "wb"))
# entity_set = set()
# for s in entity_set_list:
# entity_set = entity_set | s
# f = open("/mnt/dell/garima/verbs_in_quarel.txt", "w")
# for item in entity_set:
# f.write("%s\n"%item)
# f.close()
# <VBD>
# <VBG>
# <VBN>
# <VB>
# <VBP>
# <VBZ>
# print(alphaconv("saurabh 34234 32"))
|
[
"godsesaurabhgsm@gmail.com"
] |
godsesaurabhgsm@gmail.com
|
78013710f300eb4c9fbec08d64ea520f781fd729
|
97a14d0e3143fd59442e42d79632285e324fefb8
|
/Python_exercises/19-6_parsowanie/19-6-2_parsowanie.py
|
65be4a743ca894ce399f8b259fb5b9e942f32734
|
[] |
no_license
|
olszewskip/kodolamacz_harasym
|
8bf732cb1f69e599854157c97458b688b605f43f
|
7b319a7a017bf79f4235734f49574a3a9c7841e6
|
refs/heads/master
| 2020-03-19T16:27:13.873337
| 2018-06-23T16:48:56
| 2018-06-23T16:48:56
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 641
|
py
|
from pprint import pprint
# Parse an /etc/hosts-style file into a list of per-IP records, merging
# hostnames when the same IP appears on several lines.
FILENAME = './hosts'
with open(FILENAME) as file:
    hosts = {}
    for raw_line in file:
        entry = raw_line.strip()
        # Skip blank lines and comment lines.
        if not entry or entry.startswith('#'):
            continue
        ip, *names = entry.split()
        # Dotted addresses are IPv4; everything else is treated as IPv6.
        protocol = 'ipv4' if '.' in ip else 'ipv6'
        record = hosts.get(ip)
        if record is None:
            hosts[ip] = {
                'ip': ip,
                'hostnames': names,
                'protocol': protocol
            }
        else:
            record['hostnames'].extend(names)
pprint(list(hosts.values()))
|
[
"olspaw@gmail.com"
] |
olspaw@gmail.com
|
9fbd4414790f6c01e7a84591c3d5093412933571
|
f07a42f652f46106dee4749277d41c302e2b7406
|
/Data Set/bug-fixing-5/1293ec85dd68dfc31183ae9ec654333301103660-<test_distribution_version>-fix.py
|
fb83f3cec75b577fead88273ad25d671dc08b97c
|
[] |
no_license
|
wsgan001/PyFPattern
|
e0fe06341cc5d51b3ad0fe29b84098d140ed54d1
|
cc347e32745f99c0cd95e79a18ddacc4574d7faa
|
refs/heads/main
| 2023-08-25T23:48:26.112133
| 2021-10-23T14:11:22
| 2021-10-23T14:11:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 902
|
py
|
@pytest.mark.parametrize('testcase', TESTSETS, ids=(lambda x: x['name']))
def test_distribution_version(testcase):
    'tests the distribution parsing code of the Facts class\n\n    testsets have\n    * a name (for output/debugging only)\n    * input files that are faked\n      * those should be complete and also include "irrelevant" files that might be mistaken as coming from other distributions\n      * all files that are not listed here are assumed to not exist at all\n    * the output of pythons platform.dist()\n    * results for the ansible variables distribution* and os_family\n    '
    # Imported here so the stdin/argv swap below is in place before
    # AnsibleModule reads its argument blob.
    from ansible.module_utils import basic
    args = json.dumps(dict(ANSIBLE_MODULE_ARGS={
    }))
    with swap_stdin_and_argv(stdin_data=args):
        basic._ANSIBLE_ARGS = None  # force AnsibleModule to re-read args from the faked stdin
        module = basic.AnsibleModule(argument_spec=dict())
        # NOTE(review): `facts` is not defined anywhere in this snippet —
        # presumably a module-level import/fixture in the full file; confirm.
        _test_one_distribution(facts, module, testcase)
|
[
"dg1732004@smail.nju.edu.cn"
] |
dg1732004@smail.nju.edu.cn
|
c5d381413e74e6f10fc0ce1f13f915b255360198
|
8ffe2fb0e378afd97de1889eefcd4d3b95d659b6
|
/Django/Django/settings.py
|
89a6fb3903ba23b859743fe91b66ec6d25f258ae
|
[] |
no_license
|
teemutoikkanen/SulkapalloDjango
|
820b7eefb44fff54afb1d5251c0d05d877469774
|
04aeb2e08f305742b9dd127210d05f889bdc247d
|
refs/heads/master
| 2020-03-12T12:21:47.890845
| 2018-04-22T23:46:29
| 2018-04-22T23:46:29
| 130,616,673
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,689
|
py
|
"""
Django settings for Django project.
Generated by 'django-admin startproject' using Django 2.0.3.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '=5tfi2ss!xoj6e%jkgir8=yt+q5vg9(b)8ob9wlftsyrf$1eo&'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['localhost', '10.0.2.2']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'user.apps.UserConfig',
'ilmoitukset.apps.IlmoituksetConfig',
'haasteet.apps.HaasteetConfig',
'matsit.apps.MatsitConfig',
'rest_framework.authtoken',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'Django.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'Django.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATIC_URL = '/static/'
REST_FRAMEWORK = {
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework.authentication.TokenAuthentication',
),
# Use Django's standard `django.contrib.auth` permissions,
# or allow read-only access for unauthenticated users.
# 'DEFAULT_PERMISSION_CLASSES': [
# 'rest_framework.permissions.DjangoModelPermissionsOrAnonReadOnly'
# ]
}
REST_SESSION_LOGIN = False
|
[
"teemu.toikkanen@aalto.fi"
] |
teemu.toikkanen@aalto.fi
|
a50a1ec030f76876a6d96c3e54bb2903824f887a
|
416fc7edb7e19cf3d649b2cb7cc83981ebf69a59
|
/tests/test_gdef_importer.py
|
185ff37b54fb833f9349ec97cb53e610ccba7c26
|
[
"MIT"
] |
permissive
|
natter1/gdef_reader
|
2af9ecc6c3a3431abcaa0f711507a33c06126f49
|
6f161ba3ae82449e10238ae92f8e06bd785f2105
|
refs/heads/master
| 2023-01-24T01:42:38.435664
| 2023-01-05T09:50:59
| 2023-01-05T09:50:59
| 243,775,694
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 958
|
py
|
"""
This file contains tests for gdef_importer.py.
@author: Nathanael Jöhrmann
"""
# todo: add temporary folder and test export of *.pygdf and *.png (export_measurements())
from gdef_reader.gdef_importer import GDEFImporter
from tests.conftest import AUTO_SHOW
class TestGDEFImporter:
    """Tests for GDEFImporter; fixtures (gdf_example_01_path, gdef_importer,
    AUTO_SHOW) come from tests/conftest.py."""
    def test_init(self, gdf_example_01_path):
        # Constructing with a path should load the file and derive basename.
        importer = GDEFImporter(gdf_example_01_path)
        assert importer.basename == "example_01"
    def test_export_measurements(self, gdef_importer):
        # The example file is expected to contain exactly 4 measurements.
        measurements = gdef_importer.export_measurements()
        assert len(measurements) == 4
    def test_export_measurements_with_create_images(self, gdef_importer):
        # create_images follows the suite-wide AUTO_SHOW flag from conftest.
        measurements = gdef_importer.export_measurements(create_images=AUTO_SHOW)
        assert len(measurements) == 4
    def test_load(self, gdf_example_01_path):
        # load() on a bare importer must behave like GDEFImporter(path).
        importer = GDEFImporter()
        importer.load(gdf_example_01_path)
        assert importer.basename == "example_01"
|
[
"njoehrmann@gmail.com"
] |
njoehrmann@gmail.com
|
aafe3051a64a14a57d3f1ca72752c575dc969f49
|
1cd6b1fadc6f241108fc365250fc6774548e143d
|
/Chapter 3/03_string_function.py
|
78487af421285861d586d5f51a72add738ba828b
|
[] |
no_license
|
Tarun-Rao00/Python-CodeWithHarry
|
c0275065c078c6d43b2ed388b0b079ce0a37af04
|
c0b6d78feedb58a464ddbe20a885e42e030fd8f0
|
refs/heads/master
| 2023-08-02T11:13:06.065126
| 2021-10-09T13:40:19
| 2021-10-09T13:40:19
| 415,316,821
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 998
|
py
|
story = "once upon a time there was a youtuber Harry who uploaded free python course with handwritten notes"
name = "Tarun Rao"
name_small = "tarun Rao"
# String Functions
# Length
print(len(story))
print(len(name))
string = "This will number of characters in the string"
print(len(string))
# Endswith
print(string.endswith("ing"))
print(string.endswith("ING")) # Tells that endswith function is case sensitive
print(string.endswith("The"))
# Story Count
print("Number of 'a' in story :",story.count("a"))
print("Number of 'r' in story :",story.count("r"))
# Capitalise (Also decapitalizes other than first character- Changes "small Capital" to "Small capital")
print(story.capitalize())
print(name_small.capitalize())
# Find Function (Tells the position of a string, returns -1 if string is not present, tells the position of only first occurance)
print(story.find("uploaded"))
print(story.find("unknown"))
# Replace Function (Replaces all occurances)
print(story.replace("Harry","Tarun"))
|
[
"royalraotarun@gmail.com"
] |
royalraotarun@gmail.com
|
0c1dfa95811d44484a4859222f7f852fb83673bf
|
60c797b7747d5ebb0bf9f15cfe5324a6ad301a81
|
/doc/source/conf.py
|
8625442f1a91c1642573b8cb0ef7c163efba42a5
|
[
"Apache-2.0"
] |
permissive
|
b3c/mod_auth
|
cec46a535d131d6f55ce436ee3b58dc3ac306952
|
cacff60ecda51babbe5c55e5f46b6db36ab6752e
|
refs/heads/master
| 2020-05-31T09:33:54.700362
| 2013-03-27T13:30:14
| 2013-03-27T13:30:14
| 5,192,214
| 1
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,796
|
py
|
# -*- coding: utf-8 -*-
#
# mod_auth documentation build configuration file, created by
# sphinx-quickstart on Wed Jul 25 15:43:15 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../..'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'mod_auth'
copyright = u'2012, Alfredo Saglimbeni'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.0'
# The full version, including alpha/beta/rc tags.
release = '1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'mod_authdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'mod_auth.tex', u'mod\\_auth Documentation',
u'Alfredo Saglimbeni', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'mod_auth', u'mod_auth Documentation',
[u'Alfredo Saglimbeni'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'mod_auth', u'mod_auth Documentation',
u'Alfredo Saglimbeni', 'mod_auth', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
|
[
"a.saglimbeni@scsitaly.com"
] |
a.saglimbeni@scsitaly.com
|
2a624d33c2c930bf7d80f938ddce0229b033e9ae
|
760a4c9fb6a608be483220163476611f92d4cc65
|
/itertools.py
|
75dbf21efbd7be556665c354058b46814f3ef76a
|
[] |
no_license
|
thanuganesh/thanuganeshtestprojects
|
6bdace11523da6b4b1b29cd8f30193a0eb4cd88e
|
f09fda7b761274c855a1347d5055b439c1a3ac84
|
refs/heads/master
| 2020-04-05T13:13:14.622938
| 2018-11-17T13:04:13
| 2018-11-17T13:04:13
| 156,892,543
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,198
|
py
|
""" This script helps to identify how itertools are working"""
import itertools
# count(): infinite arithmetic sequence starting at 5 -> prints 5, then 6.
counter = itertools.count(start=5)
print(next(counter))
print(next(counter))
data = [100,200,300,400]
# zip() stops at the shortest input; zip_longest pads with None instead.
data_count = list(zip(itertools.count(), data))
data_count1 = list(zip(range(1, 10), data)) # prints only the data items count
data_count2 = list(itertools.zip_longest(range(1, 10),[1,2,3,4], data)) # prints all items till range comes
print(data_count)
print(data_count1)
print(data_count2)
# cycle(): repeats 1, 8, 9, 1, ... forever.
counter1 = itertools.cycle([1,8,9])
print(next(counter1))
print(next(counter1))
print(next(counter1))
print(next(counter1))
counter3 = itertools.repeat(3)
# str * 3 is string repetition, so map yields "thanuthanuthanu", "vinovinovino".
thanus = lambda x,y : x * y
squers = map(thanus, ["thanu","vino"], counter3)
##############################################starmap#############################################
# starmap unpacks each tuple as the arguments of thanus: 0*1, 1*2, 3*6.
squers_one = itertools.starmap(thanus, [(0,1),(1,2),(3,6)])
print(list(squers))
print(list(squers_one))
################################Combinations&permutations##########################################
letters = ['a', 'b', 'c', 'd']
numbers = [0,1,2,3]
names = ["thanu", "ganesh"]
#results = itertools.combinations(letters, 2) #combinations only given a single pair (a,b) not (b,a) becaz it is considered both as same
#for item in results:
#    print(item)
#if i need to print all the values then use permutations
# permutations treats (a, b) and (b, a) as distinct pairs.
results = itertools.permutations(letters, 2)
for item in results:
    print(item)
##########################################chain##############################
#combine all the squences and we need to iterate then chain
cobined = itertools.chain(letters, numbers, names)
for i in cobined:
    print(i)
##################islice#####################################
# islice(iterable, start, stop, step): indices 2 and 4 of range(10), but 4
# is excluded by stop=4, so only 2 is printed.
resp = itertools.islice(range(10), 2, 4, 2)
for ite in resp:
    print (ite)
# islice on a file handle: take just the first 3 lines.
with open("isslice_example.txt") as f:
    header = itertools.islice(f,3)
    for item in header:
        print(item)
#####################compress######################
# compress keeps the items whose matching selector is truthy: 0, 1, 3.
selectors =[True, True, False, True]
first_result = itertools.compress(numbers, selectors)
for itr in first_result:
    print(itr)
######################filter Function#####################################
|
[
"thanuganesh24@gmail.com"
] |
thanuganesh24@gmail.com
|
b798c7cbe257643e2dfdb4ec1ece8de729257d4b
|
889b81ef12c0b4c9590e3ec07024c86787cf3196
|
/algorithms/google/RemoveComment.py
|
624ca51f0f687560fa8c889dfd381c54e5cccf84
|
[] |
no_license
|
james4388/algorithm-1
|
ab06ff40819994483e879f2f9c950e6b6711832c
|
2d5c09b63438aee7925252d5c6c4ede872bf52f1
|
refs/heads/master
| 2020-11-25T04:43:24.249904
| 2019-11-18T06:12:48
| 2019-11-18T06:12:48
| 228,507,062
| 1
| 0
| null | 2019-12-17T01:26:59
| 2019-12-17T01:26:59
| null |
UTF-8
|
Python
| false
| false
| 1,317
|
py
|
# https://leetcode.com/problems/remove-comments/
# Source contains list of lines
# comment block: /*..*/ can span multiple line, and line //
# Remove comment from code
# Special case: /*/
def removeComments(source):
    """Strip // line comments and /* ... */ block comments from *source*.

    *source* is a list of code lines.  Block comments may span lines; the
    text before an opening '/*' is joined with the text after the matching
    '*/', even across lines.  Lines left empty are dropped.  Handles the
    '/*/' special case: the '*' of an opening '/*' cannot also close it.
    Returns the cleaned list of lines.
    """
    cleaned = []
    in_block = False
    block_open_at = None   # (line_no, col) of the most recent '/*'
    pending = ''           # text carried across a multi-line block comment
    for line_no, line in enumerate(source):
        col = 0
        length = len(line)
        while col < length:
            pair = line[col:col + 2]
            if in_block:
                # '*/' ends the block unless its '*' is the one that
                # opened it (i.e. the sequence was '/*/').
                if pair == '*/' and (line_no, col - 1) != block_open_at:
                    in_block = False
                    col += 1
                col += 1
            elif pair == '//':
                break  # rest of this line is a comment
            elif pair == '/*':
                in_block = True
                block_open_at = (line_no, col)
                col += 2
            else:
                pending += line[col]
                col += 1
        if pending and not in_block:
            cleaned.append(pending)
            pending = ''
    if pending:
        cleaned.append(pending)
    return cleaned
# Demo run against the LeetCode 722 example program.
source = ["/*Test program */", "int main()", "{ ", " // variable declaration ", "int a, b, c;", "/* This is a test", " multiline ", " comment for ", " testing */", "a = b + c;", "}"]
print("remove comment...", removeComments(source))
|
[
"tamnghoang@gmail.com"
] |
tamnghoang@gmail.com
|
14167fcc8c9e9bcc4a97f995bda3a742067eaa30
|
752ba86555cd6e5dcdbe85e68f311c2cbf8c7de9
|
/1.Pythonプログラミングの基本/2章.フロー/exitExample.py
|
283c1d3599f4d1acf25e1768402ca08a26d23fa8
|
[] |
no_license
|
Pyo0811/CodeCamp
|
e2615f476e2a5b863542d0b3f91bf68b46f30d4c
|
43b62ac953293d15fb02cd2e4ccffb0f2afec4ae
|
refs/heads/master
| 2020-03-30T09:15:18.408363
| 2018-11-19T14:55:06
| 2018-11-19T14:55:06
| 151,068,040
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 215
|
py
|
from sys import *
# Prompt repeatedly; typing "exit" terminates via sys.exit() (imported by
# the star-import above), otherwise echo what was entered.
while True :
    print('終了するにはexitと入力してください。')  # "Type exit to quit."
    response = input()
    if response == 'exit':
        exit()
    print(response + 'と入力されました。')  # "... was entered."
|
[
"sjpyo91@gmail.com"
] |
sjpyo91@gmail.com
|
dc2b75d3d1c4afa7f4a66aed083cf6b96ac73a86
|
83fbf7296ca911d774af0c3ce54e4d39ab2aa670
|
/model_api/model_api/wsgi.py
|
0255074081344b6a767a9e2f841e6ce94887d5cf
|
[] |
no_license
|
Xavizkid/Python_For_Data_Analysis
|
6085e7a2925c9478aa3bb27dfcb52aefe07f6837
|
f90d5f4b305c1359afec4bf8d2b10c6d93a5a36f
|
refs/heads/main
| 2023-02-14T03:53:46.454377
| 2021-01-10T18:40:31
| 2021-01-10T18:40:31
| 323,577,618
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 411
|
py
|
"""
WSGI config for model_api project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at this project's settings before building the handler
# (setdefault lets an externally-set DJANGO_SETTINGS_MODULE win).
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'model_api.settings')
# Module-level WSGI callable that application servers import and serve.
application = get_wsgi_application()
|
[
"noreply@github.com"
] |
noreply@github.com
|
7e5bb7c4fd4c0b14d3a1b3190ac870bc303b7697
|
a20c2e03720ac51191c2807af29d85ea0fa23390
|
/vowelorconsonant.py
|
18231b547eae391f8d07f55d552ba6abc0453b56
|
[] |
no_license
|
KishoreKicha14/Guvi1
|
f71577a2c16dfe476adc3640dfdd8658da532e0d
|
ddea89224f4f20f92ebc47d45294ec79040e48ac
|
refs/heads/master
| 2020-04-29T23:32:13.628601
| 2019-08-05T17:48:18
| 2019-08-05T17:48:18
| 176,479,262
| 0
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 255
|
py
|
# Read one character and classify it as a vowel, consonant, or invalid.
#
# Bug fixes versus the original:
#   * the vowel branch did `print(vowel)` — an undefined name, so every
#     vowel crashed with NameError; it now prints "Vowel".
#   * the letter test used range(65,98), which wrongly accepted the
#     non-letter characters '[' '\' ']' '^' '_' '`' (codes 91-96); the
#     check is now a proper A-Z / a-z test.
#   * multi-character or empty input now prints "invalid" instead of
#     raising TypeError from ord().
n = input()
if len(n) == 1 and n.isascii() and n.isalpha():
    # Case-insensitive vowel membership test.
    if n.lower() in "aeiou":
        print("Vowel")
    else:
        print("Consonant")
else:
    print("invalid")
|
[
"noreply@github.com"
] |
noreply@github.com
|
f64984a3fbdce4d894725b8e850866123ab62660
|
6b82695358c309bd09da9153dbedf26720fa7dc6
|
/2020/18.py
|
5a857f9455b8ffffd6731e00165c580483ea65aa
|
[] |
no_license
|
viliampucik/adventofcode
|
0e7b4cca7d7aaed86bdc2b8c57d1056b4620e625
|
e7e0ab44ace3cf762b796730e582ab222a45f7d0
|
refs/heads/master
| 2023-01-04T18:19:07.064653
| 2022-12-26T19:42:59
| 2022-12-26T19:42:59
| 226,700,744
| 33
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 383
|
py
|
#!/usr/bin/env python
import re
import sys
class I(int):
    """int subclass that remaps operators for AoC 2020 day 18.

    The input is rewritten before eval (see below): every number becomes
    I(n) and every '*' becomes '-'.  Python then parses with *its*
    precedence while these overloads supply the puzzle semantics:
      '+' -> addition        (same precedence as '-', so part 1 evaluates
                              strictly left-to-right)
      '-' -> multiplication  (stand-in for the original '*')
      '*' -> addition        (part 2 replaces '+' with '*', which binds
                              tighter than '-', making addition evaluate
                              before multiplication)
    """
    def __add__(a, b):
        return I(a.real + b.real)
    def __sub__(a, b):
        # '-' is the rewritten '*': multiply.
        return I(a.real * b.real)
    __mul__ = __add__
# Rewrite the input: wrap every number in I(...) and turn '*' into '-' so
# eval() applies the remapped operators defined on class I.
lines = re.sub(r"(\d+)", r"I(\1)", sys.stdin.read()).replace("*", "-").splitlines()
# Part 1: '+' and '-' share precedence, so evaluation is left-to-right.
print(sum(eval(line) for line in lines))
# Part 2: additionally map '+' to '*' so addition binds tighter than
# (rewritten) multiplication.
print(sum(eval(line.replace("+", "*")) for line in lines))
|
[
"viliampucik@users.noreply.github.com"
] |
viliampucik@users.noreply.github.com
|
78884c794524fa71c1414efcdae273a4bfb641fb
|
9ee32a4170db4639e904a14c582aaac66a7fa265
|
/spyvm/test/jittest/base.py
|
3bf38e1086e5b19ef1eb55615f3f1bb7f457b744
|
[] |
no_license
|
timfel/lang-smalltalk
|
b5f3e2e3da1894c18c1ea46211dad6cfdc67a19c
|
e326304466adab077f7dcddeea174a35cd045942
|
refs/heads/master
| 2016-09-05T23:59:33.936718
| 2014-01-17T08:21:50
| 2014-01-17T08:21:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,067
|
py
|
import subprocess
import os
# TODO:
from pypy.tool.jitlogparser.parser import SimpleParser, Op
from pypy.tool.jitlogparser.storage import LoopStorage
from rpython.jit.metainterp.resoperation import opname
from rpython.jit.tool import oparser
from rpython.tool import logparser
# Repository root: three directory levels up from this test module.
BasePath = os.path.abspath(
    os.path.join(
        os.path.join(os.path.dirname(__file__), os.path.pardir),
        os.path.pardir,
        os.path.pardir
    )
)
# Smalltalk image executed by the JIT trace tests (lives next to this file).
BenchmarkImage = os.path.join(os.path.dirname(__file__), "benchmark.image")
class BaseJITTest(object):
    """Harness for JIT trace tests.

    Runs a code snippet on the benchmark image under the `spy` binary with
    PYPYLOG trace logging enabled, parses the resulting log into Trace
    objects, and compares traced operations against an expected listing.
    """
    def run(self, spy, tmpdir, code):
        # Execute the snippet; PYPYLOG captures the optimized JIT traces
        # into tmpdir/x.pypylog.  The code's newlines become CRLF for the
        # Smalltalk reader.
        proc = subprocess.Popen(
            [str(spy), "-r", code.replace("\n", "\r\n"), BenchmarkImage],
            cwd=str(tmpdir),
            env={"PYPYLOG": "jit-log-opt:%s" % tmpdir.join("x.pypylog")}
        )
        proc.wait()
        data = logparser.parse_log_file(str(tmpdir.join("x.pypylog")), verbose=False)
        data = logparser.extract_category(data, "jit-log-opt-")
        storage = LoopStorage()
        traces = [SimpleParser.parse_from_input(t) for t in data]
        main_loops = storage.reconnect_loops(traces)
        traces_w = []
        # Main loops become Trace wrappers; any other trace is attached to
        # the most recently seen main loop as a bridge.
        for trace in traces:
            if trace in main_loops:
                traces_w.append(Trace(trace))
            else:
                traces_w[len(traces_w) - 1].addbridge(trace)
        return traces_w
    def assert_matches(self, trace, expected):
        # Parse the hand-written expected listing (blank lines skipped)
        # into ops, then compare op-by-op, tolerating a consistent
        # renaming of variables (tracked in `aliases`).
        expected_lines = [
            line.strip()
            for line in expected.splitlines()
            if line and not line.isspace()
        ]
        parser = Parser(None, None, {}, "lltype", None, invent_fail_descr=None, nonstrict=True)
        expected_ops = [parser.parse_next_op(l) for l in expected_lines]
        aliases = {}
        assert len(trace) == len(expected_ops)
        for op, expected in zip(trace, expected_ops):
            self._assert_ops_equal(aliases, op, expected)
    def _assert_ops_equal(self, aliases, op, expected):
        # Ops match when names agree and every argument agrees after the
        # alias substitution; new pairings are recorded as they appear.
        assert op.name == expected.name
        assert len(op.args) == len(expected.args)
        for arg, expected_arg in zip(op.args, expected.args):
            if arg in aliases:
                arg = aliases[arg]
            elif arg != expected_arg and expected_arg not in aliases.viewvalues():
                aliases[arg] = arg = expected_arg
            assert arg == expected_arg
class Parser(oparser.OpParser):
def get_descr(self, poss_descr, allow_invent):
if poss_descr.startswith(("TargetToken", "<Guard")):
return poss_descr
return super(Parser, self).get_descr(poss_descr, allow_invent)
def getvar(self, arg):
return arg
def create_op(self, opnum, args, res, descr):
return Op(opname[opnum].lower(), args, res, descr)
class Trace(object):
def __init__(self, trace):
self._trace = trace
self._bridges = []
self._bridgeops = None
self._loop = None
def addbridge(self, trace):
self._bridges.append(trace)
@property
def bridges(self):
if self._bridgeops:
return self._bridgeops
else:
self._bridgeops = []
for bridge in self._bridges:
self._bridgeops.append([op for op in bridge.operations if not op.name.startswith("debug_")])
return self._bridgeops
@property
def loop(self):
if self._loop:
return self._loop
else:
self._loop = self._parse_loop_from(self._trace)
return self._loop
def _parse_loop_from(self, trace, label_seen=None):
_loop = []
for idx, op in enumerate(self._trace.operations):
if label_seen and not op.name.startswith("debug_"):
_loop.append(op)
if op.name == "label":
if label_seen is None: # first label
label_seen = False
else:
label_seen = True # second label
if len(_loop) == 0:
raise ValueError("Loop body couldn't be found")
return _loop
|
[
"timfelgentreff@gmail.com"
] |
timfelgentreff@gmail.com
|
eaa28600c1e21f92f60215d35f16fee5c8077d0a
|
f965d10cf1d47f3e76ccc6b1b67492d6d927bcf6
|
/app/trackerapp/forms.py
|
234230e30b4344e2142f2150aabf3ae79523a40d
|
[] |
no_license
|
idalmasso/my_tracker_app
|
413f34338f3e4dbf77963376578fefa35a0d5f3b
|
da9de8a5b81c757c54d1627309c0a24108341fde
|
refs/heads/master
| 2020-04-01T01:02:13.429194
| 2018-11-10T16:28:40
| 2018-11-10T16:28:40
| 152,723,342
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,348
|
py
|
from flask_wtf import FlaskForm
from wtforms import StringField, SubmitField, MultipleFileField, TextAreaField, SelectField,SelectMultipleField
from wtforms.validators import DataRequired
from .lookup import *
class AddTrackerForm(FlaskForm):
title = StringField('Title', validators=[DataRequired()])
description = TextAreaField('Description', validators=[DataRequired()])
priority = SelectField('Priority', choices = Lookup(PRIORITIES))
project = SelectField('Project', validators=[DataRequired()])
user_assigned = SelectField('Assigned to', validators=[DataRequired()])
categories = SelectMultipleField('Categories',choices=Lookup(CATEGORIES))
images = MultipleFileField('Images')
submit = SubmitField('Submit')
class EditTrackerForm(FlaskForm):
title = StringField('Title', validators=[DataRequired()])
description = TextAreaField('Description', validators=[DataRequired()])
status = SelectField('Status', choices =Lookup(STATUSES))
priority = SelectField('Priority', choices =Lookup(PRIORITIES))
project = SelectField('Project', validators=[DataRequired()])
user_assigned = SelectField('Assigned to', validators=[DataRequired()])
categories = SelectMultipleField('Categories',choices=Lookup(CATEGORIES))
images = MultipleFileField('Images')
submit = SubmitField('Submit')
|
[
"ivano.dalmasso@gmail.com"
] |
ivano.dalmasso@gmail.com
|
571953564834f3a234047bd87a97f90651b0e1ab
|
0205291dfd8c971c14848db38297b8ada75081f8
|
/Pong.py
|
35f0182a266d242d067c2455b81e50d6f69b67fe
|
[] |
no_license
|
Mattmtech/Pong
|
4721dd381f2f4dccb70eea597f7f07a2a5bd959a
|
0adff27d9849a4c73a3abc17f255c4c6119f457f
|
refs/heads/master
| 2020-04-04T00:56:18.756148
| 2018-11-01T04:39:28
| 2018-11-01T04:39:28
| 155,662,258
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,097
|
py
|
import turtle
window = turtle.Screen()
window.title("Pong")
window.bgcolor("black")
window.setup(width=800, height=600)
window.tracer(0)
# Left Paddle
left_paddle = turtle.Turtle()
left_paddle.speed(0)
left_paddle.shape("square")
left_paddle.color("white")
left_paddle.shapesize(stretch_wid=5, stretch_len=1)
left_paddle.penup()
left_paddle.goto(-350, 0)
#Right Paddle
right_paddle = turtle.Turtle()
right_paddle.speed(0)
right_paddle.shape("square")
right_paddle.color("white")
right_paddle.shapesize(stretch_wid=5, stretch_len=1)
right_paddle.penup()
right_paddle.goto(350, 0)
# Ball
ball = turtle.Turtle()
ball.speed(0)
ball.shape("square")
ball.color("white")
ball.penup()
ball.goto(0, 0)
ball.dx = .2
ball.dy = -.2
#Pen
pen = turtle.Turtle()
pen.speed(0)
pen.color("white")
pen.penup()
pen.hideturtle()
pen.goto(0, 255)
pen.write("Player 1: 0 Player 2: 0", align = "center", font = ("Courier", 24, "normal"))
#Functions
def left_paddle_up():
y = left_paddle.ycor()
y+=20
left_paddle.sety(y)
def left_paddle_down():
y = left_paddle.ycor()
y-=20
left_paddle.sety(y)
def right_paddle_up():
y = right_paddle.ycor()
y+=20
right_paddle.sety(y)
def right_paddle_down():
y = right_paddle.ycor()
y-=20
right_paddle.sety(y)
#Key Listeners
window.listen()
window.onkeypress(left_paddle_up, "w")
window.onkeypress(left_paddle_down, "s")
window.onkeypress(right_paddle_up, "Up")
window.onkeypress(right_paddle_down, "Down")
p1_score = 0
p2_score = 0
while True:
window.update()
#move ball
ball.setx(ball.xcor() + ball.dx)
ball.sety(ball.ycor() + ball.dy)
#boundary checking for the ball
if ball.ycor() > 290:
ball.sety(290)
ball.dy *= -1
if ball.ycor() < -290:
ball.sety(-290)
ball.dy *= -1
if ball.xcor() > 390:
p1_score += 1
pen.clear()
pen.write("Player 1: {} Player 2: {}".format(p1_score, p2_score), align="center",
font=("Courier", 24, "normal"))
ball.goto(0, 0)
ball.dx *= -1
if ball.xcor() < -390:
p2_score += 1
pen.clear()
pen.write("Player 1: {} Player 2: {}".format(p1_score, p2_score), align="center",
font=("Courier", 24, "normal"))
ball.goto(0, 0)
ball.dx *= -1
#Setting boundaries for the paddles
if right_paddle.ycor() > 250:
right_paddle.goto(right_paddle.xcor(), 250)
if right_paddle.ycor() < -250:
right_paddle.goto(right_paddle.xcor(), -250)
if left_paddle.ycor() > 250:
left_paddle.goto(left_paddle.xcor(), 250)
if left_paddle.ycor() < -250:
left_paddle.goto(left_paddle.xcor(), -250)
#Paddle Collision
if (ball.xcor() > 340 and ball.xcor() < 350) and (ball.ycor() < right_paddle.ycor() + 50 and ball.ycor() > right_paddle.ycor() -50):
ball.setx(340)
ball.dx *= -1
if (ball.xcor() < -340 and ball.xcor() > -350) and (ball.ycor() < left_paddle.ycor() + 50 and ball.ycor() > left_paddle.ycor() - 50):
ball.setx(-340)
ball.dx *= -1
|
[
"mmarroquin@callutheran.edu"
] |
mmarroquin@callutheran.edu
|
dcae758306f16f23f0d2f0b82fe613cc29366596
|
bb36b7af4f1eebba054657d79d55bcf8e5f1926a
|
/spider/migrations/0001_initial.py
|
4e1a7510c9e8c0a1a2c89df3427ec8cabf09d2ac
|
[] |
no_license
|
WetrInk/Spiderite
|
0918fc6db0ceb1fec76a39b8b7b1d137682c97f2
|
c49a3e89c6481c2ef8fec3e95d8081112caff3f8
|
refs/heads/master
| 2020-04-02T04:22:14.810768
| 2018-10-24T14:00:59
| 2018-10-24T14:00:59
| 154,013,609
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 672
|
py
|
# Generated by Django 2.1.2 on 2018-10-21 12:23
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Bulletin',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('source', models.CharField(max_length=300)),
('title', models.CharField(max_length=50)),
('time', models.CharField(max_length=20)),
('content', models.CharField(max_length=300)),
],
),
]
|
[
"weever56@gmail.com"
] |
weever56@gmail.com
|
fc2d879b50dddeb1fbae959fb47c53365375b981
|
1b5312c1c3e439eb52a1de981df0bf8fe34af048
|
/venv/bin/easy_install
|
26db7d7b5205a44f54aeb48b126b572be99675c2
|
[] |
no_license
|
michelle951111/SI507lec24
|
2d9501146a6c7bbcf6fe047cec11ff53c681a0c7
|
3ec364aea76171814631b67a59d733702884ec03
|
refs/heads/master
| 2020-03-09T20:14:05.062492
| 2018-04-10T18:36:14
| 2018-04-10T18:36:14
| 128,979,081
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 279
|
#!/Users/michelleyuan/Documents/GitHub/lec24/venv/bin/python3.6
# -*- coding: utf-8 -*-
import re
import sys
from setuptools.command.easy_install import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
|
[
"yuanmeng@umich.edu"
] |
yuanmeng@umich.edu
|
|
4e8f8843efccc8e62b784f72e0a0c8e2cf550a4f
|
161d43b73438c4423949f4d4898e44f015691a0e
|
/jia/coords2grid_Wx_stampede.py
|
580269d80af282a4a2011cfbeec9d4df619480a3
|
[] |
no_license
|
apetri/CFHTLens_analysis
|
a44f754114a6a6129088f0771cc558baed987462
|
b19343b43b54870f7950bcd9ea76bbe829448c44
|
refs/heads/master
| 2020-05-21T22:06:24.551906
| 2017-12-14T16:17:08
| 2017-12-14T16:17:08
| 16,521,933
| 3
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 14,704
|
py
|
##########################################################
### This code is for Jia's thesis project B. It does the following:
### 1) organizes the CFHT catalogue to 4 Wx fields, with proper
### format that's easy to use in the future
### 2) pick out a random redshift and 2 peak redshift
### 3) converts RA DEC to (x, y) radian, using Genomonic projection,
### centered at map center
### 4) final products: convergence maps and galcount maps split in
### different redshift bins
import numpy as np
from scipy import *
from pylab import *
import os
import WLanalysis
#from emcee.utils import MPIPool
#from multiprocessing import Pool
cat_dir='/home1/02977/jialiu/CFHT_cat/'
#cat_dir = '/Users/jia/CFHTLenS/catalogue/'
split_dir = cat_dir+'split/'
W_dir = lambda Wx: cat_dir+'W%s/'%(Wx) #dir for W1..W4 field
splitfiles = os.listdir(split_dir)
zbins = array([0.4, 0.5, 0.6, 0.7, 0.85, 1.3])#arange(0.3,1.35,0.1)
centers = array([[34.5, -7.5], [134.5, -3.25],[214.5, 54.5],[ 332.75, 1.9]])
sigmaG_arr = (0.1,)#(0.5, 1, 1.8, 3.5, 5.3, 8.9)
############################################################
########## calculate map size ##############################
#RA1 =(30.0, 39.0)#starting RA for W1
#DEC1=(-11.5,-3.5)
#RA2 =(132.0, 137.0)
#DEC2=(-6.0,-0.5)
#RA3 =(208.0, 221.0)
#DEC3=(51.0, 58.0)
#RA4 =(329.5, 336.0)
#DEC4=(-1.2, 5.0)
#RAs=(RA1,RA2,RA3,RA4)
#DECs=(DEC1,DEC2,DEC3,DEC4)
###dpp=0.0016914558667664816#degrees per pixel = sqrt(12)/2048
#dpp = 0.0067658234670659265#degrees per pixel = sqrt(12)/512
#xnum = lambda RA: round((amax(RA)-amin(RA))/dpp+1)
#ynum = lambda DEC:round((amax(DEC)-amin(DEC))/dpp+1)
##sized calculated using above 3 lines:
## W1 1331, W2 814, W3 1922, W4 962
sizes = (1330, 800, 1120, 950)
############################################################
z_arr = arange(0.025,3.5,.05)
idx2z = lambda idx:z_arr[idx]
field2int = lambda str: int(str[1])
#DrawRedshifts = lambda iPz: concatenate([[z_arr[argmax(iPz)],], WLanalysis.DrawFromPDF(z_arr, iPz, 2)])
DrawRedshifts = lambda iPz: WLanalysis.DrawFromPDF(z_arr, iPz, 1)
def list2coords(radeclist, Wx):
'''Input: radeclist = (Wfield, ra, dec), a 3xN matrix, (ra, dec) in degrees
Return: (subfield, x, y), a 3xN matrix, (x, y) in radians
'''
xy = zeros(shape = radeclist.shape) #create xy list
center = centers[Wx-1] #find the center for Wx field
f_Wx = WLanalysis.gnom_fun(center)
xy = degrees(array(map(f_Wx,radeclist)))
return xy
def OrganizeSplitFile(ifile):
'''read in one of the split file, pick out the redshift, and sort by fields, e2 is c2 correted, with e2-=c2'''
field = genfromtxt(split_dir+ifile,usecols=0,dtype=str)
field = array(map(field2int,field))
print ifile
# generate 2 random redshift and 1 peak
Pz = genfromtxt(split_dir+ifile,usecols=arange(14,84),dtype=str)
Pz = (np.core.defchararray.replace(Pz,',','')).astype(float)
seed(99)
z_rand1 = array(map(DrawRedshifts,Pz)).ravel()
seed(88)
z_rand2 = array(map(DrawRedshifts,Pz)).ravel()
z_peak = z_arr[argmax(Pz,axis=1)]
z_all = concatenate([[z_peak,], [z_rand1,], [z_rand2,]]).T
sheardata = genfromtxt(split_dir+ifile,usecols=[1,2,5,6,7,8,9,10,11,12,13,84])
ra, dec, e1, e2, w, fitclass, r, snr, mask, m, c2, mag = sheardata.T
e2 -= c2
i=0
for Wx in range(1,5):
idx=where((field==Wx)&(mask<=1.0)&(fitclass==0)&(amin(z_all,axis=-1)>=0.2)&(amax(z_all,axis=-1)<=1.3))[0]
print ifile, Wx, len(idx)/50000.0
if len(idx) > 0:
#data = (np.array([ra,dec,e1,e2,w,r,snr,m,c2,mag]).T)[idx]
data = (np.array([ra,dec,e1,e2,w,r,snr,m,c2,mag,z_peak, z_rand1, z_rand2]).T)[idx]
radeclist = sheardata[idx][:,[0,1]]
xylist = list2coords(radeclist, Wx)
xy_data = concatenate([xylist,data],axis=1)
WLanalysis.writeFits(xy_data, W_dir(Wx)+ifile+'.fit')#,fmt=['%i','%i','%s','%s','%s','%.3f'])
i+=1
def SumSplitFile2Grid(Wx):
'''For Wx field, read in each split file,
and create e1, e2 grid for mass construction.
Input: Wx=1,2,3,4
Output: (Me1, Me2, Mw, galn) split in each redshift bins'''
isize = sizes[Wx-1]
ishape = (len(zbins), isize, isize)
ishape_hi = (len(zbins)-1, isize, isize)#no need to do hi for zcut=1.3 since it's everything
Me1_hi = zeros(shape=ishape_hi)
Me2_hi = zeros(shape=ishape_hi)#hi is for higher redshift bins, lo is lower redshift
Mw_hi = zeros(shape=ishape_hi)
#Mk_hi = zeros(shape=ishape_hi)
galn_hi = zeros(shape=ishape_hi)
Me1_lo = zeros(shape=ishape)
Me2_lo = zeros(shape=ishape)
Mw_lo = zeros(shape=ishape)
#Mk_lo = zeros(shape=ishape)
galn_lo = zeros(shape=ishape)
Wfiles = os.listdir(W_dir(Wx))#get the list of split file for Wx
for iW in Wfiles:
datas = WLanalysis.readFits(W_dir(Wx)+iW)
#cols: x, y, ra, dec, e1, e2, w, r, snr, m, c2, mag, z_peak, z_rand1, z_rand2
z = datas.T[-3]#z_peak, -2 is z_rand1, -1 is z_rand2
i = 0 #zbin count
for zcut in zbins:
idx0 = where(z<zcut)[0]
idx1 = where(z>=zcut)[0]
for idx in [idx0,idx1]:
y, x, e1, e2, w, m = (datas[idx].T)[[0,1,4,5,6,9]]#note x, y is reversed in python
k = array([e1*w, e2*w, (1+m)*w])
x = radians(x)
y = radians(y)
print 'W'+str(Wx), iW, 'coords2grid, zbin =',zbins[i]
A, galn = WLanalysis.coords2grid(x, y, k, size=isize)
if len(idx)==0:#no need to calculate hi bin for zcut=1.3
continue
elif idx[0] == idx0[0]:
Me1_lo[i] += A[0]
Me2_lo[i] += A[1]
Mw_lo[i] += A[2]
galn_lo[i] += galn
else:
Me1_hi[i] += A[0]
Me2_hi[i] += A[1]
Mw_hi[i] += A[2]
galn_hi[i] += galn
i+=1
print 'Done collecting small fields for W'+str(Wx)
for i in range(len(zbins)):
for hl in ('lo','hi'):
Me1_fn = cat_dir+'Me_Mw_galn/W%i_Me1w_%s_%s.fit'%(Wx, zbins[i],hl)
Me2_fn = cat_dir+'Me_Mw_galn/W%i_Me2w_%s_%s.fit'%(Wx, zbins[i],hl)
Mw_fn = cat_dir+'Me_Mw_galn/W%i_Mwm_%s_%s.fit'%(Wx, zbins[i],hl)
galn_fn = cat_dir+'Me_Mw_galn/W%i_galn_%s_%s.fit'%(Wx, zbins[i],hl)
if hl=='hi' and i==len(zbins)-1:
continue
elif hl=='lo':
WLanalysis.writeFits(Me1_lo[i],Me1_fn, rewrite = True)
WLanalysis.writeFits(Me2_lo[i],Me2_fn, rewrite = True)
WLanalysis.writeFits(Mw_lo[i],Mw_fn, rewrite = True)
WLanalysis.writeFits(galn_lo[i],galn_fn, rewrite = True)
else:
WLanalysis.writeFits(Me1_hi[i],Me1_fn, rewrite = True)
WLanalysis.writeFits(Me2_hi[i],Me2_fn, rewrite = True)
WLanalysis.writeFits(Mw_hi[i],Mw_fn, rewrite = True)
WLanalysis.writeFits(galn_hi[i],galn_fn, rewrite = True)
PPA512=2.4633625
def KSmap(iinput):
'''Input:
i = ith zbin for zcut
hl = 'hi' or 'lo' for higher/lower z of the zcut
sigmaG: smoothing scale
Wx = 1..4 of the field
Output:
smoothed KS map and galn map.
'''
Wx, sigmaG, i, hl = iinput
print 'Wx, sigmaG, i, hl:', Wx, sigmaG, i, hl
kmap_fn = cat_dir+'KS/W%i_KS_%s_%s_sigmaG%02d.fit'%(Wx, zbins[i],hl,sigmaG*10)
galn_smooth_fn = cat_dir+'KS/W%i_galn_%s_%s_sigmaG%02d.fit'%(Wx, zbins[i],hl,sigmaG*10)
isfile_kmap, kmap = WLanalysis.TestFitsComplete(kmap_fn, return_file = True)
if isfile_kmap == False:
Me1_fn = cat_dir+'Me_Mw_galn/W%i_Me1w_%s_%s.fit'%(Wx, zbins[i],hl)
Me2_fn = cat_dir+'Me_Mw_galn/W%i_Me2w_%s_%s.fit'%(Wx, zbins[i],hl)
Mw_fn = cat_dir+'Me_Mw_galn/W%i_Mwm_%s_%s.fit'%(Wx, zbins[i],hl)
Me1 = WLanalysis.readFits(Me1_fn)
Me2 = WLanalysis.readFits(Me2_fn)
Mw = WLanalysis.readFits(Mw_fn)
Me1_smooth = WLanalysis.weighted_smooth(Me1, Mw, PPA=PPA512, sigmaG=sigmaG)
Me2_smooth = WLanalysis.weighted_smooth(Me2, Mw, PPA=PPA512, sigmaG=sigmaG)
kmap = WLanalysis.KSvw(Me1_smooth, Me2_smooth)
WLanalysis.writeFits(kmap,kmap_fn)
isfile_galn, galn_smooth = WLanalysis.TestFitsComplete(galn_smooth_fn, return_file = True)
if isfile_galn == False:
galn_fn = cat_dir+'Me_Mw_galn/W%i_galn_%s_%s.fit'%(Wx, zbins[i],hl)
galn = WLanalysis.readFits(galn_fn)
galn_smooth = WLanalysis.smooth(galn, sigma=sigmaG*PPA512)
WLanalysis.writeFits(galn_smooth, galn_smooth_fn)
#return kmap, galn_smooth
def Bmode(iinput):
'''Input:
i = ith zbin for zcut
hl = 'hi' or 'lo' for higher/lower z of the zcut
sigmaG: smoothing scale
Wx = 1..4 of the field
Output:
smoothed KS map and galn map.
'''
Wx, sigmaG, i, hl = iinput
print 'Bmode - Wx, sigmaG, i, hl:', Wx, sigmaG, i, hl
bmap_fn = cat_dir+'KS/W%i_Bmode_%s_%s_sigmaG%02d.fit'%(Wx, zbins[i],hl,sigmaG*10)
#galn_smooth_fn = cat_dir+'KS/W%i_galn_%s_%s_sigmaG%02d.fit'%(Wx, zbins[i],hl,sigmaG*10)
isfile_kmap, bmap = WLanalysis.TestFitsComplete(bmap_fn, return_file = True)
if isfile_kmap == False:
Me1_fn = cat_dir+'Me_Mw_galn/W%i_Me1w_%s_%s.fit'%(Wx, zbins[i],hl)
Me2_fn = cat_dir+'Me_Mw_galn/W%i_Me2w_%s_%s.fit'%(Wx, zbins[i],hl)
Mw_fn = cat_dir+'Me_Mw_galn/W%i_Mwm_%s_%s.fit'%(Wx, zbins[i],hl)
Me1 = WLanalysis.readFits(Me1_fn)
Me2 = WLanalysis.readFits(Me2_fn)
Mw = WLanalysis.readFits(Mw_fn)
Me1_smooth = WLanalysis.weighted_smooth(Me1, Mw, PPA=PPA512, sigmaG=sigmaG)
Me2_smooth = WLanalysis.weighted_smooth(Me2, Mw, PPA=PPA512, sigmaG=sigmaG)
### Bmode conversion is equivalent to
### gamma1 -> gamma1' = -gamma2
### gamma2 -> gamma2' = gamma1
bmap = WLanalysis.KSvw(-Me2_smooth, Me1_smooth)
WLanalysis.writeFits(bmap,bmap_fn)
#return bmap
def Noise(iinput):
'''Input: (Wx, iseed)
Return: files of noise KS map, using randomly rotated galaxy.
'''
Wx, iseed = iinput
seed(iseed)
print 'Bmode - Wx, iseed:', Wx, iseed
bmap_fn = cat_dir+'Noise/W%i/W%i_Noise_sigmaG10_%04d.fit'%(Wx, Wx, iseed)
isfile_kmap, bmap = WLanalysis.TestFitsComplete(bmap_fn, return_file = True)
if isfile_kmap == False:
Me1_fn = cat_dir+'Me_Mw_galn/W%i_Me1w_1.3_lo.fit'%(Wx)
Me2_fn = cat_dir+'Me_Mw_galn/W%i_Me2w_1.3_lo.fit'%(Wx)
Mw_fn = cat_dir+'Me_Mw_galn/W%i_Mwm_1.3_lo.fit'%(Wx)
Me1_init = WLanalysis.readFits(Me1_fn)
Me2_init = WLanalysis.readFits(Me2_fn)
#### randomly rotate Me1, Me2 ###
Me1, Me2 = WLanalysis.rndrot(Me1_init, Me2_init)
#################################
Mw = WLanalysis.readFits(Mw_fn)
Me1_smooth = WLanalysis.weighted_smooth(Me1, Mw, PPA=PPA512, sigmaG=sigmaG)
Me2_smooth = WLanalysis.weighted_smooth(Me2, Mw, PPA=PPA512, sigmaG=sigmaG)
bmap = WLanalysis.KSvw(Me1_smooth, Me2_smooth)
WLanalysis.writeFits(bmap,bmap_fn)
plot_dir = '/Users/jia/CFHTLenS/plot/obsPK/'
def plotimshow(img,ititle,vmin=None,vmax=None):
#if vmin == None and vmax == None:
imgnonzero=img[nonzero(img)]
if vmin == None:
std0 = std(imgnonzero)
x0 = median(imgnonzero)
vmin = x0-3*std0
vmax = x0+3*std0
im=imshow(img,interpolation='nearest',origin='lower',aspect=1,vmin=vmin,vmax=vmax)
colorbar()
title(ititle,fontsize=16)
savefig(plot_dir+'%s.jpg'%(ititle))
close()
test_dir = '/Users/jia/CFHTLenS/obsPK/'
def TestCrossCorrelate (Wx, zcut, sigmaG):
'''Input:
Wx - one of the W1..W4 field (= 1..4)
zcut - redshift cut between KS background galaxies and forground cluster probe
sigmaG - smoothing
Output:
ell_arr, CCK, CCB
'''
galn_hi = WLanalysis.readFits(test_dir+'W%i_galn_%s_hi_sigmaG%02d.fit'%(Wx,zcut,sigmaG*10))
galn_lo = WLanalysis.readFits(test_dir+'W%i_galn_%s_lo_sigmaG%02d.fit'%(Wx,zcut,sigmaG*10))
galn_cut = 0.5*0.164794921875 #5gal/arcmin^2*arcmin^2/pix, arcmin/pix = 12.0*60**2/512.0**2 =
bmap = WLanalysis.readFits(test_dir+'W%i_Bmode_%s_hi_sigmaG%02d.fit'%(Wx,zcut,sigmaG*10))
kmap = WLanalysis.readFits(test_dir+'W%i_KS_%s_hi_sigmaG%02d.fit'%(Wx,zcut,sigmaG*10))
mask = where(galn_hi<galn_cut)
bmap[mask]=0
kmap[mask]=0
edges=linspace(5,100,11)
ell_arr, CCB = WLanalysis.CrossCorrelate (bmap,galn_lo,edges=edges)
ell_arr, CCK = WLanalysis.CrossCorrelate (kmap,galn_lo,edges=edges)
f=figure(figsize=(8,6))
ax=f.add_subplot(111)
ax.plot(ell_arr, CCB, 'ro',label='B-mode')
ax.plot(ell_arr, CCK, 'bo', label='KS')
legend()
#ax.set_xscale('log')
ax.set_xlabel('ell')
ax.set_ylabel(r'$\ell(\ell+1)P_{n\kappa}(\ell)/2\pi$')
ax.set_title('W%i_zcut%shi_sigmaG%02d'%(Wx,zcut,sigmaG*10))
#show()
savefig(plot_dir+'CC_edges_W%i_zcut%shi_sigmaG%02d.jpg'%(Wx,zcut,sigmaG*10))
close()
#plotimshow(kmap,'kmap_W%i_zcut%shi_sigmaG%02d.jpg'%(Wx,zcut,sigmaG*10))
#plotimshow(bmap,'bmap_W%i_zcut%shi_sigmaG%02d.jpg'%(Wx,zcut,sigmaG*10))
#plotimshow(galn_lo,'galn_W%i_zcut%shi_sigmaG%02d.jpg'%(Wx,zcut,sigmaG*10))
concWx = lambda Wx: array([WLanalysis.readFits(W_dir(Wx)+iW) for iW in os.listdir(W_dir(Wx))])
def sortWx(Wx):
#collect all the ifiles for each of the 4 Wx field, and store into one .npy file
#with columns:
#y, x, ra, dec, e1, e2, w, r, snr, m, c2, mag, z_peak, z_rand1, z_rand2
print Wx
ifile_arr = concWx(Wx)
sum_arr = array([ifile_arr[i][j] for i in range(len(ifile_arr)) for j in range(len(ifile_arr[i])) ])
#np.save(cat_dir+'W%s_cat_z0213'%(Wx), sum_arr)
#save only the columns needed for project-B
np.save(cat_dir+'W%s_cat_z0213_ra_dec_mag_zpeak'%(Wx), sum_arr[:,[2,3,11,12]])
Wx_sigmaG_i_hl_arr = [[Wx, sigmaG, i, hl] for Wx in range(1,5) for sigmaG in sigmaG_arr for i in range(0,len(zbins)-1) for hl in ['hi','lo']]+[[Wx, sigmaG, -1, 'lo'] for Wx in range(1,5) for sigmaG in sigmaG_arr]
################################################
###(1) split file organizing ###################
### uncomment next 1 line ###################
#pool.map(OrganizeSplitFile,splitfiles)
################################################
###(2) sum up the split file into 4 Wx fields###
### uncomment next 2 line ###################
#for Wx in range(1,5):
#SumSplitFile2Grid(Wx)
################################################
###(3) create KS maps for 6 zbins 6 sigmaG #####
### total should have 528 files (galn, KS)###
### uncomment next 1 line ###################
#map(KSmap, Wx_sigmaG_i_hl_arr[::-1])
################################################
###(4) B mode for picking out signals
### use 1000 maps with galaxies randomly
### rotated
### uncomment the next 1 line
#map(Bmode, Wx_sigmaG_i_hl_arr)
################################################
###(5) Create Noise KS maps by randomly rotate
### galaxies (2014/09/09)
#noise_input_arr =[[Wx, iseed] for Wx in range(1,5) for iseed in range(200,500)]
#map(Noise, noise_input_arr)
################################################
###(6) cross corrrelation
### put mask on KS map, and cross correlate
### for both B-mode(for compare), and true KS
### test on sigmaG=1.0, zcut=0.85
#Wx=1
#for zcut in zbins[:-1]:
#for sigmaG in sigmaG_arr:
#print 'Wx, zcut, sigmaG',Wx, zcut, sigmaG
#TestCrossCorrelate (Wx, zcut, sigmaG)
################################################
###(7) organize the Wx file into 4 catalogues with
### columns:
### y, x, ra, dec, e1, e2, w, r, snr, m, c2, mag, z_peak, z_rand1, z_rand2
#map(sortWx, range(1,5))
################################################
print 'DONE-DONE-DONE'
|
[
"liuxx479@gmail.com"
] |
liuxx479@gmail.com
|
8331cad7bb3dc5eb44b3b3c8b18a4d3cc6de8ad1
|
3abc672f601d44e74879c3641f967d957be336e5
|
/ImageOperations.py
|
25325ff0bf9e1730339e427fc7ccbb8367341137
|
[] |
no_license
|
OrganicGrow-solutions/Broccoli-counter
|
935cbcf66a8171f2763ae14cc745268889fb418a
|
0de3baf018709b17ef98f84777f77741bf9f4ebb
|
refs/heads/master
| 2023-02-27T21:20:11.818780
| 2021-02-05T05:18:40
| 2021-02-05T05:18:40
| 295,911,822
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 12,416
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Mon Sep 14 22:57:12 2020
@author: Todd Gillies
This is a class file which contains methods that perform various operations on the image.
"""
import ImageTests
import cv2
import numpy as np
class ImageOperator:
def __init__(self, erosionKernel, filter2dKernel):
self.erosionKernel = erosionKernel
# a kernel used for eroding
self.filter2dKernel = filter2dKernel
# a sharpen kernel for after erosion
def erode(self, img, iter):
# With this function, we "erode" the image, so that blobs that may be connected, even sparsely, to other blobs
# become stand-alone blobs, hopefully
erosion = cv2.erode(img, self.erosionKernel, iterations = iter)
negErosion = cv2.bitwise_not(erosion)
negErosion = cv2.filter2D(negErosion, -1, self.filter2dKernel)
cv2.imwrite("./images_for_fine-tuning/05_eroded.JPG", negErosion)
return negErosion
def whiteOutKeypoints(self, img, keypoints, counter, color):
# The main program functions in a loop, detecting plants each time through the loop
# In order so that the same plants are not detected twice, this function draws over the
# already-detected plants with a white circle.
# Also, if you give this function "black" as a color argument,
# it draws black circles over the detected plants
# (this is important in the final step of the process)
if (color == "white"):
col = (255, 255, 255)
else:
col = (0, 0, 0)
for k in keypoints:
cv2.circle(img, (int(k.pt[0]), int(k.pt[1])), int(k.size/2), col, -1)
return img
def mergeNeighbors(self, arr, img):
# The program has a tendency to recognize leaves, rather than plants, so
# this function merges keypoints which are close to eachother both horizontally and vertically
merged = []
neighborHoods = []
neighborsDeleted = []
self.drawImage(img, arr, "./images_for_fine-tuning/13_beforeMerge.jpg")
# Get ONE keypoint from the list of all keypoints
for kk in range(0, len(arr)):
# This is an array which will hold all the neighbors of this ONE point
thisGuysNeighbors = []
x1 = arr[kk].pt[0]
y1 = arr[kk].pt[1]
r1 = (arr[kk].size/2) + 5
# Compare that ONE keypoint with all other keypoints in the list
for k in range(0, len(arr)):
# (Actually, only compare if that ONE keypoint is not the keypoint in question)
if (kk != k):
x2 = arr[k].pt[0]
y2 = arr[k].pt[1]
r2 = (arr[k].size/2) + 5
if ImageTests.ImageTester.areTheyClose(x1, y1, r1, x2, y2, r2) == True:
# Add the keypoint that was near to that ONE point to the array thisGuysNeighbors
thisGuysNeighbors.append(arr[k])
# If the kk point in question had between 3 and 6 close neighbors...
if (6 > len(thisGuysNeighbors) > 3):
# put that neighbor array into the 'neighborHoods' array
neighborHoods.append(thisGuysNeighbors)
# Now, we delete all the keypoints in the 'neighborHoods' array from the main array
neighborsDeleted = self.deletePoints(neighborHoods, arr)
# Finally, using all the sub-arrays in the 'neighborHoods' array, we re-add keypoints to the main array
merged = self.reAddPoints(neighborHoods, neighborsDeleted)
self.drawImage(img, merged, "./images_for_fine-tuning/14_afterMerge.jpg")
return merged
def deletePoints(self, pointsToDelete, originalPoints):
# This deletes all the keypoints in an array of arrays (variable 'pointsToDelete')
# It is a function called by mergeNegibors
for o in range(0, len(pointsToDelete)):
thisGroup = pointsToDelete[o]
for keypoint in thisGroup:
try:
originalPoints.remove(keypoint)
except:
pass
return originalPoints
def reAddPoints(self, pointsToAdd, originalPoints):
# This gets each sub-array in the 2D array 'pointsToAdd'. It is a sub-array of keypoints
# It averages the sizes and positions of each keypoint in the sub array, then creates a new keypoint with that average size and position
# Then, it adds that keypoint to the 'originalPoints' array
for a in range(0, len(pointsToAdd)):
thisGroup = pointsToAdd[a]
if (len(thisGroup) > 0):
vesselPoint = thisGroup[0]
exes = 0
whys = 0
sizes = 0
for add in range(0, len(thisGroup)):
exes += thisGroup[add].pt[0]
whys += thisGroup[add].pt[1]
sizes += thisGroup[add].size
ptTuple = (exes/len(thisGroup), whys/len(thisGroup))
vesselPoint.pt = ptTuple
vesselPoint.size = (sizes/len(thisGroup))
originalPoints.append(vesselPoint)
return originalPoints
def drawImage(self, baseImage, thesePoints, filename):
# In the process of fine-tuning and debugging, I was checking images alot manually
# This function draws keypoints on a chosen image, and saves it with a filename
im_with_keypoints = cv2.drawKeypoints(baseImage, thesePoints, np.array([]), (0,0,255), cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
cv2.imwrite(filename, im_with_keypoints)
def showImage(self, image):
# Debugging here, and just wanted some function to show an image
# in real-time, rather than having to save it
number = ""
window_name = 'image'
cv2.imshow(window_name, image)
while True:
done = cv2.waitKey(0)
if done == 13:
cv2.destroyAllWindows()
return number
else:
number += str(done-48)
def findGaps(self, image, unerodedImage, returnImage, xstart, xend, ystart, yend, keypoints):
# Trying to segment the rows better here, for the purposes of drawing
# boxes around them and showing plant count figures.
# This function tries to find the spaces between the rows
xDim = image.shape[1]
yDim = image.shape[0]
lineStarts = []
lineEnds = []
totalCount = 0
# Due to the image stitching program, large sections of the image
# are sometimes black (having been slid left or right). The counter
# doesn't do well with these black sections, so let's find the actual
# start of the image by going through the image horizontally and finding
# the 1st non-black pixel, and put the coordinates in the variables
# startOfImage and xDim, respectively
startOfImage = 0
for x in range(0, xDim):
if np.any(image[1, x] > 30):
startOfImage = x
break
for x in range(xDim-1, 0, -1):
if np.any(image[1, x] > 30):
xDim = x
break
# Get a pixel "bar" (the height of the image and a width of 10 pixels)
# from the image, and slide it
# from the left edge to the right edge, getting the average pixel value at each
# step. When the average value is 255, we know we are in a gap, and a flag
# is triggered, along with a counter (howWide). When the average value
# is no longer 255, we know we have hit some plants, so we take the
# middle value of that counter and call it the middle of the gap
startsAndEnds = []
topAndBottom = [1, yDim-1]
howWide = 0
for verticalPosition in topAndBottom:
for x in range(startOfImage, xDim):
avPix = np.average(unerodedImage[yDim-verticalPosition:yDim, x-10:x])
if avPix > 252:
howWide += 1
if avPix < 255:
if howWide > 3:
middleHere = x - int(howWide / 2)
cv2.circle(image, (middleHere, verticalPosition), 2, (255,0,0), -1)
if verticalPosition == 1:
lineStarts.append(middleHere)
else:
lineEnds.append(middleHere)
howWide = 0
howWide = 0
startsAndEnds.append(startOfImage)
for start in lineStarts:
for end in lineEnds:
if abs(start-end) < 30:
startsAndEnds.append(start)
startsAndEnds.append(end)
break
startsAndEnds.append(xDim-1)
for x in range(0, len(startsAndEnds), 2):
thisPolygon = np.array([[startsAndEnds[x], 1], [startsAndEnds[x+1], 1], [startsAndEnds[x+1], yDim-1], [startsAndEnds[x], yDim-1]], np.int32)
tp = cv2.polylines(image, [thisPolygon], True, (255,0,0), 2)
image, count = self.countThisPolygon(image, unerodedImage, keypoints, tp, startsAndEnds[x], startsAndEnds[x+1], 1, yDim-1)
totalCount += count
returnImage[ystart:yend, xstart:xend, :] = image
return returnImage, totalCount
def countThisPolygon(self, image, imgForCountAdjustment, keypoints, thisPolygon, px1, px2, py1, py2):
    """Count blob keypoints inside the box (px1..px2, py1..py2), adjust the raw
    count with a fitted linear model, draw the adjusted number onto ``image``,
    and return ``(annotated_image, adjusted_count)``.

    ``imgForCountAdjustment`` is the un-eroded source image; its mean pixel
    value over the box feeds the adjustment formula.  ``thisPolygon`` is
    accepted but not used in the active code path.
    NOTE(review): assumes keypoints expose OpenCV's ``KeyPoint.pt`` (x, y) —
    confirm against the caller.
    """
    keyPointsInThisBox = 0
    #adjustedTotal = 0
    total = 0
    print(px1,px2,py1,py2)  # NOTE(review): leftover debug output
    if (px1 < 0):
        # Clamp so the numpy slice below doesn't wrap via a negative index.
        px1 = 0
    for k in keypoints:
        # Keypoint centers are (x, y) in image coordinates.
        if (px1 <= k.pt[0] <= px2) and (py1 <= k.pt[1] <= py2):
            keyPointsInThisBox += 1
    if (keyPointsInThisBox > 0):
        # now we get the average pixel value in that rectangle
        # so we can use it to adjust the crop counts
        avPix = np.average(imgForCountAdjustment[py1:py2, px1:px2])
        # Here's the magic re-adjustment!  Coefficients come from a linear
        # regression over hand-labelled data (see the commented-out training
        # data collection block below).
        adjustedCount = int(79.8124387478213 + (-0.303317043317345*avPix) + (0.0646497500776435*(px2-px1)) + (-0.112239905250904*keyPointsInThisBox))
        # Nudge short numbers toward the circle's center when drawing.
        if (keyPointsInThisBox < 10):
            textCenteringValue = px1 + 3
        else:
            textCenteringValue = px1
        # Badge: filled circle with the adjusted count written over it.
        image = cv2.circle(image, (px1+10, py1+10), 16, (55, 170, 72), -1)
        image = cv2.putText(image, str(adjustedCount), (textCenteringValue, py1+16), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (19, 168, 232), 2, cv2.LINE_AA)
        # ---- This is for gathering data to do linear regression and therefore alter
        # ---- the formula for "adjustedCount"
        # -------------------------------------
        #howManyForReal = self.showImage(image[py1:py2, px1:px2])
        # f = open("trainingData.txt", "a")
        # dataString = str(round(avPix, 3)) + "," + str(px2-px1) + "," + str(keyPointsInThisBox) + "," + str(howManyForReal) + "\n"
        # f.write(dataString)
        # f.close()
        #print(howManyForReal)
        total += adjustedCount
        #adjustedTotal += adjustedCount
        keyPointsInThisBox = 0
    return image, total #adjustedTotal
|
[
"noreply@github.com"
] |
noreply@github.com
|
9fe52b0d2955064f02e33f7ef81e170492fd5ec5
|
1ee90596d52554cb4ef51883c79093897f5279a0
|
/Sisteme/[C++] Ox Event Manager - Top 5 winners by Vegas [PREMIUM]/03. Client/root/uioxevent.py
|
1c602cfdb96c46c0b87d52f75e480054a6e1e75f
|
[] |
no_license
|
Reizonr1/metin2-adv
|
bf7ecb26352b13641cd69b982a48a6b20061979a
|
5c2c096015ef3971a2f1121b54e33358d973c694
|
refs/heads/master
| 2022-04-05T20:50:38.176241
| 2020-03-03T18:20:58
| 2020-03-03T18:20:58
| 233,462,795
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 16,095
|
py
|
# -*- coding: utf-8 -*-
###################################################################
# title_name : Metin2 - Ox Event Manager & Top 5 winners [Full source]
# date_created : 2017.01.21
# filename : uioxevent.py
# author : VegaS
# version_actual : Version 0.0.1
#
import ui
import dbg
import app
import chat
import uiToolTip
import wndMgr
import localeInfo
import playerSettingModule
import oxevent
import net
import introLogin, item_proto_list, item, ime, grp, uiCommon
EMPIRE_NAME = {
net.EMPIRE_A : localeInfo.EMPIRE_A, net.EMPIRE_B : localeInfo.EMPIRE_B, net.EMPIRE_C : localeInfo.EMPIRE_C
}
PATH_IMAGE__UNKNOWN_WINNER = "d:/ymir work/ui/path_oxevent/face/face_unknown.tga"
FACE_IMAGE_DICT = {
playerSettingModule.RACE_WARRIOR_M : "d:/ymir work/ui/path_oxevent/face/face_warrior_m_01.sub",
playerSettingModule.RACE_WARRIOR_W : "d:/ymir work/ui/path_oxevent/face/face_warrior_w_01.sub",
playerSettingModule.RACE_ASSASSIN_M : "d:/ymir work/ui/path_oxevent/face/face_assassin_m_01.sub",
playerSettingModule.RACE_ASSASSIN_W : "d:/ymir work/ui/path_oxevent/face/face_assassin_w_01.sub",
playerSettingModule.RACE_SURA_M : "d:/ymir work/ui/path_oxevent/face/face_sura_m_01.sub",
playerSettingModule.RACE_SURA_W : "d:/ymir work/ui/path_oxevent/face/face_sura_w_01.sub",
playerSettingModule.RACE_SHAMAN_M : "d:/ymir work/ui/path_oxevent/face/face_shaman_m_01.sub",
playerSettingModule.RACE_SHAMAN_W : "d:/ymir work/ui/path_oxevent/face/face_shaman_w_01.sub",
}
#if app.ENABLE_WOLFMAN_CHARACTER:
#FACE_IMAGE_DICT.update({playerSettingModule.RACE_WOLFMAN_M : "d:/ymir work/ui/path_oxevent/face/face_wolf_m_01.sub",})
class OxEventManagerLogin(ui.ScriptWindow):
    """Password prompt shown before the OX-event manager window opens.

    Loads its layout from uiscript/OxEventManagerLogin.py and sends the typed
    password to the server via oxevent.Manager(LOGIN, ...).
    """

    def __init__(self):
        ui.ScriptWindow.__init__(self)
        self.Initialize()

    def __del__(self):
        ui.ScriptWindow.__del__(self)

    def Initialize(self):
        """Load the UI layout and bind button/line-edit events."""
        try:
            pyScrLoader = ui.PythonScriptLoader()
            pyScrLoader.LoadScriptFile(self, "uiscript/OxEventManagerLogin.py")
        except:
            import exception
            exception.Abort("OxEventManagerLogin.Initialize.LoadObject")
        try:
            self.GetChild("accept_button").SetEvent(self.Open)
            self.GetChild("cancel_button").SetEvent(self.Close)
            self.GetChild("titlebar").SetCloseEvent(self.Close)
            # Password input field; focused so the GM can type immediately.
            self.LinePassword = self.GetChild("currentLine_Value")
            self.LinePassword.SetFocus()
        except:
            import exception
            exception.Abort("OxEventManagerLogin.Initialize.BindObject")
        self.SetCenterPosition()

    def Destroy(self):
        self.ClearDictionary()

    def Close(self):
        self.Hide()

    def Login(self):
        # Send the typed password to the server, then hide the prompt.
        oxevent.Manager(oxevent.LOGIN, str(self.LinePassword.GetText()), oxevent.EMPTY_VALUE, oxevent.EMPTY_VALUE)
        self.Hide()

    def Open(self):
        # Show a short "connecting" dialog; on timeout it fires Login().
        self.connectDialog = introLogin.ConnectingDialog()
        self.connectDialog.Open(3.0)
        self.connectDialog.SetText(localeInfo.OXEVENT_MANAGER_BTN_LOGIN)
        self.connectDialog.SAFE_SetTimeOverEvent(self.Login)
        self.connectDialog.SAFE_SetExitEvent(self.Close)
class OxEventManager(ui.ScriptWindow):
    """GM control window for the OX quiz event.

    Provides buttons to open/close the event, run questions, and hand out a
    reward item chosen through an incremental item-name search box.  All
    server actions go through oxevent.Manager(...).
    """

    def __init__(self):
        ui.ScriptWindow.__init__(self)
        # State for the item-search listbox.
        self.row = 0
        self.key = 0
        self.itemName = ""
        self.vnumIndex = []
        self.listKeys = []
        # Open confirmation dialogs, keyed by action id.
        self.wndOpenQuestion = {}
        # Order must match the action ids sent in AskQuestionPacket (key + 1).
        self.pageKey = ["open_event", "close_gates", "close_event", "reward_players", "ask_question", "close_force", "clear_reward"]
        self.Initialize()

    def __del__(self):
        ui.ScriptWindow.__del__(self)

    def Initialize(self):
        """Load the layout, cache child widgets, and wire up events."""
        try:
            pyScrLoader = ui.PythonScriptLoader()
            pyScrLoader.LoadScriptFile(self, "uiscript/OxEventManager.py")
        except:
            import exception
            exception.Abort("OxEventManager.Initialize.LoadObject")
        try:
            GetObject = self.GetChild
            self.main = {
                # Currently selected reward (item vnum + count).
                "rewards" : {
                    "password" : oxevent.EMPTY_PASSWORD,
                    "vnum" : oxevent.EMPTY_VNUM,
                    "count" : oxevent.EMPTY_COUNT
                },
                # Cached child widgets.
                "elements" :
                {
                    "board" : GetObject("Board"),
                    "participants" : GetObject("current_participants"),
                    "slot_vnum" : GetObject("slot_vnum_value"),
                    "slot_count" : GetObject("slot_count_value"),
                    "slot_image" : GetObject("slot_image_value"),
                    "listbox_bar" : GetObject("listbox_bar"),
                    "listbox" : GetObject("ListBox")
                },
                # Action buttons, keyed the same way as self.pageKey.
                "btn" :
                {
                    "open_event" : GetObject("open_button_btn"),
                    "close_gates" : GetObject("close_gates_btn"),
                    "close_event" : GetObject("close_event_btn"),
                    "close_force" : GetObject("force_close_event_btn"),
                    "reward_players" : GetObject("reward_players_btn"),
                    "ask_question" : GetObject("ask_question_btn"),
                    "clear_reward" : GetObject("clear_reward_btn"),
                }
            }
            # Each button asks for confirmation of action id (key + 1).
            for key in xrange(oxevent.APPEND_WINDOW):
                self.main["btn"][self.pageKey[key]].SAFE_SetEvent(self.AskQuestionPacket, key + 1)
            self.main["elements"]["slot_vnum"].SetEscapeEvent(self.Close)
            self.main["elements"]["slot_vnum"].OnIMEUpdate = ui.__mem_func__(self.OnUpdateKeyVnum)
            self.main["elements"]["slot_count"].SetEscapeEvent(self.Close)
            self.main["elements"]["slot_count"].OnIMEUpdate = ui.__mem_func__(self.OnUpdateKeyCount)
            self.main["elements"]["slot_count"].SetNumberMode()
            self.main["elements"]["listbox"].SetEvent(self.OnClick)
            self.main["elements"]["board"].SetCloseEvent(self.Close)
        except:
            import exception
            exception.Abort("OxEventManager.Initialize.BindObject")
        self.main["elements"]["slot_image"].Hide()
        self.itemTooltip = uiToolTip.ItemToolTip()
        self.itemTooltip.HideToolTip()
        self.SetStatusListBox(0)
        self.SetCenterPosition()
        self.UpdateRect()

    def SetStatusListBox(self, key):
        # key == 0 hides the search listbox and its bar; anything else shows.
        for it in [self.main["elements"]["listbox"], self.main["elements"]["listbox_bar"]]:
            if key == 0:
                it.Hide()
            else:
                it.Show()

    def GetIsSearchedVnum(self):
        """Return true when both an item vnum and a count have been chosen."""
        return ((self.main["rewards"]["vnum"] != oxevent.EMPTY_VNUM) and (self.main["rewards"]["count"] != oxevent.EMPTY_COUNT))

    def OnUpdateKeyCount(self):
        """IME callback for the count field: validate and store the count."""
        ui.EditLine.OnIMEUpdate(self.main["elements"]["slot_count"])
        def GetText():
            return self.main["elements"]["slot_count"].GetText()
        def GetIsTextDigit(val):
            return (val.isdigit())
        def IsStackable():
            # NOTE(review): defined but not called in the active path.
            item.SelectItem(int(self.main["rewards"]["vnum"]))
            return (item.IsAntiFlag(item.ITEM_ANTIFLAG_STACK))
        def IsDenied(val):
            return (int(val) > oxevent.ITEM_MAX_COUNT)
        val = GetText()
        if GetIsTextDigit(val):
            it = int(val)
            if IsDenied(it):
                # Cap the displayed count at the maximum allowed.
                self.main["elements"]["slot_count"].SetText(str(oxevent.ITEM_MAX_COUNT))
            self.main["rewards"]["count"] = it

    def OnUpdateKeyVnum(self):
        """IME callback for the name field: rebuild the item-search listbox."""
        ui.EditLine.OnIMEUpdate(self.main["elements"]["slot_vnum"])
        def GetText():
            return str(self.main["elements"]["slot_vnum"].GetText())
        def GetTextSize():
            return len(GetText())
        def SetKey(key):
            # CLEAR_DATA / REFRESH_DATA reset the search state; only
            # CLEAR_DATA also hides and empties the listbox.
            if key in [oxevent.CLEAR_DATA, oxevent.REFRESH_DATA]:
                self.vnumIndex = []
                self.listKeys = []
                self.row = 0
            if key != oxevent.REFRESH_DATA:
                self.SetStatusListBox(0)
                self.main["elements"]["listbox"].ClearItem()
        def GetItemCountListBox():
            return self.main["elements"]["listbox"].GetItemCount()
        def SetSizeListBox():
            # 17.5 px per row, presumably the row height of the listbox skin.
            for it in [self.main["elements"]["listbox"], self.main["elements"]["listbox_bar"]]:
                it.SetSize(200, 17.5 * GetItemCountListBox())
        # Too few characters typed: clear everything and bail out.
        if GetTextSize() <= oxevent.NEED_SIZE:
            SetKey(oxevent.CLEAR_DATA)
            return
        SetKey(oxevent.REFRESH_DATA)
        c_szText = GetText()
        # Case-insensitive prefix match against the item proto list.
        for key in item_proto_list.DICT:
            c_szItem, c_szName = key["vnum"], key["name"]
            if len(c_szName) >= len(c_szText) and c_szName[:len(c_szText)].lower() == c_szText.lower():
                self.listKeys.append(c_szItem)
        # Fill the listbox with at most MAX_ROWS matches.
        for key in xrange(len(self.listKeys)):
            if self.row >= oxevent.MAX_ROWS:
                break
            item.SelectItem(self.listKeys[key])
            c_szName = item.GetItemName()
            self.main["elements"]["listbox"].InsertItem(key, c_szName)
            self.vnumIndex.append(self.listKeys[key])
            self.row += 1
        self.SetStatusListBox(1)
        SetSizeListBox()
        if len(self.vnumIndex) == (oxevent.NEED_SIZE - 1):
            SetKey(oxevent.CLEAR_DATA)
            return

    def AppendTextLine(self, c_szName):
        # Remember the chosen item name and show it in the name field.
        self.itemName = c_szName
        self.main["elements"]["slot_vnum"].SetText(c_szName)
        self.main["elements"]["slot_vnum"].SetFocus()

    def OnClick(self, key, c_szName):
        """Listbox click: select an item from the search results."""
        def Clear():
            self.SetStatusListBox(0)
            self.main["elements"]["listbox"].ClearItem()
            self.row = 0
        def ShowImage():
            if self.GetIsSearchedVnum():
                item.SelectItem(self.main["rewards"]["vnum"])
                try:
                    self.main["elements"]["slot_image"].LoadImage(item.GetIconImageFileName())
                    self.main["elements"]["slot_image"].Show()
                except:
                    dbg.TraceError("OxEventManager.LoadImage - Failed to find item data")
        def MoveCursor(text):
            ime.SetCursorPosition(len(text) + 1)
        def SetItemVnum(key):
            self.key = key
            self.main["rewards"]["vnum"] = self.vnumIndex[self.key]
            # Reset the count field to a default of 1.
            self.main["elements"]["slot_count"].SetText(str(oxevent.NEED_SIZE))
            self.main["rewards"]["count"] = 1
        self.AppendTextLine(c_szName)
        SetItemVnum(key)
        MoveCursor(c_szName)
        ShowImage()
        Clear()

    def OnUpdate(self):
        # Show the item tooltip while hovering the reward icon.
        def PermisionOnToolTip():
            return ((self.main["elements"]["slot_image"].IsShow() and self.main["elements"]["slot_image"].IsIn()) and self.GetIsSearchedVnum())
        if PermisionOnToolTip():
            self.itemTooltip.SetItemToolTip(self.main["rewards"]["vnum"])
        else:
            self.itemTooltip.HideToolTip()

    def ClearList(self):
        """Reset the reward selection back to its empty state."""
        self.main["rewards"]["vnum"] = 0
        self.AppendTextLine(oxevent.EMPTY_PASSWORD)
        self.main["elements"]["slot_count"].SetText(str(oxevent.NEED_SIZE))
        self.main["elements"]["slot_image"].Hide()

    def RefreshCounter(self, participantsCount, observersCount):
        # Server push: update the participants/observers label.
        self.main["elements"]["participants"].SetText(localeInfo.OXEVENT_MANAGER_USER_COUNT % (participantsCount, observersCount))

    def AnswerWithKey(self, answer, key):
        """Confirmation dialog callback: perform action *key* if accepted."""
        if not self.wndOpenQuestion[key]:
            return
        self.wndOpenQuestion[key].Close()
        self.wndOpenQuestion[key] = None
        if not answer:
            return
        if key in (oxevent.OPEN_EVENT, oxevent.CLOSE_GATES, oxevent.CLOSE_EVENT, oxevent.ASK_QUESTION, oxevent.FORCE_CLOSE_EVENT):
            oxevent.Manager(key, oxevent.EMPTY_PASSWORD, oxevent.EMPTY_VALUE, oxevent.EMPTY_VALUE)
        else:
            # Reward actions additionally need the chosen item and count.
            if self.GetIsSearchedVnum():
                oxevent.Manager(key, oxevent.EMPTY_PASSWORD, self.main["rewards"]["vnum"], self.main["rewards"]["count"])
                self.ClearList()

    def AskQuestionPacket(self, key):
        """Open a yes/no confirmation dialog for action *key*."""
        def resize(key):
            # Wrap in the client's green color markup.
            return ("|cFFb6ff7d%s|r" % str(key))
        self.QUESTION_DESCRIPTION = {
            oxevent.OPEN_EVENT : localeInfo.OXEVENT_MANAGER_QUEST_OPEN_GATES,
            oxevent.CLOSE_GATES : localeInfo.OXEVENT_MANAGER_QUEST_CLOSE_GATES,
            oxevent.CLOSE_EVENT : localeInfo.OXEVENT_MANAGER_QUEST_FINISH_EVENT,
            oxevent.REWARD_PLAYERS : (localeInfo.OXEVENT_MANAGER_QUEST_GIVE_REWARD % (resize(self.main["rewards"]["vnum"]), resize(self.itemName), resize(self.main["rewards"]["count"]))),
            oxevent.ASK_QUESTION : localeInfo.OXEVENT_MANAGER_QUEST_RUN_QUIZ,
            oxevent.FORCE_CLOSE_EVENT : localeInfo.OXEVENT_MANAGER_QUEST_FORCE_CLOSE,
            oxevent.CLEAR_REWARD : localeInfo.OXEVENT_MANAGER_QUEST_CLEAR_REWARD
        }
        # Rewarding requires an item to have been picked first.
        if key == oxevent.REWARD_PLAYERS and not self.GetIsSearchedVnum():
            return
        self.wndOpenQuestion[key] = uiCommon.QuestionDialog()
        self.wndOpenQuestion[key].SetText(self.QUESTION_DESCRIPTION[key])
        self.wndOpenQuestion[key].SetWidth(450)
        # NOTE(review): TRUE/FALSE are presumably engine-injected globals —
        # confirm; `key=key` binds the current key into each lambda.
        self.wndOpenQuestion[key].SetAcceptEvent(lambda arg = TRUE, key = key: self.AnswerWithKey(arg, key))
        self.wndOpenQuestion[key].SetCancelEvent(lambda arg = FALSE, key = key: self.AnswerWithKey(arg, key))
        self.wndOpenQuestion[key].Open()

    def OpenWindow(self):
        self.Show()

    def Close(self):
        self.itemTooltip.HideToolTip()
        self.Hide()
class OxEventWinners(ui.ScriptWindow):
    """Small board showing the top OX-event winners (up to MAX_RANGE slots).

    Each slot shows the winner's face, name, and number of correct answers;
    hovering a populated slot pops a tooltip with full details.
    """

    def __init__(self):
        ui.ScriptWindow.__init__(self)
        self.textToolTip = None
        self.Initialize()

    def __del__(self):
        ui.ScriptWindow.__del__(self)

    def Initialize(self):
        """Load the layout and cache per-slot widgets and data dicts."""
        try:
            pyScrLoader = ui.PythonScriptLoader()
            pyScrLoader.LoadScriptFile(self, "uiscript/OxEventWinners.py")
        except:
            import exception
            exception.Abort("OxEventWinners.Initialize.LoadObject")
        try:
            GetObject = self.GetChild
            self.main = {
                # Per-slot winner data; "real_*" keys hold raw values, the
                # others hold pre-formatted tooltip strings.
                "data" : {
                    "real_name" : {}, "name" : {}, "level" : {}, "guild" : {}, "empire" : {}, "real_job" : {}, "job" : {}, "date" : {}, "real_correct_answers" : {}, "correct_answers" : {}
                },
                "elements" :
                {
                    "board" : GetObject("board"),
                    "slot" : [GetObject("character_slot_%d" % (i + 1)) for i in xrange(oxevent.MAX_RANGE)],
                    "name" : [GetObject("character_name_%d" % (i + 1)) for i in xrange(oxevent.MAX_RANGE)],
                    "face" : [GetObject("character_face_%d" % (i + 1)) for i in xrange(oxevent.MAX_RANGE)],
                    "answers" : [GetObject("character_answers_%d" % (i + 1)) for i in xrange(oxevent.MAX_RANGE)],
                }
            }
        except:
            import exception
            exception.Abort("OxEventWinners.Initialize.BindObject")
        self.main["elements"]["board"].SetSize(175, 235)
        # Dock near the lower-left corner of the screen.
        self.SetPosition(5, wndMgr.GetScreenHeight() - 600)
        self.UpdateRect()

    def GetCurrentKeys(self):
        # All per-slot data dicts, in a fixed order.
        return ([self.main["data"]["real_name"], self.main["data"]["name"], self.main["data"]["level"], self.main["data"]["guild"], self.main["data"]["empire"], self.main["data"]["real_job"], self.main["data"]["job"], self.main["data"]["date"], self.main["data"]["real_correct_answers"], self.main["data"]["correct_answers"]])

    def GetExistKey(self, key):
        # True when slot *key* holds real data (not the EMPTY placeholder).
        # NOTE(review): this indexes sumKeys by the slot number and then
        # looks up the same number as the dict key — looks accidental but is
        # preserved; verify against the original behavior.
        self.sumKeys = self.GetCurrentKeys()
        return (self.sumKeys[key].get(key) != localeInfo.OXEVENT_TOOLTIP_EMPTY)

    def GetRealFace(self, index):
        # Map a race/job id to its face image path.
        return FACE_IMAGE_DICT[index]

    def AppendTextLine(self):
        """Push the cached winner data into the slot widgets."""
        for key in xrange(oxevent.MAX_RANGE):
            if self.GetExistKey(key):
                self.main["elements"]["name"][key].SetText(self.main["data"]["real_name"].get(key))
                self.main["elements"]["answers"][key].SetText(str(self.main["data"]["real_correct_answers"].get(key)))
                self.main["elements"]["face"][key].LoadImage(self.GetRealFace(self.main["data"]["real_job"].get(key)))
            else:
                # Empty slot: show the placeholder face.
                self.main["elements"]["face"][key].LoadImage(PATH_IMAGE__UNKNOWN_WINNER)

    def Append(self):
        """Fetch winner rows from oxevent and (re)populate the board."""
        def resize(key):
            # Wrap in the client's yellow color markup.
            return ("|cFFfffbaa%s|r" % str(key))
        def GetEmpire(index):
            return resize(EMPIRE_NAME[index])
        for key in xrange(oxevent.MAX_RANGE):
            row = oxevent.GetWinners(key)
            name, level, guild, empire, job, date, correct_answers = row[0], row[1], row[2], row[3], row[4], row[5], row[6]
            if level == oxevent.EMPTY_DATA:
                # No winner for this slot: mark every data dict as empty.
                for keyEmpty in self.GetCurrentKeys():
                    keyEmpty.update({key : localeInfo.OXEVENT_TOOLTIP_EMPTY})
            else:
                self.main["data"]["real_name"].update({key : name})
                self.main["data"]["name"].update({key : (localeInfo.OXEVENT_TOOLTIP_NAME % resize(name))})
                self.main["data"]["level"].update({key : (localeInfo.OXEVENT_TOOLTIP_LEVEL % resize(level))})
                self.main["data"]["guild"].update({key : (localeInfo.OXEVENT_TOOLTIP_GUILD % resize(guild))})
                self.main["data"]["empire"].update({key : (localeInfo.OXEVENT_TOOLTIP_EMPIRE % GetEmpire(empire))})
                self.main["data"]["real_job"].update({key : job})
                self.main["data"]["job"].update({key : self.GetRealFace(job)})
                self.main["data"]["date"].update({key : (localeInfo.OXEVENT_TOOLTIP_DATE % resize(date))})
                self.main["data"]["real_correct_answers"].update({key : correct_answers})
                self.main["data"]["correct_answers"].update({key : (localeInfo.OXEVENT_TOOLTIP_ANSWERS % resize(correct_answers))})
        self.AppendTextLine()
        self.Show()

    def OnUpdate(self):
        # Build a fresh tooltip when hovering exactly one of slot/face.
        (x, y) = wndMgr.GetMousePosition()
        for key in xrange(oxevent.MAX_RANGE):
            if self.main["elements"]["slot"][key].IsIn() ^ self.main["elements"]["face"][key].IsIn():
                if self.GetExistKey(key):
                    self.textToolTip = uiToolTip.ToolTip()
                    self.textToolTip.SetPosition(x + 15, y)
                    self.textToolTip.AppendPlayersDesc(self.main["data"]["name"].get(key),self.main["data"]["level"].get(key),self.main["data"]["guild"].get(key), self.main["data"]["empire"].get(key),self.main["data"]["job"].get(key), self.main["data"]["date"].get(key),self.main["data"]["correct_answers"].get(key))

    def Openwindow(self):
        # Toggle visibility.
        if self.IsShow():
            self.Hide()
        else:
            self.Show()
|
[
"59807064+Reizonr1@users.noreply.github.com"
] |
59807064+Reizonr1@users.noreply.github.com
|
c51c47fe98b99582b8290fe9dc891c501ea895a2
|
8b5f0776902ae7a3c68e3907bf4595af173396e6
|
/pythondemo/apps/request_demo/httpdemo.py
|
dfcaa35f586a9e01113e7488631e9b317baad57b
|
[] |
no_license
|
coronahana/runpython
|
8fcdd57b0c826b86f630d29b9b256861196ab58f
|
c03bdc369c8f69a542b716a33a533f240d89e5bf
|
refs/heads/master
| 2021-05-11T04:05:10.558549
| 2019-12-19T10:25:03
| 2019-12-19T10:25:03
| 117,932,850
| 0
| 0
| null | 2019-12-12T15:07:04
| 2018-01-18T04:35:53
|
Python
|
UTF-8
|
Python
| false
| false
| 1,210
|
py
|
import requests
import json
from logind import LoginDemo
class InterfaceDemo(LoginDemo):
    """Small demo of posting JSON to a login endpoint with ``requests``."""
    # Class attribute shared by all instances; newpro() prints it.
    name=19
    """
    def __init__(self):
        print("interfaceDemo___init__")
    """
    def newproject(self):
        """POST a hard-coded login payload and print the raw response body.

        NOTE(review): credentials are hard-coded in source — do not reuse
        outside of a throwaway demo.
        """
        print("interfaceDemo__newproject")
        url = 'https://xxxx.xxx.xx/login?src=https%3A%2F%2Fwww.vip.com%2F'
        data01 = {"loginName": "18126132321", "password": "long1234", "remUser": 0, "vipc": "", "captcha": "",
                  "anticache": "1534909211992", "whereFrom": ""}
        data02 = json.dumps(data01)
        print(data02)
        res = requests.post(url, data02)
        # res = requests.get(url, data)  # requests.get(url, data) works directly; .get issues a GET and dict-typed data needs no preprocessing
        # res=res.text  # .text returns the response body as a str; no further conversion of res is needed
        res01 = res.text
        # res = res.json()  # when the response is a JSON string, .json() converts res straight into a dict
        print(res01)
    def newpro(self):
        """Print a marker plus the class attribute and return a fixed id."""
        print("interfaceDemo__newpro")
        print("name="+str(self.name))
        return "proid"
# Script entry point: run only the offline demo method, not the HTTP call.
if __name__ == '__main__':
    InterfaceDemo().newpro()
|
[
"1224140873@qq.com"
] |
1224140873@qq.com
|
9f2065bd4b9b05b7fd5593a811ca15c881262e18
|
9dba277eeb0d5e9d2ac75e2e17ab5b5eda100612
|
/exercises/1901100074/1001S02E04_control_flow.py
|
277c23ad93ad113524aafbc37d76b134afe8be9f
|
[] |
no_license
|
shen-huang/selfteaching-python-camp
|
e8410bfc06eca24ee2866c5d890fd063e9d4be89
|
459f90c9f09bd3a3df9e776fc64dfd64ac65f976
|
refs/heads/master
| 2022-05-02T05:39:08.932008
| 2022-03-17T07:56:30
| 2022-03-17T07:56:30
| 201,287,222
| 9
| 6
| null | 2019-08-08T15:34:26
| 2019-08-08T15:34:25
| null |
UTF-8
|
Python
| false
| false
| 355
|
py
|
# Print the classic 9x9 multiplication table, one row per factor.
print('打印九九乘法表')
for row in range(1, 10):
    for col in range(1, row + 1):
        print(row, '*', col, '=', row * col, end='\t')
    print()

# Same table, but even-numbered rows are replaced by a blank line.
print('\n打印跳过偶数行的九九乘法表')
for row in range(1, 10):
    if row % 2:
        # Odd row: print the cells (no trailing newline, matching the
        # original's quirk of relying on the next even row's blank print).
        for col in range(1, row + 1):
            print(row, '*', col, '=', row * col, end='\t')
    else:
        print()
|
[
"3507842712@qq.com"
] |
3507842712@qq.com
|
c7097703377e37843c47abd796ec4f333f4d2e77
|
fde31c14f7a31bc98221e3959748748c32bfc7ad
|
/stock/tests.py
|
6c296e822db3a61d931e039b2f4a94f5f04f77dd
|
[] |
no_license
|
schoolofnetcom/django-avancado
|
d557d05a96db6bc8471fec6cfc1bc80b78ea2266
|
0a9e0c92c437928caf3e647b7d9a35a0633d1ff2
|
refs/heads/master
| 2021-05-04T19:47:14.636565
| 2018-12-19T22:13:39
| 2018-12-19T22:13:39
| 106,818,135
| 5
| 4
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,908
|
py
|
from django.contrib.auth.models import User
from django.test import TestCase
# Create your tests here.
from django.test.testcases import SimpleTestCase
from django.urls.base import reverse
from stock.models import Product, TimestampableMixin, StockEntry
class ProductTest(SimpleTestCase):
    """Unit tests for Product that never touch the database."""

    def test_value_initial_stock_field(self):
        # A freshly constructed product starts with zero stock.
        product = Product()
        self.assertEquals(0, product.stock)
        # self.assertEquals(1, product.stock)

    def test_product_has_timestampable(self):
        # Product must inherit created/updated timestamps from the mixin.
        product = Product()
        self.assertIsInstance(product, TimestampableMixin)

    def test_exception_when_stock_less_zero(self):
        # Decrementing below zero must raise with the expected message.
        product = Product()
        with self.assertRaises(ValueError) as exception:
            product.stock = 10
            product.decrement(11)
        self.assertEquals('Sem estoque disponível', str(exception.exception))
class ProductDatabaseTest(TestCase):
    """Database-backed tests for Product, seeded from the data.json fixture."""
    fixtures = ['data.json']

    def setUp(self):
        # One product persisted per test; stock is left at its default.
        self.product = Product.objects.create(
            name="Produto YY", stock_max=200, price_sale=50.50, price_purchase=25.25,
        )

    def test_product_save(self):
        self.assertEquals('Produto YY', self.product.name)
        self.assertEquals(0, self.product.stock)

    def test_if_user_exists(self):
        # The fixture is expected to provide at least one user.
        user = User.objects.all().first()
        self.assertIsNotNone(user)
class StockEntryHttpTest(TestCase):
    """HTTP-level tests for the stock-entry list and create views."""
    fixtures = ['data.json']

    def test_list(self):
        response = self.client.get('/stock_entries/')
        self.assertEquals(200, response.status_code)
        # Fixture product should be rendered in the listing.
        self.assertIn('Produto A', str(response.content))

    def test_create(self):
        url = reverse('entries_create')
        self.client.post(url, {'product': 1, 'amount': 20})
        entry = StockEntry.objects.filter(amount=20, product_id=1).first()
        self.assertIsNotNone(entry)
        # 11 (fixture stock, presumably) + 20 posted — verify against fixture.
        self.assertEquals(31, entry.product.stock)
|
[
"argentinaluiz@gmail.com"
] |
argentinaluiz@gmail.com
|
bee0d4a64d8b86383ac57f7631cf62041079a8ed
|
b66bf5a58584b45c76b9d0c5bf828a3400ecbe04
|
/week-04/4-recursion/6.py
|
757f22bf72fb91301f22cd313315ff7f695c6926
|
[] |
no_license
|
greenfox-velox/szepnapot
|
1196dcb4be297f12af7953221c27cd1a5924cfaa
|
41c3825b920b25e20b3691a1680da7c10820a718
|
refs/heads/master
| 2020-12-21T08:11:41.252889
| 2016-08-13T10:07:15
| 2016-08-13T10:07:15
| 58,042,932
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 501
|
py
|
# 6. We have bunnies standing in a line, numbered 1, 2, ... The odd bunnies
# (1, 3, ..) have the normal 2 ears. The even bunnies (2, 4, ..) we'll say
# have 3 ears, because they each have a raised foot. Recursively return the
# number of "ears" in the bunny line 1, 2, ... n (without loops or
# multiplication).
def handicap_bunny_ears(n):
    """Recursively total the ears of bunnies 1..n.

    Odd-position bunnies contribute 2 ears; even-position bunnies
    contribute 3 (two ears plus one raised foot).
    """
    if n == 1:
        return 2
    ears_here = 3 if n % 2 == 0 else 2
    return ears_here + handicap_bunny_ears(n - 1)


print(handicap_bunny_ears(6))
|
[
"silentpocok@gmail.com"
] |
silentpocok@gmail.com
|
6fe458712832c481350feecfbc01f759822fdc4d
|
9307f8cae01d94fca805fcb2d221018aa0159e90
|
/prototype/prototype/openstack/common/periodic_task.py
|
250497c6a4c843c257aeb71fcdaf8224fe25b27b
|
[
"Apache-2.0"
] |
permissive
|
beibei1989/oslo.prototype
|
22ff20615876e239fce42edc35d02ca7cfb066ee
|
806870780194e45a66b28cd7260fa9bfd422aecb
|
refs/heads/master
| 2021-05-29T13:08:56.802185
| 2015-02-11T07:12:01
| 2015-02-11T07:12:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,322
|
py
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import logging
import random
import time
from oslo.config import cfg
import six
from prototype.openstack.common._i18n import _, _LE, _LI
periodic_opts = [
cfg.BoolOpt('run_external_periodic_tasks',
default=True,
help='Some periodic tasks can be run in a separate process. '
'Should we run them here?'),
]
CONF = cfg.CONF
CONF.register_opts(periodic_opts)
LOG = logging.getLogger(__name__)
DEFAULT_INTERVAL = 60.0
def list_opts():
    """Entry point for oslo.config-generator."""
    # Hand out a deep copy so the generator cannot mutate our option list.
    opts_copy = copy.deepcopy(periodic_opts)
    return [(None, opts_copy)]
class InvalidPeriodicTaskArg(Exception):
    """Raised when periodic_task() is given a legacy/unsupported argument."""
    message = _("Unexpected argument for periodic task creation: %(arg)s.")
def periodic_task(*args, **kwargs):
    """Decorator to indicate that a method is a periodic task.

    This decorator can be used in two ways:

    1. Without arguments '@periodic_task', this will be run on the default
       interval of 60 seconds.

    2. With arguments:
       @periodic_task(spacing=N [, run_immediately=[True|False]]
       [, name=[None|"string"])
       this will be run on approximately every N seconds. If this number is
       negative the periodic task will be disabled. If the run_immediately
       argument is provided and has a value of 'True', the first run of the
       task will be shortly after task scheduler starts.  If
       run_immediately is omitted or set to 'False', the first time the
       task runs will be approximately N seconds after the task scheduler
       starts. If name is not provided, __name__ of function is used.
    """
    def decorator(f):
        # Test for old style invocation
        if 'ticks_between_runs' in kwargs:
            raise InvalidPeriodicTaskArg(arg='ticks_between_runs')

        # Control if run at all: external-only tasks can be globally
        # disabled via the run_external_periodic_tasks config option.
        f._periodic_task = True
        f._periodic_external_ok = kwargs.pop('external_process_ok', False)
        if f._periodic_external_ok and not CONF.run_external_periodic_tasks:
            f._periodic_enabled = False
        else:
            f._periodic_enabled = kwargs.pop('enabled', True)
        f._periodic_name = kwargs.pop('name', f.__name__)

        # Control frequency; a last_run of None means "run at first chance".
        f._periodic_spacing = kwargs.pop('spacing', 0)
        f._periodic_immediate = kwargs.pop('run_immediately', False)
        if f._periodic_immediate:
            f._periodic_last_run = None
        else:
            f._periodic_last_run = time.time()
        return f

    # NOTE(sirp): The `if` is necessary to allow the decorator to be used with
    # and without parenthesis.
    #
    # In the 'with-parenthesis' case (with kwargs present), this function needs
    # to return a decorator function since the interpreter will invoke it like:
    #
    #   periodic_task(*args, **kwargs)(f)
    #
    # In the 'without-parenthesis' case, the original function will be passed
    # in as the first argument, like:
    #
    #   periodic_task(f)
    if kwargs:
        return decorator
    else:
        return decorator(args[0])
class _PeriodicTasksMeta(type):
    """Metaclass that collects @periodic_task-decorated methods per class."""

    def _add_periodic_task(cls, task):
        """Add a periodic task to the list of periodic tasks.

        The task should already be decorated by @periodic_task.

        :return: whether task was actually enabled
        """
        name = task._periodic_name

        if task._periodic_spacing < 0:
            LOG.info(_LI('Skipping periodic task %(task)s because '
                         'its interval is negative'),
                     {'task': name})
            return False
        if not task._periodic_enabled:
            LOG.info(_LI('Skipping periodic task %(task)s because '
                         'it is disabled'),
                     {'task': name})
            return False

        # A periodic spacing of zero indicates that this task should
        # be run on the default interval to avoid running too
        # frequently.
        if task._periodic_spacing == 0:
            task._periodic_spacing = DEFAULT_INTERVAL

        cls._periodic_tasks.append((name, task))
        cls._periodic_spacing[name] = task._periodic_spacing
        return True

    def __init__(cls, names, bases, dict_):
        """Metaclass that allows us to collect decorated periodic tasks."""
        super(_PeriodicTasksMeta, cls).__init__(names, bases, dict_)

        # NOTE(sirp): if the attribute is not present then we must be the base
        # class, so, go ahead an initialize it. If the attribute is present,
        # then we're a subclass so make a copy of it so we don't step on our
        # parent's toes.
        try:
            cls._periodic_tasks = cls._periodic_tasks[:]
        except AttributeError:
            cls._periodic_tasks = []

        try:
            cls._periodic_spacing = cls._periodic_spacing.copy()
        except AttributeError:
            cls._periodic_spacing = {}

        # Register every decorated task declared directly on this class.
        for value in cls.__dict__.values():
            if getattr(value, '_periodic_task', False):
                cls._add_periodic_task(value)
def _nearest_boundary(last_run, spacing):
"""Find nearest boundary which is in the past, which is a multiple of the
spacing with the last run as an offset.
Eg if last run was 10 and spacing was 7, the new last run could be: 17, 24,
31, 38...
0% to 5% of the spacing value will be added to this value to ensure tasks
do not synchronize. This jitter is rounded to the nearest second, this
means that spacings smaller than 20 seconds will not have jitter.
"""
current_time = time.time()
if last_run is None:
return current_time
delta = current_time - last_run
offset = delta % spacing
# Add up to 5% jitter
jitter = int(spacing * (random.random() / 20))
return current_time - offset + jitter
@six.add_metaclass(_PeriodicTasksMeta)
class PeriodicTasks(object):
    """Mixin that runs the class's collected periodic tasks on demand."""

    def __init__(self):
        super(PeriodicTasks, self).__init__()
        # Per-instance last-run timestamps, seeded from the decorator state.
        self._periodic_last_run = {}
        for name, task in self._periodic_tasks:
            self._periodic_last_run[name] = task._periodic_last_run

    def add_periodic_task(self, task):
        """Add a periodic task to the list of periodic tasks.

        The task should already be decorated by @periodic_task.
        """
        if self.__class__._add_periodic_task(task):
            self._periodic_last_run[task._periodic_name] = (
                task._periodic_last_run)

    def run_periodic_tasks(self, context, raise_on_error=False):
        """Tasks to be run at a periodic interval.

        Runs every due task once and returns how long the caller may idle
        before the next task becomes due.
        """
        idle_for = DEFAULT_INTERVAL
        for task_name, task in self._periodic_tasks:
            full_task_name = '.'.join([self.__class__.__name__, task_name])

            spacing = self._periodic_spacing[task_name]
            last_run = self._periodic_last_run[task_name]

            # Check if due, if not skip
            idle_for = min(idle_for, spacing)
            if last_run is not None:
                delta = last_run + spacing - time.time()
                if delta > 0:
                    idle_for = min(idle_for, delta)
                    continue

            LOG.debug("Running periodic task %(full_task_name)s",
                      {"full_task_name": full_task_name})
            # Record the boundary *before* running so a slow task does not
            # push its own schedule forward.
            self._periodic_last_run[task_name] = _nearest_boundary(
                last_run, spacing)

            try:
                task(self, context)
            except Exception as e:
                if raise_on_error:
                    raise
                LOG.exception(_LE("Error during %(full_task_name)s: %(e)s"),
                              {"full_task_name": full_task_name, "e": e})
            # Yield the scheduler between tasks (greenthread-friendly).
            time.sleep(0)

        return idle_for
|
[
"yangyuan@users.noreply.github.com"
] |
yangyuan@users.noreply.github.com
|
36d9e846608b55ec03ebe783ec8794af6d624e6e
|
8c7c35fa3bb907a0257051db626bdd4a1ad56c75
|
/Pulsar/PulsarLib.py
|
8da0877e6c20f4efa48d7d974708af13f92a5962
|
[] |
no_license
|
fermi-lat/celestialSources-new
|
b9635156a11fe62c384beab881efee4d723094cd
|
07af8b12227db0602a03c2c9fc3af95694a70f02
|
refs/heads/master
| 2021-07-10T14:11:45.734241
| 2017-09-30T18:03:46
| 2017-09-30T18:03:46
| 106,605,083
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 942
|
py
|
def generate(env, **kw):
    """SCons tool hook: add the Pulsar library and its dependencies to env.

    kw flags: depsOnly skips adding the Pulsar library itself; incsOnly
    additionally registers dependent package include paths.
    """
    if not kw.get('depsOnly', 0):
        env.Tool('addLibrary', library = ['Pulsar'])
        # Windows GlastRelease container builds need explicit package paths.
        if env['PLATFORM'] == 'win32' and env.get('CONTAINERNAME', '') == 'GlastRelease':
            env.Tool('findPkgPath', package = 'Pulsar')
            env.Tool('findPkgPath', package = 'SpectObj')
            env.Tool('findPkgPath', package = 'flux')
            env.Tool('findPkgPath', package = 'astro')
            env.Tool('findPkgPath', package = 'facilities')
    # Always pull in the dependent library tools and ROOT libraries.
    env.Tool('fluxLib')
    env.Tool('SpectObjLib')
    env.Tool('astroLib')
    env.Tool('addLibrary', library = env['rootLibs'])
    env.Tool('addLibrary', library = env['rootGuiLibs'])
    if kw.get('incsOnly', 0) == 1:
        env.Tool('findPkgPath', package = 'SpectObj')
        env.Tool('findPkgPath', package = 'flux')
        env.Tool('findPkgPath', package = 'astro')
        env.Tool('findPkgPath', package = 'facilities')
def exists(env):
    """SCons tool hook: this tool is unconditionally available."""
    return 1
|
[
""
] | |
fb5dc4f1adab784f0f9aee9cb07451f4c9406aa0
|
5b6fe4eba4540e37bb8a0c7dbea3b0a416769041
|
/CRABSERVER/src/python/ProxyTarballAssociator/__init__.py
|
9b9fcf70c13ea2f611e28e32c87fb4cb4c103b14
|
[] |
no_license
|
bbockelm/CRAB
|
55814f3ff8cf56ccf19e60a0b815fe3144063d83
|
48a12c644164b5142c44ef023ac103af178922d4
|
refs/heads/master
| 2021-01-10T20:38:51.683231
| 2013-04-15T15:20:23
| 2013-04-15T15:20:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 220
|
py
|
#!/usr/bin/env python
"""
_ProxyTarballAssociatorComponent_

"""
# CVS keyword metadata for the package.
__author__ = "farinafa@cern.ch"
__revision__ = "$Id: __init__.py,v 1.0 2006/11/20 14:38:00 farinafa Exp $"
__version__ = "$Revision: 1.0 $"
# No public names are exported from this package namespace.
__all__ = []
|
[
""
] | |
3ddc19354fa1633b9ee31d7c381329a6f26e45c6
|
224f9685ef222d26284560c365c407431255c389
|
/python/mantis/__init__.py
|
393c8326147e0abe70c05d77165ab7bba356c602
|
[] |
no_license
|
EyalSel/mantis-module
|
e66d3b0a7058dc20307e0b2aafdb729e5ecb103e
|
62f6a0910bc05cfeb5bf971b0564abf8fa11bfe2
|
refs/heads/master
| 2020-11-26T17:40:03.185276
| 2019-12-15T23:29:40
| 2019-12-15T23:29:40
| 229,162,354
| 0
| 0
| null | 2019-12-20T00:45:08
| 2019-12-20T00:45:07
| null |
UTF-8
|
Python
| false
| false
| 342
|
py
|
import click
from mantis.consume import consume
from mantis.load_gen import load_gen
from mantis.metric import metric_monitor, result_writer
@click.group()
def cli():
    """Top-level click command group for the mantis CLI."""
    pass

# Register the subcommands imported above on the group.
cli.add_command(consume)
cli.add_command(load_gen)
cli.add_command(metric_monitor)
cli.add_command(result_writer)

def wrapper():
    # standalone_mode=False makes click return/raise instead of calling
    # sys.exit(), so this entry point can be embedded in other code.
    cli(standalone_mode=False)
|
[
"xmo@berkeley.edu"
] |
xmo@berkeley.edu
|
7bd5b4b65ead10f64533fc5df322bc6ee73431a3
|
873eff0466f282d627c1a8ee3866240f63d31ed6
|
/220. Contains Duplicate III.py
|
98157c085e3141f2f83d81d778e9a4b2a0573716
|
[] |
no_license
|
wooloba/LeetCode861Challenge
|
c5a6ab6a35384c18d5a968e9898edff21ac2e13a
|
33db8a064cc7fdbe9e7ed1b4fdc2c6ef74deb29b
|
refs/heads/master
| 2021-08-07T10:39:38.062779
| 2021-06-21T01:44:59
| 2021-06-21T01:44:59
| 141,374,370
| 4
| 1
| null | 2018-07-18T15:11:15
| 2018-07-18T03:10:35
|
Python
|
UTF-8
|
Python
| false
| false
| 842
|
py
|
####################
# Yaozhi Lu #
# Nov 14 2018 #
####################
#Origin: https://leetcode.com/problems/contains-duplicate-iii/
class Solution(object):
def containsNearbyAlmostDuplicate(self, nums, k, t):
"""
:type nums: List[int]
:type k: int
:type t: int
:rtype: bool
"""
#time limit exceed
dic = dict(zip([i for i in range(0,len(nums))],nums))
for i in range(len(dic)-1):
for j in range(i+1,len(dic)):
print(i,j)
if abs(i-j) > k:
break
if abs(dic[i] - dic[j])<= t:
return True
return False
def main():
so = Solution()
print(so.containsNearbyAlmostDuplicate([1,2,3,1],3,0))
if __name__ == "__main__":
main()
|
[
"luyaozhiusing@gmail.com"
] |
luyaozhiusing@gmail.com
|
2bf06e0ad8127a620f73166fdad183db2ad4a00b
|
54f352a242a8ad6ff5516703e91da61e08d9a9e6
|
/Source Codes/AtCoder/abc026/A/4904316.py
|
f402a614334f49b3bcdec3a0f4f6dd30b813e978
|
[] |
no_license
|
Kawser-nerd/CLCDSA
|
5cbd8a4c3f65173e4e8e0d7ed845574c4770c3eb
|
aee32551795763b54acb26856ab239370cac4e75
|
refs/heads/master
| 2022-02-09T11:08:56.588303
| 2022-01-26T18:53:40
| 2022-01-26T18:53:40
| 211,783,197
| 23
| 9
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 42
|
py
|
a = int(input())
print(int((a ** 2) / 4))
|
[
"kwnafi@yahoo.com"
] |
kwnafi@yahoo.com
|
5d2344e318665298f82748bfeb35193bd3a1797f
|
be56ade48d7cc44db281ce2d24df723d94b300d0
|
/site/libs/tests.py
|
aed98444b40c15722a2eb195832cb2031a972a6f
|
[] |
no_license
|
olymk2/stomatopod
|
e4c16815112e432c2c74dae23f14d292ffd3f8d4
|
e8844e0f92ae76a44bb16fe31770a77bf47c2edf
|
refs/heads/master
| 2020-12-24T16:50:20.805943
| 2015-10-26T09:21:49
| 2015-10-26T09:21:49
| 38,419,262
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,493
|
py
|
class testset(object):
count = 0
def __init__(self, name='testset', success_msg='Great scott these tests passed !', failure_msg='This encounter could create a time paradox !', verbose=False):
sys.stdout = StringIO()
self.name = name
self.success_message = success_msg
self.failure_message = failure_msg
self.results = []
self.verbose = verbose
self.count_tests = 0
self.count_success = 0
self.count_failures = 0
def append(self, test, error='\033[94mGreat scott something is wrong in the universe!'):
self.results.append((
test,
error
))
def finish(self):
sys.stdout = sys.__stdout__
for result, error in self.results:
if result is True:
self.count_success += 1
else:
print('\t\033[94mError in test %s %s' % (self.count_tests, error))
self.count_failures += 1
self.count_tests += 1
self.count += self.count_tests
if self.count_tests == self.count_success:
if self.verbose is True:
print('\t\033[92m%s out of %s %s tests passed - %s\033[0m' % (self.count_success, self.count_tests, self.name, self.success_message))
else:
print('\t\033[93m%s out of %s %s tests failed - %s\033[0m' % (self.count_failures, self.count_tests, self.name, self.failure_message))
return self.count_failures
|
[
"oliver.marks@influentialsoftware.com"
] |
oliver.marks@influentialsoftware.com
|
3d48ec33045fb784ead90b898d450e65020f22cd
|
40ce4d7545309ca57f0670a3aa27573d43b18552
|
/com.ppc.Microservices/intelligence/daylight/location_midnight_microservice.py
|
c6ef5c09622b3603af1f31da45b9aac04e419e01
|
[
"Apache-2.0"
] |
permissive
|
slrobertson1/botlab
|
769dab97cca9ee291f3cccffe214544663d5178e
|
fef6005c57010a30ed8d1d599d15644dd7c870d8
|
refs/heads/master
| 2020-07-28T06:45:37.316094
| 2019-09-18T15:34:08
| 2019-09-18T15:34:08
| 209,341,818
| 0
| 0
|
Apache-2.0
| 2019-09-18T15:23:37
| 2019-09-18T15:23:37
| null |
UTF-8
|
Python
| false
| false
| 878
|
py
|
'''
Created on February 25, 2019
This file is subject to the terms and conditions defined in the
file 'LICENSE.txt', which is part of this source code package.
@author: David Moss
'''
from intelligence.intelligence import Intelligence
class LocationMidnightMicroservice(Intelligence):
"""
Announce midnight throughout the microservices framework
"""
def schedule_fired(self, botengine, schedule_id):
"""
The bot executed on a hard coded schedule specified by our runtime.json file
:param botengine: BotEngine environment
:param schedule_id: Schedule ID that is executing from our list of runtime schedules
"""
self.parent.track(botengine, "midnight")
if schedule_id == "MIDNIGHT":
self.parent.distribute_datastream_message(botengine, "midnight_fired", None, internal=True, external=False)
|
[
"dmoss@peoplepowerco.com"
] |
dmoss@peoplepowerco.com
|
033e3ba2b09c1724cbe12c3e9791b64c137b54e6
|
c953e8a7d251e4aba08910156a1bcf6997940c80
|
/2015/9/9_1.py
|
e3eb309bbed582025e5b5ca8fccb90513ac36d95
|
[] |
no_license
|
simonbrahan/adventofcode
|
848ca9ab2fdd7b22e1eb13d43bb5cf93972e2e5f
|
8e0053399defb2a8a83cd4bb4062f7e213b10174
|
refs/heads/master
| 2021-06-04T00:52:30.269561
| 2018-12-12T22:55:04
| 2018-12-12T22:55:04
| 56,775,275
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,714
|
py
|
ex_input = open('input.txt', 'r')
distance_between_places = {}
routes = []
for line in ex_input:
start, ignore, end, ignore, distance = line.split(' ')
distance = int(distance)
if start not in distance_between_places:
distance_between_places[start] = []
distance_between_places[start].append({ 'loc': end, 'distance': distance })
if end not in distance_between_places:
distance_between_places[end] = []
distance_between_places[end].append({ 'loc': start, 'distance': distance })
for place in distance_between_places:
distance_between_places[place].sort(key = lambda val: val['distance'])
distance_between_places[place].reverse()
# Starting from each place in turn, get the longest available route
# by always going to the furthest unvisited node
longest_route = 0
for place in distance_between_places:
visited = dict.fromkeys(place for place in distance_between_places)
visited[place] = True
current_place = place
current_route = 0
# While there are unvisited nodes...
while None in visited.values():
# For each destination from current place...
for destination in distance_between_places[current_place]:
# If destination has not been visited...
if not visited[destination['loc']]:
# Move to destination
# It is known to be the furthest destination due to the sort by distance above
current_route += destination['distance']
visited[destination['loc']] = True
current_place = destination['loc']
break
if current_route > longest_route:
longest_route = current_route
print longest_route
|
[
"simon.brahan@frogeducation.com"
] |
simon.brahan@frogeducation.com
|
1375f01b452c64c019fa1eae9b93de292611dfcf
|
5b2d4ef4e3c45e52b554fc3722e7ae1e7d5403ad
|
/myforum/accounts/test/test_moderate.py
|
550b3a5426f2e00ad0618478806975ff447c86b3
|
[] |
no_license
|
nithylsairam/myforum-master
|
f375bac715e35aaf97c0d22cb8668c95b34253ee
|
dbc98a60197d0b1c5dda37b8e9a021049ff742fa
|
refs/heads/master
| 2020-07-17T00:38:01.285470
| 2019-09-02T17:37:57
| 2019-09-02T17:37:57
| 205,902,979
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,483
|
py
|
import logging
from django.test import TestCase
from django.shortcuts import reverse
from myforum.accounts.models import User
from myforum.accounts.util import get_uuid
from myforum.accounts.test.util import fake_request
from myforum.accounts import views, forms
class ModerateUser(TestCase):
def setUp(self):
self.user1 = User.objects.create(username=f"foo1", email="foo@tested.com",
password="foo", is_superuser=True, is_staff=True)
self.user2 = User.objects.create(username=f"foo2", email="foo2@tested.com",
password="foo2")
pass
def test_user_moderate(self):
"Test user moderation"
for action, _ in forms.UserModerate.CHOICES:
url = reverse("user_moderate", kwargs=dict(uid=self.user2.profile.uid))
data = {"action": action}
request = fake_request(user=self.user1, data=data, url=url)
response = views.user_moderate(request=request, uid=self.user2.profile.uid)
self.assertTrue(response.status_code == 302, "Error moderating user.")
pass
def test_debug_user(self):
"Test logging in as a user"
url = reverse("debug_user")
request = fake_request(user=self.user1, data=dict(uid=self.user2.profile.uid), url=url)
response = views.debug_user(request=request)
self.assertTrue(response.status_code == 302, "Error debugging user.")
|
[
"nithil1997@gmail.com"
] |
nithil1997@gmail.com
|
7ac8563d6aacb10540b4737d547e048a5b5d34cb
|
8723e6a6104e0aa6d0a1e865fcaaa8900b50ff35
|
/util/test_registration.py
|
552ee309cf404634bd31b78fa6016c8364671422
|
[] |
no_license
|
ejeschke/ginga-plugin-template
|
9c4324b7c6ffaa5009cce718de8ea2fc5172bc81
|
545c785a184aedb1535d161d3c5ca5e7bf5bed6e
|
refs/heads/master
| 2022-11-22T17:50:57.503956
| 2022-11-10T23:20:09
| 2022-11-10T23:20:09
| 78,906,928
| 0
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 833
|
py
|
"""
This program allows you to test whether your plugin will register
itself correctly.
"""
from pkg_resources import iter_entry_points
groups = ['ginga.rv.plugins']
available_methods = []
for group in groups:
for entry_point in iter_entry_points(group=group, name=None):
available_methods.append(entry_point.load())
d = dict(name='Name', ptype='Type', klass='Class', module='Module')
print("%(name)14.14s %(ptype)6.6s %(klass)20s %(module)20s" % d)
for method in available_methods:
spec = method()
# for debugging
#print(spec)
d = dict(spec)
d.setdefault('name', spec.get('name', spec.get('menu', spec.get('klass', spec.get('module')))))
d.setdefault('klass', spec.get('module'))
d.setdefault('ptype', 'local')
print("%(name)14.14s %(ptype)6.6s %(klass)20s %(module)20s" % d)
|
[
"eric@naoj.org"
] |
eric@naoj.org
|
40ee07e78375994600154897421ebb3e4da7d420
|
132b214a5d9072d7ed7b4f0ccc681401251c160f
|
/home/migrations/0005_postnews.py
|
5bdbf2d9ccc7019c7b86af0b1486ff2c945f0cd5
|
[] |
no_license
|
kagajugrace/DjangoAndroid
|
7539699192079f193ed01e594c20e026b7387f74
|
dd85d058725691f4896ebaab73be4438325c085b
|
refs/heads/master
| 2023-03-03T14:51:27.921126
| 2021-02-13T09:41:47
| 2021-02-13T09:41:47
| 337,977,067
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 749
|
py
|
# Generated by Django 3.1.6 on 2021-02-12 14:40
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('home', '0004_auto_20210206_1347'),
]
operations = [
migrations.CreateModel(
name='Postnews',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=255)),
('owner', models.CharField(max_length=255)),
('image', models.ImageField(upload_to='')),
('price', models.CharField(max_length=255)),
('description', models.TextField()),
],
),
]
|
[
"kagajugrace@gmail.com"
] |
kagajugrace@gmail.com
|
cf669d847a73e884808151acc91b624f3c483dc6
|
64078d7fdf4ca6349ab7447cf5aec8d5b89ee80f
|
/mbta/mbta.py
|
5712d2e542058ee3b5de3fec5d43b649f2f209e0
|
[
"MIT"
] |
permissive
|
mclarkelauer/pymbta
|
9df072c7d6b53662b7fc41bfd603c0fcb5a511f7
|
2c3314a722539db6f516652261832363ba959435
|
refs/heads/master
| 2016-08-07T09:37:19.011564
| 2014-12-17T01:32:14
| 2014-12-17T01:32:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,045
|
py
|
import urllib
import urllib2
import json
class mbta:
def __init__(self,api_key):
self.api_key=api_key
self.site="http://realtime.mbta.com/developer/api/v2/"
def pretty(self,d,indent=0):
for key, value in d.iteritems():
print '\t' * indent + str(key) +"\n"
if isinstance(value, dict):
self.pretty(value, indent+1)
else:
print '\t' * (indent+1) + str(value) + "\n"
def __makeRequest(self,url,data={}):
#set api key
data["api_key"]=self.api_key
data["format"]="json"
params= urllib.urlencode(data)
resp = urllib2.urlopen(url+ "?" + params)
jsonData = json.load(resp)
return jsonData
def __getURL(self,queryName):
# Possible add validation for queryName
return (self.site+queryName)
# Return dictionary of modes, with routes and names
def routes(self):
url = self.__getURL("routes")
response = self.__makeRequest(url)
return response
def routesByStop(self, stop_id):
url=self.__getURL("routesbystop")
data['stop_id']=stop_id
response = self.__makeRequest(url)
return response
def stopsByRoute(self,route):
url=self.__getURL("stopsbyroute")
data['route']=route
response = self.__makeRequest(url)
return response
def stopsByLocation(self,lat,lon):
url=self.__getURL("stopsbylocation")
data['lat']=lat
data['lon']=lon
response = self.__makeRequest(url)
return response
def scheduleByStop(self):
pass
def scheduleByRoute(self):
pass
def scheduleByTrip(self):
pass
def predictionsByStop(self):
pass
def predictionsByRoute(self):
pass
def vehiclesByRoute(self):
pass
def predictionsByTrip(self):
pass
def vehiclesByTrip(self):
pass
def alerts(self):
pass
def alertsByRoute(self):
pass
def alertsByStop(self):
pass
def alertsById(self):
pass
def alertsHeaders(self):
pass
def alertHeadersByRoute(self):
pass
def alertHeadersByStop(self):
pass
def serverTime(self):
pass
|
[
"matt@clarkelauer.com"
] |
matt@clarkelauer.com
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.