blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2 values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313 values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107 values | src_encoding stringclasses 20 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.02M | extension stringclasses 78 values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
d30d5c62676e592a5f71a0be2cfb4c773f8613f1 | b0d451376c1fa503db52bcc51f36d41d5d87edb9 | /information.py | 3067af8917863a10e1a1afe7c875348a56c7b090 | [] | no_license | roamerboss/first-personal-work | b404def42c5334c077c8a07758c3233ea2a46fd5 | fa578c9d4a9901c975576f6b00c298bfc2a01e05 | refs/heads/main | 2023-03-08T21:46:37.359540 | 2021-02-26T10:10:13 | 2021-02-26T10:10:13 | 341,454,707 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,540 | py | import requests
import re
import json
# Scrape comments for one Tencent Video article (id 5963120294) page by page
# from the coral.qq.com JSONP comment API and print them all at the end.
# Spoof a desktop Safari User-Agent so the API does not reject the request.
headers = {
"User-Agent":"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/14.0 Safari/605.1.15"
}
# Accumulators for the comment bodies fetched in each round.
list1 = []
list2 = []
list3 = []
list4 = []
list_all = []
# `i` is a millisecond timestamp used as the `_=` cache-buster parameter;
# `j` is the pagination cursor (the "last" value returned by each page).
i = 1614066736711
j = 0
# First page: cursor 0.
url = "https://video.coral.qq.com/varticle/5963120294/comment/v2?callback=_varticle5963120294commentv2&orinum=10&oriorder=o&pageflag=1&cursor="+str(j)+"&scorecursor=0&orirepnum=2&reporder=o&reppageflag=1&source=132&_="+str(i)
html = requests.get(url,headers=headers).content.decode()
# The response is JSONP, so comment bodies are pulled out with a regex
# instead of json.loads (the `json` import above is unused).
list1 = re.findall('"content":"(.*?)"',html,re.S)
#print(list1)
# Cursor for the next page comes back in the "last" field.
j = re.findall('"last":"(.*?)"',html,re.S)
i = i+3
# Second page, using the cursor extracted above.
url = "https://video.coral.qq.com/varticle/5963120294/comment/v2?callback=_varticle5963120294commentv2&orinum=10&oriorder=o&pageflag=1&cursor="+str(j[0])+"&scorecursor=0&orirepnum=2&reporder=o&reppageflag=1&source=132&_="+str(i)
html = requests.get(url,headers=headers).content.decode()
list2 = re.findall('"content":"(.*?)"',html,re.S)
#print(list2)
# Remaining pages: follow the cursor chain for a fixed number of rounds.
for k in range(1,1168,1):
    i += 1
    j = re.findall('"last":"(.*?)"',html,re.S)
    url = "https://video.coral.qq.com/varticle/5963120294/comment/v2?callback=_varticle5963120294commentv2&orinum=10&oriorder=o&pageflag=1&cursor="+str(j[0])+"&scorecursor=0&orirepnum=2&reporder=o&reppageflag=1&source=132&_="+str(i)
    html = requests.get(url,headers=headers).content.decode()
    list3 = re.findall('"content":"(.*?)"',html,re.S)
    list4 += list3
#print(list4)
list_all = list1 + list2 + list4
print(list_all)
| [
"1273915146@qq.com"
] | 1273915146@qq.com |
7d6c50e0939edae8e06e84d3f0739b57bfd24119 | 641563f61bb4061a3488d047f5744d332b33ea05 | /TesterRunner/runner/testcases/chaoyue_master_zzmj_2019_09_30_03_01.py | ad031de804568dc6fca60e88dac256d9bd2c8cf5 | [] | no_license | Zhaohb2017/test_platform_back | 4c1de839c2939b44146e3ad399bdd99ddbd276db | 4209bbf26204ea39c21bcf9d0a31ef3347bf07e0 | refs/heads/master | 2021-06-20T19:50:06.026273 | 2020-05-17T05:14:20 | 2020-05-17T05:14:20 | 199,758,231 | 0 | 1 | null | 2021-06-10T21:47:34 | 2019-07-31T01:56:14 | Python | UTF-8 | Python | false | false | 1,867 | py | import sys
import os
# Make the project root importable: this file lives under
# <root>/runner/testcases/, so compute <root> and append it to sys.path
# (needed for the `from chaoyue...` import below).
cur_path = os.path.abspath(os.path.dirname(__file__))  # directory of this file
last_path = os.path.split(cur_path)[0]  # parent directory (".../runner")
# NOTE(review): splitting on "/" assumes POSIX separators — on Windows,
# os.path.abspath produces backslash paths and this returns the whole
# string, which would make root_path empty; confirm Windows is not a target.
last_path_len = last_path.split("/")[-1]  # name of the parent directory
root_path = last_path[:len(last_path) - len(last_path_len)]  # parent's parent, keeps trailing "/"
sys.path.append(root_path)
import time
import unittest
from chaoyue.master.phz.api import *
class PHZTestCase(unittest.TestCase):
    """End-to-end smoke test: two players play one scripted round of
    mahjong against the game server.

    Player 1 creates a 2-player room with a rigged deal, player 2 joins,
    player 1 declares a win, then both connections are closed.
    """
    def test_task(self):
        # Player 1 doubles as the room owner (third argument True).
        player1 = UserBehavior(127641,127641,True)
        player2 = UserBehavior(127643)
        time.sleep(2)
        # NOTE(review): these are attribute assignments — if SetGameType is a
        # method on UserBehavior, a call may have been intended; confirm.
        player1.SetGameType = "转转麻将"
        player2.SetGameType = "转转麻将"
        # Room options: 2 players, 5 rounds, seven pairs enabled, 2 "zhua ma",
        # etc. Keys mirror the server's room-creation payload.
        create_room_data = {'o_player': 2, 'o_round': 5, 'o_double_plus_new': False, 'o_zimohu': '可抢杠胡', 'o_sevenPair': True, 'o_xianjia': False, 'o_hongzhonglaizi': False, 'o_youpaibihu': False, 'o_qigang': False, 'o_zhuaMa': 2, 'o_double': 0, 'o_double_score': '', 'o_double_plus': 2, 'o_doublePlusNewScore': 10, 'o_159zhongma': False, 'o_bankerzhongniao': False, 'roomTypeVuale': '普通创房', 'clubRoomTypeVuale': '', 'o_club_id': ''}
        player1.CreateRoom(create_room_data)
        time.sleep(2)
        # Rig the wall: keys "1".."4" look like per-seat starting hands and
        # "5" the draw pile — presumably; verify against maker_card's contract.
        cards_data = {"1":["1W","1W","2W","2W","3W","3W","4W","4W","5W","5W","6W","6W","7W","7W","8W","8W","9W","9W","HZ","HZ"],
                      "2":["1S","1S","3S","3S","5S","5S","1T","1T","3T","3T","5T","5T","6W","6T"],
                      "3":["2S","2S","4S","4S","6S","6S","2T","2T","4T","4T","6T","6T","6T"],
                      "4":["1T","1T","2S","2W","2T","2S","3W","3T","3S","4W","4T","4S","8W"],
                      "5":["HZ","HZ","9W","6W","6W","6W","6W","9S","9T","9S","9T","6T","6S"]}
        player1.maker_card(cards_data,player1.room_id)
        time.sleep(2)
        player2.ApplyEnterRoom(player1.room_id,0)
        # Player 1 declares the winning operation.
        player1.OperateApi('胡')
        time.sleep(5)
        player1.ConnectClose()
        player2.ConnectClose()
if __name__=='__main__':
unittest.main() | [
"540383428@qq.com"
] | 540383428@qq.com |
932c60b434fa614cfd705c8dff56b1d9747a712c | d8270002e8b5c2850d8bf16d70728d24eff6a897 | /speech/speechAudio.py | 824ac17e31db076489daacee6a792c170777b648 | [] | no_license | imratnesh/audioanalysis | 05ac1b535dd99e2ad91bed993e3339e33b5da06c | 7783f2c6eea19333ebf577eea6caae3117b7abfc | refs/heads/master | 2020-07-17T09:54:05.133336 | 2019-09-03T06:50:53 | 2019-09-03T06:50:53 | 205,997,923 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,463 | py | from ibm_watson import SpeechToTextV1
from os.path import join, dirname
import json
from ibm_watson.websocket import RecognizeCallback, AudioSource
# IBM Watson Speech-to-Text client (London region endpoint).
# NOTE(review): the IAM API key is hardcoded — it should be revoked and moved
# to an environment variable or secrets store before this file is shared.
speech_to_text = SpeechToTextV1(
    iam_apikey='GFT9-N0g7zSU9FIM1YLNL7ZyzLdIJ1s_EkluUjYK8B1s',
    url='https://gateway-lon.watsonplatform.net/speech-to-text/api'
)
# Disabled example: uploading audio to a custom acoustic model.
#with open(join(dirname(__file__), './.', 'audio1.wav'),
#               'rb') as audio_file:
#    speech_to_text.add_audio(
#        '{customization_id}',
#        'audio1',
#        audio_file,
#        content_type='audio/wav'
#    )
# Poll for audio status.
class MyRecognizeCallback(RecognizeCallback):
    """Websocket recognizer callback that prints transcripts and errors."""

    def __init__(self):
        RecognizeCallback.__init__(self)

    def on_data(self, data):
        # The transcript lives in the first alternative of the first result.
        transcript = data['results'][0]['alternatives'][0]['transcript']
        print(json.dumps(transcript, indent=2))

    def on_error(self, error):
        print('Error received: {}'.format(error))

    def on_inactivity_timeout(self, error):
        print('Inactivity timeout: {}'.format(error))
# Stream a FLAC file next to this script through the websocket recognizer;
# results are delivered asynchronously to the callback above.
myRecognizeCallback = MyRecognizeCallback()
with open(join(dirname(__file__), 'audio-file.flac'), 'rb') as audio_file:
    audio_source = AudioSource(audio_file)
    speech_to_text.recognize_using_websocket(
        audio=audio_source,
        content_type='audio/flac',
        recognize_callback=myRecognizeCallback,
        model='en-US_BroadbandModel',
        # Keyword spotting: report these words when confidence >= 0.5.
        keywords=['colorado', 'tornado', 'tornadoes'],
        keywords_threshold=0.5,
        max_alternatives=3)
"ratnesh.kushwaha@icorprated.com"
] | ratnesh.kushwaha@icorprated.com |
198e1bc91c2353ff7b712aad20d49e61c56292dd | 33b1d5eedcf7e75a83483d9e9cb95288ab35cf88 | /专题研究/专题三:12.获取城市坐标.py | bffc2f8b45b49d4471bf514f3f9536d973f5fca0 | [
"MIT"
] | permissive | Ringo-li/urban_data_analysis | 66baa5ca57f514db9c72b351f5c207e7b6382d12 | c03c92f838849a4cfd34814ef8ddf956e4a9f375 | refs/heads/master | 2021-03-14T23:17:58.022980 | 2020-06-17T15:11:14 | 2020-06-17T15:11:14 | 246,802,436 | 5 | 1 | null | null | null | null | UTF-8 | Python | false | false | 988 | py | # -*- coding: utf-8 -*-
import arcpy
import pandas as pd
#中国城市地图位置
fc = r'D:\rui\code_analysis\file\subject\chapter3\市界_region.shp'
#设置空间参照坐标系,在arcgis中设置好中国城市地图坐标系,右击图层layer——属性——坐标系——另存坐标系文件
sr = arcpy.SpatialReference(r'D:\rui\code_analysis\file\subject\chapter3\Xian 1980 3 Degree GK CM 108E.prj')
#创建游标,遍历表格,获取城市名称,X/Y坐标
with arcpy.da.SearchCursor(fc,['NAME','shape@X','shape@Y'],spatial_reference=sr) as cursor:
ls = []
for row in cursor:
#游标获取元组数据,转换为列表
print list(row)
data = list(row)
#将获得的列表数据保存到ls中
ls.append(data)
#ls为二维数值形式,将其转换为pandas对象并导出,注意导出编码为utf8
pd.DataFrame(ls).to_csv(r'D:\rui\code_analysis\file\subject\chapter3\coor_xian80.csv',encoding='utf8')
| [
"ry.li@qq.com"
] | ry.li@qq.com |
55db676d6f2b08faed2614d8fe254670b6fed986 | 34ceb80ae1bdd52de915b12747aec0eb395dbbfe | /blog/migrations/0009_blog_programmingskills.py | 19dad1ce12b2cd2aa9bccdb8ce53bce5ecedc674 | [] | no_license | dhirajkumar2020/resume | e5b300b7bf02f305d89e7ace9207e24bd9d070ba | aeb80eb8bfda2e864c16c50e410f944baecfc1eb | refs/heads/master | 2020-09-14T09:56:56.450620 | 2019-11-22T10:17:03 | 2019-11-22T10:17:03 | 223,096,737 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 416 | py | # Generated by Django 2.2.7 on 2019-11-22 06:53
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the optional ``programmingskills`` text field to the Blog model."""
    # Must run after the migration that added ``softwareskills``.
    dependencies = [
        ('blog', '0008_blog_softwareskills'),
    ]
    operations = [
        migrations.AddField(
            model_name='blog',
            name='programmingskills',
            # Optional; existing rows are backfilled with an empty string.
            field=models.CharField(blank=True, default='', max_length=255),
        ),
    ]
| [
"dhiraj.kumar@nescode.com"
] | dhiraj.kumar@nescode.com |
0fa2efac723fc725c9158f538efed0a34196dc90 | bd8a0b708629ccf66758f5b66e340ae2b3665b72 | /praktika/maksimaalne_rida.py | f4b27fbc95e7a6d753723de8b320391397749544 | [] | no_license | akaimar/python | d6f0ef16cce980f20fa896a65ce561349052b75b | 6030faf8cc6fcb5719c21c230a2a8f7a55735273 | refs/heads/main | 2023-01-10T07:13:26.148593 | 2020-10-27T21:15:12 | 2020-10-27T21:15:12 | 302,347,566 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,134 | py | """
+ Küsib kasutajalt failinime (maksimaalne_rida_fail.txt)
+ Pärib failist read
# Summerib read kokku
# Leiab suurima summaga rea numbri failis (algab 1st)
# Väljastab rea järjekorranumbri ekraanile
# Näide väljundist: Suurim summa on failis 2. real ja see on 99
"""
# USER INPUT — ask for the file to read (e.g. maksimaalne_rida_fail.txt).
failinimi = input('Sisestage failinimi: ')
fail = open(failinimi, encoding='UTF-8')  # NOTE(review): never closed; consider `with open(...)`.
# READ THE DATA FROM THE FILE
andmed = [] # all lines into one list
for rida in fail:
    andmed.append(rida)
table = [] # one list of tokens per line
for el in andmed:
    table.append(el.split())
# SUM EACH ROW OF THE TABLE
summad = [] # collects the total of every row
for n in range(len(table)): # number of row-lists in the table
    x = 0
    for i in range(len(table[n])): # walk the elements of row n
        x = x + int(table[n][i]) # accumulate the row total
    summad.append(x) # store the row total
# 1-based index of the row with the largest sum.
suurim_summa = summad.index(max(summad)) + 1
print("Summade nimekiri on järgmine:", summad)
print("Suurim summa on failis " + str(suurim_summa) + ". real ja see on " + str(max(summad)))
| [
"noreply@github.com"
] | noreply@github.com |
8cc15d1e5237607661d9a462a86e9f2eb81593f5 | 58d9ed3ea04d22449f03fafdb7cdc0aad8ff87f1 | /pyCardiac/signal/analysis/phase_singularity.py | 28c30c964cf036f2b12f678c3cbb16faf46560d2 | [
"MIT"
] | permissive | Lokynik/pyCardiac | 2401c25cabff4fb0018e7d4ed365e251b87de8da | f0ad0868af233830eb2c4c6cba6136e4c80265c1 | refs/heads/master | 2020-09-05T06:23:10.072867 | 2019-05-24T18:32:23 | 2019-05-24T18:32:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,770 | py | import numpy as np
from ...routines import phase_difference
def phase_singularity_detection_lasso(phase_array: np.ndarray, result: list,
                                      i_range: tuple = None, j_range: tuple = None):
    """Detects phase singularity points (PS) in ``phase_array`` with lasso method.

    Recursively bisects the region: if the phase circulation along the
    rectangle boundary indicates an enclosed singularity, the region is
    split along its longer axis (with overlap) until it is at most 3x3,
    whose centre is then recorded.

    Parameters
    ----------
    ``phase_array``: np.ndarray, shape=(X, Y)
    ``result`` : list
        list to append coordinates of PS
    ``i_range`` : tuple, optional
        range along first axis to process ``phase_array``
    ``j_range`` : tuple, optional
        range along second axis to process ``phase_array``

    Returns
    -------
    None
        use ``result`` to return PS coordinates
    """
    if type(result) is not list:
        raise Exception("Invalid value of the argument: <result> must be a list!")
    # Default to the full array; callers pass both ranges or neither.
    if (i_range == None): # => j_range == None too
        i_range = (0, phase_array.shape[0])
        j_range = (0, phase_array.shape[1])
    i_min, i_max = i_range
    j_min, j_max = j_range
    i_middle, j_middle = (i_max + i_min) // 2, (j_max + j_min) // 2
    N, M = i_max - i_min, j_max - j_min # phase shape
    # Circulation of the wrapped phase along the rectangle boundary:
    # down the left edge, up the right edge, then along top/bottom edges.
    diff = 0
    for i in range(i_min + 1, i_max):
        diff += phase_difference(phase_array[i-1, j_min], phase_array[i, j_min])
        diff -= phase_difference(phase_array[i-1, j_max-1], phase_array[i, j_max-1])
    for j in range(j_min + 1, j_max):
        diff -= phase_difference(phase_array[i_min, j-1], phase_array[i_min, j])
        diff += phase_difference(phase_array[i_max-1, j-1], phase_array[i_max-1, j])
    # Each enclosed singularity contributes +/- 2*pi to the loop integral.
    number_of_ps = np.round(abs(diff) / (2 * np.pi))
    if number_of_ps > 0:
        if ((N <= 3) and (M <= 3)):
            # Region small enough: report its centre as the PS location.
            x = i_middle
            y = j_middle
            result.append([x, y])
        elif (N >= M):
            # Bisect along the longer axis; the halves overlap by two rows
            # so a PS sitting exactly on the cut is not lost.
            phase_singularity_detection_lasso(phase_array, result, (i_min, i_middle+1), (j_min, j_max))
            phase_singularity_detection_lasso(phase_array, result, (i_middle-1, i_max), (j_min, j_max))
        elif (M > N):
            phase_singularity_detection_lasso(phase_array, result, (i_min, i_max), (j_min, j_middle+1))
            phase_singularity_detection_lasso(phase_array, result, (i_min, i_max), (j_middle-1, j_max))
    return number_of_ps
def phase_singularity_detection(phase_array: np.ndarray) -> np.ndarray:
    """Detects phase singularity points (PS) in ``phase_array``.

    For every interior pixel, sums the wrapped phase differences along the
    closed 8-segment loop through its neighbours (topological-charge test).
    A pixel whose loop circulation is close to +/-2*pi (|sum| >= 3.0 rad)
    is reported as a PS.

    Parameters
    ----------
    ``phase_array``: np.ndarray, shape=(X, Y)

    Returns
    -------
    np.ndarray, shape=(N, 2)
        x and y coordinates of PS
    """
    i_list, j_list = [], []
    for i in range(1, phase_array.shape[0] - 1):
        for j in range(1, phase_array.shape[1] - 1):
            # Wrapped phase differences along the loop around (i, j).
            k11 = phase_difference(phase_array[i-1, j], phase_array[i-1, j-1])
            k21 = phase_difference(phase_array[i-1, j+1], phase_array[i-1, j])
            k31 = phase_difference(phase_array[i, j+1], phase_array[i-1, j+1])
            k32 = phase_difference(phase_array[i+1, j+1], phase_array[i, j+1])
            k33 = phase_difference(phase_array[i+1, j], phase_array[i+1, j+1])
            k23 = phase_difference(phase_array[i+1, j-1], phase_array[i+1, j])
            k13 = phase_difference(phase_array[i, j-1], phase_array[i+1, j-1])
            k12 = phase_difference(phase_array[i-1, j-1], phase_array[i, j-1])
            # BUGFIX: k31 was computed but omitted from the sum, so one of
            # the 8 boundary segments never contributed to the circulation.
            k = k11 + k21 + k31 + k32 + k33 + k23 + k13 + k12
            # |circulation| ~= 2*pi at a singularity; 3.0 rad gives margin.
            if np.abs(k) >= 3.0:
                i_list.append(i)
                j_list.append(j)
    result = np.array([i_list, j_list]).transpose()
    return result
"pikunov@phystech.edu"
] | pikunov@phystech.edu |
1220ea2bbb065d2ac6dacade7ee3ed01983d7ba7 | 1e8feb9c9f0028a264a2f882f4911b4349dc5348 | /bingraphvis/angr/annotator.py | b7786294843ca13de7b86d7305fd85b12d9bfba4 | [
"BSD-2-Clause"
] | permissive | AmesianX/bingraphvis | b90b99454291ff104d3d0a9d245376a6c655ee25 | ebdd0b7d8f5d9cec81a7b1edb9dc78b7b0157117 | refs/heads/master | 2021-01-13T13:04:18.620480 | 2016-11-02T23:59:12 | 2016-11-02T23:59:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,663 | py |
from ..base import *
import capstone
import pyvex
class AngrColorSimprocedures(NodeAnnotator):
    """Fill simprocedure nodes: reddish for terminator/unresolved stubs,
    grey for all other simprocedures."""

    _SPECIAL_STUBS = ('PathTerminator', 'ReturnUnconstrained', 'UnresolvableTarget')

    def __init__(self):
        super(AngrColorSimprocedures, self).__init__()

    def annotate_node(self, node):
        obj = node.obj
        if not obj.is_simprocedure:
            return
        node.style = 'filled'
        if obj.simprocedure_name in self._SPECIAL_STUBS:
            node.fillcolor = '#ffcccc'
        else:
            node.fillcolor = '#dddddd'
class AngrColorExit(NodeAnnotator):
    """Fill non-simprocedure exit nodes green: nodes with a return edge,
    or nodes with no outgoing edges at all."""

    def __init__(self):
        super(AngrColorExit, self).__init__()

    def annotate_node(self, node):
        if node.obj.is_simprocedure:
            return
        outgoing = [e for e in self.graph.edges if e.src == node]
        returns = any('jumpkind' in e.meta and e.meta['jumpkind'] == 'Ijk_Ret'
                      for e in outgoing)
        # No successors means the block leaves the function as well.
        if returns or not outgoing:
            node.style = 'filled'
            node.fillcolor = '#ddffdd'
class AngrColorEntry(NodeAnnotator):
    """Fill function-entry blocks (addr == function_address) pale yellow."""

    def __init__(self):
        super(AngrColorEntry, self).__init__()

    def annotate_node(self, node):
        obj = node.obj
        if obj.is_simprocedure:
            return
        if not hasattr(obj, 'function_address'):
            return
        if obj.addr != obj.function_address:
            return
        node.style = 'filled'
        node.fillcolor = '#ffffcc'
class AngrColorEdgesVex(EdgeAnnotator):
    """Color CFG edges from the VEX IR of the source block.

    Mapping: return = grey, fake-return = dashed grey, call = black (dotted
    when the edge is not to the call target), conditional true/false =
    green/red, unconditional = blue, anything else = yellow.
    """
    EDGECOLOR_CONDITIONAL_TRUE = 'green'
    EDGECOLOR_CONDITIONAL_FALSE = 'red'
    EDGECOLOR_UNCONDITIONAL = 'blue'
    EDGECOLOR_CALL = 'black'
    EDGECOLOR_RET = 'grey'
    EDGECOLOR_UNKNOWN = 'yellow'
    def __init__(self):
        super(AngrColorEdgesVex, self).__init__()
    def annotate_edge(self, edge):
        # Lifted IRSB of the source node, when the 'vex' content is attached.
        # NOTE(review): if it is missing, ``vex`` stays None and the
        # Call/Boring branches below would raise AttributeError — presumably
        # callers always attach the 'vex' content first; confirm.
        vex = None
        if 'vex' in edge.src.content:
            vex = edge.src.content['vex']['vex']
        if 'jumpkind' in edge.meta:
            jk = edge.meta['jumpkind']
            if jk == 'Ijk_Ret':
                edge.color = self.EDGECOLOR_RET
            elif jk == 'Ijk_FakeRet':
                edge.color = self.EDGECOLOR_RET
                edge.style = 'dashed'
            elif jk == 'Ijk_Call':
                edge.color = self.EDGECOLOR_CALL
                # Dotted when the edge does not lead to the constant target.
                if len (vex.next.constants) == 1 and vex.next.constants[0].value != edge.dst.obj.addr:
                    edge.style='dotted'
            elif jk == 'Ijk_Boring':
                if len(vex.constant_jump_targets) > 1:
                    # Two-way branch: the edge matching the block's ``next``
                    # expression is colored as the false edge, the other as true.
                    if len (vex.next.constants) == 1:
                        if edge.dst.obj.addr == vex.next.constants[0].value:
                            edge.color=self.EDGECOLOR_CONDITIONAL_FALSE
                        else:
                            edge.color=self.EDGECOLOR_CONDITIONAL_TRUE
                    else:
                        edge.color=self.EDGECOLOR_UNKNOWN
                else:
                    edge.color=self.EDGECOLOR_UNCONDITIONAL
            else:
                #TODO warning
                edge.color = self.EDGECOLOR_UNKNOWN
class AngrPathAnnotator(EdgeAnnotator, NodeAnnotator):
    """Widen (width=3) the nodes and edges traversed by a concrete path.

    ``path.addr_trace`` is flattened once at construction; when the graph
    is attached, consecutive trace addresses that both appear in the graph
    are paired into the set of traversed edges.
    """
    def __init__(self, path):
        super(AngrPathAnnotator, self).__init__()
        self.path = path
        self.trace = list(path.addr_trace)

    def set_graph(self, graph):
        super(AngrPathAnnotator, self).set_graph(graph)
        self.vaddr = self.valid_addrs()
        # BUGFIX: ``filter(...)`` returns a lazy iterator on Python 3 which
        # cannot be sliced below — build a real list instead (identical
        # result on Python 2).
        ftrace = [a for a in self.trace if a in self.vaddr]
        self.edges_hit = set(zip(ftrace[:-1], ftrace[1:]))

    def valid_addrs(self):
        """Return the set of block addresses present in the graph."""
        vaddr = set()
        for n in self.graph.nodes:
            vaddr.add(n.obj.addr)
        return vaddr

    #TODO add caching
    #TODO not sure if this is valid
    def node_hit(self, node):
        """Heuristically decide whether ``node`` (with its callstack) was hit.

        Walks the callstack key innermost-first and requires each address
        to occur, in order, within the reversed trace.
        """
        ck = list(node.callstack_key)
        ck.append(node.addr)
        rtrace = list(reversed(self.trace))
        found = True
        si = 0
        for c in reversed(ck):
            if c is None:
                break
            try:
                si = rtrace[si:].index(c)
            except ValueError:
                # Address not present in the remaining trace suffix.
                found = False
                break
        return found

    def annotate_edge(self, edge):
        key = (edge.src.obj.addr, edge.dst.obj.addr)
        if key in self.edges_hit:
            edge.width = 3

    def annotate_node(self, node):
        if self.node_hit(node.obj):
            node.width = 3
class AngrBackwardSliceAnnotatorVex(ContentAnnotator):
    """Bold the VEX statements chosen by a backward slice ``bs``;
    slice targets additionally turn red."""

    def __init__(self, bs):
        super(AngrBackwardSliceAnnotatorVex, self).__init__('vex')
        self.bs = bs
        self.targets = set(self.bs._targets)

    def register(self, content):
        # Extra leading column flagging sliced statements with [*].
        content.add_column_before('taint')

    def annotate_content(self, node, content):
        obj = node.obj
        if obj.is_simprocedure or obj.is_syscall:
            return
        chosen = self.bs.chosen_statements[obj.addr]
        for stmt_idx, row in enumerate(content['data']):
            if stmt_idx not in chosen:
                continue
            row['addr']['style'] = 'B'
            row['statement']['style'] = 'B'
            row['taint'] = {'content': '[*]', 'style': 'B'}
            if (obj, stmt_idx) in self.targets:
                row['addr']['color'] = 'red'
                row['statement']['color'] = 'red'
class AngrBackwardSliceAnnotatorAsm(ContentAnnotator):
    """Bold the disassembly lines touched by a backward slice ``bs``.

    The slice is expressed as VEX statement indices, so the block is
    re-lifted and IMark statements are used to map chosen statement
    indices back to the instruction addresses they belong to.
    """
    def __init__(self, bs):
        super(AngrBackwardSliceAnnotatorAsm, self).__init__('asm')
        self.bs = bs
        self.targets = set(self.bs._targets)
    def register(self, content):
        # Extra leading column flagging sliced instructions with [*].
        content.add_column_before('taint')
    def annotate_content(self, node, content):
        if node.obj.is_simprocedure or node.obj.is_syscall:
            return
        # VEX statement indices chosen by the slice for this block.
        st = self.bs.chosen_statements[node.obj.addr]
        staddr = set()
        #TODO
        # Re-lift the block; IMarks delimit the machine instructions.
        vex = self.bs.project.factory.block(addr=node.obj.addr, max_size=node.obj.size).vex
        caddr = None
        for j, s in enumerate(vex.statements):
            if isinstance(s, pyvex.stmt.IMark):
                caddr = s.addr
            if j in st:
                staddr.add(caddr)
        for c in content['data']:
            if c['_addr'] in staddr:
                c['addr']['style'] = 'B'
                c['mnemonic']['style'] = 'B'
                c['operands']['style'] = 'B'
                c['taint'] = {
                    'content':'[*]',
                    'style':'B'
                }
class AngrColorDDGStmtEdges(EdgeAnnotator):
    """Color statement-level DDG edges by dependency kind:
    tmp = blue, reg = green, mem = red, anything else dotted."""

    def __init__(self, project=None):
        super(AngrColorDDGStmtEdges, self).__init__()
        self.project = project

    def annotate_edge(self, edge):
        meta = edge.meta
        if 'type' not in meta:
            return
        dep_kind = meta['type']
        if dep_kind == 'tmp':
            edge.color = 'blue'
            edge.label = 't%s' % meta['data']
        elif dep_kind == 'reg':
            data = meta['data']
            edge.color = 'green'
            # Prefer the architecture's register name when a project is set.
            if self.project:
                reg_name = self.project.arch.register_names[data.reg]
            else:
                reg_name = 'reg%s' % data.reg
            edge.label = '%s %s' % (reg_name, data.size)
        elif dep_kind == 'mem':
            edge.color = 'red'
            edge.label = str(meta['data'])
        else:
            edge.label = dep_kind
            edge.style = 'dotted'
class AngrColorDDGData(EdgeAnnotator, NodeAnnotator):
    """Color data-dependence edges by kind and highlight initial-state nodes."""

    _EDGE_COLORS = {'kill': 'red', 'mem_addr': 'blue', 'mem_data': 'blue'}

    def __init__(self, project=None, labels=False):
        super(AngrColorDDGData, self).__init__()
        self.project = project
        self.labels = labels

    def annotate_edge(self, edge):
        if 'type' not in edge.meta:
            return
        edge_kind = edge.meta['type']
        edge.color = self._EDGE_COLORS.get(edge_kind, 'yellow')
        # Address dependencies are additionally drawn dotted.
        if edge_kind == 'mem_addr':
            edge.style = 'dotted'
        if self.labels:
            edge.label = edge_kind

    def annotate_node(self, node):
        if not node.obj.initial:
            return
        node.style = 'filled'
        node.fillcolor = '#ccffcc'
class AngrActionAnnotatorVex(ContentAnnotator):
    """Append SimAction columns (type / addr / data) to the VEX view.

    Inspects the first final state of each block and annotates the
    statement rows for every ``SimActionData`` recorded in its log.
    """
    def __init__(self):
        super(AngrActionAnnotatorVex, self).__init__('vex')
    def register(self, content):
        content.add_column_after('action_type')
        content.add_column_after('action_addr')
        content.add_column_after('action_data')
    def annotate_content(self, node, content):
        # Imported lazily so the module loads without simuvex installed.
        from simuvex.s_action import SimActionData
        if node.obj.is_simprocedure or node.obj.is_syscall:
            return
        if len(node.obj.final_states) > 0:
            # Only the first final state is inspected.
            state = node.obj.final_states[0]
            for action in state.log.actions:
                if isinstance(action, SimActionData):
                    c = content['data'][action.stmt_idx]
                    c['action_type'] = {
                        'content': action.type+"/"+action.action+"("+str(action.size.ast)+")",
                        'align': 'LEFT'
                    }
                    #TODO
                    # NOTE(review): compares the *string* form against 'None'
                    # because addr/data are AST wrappers; verify whether a
                    # plain ``is not None`` check would suffice here.
                    if str(action.addr) != 'None':
                        c['action_addr'] = {
                            'content': str(action.addr.ast),
                            'align': 'LEFT'
                        }
                    if str(action.data) != 'None':
                        c['action_data'] = {
                            'content': str(action.data.ast),
                            'align': 'LEFT'
                        }
#EXPERIMENTAL
class AngrCodelocLogAnnotator(ContentAnnotator):
    """Attach log text, keyed by (block address, statement index),
    as an extra 'log' column of the VEX view."""

    def __init__(self, cllog):
        super(AngrCodelocLogAnnotator, self).__init__('vex')
        self.cllog = cllog

    def register(self, content):
        content.add_column_after('log')

    def annotate_content(self, node, content):
        obj = node.obj
        if obj.is_simprocedure or obj.is_syscall:
            return
        for stmt_idx, row in enumerate(content['data']):
            key = (obj.addr, stmt_idx)
            if key in self.cllog:
                row['log'] = {'content': self.cllog[key], 'align': 'LEFT'}
| [
"axt@load.hu"
] | axt@load.hu |
59d53bbb855c0d3cd5405009b5d4442fc843e8fb | 4eb779ea222c91c3a0c33421a75560bed5e9d2f2 | /Practice/code2.py | fd3bac578802934803de4f02e7f135697ca6a1b7 | [] | no_license | sunamya/Python | b632bd2349f4a0b70635a90b8ef18a8728317518 | 6911906d0c98885bcf84657e6f2240a622e9d150 | refs/heads/main | 2023-04-05T07:27:44.601337 | 2021-04-18T12:36:14 | 2021-04-18T12:36:14 | 359,138,552 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 270 | py | from __future__ import print_function
n=int(input("Enter Number Of Test Cases : "))  # NOTE(review): `n` is never used below
no=int(input("Enter Number OF Houses : "))
# NOTE(review): range(1, no) reads only no-1 lines and ignores the test-case
# count entirely — looks like an off-by-one / missing outer loop; confirm.
for i in range(1,no):
    road=input().split(" ") #Fetching data in single line
    road=[int(x) for x in road] #Converting into integer
    print(road)
| [
"sunamyagupta@gmail.com"
] | sunamyagupta@gmail.com |
beac6bfdd31b5bcf3da0c1b5ef8b6693220e0f26 | 4c43932dbfef8603c4414d88ab98dda1b9e163b8 | /mix_id_increment.py | 9c01c2ac8b59995266508b1b6262300c28c6f4a1 | [] | no_license | edison12347/usefull_code | 7ffe030422edf72fd668b3d3adcecaa57a8a3490 | 4d41e397e21620ec20a1c8da936771f62f46ea39 | refs/heads/master | 2021-10-12T03:18:49.593399 | 2019-02-01T08:17:16 | 2019-02-01T08:17:16 | 103,551,930 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,279 | py | stmt = text("SELECT ListID FROM {} ORDER BY ListID DESC LIMIT 1".format(table))
# Fetch the most recently issued ListID; its dash-separated prefix is a
# base-36 counter (digits 0-9 then A-Z) that is incremented below.
select_last_list_id = connect.execute(stmt)
last_list_id = select_last_list_id.fetchone()[0]
prefix, _ = last_list_id.split('-')
incremented_prefix = copy.copy(prefix)
# Counter alphabet in carry order: '9' rolls over to 'A', 'Z' wraps to '0'.
incrementation_list = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ'
# prefix[-1], prefix[-2], ..., prefix[1], then prefix[0] — i.e. the prefix
# reversed, so it is walked least-significant symbol first (odometer style).
rotated_prefix = ''.join([prefix[-i] for i in range(1, len(prefix))]) + prefix[0]
for position, symbol in enumerate(rotated_prefix):
    # Map the reversed position back to the index inside `prefix`.
    tail_position = len(prefix) - 1 - position
    if symbol != 'Z':
        # No carry: bump this symbol to its successor and stop.
        next_index = incrementation_list.index(symbol) + 1
        next_symbol = incrementation_list[next_index]
        incremented_prefix_list = [letter for letter in incremented_prefix]
        incremented_prefix_list[tail_position] = next_symbol
        incremented_prefix = ''.join(incremented_prefix_list)
        break
    else:
        # Carry: 'Z' wraps to '0' and the next (more significant) symbol
        # is processed. NOTE(review): an all-'Z' prefix wraps to all zeros.
        incremented_prefix_list = [letter for letter in incremented_prefix]
        incremented_prefix_list[tail_position] = '0'
        incremented_prefix = ''.join(incremented_prefix_list)
# New id: incremented prefix plus a random 10-digit numeric suffix.
list_id = incremented_prefix + '-' + str(random.randint(1000000000, 9999999999))
"noreply@github.com"
] | noreply@github.com |
9829e741774cc716fa4e10fe0dbf778cbe079821 | 041122bdc412b8c311eeb68c9aa3a4bac5249145 | /crawlers/socials/pikabu.ru.py | e379ddf762d7da5e13301e9290f60243cab37d14 | [
"Apache-2.0"
] | permissive | fostroll/ru_corner | 5df269ab88bddf9d02f8c6967a063cb9b0b56515 | defb681aa9311c2dd6ed98d1b934453c29e9a750 | refs/heads/master | 2023-06-23T18:38:34.218504 | 2021-07-27T12:16:51 | 2021-07-27T12:16:51 | 314,045,739 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,196 | py | #!/usr/bin/python -u
#-*- encoding: utf-8 -*-
from collections import OrderedDict
import json
import os
import random
import re
import time
###
import sys
sys.path.append('../')
###
import utils
import _utils
SEED = 42
ROOT_URL = 'https://pikabu.ru'
INIT_URL = ROOT_URL + '/new?twitmode=1&of=v2&page={}&_={}'
HEADERS = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:84.0) Gecko/20100101 Firefox/84.0',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
'Accept-Language': 'en-US,en;q=0.5',
'Accept-Encoding': 'gzip, deflate, br',
'Connection': 'keep-alive',
'Upgrade-Insecure-Requests': '1',
'TE': 'Trailers'
}
AUTHORS_IGNORE_FN = os.path.join(utils.PAGES_DIR, 'authors_ignore')
POSTS_IGNORE_FN = os.path.join(utils.PAGES_DIR, 'posts_ignore')
MAX_FILES = 10000
MIN_DEPTH = 4
SKIP_FIRST = 100
SILENT = False
if SEED:
random.seed(SEED)
'''===========================================================================
Texts download and parse
==========================================================================='''
page_fns = utils.get_file_list(utils.PAGES_DIR, MAX_FILES)
if len(page_fns) < utils.TEXTS_FOR_SOURCE:
need_enter = False
texts_total = 0
re0 = re.compile(r'\W|\d')
re1 = re.compile(r'[^ЁА-Яёа-я]')
re2 = re.compile(r'[^\S\n]+')
re3 = re.compile(r'\n+')
re4 = re.compile(r'#\b\S+\b')
re5 = re.compile(r'\W')
re6 = re.compile(r'[a-z]+://\S+')
re7 = re.compile(r'<p>((?:.|\n)*?)</p>')
re8 = re.compile(r'<blockquote(?:.|\n)+?</blockquote>')
re9 = re.compile(r'<figure(?:.|\n)+?</figure>')
re10 = re.compile(r'<(?:.|\n)*?>')
re11 = re.compile(r'#comment_\d+$')
    def parse_comments(comments, header, authors_ignore):
        """Try to turn one comment thread into a stored dialog chunk.

        ``comments`` is a list of (html, author) pairs, root comment first.
        Each comment is stripped to plain text; the thread is then trimmed
        from the tail until it is mostly Cyrillic and within the word
        budget. On success, the raw thread (pages dir) and the rendered
        dialog (texts dir) are written under the current module-level
        ``texts_total`` index and True is returned; otherwise False.
        """
        res, lines, authors = False, [], {}
        for comment, author in comments:
            # Stop at the first author we have already harvested from.
            if authors_ignore and author in authors_ignore:
                break
            authors[author] = author
            # HTML -> plain text: paragraphs/breaks to newlines; strip
            # blockquotes, figures and remaining tags; squeeze whitespace.
            line = re7.sub(r'\n\g<1>\n', comment)
            line = line.replace('<br>', '\n').replace('<hr>', '\n')
            line = re10.sub('', re9.sub('', re8.sub('', line)))
            line = re2.sub(' ', re3.sub('\n', utils.norm_text2(line)))
            # Truncate the thread at deleted comments or bare reply anchors.
            if not line or line.startswith('Комментарий удален.') \
            or re11.match(line):
                break
            lines.append((line, author))
        text = None
        # Shrink the thread from the tail until it qualifies.
        while True:
            if len(lines) < MIN_DEPTH:
                break
            text_ = '\n'.join(x[0] for x in lines)
            text = '\n'.join(x[1] + '\t' + x[0].replace('\n', '\n\t') \
                                 for x in lines)
            if not SILENT:
                print(text)
            # Cyrillic-ratio check on the text with URLs removed.
            text_ = re6.sub('', text_)
            text0 = re0.sub('', text_)
            text1 = re1.sub('', text0)
            if text0 and len(text1) / len(text0) >= .9:
                # Word count, ignoring hashtags and punctuation-only tokens.
                num_words = len([x for x in re4.sub('', text_).split()
                                     if re5.sub('', x)])
                if not SILENT:
                    print('<russian>')
                    print(num_words)
                if num_words < _utils.MIN_CHUNK_WORDS:
                    break
                if num_words > _utils.MAX_CHUNK_WORDS:
                    lines = lines[:-1]
                    continue
                res = True
                break
            elif not SILENT:
                print('<foreign>')
            lines = lines[:-1]
            continue
        if res:
            # Persist the chunk under the module-level ``texts_total`` index.
            page_fn = utils.get_data_path(utils.PAGES_DIR, MAX_FILES,
                                          texts_total)
            text_fn = utils.get_data_path(utils.TEXTS_DIR, MAX_FILES,
                                          texts_total)
            with open(page_fn, 'wt', encoding='utf-8') as f:
                print(header, file=f)
                json.dump(comments, f, indent=4, ensure_ascii=False)
            with open(text_fn, 'wt', encoding='utf-8') as f:
                print('{} ({})'.format(texts_total, header), file=f)
                f.write(text)
            if authors_ignore is not None:
                # Local ``need_enter``: whether the ignore file already
                # exists (then prepend a newline separator). Deliberately
                # shadows the module-level flag of the same name.
                need_enter = os.path.isfile(AUTHORS_IGNORE_FN)
                with open(AUTHORS_IGNORE_FN, 'at', encoding='utf-8') as f:
                    if need_enter:
                        print(file=f)
                    f.write('\n'.join('\t'.join(x) for x in authors.items()))
                authors_ignore.update(authors)
        print('\r{} (of {})'.format(texts_total, utils.TEXTS_FOR_SOURCE),
              end='')
        return res
for texts_total, page_fn in enumerate(page_fns, start=1):
if os.path.isfile(page_fn.replace(utils.PAGES_DIR, utils.TEXTS_DIR)):
continue
with open(page_fn, 'rt', encoding='utf-8') as f:
header = f.readline().strip()
comments = json.load(f)
parse_comments(comments, header, None)
texts_total += 1
if os.path.isfile(AUTHORS_IGNORE_FN):
with open(AUTHORS_IGNORE_FN, 'rt', encoding='utf-8') as f:
authors_ignore = OrderedDict(x.split('\t')
for x in f.read().split('\n')
if x)
else:
authors_ignore = OrderedDict()
if os.path.isfile(POSTS_IGNORE_FN):
with open(POSTS_IGNORE_FN, 'rt', encoding='utf-8') as f:
posts_ignore = set(x for x in f.read().split('\n') if x)
else:
posts_ignore = set()
url, last_success_url = None, None
retry = False
try:
page_no = SKIP_FIRST
while True:
page_no += 1
url = INIT_URL.format(page_no, time.time_ns() // 1000000)
if not SILENT:
print(url)
res = utils.get_url(url, headers=HEADERS)
res = res.json()
#with open('000.json', 'wt', encoding='utf-8') as f:
# from pprint import pprint
# pprint(res, stream=f)
#exit()
data = res['data']['stories']
for post in data:
post, post_id = post['html'], post['id']
if post_id in posts_ignore:
print('WARNING: Post was already processed. Skipping')
continue
match = re.search(
'<span class="story__comments-link-count">(\d+)</span>',
post
)
if not match:
print('WARNING: Number of comments is not found')
continue
last_success_url = url
num_comments = int(match.group(1))
if num_comments < 12:
continue
match = re.search(
'href="({}/story/\S+?_{})#comments">' \
.format(ROOT_URL, post_id),
post
)
if not match:
print('WARNING: Link to comments is not found')
continue
url = match.group(1)
res = utils.get_url(url, headers=HEADERS)
res = res.text
#with open('111.html', 'wt', encoding='utf-8') as f:
# print(res, file=f)
#exit()
pos = res.find(
'<div class="comments__container_main comments__container" data-story-id="{}">'
.format(post_id)
)
if pos < 0:
print('ERROR: Invalid format')
with open('error.log', 'wt', encoding='utf-8') as f:
print(url, file=f)
print(res, file=f)
assert 0
def store_post_id(post_id):
posts_ignore.add(post_id)
with open(POSTS_IGNORE_FN, 'at',
encoding='utf-8') as f:
print(post_id, file=f)
if texts_total > utils.TEXTS_FOR_SOURCE:
raise OverflowError()
comments, num_comments = [], 0
inprogress = False
while True:
res = res[pos:]
pos = res.find('<div class="comment"')
if pos < 0:
if inprogress and num_comments >= MIN_DEPTH \
and parse_comments(comments, url,
authors_ignore):
texts_total += 1
need_enter = True
store_post_id(post_id)
break
res = res[pos:]
token = 'data-indent="'
pos = res.find(token)
res = res[pos + len(token):]
pos = res.find('"')
depth = int(res[:pos])
if depth == 0:
inprogress = True
if inprogress and depth < num_comments \
and num_comments >= MIN_DEPTH \
and parse_comments(comments, url,
authors_ignore):
texts_total += 1
need_enter = True
store_post_id(post_id)
inprogress = False
if inprogress:
token = '<div class="comment__user"'
pos = res.find(token)
author = res[pos + len(token):]
token = 'data-name="'
pos = author.find(token)
author = author[pos + len(token):]
pos = author.find('"')
author = author[:pos]
token = '<div class="comment__content">'
pos = res.find(token)
comment = res[pos + len(token):]
pos = comment.find('<div class="comment__controls')
comment = comment[:pos].rstrip()
for token in ['<!--noindex-->', '</div>']:
if not comment.endswith(token):
print('ERROR: Invalid format')
with open('error.log', 'wt',
encoding='utf-8') as f:
print(url, file=f)
print(comment, file=f)
print(file=f)
print(res, file=f)
assert 0
comment = comment[:-len(token)].strip()
comments[depth:] = [(comment, author)]
num_comments = depth
with open('error.log', 'wt',
encoding='utf-8') as f:
print('NO POSTS. Last success url:', file=f)
print(last_success_url, file=f)
assert 0
except OverflowError:
pass
if need_enter:
print()
# Refuse to run if chunk files from a previous run already exist.
if os.path.isfile(utils.get_data_path(utils.CHUNKS_DIR, MAX_FILES, 1)):
    print('WARNING: Chunks are already exist. '
          'Delete them if you want to recreate')
    exit()
# Page files and text files are expected to correspond one-to-one.
page_fns = utils.get_file_list(utils.PAGES_DIR, MAX_FILES)
text_fns = utils.get_file_list(utils.TEXTS_DIR, MAX_FILES)
assert len(page_fns) == len(text_fns)
#new_order = utils.shuffle_file_list(page_fns)
# Shuffle the text files in place.
# NOTE(review): page files are not shuffled with the same permutation any
# more (the line above is commented out) -- confirm that is intentional.
utils.shuffle_file_list(text_fns, new_order=None)
'''===========================================================================
Chunks creation
==========================================================================='''
_utils.make_chunks(MAX_FILES)
'''===========================================================================
Tokenization
==========================================================================='''
utils.tokenize(MAX_FILES, isdialog=True)
| [
"fostroll@gmail.com"
] | fostroll@gmail.com |
1e173b0d0f6cebec0e25b023d5dc35c3ce20abf4 | 04d0cb0e687c4cd7e433393c8ae35cd9725bb9f1 | /plugins/operators/stage_s3.py | 4c7648190f49a8e45cc022dfd08c97bbda6d34d3 | [] | no_license | mrthlinh/Covid_GoogleTrend | 98ec973e3f60484be858457727c1ca0d11202ea9 | 6f9b09213ff0d580577bc8f37b2e0c31126d8357 | refs/heads/master | 2022-11-15T03:11:07.261896 | 2020-07-13T14:46:51 | 2020-07-13T14:46:51 | 278,245,251 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,637 | py | from airflow.models import BaseOperator
from airflow.contrib.hooks.aws_hook import AwsHook
from airflow.hooks.S3_hook import S3Hook
from airflow.utils.decorators import apply_defaults
import requests
import json
class StateToS3Operator(BaseOperator):
    """Stage covidtracking.com per-state daily stats into S3.

    For every US state code, downloads the state's ``daily.json`` feed and
    uploads it to the configured bucket as newline-delimited JSON under the
    key ``Test/<STATE>/daily.json``.
    """

    # Two-letter codes of the 50 US states queried by execute().
    state_code = ['AL','AK','AZ','AR','CA','CO','CT','DE','FL','GA',
                  'HI','ID','IL','IN','IA','KS','KY','LA','ME','MD',
                  'MA','MI','MN','MS','MO','MT','NE','NV','NH','NJ',
                  'NM','NY','NC','ND','OH','OK','OR','PA','RI','SC',
                  'SD','TN','TX','UT','VT','VA','WA','WV','WI','WY']

    @apply_defaults
    def __init__(self,
                 aws_conn_id="",
                 s3_bucket="",
                 *args, **kwargs):
        """Store the Airflow AWS connection id and the target bucket name."""
        super(StateToS3Operator, self).__init__(*args, **kwargs)
        self.aws_conn_id = aws_conn_id
        self.s3_bucket = s3_bucket

    def execute(self, context):
        """Fetch each state's daily feed and write it to S3 as JSON lines."""
        hook = S3Hook(self.aws_conn_id)
        for state in self.state_code:
            url = "https://covidtracking.com/api/v1/states/{}/daily.json".format(state)
            self.log.info(url)
            # One JSON object per line, keys sorted for stable output.
            records = requests.get(url).json()
            payload = "\n".join(json.dumps(record, sort_keys=True)
                                for record in records)
            key = "/".join(["Test", state, "daily.json"])
            hook.load_string(payload, key, bucket_name=self.s3_bucket)
| [
"linhtruong@linhs-mbp.lan"
] | linhtruong@linhs-mbp.lan |
e810dad75980e35a6c7789a53d2a848683a6677c | e0a83b46e5fbd2e80ccafb7b1d4f792d31d516b8 | /pascal/alerts/serializer.py | ac5866f1ce7e250f818022e15cec1dc9efa30c99 | [] | no_license | helloworld76757/pascal | f74ad1a5b13f01fa1613d2786b9f4c0f763eb034 | 37b5b12cba862336742e609475e874c4b0f3efbf | refs/heads/master | 2020-06-13T05:15:55.037124 | 2016-12-03T00:59:08 | 2016-12-03T00:59:08 | 75,442,115 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 223 | py | from rest_framework import serializers
from .models import Alert, AlertResponse
class AlertSerializer(serializers.ModelSerializer):
    """DRF serializer exposing an Alert's timestamp, name and value."""
    class Meta:
        # Only these Alert model fields are serialized; all others are hidden.
        model = Alert
        fields = ('timestamp', 'name', 'value')
| [
"helloworld76757@mailinator.com"
] | helloworld76757@mailinator.com |
8ce92bccb334b194201db3ad0db027122a10c3f5 | a01aa15daf3f625420a0ab1bee18674361dee717 | /code/processAcData.py | c900423cb8e1a90fc357bf0a1d0397b9788979a7 | [] | no_license | sirinda-p/sna_utcc | f6ddf92a2ce81ec7a9f69f8da0deafdf2dcc1fc2 | 39276ebd838a9d2d6ee209a4a50fe25e721473a3 | refs/heads/master | 2020-04-03T15:29:38.820434 | 2016-03-25T09:40:09 | 2016-03-25T09:40:09 | 39,806,492 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 650 | py | import os
from igraph import *
# Remove nodes and corresponding edges that are not in the collected class section
def removeNodeAndEdge():
    """Trim each "-all" GML graph down to one class section.

    Keeps only vertices whose 'id' attribute falls in range(227, 312)
    (student ids 5702100227 onward) and writes the induced subgraph to a
    sibling file whose name has "-all" replaced by "-1sec".

    NOTE(review): the original header comment says "to ...312", but
    range(227, 312) excludes 312 -- confirm which bound is intended.
    """
    gml_dir = "/home/amm/Desktop/sna-project/sna-git/data/gml/"
    sources = ["Ac57-all_bf.gml", "Ac57-all_friend.gml", "Ac57-all_study.gml"]
    for src in sources:
        dst = src.replace("-all", "-1sec")
        print(dst)
        graph = read(gml_dir + src, format="gml")
        keep = [v for v in graph.vs() if int(v['id']) in range(227, 312)]
        write(graph.subgraph(keep), gml_dir + dst)
removeNodeAndEdge()
| [
"sirinda111@gmail.com"
] | sirinda111@gmail.com |
1078c72e567126b5c3bae58f97f4f36b32696eaf | 240c4398e2886256099cb18b7c4cbcfbc08a3ff8 | /efb-v2/res/bak_config/modules/filter.py | 341bfb67fdbada89fc84eecae9159095447fbe20 | [] | no_license | bmwcto/docker | f1d0674dda1c1bce0735b60acff0f0516bbc49fe | 504949a8dfa233f50e599cbebd929dcdb7a3b8b9 | refs/heads/main | 2023-04-01T12:15:42.560212 | 2021-04-12T16:02:51 | 2021-04-12T16:02:51 | 355,555,018 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,308 | py | import pathlib
import shelve
import atexit
import uuid
from collections.abc import Mapping
from threading import Timer
from typing import Optional, Union, Dict
from typing_extensions import overload, Literal
from ruamel.yaml import YAML
from ehforwarderbot import Middleware, Message, Status, coordinator, utils
from ehforwarderbot.chat import Chat, SystemChat
from ehforwarderbot.types import ModuleID, MessageID, InstanceID, ChatID
from ehforwarderbot.message import MsgType, MessageCommands, MessageCommand
from ehforwarderbot.status import MessageRemoval, ReactToMessage, MessageReactionsUpdate
class FilterMiddleware(Middleware):
    """
    Filter middleware.
    A demo of advanced user interaction with master channel.

    Messages and statuses are passed through a chain of filter callables;
    if any filter returns truthy, the entity is dropped.  Sending the text
    ``filter``` + backtick hotword from the master channel pops up an
    interactive status message with a toggle button.
    """
    middleware_id: ModuleID = ModuleID("filter.FilterMiddleware")
    middleware_name: str = "Filter Middleware"
    __version__: str = '1.1.1'
    # Status messages sent to the master, keyed by their message id, so the
    # toggle command callback can look the original message back up.
    message_cache: Dict[MessageID, Message] = {}

    def __init__(self, instance_id: Optional[InstanceID] = None):
        super().__init__(instance_id)
        # load config (created empty on first run)
        self.yaml = YAML()
        conf_path = utils.get_config_path(self.middleware_id)
        if not conf_path.exists():
            conf_path.touch()
        self.config = self.yaml.load(conf_path)
        # The chat-id filter is always active; config may enable more below.
        self.filters = [
            self.chat_id_based_filter
        ]
        # Mapping from config key to the filter method it enables.
        self.FILTER_MAPPING = {
            "chat_name_contains": self.chat_name_contains_filter,
            "chat_name_matches": self.chat_name_matches_filter,
            "message_contains": self.message_contains_filter,
            "ews_mp": self.ews_mp_filter
        }
        # Chat ID based filter init: persistent per-chat on/off flags.
        shelve_path = str(utils.get_data_path(self.middleware_id) / "chat_id_filter.db")
        self.chat_id_filter_db = shelve.open(shelve_path)
        atexit.register(self.atexit)
        # load other filters listed in the config file
        if isinstance(self.config, Mapping):
            for i in self.config.keys():
                f = self.FILTER_MAPPING.get(i)
                if f:
                    self.filters.append(f)

    def atexit(self):
        """Flush and close the shelve database on interpreter exit."""
        self.chat_id_filter_db.close()

    def process_message(self, message: Message) -> Optional[Message]:
        # Only intercept the message when it is a text message matching the
        # hotword "filter`" sent from the master channel side.
        if message.type == MsgType.Text and message.text == "filter`" and \
                message.deliver_to != coordinator.master:
            reply = self.make_status_message(message)
            self.message_cache[reply.uid] = message
            coordinator.master.send_message(reply)
            return None
        # Do not filter messages from master channel
        if message.deliver_to != coordinator.master:
            return message
        # Try to filter all other messages.
        return self.filter(message)

    def make_status_message(self, msg_base: Message = None, mid: MessageID = None) -> Message:
        """Build (or rebuild, given ``mid``) the interactive status message
        showing whether a chat is filtered, with a toggle command button.

        Raises ValueError when neither ``msg_base`` nor ``mid`` is given.
        """
        if mid is not None:
            msg = self.message_cache[mid]
        elif msg_base is not None:
            msg = msg_base
        else:
            raise ValueError
        reply = Message(
            type=MsgType.Text,
            chat=msg.chat,
            author=msg.chat.make_system_member(uid=ChatID("filter_info"), name="Filter middleware", middleware=self),
            deliver_to=coordinator.master,
        )
        # Reuse the id when editing an existing status message.
        if mid:
            reply.uid = mid
        else:
            reply.uid = str(uuid.uuid4())
        status = self.filter_reason(msg)
        if not status:
            # Blue circle emoji
            status = "\U0001F535 This chat is not filtered."
        else:
            # Red circle emoji
            status = "\U0001F534 " + status
        reply.text = "Filter status for chat {chat_id} from {module_id}:\n" \
                     "\n" \
                     "{status}\n".format(
                         module_id=msg.chat.module_id,
                         chat_id=msg.chat.id,
                         status=status
                     )
        command = MessageCommand(
            name="%COMMAND_NAME%",
            callable_name="toggle_filter_by_chat_id",
            kwargs={
                "mid": reply.uid,
                "module_id": msg.chat.module_id,
                "chat_id": msg.chat.id
            }
        )
        # Button label and value reflect the current filter state.
        if self.is_chat_filtered_by_id(msg.chat):
            command.name = "Unfilter by chat ID"
            command.kwargs['value'] = False
        else:
            command.name = "Filter by chat ID"
            command.kwargs['value'] = True
        reply.commands = MessageCommands([command])
        return reply

    def toggle_filter_by_chat_id(self, mid: str, module_id: str,
                                 chat_id: str, value: bool):
        """Command callback: persist the new filter flag and edit the
        status message in place to show the updated state."""
        self.chat_id_filter_db[str((module_id, chat_id))] = value
        reply = self.make_status_message(mid=mid)
        reply.edit = True
        # Timer(0.5, coordinator.master.send_message, args=(reply,)).start()
        coordinator.master.send_message(reply)
        return None

    @staticmethod
    def get_chat_key(chat: Chat) -> str:
        """Shelve key for a chat: the stringified (module_id, id) tuple."""
        return str((chat.module_id, chat.id))

    def process_status(self, status: Status) -> Optional[Status]:
        # Drop the status if any filter matches it.
        for i in self.filters:
            if i(status, False):
                return None
        return status

    def filter_reason(self, message: Message):
        """Return the first filter's human-readable reason, or False if no
        filter matches the message."""
        for i in self.filters:
            reason = i(message, True)
            if reason is not False:
                return reason
        return False

    def filter(self, message: Message):
        """Return None (drop) if any filter matches, else the message."""
        for i in self.filters:
            if i(message, False):
                return None
        return message

    @staticmethod
    def get_chat_from_entity(entity: Union[Message, Status]) -> Optional[Chat]:
        """Extract the relevant Chat from a message or a chat-bearing
        status; returns None for status types without one."""
        if isinstance(entity, Message):
            return entity.chat
        elif isinstance(entity, MessageRemoval):
            return entity.message.chat
        elif isinstance(entity, ReactToMessage):
            return entity.chat
        elif isinstance(entity, MessageReactionsUpdate):
            return entity.chat
        else:
            return None

    # region [Filters]
    """
    Filters
    A filter must take only two arguments apart from self:
    - ``entity`` (``Union[Message, Status]``)
        The message entity to filter
    - ``reason`` (``bool``)
        Determine whether or not to return the reason to block a message
    To allow a message to be delivered, return ``False``.
    Otherwise, return ``True`` or a string to explain the reason of filtering
    if ``reason`` is ``True``.
    """

    @overload
    def chat_id_based_filter(self,
                             entity: Union[Message, Status],
                             reason: Literal[True]) -> Union[bool, str]:
        ...

    @overload
    def chat_id_based_filter(self,
                             entity: Union[Message, Status],
                             reason: Literal[False]) -> bool:
        ...

    def chat_id_based_filter(self,
                             entity: Union[Message, Status],
                             reason: bool) -> Union[bool, str]:
        """Filter chats manually toggled off via the shelve database."""
        chat = self.get_chat_from_entity(entity)
        if not chat:
            return False
        if self.is_chat_filtered_by_id(chat):
            if reason:
                return "Chat is manually filtered."
            else:
                return True
        else:
            return False

    def is_chat_filtered_by_id(self, chat: Chat) -> bool:
        """True if the chat's persisted toggle flag is set."""
        key = str((chat.module_id, chat.id))
        if key in self.chat_id_filter_db:
            return self.chat_id_filter_db[key]
        return False

    def chat_name_contains_filter(self, entity, reason):
        """Filter chats whose display name contains a configured substring."""
        chat = self.get_chat_from_entity(entity)
        if not chat:
            return False
        for i in self.config['chat_name_contains']:
            if i in chat.display_name:
                if reason:
                    return "Chat is filtered because its name contains \"{}\".".format(i)
                else:
                    return True
        return False

    def chat_name_matches_filter(self, entity, reason):
        """Filter chats whose display name exactly matches a configured name."""
        chat = self.get_chat_from_entity(entity)
        if not chat:
            return False
        for i in self.config['chat_name_matches']:
            if i == chat.display_name:
                if reason:
                    return "Chat is filtered because its name matches \"{}\".".format(i)
                else:
                    return True
        return False

    def message_contains_filter(self, entity, reason):
        """Filter messages whose text contains a configured substring."""
        if not isinstance(entity, Message):
            return False
        for i in self.config['message_contains']:
            if i in entity.text:
                if reason:
                    return "Message is filtered because its contains \"{}\".".format(i)
                else:
                    return True
        return False

    def ews_mp_filter(self, entity, reason):
        """Filter chats flagged by the EWS slave channel as WeChat official
        accounts (vendor-specific 'is_mp' flag)."""
        chat = self.get_chat_from_entity(entity)
        if not chat:
            return False
        if chat.vendor_specific.get('is_mp'):
            if reason:
                return "Chat is filtered as it's a EWS \"WeChat Official Account\" chat."
            else:
                return True
        return False
    # endregion [Filters]
| [
"wowjoint@gmail.com"
] | wowjoint@gmail.com |
07051b2b2d87f429737993fa6057c7d0ccc452f6 | ef914133e0ade675ae201f7895c50d819180951b | /attacks_SF.py | 42181fb80340a753a0c25e769c15a8c2ee56057c | [] | no_license | vpahari/biconn | b094d6e7e6270f7601fde7de2f4d4528cd80aa20 | fd2259dfeb73a39bbdd4e616700f912cec8f17cf | refs/heads/master | 2021-06-01T18:54:09.477458 | 2020-09-22T14:49:48 | 2020-09-22T14:49:48 | 136,077,333 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 20,686 | py | import networkx as nx
import networkit as nk
import random
import sys
import math
from functools import reduce
import csv
from operator import itemgetter
import matplotlib.pyplot as plt
plt.switch_backend('agg')
import pickle
import igraph as ig
import numpy as np
import os
import itertools
def get_name_WS(initial_name, dim, size, nei, p, SEED, radius):
    """Build the pickle filename encoding a Watts-Strogatz run's parameters."""
    parts = [initial_name, "dim", str(dim), "size", str(size), "nei", str(nei),
             "p", str(p), "SEED", str(SEED), "radius", str(radius), ""]
    return "_".join(parts) + ".pickle"
def get_name_ER(initial_name, N, k, SEED, radius):
    """Build the pickle filename encoding an Erdos-Renyi run's parameters."""
    return "{}_N_{}_k_{}_SEED_{}_radius_{}_.pickle".format(
        initial_name, N, k, SEED, radius)
def get_name_SF(initial_name, N, k, exp_out, SEED, radius):
    """Build the pickle filename encoding a scale-free run's parameters."""
    return "{}_N_{}_k_{}_expout_{}_SEED_{}_radius_{}_.pickle".format(
        initial_name, N, k, exp_out, SEED, radius)
def make_WS_graph(dim, size, nei, p, SEED):
    """Create a Watts-Strogatz graph and return it as a networkit graph.

    The igraph generator output is rebuilt through networkx so that all
    size**dim node ids exist (including isolates) before conversion.
    # NOTE(review): random.seed(SEED) presumably seeds python-igraph's
    # default RNG (it uses the stdlib random module) -- confirm.
    """
    N = size ** dim
    random.seed(SEED)
    igG = ig.Graph.Watts_Strogatz(dim,size,nei,p)
    allEdges = igG.get_edgelist()
    fixed_G = nx.Graph()
    listOfNodes = [i for i in range(N)]
    fixed_G.add_nodes_from(listOfNodes)
    fixed_G.add_edges_from(allEdges)
    G_nk = nk.nxadapter.nx2nk(fixed_G)
    return G_nk
def make_SF_Graph(N, k, exp_out, SEED):
    """Create a static power-law (scale-free) graph with N nodes and mean
    degree k, and return it as a networkit graph.

    Edge count is N*k/2; exp_out is the power-law exponent.  The graph is
    round-tripped through networkx so all N node ids exist.
    """
    random.seed(SEED)
    num_edges = int((N * k) / 2)
    igG = ig.Graph.Static_Power_Law(N,num_edges,exp_out)
    allEdges = igG.get_edgelist()
    fixed_G = nx.Graph()
    listOfNodes = [i for i in range(N)]
    fixed_G.add_nodes_from(listOfNodes)
    fixed_G.add_edges_from(allEdges)
    G_nk = nk.nxadapter.nx2nk(fixed_G)
    return G_nk
def make_ER_Graph(N, k, SEED):
    """Create a seeded Erdos-Renyi G(N, k/(N-1)) graph (mean degree k)
    and return it as a networkit graph."""
    G_nx = nx.erdos_renyi_graph(N, k/(N-1), seed = SEED)
    G_nk = nk.nxadapter.nx2nk(G_nx)
    return G_nk
def DA_attack(G_copy, num_nodes_to_remove):
    """Non-adaptive degree attack.

    Ranks nodes by degree ONCE on the intact graph (shuffled first so ties
    are broken at random), then removes the top nodes one by one, recording
    the giant-component size after each removal.  Returns the GC-size list
    (length num_nodes_to_remove + 1, including the intact graph).
    """
    G = copy_graph(G_copy)
    GC_List = []
    GC_List.append(get_GC(G))
    degree = nk.centrality.DegreeCentrality(G)
    degree.run()
    degree_sequence = degree.ranking()
    # Shuffle before the stable sort so equal-degree ties are random.
    random.shuffle(degree_sequence)
    degree_sequence.sort(key = itemgetter(1), reverse = True)
    for i in range(num_nodes_to_remove):
        node_to_remove = degree_sequence[i][0]
        G.removeNode(node_to_remove)
        GC_List.append(get_GC(G))
    return GC_List
def ADA_attack(G_copy, num_nodes_to_remove):
    """Adaptive degree attack: recompute the degree ranking after every
    removal and delete the current highest-degree node (random tie-break).
    Returns the giant-component size after each removal (intact graph first).
    """
    G = copy_graph(G_copy)
    GC_List = []
    GC_List.append(get_GC(G))
    for i in range(num_nodes_to_remove):
        print(i)  # progress trace
        degree = nk.centrality.DegreeCentrality(G)
        degree.run()
        degree_sequence = degree.ranking()
        random.shuffle(degree_sequence)
        degree_sequence.sort(key = itemgetter(1), reverse = True)
        node_to_remove = degree_sequence[0][0]
        G.removeNode(node_to_remove)
        GC_List.append(get_GC(G))
    return GC_List
def BA_attack(G_copy, num_nodes_to_remove):
    """Non-adaptive betweenness attack.

    Ranks nodes by betweenness ONCE on the intact graph (shuffled first so
    ties break at random), then removes the top nodes in order, recording
    the giant-component size after each removal.
    """
    G = copy_graph(G_copy)
    GC_List = []
    GC_List.append(get_GC(G))
    between = nk.centrality.DynBetweenness(G)
    between.run()
    between_sequence = between.ranking()
    random.shuffle(between_sequence)
    between_sequence.sort(key = itemgetter(1), reverse = True)
    for i in range(num_nodes_to_remove):
        node_to_remove = between_sequence[i][0]
        G.removeNode(node_to_remove)
        GC_List.append(get_GC(G))
    return GC_List
def ABA_attack(G_copy, num_nodes_to_remove):
    """Adaptive betweenness attack: recompute betweenness after each removal
    and delete the current top-ranked node.

    NOTE(review): unlike ADA_attack, the ranking is NOT shuffled before
    sorting, so ties break deterministically -- confirm this is intended.
    """
    G = copy_graph(G_copy)
    GC_List = []
    GC_List.append(get_GC(G))
    for i in range(num_nodes_to_remove):
        print(i)  # progress trace
        between = nk.centrality.DynBetweenness(G)
        between.run()
        between_sequence = between.ranking()
        between_sequence.sort(key = itemgetter(1), reverse = True)
        node_to_remove = between_sequence[0][0]
        G.removeNode(node_to_remove)
        GC_List.append(get_GC(G))
    return GC_List
def RA_attack(G_copy, num_nodes_to_remove):
    """Random attack: remove a uniform random sample of nodes one by one,
    recording the giant-component size after each removal."""
    G = copy_graph(G_copy)
    GC_List = []
    GC_List.append(get_GC(G))
    all_nodes = random.sample(list(G.nodes()),num_nodes_to_remove)
    for i in all_nodes:
        G.removeNode(i)
        GC_List.append(get_GC(G))
    return GC_List
def big_RA_attack(G_copy, num_nodes_to_remove, num_sims):
    """Run RA_attack num_sims times and return the element-wise average
    giant-component curve across runs."""
    big_GC_List = []
    for i in range(num_sims):
        GC_list = RA_attack(G_copy,num_nodes_to_remove)
        big_GC_List.append(GC_list)
    avg_list = get_avg_list(big_GC_List)
    return avg_list
def get_betweenness_score(G, node):
    """Return node's betweenness centrality score in G (full recompute)."""
    between = nk.centrality.DynBetweenness(G)
    between.run()
    return between.score(node)
def get_degree_score(G, node):
    """Return node's degree in G."""
    return G.degree(node)
def get_coreness_score(G, node):
    """Return node's core number from the k-core decomposition of G."""
    coreness = nk.centrality.CoreDecomposition(G)
    coreness.run()
    partition = coreness.getPartition()
    core_number = partition.subsetOf(node)
    return core_number
def get_betweenness_score_list(G, node_list):
    """Return the betweenness score of each node in node_list, computing
    the centrality once for the whole graph."""
    ranking = nk.centrality.DynBetweenness(G)
    ranking.run()
    return [ranking.score(v) for v in node_list]
def get_degree_score_list(G, node_list):
    """Return the degree of each node in node_list, in the same order."""
    return [G.degree(v) for v in node_list]
def get_coreness_score_list(G, node_list):
    """Return the core number of each node in node_list, computing the
    k-core decomposition once for the whole graph."""
    decomposition = nk.centrality.CoreDecomposition(G)
    decomposition.run()
    partition = decomposition.getPartition()
    return [partition.subsetOf(v) for v in node_list]
def add_into_set(s, new_s):
    """Add every element of ``new_s`` into set ``s`` (in place) and return s.

    Thin wrapper over set.update, kept for interface compatibility with
    existing callers (e.g. get_dBN).
    """
    s.update(new_s)
    return s
def take_out_list(dBall, ball):
    """Return the elements of ``dBall`` that are not in ``ball``,
    preserving order and duplicates.

    Uses a set for membership so the cost is O(len(dBall) + len(ball))
    instead of O(len(dBall) * len(ball)).
    """
    exclude = set(ball)
    return [x for x in dBall if x not in exclude]
#change this such that the neighbors are diff
def get_dBN(G, node, radius):
    """BFS from ``node`` for ``radius`` steps.

    Returns (dBall, ball):
      dBall -- the boundary shell: nodes at exactly distance ``radius``
      ball  -- every node within distance ``radius`` (including node itself)
    Both are returned as lists.
    """
    dBall = set([node])
    ball = set([node])
    for i in range(radius):
        # Collect unvisited neighbours of the current shell.
        neighbor = []
        for j in dBall:
            for n in G.neighbors(j):
                if n in ball:
                    continue
                neighbor.append(n)
        ball = add_into_set(ball,neighbor)
        dBall = set(neighbor.copy())
    return (list(dBall),list(ball))
def get_all_dBN(G, radius):
    """For every node in the giant component, compute its dBall/ball sizes
    at the given radius.

    Returns three dicts keyed by node: shell size, ball size, and their
    ratio x_i = |dBall| / |ball|.
    """
    all_nodes = get_GC_nodes(G)
    dict_nodes_dBall = {}
    dict_nodes_ball = {}
    dict_nodes_x_i = {}
    for n in all_nodes:
        (dBall,ball) = get_dBN(G,n,radius)
        dict_nodes_dBall[n] = len(dBall)
        dict_nodes_ball[n] = len(ball)
        # ball always contains n itself, so no division by zero here.
        dict_nodes_x_i[n] = len(dBall) / len(ball)
    return (dict_nodes_dBall,dict_nodes_ball,dict_nodes_x_i)
def make_partitions(dict_nodes_x_i, step_size):
    """Histogram the x_i values of ``dict_nodes_x_i`` into bins of width
    ``step_size`` over [0, 1].

    Values that would land one past the end (x_i == 1.0, or any value
    above 1) are folded into the last bin.  Returns the list of bin counts.
    (Removed an unused ``counter`` variable from the original.)
    """
    num_partitions = int(1 / step_size)
    counts = [0] * num_partitions
    for value in dict_nodes_x_i.values():
        box = int(value / step_size)
        if box >= num_partitions:
            counts[-1] += 1
        else:
            counts[box] += 1
    return counts
def get_all_same_x_i(sorted_list, x_i_value):
    """Return the node ids of every (node, x_i) pair in ``sorted_list``
    whose x_i equals ``x_i_value``, preserving order."""
    return [node for node, score in sorted_list if score == x_i_value]
def get_largest_dball(dball_dict, node_list):
    """Return the node from ``node_list`` with the largest dBall size.

    Ties go to the earliest node in node_list; if no node has a positive
    dBall size, the sentinel 0 is returned (original behavior).
    (Removed a leftover debug print of each node's dBall size.)
    """
    largest_dball = 0
    largest_node = 0
    for node in node_list:
        size = dball_dict[node]
        if size > largest_dball:
            largest_dball = size
            largest_node = node
    return largest_node
def get_random_dball(node_list):
    """Pick one node uniformly at random from ``node_list``."""
    idx = random.randrange(len(node_list))
    return node_list[idx]
def dict_to_sorted_list(d):
    """Return every node id attaining the smallest non-zero value in ``d``.

    Pairs whose value is 0 are discarded; if nothing remains, an empty
    list is returned.
    """
    ranked = sorted(d.items(), key=lambda pair: pair[1])
    nonzero = [pair for pair in ranked if pair[1] != 0]
    if not nonzero:
        return nonzero
    smallest = nonzero[0][1]
    return get_all_same_x_i(nonzero, smallest)
def get_GC_nodes(G):
    """Return the node list of G's largest connected component."""
    comp = nk.components.DynConnectedComponents(G)
    comp.run()
    all_comp = comp.getComponents()
    # Sort components by size; the last one is the giant component.
    all_comp.sort(key = len)
    return all_comp[-1]
def get_GC(G):
    """Return the size (node count) of G's largest connected component."""
    comp = nk.components.DynConnectedComponents(G)
    comp.run()
    all_comp_sizes = comp.getComponentSizes()
    all_values = list(all_comp_sizes.values())
    all_values.sort()
    return all_values[-1]
def copy_graph(G):
    """Return an independent copy of the networkit graph G
    (same nodes, same edges)."""
    duplicate = G.copyNodes()
    for u, v in G.edges():
        duplicate.addEdge(u, v)
    return duplicate
#dball, vball, degree, betweenness, coreness
def dBalls_attack(G_copy, radius):
    """Adaptive dBall attack.

    Repeatedly: recompute x_i = |dBall|/|ball| for every GC node, pick a
    random node among those with the smallest non-zero x_i, and remove its
    whole boundary shell (dBall).  Records, per step, the GC size, removal
    counter, ball sizes, and degree/betweenness/coreness of the chosen
    center node and of the removed shell nodes.  Stops when no node has a
    non-empty shell or the whole graph has been consumed.
    """
    G = copy_graph(G_copy)
    GC_List = []
    size_dball = []
    size_ball = []
    degree_list_mainNode = []
    betweenness_list_mainNode = []
    coreness_list_mainNode = []
    degree_list_removedNode = []
    betweenness_list_removedNode = []
    coreness_list_removedNode = []
    counter = 0
    counter_list = []
    GC_List.append(get_GC(G))
    counter_list.append(counter)
    num_nodes_to_remove = G.numberOfNodes()
    while counter < num_nodes_to_remove:
        print(counter)  # progress trace
        (dict_nodes_dBall,dict_nodes_ball,dict_nodes_x_i) = get_all_dBN(G,radius)
        list_to_remove = dict_to_sorted_list(dict_nodes_x_i)
        # No node has a non-empty shell left: attack cannot proceed.
        if len(list_to_remove) == 0:
            break
        node = get_random_dball(list_to_remove)
        (dBall,ball) = get_dBN(G,node,radius)
        # Record centralities of the center node (index 0) and shell nodes.
        combined_list = [node] + dBall
        between_list = get_betweenness_score_list(G,combined_list)
        degree_list = get_degree_score_list(G,combined_list)
        coreness_list = get_coreness_score_list(G,combined_list)
        degree_list_mainNode.append(degree_list[0])
        betweenness_list_mainNode.append(between_list[0])
        coreness_list_mainNode.append(coreness_list[0])
        degree_list_removedNode += degree_list[1:]
        betweenness_list_removedNode += between_list[1:]
        coreness_list_removedNode += coreness_list[1:]
        size_dball.append(len(dBall))
        size_ball.append(len(ball))
        #print(dBall)
        #print(ball)
        # Remove the whole shell; note the center node itself survives.
        for i in dBall:
            G.removeNode(i)
            counter += 1
        GC_List.append(get_GC(G))
        counter_list.append(counter)
    return (GC_List,counter_list,size_dball,size_ball,degree_list_mainNode,betweenness_list_mainNode,coreness_list_mainNode,degree_list_removedNode,betweenness_list_removedNode,coreness_list_removedNode)
def dBalls_attack_NA(G_copy, radius):
    """Non-adaptive dBall attack.

    Computes x_i for all GC nodes ONCE on the intact graph, orders nodes by
    increasing x_i (random tie-break via shuffle), then walks that fixed
    order, removing each still-present node's current boundary shell.
    Records the same per-step statistics as dBalls_attack.
    """
    G = copy_graph(G_copy)
    GC_List = []
    size_dball = []
    size_ball = []
    degree_list_mainNode = []
    betweenness_list_mainNode = []
    coreness_list_mainNode = []
    degree_list_removedNode = []
    betweenness_list_removedNode = []
    coreness_list_removedNode = []
    counter = 0
    counter_list = []
    GC_List.append(get_GC(G))
    counter_list.append(counter)
    num_nodes_to_remove = G.numberOfNodes()
    # Fixed removal order computed once on the intact graph.
    (dict_nodes_dBall,dict_nodes_ball,dict_nodes_x_i) = get_all_dBN(G,radius)
    list_to_remove = dict_to_sorted_list_NA(dict_nodes_x_i)
    counter_for_nodes = 0
    print(dict_nodes_x_i)   # debug trace
    print(list_to_remove)   # debug trace
    while counter_for_nodes < len(list_to_remove):
        curr_nodes_set = set(list(G.nodes()))
        node = list_to_remove[counter_for_nodes][0]
        print(node,dict_nodes_dBall[node])  # debug trace
        # Skip centers that were already removed as part of an earlier shell.
        if node not in curr_nodes_set:
            counter_for_nodes += 1
            continue
        (dBall,ball) = get_dBN(G,node,radius)
        # Skip centers whose current shell is empty.
        if len(dBall) == 0:
            counter_for_nodes += 1
            continue
        size_dball.append(len(dBall))
        size_ball.append(len(ball))
        combined_list = [node] + dBall
        between_list = get_betweenness_score_list(G,combined_list)
        degree_list = get_degree_score_list(G,combined_list)
        coreness_list = get_coreness_score_list(G,combined_list)
        degree_list_mainNode.append(degree_list[0])
        betweenness_list_mainNode.append(between_list[0])
        coreness_list_mainNode.append(coreness_list[0])
        degree_list_removedNode += degree_list[1:]
        betweenness_list_removedNode += between_list[1:]
        coreness_list_removedNode += coreness_list[1:]
        for i in dBall:
            G.removeNode(i)
            counter += 1
        GC_List.append(get_GC(G))
        counter_list.append(counter)
        counter_for_nodes += 1
    return (GC_List,counter_list,size_dball,size_ball,degree_list_mainNode,betweenness_list_mainNode,coreness_list_mainNode,degree_list_removedNode,betweenness_list_removedNode,coreness_list_removedNode)
def dict_to_sorted_list_NA(d):
    """Return d's (node, value) pairs sorted by increasing value.

    The list is shuffled before the stable sort, so pairs sharing the same
    value end up in random relative order.
    """
    pairs = list(d.items())
    random.shuffle(pairs)
    pairs.sort(key=itemgetter(1))
    return pairs
def get_avg_list(big_list):
    """Average the inner lists of ``big_list`` element-wise.

    The result has the length of big_list[0]; every inner list must be at
    least that long.
    """
    width = len(big_list[0])
    return [sum(inner[i] for inner in big_list) / len(big_list)
            for i in range(width)]
def turn_lists_together(GC_List, num_nodes_removed):
    """Expand a per-step GC list into a per-removed-node list.

    Entry k of ``GC_List`` is repeated until the cumulative node count in
    ``num_nodes_removed[k]`` is reached, yielding one GC value per removed
    node.
    """
    expanded = []
    filled = 0
    for slot, target in enumerate(num_nodes_removed):
        for _ in range(target - filled):
            expanded.append(GC_List[slot])
            filled += 1
    return expanded
def random_ball_removal(G_copy, radius, num_nodes_to_remove):
    """Remove random dBalls until ``num_nodes_to_remove`` nodes are gone.

    Each round picks a random GC node and removes its boundary shell.
    After too many consecutive empty-shell picks (> 10% of N), falls back
    to removing a plain random node sample for the remaining budget.
    Returns (GC sizes per removal step, shell sizes, ball sizes).
    """
    G = copy_graph(G_copy)
    counter = 0
    GC_list = []
    size_dball = []
    size_ball = []
    continue_counter = 0
    N = G.numberOfNodes()
    while counter < num_nodes_to_remove:
        # Fallback: shells keep coming back empty, finish with random nodes.
        if continue_counter > (0.1 * N):
            all_nodes = list(G.nodes())
            node_sample = random.sample(all_nodes,(num_nodes_to_remove - counter))
            for i in node_sample:
                G.removeNode(i)
                counter += 1
                GC_list.append(get_GC(G))
            break
        print(counter)  # progress trace
        all_nodes = get_GC_nodes(G)
        node = random.choice(all_nodes)
        (dBall,ball) = get_dBN(G,node,radius)
        if len(dBall) == 0:
            continue_counter += 1
            continue
        size_dball.append(len(dBall))
        size_ball.append(len(ball))
        for i in dBall:
            G.removeNode(i)
            counter += 1
        GC_list.append(get_GC(G))
        continue_counter = 0
    return (GC_list,size_dball,size_ball)
def big_sim(N, k, SEED, radius, perc_to_remove, num_sims):
    """Run the dBall percolation process on num_sims fresh ER graphs and
    collect per-run GC curves, shell sizes, ball sizes and degree lists.

    NOTE(review): perc_process_dBalls is not defined in this module --
    this function will raise NameError as-is; confirm where it lives.
    """
    big_GC_List = []
    big_size_dball = []
    big_size_ball = []
    big_dg_list = []
    for i in range(num_sims):
        # Distinct seed per run so graphs differ but remain reproducible.
        G_nx = nx.erdos_renyi_graph(N, k/(N-1), seed = SEED * (i+1))
        G_nk = nk.nxadapter.nx2nk(G_nx)
        num_nodes_to_remove = int(perc_to_remove * N)
        (GC_List,size_dball,size_ball,dg_list) = perc_process_dBalls(G_nk,radius,num_nodes_to_remove)
        GC_List_to_append = GC_List[:num_nodes_to_remove]
        big_GC_List.append(GC_List_to_append)
        big_size_dball.append(size_dball)
        big_size_ball.append(size_ball)
        big_dg_list.append(dg_list)
    return (big_GC_List,big_size_dball,big_size_ball,big_dg_list)
def big_sim_dball(N, k, SEED, radius, perc_to_remove, num_sims):
    """Same as big_sim but drives the big-dBall variant of the process.

    NOTE(review): perc_process_dBalls_bigDBalls is not defined in this
    module -- this will raise NameError as-is; confirm where it lives.
    """
    big_GC_List = []
    big_size_dball = []
    big_size_ball = []
    big_dg_list = []
    for i in range(num_sims):
        G_nx = nx.erdos_renyi_graph(N, k/(N-1), seed = SEED * (i+1))
        G_nk = nk.nxadapter.nx2nk(G_nx)
        num_nodes_to_remove = int(perc_to_remove * N)
        (GC_List,size_dball,size_ball,dg_list) = perc_process_dBalls_bigDBalls(G_nk,radius,num_nodes_to_remove)
        GC_List_to_append = GC_List[:num_nodes_to_remove]
        big_GC_List.append(GC_List_to_append)
        big_size_dball.append(size_dball)
        big_size_ball.append(size_ball)
        big_dg_list.append(dg_list)
    return (big_GC_List,big_size_dball,big_size_ball,big_dg_list)
def big_sim_SF(N, k, exp_out, radius, perc_to_remove, num_sims):
    """Run the dBall percolation process on num_sims scale-free graphs.

    NOTE(review): make_SF_Graph requires a SEED argument but is called
    with only three, and perc_process_dBalls is not defined in this
    module -- this function will raise as-is; confirm intended usage.
    """
    big_GC_List = []
    big_size_ball = []
    big_size_dball = []
    big_dg_list = []
    for i in range(num_sims):
        G_nk = make_SF_Graph(N,k,exp_out)
        num_nodes_to_remove = int(perc_to_remove * N)
        (GC_List,size_dball,size_ball,degree_list) = perc_process_dBalls(G_nk,radius,num_nodes_to_remove)
        GC_List_to_append = GC_List[:num_nodes_to_remove]
        big_GC_List.append(GC_List_to_append)
        big_size_ball.append(size_ball)
        big_size_dball.append(size_dball)
        big_dg_list.append(degree_list)
    return (big_GC_List,big_size_dball,big_size_ball,big_dg_list)
def big_sim_changing_radius(G, start_radius, end_radius):
    """Run the ball-tracking percolation process for every radius in
    [start_radius, end_radius] and collect GC curves and counter lists.

    NOTE(review): perc_process_dBalls_track_balls is not defined in this
    module -- this will raise NameError as-is; confirm where it lives.
    """
    big_GC_List = []
    big_counter_list = []
    curr_radius = start_radius
    while curr_radius <= end_radius:
        (GC_List,size_dball,size_ball,degree_list,counter_list) = perc_process_dBalls_track_balls(G,curr_radius)
        big_GC_List.append(GC_List)
        big_counter_list.append(counter_list)
        curr_radius += 1
    return (big_GC_List,big_counter_list)
def get_results_NA(G, radius):
    """Run the non-adaptive attack suite (degree, betweenness, averaged
    random, and non-adaptive dBall) against 99% of G's nodes.

    Returns the GC curves of the three baseline attacks followed by the
    full dBall statistics tuple.

    Bug fix: the original passed an undefined name ``G_copy`` to
    dBalls_attack_NA; the parameter is ``G``.
    """
    N = G.numberOfNodes()
    GC_list_DA = DA_attack(G, int(N * 0.99))
    GC_list_BA = BA_attack(G, int(N * 0.99))
    GC_list_RAN = big_RA_attack(G, int(N * 0.99), 20)
    (GC_List_DB, counter_list, size_dball, size_ball,
     degree_list_mainNode, betweenness_list_mainNode, coreness_list_mainNode,
     degree_list_removedNode, betweenness_list_removedNode,
     coreness_list_removedNode) = dBalls_attack_NA(G, radius)
    return (GC_list_DA, GC_list_BA, GC_list_RAN, GC_List_DB, counter_list, size_dball, size_ball, degree_list_mainNode, betweenness_list_mainNode, coreness_list_mainNode, degree_list_removedNode, betweenness_list_removedNode, coreness_list_removedNode)
def get_result(G, radius):
    """Run the adaptive attack suite (adaptive degree, adaptive
    betweenness, averaged random, and adaptive dBall) against 99% of G's
    nodes, and return all recorded curves and statistics."""
    N = G.numberOfNodes()
    GC_list_ADA = ADA_attack(G, int(N * 0.99))
    GC_list_ABA = ABA_attack(G, int(N * 0.99))
    GC_list_RAN = big_RA_attack(G,int(N * 0.99),20)
    (GC_List_DB,counter_list,size_dball,size_ball,degree_list_mainNode,betweenness_list_mainNode,coreness_list_mainNode,degree_list_removedNode,betweenness_list_removedNode,coreness_list_removedNode) = dBalls_attack(G,radius)
    return (GC_list_ADA, GC_list_ABA, GC_list_RAN, GC_List_DB, counter_list, size_dball, size_ball, degree_list_mainNode, betweenness_list_mainNode, coreness_list_mainNode, degree_list_removedNode, betweenness_list_removedNode, coreness_list_removedNode)
# --- CLI: graph size, mean degree, power-law exponent, RNG seed, ball radius.
N=int(sys.argv[1])
k=float(sys.argv[2])
exp_out = float(sys.argv[3])
SEED=int(sys.argv[4])
radius = int(sys.argv[5])

# Build one scale-free graph and run the full adaptive attack suite on it.
G = make_SF_Graph(N,k,exp_out,SEED)
(GC_list_ADA, GC_list_ABA, GC_list_RAN, GC_List_DB, counter_list, size_dball, size_ball, degree_list_mainNode, betweenness_list_mainNode, coreness_list_mainNode, degree_list_removedNode, betweenness_list_removedNode, coreness_list_removedNode) = get_result(G, radius)

"""
GC_list_DA = DA_attack(G,int(N * 0.99))
GC_list_BA = BA_attack(G,int(N * 0.99))
print(GC_list_DA)
print(GC_list_BA)
"""

# Output-file name prefixes, one per recorded metric.
init_name_GC_Deg = "attackDEG_SF_GC"
init_name_GC_Bet = "attackBET_SF_GC"
init_name_GC_Ran = "attackRAN_SF_GC"
init_name_GC_DB = "attackDB_SF_GC"
init_name_dball = "attackDB_SF_DBALL"
init_name_ball = "attackDB_SF_BALL"
init_name_CL = "attackDB_SF_CL"
init_name_deg_mainNode = "attackDB_SF_degMainNode"
init_name_deg_removedNode = "attackDB_SF_degRemovedNode"
init_name_bet_mainNode = "attackDB_SF_betMainNode"
init_name_bet_removedNode = "attackDB_SF_betRemovedNode"
init_name_core_mainNode = "attackDB_SF_coreMainNode"
init_name_core_removedNode = "attackDB_SF_coreRemovedNode"

# Expand each prefix into the parameter-stamped pickle filename.
GC_List_Deg_name = get_name_SF(init_name_GC_Deg, N,k,exp_out,SEED,radius)
GC_List_Bet_name = get_name_SF(init_name_GC_Bet, N,k,exp_out,SEED,radius)
GC_List_Ran_name = get_name_SF(init_name_GC_Ran, N,k,exp_out,SEED,radius)
GC_List_DB_name = get_name_SF(init_name_GC_DB, N,k,exp_out,SEED,radius)
CL_name = get_name_SF(init_name_CL, N,k,exp_out,SEED,radius)
dBall_name = get_name_SF(init_name_dball, N,k,exp_out,SEED,radius)
ball_name = get_name_SF(init_name_ball, N,k,exp_out,SEED,radius)
deg_mainNode_name = get_name_SF(init_name_deg_mainNode, N,k,exp_out,SEED,radius)
deg_removedNode_name = get_name_SF(init_name_deg_removedNode, N,k,exp_out,SEED,radius)
bet_mainNode_name = get_name_SF(init_name_bet_mainNode, N,k,exp_out,SEED,radius)
bet_removedNode_name = get_name_SF(init_name_bet_removedNode, N,k,exp_out,SEED,radius)
core_mainNode_name = get_name_SF(init_name_core_mainNode, N,k,exp_out,SEED,radius)
core_removedNode_name = get_name_SF(init_name_core_removedNode, N,k,exp_out,SEED,radius)

# Persist every recorded series to its own pickle file.
with open(GC_List_Deg_name,'wb') as handle:
    pickle.dump(GC_list_ADA, handle, protocol=pickle.HIGHEST_PROTOCOL)
with open(GC_List_Bet_name,'wb') as handle:
    pickle.dump(GC_list_ABA, handle, protocol=pickle.HIGHEST_PROTOCOL)
with open(GC_List_Ran_name,'wb') as handle:
    pickle.dump(GC_list_RAN, handle, protocol=pickle.HIGHEST_PROTOCOL)
with open(GC_List_DB_name,'wb') as handle:
    pickle.dump(GC_List_DB, handle, protocol=pickle.HIGHEST_PROTOCOL)
with open(CL_name,'wb') as handle:
    pickle.dump(counter_list, handle, protocol=pickle.HIGHEST_PROTOCOL)
with open(dBall_name,'wb') as handle:
    pickle.dump(size_dball, handle, protocol=pickle.HIGHEST_PROTOCOL)
with open(ball_name,'wb') as handle:
    pickle.dump(size_ball, handle, protocol=pickle.HIGHEST_PROTOCOL)
with open(deg_mainNode_name,'wb') as handle:
    pickle.dump(degree_list_mainNode, handle, protocol=pickle.HIGHEST_PROTOCOL)
with open(bet_mainNode_name,'wb') as handle:
    pickle.dump(betweenness_list_mainNode, handle, protocol=pickle.HIGHEST_PROTOCOL)
with open(core_mainNode_name,'wb') as handle:
    pickle.dump(coreness_list_mainNode, handle, protocol=pickle.HIGHEST_PROTOCOL)
with open(deg_removedNode_name,'wb') as handle:
    pickle.dump(degree_list_removedNode, handle, protocol=pickle.HIGHEST_PROTOCOL)
with open(bet_removedNode_name,'wb') as handle:
    pickle.dump(betweenness_list_removedNode, handle, protocol=pickle.HIGHEST_PROTOCOL)
with open(core_removedNode_name,'wb') as handle:
    pickle.dump(coreness_list_removedNode, handle, protocol=pickle.HIGHEST_PROTOCOL)

# Final summary dump to stdout.
print(degree_list_mainNode)
print(degree_list_removedNode)
print(betweenness_list_mainNode)
print(betweenness_list_removedNode)
print(coreness_list_mainNode)
print(coreness_list_removedNode)
| [
"vpahari@wesleyan.edu"
] | vpahari@wesleyan.edu |
d6655f4db0445ea8000cfd7c7f697c12e129b47d | b99b57dad607408d4e39423a60654bd9927fbbfe | /2017/15.py | b6021cfa5914f0d3971a60a65b23b82385a8630e | [] | no_license | fbrizu/AdventOfCode | 1ef00bae110e1dc0add4477ba0ecabfcb1cb1dfb | 6978b8bab6ee77260f208008c43565a780375af2 | refs/heads/master | 2020-11-29T16:20:28.866599 | 2019-12-31T04:16:06 | 2019-12-31T04:16:06 | 230,165,982 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 665 | py | def p1():
f1 = 16807
f2 = 48271
g1 = 703
g2 = 516
count = 0
for _ in range(40000000):
g1 = g1 * f1 % 2147483647
g2 = g2 * f2 % 2147483647
if g1%65536 == g2%65536:
count += 1
print(count)
def p2():
f1 = 16807
f2 = 48271
g1 = 703
g2 = 516
count = 0
for _ in range(5000000):
t1 = True
t2 = True
while t1 or g1%4 != 0:
g1 = g1 * f1 % 2147483647
t1 = False
while t2 or g2%8 != 0:
g2 = g2 * f2 % 2147483647
t2 = False
if g1%65536 == g2%65536:
count += 1
print(count)
p1()
p2() | [
"frank.brizuela@mail.mcgill.ca"
] | frank.brizuela@mail.mcgill.ca |
a08ba34f8acffacbc09afb49fda4f3c7c36d7a31 | 0448321097219bd8991809ced2816c743677656a | /graph_classes/graphclass_cycle.py | 6c135edc42518906830b1f7b4990de890ba50f2f | [] | no_license | WonkySpecs/mutant-network-sim | 508bc976997fbd9ed92f286c1064773fb85860bb | 29ca68da5ce0956d18a9c5cd9a3278f3fad6b17c | refs/heads/master | 2021-01-11T02:04:05.589870 | 2017-05-07T23:50:24 | 2017-05-07T23:50:24 | 70,815,153 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 682 | py | import networkx as nx
import graph_classes.graphclass as gc
class GraphClass_Cycle(gc.GraphClass):
    """Cycle graph: node i is joined to i-1 and i+1, with ends joined."""

    def buildGraph(self, parameters):
        """Return an nx.Graph cycle on `parameters['nodes']` nodes, or the
        validation error message string if the parameters are invalid.
        """
        convertedParams = self.checkParamsValid(parameters)
        # Check if error (Will return an error message)
        if isinstance(convertedParams, str):  # was type(..) == str; same intent
            return convertedParams
        nodes = convertedParams['nodes']
        G = nx.Graph()
        # Path 0-1-...-(n-1), then close the loop.
        # NOTE(review): nodes < 3 yields a degenerate cycle (self-loop for 1).
        for i in range(nodes - 1):
            G.add_edge(i, i + 1)
        G.add_edge(0, nodes - 1)
        return G

    metadata = {
        "name" : "cycle",
        "display_name" : "Cycle",
        "parameters" : {"nodes" : {'type' : 'int'}},
        "description" : ( "Basic graph class - each node has 2 neighbours"
                          "\nnodes parameter is the number of nodes")
    }
| [
"w1ll100@hotmail.co.uk"
] | w1ll100@hotmail.co.uk |
aae4fe954783bb2351c32680008744a147445fac | 2aa45e68f6ad580e3e19139b5df88ca53f7f275a | /CreateLabelPermutation.py | 2c414faf42b6bf41b2502065a4f607dd3815455e | [
"MIT"
] | permissive | GISU2KM/GraphLearning | a97d80474137f8bac68e3005132ae0265a51600b | c7cdb71982463297f26635e92b6b27cefdc0b47f | refs/heads/master | 2022-12-23T06:14:17.137655 | 2020-10-09T03:06:51 | 2020-10-09T03:06:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,372 | py | import graphlearning as gl
import numpy as np
import sys, getopt
import os.path as path
def print_help():
print('=======================================================')
print('GraphLearning: Python package for graph-based learning.')
print('=======================================================')
print('=======================================================')
print('Create Label Permutation')
print('=======================================================')
print(' ')
print('Options:')
print(' -d (--dataset=): MNIST, FashionMNIST,...more soon (default=MNIST)')
print(' -m (--NumLabels=): Number of labels per class for each trial (default=1,2,3,4,5)')
print(' -t (--NumTrials=): Number of trials (default=100)')
print(' -n (--name=): Permutation name in form dataset<name>_permutations.npz (default is empty)')
print(' -s (--multiplier=): List of multipliers for each class, to produce unbalanced experiments (default is balanced 1,1,1,1,1)')
print(' -o (--overwrite=): Overwrite existing file.')
#Default settings
dataset = 'MNIST'
m = '1,2,3,4,5'
multiplier = None
t = 100
name = ''
overwrite = False
#Read command line arguments
try:
opts, args = getopt.getopt(sys.argv[1:],"hd:m:t:n:s:o",["dataset=","NumLabels=","NumTrials=","name=","multiplier=","overwrite"])
except getopt.GetoptError:
print_help()
sys.exit(2)
for opt, arg in opts:
if opt == '-h':
print_help()
sys.exit()
elif opt in ("-d", "--dataset"):
dataset = arg
elif opt in ("-m", "--NumLabels"):
m = arg
elif opt in ("-t", "--NumTrials"):
t = int(arg)
elif opt in ("-s", "--multiplier"):
multiplier = arg
multiplier = [float(e) for e in multiplier.split(',')]
elif opt in ("-n", "--name"):
name = arg
elif opt in ("-o", "--overwrite"):
overwrite = True
outfile = "LabelPermutations/"+dataset+name+"_permutations.npz"
#Print basic info
print('=======================================================')
print('GraphLearning: Python package for graph-based learning.')
print('=======================================================')
print('=======================================================')
print('Compute Label Permutations')
print('=======================================================')
print(' ')
print('Dataset: '+dataset)
print('Number of Labels per trial: '+m)
print('Number of Trials: %d'%t)
print('Output file: '+outfile)
print(' ')
print('=======================================================')
print(' ')
#Load labels
try:
M = np.load("Data/"+dataset+"_labels.npz",allow_pickle=True)
except:
print('Cannot find dataset Data/'+dataset+'_labels.npz')
sys.exit(2)
#Extract labels
labels = M['labels']
#Convert string to int list
m = [int(e) for e in m.split(',')]
#Create label permutations
perm = gl.create_label_permutations(labels,t,m,multiplier)
#Save weight matrix to file
if path.isfile(outfile) and not overwrite:
print('Output file: '+outfile+' already exists. Aborting...')
sys.exit(2)
else:
np.savez_compressed(outfile,perm=perm)
| [
"noreply@github.com"
] | noreply@github.com |
852ecca1e2ce71ff84694ec84775776ecab7d1c6 | 3521f78c180ed2b403c5f9d66dcd1ea7240c0eff | /2015/table.py | 83c075c6a1e3a408ebb0cd8ce8539099b3e525d6 | [
"BSD-3-Clause"
] | permissive | timm/sbse14 | f5b683a4cebbf8655f50b10215ac328a61c54185 | 006df700af17566ee2b66cd53de9587004558772 | refs/heads/master | 2016-09-06T09:36:06.618949 | 2014-09-17T03:24:21 | 2014-09-17T03:24:21 | 23,025,059 | 15 | 5 | null | null | null | null | UTF-8 | Python | false | false | 2,069 | py | from __future__ import division
import sys,random,math,re
sys.dont_write_bytecode = True
class o():
"Anonymous container"
def __init__(i,**fields): i.has().update(fields)
def has(i) : return i.__dict__
#def __getattr__(i,k) : return i.__dict__[k]
#def __setattr__(i,k,v) : i.__dict__[k] = v; return v
def __repr__(i):
name = i.__class__.__name__
return name+'{'+' '.join([':%s %s' % (k,i.has()[k])
for k in i.public()])+ '}'
def public(i):
return [k for k in sorted(i.has().keys())
if not "_" in k]
about = classmethod
akos = dict(nums = r'^[\$<>]',
syms = r'^[^\$<>]',
klass = r'^[=]',
indep = r'^[^<>=]',
dep = r'^[=<>]',
less = r'^[<]',
more = r'^[>]',
ignore = r'^[/]')
class Log:
def __repr__(i): return '%s(%s,%s)' % (i.__class__.__name__,i.txt,i.col)
def log(i,val): pass
def __init__(i,txt="",col=None,w=1):
i.txt, i.col,i.w=txt,col,w
class Num(Log): pass
class Thing(Num): pass
class Sym(Log): pass
class Row:
fields = {'gender' :Sym,
'age' :Num,
'$shoesize' :Num,
'>lifeExpectancy':Thing}
seen=re.match
def complete(klass):
skip="\?"
klass.cols = o()
cols = klass.cols.has()
for ako in akos.keys():
cols[ako]=[]
cols["eden"] = []
for c,(name,klass) in enumerate(klass.fields.items()):
if not seen(skip, name):
cols["eden"] += [(c,name,klass)]
for ako,pattern in akos.items():
if seen(pattern,name):
cols[ako] += [c]
return klass
class Table:
def __init__(i,about):
i.about = complete(about)
i.rows = []
i.cols = i.headers0(about.cols.eden)
def headers0(i,pairs):
return [klass(name,c) for
c,name,klass in pairs]
def cellhead(i,row,*whats):
for what in whats:
return row[c],i.headers[c]
def log(i,row):
for h in i.headers:
h.log(row[h.col])
tbl = Table(Row)
print tbl.cols
print tbl.about.cols
| [
"tim@menzies.us"
] | tim@menzies.us |
450b8912a42d43ebcefc08d7362f8d72543645b0 | d51a07fe227a1cdb3c5bded913a4ac22895afbd1 | /app/controllers/test_auth_controller.py | 9cfe8288db2591430ed613b30509d7909d8a4ad7 | [] | no_license | serlesen/PyDB | 08c1abb9bdba9e1fd3025271810ba98a95d85f69 | 4e6762f180bfe61c5c86610cb53b3b19692b3736 | refs/heads/master | 2023-02-22T14:10:47.559916 | 2021-11-02T08:39:12 | 2021-11-02T08:39:12 | 219,302,097 | 0 | 1 | null | 2023-02-15T22:57:59 | 2019-11-03T13:04:45 | Python | UTF-8 | Python | false | false | 3,015 | py | import json
import unittest
from app.controllers import app
from app.test.collections_simulator import CollectionsSimulator
from app.tools.database_context import DatabaseContext
from app.threads.threads_manager import ThreadsManager
class AuthControllerTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
if DatabaseContext.THREADS_MANAGER_CYCLING == False:
DatabaseContext.THREADS_MANAGER_CYCLING = True
ThreadsManager().start()
CollectionsSimulator.build_users_col()
def setUp(self):
app.config["TESTING"] = True
app.config["DEBUG"] = True
self.app = app.test_client()
@classmethod
def tearDownClass(cls):
CollectionsSimulator.clean()
DatabaseContext.THREADS_MANAGER_CYCLING = False
def test_login(self):
response = self.app.post('/auth/login', data=json.dumps({'login': 'admin', 'password': 'admin'}), content_type='application/json')
self.assertEqual(response.status_code, 200)
login_info = json.loads(response.data)
self.assertEqual(login_info['login'], 'admin')
self.assertTrue('token' in login_info)
def test_logout(self):
response = self.app.post('/auth/login', data=json.dumps({'login': 'admin', 'password': 'admin'}), content_type='application/json')
token = json.loads(response.data)['token']
response = self.app.post('/auth/logout', headers={'Authorization': 'Bearer {}'.format(token)})
self.assertEqual(response.status_code, 200)
def test_create_user(self):
response = self.app.post('/auth/login', data=json.dumps({'login': 'admin', 'password': 'admin'}), content_type='application/json')
token = json.loads(response.data)['token']
response = self.app.post('/auth/user', data=json.dumps({'login': 'new-user', 'password': 'new-user'}), headers={'Authorization': 'Bearer {}'.format(token)}, content_type='application/json')
self.assertEqual(response.status_code, 200)
response = self.app.post('/auth/login', data=json.dumps({'login': 'new-user', 'password': 'new-user'}), content_type='application/json')
self.assertEqual(response.status_code, 200)
login_info = json.loads(response.data)
self.assertEqual(login_info['login'], 'new-user')
self.assertTrue('token' in login_info)
def test_get_user(self):
response = self.app.post('/auth/login', data=json.dumps({'login': 'admin', 'password': 'admin'}), content_type='application/json')
token = json.loads(response.data)['token']
response = self.app.get('/auth/user', headers={'Authorization': 'Bearer {}'.format(token)}, content_type='application/json')
self.assertEqual(response.status_code, 200)
user = json.loads(response.data)
self.assertEqual(user['login'], 'admin')
self.assertNotEqual(user['password'], 'admin')
def suite():
return unittest.TestLoader().loadTestsFromTestCase(AuthControllerTest)
| [
"sergio.lema@ekino.fr"
] | sergio.lema@ekino.fr |
653b22d6a16be1a36b54ae84a8fc932711d61ef1 | c239be070a4cf3dfdfdaad0b5cfd18224ed0a7ae | /main.py | 11de4360c99b210bd6b10bb1ce0bfa414b4c236a | [] | no_license | zubayerkader/Human-Activity-Classifier | e0f943f215076168690da3a493bf53e5666d41f3 | be613be606eba45ee2836eadc9e7118f1f779a7a | refs/heads/main | 2023-04-26T02:52:20.299839 | 2021-05-24T08:03:56 | 2021-05-24T08:03:56 | 370,272,734 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 230 | py | import filter_data
import ml_input
import analysis
import left_right_foot_classification
if __name__ == '__main__':
    # Pipeline order matters: clean/filter raw data, build the ML input,
    # run the analysis, then the left/right foot classifier.
    filter_data.main()
    ml_input.createMlData()
    analysis.main()
    left_right_foot_classification.main()
| [
"zubayerkader@gmail.com"
] | zubayerkader@gmail.com |
02ee7135b39cd74dfd1c357da4f0953d13ca1358 | 3965f8e3b1efcf1586859a079ca8b799e04f4109 | /sorting_algorithms.py | 6462a9ab6008ddf16d4a911a047c3483d7d2bac3 | [
"MIT"
] | permissive | linvieson/sorting-algorithms | 67c435138b3d47568cf6200efd9a3e1ce25d9096 | 22fcc3b1860b3181a30eb4256aec5374b49678f1 | refs/heads/main | 2023-08-28T10:01:15.415044 | 2021-10-06T13:41:13 | 2021-10-06T13:41:13 | 414,223,607 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,762 | py | def selection_sort(arr: list) -> list:
def selection_sort(arr: list) -> tuple:
    '''
    Perform selection sort in place; return (sorted array, comparison count).
    Return annotation fixed: the function returns a 2-tuple, not a list.
    '''
    comparisons = 0
    length = len(arr)
    if length <= 1:
        return arr, comparisons
    for index in range(length):
        # Find the minimum of the unsorted suffix, then swap it into place.
        current_min = index
        for index_in_unsorted in range(index + 1, length):
            comparisons += 1
            if arr[current_min] > arr[index_in_unsorted]:
                current_min = index_in_unsorted
        arr[index], arr[current_min] = arr[current_min], arr[index]
    return arr, comparisons
def insertion_sort(arr: list) -> tuple:
    '''
    Perform insertion sort in place; return (sorted array, comparison count).
    Return annotation fixed: the function returns a 2-tuple, not a list.
    Note: the count only covers comparisons that lead to a swap (the final
    failing comparison of each inner loop is not counted) — preserved as-is.
    '''
    comparisons = 0
    length = len(arr)
    if length <= 1:
        return arr, comparisons
    for index in range(length):
        inner_ind = index
        # Bubble arr[index] left until it meets a smaller-or-equal element.
        while (inner_ind > 0) and (arr[inner_ind] < arr[inner_ind - 1]):
            comparisons += 1
            arr[inner_ind], arr[inner_ind - 1] = arr[inner_ind - 1], arr[inner_ind]
            inner_ind -= 1
    return arr, comparisons
def merge_sort(arr: list) -> tuple:
    '''
    Perform merge sort in place; return (sorted array, comparison count).
    Return annotation fixed: the function returns a 2-tuple, not a list.
    Note: recursive calls sort the halves in place but their comparison
    counts are discarded, so the count covers only the top-level merge —
    preserved as-is.
    '''
    comparisons = 0
    length = len(arr)
    if length <= 1:
        return arr, comparisons
    middle = length // 2
    left, right = arr[:middle], arr[middle:]
    merge_sort(left), merge_sort(right)
    ind_left = ind_right = curr_ind = 0
    # Merge the two sorted halves back into arr.
    while ind_left < len(left) and ind_right < len(right):
        if left[ind_left] < right[ind_right]:
            arr[curr_ind] = left[ind_left]
            ind_left += 1
        else:
            arr[curr_ind] = right[ind_right]
            ind_right += 1
        comparisons += 1
        curr_ind += 1
    # Drain whichever half still has elements.
    while ind_left < len(left):
        arr[curr_ind] = left[ind_left]
        ind_left += 1
        curr_ind += 1
    while ind_right < len(right):
        arr[curr_ind] = right[ind_right]
        ind_right += 1
        curr_ind += 1
    return arr, comparisons
def shellsort(arr: list) -> tuple:
    '''
    Perform shellsort (Knuth gap sequence 1, 4, 13, ...) in place;
    return (sorted array, comparison count).
    Return annotation fixed: the function returns a 2-tuple, not a list.
    '''
    comparisons = 0
    length = len(arr)
    if length <= 1:
        return arr, comparisons
    # (A duplicate `gap = 1` before the length check was removed.)
    gap = 1
    while gap < (length / 3):
        gap = 3 * gap + 1
    while gap >= 1:
        for index in range(gap, length):
            # Gapped insertion: walk arr[index] back by `gap` steps.
            for inner_ind in range(index, gap - 1, -gap):
                comparisons += 1
                if arr[inner_ind] < arr[inner_ind - gap]:
                    arr[inner_ind], arr[inner_ind - gap] = arr[inner_ind - gap], arr[inner_ind]
                else:
                    break
        gap //= 3
    return arr, comparisons
| [
"alina.voronina@ucu.edu.ua"
] | alina.voronina@ucu.edu.ua |
c45f75d35e6c01f5b2a81a8d4f19fa45c755bca5 | 38acc071960622ab7923435fa085f2efaebe2144 | /pyemm/annular_sdd.py | a2f594249fef7befda94c050059d6d753ccd29b0 | [
"Apache-2.0"
] | permissive | drix00/pyelectronmicroscopymodeling | 72db4b7589d5cb9da0412e7bf7b847705412dc42 | 6a6d3bd174a8d6092332b59cae55c6b71dbba6d9 | refs/heads/master | 2022-11-02T15:59:34.744539 | 2020-06-18T00:12:59 | 2020-06-18T00:12:59 | 105,061,500 | 0 | 0 | Apache-2.0 | 2020-06-18T00:00:44 | 2017-09-27T19:40:10 | Python | UTF-8 | Python | false | false | 1,451 | py | #!/usr/bin/env python
"""
.. py:currentmodule:: annular_sdd
.. moduleauthor:: Hendrix Demers <hendrix.demers@mail.mcgill.ca>
Modeling of the McGill annular SDD.
"""
# Script information for the file.
__author__ = "Hendrix Demers (hendrix.demers@mail.mcgill.ca)"
__version__ = "0.1"
__date__ = "Apr 14, 2015"
__copyright__ = "Copyright (c) 2015 Hendrix Demers"
__license__ = "GPL 3"
# Standard library modules.
import logging
# Third party modules.
from xraylib import KL3_LINE
# Local modules.
# Project modules
from pyemm.montecarlo.xray_engine import create_xray_detector
from pyemm.montecarlo.xray import CharacteristicXRay
# Globals and constants variables.
def run():
    """Simulate 200 characteristic x-rays (K-L3 line) for each element in the
    list and print the engine's backscatter/absorption/transmission stats.
    """
    number_photons = 200
    xray_atomic_numbers = [5, 6, 7, 8, 14]  # B, C, N, O, Si
    # Loop variable renamed: it used to shadow the list it iterates.
    for atomic_number in xray_atomic_numbers:
        logging.info("Simulating %i", atomic_number)
        engine = create_xray_detector()
        for photon_ID in range(number_photons):
            logging.info("photon ID %i", photon_ID)
            # Photons travel straight down (-z) toward the detector.
            xray = CharacteristicXRay(atomic_number, KL3_LINE, direction=(0, 0, -1))
            engine.simulate(xray)
        print("Backscattered: %.4f" % engine.getBackscatteredCoefficient())
        print("Absorbed: %.4f" % engine.getAbsorbedCoefficient())
        # NOTE(review): "Tranmitted" spelling matches the engine's method
        # name; fix both together if ever renamed.
        print("Transmitted: %.4f" % engine.getTranmittedCoefficient())
if __name__ == '__main__':  # pragma: no cover
    run()
"12611589+drix00@users.noreply.github.com"
] | 12611589+drix00@users.noreply.github.com |
ac8073d5c442216c2a5bad5feb1918bfd8d7af96 | d8f146faafd1d9c0da015c401a49e48d84e10db3 | /app.py | 1e46b529917f631e2da4bf036b839bc8202e4745 | [] | no_license | vincentiuskedang/pdacapstone | 86ab0e53d2e7221789fb88779f38d0c9efdb5e48 | 2b0a8c708f85a6f87804556d6fc752bbfe584872 | refs/heads/main | 2023-08-27T20:46:10.344855 | 2021-10-25T20:21:54 | 2021-10-25T20:21:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,222 | py | from flask import Flask, render_template
import pandas as pd
import matplotlib
import matplotlib.pyplot as plt
from io import BytesIO
import base64
from bs4 import BeautifulSoup
import requests
#don't change this
matplotlib.use('Agg')

app = Flask(__name__) #do not change this

#insert the scrapping here
url_get = requests.get('https://www.coingecko.com/en/coins/ethereum/historical_data/usd?start_date=2020-01-01&end_date=2021-06-30#panel')
soup = BeautifulSoup(url_get.content,"html.parser")

#find your right key here
table = soup.find('table',attrs={'class':'table table-striped text-sm text-lg-normal'})
row = soup.find_all('th',attrs={'class':'font-semibold text-center'})
row_length = len(row)

# Hoisted out of the loop: the soup does not change between iterations.
volume_cells = soup.find_all('td',attrs={'class':'text-center'})
temp = [] #accumulates (Date, Volume) tuples
for i in range(0, row_length):
    #finding all Dates in table
    Date = row[i].text
    Date = Date.strip('\n')
    # finding all Volume in table (volume is the 2nd of 4 td cells per row)
    Volume = volume_cells[i*4+1].text
    Volume = Volume.strip('\n')
    #append the data that has been obtained, into an array.
    temp.append((Date, Volume))

#reverse the order of the list
temp = temp[::-1]
#change into dataframe
df = pd.DataFrame(temp,columns=('Date', 'Volume'))

#insert data wrangling here
#change to datetime64 type
df['Date'] = df['Date'].astype('datetime64')
#cleaning the data so we can analyze much better
# regex=False: strip the literal '$' and ',' characters.  As a regex
# (the old default) '$' means end-of-string, so the currency symbol was
# never removed and astype('float64') would fail.
df['Volume'] = df['Volume'].str.replace('$','',regex=False)
df['Volume'] = df['Volume'].str.replace(',','',regex=False)
#change to int64 type
df['Volume'] = df['Volume'].astype('float64')
#make Date as index
df = df.set_index('Date')
#end of data wranggling
@app.route("/")
def index():
    """Render the dashboard: mean daily volume plus a volume-over-time plot
    embedded as a base64 PNG.
    """
    card_data = f'{df["Volume"].mean().round(2)}' #be careful with the " and '
    # generate plot (the Axes return value was unused, so it is not bound)
    df.plot(figsize = (10,5))
    # Rendering plot
    # Do not change this
    figfile = BytesIO()
    plt.savefig(figfile, format='png', transparent=True)
    figfile.seek(0)
    figdata_png = base64.b64encode(figfile.getvalue())
    plot_result = str(figdata_png)[2:-1]  # strip the b'...' wrapper
    # render to html
    return render_template('index.html',
                           card_data = card_data,
                           plot_result=plot_result
                           )

if __name__ == "__main__":
    app.run(debug=True)
"noreply@github.com"
] | noreply@github.com |
81a24d352f994fb5d8d4865d93e5c1cbd9dcc4cd | 498b0f8a8f5ff26835efe855adc2fea8ed9c213f | /blockchain_drf/users/urls.py | 84207894598d20db554b7ee121caec04ef480ffb | [] | no_license | cryptobuks/blockchain-drf-app | cb87deca729d555f0f44db9e35ab192b578e20e5 | 97efa62c34e90ac62ce32b4d758f9daf5b31c5d4 | refs/heads/master | 2020-05-28T08:15:41.304110 | 2019-01-26T23:11:01 | 2019-01-26T23:11:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 335 | py | from django.urls import path
from users.views.users import (
CreateUserView, CreateTokenView,
ManageUserView
)
# URL namespace: reverse these routes as 'users:create', 'users:token', ...
app_name = 'users'
# Account endpoints backed by the views imported above: user creation,
# auth-token issuance, and self-service management of the current user.
urlpatterns = [
path('create/', CreateUserView.as_view(), name='create'),
path('token/', CreateTokenView.as_view(), name='token'),
path('me/', ManageUserView.as_view(), name='me'),
]
| [
"agcastro.py@yahoo.com"
] | agcastro.py@yahoo.com |
e74ac4d8165a99e360c067617af481624a327184 | 82435e420ff48fac5464ac0880b258de48eee63f | /mechMechanics.py | 53d7b0a3fd1ad3e5582d4eaabb7420c788cb1ce9 | [] | no_license | NiharikaRay/HearthstoneAnalysis | 6910aa5dccacc1e8a3dd5a0a0c269bf70832fe10 | 72165c37cda98efad284eceeb57402b27edf0e10 | refs/heads/master | 2021-01-18T13:00:18.844903 | 2015-02-04T01:32:06 | 2015-02-04T01:32:06 | 29,810,871 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,829 | py | import json
import numpy as np
import csv
from pprint import pprint
import matplotlib.pyplot as plt
from matplotlib.figure import Figure
def _decode_list(data):
rv = []
for item in data:
if isinstance(item, unicode):
item = item.encode('utf-8')
elif isinstance(item, list):
item = _decode_list(item)
elif isinstance(item, dict):
item = _decode_dict(item)
rv.append(item)
return rv
def _decode_dict(data):
    """Recursively convert unicode keys/values in *data* to UTF-8 bytes.

    Python 2 helper (uses dict.iteritems and the `unicode` builtin);
    installed as the json object_hook in getMechCards() below.
    """
    rv = {}
    for key, value in data.iteritems():
        if isinstance(key, unicode):
            key = key.encode('utf-8')
        if isinstance(value, unicode):
            value = value.encode('utf-8')
        elif isinstance(value, list):
            value = _decode_list(value)
        elif isinstance(value, dict):
            value = _decode_dict(value)
        rv[key] = value
    return rv
def getMechCards():
    """Load AllSets.enUS.json and return all collectible Minion cards whose
    race is "Mech".  (Python 2: `filter` returns lists here.)
    """
    with open("AllSets.enUS.json") as json_data:  # was open/close pair
        data = json.load(json_data, object_hook=_decode_dict)
    missionCards = filter(lambda x: ("collectible" in x), data["Missions"])
    classicCards = filter(lambda x: ("collectible" in x), data["Classic"])
    naxCards = filter(lambda x: ("collectible" in x), data["Curse of Naxxramas"])
    systemCards = filter(lambda x: ("collectible" in x),data["System"])
    creditsCards = filter(lambda x: ("collectible" in x),data["Credits"])
    basicCards = filter(lambda x: ("collectible" in x),data["Basic"])
    debugCards = filter(lambda x: ("collectible" in x),data["Debug"])
    promotionCards = filter(lambda x: ("collectible" in x),data["Promotion"])
    rewardCards = filter(lambda x: ("collectible" in x),data["Reward"])
    gvgCards = filter(lambda x: ("collectible" in x),data["Goblins vs Gnomes"])
    allCards = missionCards + classicCards + naxCards + systemCards + creditsCards + basicCards + debugCards + promotionCards + rewardCards + gvgCards
    allCards = filter(lambda x: (x["type"] == "Minion"), allCards)
    mechCards = []
    for card in allCards:
        if ("race" in card):
            race = card["race"]
            if (race == "Mech"):
                mechCards += [card]
    return mechCards
def countMechanics():
mechCards = getMechCards()
mechanicsCount = {}
mechanicsToCards = {}
for card in mechCards:
if ("mechanics" in card):
mechanics = card["mechanics"]
for m in mechanics:
if (m in mechanicsCount):
mechanicsCount[m] += 1
else:
mechanicsCount[m] = 1
if (m in mechanicsToCards):
mechanicsToCards[m] += [card["name"]]
else:
mechanicsToCards[m] = [card["name"]]
for (k, v) in mechanicsCount.iteritems():
print "There are " + str(v) + " cards with " + k
print "The cards with " + k + " are: "
pprint(mechanicsToCards[k])
print "\n"
countMechanics()
| [
"YOUR-EMAIL@DOMAIN.COM"
] | YOUR-EMAIL@DOMAIN.COM |
e168407eb15bcececca9947e72682be0c3429267 | 47596e586b3e21b31cf360be7cd1c7d3a5dc6163 | /Google/trafficSnapshot.py | 2dd2859f31fd1a85b4610e8d672e415ce5a7e784 | [] | no_license | jasonlingo/RoadSafety | bfef06abe0668a9cb8ead5b183008a53eabdefa2 | b20af54b915daf7635204e3b942b3ae4624887d7 | refs/heads/master | 2021-03-19T13:51:13.736277 | 2015-09-17T03:49:43 | 2015-09-17T03:49:43 | 36,019,601 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,140 | py | import sys
import os
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
from GPS.GPSPoint import GPSPoint
from File.Directory import createDirectory
import webbrowser
from Google.findTimeZone import findTimeZone
from time import sleep
from PIL import Image
import datetime, pytz
from config import TRAFFIC_IMAGE_DIRECTORY
def trafficSnapshot(gpsPoint, numOfShot, interval, size):
    """
    Capture traffic snapshots periodically using Google MAP traffic and store those images
    Args:
        (GPSPoint) gpsPoint: the center of the map from which we capture traffic images
        (int) numOfShot: the total number of images that are going to captured
        (int) interval: the interval (in seconds) between two captured images
        (int) size: the size of the map (from 3(big) to 21(detail))
    """
    # Create Google MAP with traffic info request url
    url = "https://www.google.com/maps/@"
    gps = str(gpsPoint.lat) + ',' + str(gpsPoint.lng)
    # The scale of the map.
    size = str(size) + "z"
    # Street view parameter.
    traffic_param = "/data=!5m1!1e1"
    # Combine request url
    url = url + gps + "," + size + traffic_param
    # Create the output directory if it doesn't exist.
    createDirectory(TRAFFIC_IMAGE_DIRECTORY)
    for i in range(numOfShot):
        # Open the Google MAP street view on a web browser.
        webbrowser.open(url)
        # Wait for the page opens
        sleep(5)
        # Get the current time of the location
        timezone, current_time = findTimeZone(gpsPoint)
        imgName = TRAFFIC_IMAGE_DIRECTORY + "traffic-" + current_time + ".png"
        command = "screencapture " + imgName
        # Screen shot (macOS-only `screencapture` utility)
        os.system(command)
        im = Image.open(imgName)
        # Get captured image size
        width, height = im.size
        # Crop the captured area, need to be customized depending on different computer
        im.crop((500, 350, width-300, height-30)).save(imgName)
        print(imgName + " captured!")  # parenthesized: valid in py2 and py3
        # Program sleeps for the interval time
        sleep(interval)
| [
"jasonlingo@gmail.com"
] | jasonlingo@gmail.com |
08875ef94e180b533919903f03a36659ac43e79f | fee1678e11e413049604eb1c4f087e1dff6b16b5 | /read_and_write_to_file_functions.py | c33664c199340a0817e61c83387643355fc05d76 | [] | no_license | scttohara/python_card_game | 037c5bd58c1dfff34ef027287865500421c8b4e8 | bfb577ad14c3627dd38b8a4f3d66e09877bdb079 | refs/heads/master | 2020-12-10T22:29:50.395039 | 2020-09-27T04:56:05 | 2020-09-27T04:56:05 | 233,729,633 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,772 | py | import csv
import animals_list
def get_animal_names():
    """Read animal names from animal_names.txt as CSV rows.

    Falls back to the built-in animals_list module if the file cannot be
    read or does not contain the full 20-row list (row 19's first field
    must be "10").
    @return: animals_list
    @rtype: list of lists
    """
    animals_list_of_lists = []
    try:
        with open('animal_names.txt', newline='\n') as csv_file:
            csv_reader = csv.reader(csv_file, delimiter=',')
            for row in csv_reader:
                animals_list_of_lists.append(row)
        # (redundant csv_file.close() removed: `with` already closes it)
    except PermissionError:
        print('\nALERT:\nCannot open file for reading')
        print('File might be open in another program.')
        print('The default list of animals will be used\n')
    except FileNotFoundError:
        print('\nALERT:\nCheck that the animal_names.txt file exist in the folder this game is in')
        print('The default list of animals will be used\n')
    except FileExistsError:
        print('\nALERT:\nIssue with reading in animal names from external file. Will use default list\n')
    # Validate the load: IndexError is raised as a sentinel when the list is
    # short or its 20th row is not the expected "10" marker.
    try:
        if int(animals_list_of_lists[19][0]) != 10:
            raise IndexError
    except IndexError:
        animals_list_of_lists = animals_list.animals_list_function()
    return animals_list_of_lists
# noinspection PyUnboundLocalVariable,PyUnboundLocalVariable,PyUnboundLocalVariable
def get_game_results_record(path_choice=1):
    """Load (or initialize) the win/draw record.

    @param path_choice: 1 if user wants to attempt to load past records. 2 if they want to start with new records
    @type path_choice: int
    @return: list of records, player1's record, player2's record, and the draw record
    @rtype: tuple
    """
    records_list_of_lists = []
    if path_choice == 1:  # loads from saved results
        try:
            with open('results_record.txt', newline='\n') as csv_file:
                csv_reader = csv.reader(csv_file, delimiter=',')
                for row in csv_reader:
                    records_list_of_lists.append(row)
            # (redundant csv_file.close() removed: `with` already closes it)
        except PermissionError:
            print('\nALERT:\nCannot open file for reading')
            print('File might be open in another program.\n')
            print('Past records will be set to zero for this session\n')
        except FileNotFoundError:
            print('\nALERT:\nCheck that the results_record.txt file exist in the folder this game is in')
            print('Past records will be set to zero for this session\n')
        except FileExistsError:
            print('\nALERT:\nCheck that the results_record.txt file exist in the folder this game is in')
            print('Past records will be set to zero for this session\n')
    if path_choice == 1 or path_choice == 2:
        # makes sure something is in records_list_of_lists when it is returned
        try:
            player1 = int(records_list_of_lists[-3][1])
            player2 = int(records_list_of_lists[-2][1])
            draws = int(records_list_of_lists[-1][1])
        except IndexError:
            player1 = 0
            player2 = 0
            draws = 0
        records_list_of_lists.append(['player1', str(player1)])
        records_list_of_lists.append(['player2', str(player2)])
        records_list_of_lists.append(['draws', str(draws)])
    # NOTE(review): any path_choice other than 1/2 raises NameError here;
    # callers are assumed to pass only 1 or 2 — confirm.
    return records_list_of_lists, player1, player2, draws
def write_game_results_record_to_file(results_record, player1, player2, draws):
    """Append this session's totals to *results_record* (in place) and to
    results_record.txt (only the final three rows are written).

    @param results_record: holds records
    @type results_record: list
    @param player1: count of player1 wins
    @type player1: int
    @param player2: count of player2 wins
    @type player2: int
    @param draws: count of draws
    @type draws: int
    @return: none
    @rtype: none
    """
    try:
        results_record.append(['player1', str(player1)])
        results_record.append(['player2', str(player2)])
        results_record.append(['draws', str(draws)])
        # `with` replaces the open/close pair so the file is closed even if
        # a write fails mid-loop.
        with open('results_record.txt', 'a', newline='\n') as csv_file:
            csv_writer = csv.writer(csv_file)
            for count in range(-3, 0):
                csv_writer.writerow([results_record[count][0], results_record[count][1]])
    except PermissionError:
        print('Cannot open file for writing')
        print('File might be open in another program.')
| [
"noreply@github.com"
] | noreply@github.com |
a3a3312b93fd1130507887a28abc6e2859e972c6 | 6fcfb638fa725b6d21083ec54e3609fc1b287d9e | /python/Guanghan_ROLO/ROLO-master/update/utils/utils_draw_coord.py | cb75d7c64e6e32251001750fef1e6f67b093e62e | [] | no_license | LiuFang816/SALSTM_py_data | 6db258e51858aeff14af38898fef715b46980ac1 | d494b3041069d377d6a7a9c296a14334f2fa5acc | refs/heads/master | 2022-12-25T06:39:52.222097 | 2019-12-12T08:49:07 | 2019-12-12T08:49:07 | 227,546,525 | 10 | 7 | null | 2022-12-19T02:53:01 | 2019-12-12T07:29:39 | Python | UTF-8 | Python | false | false | 2,002 | py | from utils_convert_coord import coord_regular_to_decimal, coord_decimal_to_regular
import cv2
def debug_decimal_coord(img, coord_decimal, prob = None, class_id = None):
    """Draw a detection given image-relative (decimal) coordinates by
    converting them to pixel coordinates and delegating to
    debug_regular_coord().
    """
    img_cp = img.copy()
    img_ht, img_wid, nchannels = img.shape
    coord_regular = coord_decimal_to_regular(coord_decimal, img_wid, img_ht)
    # NOTE(review): draws on `img`, not the unused copy `img_cp` — confirm.
    debug_regular_coord(img, coord_regular, prob, class_id)
def debug_regular_coord(img, coord_regular, prob = None, class_id = None):
    """Show `img` with a green box at coord_regular = [x, y, w, h] (pixels);
    when prob and class_id are given, also draw a grey label bar above it.
    """
    img_cp = img.copy()
    [x_topleft, y_topleft, w_box, h_box] = coord_regular
    cv2.rectangle(img_cp,
                  (x_topleft, y_topleft),
                  (x_topleft + w_box, y_topleft + h_box),
                  (0,255,0), 2)
    if prob is not None and class_id is not None:
        assert(isinstance(prob, (float)))
        # NOTE(review): `long` is Python 2 only; this module targets py2.
        assert(isinstance(class_id, (int, long)))
        cv2.rectangle(img_cp,
                      (x_topleft, y_topleft - 20),
                      (x_topleft + w_box, y_topleft),
                      (125,125,125),-1)
        cv2.putText(img_cp,
                    str(class_id) + ' : %.2f' % prob,
                    (x_topleft + 5, y_topleft - 7),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0,0,0), 1)
    cv2.imshow('debug_detection',img_cp)
    cv2.waitKey(1)
def debug_3_locations( img, gt_location, yolo_location, rolo_location):
    """Overlay ground truth (red, [x,y,w,h] top-left), YOLO (blue) and ROLO
    (green) boxes — the latter two given as center-based [cx,cy,w,h] —
    show the result briefly, and return the annotated copy.
    """
    img_cp = img.copy()
    for i in range(3): # b-g-r channels
        if i== 0: location= gt_location; color= (0, 0, 255)       # red for gt
        elif i ==1: location= yolo_location; color= (255, 0, 0)   # blue for yolo
        elif i ==2: location= rolo_location; color= (0, 255, 0)   # green for rolo
        x = int(location[0])
        y = int(location[1])
        w = int(location[2])
        h = int(location[3])
        if i == 1 or i== 2: cv2.rectangle(img_cp,(x-w//2, y-h//2),(x+w//2,y+h//2), color, 2)
        elif i== 0: cv2.rectangle(img_cp,(x,y),(x+w,y+h), color, 2)
    cv2.imshow('3 locations',img_cp)
    cv2.waitKey(100)
    return img_cp
| [
"659338505@qq.com"
] | 659338505@qq.com |
793efb87761ef8c69620a2da9deafd73d517872d | d2a030f7a050a641fddd657e895651ee0310ae41 | /givers/forms.py | 9b644910b52b4a7d64e77046e0ac842fe866489a | [] | no_license | Shawen17/Giveawaynow | f052a1055a96f2d0a392aaf748adcafbec2a5135 | 92f3bc0b359a712776661348e239b492894b81a1 | refs/heads/master | 2023-09-05T00:28:59.237486 | 2021-10-24T21:12:37 | 2021-10-24T21:12:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,232 | py | from django.db import models
from django.forms.models import ALL_FIELDS
from .models import Give,Profile,ContactUs,states,Vendor
from django.forms import ModelForm, fields
from django import forms
from django.contrib.auth.models import User
from django.contrib.auth.forms import UserCreationForm
from .models import validate_num
class ContactUsForm(ModelForm):
    """Model form for submitting a contact/support ticket (ContactUs model)."""
    class Meta:
        model= ContactUs
        fields = ('email','subject','ticket','body')
class GiveForm(ModelForm):
    """Model form for listing a giveaway item (Give model), including the
    giver's contact number and pickup address."""
    class Meta:
        model= Give
        fields = ('name','category','description','image','quantity','state','giver_number','address')
class SignupForm(UserCreationForm):
    """Registration form: extends Django's UserCreationForm with name, email
    and phone fields, all rendered with Bootstrap 'form-control' widgets."""
    password1 = forms.CharField(widget=forms.PasswordInput(attrs={'class':'form-control','placeholder':'Password'}))
    password2 = forms.CharField(widget=forms.PasswordInput(attrs={'class':'form-control','placeholder':'Password Again'}))
    email = forms.EmailField(max_length=100,widget=forms.TextInput(attrs={'class':'form-control','placeholder':'Email'}))
    firstname = forms.CharField(max_length= 100,widget=forms.TextInput(attrs={'class':'form-control','placeholder':'First Name'}))
    lastname = forms.CharField(max_length= 100,widget=forms.TextInput(attrs={'class':'form-control','placeholder':'Last Name'}))
    username = forms.CharField(max_length= 200,widget=forms.TextInput(attrs={'class':'form-control','placeholder':'Username'}))
    # NOTE(review): 'firstname'/'lastname'/'phone_number' are form-level fields
    # that do not match the built-in User model's field names (first_name,
    # last_name) -- presumably the view persists them elsewhere; confirm.
    phone_number=forms.IntegerField(required=True)
    class Meta:
        model = User
        fields = ('firstname','lastname','username','email','phone_number','password1','password2')
class Profileform(ModelForm):
    """Profile edit form bound to the Profile model; additionally exposes the
    account's name, email and phone so one page can edit both."""
    firstname = forms.CharField(max_length= 100)
    lastname = forms.CharField(max_length= 100)
    email = forms.EmailField(max_length=100)
    phone_number=forms.IntegerField(required=True)
    class Meta:
        model = Profile
        fields = ('profile_pic','firstname','lastname','email','sex','state','phone_number','bio')
class VendorForm(GiveForm):
    """Claim/delivery form for a giveaway: captures the receiver's phone,
    state and delivery address. Subclasses GiveForm, but its Meta rebinds
    the form to the Vendor model."""
    state=forms.ChoiceField(widget=forms.Select(attrs={'placeholder':'State of Residence'}),choices=states)
    class Meta:
        model = Vendor
        fields=('receiver_number','state','delivery_address')
| [
"shawen022@yahoo.com"
] | shawen022@yahoo.com |
85ae65707ad634936086129bb17d2ebc16ab0115 | eef39fd96ef4ed289c1567f56fde936d5bc42ea4 | /BaekJoon/Bronze2/2744.py | 15ea7e4ea8c55e3f6546f94a24d170bd01b27fa9 | [] | no_license | dudwns9331/PythonStudy | 3e17da9417507da6a17744c72835c7c2febd4d2e | b99b9ef2453af405daadc6fbf585bb880d7652e1 | refs/heads/master | 2023-06-15T12:19:56.019844 | 2021-07-15T08:46:10 | 2021-07-15T08:46:10 | 324,196,430 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 753 | py | # 대소문자 바꾸기
"""
2021-01-20 오후 4:09
안영준
문제
영어 소문자와 대문자로 이루어진 단어를 입력받은 뒤, 대문자는 소문자로, 소문자는 대문자로 바꾸어 출력하는 프로그램을 작성하시오.
입력
첫째 줄에 영어 소문자와 대문자로만 이루어진 단어가 주어진다. 단어의 길이는 최대 100이다.
출력
첫째 줄에 입력으로 주어진 단어에서 대문자는 소문자로, 소문자는 대문자로 바꾼 단어를 출력한다.
"""
String = input()
result = list()
for i in range(len(String)):
if String[i].islower():
result.append(String[i].upper())
else:
result.append(String[i].lower())
print(''.join(map(str, result)))
| [
"dudwns1045@naver.com"
] | dudwns1045@naver.com |
769fe78fbed72ed38ddaaf8886043f57213b6e36 | a687ba436b2b4926cde9fa327e3c932c3442ae1f | /models/official/transformer/transformer_main.py | b9543ee9c49f6fe691eb9364cf9d8901059f6fad | [
"Apache-2.0"
] | permissive | youlingwangzi/TensorFlow | 1bf4e5a9ac5c9eeaa4510c3dad71ac18dc473ecf | b7dd462d553d868dfe446b3d6d467935333647d3 | refs/heads/master | 2022-12-22T09:43:44.069358 | 2018-07-18T16:41:41 | 2018-07-18T16:41:41 | 136,822,259 | 5 | 1 | Apache-2.0 | 2022-12-17T14:25:20 | 2018-06-10T15:46:30 | Python | UTF-8 | Python | false | false | 23,341 | py | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Train and evaluate the Transformer model.
See README for description of setting the training schedule and evaluating the
BLEU score.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import os
import tempfile
# pylint: disable=g-bad-import-order
from six.moves import xrange # pylint: disable=redefined-builtin
from absl import app as absl_app
from absl import flags
import tensorflow as tf
# pylint: enable=g-bad-import-order
from official.transformer import compute_bleu
from official.transformer import translate
from official.transformer.data_download import VOCAB_FILE
from official.transformer.model import model_params
from official.transformer.model import transformer
from official.transformer.utils import dataset
from official.transformer.utils import metrics
from official.transformer.utils import schedule
from official.transformer.utils import tokenizer
from official.utils.accelerator import tpu as tpu_util
from official.utils.flags import core as flags_core
from official.utils.logs import hooks_helper
from official.utils.logs import logger
from official.utils.misc import model_helpers
# Mapping from the --param_set flag value to the hyperparameter bundle used to
# build the model.
PARAMS_MAP = {
    "tiny": model_params.TINY_PARAMS,
    "base": model_params.BASE_PARAMS,
    "big": model_params.BIG_PARAMS,
}

# Epochs to train when neither --train_steps nor --train_epochs is given.
DEFAULT_TRAIN_EPOCHS = 10
# Subdirectory of the model dir where BLEU summaries are written.
BLEU_DIR = "bleu"
# Effectively-infinite iteration count, used when training runs until a BLEU
# stop threshold is reached instead of for a fixed number of iterations.
INF = int(1e9)

# Dictionary containing tensors that are logged by the logging hooks. Each item
# maps a string to the tensor name.
TENSORS_TO_LOG = {
    "learning_rate": "model/get_train_op/learning_rate/learning_rate",
    "cross_entropy_loss": "model/cross_entropy"}
def model_fn(features, labels, mode, params):
  """Defines how to train, evaluate and predict from the transformer model.

  Args:
    features: tensor of source-sentence subtoken ids.
    labels: tensor of target-sentence subtoken ids, or None in PREDICT mode.
    mode: a tf.estimator.ModeKeys value (TRAIN, EVAL or PREDICT).
    params: hyperparameter dict; must contain at least "use_tpu",
      "label_smoothing", "vocab_size", "model_dir" and, for TRAIN, the
      optimizer/learning-rate settings consumed by get_train_op_and_metrics.

  Returns:
    A tf.estimator.EstimatorSpec, or tf.contrib.tpu.TPUEstimatorSpec when
    params["use_tpu"] is set.

  Raises:
    NotImplementedError: if PREDICT mode is requested on TPU.
  """
  with tf.variable_scope("model"):
    inputs, targets = features, labels
    # Create model and get output logits.
    model = transformer.Transformer(params, mode == tf.estimator.ModeKeys.TRAIN)
    logits = model(inputs, targets)
    # When in prediction mode, the labels/targets is None. The model output
    # is the prediction
    if mode == tf.estimator.ModeKeys.PREDICT:
      if params["use_tpu"]:
        raise NotImplementedError("Prediction is not yet supported on TPUs.")
      return tf.estimator.EstimatorSpec(
          tf.estimator.ModeKeys.PREDICT,
          predictions=logits)
    # Explicitly set the shape of the logits for XLA (TPU). This is needed
    # because the logits are passed back to the host VM CPU for metric
    # evaluation, and the shape of [?, ?, vocab_size] is too vague. However
    # it is known from Transformer that the first two dimensions of logits
    # are the dimensions of targets. Note that the ambiguous shape of logits is
    # not a problem when computing xentropy, because padded_cross_entropy_loss
    # resolves the shape on the TPU.
    logits.set_shape(targets.shape.as_list() + logits.shape.as_list()[2:])
    # Calculate model loss.
    # xentropy contains the cross entropy loss of every nonpadding token in the
    # targets.
    xentropy, weights = metrics.padded_cross_entropy_loss(
        logits, targets, params["label_smoothing"], params["vocab_size"])
    loss = tf.reduce_sum(xentropy) / tf.reduce_sum(weights)
    # Save loss as named tensor that will be logged with the logging hook.
    tf.identity(loss, "cross_entropy")
    if mode == tf.estimator.ModeKeys.EVAL:
      if params["use_tpu"]:
        # host call functions should only have tensors as arguments.
        # functools.partial() pre-populates params so that metric_fn is
        # TPUEstimator compliant.
        metric_fn = functools.partial(metrics.get_eval_metrics, params=params)
        eval_metrics = (metric_fn, [logits, labels])
        return tf.contrib.tpu.TPUEstimatorSpec(
            mode=mode, loss=loss, predictions={"predictions": logits},
            eval_metrics=eval_metrics)
      return tf.estimator.EstimatorSpec(
          mode=mode, loss=loss, predictions={"predictions": logits},
          eval_metric_ops=metrics.get_eval_metrics(logits, labels, params))
    else:
      train_op, metric_dict = get_train_op_and_metrics(loss, params)
      # Epochs can be quite long. This gives some intermediate information
      # in TensorBoard.
      metric_dict["minibatch_loss"] = loss
      if params["use_tpu"]:
        return tf.contrib.tpu.TPUEstimatorSpec(
            mode=mode, loss=loss, train_op=train_op,
            host_call=tpu_util.construct_scalar_host_call(
                metric_dict=metric_dict, model_dir=params["model_dir"],
                prefix="training/")
        )
      record_scalars(metric_dict)
      return tf.estimator.EstimatorSpec(mode=mode, loss=loss, train_op=train_op)
def record_scalars(metric_dict):
  """Emit one scalar summary per (name, tensor) entry of metric_dict."""
  for name in metric_dict:
    tf.contrib.summary.scalar(name=name, tensor=metric_dict[name])
def get_learning_rate(learning_rate, hidden_size, learning_rate_warmup_steps):
  """Build the Transformer LR schedule: linear warmup, then rsqrt decay."""
  with tf.name_scope("learning_rate"):
    warmup = tf.to_float(learning_rate_warmup_steps)
    global_step = tf.to_float(tf.train.get_or_create_global_step())
    # Scale the base rate by 1/sqrt(hidden_size), per "Attention Is All You
    # Need".
    lr = learning_rate * (hidden_size ** -0.5)
    # Ramp up linearly during the warmup period...
    lr *= tf.minimum(1.0, global_step / warmup)
    # ...then decay with the inverse square root of the step count.
    lr *= tf.rsqrt(tf.maximum(global_step, warmup))
    # Named tensor so the logging hooks (see TENSORS_TO_LOG) can find it.
    tf.identity(lr, "learning_rate")
    return lr
def get_train_op_and_metrics(loss, params):
  """Generate training op and metrics to save in TensorBoard.

  Args:
    loss: scalar loss tensor to minimize.
    params: hyperparameter dict with the learning-rate schedule settings,
      Adam betas/epsilon, and the TPU flags ("use_tpu", "tpu").

  Returns:
    Tuple of (train_op, metric_dict) where metric_dict maps summary names to
    scalar tensors.
  """
  with tf.variable_scope("get_train_op"):
    learning_rate = get_learning_rate(
        learning_rate=params["learning_rate"],
        hidden_size=params["hidden_size"],
        learning_rate_warmup_steps=params["learning_rate_warmup_steps"])
    # Create optimizer. Use LazyAdamOptimizer from TF contrib, which is faster
    # than the TF core Adam optimizer.
    optimizer = tf.contrib.opt.LazyAdamOptimizer(
        learning_rate,
        beta1=params["optimizer_adam_beta1"],
        beta2=params["optimizer_adam_beta2"],
        epsilon=params["optimizer_adam_epsilon"])
    if params["use_tpu"] and params["tpu"] != tpu_util.LOCAL:
      # Aggregates gradients across TPU shards before applying them.
      optimizer = tf.contrib.tpu.CrossShardOptimizer(optimizer)
    # Calculate and apply gradients using LazyAdamOptimizer.
    global_step = tf.train.get_global_step()
    tvars = tf.trainable_variables()
    gradients = optimizer.compute_gradients(
        loss, tvars, colocate_gradients_with_ops=True)
    train_op = optimizer.apply_gradients(
        gradients, global_step=global_step, name="train")
    metrics = {"learning_rate": learning_rate}
    if not params["use_tpu"]:
      # gradient norm is not included as a summary when running on TPU, as
      # it can cause instability between the TPU and the host controller.
      gradient_norm = tf.global_norm(list(zip(*gradients))[0])
      metrics["global_norm/gradient_norm"] = gradient_norm
    return train_op, metrics
def translate_and_compute_bleu(estimator, subtokenizer, bleu_source, bleu_ref):
  """Translate bleu_source and return (uncased, cased) BLEU against bleu_ref."""
  # The translations go into a throwaway file that is removed afterwards.
  translation_path = tempfile.NamedTemporaryFile(delete=False).name
  translate.translate_file(
      estimator, subtokenizer, bleu_source, output_file=translation_path,
      print_all_translations=False)
  # Score the same translation twice: case-insensitive, then case-sensitive.
  uncased_score = compute_bleu.bleu_wrapper(bleu_ref, translation_path, False)
  cased_score = compute_bleu.bleu_wrapper(bleu_ref, translation_path, True)
  os.remove(translation_path)
  return uncased_score, cased_score
def get_global_step(estimator):
  """Parse the step number out of the estimator's latest checkpoint path."""
  # Checkpoint paths look like ".../model.ckpt-12345"; the global step is the
  # numeric suffix after the final dash.
  checkpoint_path = estimator.latest_checkpoint()
  return int(checkpoint_path.rsplit("-", 1)[-1])
def evaluate_and_log_bleu(estimator, bleu_source, bleu_ref, vocab_file_path):
  """Calculate and record the BLEU score.

  Args:
    estimator: tf.Estimator used to generate translations.
    bleu_source: path of the file with sentences to translate.
    bleu_ref: path of the file with reference translations.
    vocab_file_path: path of the subtoken vocabulary file.

  Returns:
    Tuple of (uncased_score, cased_score).
  """
  subtokenizer = tokenizer.Subtokenizer(vocab_file_path)
  uncased_score, cased_score = translate_and_compute_bleu(
      estimator, subtokenizer, bleu_source, bleu_ref)
  # BUGFIX: tf.logging.info uses printf-style lazy formatting; the original
  # passed the score as a format argument with no %-placeholder in the
  # message, so the value was never printed (and logging reported a
  # string-formatting error instead).
  tf.logging.info("Bleu score (uncased): %f", uncased_score)
  tf.logging.info("Bleu score (cased): %f", cased_score)
  return uncased_score, cased_score
def run_loop(
    estimator, schedule_manager, train_hooks=None, benchmark_logger=None,
    bleu_source=None, bleu_ref=None, bleu_threshold=None, vocab_file_path=None):
  """Train and evaluate model, and optionally compute model's BLEU score.

  **Step vs. Epoch vs. Iteration**

  Steps and epochs are canonical terms used in TensorFlow and general machine
  learning. They are used to describe running a single process (train/eval):
    - Step refers to running the process through a single or batch of examples.
    - Epoch refers to running the process through an entire dataset.

  E.g. training a dataset with 100 examples. The dataset is
  divided into 20 batches with 5 examples per batch. A single training step
  trains the model on one batch. After 20 training steps, the model will have
  trained on every batch in the dataset, or, in other words, one epoch.

  Meanwhile, iteration is used in this implementation to describe running
  multiple processes (training and eval).
    - A single iteration:
      1. trains the model for a specific number of steps or epochs.
      2. evaluates the model.
      3. (if source and ref files are provided) compute BLEU score.

  This function runs through multiple train+eval+bleu iterations.

  Args:
    estimator: tf.Estimator containing model to train.
    schedule_manager: A schedule.Manager object to guide the run loop.
    train_hooks: List of hooks to pass to the estimator during training.
    benchmark_logger: a BenchmarkLogger object that logs evaluation data
    bleu_source: File containing text to be translated for BLEU calculation.
    bleu_ref: File containing reference translations for BLEU calculation.
    bleu_threshold: minimum BLEU score before training is stopped.
    vocab_file_path: Path to vocabulary file used to subtokenize bleu_source.

  Raises:
    ValueError: if BLEU evaluation is requested while running on TPU.
  """
  evaluate_bleu = bleu_source is not None and bleu_ref is not None
  if evaluate_bleu and schedule_manager.use_tpu:
    raise ValueError("BLEU score can not be computed when training with a TPU, "
                     "as it requires estimator.predict which is not yet "
                     "supported.")
  # Print details of training schedule.
  tf.logging.info("Training schedule:")
  tf.logging.info(
      "\t1. Train for {}".format(schedule_manager.train_increment_str))
  tf.logging.info("\t2. Evaluate model.")
  if evaluate_bleu:
    tf.logging.info("\t3. Compute BLEU score.")
    if bleu_threshold is not None:
      tf.logging.info("Repeat above steps until the BLEU score reaches %f" %
                      bleu_threshold)
  if not evaluate_bleu or bleu_threshold is None:
    tf.logging.info("Repeat above steps %d times." %
                    schedule_manager.train_eval_iterations)
  if evaluate_bleu:
    # Create summary writer to log bleu score (values can be displayed in
    # Tensorboard).
    bleu_writer = tf.summary.FileWriter(
        os.path.join(estimator.model_dir, BLEU_DIR))
    if bleu_threshold is not None:
      # Change loop stopping condition if bleu_threshold is defined.
      schedule_manager.train_eval_iterations = INF
  # Loop training/evaluation/bleu cycles
  for i in xrange(schedule_manager.train_eval_iterations):
    tf.logging.info("Starting iteration %d" % (i + 1))
    # Train the model for single_iteration_train_steps or until the input fn
    # runs out of examples (if single_iteration_train_steps is None).
    estimator.train(
        dataset.train_input_fn,
        steps=schedule_manager.single_iteration_train_steps,
        hooks=train_hooks)
    eval_results = estimator.evaluate(
        input_fn=dataset.eval_input_fn,
        steps=schedule_manager.single_iteration_eval_steps)
    tf.logging.info("Evaluation results (iter %d/%d):" %
                    (i + 1, schedule_manager.train_eval_iterations))
    tf.logging.info(eval_results)
    benchmark_logger.log_evaluation_result(eval_results)
    # The results from estimator.evaluate() are measured on an approximate
    # translation, which utilize the target golden values provided. The actual
    # bleu score must be computed using the estimator.predict() path, which
    # outputs translations that are not based on golden values. The translations
    # are compared to reference file to get the actual bleu score.
    if evaluate_bleu:
      uncased_score, cased_score = evaluate_and_log_bleu(
          estimator, bleu_source, bleu_ref, vocab_file_path)
      # Write actual bleu scores using summary writer and benchmark logger
      global_step = get_global_step(estimator)
      summary = tf.Summary(value=[
          tf.Summary.Value(tag="bleu/uncased", simple_value=uncased_score),
          tf.Summary.Value(tag="bleu/cased", simple_value=cased_score),
      ])
      bleu_writer.add_summary(summary, global_step)
      bleu_writer.flush()
      benchmark_logger.log_metric(
          "bleu_uncased", uncased_score, global_step=global_step)
      benchmark_logger.log_metric(
          "bleu_cased", cased_score, global_step=global_step)
      # Stop training if bleu stopping threshold is met.
      if model_helpers.past_stop_threshold(bleu_threshold, uncased_score):
        bleu_writer.close()
        break
def define_transformer_flags():
  """Add flags and flag validators for running transformer_main.

  Registers the shared flags from official.utils.flags (data_dir, model_dir,
  train schedule, TPU, benchmarking), the transformer-specific flags
  (--param_set, --static_batch, step-based schedule, BLEU inputs), and two
  cross-flag validators that run automatically at flag-parse time.
  """
  # Add common flags (data_dir, model_dir, train_epochs, etc.).
  flags_core.define_base(multi_gpu=False, num_gpu=False, export_dir=False)
  flags_core.define_performance(
      num_parallel_calls=True,
      inter_op=False,
      intra_op=False,
      synthetic_data=False,
      max_train_steps=False,
      dtype=False
  )
  flags_core.define_benchmark()
  flags_core.define_device(tpu=True)
  # Set flags from the flags_core module as "key flags" so they're listed when
  # the '-h' flag is used. Without this line, the flags defined above are
  # only shown in the full `--helpful` help text.
  flags.adopt_module_key_flags(flags_core)
  # Add transformer-specific flags
  flags.DEFINE_enum(
      name="param_set", short_name="mp", default="big",
      enum_values=["base", "big", "tiny"],
      help=flags_core.help_wrap(
          "Parameter set to use when creating and training the model. The "
          "parameters define the input shape (batch size and max length), "
          "model configuration (size of embedding, # of hidden layers, etc.), "
          "and various other settings. The big parameter set increases the "
          "default batch size, embedding/hidden size, and filter size. For a "
          "complete list of parameters, please see model/model_params.py."))
  flags.DEFINE_bool(
      name="static_batch", default=False,
      help=flags_core.help_wrap(
          "Whether the batches in the dataset should have static shapes. In "
          "general, this setting should be False. Dynamic shapes allow the "
          "inputs to be grouped so that the number of padding tokens is "
          "minimized, and helps model training. In cases where the input shape "
          "must be static (e.g. running on TPU), this setting will be ignored "
          "and static batching will always be used."))
  # Flags for training with steps (may be used for debugging)
  flags.DEFINE_integer(
      name="train_steps", short_name="ts", default=None,
      help=flags_core.help_wrap("The number of steps used to train."))
  flags.DEFINE_integer(
      name="steps_between_evals", short_name="sbe", default=1000,
      help=flags_core.help_wrap(
          "The Number of training steps to run between evaluations. This is "
          "used if --train_steps is defined."))
  # BLEU score computation
  flags.DEFINE_string(
      name="bleu_source", short_name="bls", default=None,
      help=flags_core.help_wrap(
          "Path to source file containing text translate when calculating the "
          "official BLEU score. --bleu_source, --bleu_ref, and --vocab_file "
          "must be set. Use the flag --stop_threshold to stop the script based "
          "on the uncased BLEU score."))
  flags.DEFINE_string(
      name="bleu_ref", short_name="blr", default=None,
      help=flags_core.help_wrap(
          "Path to source file containing text translate when calculating the "
          "official BLEU score. --bleu_source, --bleu_ref, and --vocab_file "
          "must be set. Use the flag --stop_threshold to stop the script based "
          "on the uncased BLEU score."))
  flags.DEFINE_string(
      name="vocab_file", short_name="vf", default=VOCAB_FILE,
      help=flags_core.help_wrap(
          "Name of vocabulary file containing subtokens for subtokenizing the "
          "bleu_source file. This file is expected to be in the directory "
          "defined by --data_dir."))
  flags_core.set_defaults(data_dir="/tmp/translate_ende",
                          model_dir="/tmp/transformer_model",
                          batch_size=None,
                          train_epochs=None)

  # Validator: --train_steps and --train_epochs are mutually exclusive.
  @flags.multi_flags_validator(
      ["train_epochs", "train_steps"],
      message="Both --train_steps and --train_epochs were set. Only one may be "
              "defined.")
  def _check_train_limits(flag_dict):
    return flag_dict["train_epochs"] is None or flag_dict["train_steps"] is None

  # Validator: when BLEU evaluation is requested, all of its input files must
  # actually exist on disk.
  @flags.multi_flags_validator(
      ["data_dir", "bleu_source", "bleu_ref", "vocab_file"],
      message="--bleu_source, --bleu_ref, and/or --vocab_file don't exist. "
              "Please ensure that the file paths are correct.")
  def _check_bleu_files(flags_dict):
    """Validate files when bleu_source and bleu_ref are defined."""
    if flags_dict["bleu_source"] is None or flags_dict["bleu_ref"] is None:
      return True
    # Ensure that bleu_source, bleu_ref, and vocab files exist.
    vocab_file_path = os.path.join(
        flags_dict["data_dir"], flags_dict["vocab_file"])
    return all([
        tf.gfile.Exists(flags_dict["bleu_source"]),
        tf.gfile.Exists(flags_dict["bleu_ref"]),
        tf.gfile.Exists(vocab_file_path)])

  flags_core.require_cloud_storage(["data_dir", "model_dir"])
def construct_estimator(flags_obj, params, schedule_manager):
  """Construct an estimator from either Estimator or TPUEstimator.

  Args:
    flags_obj: The FLAGS object parsed from command line.
    params: A dict of run specific parameters.
    schedule_manager: A schedule.Manager object containing the run schedule.

  Returns:
    An estimator object to be used for training and eval.
  """
  if not params["use_tpu"]:
    return tf.estimator.Estimator(
        model_fn=model_fn, model_dir=flags_obj.model_dir, params=params)

  # TPU path: resolve the TPU cluster and build a TPU-specific run config.
  tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(
      tpu=flags_obj.tpu,
      zone=flags_obj.tpu_zone,
      project=flags_obj.tpu_gcp_project
  )
  # One full train iteration per device loop keeps the host/TPU round trips
  # to a minimum.
  tpu_config = tf.contrib.tpu.TPUConfig(
      iterations_per_loop=schedule_manager.single_iteration_train_steps,
      num_shards=flags_obj.num_tpu_shards)
  run_config = tf.contrib.tpu.RunConfig(
      cluster=tpu_cluster_resolver,
      model_dir=flags_obj.model_dir,
      session_config=tf.ConfigProto(
          allow_soft_placement=True, log_device_placement=True),
      tpu_config=tpu_config)
  return tf.contrib.tpu.TPUEstimator(
      model_fn=model_fn,
      use_tpu=params["use_tpu"] and flags_obj.tpu != tpu_util.LOCAL,
      train_batch_size=schedule_manager.batch_size,
      eval_batch_size=schedule_manager.batch_size,
      params={
          # TPUEstimator needs to populate batch_size itself due to sharding.
          key: value for key, value in params.items() if key != "batch_size"},
      config=run_config)
def run_transformer(flags_obj):
  """Create tf.Estimator to train and evaluate transformer model.

  Merges the chosen parameter set with flag overrides, builds the training
  schedule, hooks and benchmark logger, constructs the estimator and finally
  hands control to run_loop().

  Args:
    flags_obj: Object containing parsed flag values.
  """
  # Add flag-defined parameters to params object
  params = PARAMS_MAP[flags_obj.param_set]
  params["data_dir"] = flags_obj.data_dir
  params["model_dir"] = flags_obj.model_dir
  params["num_parallel_calls"] = flags_obj.num_parallel_calls
  params["tpu"] = flags_obj.tpu
  params["use_tpu"] = bool(flags_obj.tpu)  # was a tpu specified.
  # Explicit --batch_size wins; otherwise fall back to the param set's
  # device-appropriate default.
  params["batch_size"] = flags_obj.batch_size or (
      params["default_batch_size_tpu"] if params["use_tpu"]
      else params["default_batch_size"])
  # TPU requires static shapes, so static batching is forced there.
  params["static_batch"] = flags_obj.static_batch or params["use_tpu"]
  params["allow_ffn_pad"] = not params["use_tpu"]
  schedule_manager = schedule.Manager(
      train_steps=flags_obj.train_steps,
      steps_between_evals=flags_obj.steps_between_evals,
      train_epochs=flags_obj.train_epochs,
      epochs_between_evals=flags_obj.epochs_between_evals,
      default_train_epochs=DEFAULT_TRAIN_EPOCHS,
      batch_size=params["batch_size"],
      max_length=params["max_length"],
      use_tpu=params["use_tpu"],
      num_tpu_shards=flags_obj.num_tpu_shards
  )
  params["repeat_dataset"] = schedule_manager.repeat_dataset
  # Create hooks that log information about the training and metric values
  train_hooks = hooks_helper.get_train_hooks(
      flags_obj.hooks,
      tensors_to_log=TENSORS_TO_LOG,  # used for logging hooks
      batch_size=schedule_manager.batch_size,  # for ExamplesPerSecondHook
      use_tpu=params["use_tpu"]  # Not all hooks can run with TPUs
  )
  benchmark_logger = logger.get_benchmark_logger()
  benchmark_logger.log_run_info(
      model_name="transformer",
      dataset_name="wmt_translate_ende",
      run_params=params,
      test_id=flags_obj.benchmark_test_id)
  # Train and evaluate transformer model
  estimator = construct_estimator(flags_obj, params, schedule_manager)
  run_loop(
      estimator=estimator,
      # Training arguments
      schedule_manager=schedule_manager,
      train_hooks=train_hooks,
      benchmark_logger=benchmark_logger,
      # BLEU calculation arguments
      bleu_source=flags_obj.bleu_source,
      bleu_ref=flags_obj.bleu_ref,
      bleu_threshold=flags_obj.stop_threshold,
      vocab_file_path=os.path.join(flags_obj.data_dir, flags_obj.vocab_file))
def main(_):
  """absl entry point: run transformer training inside a benchmark-logging
  context (positional argv list is unused)."""
  flags_obj = flags.FLAGS
  with logger.benchmark_context(flags_obj):
    run_transformer(flags_obj)
# Script entry point: configure logging, register all flags, then hand
# control to absl, which parses argv and invokes main().
if __name__ == "__main__":
  tf.logging.set_verbosity(tf.logging.INFO)
  define_transformer_flags()
  absl_app.run(main)
| [
"yuanxiaokun@bbktel.com"
] | yuanxiaokun@bbktel.com |
6f08e80ed6c86615f0e0953c321c153964153361 | f450ed5c70c0e6a9df0b78ed92b7823ec61256e7 | /src/init.py | 840958dce68aaf75f6f86a57ae3a60aa99c50c06 | [
"MIT"
] | permissive | thakreyn/drive-sink | 5508d371fc68ae9faa500cdc3cd481ccdee3ed90 | 0b2674f23e4ece7273c32112478ec0a24befd287 | refs/heads/main | 2023-07-18T09:24:13.777790 | 2021-09-08T20:15:10 | 2021-09-08T20:15:10 | 400,298,504 | 19 | 2 | null | null | null | null | UTF-8 | Python | false | false | 7,080 | py | """
init.py:
Responsible for setting up the directory for synchronisation and setting up
the user profile.
- Checks the directory if already initialised.
- Setups up directory structure
- Asks user data
- Sets up user Files and preferences
- Confirms creation and prints help message
"""
import os
import shutil
from datetime import datetime
from termcolor import colored
import configparser
from . import scan as user_scan
from . import drive as user_drive
# Directory in which the tool was invoked; all init paths are relative to it.
CURRENT_LOCATION = os.getcwd()

def check_pre_init():
    """Return True if this directory was already initialised for sync,
    i.e. it contains a '.sink' marker folder; False otherwise."""
    marker = os.path.join(CURRENT_LOCATION, '.sink')
    return os.path.exists(marker)
def generate_config_file():
    """
    Write the initial .sink/config/config.ini for a freshly initialised
    folder.

    Sections written:
        general -> sync root path, drive enablement flag, populated flag
        user    -> drive folder name and drive folder id (empty until linked)
    """
    config = configparser.ConfigParser()
    # read_dict stringifies every value (booleans become "True"/"False"),
    # exactly as section assignment would.
    config.read_dict({
        "general": {
            "root": CURRENT_LOCATION,
            "drive_status": False,
            "populated": False,
        },
        "user": {
            "folder_name": CURRENT_LOCATION,
            "folder_id": "",
        },
    })
    target = os.path.join(os.getcwd(), '.sink', 'config', 'config.ini')
    with open(target, "w") as configfile:
        config.write(configfile)
# Also available in utility.py
def read_config_file(section = "general", attr = "root"):
    """
    Return config[section][attr] from .sink/config/config.ini in the current
    working directory. Defaults to the init directory (general/root).
    """
    parser = configparser.ConfigParser()
    ini_path = os.path.join(os.getcwd(), '.sink', 'config', 'config.ini')
    parser.read(ini_path)
    return parser[section][attr]
def edit_config_file(section, attr, new_attr):
    """Set config[section][attr] = new_attr and rewrite config.ini in the
    sync root (the root path comes from read_config_file())."""
    ini_path = os.path.join(read_config_file(), '.sink', 'config', 'config.ini')
    parser = configparser.ConfigParser()
    parser.read(ini_path)
    parser[section][attr] = new_attr
    with open(ini_path, "w") as configfile:
        parser.write(configfile)
def main_init_process():
    """
    Main initialisation routine
    Init steps:
    1. Establish '.sink' directory
    2. Create subfolders (log, config, meta)
    3. Generate config file
    4. Generate ignore file
    5. Generate log files (usage, commit)
    6. Complete first scan and write to metadata
    7. Establish the drive-sink directory in users folder
    """
    # Refuse to re-initialise a directory that already has a .sink folder.
    if not check_pre_init():
        print("Initialising at : " + CURRENT_LOCATION)
        directory = ".sink"
        path = os.path.join(CURRENT_LOCATION, directory)
        os.mkdir(path)
        subdirectories = ["log", "config", "meta"]
        # Create mentioned subdirectories
        for subdirectory in subdirectories:
            path = os.path.join(CURRENT_LOCATION , ".sink" , subdirectory)
            os.mkdir(path)
        # Check if drive-sink is available in users directory, else -> initialise it (with name .drive-sink)
        user_folder_path = os.path.expanduser("~")
        user_folder_path = os.path.join(user_folder_path, ".drive-sink")
        if not os.path.exists(user_folder_path):
            os.mkdir(user_folder_path)
        # config file
        generate_config_file()
        # ignore files (default patterns: tool folders excluded, credential
        # files never synced)
        with open(os.path.join('.', '.sink', 'ignore.txt'), "w+") as file:
            text = "!__pycache__\n!.sink\n!sink\ncredentials.json\ntoken.json"
            file.write(text)
        # usage log
        with open(os.path.join('.','.sink','log','usage.log'), "w+") as file:
            time = datetime.now().strftime("%d/%m/%Y %H:%M:%S")
            log_message = f"[{time}] : Initialised Folder at -> {CURRENT_LOCATION}"
            file.write(log_message)
        # commit log
        with open(os.path.join('.', '.sink', 'log', 'commit.log'), "w+") as file:
            time = datetime.now().strftime("%d/%m/%Y %H:%M:%S")
            log_message = f"[{time}] : Initialised Folder at -> {CURRENT_LOCATION}"
            file.write(log_message)
        # Check if credentials already exist, then inform the user about the file being used
        # and give option to use local credentials file
        if os.path.exists(os.path.join(user_folder_path, 'credentials.json')):
            print(f"\nCredentials.json already found at global location {colored(user_folder_path, 'green')} using it by default.")
            print(f"""
            {colored("Folder has been successfully initialised at " + CURRENT_LOCATION, 'green')}
            Run command:
            '{colored("sink initdrive", 'green')}' to enable the drive and verify.
            (optional) If you want to use local credentials, please copy 'credentials.json' to '.sink/config'
            and then run `sink initdrive`.
            If you don't have a credentials.json file, see documentation for instructions to generate one.
            If this directory was initialised by mistake, use 'sink clean' to cancel.
            """)
        else:
            print(f"""
            {colored("Folder has been successfully initialised at " + CURRENT_LOCATION, 'green')}
            {colored("No global 'credentials.json' found!", 'red')}
            Please copy 'credentials.json' to '{user_folder_path}' for global access or
            to '.sink/config' if you want to use different local credentials.
            Then run :
            '{colored("sink initdrive", 'green')}' to enable drive and verify.
            If you don't have a credentials.json file, see documentation for instructions to generate one.
            If this directory was initialised by mistake, use 'sink clean' to cancel.
            """)
    else:
        print(colored("[Error] : A folder has already been initilised here !", 'red'))
def clean_setup():
    """
    Completely deletes the sink directory with all config files and
    option to delete the drive folder as well
    """
    if check_pre_init():
        location = read_config_file()
        # NOTE(review): 'dir' shadows the builtin of the same name; rename
        # candidate on a future refactor.
        dir = ".sink"
        path = os.path.join(location, dir)
        # Interactive prompt: optionally remove the remote Drive folder too
        # (only possible if the drive link was actually established).
        if input("Do you want to delete drive folder as well ? (y/n) : ").lower() == 'y':
            if read_config_file("general", "drive_status") == 'True':
                mydrive = user_drive.MyDrive()
                root_id = read_config_file("user", "folder_id")
                mydrive.delete_file(root_id)
        # Local cleanup always happens, regardless of the drive answer.
        shutil.rmtree(path)
        print(colored("Successfully deleted and cleaned the setup",'green'))
    else:
        print("No directory found to clean!!")
"yash.nthakre@gmail.com"
] | yash.nthakre@gmail.com |
0ea743b25376fd94f0a2b9297d804aee3562820d | b44adadcc087f86d523042084b5d10f612a11365 | /src/combat.py | 4cf27440b47b62817cb31df96fa5dc3259ad0865 | [] | no_license | e-stan/covid_19_analysis | f1cd3e50d14cf0880d7266e768586cac428e31de | 55ca37c2bff68b170d97f8aa2dd588f41af44987 | refs/heads/master | 2023-06-09T18:49:47.830901 | 2021-06-29T17:28:05 | 2021-06-29T17:28:05 | 297,370,604 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,919 | py | import pandas as pd
import patsy
import sys
import numpy.linalg as la
import numpy as np
def adjust_nums(numerical_covariates, drop_idxs):
    """Re-index numerical covariate columns after design columns were dropped.

    When columns at drop_idxs are removed, every covariate index must shift
    down by one for each dropped column that sat *below* it.

    Args:
        numerical_covariates: iterable of integer column indices, or None.
        drop_idxs: indices (in the original column space) that were dropped.

    Returns:
        List of adjusted covariate indices, or drop_idxs unchanged when
        numerical_covariates is None (preserves the original behaviour).
    """
    # if we dropped some values, have to adjust those with a larger index.
    if numerical_covariates is None: return drop_idxs
    # BUGFIX: the original subtracted sum(nc < di) -- the count of dropped
    # columns *above* nc -- which contradicts the stated intent and leaves
    # indices pointing one slot past their column. An index must decrease by
    # the number of dropped columns *below* it.
    return [nc - sum(di < nc for di in drop_idxs) for nc in numerical_covariates]
def design_mat(mod, numerical_covariates, batch_levels):
    """Build the ComBat design matrix from the model dataframe.

    Produces one indicator column per batch (in the order of
    ``batch_levels``), followed by the categorical covariate columns and
    finally the numerical covariate columns. Progress is reported on stderr.
    """
    # require levels to make sure they are in the same order as we use in the
    # rest of the script.
    design = patsy.dmatrix("~ 0 + C(batch, levels=%s)" % str(batch_levels),
        mod, return_type="dataframe")

    # 'batch' is encoded in the patsy matrix; drop it from the covariates.
    mod = mod.drop(["batch"], axis=1)
    numerical_covariates = list(numerical_covariates)
    sys.stderr.write("found %i batches\n" % design.shape[1])
    # Columns that are not numerical covariates are treated as categorical.
    other_cols = [c for i, c in enumerate(mod.columns)
                  if not i in numerical_covariates]
    factor_matrix = mod[other_cols]
    design = pd.concat((design, factor_matrix), axis=1)
    if numerical_covariates is not None:
        sys.stderr.write("found %i numerical covariates...\n"
                            % len(numerical_covariates))
        for i, nC in enumerate(numerical_covariates):
            cname = mod.columns[nC]
            sys.stderr.write("\t{0}\n".format(cname))
            # Append each numerical covariate column as-is.
            design[cname] = mod[mod.columns[nC]]
    sys.stderr.write("found %i categorical variables:" % len(other_cols))
    sys.stderr.write("\t" + ", ".join(other_cols) + '\n')
    return design
def combat(data, batch, model=None, numerical_covariates=None):
    """Correct for batch effects in a dataset
    Parameters
    ----------
    data : pandas.DataFrame
        A (n_features, n_samples) dataframe of the expression or methylation
        data to batch correct
    batch : pandas.Series
        A column corresponding to the batches in the data, with index same as
        the columns that appear in ``data``
    model : patsy.design_info.DesignMatrix, optional
        A model matrix describing metadata on the samples which could be
        causing batch effects. If not provided, then will attempt to coarsely
        correct just from the information provided in ``batch``
    numerical_covariates : list-like
        List of covariates in the model which are numerical, rather than
        categorical
    Returns
    -------
    corrected : pandas.DataFrame
        A (n_features, n_samples) dataframe of the batch-corrected data
    """
    # Normalize the covariate argument to a list.
    if isinstance(numerical_covariates, str):
        numerical_covariates = [numerical_covariates]
    if numerical_covariates is None:
        numerical_covariates = []

    # Attach the batch labels to the (possibly empty) covariate model.
    if model is not None and isinstance(model, pd.DataFrame):
        model["batch"] = list(batch)
    else:
        model = pd.DataFrame({'batch': batch})

    # Group sample indices per batch level.
    batch_items = model.groupby("batch").groups.items()
    batch_levels = [k for k, v in batch_items]
    batch_info = [v for k, v in batch_items]
    n_batch = len(batch_info)
    n_batches = np.array([len(v) for v in batch_info])
    n_array = float(sum(n_batches))

    # drop intercept
    drop_cols = [cname for cname, inter in  ((model == 1).all()).iteritems() if inter == True]
    drop_idxs = [list(model.columns).index(cdrop) for cdrop in drop_cols]
    model = model[[c for c in model.columns if not c in drop_cols]]
    numerical_covariates = [list(model.columns).index(c) if isinstance(c, str) else c
                            for c in numerical_covariates if not c in drop_cols]

    design = design_mat(model, numerical_covariates, batch_levels)

    sys.stderr.write("Standardizing Data across genes.\n")
    # OLS fit of the design matrix: B_hat holds per-feature coefficients.
    B_hat = np.dot(np.dot(la.inv(np.dot(design.T, design)), design.T), data.T)
    # Weighted grand mean over batches, and pooled per-feature variance.
    grand_mean = np.dot((n_batches / n_array).T, B_hat[:n_batch,:])
    var_pooled = np.dot(((data - np.dot(design, B_hat).T)**2), np.ones((int(n_array), 1)) / int(n_array))

    stand_mean = np.dot(grand_mean.T.reshape((len(grand_mean), 1)), np.ones((1, int(n_array))))
    # Zero out the batch-indicator columns so only covariate effects remain.
    tmp = np.array(design.copy())
    tmp[:,:n_batch] = 0
    stand_mean += np.dot(tmp, B_hat).T

    # Standardized data: (data - mean) / pooled std, per feature.
    s_data = ((data - stand_mean) / np.dot(np.sqrt(var_pooled), np.ones((1, int(n_array)))))

    sys.stderr.write("Fitting L/S model and finding priors\n")
    batch_design = design[design.columns[:n_batch]]
    # Per-batch location estimates (gamma) and scale estimates (delta).
    gamma_hat = np.dot(np.dot(la.inv(np.dot(batch_design.T, batch_design)), batch_design.T), s_data.T)
    delta_hat = []
    for i, batch_idxs in enumerate(batch_info):
        #batches = [list(model.columns).index(b) for b in batches]
        delta_hat.append(s_data[batch_idxs].var(axis=1))
    # Empirical-Bayes hyperparameters for the priors.
    gamma_bar = gamma_hat.mean(axis=1)
    t2 = gamma_hat.var(axis=1)

    a_prior = list(map(aprior, delta_hat))
    b_prior = list(map(bprior, delta_hat))

    sys.stderr.write("Finding parametric adjustments\n")
    # Iterate the EB posterior solution per batch (see it_sol).
    gamma_star, delta_star = [], []
    for i, batch_idxs in enumerate(batch_info):
        #print '18 20 22 28 29 31 32 33 35 40 46'
        #print batch_info[batch_id]
        temp = it_sol(s_data[batch_idxs], gamma_hat[i],
                    delta_hat[i], gamma_bar[i], t2[i], a_prior[i], b_prior[i])
        gamma_star.append(temp[0])
        delta_star.append(temp[1])

    sys.stdout.write("Adjusting data\n")
    bayesdata = s_data
    gamma_star = np.array(gamma_star)
    delta_star = np.array(delta_star)

    # Remove the estimated batch effect from each batch's samples.
    for j, batch_idxs in enumerate(batch_info):
        dsq = np.sqrt(delta_star[j,:])
        dsq = dsq.reshape((len(dsq), 1))
        denom = np.dot(dsq, np.ones((1, n_batches[j])))
        numer = np.array(bayesdata[batch_idxs] - np.dot(batch_design.loc[batch_idxs], gamma_star).T)
        bayesdata[batch_idxs] = numer / denom

    # Undo the standardization to return data on the original scale.
    vpsq = np.sqrt(var_pooled).reshape((len(var_pooled), 1))
    bayesdata = bayesdata * np.dot(vpsq, np.ones((1, int(n_array)))) + stand_mean

    return bayesdata
def it_sol(sdat, g_hat, d_hat, g_bar, t2, a, b, conv=0.0001):
    """Iteratively solve the empirical-Bayes posterior for one batch.

    Alternates the posterior-mean (gamma) and posterior-variance (delta)
    updates until the maximum relative change falls below ``conv``.
    Returns the converged (gamma_star, delta_star) pair.
    """
    # Per-feature count of non-missing samples.
    n = (1 - np.isnan(sdat)).sum(axis=1)
    g_old = g_hat.copy()
    d_old = d_hat.copy()

    change = 1
    count = 0
    while change > conv:
        #print g_hat.shape, g_bar.shape, t2.shape
        g_new = postmean(g_hat, g_bar, n, d_old, t2)
        # Residual sum of squares around the updated gamma estimate.
        # NOTE(review): '.values' implies g_new is a pandas Series here.
        sum2 = ((sdat - np.dot(g_new.values.reshape((g_new.shape[0], 1)), np.ones((1, sdat.shape[1])))) ** 2).sum(axis=1)
        d_new = postvar(sum2, n, a, b)

        change = max((abs(g_new - g_old) / g_old).max(), (abs(d_new - d_old) / d_old).max())
        g_old = g_new #.copy()
        d_old = d_new #.copy()
        count = count + 1
    adjust = (g_new, d_new)
    return adjust
def aprior(gamma_hat):
    """Method-of-moments estimate of the inverse-gamma shape hyperparameter."""
    mean = gamma_hat.mean()
    variance = gamma_hat.var()
    return (2 * variance + mean ** 2) / variance
def bprior(gamma_hat):
    """Method-of-moments estimate of the inverse-gamma scale hyperparameter."""
    mean = gamma_hat.mean()
    variance = gamma_hat.var()
    return (mean * variance + mean ** 3) / variance
def postmean(g_hat, g_bar, n, d_star, t2):
    """Conditional posterior mean of the batch-effect parameter gamma."""
    numerator = t2 * n * g_hat + d_star * g_bar
    denominator = t2 * n + d_star
    return numerator / denominator
def postvar(sum2, n, a, b):
    """Conditional posterior estimate of the variance parameter delta."""
    scale = 0.5 * sum2 + b
    shape = n / 2.0 + a - 1.0
    return scale / shape
"estancl1234@gmail.com"
] | estancl1234@gmail.com |
e085ceeb417ebc929fd54fd1c1667da85a497a9a | a25c8c2789750a0b95c2af9b27dde72a8c49b395 | /test/functional/xaya_trading.py | 78a629c3e5ee1521232038231d9d54e9c0aa53fb | [
"MIT"
] | permissive | gripen89/xaya | 65d38dd1cad6a7c21addea51cb4b697fa424fb46 | db0cb3601d9eff01e35ebd4a764aa7ff859e61be | refs/heads/master | 2022-11-21T22:47:45.072342 | 2019-10-21T23:26:24 | 2019-10-21T23:26:24 | 216,678,870 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 11,237 | py | #!/usr/bin/env python3
# Copyright (c) 2019 The Xaya developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Tests trading with atomic name updates."""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.messages import (
COIN,
COutPoint,
CTransaction,
CTxIn,
CTxOut,
)
from test_framework.script import (
CScript,
OP_2DROP,
OP_DROP,
OP_NAME_UPDATE,
)
from test_framework.util import (
assert_equal,
assert_greater_than,
hex_str_to_bytes,
)
from decimal import Decimal
import io
import json
# The fee paid for example transactions.
FEE = Decimal ('0.01')
class AtomicTradingTest (BitcoinTestFramework):
  """Functional test for atomic name trades on regtest.

  Node 0 owns the traded name; node 1 is the counterparty. Both bid offers
  (buyer pre-signs) and ask offers (seller pre-signs with
  SINGLE|ANYONECANPAY) are exercised.
  """

  def set_test_params (self):
    self.setup_clean_chain = True
    self.num_nodes = 2

  def generate (self, n, ind = 0):
    """
    Mines n blocks with rewards sent to an address that is in the wallet
    of none of the test nodes. This ensures that balances are stable and
    not changing except through the test.
    """
    # Fixed regtest address owned by neither node's wallet.
    addr = "chirt1qcmdxwpu35mqlzxz3alc9u9ztp22edsuc5s7zzk"
    self.nodes[ind].generatetoaddress (n, addr)

  def buildTxOut (self, addr, amount):
    """
    Builds a CTxOut message that sends the given amount of CHI to the
    given address.
    """
    addrData = self.nodes[0].validateaddress (addr)
    addrScript = hex_str_to_bytes (addrData["scriptPubKey"])
    return CTxOut (int (amount * COIN), addrScript)

  def buildNameUpdate (self, name, value, addr, amount):
    """
    Builds a name_update output with the given data.
    """
    addrData = self.nodes[0].validateaddress (addr)
    addrScript = hex_str_to_bytes (addrData["scriptPubKey"])
    bname = name.encode ("utf-8")
    bvalue = value.encode ("utf-8")
    nameScript = CScript ([OP_NAME_UPDATE, bname, bvalue, OP_2DROP, OP_DROP])
    # Adding two CScript instances together pushes the second operand
    # as data, rather than simply concatenating the scripts. Thus we do
    # the concatenation as raw bytes.
    nameScriptBytes = bytes (nameScript)
    return CTxOut (int (amount * COIN), nameScriptBytes + addrScript)

  def findOutput (self, node, amount):
    """
    Finds an unspent output in the given node with at least the required
    amount. Returns the matching COutPoint as well as its value.
    """
    for u in node.listunspent ():
      if u["amount"] >= amount:
        outp = COutPoint (int (u["txid"], 16), u["vout"])
        return outp, Decimal (u["amount"])
    raise AssertionError ("No output found with value >= %.8f" % amount)

  def parseHexTx (self, txHex):
    """
    Converts a transaction in hex format to a CTransaction instance.
    """
    data = hex_str_to_bytes (txHex)
    tx = CTransaction ()
    tx.deserialize (io.BytesIO (data))
    return tx

  def getBalances (self):
    """
    Returns an array with the balances of both nodes.
    """
    return [self.nodes[i].getbalance () for i in range (2)]

  def assertBalanceChange (self, before, changes):
    """
    Asserts that the balances of the nodes have changed compared to
    the values of "before" in the given amount.
    """
    after = self.getBalances ()
    assert_equal (len (before), len (changes))
    assert_equal (after, [before[i] + changes[i] for i in range (len (before))])

  def getTxFee (self, node, txid):
    """
    Computes the paid transaction fee in the given tx. All inputs to the
    transaction must be in the node's wallet.
    """
    txHex = node.gettransaction (txid)["hex"]
    data = node.decoderawtransaction (txHex)

    # Fee = sum of input values minus sum of output values.
    inSum = Decimal ('0.00000000')
    for vin in data["vin"]:
      prevTxHex = node.gettransaction (vin["txid"])["hex"]
      prevTx = node.decoderawtransaction (prevTxHex)
      inSum += Decimal (prevTx["vout"][vin["vout"]]["value"])

    outSum = Decimal ('0.00000000')
    for vout in data["vout"]:
      outSum += Decimal (vout["value"])

    assert_greater_than (inSum, outSum)
    return inSum - outSum

  def buildBid (self, node, name, value, price):
    """
    Builds a partially signed "bid" offer for updating the name to the
    given value and paying the given price for that. The node is used as
    the bidder (i.e. the price is funded from it).
    The partially signed bid transaction is returned as hex string.
    """
    nameData = node.name_show (name)
    addr = nameData["address"]
    namePrevOut = node.gettxout (nameData["txid"], nameData["vout"])
    assert_equal (namePrevOut["scriptPubKey"]["addresses"], [addr])
    nameValue = namePrevOut["value"]

    tx = CTransaction ()
    # Name input: still unsigned, the seller signs it to accept.
    nameOut = COutPoint (int (nameData["txid"], 16), nameData["vout"])
    tx.vin.append (CTxIn (nameOut))
    tx.vout.append (self.buildNameUpdate (name, value, addr, nameValue))
    tx.vout.append (self.buildTxOut (addr, price))

    # Bidder funds the price plus fee and takes the change.
    inp, inValue = self.findOutput (node, price)
    tx.vin.append (CTxIn (inp))
    change = inValue - price - FEE
    assert_greater_than (change, 0)
    changeAddr = node.getnewaddress ()
    tx.vout.append (self.buildTxOut (changeAddr, change))

    txHex = tx.serialize ().hex ()
    signed = node.signrawtransactionwithwallet (txHex)
    assert not signed["complete"]

    return signed["hex"]

  def buildAsk (self, node, name, value, price):
    """
    Builds a partially signed "ask" offer for updating the name as given.
    The problem with prebuilt asks is that the seller does not know
    which inputs the buyer uses to pay. This is solved by signing the
    name input with SINGLE|ANYONECANPAY and sending the ask price
    *into the name*. (It can be recovered later, as the only requirement
    for the locked amount is that it always stays >= 0.01 CHI.)
    The node is the seller, who owns the name.
    Note that this type of order is rather useless for most real-world
    situations of trading game assets (since the name value would need to
    contain a transfer of assets to the seller, which is not known yet).
    There may still be some situations where it can be useful, but it is
    mainly interesting since the same method can be applied for
    "sentinel inputs" as well; the only difference there is that the
    input/output pair created does not involve any names at all.
    """
    nameData = node.name_show (name)
    namePrevOut = node.gettxout (nameData["txid"], nameData["vout"])
    nameValue = namePrevOut["value"]

    addr = node.getnewaddress ()

    tx = CTransaction ()
    nameOut = COutPoint (int (nameData["txid"], 16), nameData["vout"])
    tx.vin.append (CTxIn (nameOut))
    # The ask price is locked into the name output itself.
    tx.vout.append (self.buildNameUpdate (name, value, addr, nameValue + price))

    txHex = tx.serialize ().hex ()
    # SINGLE|ANYONECANPAY: seller only commits to this in/out pair, the
    # buyer may add arbitrary funding inputs and change outputs.
    signed = node.signrawtransactionwithwallet (txHex, [],
                                                "SINGLE|ANYONECANPAY")
    assert signed["complete"]

    return signed["hex"]

  def run_test (self):
    # Mine initial blocks so that both nodes have matured coins and no
    # more are mined for them in the future (so we can check balances).
    self.nodes[0].generate (10)
    self.nodes[1].generate (10)
    self.generate (110, ind=0)

    # Register a name for testing.
    self.nodes[0].name_register ("p/test", "{}")
    self.generate (1, ind=0)

    # Make sure everything is as expected.
    self.sync_blocks ()
    for node in self.nodes:
      info = node.getwalletinfo ()
      assert_equal (info["immature_balance"], 0)

    # Run individual tests.
    self.testBidOffer ()
    self.testAskOffer ()

  def testBidOffer (self):
    self.log.info ("Testing trading by taking a bid offer...")

    # Build the bid transaction.
    name = "p/test"
    newValue = json.dumps ({"data": "bid taken"})
    bid = self.buildBid (self.nodes[1], name, newValue, 10)

    # The seller must not change the name-update value (this will invalidate
    # the signature on the bid).
    wrongValue = json.dumps ({"data": "wrong"})
    addr = self.nodes[0].getnewaddress ()
    tx = self.parseHexTx (bid)
    tx.vout[0] = self.buildNameUpdate (name, wrongValue, addr, 0.01)
    txHex = tx.serialize ().hex ()
    signed = self.nodes[0].signrawtransactionwithwallet (txHex)
    assert not signed["complete"]

    # The seller also must not change the amount he gets.
    tx = self.parseHexTx (bid)
    tx.vout[1].nValue = 20 * COIN
    txHex = tx.serialize ().hex ()
    signed = self.nodes[0].signrawtransactionwithwallet (txHex)
    assert not signed["complete"]

    # Take the bid successfully and verify the expected changes.
    signed = self.nodes[0].signrawtransactionwithwallet (bid)
    assert signed["complete"]
    oldValue = self.nodes[0].name_show (name)["value"]
    assert oldValue != newValue
    before = self.getBalances ()
    self.nodes[0].sendrawtransaction (signed["hex"])
    self.generate (1)
    self.sync_blocks ()
    # Seller gains the price; buyer pays price plus fee.
    self.assertBalanceChange (before, [10, -10 - FEE])
    nameData = self.nodes[0].name_show (name)
    assert nameData["ismine"]
    assert_equal (nameData["value"], newValue)

  def testAskOffer (self):
    self.log.info ("Testing trading by taking an ask offer...")

    # Build the ask transaction.
    price = 10
    name = "p/test"
    newValue = json.dumps ({"data": "ask taken"})
    ask = self.buildAsk (self.nodes[0], name, newValue, price)

    # Complete it by funding properly.
    tx = self.parseHexTx (ask)
    inp, inValue = self.findOutput (self.nodes[1], price)
    tx.vin.append (CTxIn (inp))
    change = inValue - price - FEE
    assert_greater_than (change, 0)
    changeAddr = self.nodes[1].getnewaddress ()
    tx.vout.append (self.buildTxOut (changeAddr, change))
    ask = tx.serialize ().hex ()

    # The transaction should be invalid if the amount received by the seller
    # is changed.
    tx = self.parseHexTx (ask)
    tx.vout[0].nValue = COIN
    txHex = tx.serialize ().hex ()
    signed = self.nodes[1].signrawtransactionwithwallet (txHex)
    assert not signed["complete"]

    # The transaction should be invalid if the name-output script is changed
    # to something else.
    wrongValue = json.dumps ({"data": "wrong"})
    addr = self.nodes[0].getnewaddress ()
    tx = self.parseHexTx (ask)
    tx.vout[0] = self.buildNameUpdate (name, wrongValue, addr, 10.01)
    txHex = tx.serialize ().hex ()
    signed = self.nodes[1].signrawtransactionwithwallet (txHex)
    assert not signed["complete"]

    # Take the ask successfully.
    signed = self.nodes[1].signrawtransactionwithwallet (ask)
    assert signed["complete"]
    oldValue = self.nodes[0].name_show (name)["value"]
    assert oldValue != newValue
    before = self.getBalances ()
    self.nodes[0].sendrawtransaction (signed["hex"])
    self.generate (1)
    self.sync_blocks ()
    nameData = self.nodes[0].name_show (name)
    assert nameData["ismine"]
    assert_equal (nameData["value"], newValue)

    # Recover the locked price and verify wallet balances.
    txid = self.nodes[0].name_update (name, "{}")
    self.generate (1, ind=0)
    feeUpdate = self.getTxFee (self.nodes[0], txid)
    assert_greater_than (0.001, feeUpdate)
    self.assertBalanceChange (before, [10 - feeUpdate, -10 - FEE])
# Script entry point: run the functional test via the framework driver.
if __name__ == '__main__':
  AtomicTradingTest ().main ()
| [
"d@domob.eu"
] | d@domob.eu |
f0d7d985d89cc20f937eb52a76241d951d86e384 | b8a18888062bf7253a1c32761e210bbc4875b62d | /U009_Organized_Window.py | 8111ec4c4aea1891a18a70b924ccee3a209af203 | [] | no_license | yudhastyawan/Tutorial-Python-PyQt5-Basic | 1c993847e2f7ee0e45ce3e33f561370fe778a2af | b1e412e493cc41f04cbcf3f18965f50c3d7f152a | refs/heads/master | 2020-03-18T13:42:50.692765 | 2018-11-03T13:26:37 | 2018-11-03T13:26:37 | 134,802,940 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 827 | py | from PyQt5.QtWidgets import *
from PyQt5.QtCore import *
from PyQt5.QtGui import *
import sys
import time
class Main(QMainWindow):
    """Tutorial main window; the commented flags below toggle window chrome
    (close button, min/max buttons, frameless mode) for experimentation."""

    def __init__(self, parent = None):
        super(Main, self).__init__(parent)
        # self.setWindowFlag(Qt.WindowCloseButtonHint, False)
        # self.setWindowFlag(Qt.WindowMinMaxButtonsHint, False)
        # self.setWindowFlag(Qt.FramelessWindowHint,True)
if __name__ == '__main__':
    App = QApplication(sys.argv)
    # Create and display the splash screen
    splash_pix = QPixmap('tux.png')
    splash = QSplashScreen(splash_pix, Qt.WindowStaysOnTopHint)
    splash.setMask(splash_pix.mask())
    splash.show()
    # Let Qt paint the splash screen before the blocking sleep below.
    App.processEvents()

    # Simulate something that takes time
    time.sleep(2)

    main = Main()
    main.show()
    # Close the splash once the main window is up.
    splash.finish(main)
    sys.exit(App.exec_())
"39240883+yudhastyawan@users.noreply.github.com"
] | 39240883+yudhastyawan@users.noreply.github.com |
5798de8ec0cb78d787b255aa68313cfba3467723 | 10bad3cbbfb5c9105f045527711ca84c6bce0360 | /portal/Jobs/init.py | 4c01327fe3beacad01bb81909a1e5c84722a2549 | [] | no_license | alekhyamanomay/Timesheet_Portal | 9ede4bea74c45ea7263a9e0dbd7a46518a939f62 | 05140ca86c15a08ebb2439ec43cf31f23c72be7d | refs/heads/master | 2023-02-06T19:26:10.820401 | 2020-12-16T06:17:04 | 2020-12-16T06:17:04 | 312,593,779 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,123 | py | import os
import sys
import logging
from logging import Formatter
from logging.handlers import RotatingFileHandler
LOG = logging
def init_logger(app):
    """Attach a rotating file handler to the Flask app's logger.

    Writes to 'remainder.log' under app.config['LOG_DIR'], rotating at
    roughly 2 MB with up to 3 backup files, and sets the logger level to
    DEBUG. The configured logger is also published via the module-level
    LOG variable so other modules can import it.

    Parameters
    ----------
    app : the Flask application (anything with .config and .logger works).
    """
    global LOG
    log_file = os.path.join(app.config['LOG_DIR'], 'remainder.log')
    log_level = logging.DEBUG
    # Plain Formatter pattern: the %(...)s placeholders are interpreted by
    # the logging module itself (the original carried a needless f-prefix,
    # which suggested f-string interpolation that never happened).
    log_format = Formatter('%(asctime)s-%(levelname)s-%(message)s')
    TWO_MEGABYTE = 2_000_000
    file_handler = RotatingFileHandler(filename=log_file, maxBytes=TWO_MEGABYTE, backupCount=3)
    file_handler.setFormatter(log_format)
    app.logger.addHandler(file_handler)
    app.logger.setLevel(log_level)
    LOG = app.logger
    LOG.info('Initialized logger with level %s', log_level)
# --- Module-level bootstrap: runs on import and has side effects ---
# NOTE(review): debug prints left in; consider removing for production.
print("working")
print(LOG)
# Repository root, two levels above the current working directory.
# NOTE(review): "..\.." is a Windows-style relative path — confirm intent.
basepath = os.path.abspath(os.path.join(os.getcwd(), "..\.."))
print(basepath)
sys.path.insert(1, basepath)

# create instance of flask app
from flask import Flask
app = Flask(__name__)
configfile = os.path.abspath(os.path.join(basepath, 'config', 'development.py'))
app.config.from_pyfile(configfile)

# create instance of sql alchemy
import portal.models as models
models.init_app(app)
init_logger(app)
# Push an application context so extensions can be used at import time.
app.app_context().push()
"57211933+unofficialfarooqsheikh@users.noreply.github.com"
] | 57211933+unofficialfarooqsheikh@users.noreply.github.com |
be507621c7e451be5c73f45c1b26ac6457984c09 | 041293fc11f0af8b8d7e3640f5f3a2d221ebf1e7 | /milestone3/sink.py | eaffbd025c6220f67a985e67d13480ec27006a78 | [] | no_license | cearto/pidgeot | a480000de1eaf8dae4a3b830e2348b1066e21c2a | 16343af7f5c27ab2da728c156691f44f5741e8f8 | refs/heads/master | 2020-05-17T23:39:01.227890 | 2013-06-04T12:20:02 | 2013-06-04T12:20:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,037 | py | # audiocom library: Source and sink functions
from common_srcsink import *
import Image
from graphs import *
import binascii
import random
import os
import heapq # for huffman tree
class Sink:
    # Receiver-side sink (Python 2 code): parses the demodulated bit array
    # back into the transmitted payload (text, image, or monotone).
    def __init__(self):
        # no initialization required for sink
        print 'Sink:'

    def process(self, recd_bits):
        # Process the recd_bits to form the original transmitted
        # file.
        # Here recd_bits is the array of bits that was
        # passed on from the receiver. You can assume, that this
        # array starts with the header bits (the preamble has
        # been detected and removed). However, the length of
        # this array could be arbitrary. Make sure you truncate
        # it (based on the payload length as mentioned in
        # header) before converting into a file.
        # If its an image, save it as "rcd-image.png"
        # If its a text, just print out the text
        # Return the received payload for comparison purposes
        # Header layout constants (HEADER_GEN_LEN, HEADER_STATS_LEN, ...)
        # come from common_srcsink via the star-import at the top of the file.
        [srctype, payload_length, padding] = self.read_type_size(recd_bits[:HEADER_GEN_LEN])
        if srctype != SRCTYPE_MON:
            # Non-monotone sources carry Huffman statistics after the
            # general header, followed by the compressed payload.
            stats = self.read_stat(recd_bits[HEADER_GEN_LEN:HEADER_GEN_LEN + HEADER_STATS_LEN])
            rcd_payload = self.huffman_decode(stats, recd_bits[HEADER_GEN_LEN + HEADER_STATS_LEN:HEADER_GEN_LEN + HEADER_STATS_LEN + payload_length], padding)
        else:
            # Monotone payload is uncompressed; it starts right after the
            # (shorter) header.
            rcd_payload = recd_bits[HEADER_GEN_LEN - PADDING_BITS:HEADER_GEN_LEN - PADDING_BITS + payload_length]
        print rcd_payload, len(rcd_payload)
        print '\tRecd ', len(recd_bits) - HEADER_GEN_LEN, ' data bits'
        if srctype == SRCTYPE_TXT:
            print '\tText recd: ', self.bits2text(rcd_payload)
        elif srctype == SRCTYPE_IMG:
            self.image_from_bits(rcd_payload, "rcd-image.png")
        return rcd_payload

    def huffman_decode(self, stats, bits, padding):
        # Greedy prefix-code decode: grow the candidate key bit by bit until
        # it matches an entry in the reverse lookup table.
        # print "len of undecoded bits", len(bits)
        mapping = huffman_reverse_lookup_table(stats)
        # print "huffman_decode lookup table", mapping
        decoded_str = ''
        i = 0
        while i < len(bits):
            key = str(bits[i])
            i = i + 1
            while i < len(bits) and key not in mapping:
                key = key + str(bits[i])
                i = i + 1
            # NOTE(review): if the trailing bits never match a codeword this
            # raises KeyError — assumed not to happen for valid input.
            decoded_str = decoded_str + mapping[key]
        # Strip the pad bits that were added to fill the last symbol.
        decoded_str = decoded_str[:len(decoded_str) - padding]
        # print "huffman_decode bits, len", decoded_str, len(decoded_str)
        decoded_bits = list(decoded_str)
        decoded_bits = [int(b) for b in decoded_bits]
        return decoded_bits

    def bits2text(self, bits):
        # Convert the received payload to text (string)
        #array to binary string
        text = ''.join(str(e) for e in bits)
        #binary to hex
        text = "%x" % int(text, 2)
        #hex to ascii
        text = binascii.unhexlify(text)
        return text

    def image_from_bits(self, bits,filename):
        # Convert the received payload to an image and save it
        # No return value required .
        data = ''.join(str(e) for e in bits)
        data = "%x" % int(data, 2)
        # Fixed 32x32 8-bit grayscale ('L') image, as produced by the source.
        imgSize = (32, 32)
        data = binascii.unhexlify(data)
        img = Image.fromstring('L', imgSize, data)
        img.save(filename)
        pass

    def read_stat(self, ext_header):
        # Parse the Huffman statistics header: one STATSIZE-bit frequency
        # per key; zero frequencies are omitted from the result.
        stats = []
        klist = []
        generate_keys(klist)
        for i in xrange(0, len(ext_header), STATSIZE):
            freq_bits = ext_header[i:i+STATSIZE]
            freq_str = str_from_arr(numpy.array(freq_bits))
            freq = int(freq_str, 2)
            if freq > 0:
                tp = (freq, klist[i/STATSIZE])
                stats.append(tp)
        return stats

    def read_type_size(self, header_bits):
        # Given the header bits, compute the payload length
        # and source type (compatible with get_header on source)
        # Bits 0-1: source type; bits 2-17: payload length;
        # bits 18-19: padding (non-monotone only).
        src_str = ''.join(map(str, header_bits[0:2]))
        src_int = int(src_str, 2)
        if src_int == SRCTYPE_MON:
            srctype = SRCTYPE_MON
            srctypestr = 'monotone'
        elif src_int == SRCTYPE_IMG:
            srctype = SRCTYPE_IMG
            srctypestr = 'image'
        elif src_int == SRCTYPE_TXT:
            srctype = SRCTYPE_TXT
            srctypestr = 'text'
        else:
            # NOTE(review): srctype/srctypestr stay unbound in this branch,
            # so the prints below would raise NameError for a corrupt header.
            print "INVALID SRCTYPE"
        payload_str = ''.join(map(str, header_bits[2:18]))
        payload_length = int(payload_str, 2)
        print '\tRecd header: ', header_bits
        print '\tSource type: ', srctypestr
        print '\tLength from header: ', payload_length
        if src_int != SRCTYPE_MON:
            padding_str = ''.join(map(str, header_bits[18:20]))
            padding = int(padding_str, 2)
            print '\tPadding: ', padding
        else:
            padding = 0
        return srctype, payload_length, padding
| [
"itsmaxine@gmail.com"
] | itsmaxine@gmail.com |
8661ca4dd6a72591400f8f8aab510c0a5d703243 | 40dc3f774f264f3c2d41bbd0c9cf9e07459ee000 | /Code/Python/Factorial.py | 989f30fd257fce02853f3f02e67929ac074ac820 | [
"LicenseRef-scancode-free-unknown",
"MIT"
] | permissive | krishna-NIT/Algo-Tree | 8a5f66b5baef0b7b38a22c41fe090cc9754d8a67 | 1cdb2c3682c6ab85ae8af0b57f42697c15a10554 | refs/heads/main | 2023-05-08T23:39:00.964136 | 2021-05-31T04:07:24 | 2021-05-31T04:07:24 | 371,605,115 | 3 | 0 | MIT | 2021-05-28T06:38:42 | 2021-05-28T06:38:42 | null | UTF-8 | Python | false | false | 604 | py | '''
Factorial function instruct to multiply all whole number from input number down to 1.
The formula for n factorial can be defined as n! = n×(n−1)!
Factorial zero is defined as equal to 1
'''
#This is a recursive function to find the factorial of an integer
def factorial(num):
    """Return num! computed recursively, with 0! defined as 1."""
    return 1 if num == 0 else num * factorial(num - 1)
# Read an integer from stdin and print its factorial.
num = int(input("Input: "))
print('Output:',factorial(num))
'''
Test cases:
Input: 7
Output: 5040
Input: 0
Output: 1
Time complexity: O(n)
Space Complexity: O(1)
'''
| [
"noreply@github.com"
] | noreply@github.com |
5c5db34ac82587b820a9710f2e8318425496725b | e4b24fe980b1639a2dc73fe76fe38c1c920b8fc7 | /zoopy/models/plan.py | 7b8090836a21717951d8ff8d6a9580def6a6f0b5 | [] | no_license | andersonqueiroz/zoopy | 8f16e6d03974708af0c2726df9bae76ce5f35f00 | d43b14aa6814aec132622915f6c54cd0630ad090 | refs/heads/master | 2021-10-13T12:56:10.689381 | 2021-09-30T20:52:24 | 2021-09-30T20:52:24 | 239,834,336 | 1 | 1 | null | 2021-09-30T20:52:47 | 2020-02-11T18:27:57 | Python | UTF-8 | Python | false | false | 943 | py | from zoopy.utils import get, put, post, delete, get_marketplace_id
from zoopy.models import marketplace
BASE_MODEL_URL = '/plans'
def get_full_url():
    # Base path for the plans resource, relative to the marketplace URL.
    return BASE_MODEL_URL
def list(params={}, is_beta=False):
    """List plans of the current marketplace (GET /plans).

    Note: this module-level name shadows the builtin ``list``; kept for
    API compatibility. Uses get_full_url() like the other endpoints for
    consistency (same value as BASE_MODEL_URL).
    """
    url = f'{marketplace.get_full_url()}{get_full_url()}'
    return get(url, params=params, is_beta=is_beta)
def details(plan_id, is_beta=False):
    """Fetch a single plan by id (GET /plans/{id}).

    Uses get_full_url() like the other endpoints for consistency
    (same value as BASE_MODEL_URL).
    """
    url = f'{marketplace.get_full_url()}{get_full_url()}/{plan_id}'
    return get(url, is_beta=is_beta)
def create(params, is_beta=False):
    # Create a new plan (POST /plans) with the given payload.
    url = f'{marketplace.get_full_url()}{get_full_url()}'
    return post(end_point=url, data=params, is_beta=is_beta)
def update(plan_id, params, is_beta=False):
    # Update an existing plan (PUT /plans/{id}) with the given payload.
    url = f'{marketplace.get_full_url()}{get_full_url()}/{plan_id}'
    return put(end_point=url, data=params, is_beta=is_beta)
def remove(plan_id, is_beta=False):
    """Delete a plan by id (DELETE /plans/{id}).

    Uses get_full_url() like the other endpoints for consistency
    (same value as BASE_MODEL_URL).
    """
    url = f'{marketplace.get_full_url()}{get_full_url()}/{plan_id}'
    return delete(url, is_beta=is_beta)
| [
"andersonkeiroz@gmail.com"
] | andersonkeiroz@gmail.com |
792e20b7f15bf1b906999ff8891ac9c607365a3d | f22a8078d87b873235c458ed0593c6eb3e6807a2 | /apps/flujo/admin.py | 907fdb39ceca793901f847228b9fd5d8b1e230a9 | [] | no_license | nabilchamas/is2 | d5d9bb393facddcb0a68f8e9f96d08b76dc9cb35 | bd4e55661f7897d2294a27ce240c044192385102 | refs/heads/master | 2020-04-18T21:13:08.062527 | 2015-06-19T21:40:04 | 2015-06-19T21:40:04 | 33,631,498 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 139 | py | from django.contrib import admin
from apps.flujo.models import Flujo, Actividad
admin.site.register(Flujo)
admin.site.register(Actividad)
| [
"nabilchamasi@gmail.com"
] | nabilchamasi@gmail.com |
9d2ecf89e34e3fcd68ce945948faae0d1b839664 | 1bc67a91d85a7106106ca31307ef9ee93f1d1a20 | /src/py/flwr/server/strategy/dpfedavg_fixed.py | 43dc2249736e6cde6e8488d6afaf39ac3b2c9ab4 | [
"Apache-2.0"
] | permissive | adap/flower | 4915d143c674eb675504d585e1e90ed06833812f | 55be690535e5f3feb33c888c3e4a586b7bdbf489 | refs/heads/main | 2023-08-17T01:18:12.168723 | 2023-08-16T17:17:48 | 2023-08-16T17:17:48 | 241,095,326 | 2,999 | 658 | Apache-2.0 | 2023-09-14T15:43:22 | 2020-02-17T11:51:29 | Python | UTF-8 | Python | false | false | 6,995 | py | # Copyright 2020 Adap GmbH. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""DP-FedAvg [McMahan et al., 2018] strategy.
Paper: arxiv.org/pdf/1710.06963.pdf
"""
from typing import Dict, List, Optional, Tuple, Union
from flwr.common import EvaluateIns, EvaluateRes, FitIns, FitRes, Parameters, Scalar
from flwr.common.dp import add_gaussian_noise
from flwr.common.parameter import ndarrays_to_parameters, parameters_to_ndarrays
from flwr.server.client_manager import ClientManager
from flwr.server.client_proxy import ClientProxy
from flwr.server.strategy.strategy import Strategy
class DPFedAvgFixed(Strategy):
"""Wrapper for configuring a Strategy for DP with Fixed Clipping."""
# pylint: disable=too-many-arguments,too-many-instance-attributes
def __init__(
self,
strategy: Strategy,
num_sampled_clients: int,
clip_norm: float,
noise_multiplier: float = 1,
server_side_noising: bool = True,
) -> None:
super().__init__()
self.strategy = strategy
# Doing fixed-size subsampling as in https://arxiv.org/abs/1905.03871.
self.num_sampled_clients = num_sampled_clients
if clip_norm <= 0:
raise Exception("The clipping threshold should be a positive value.")
self.clip_norm = clip_norm
if noise_multiplier < 0:
raise Exception("The noise multiplier should be a non-negative value.")
self.noise_multiplier = noise_multiplier
self.server_side_noising = server_side_noising
def __repr__(self) -> str:
"""Compute a string representation of the strategy."""
rep = "Strategy with DP with Fixed Clipping enabled."
return rep
def _calc_client_noise_stddev(self) -> float:
return float(
self.noise_multiplier * self.clip_norm / (self.num_sampled_clients ** (0.5))
)
def initialize_parameters(
self, client_manager: ClientManager
) -> Optional[Parameters]:
"""Initialize global model parameters using given strategy."""
return self.strategy.initialize_parameters(client_manager)
def configure_fit(
self, server_round: int, parameters: Parameters, client_manager: ClientManager
) -> List[Tuple[ClientProxy, FitIns]]:
"""Configure the next round of training incorporating Differential Privacy (DP).
Configuration of the next training round includes information related to DP,
such as clip norm and noise stddev.
Parameters
----------
server_round : int
The current round of federated learning.
parameters : Parameters
The current (global) model parameters.
client_manager : ClientManager
The client manager which holds all currently connected clients.
Returns
-------
fit_configuration : List[Tuple[ClientProxy, FitIns]]
A list of tuples. Each tuple in the list identifies a `ClientProxy` and the
`FitIns` for this particular `ClientProxy`. If a particular `ClientProxy`
is not included in this list, it means that this `ClientProxy`
will not participate in the next round of federated learning.
"""
additional_config = {"dpfedavg_clip_norm": self.clip_norm}
if not self.server_side_noising:
additional_config[
"dpfedavg_noise_stddev"
] = self._calc_client_noise_stddev()
client_instructions = self.strategy.configure_fit(
server_round, parameters, client_manager
)
for _, fit_ins in client_instructions:
fit_ins.config.update(additional_config)
return client_instructions
def configure_evaluate(
self, server_round: int, parameters: Parameters, client_manager: ClientManager
) -> List[Tuple[ClientProxy, EvaluateIns]]:
"""Configure the next round of evaluation using the specified strategy.
Parameters
----------
server_round : int
The current round of federated learning.
parameters : Parameters
The current (global) model parameters.
client_manager : ClientManager
The client manager which holds all currently connected clients.
Returns
-------
evaluate_configuration : List[Tuple[ClientProxy, EvaluateIns]]
A list of tuples. Each tuple in the list identifies a `ClientProxy` and the
`EvaluateIns` for this particular `ClientProxy`. If a particular
`ClientProxy` is not included in this list, it means that this
`ClientProxy` will not participate in the next round of federated
evaluation.
"""
return self.strategy.configure_evaluate(
server_round, parameters, client_manager
)
def aggregate_fit(
    self,
    server_round: int,
    results: List[Tuple[ClientProxy, FitRes]],
    failures: List[Union[Tuple[ClientProxy, FitRes], BaseException]],
) -> Tuple[Optional[Parameters], Dict[str, Scalar]]:
    """Aggregate training results using unweighted aggregation.

    Any failure aborts the round (returns ``(None, {})``): the DP noise
    calibration presumably assumes contributions from every sampled
    client, so a partial round cannot be aggregated safely.

    Parameters and return value mirror the wrapped strategy's
    ``aggregate_fit``.
    """
    if failures:
        return None, {}
    # Forcing unweighted aggregation, as in https://arxiv.org/abs/1905.03871.
    for _, fit_res in results:
        fit_res.num_examples = 1
        # Bug fix: only noise updates on the server when clients were NOT
        # asked to noise their own. `configure_fit` ships
        # `dpfedavg_noise_stddev` to clients exactly when
        # `server_side_noising` is False, so noising here unconditionally
        # would add Gaussian noise twice in that mode.
        if self.server_side_noising:
            fit_res.parameters = ndarrays_to_parameters(
                add_gaussian_noise(
                    parameters_to_ndarrays(fit_res.parameters),
                    self._calc_client_noise_stddev(),
                )
            )
    return self.strategy.aggregate_fit(server_round, results, failures)
def aggregate_evaluate(
    self,
    server_round: int,
    results: List[Tuple[ClientProxy, EvaluateRes]],
    failures: List[Union[Tuple[ClientProxy, EvaluateRes], BaseException]],
) -> Tuple[Optional[float], Dict[str, Scalar]]:
    """Aggregate evaluation losses using the given strategy.

    Pure delegation: evaluation results carry no DP state to adjust.
    """
    return self.strategy.aggregate_evaluate(server_round, results, failures)
def evaluate(
    self, server_round: int, parameters: Parameters
) -> Optional[Tuple[float, Dict[str, Scalar]]]:
    """Evaluate model parameters using an evaluation function from the strategy.

    Pure delegation to the wrapped strategy's centralized evaluation.
    """
    return self.strategy.evaluate(server_round, parameters)
| [
"noreply@github.com"
] | noreply@github.com |
51ef475926c1fe3bb2fb1c490a227bcaa3740d0b | 21bd66da295baa48603ca9f169d870792e9db110 | /cgp/utils/failwith.py | 3647d91543301dbab771107a4c9d604d07544190 | [] | no_license | kristto/cgptoolbox | e6c01ccea1da06e35e26ffbca227258023377e48 | 8bbaf462e9c1320f237dd3c1ae6d899e1d01ade7 | refs/heads/master | 2021-01-16T20:38:45.097722 | 2012-03-01T09:18:10 | 2012-03-01T09:18:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,964 | py | """Modify a function to return a default value in case of error."""
from functools import wraps
import logging
from contextlib import contextmanager
import numpy as np
class NullHandler(logging.Handler):
    """A no-op logging handler.

    Attached below so that library users who never configure logging do not
    see 'No handlers could be found' warnings (pre-2.7 stdlib lacks this).
    """
    def emit(self, record):
        pass

# Module-level logger shared by all decorators in this file.
logger = logging.getLogger("failwith")
logger.addHandler(NullHandler())
@contextmanager
def silenced(logger, level=logging.CRITICAL):
    """
    Silence a logger for the duration of the 'with' block.

    >>> logger.error("Error as usual.")
    Error as usual.
    >>> with silenced(logger):
    ...     logger.error("Silenced error.")
    >>> logger.error("Back to normal.")
    Back to normal.

    You may specify a different temporary level if you like.

    >>> with silenced(logger, logging.INFO):
    ...     logger.error("Breaking through the silence.")
    Breaking through the silence.
    """
    oldlevel = logger.level
    try:
        logger.setLevel(level)
        yield logger
    finally:
        # Always restore the caller's level, even if the block raised.
        logger.setLevel(oldlevel)
def nans_like(x):
    """
    Returns an array of nans with the same shape and type as a given array.

    This also works recursively with tuples, lists or dicts whose leaf nodes
    are arrays.

    >>> x = np.arange(3.0)
    >>> nans_like(x)
    array([ nan,  nan,  nan])
    >>> y = x.view([(k, float) for k in "a", "b", "c"])
    >>> nans_like(y)
    array([(nan, nan, nan)], dtype=[('a', '<f8'), ('b', '<f8'), ('c', '<f8')])
    >>> nans_like(y.view(np.recarray))
    rec.array([(nan, nan, nan)], dtype=[('a', '<f8'), ('b', '<f8'), ('c', '<f8')])

    Tuple, list, dict.

    >>> nans_like((x, y))
    [array([ nan,  nan,  nan]), array([(nan, nan, nan)],
        dtype=[('a', '<f8'), ('b', '<f8'), ('c', '<f8')])]
    >>> nans_like([x, y])
    [array([ nan,  nan,  nan]), array([(nan, nan, nan)],
        dtype=[('a', '<f8'), ('b', '<f8'), ('c', '<f8')])]
    >>> nans_like(dict(a=x, b=y))
    {'a': array([ nan,  nan,  nan]), 'b': array([(nan, nan, nan)],
        dtype=[('a', '<f8'), ('b', '<f8'), ('c', '<f8')])}

    Nested list and dict.

    >>> nans_like([x, [x, y]])
    [array([ nan,  nan,  nan]), [array([ nan,  nan,  nan]), array([(nan, nan, nan)],
        dtype=[('a', '<f8'), ('b', '<f8'), ('c', '<f8')])]]
    >>> nans_like(dict(a=x, b=dict(c=x, d=y)))
    {'a': array([ nan,  nan,  nan]),
    'b': {'c': array([ nan,  nan,  nan]), 'd': array([(nan, nan, nan)],
        dtype=[('a', '<f8'), ('b', '<f8'), ('c', '<f8')])}}

    Note that there is no nan for integers.

    >>> nans_like((1, 2, 3))
    Traceback (most recent call last):
    AssertionError: nan is only defined for float types, not int...

    This works because the 1.0 makes Numpy interpret the tuple as a float array.

    >>> nans_like((1.0, 2, 3))
    array([ nan,  nan,  nan])
    """
    try:
        # Mapping case: recurse into each value. Duck-typed on .iteritems(),
        # so this branch is Python 2 only (like the rest of this module).
        return dict((k, nans_like(v)) for k, v in x.iteritems())
    except AttributeError:
        try:
            xc = np.copy(x)
            try:
                # Preserve ndarray subclasses (e.g. recarray) when possible.
                xc = x.__array_wrap__(xc)
            except AttributeError:
                pass
            msg = "nan is only defined for float types, not %s" % xc.dtype
            assert not xc.dtype.kind == "i", msg
            # Fill the underlying buffer with nans through a float view, so
            # structured dtypes are handled too.
            # NOTE(review): np.float was removed in NumPy 1.24; this module
            # targets Python 2 / legacy NumPy.
            xc.view(np.float).fill(np.nan)
            return xc
        except TypeError:
            # Not array-like (e.g. heterogeneous sequence): recurse per item.
            return [nans_like(i) for i in x]
def failwith(default=None):
    """
    Modify a function to return a default value in case of error.

    >>> @failwith("Default")
    ... def f(x):
    ...     raise Exception("Failure")
    >>> f(1)
    'Default'

    Exceptions are logged, but the default handler doesn't do anything.
    This example adds a handler so exceptions are logged to :data:`sys.stdout`.

    >>> import sys
    >>> logger.addHandler(logging.StreamHandler(sys.stdout))
    >>> f(2)
    Failure in <function f at 0x...>. Default: Default. args = (2,), kwargs = {}
    Traceback (most recent call last):...
    Exception: Failure
    'Default'
    >>> del logger.handlers[-1] # Removing the handler added by the doctest
    """
    def decorator(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            try:
                result = func(*args, **kwargs)
            except Exception, exc:
                # Log with full traceback, then swallow and substitute.
                msg = "Failure in %s. Default: %s. args = %s, kwargs = %s"
                logger.exception(msg, func, default, args, kwargs)
                result = default
            return result
        return wrapper
    return decorator
def failwithnanlikefirst(func):
    """
    Like :func:`failwith`, but the default is set to `nan` + result on first evaluation.

    >>> @failwithnanlikefirst
    ... def f(x):
    ...     return 1.0 / x
    >>> f(1)
    1.0
    >>> f(0)
    array(nan)

    Exceptions are logged, but the default handler doesn't do anything.
    This example adds a handler so exceptions are logged to :data:`sys.stdout`.

    >>> import sys
    >>> logger.addHandler(logging.StreamHandler(sys.stdout))
    >>> f(0)
    Failure in <function f at 0x...>. Default: nan. args = (0,), kwargs = {}
    Traceback (most recent call last):...
    ZeroDivisionError: float division...
    array(nan)

    If the first evaluation fails, the exception is logged with an explanatory
    note, then re-raised.

    >>> @failwithnanlikefirst
    ... def g():
    ...     raise Exception("Failure")
    >>> try:
    ...     g()
    ... except Exception, exc:
    ...     print "Caught exception:", exc
    <function g at 0x...> failed on first evaluation, or result could not be
    interpreted as array of float. args = (), kwargs = {}
    Traceback (most recent call last):...Exception: Failure
    Caught exception: Failure
    """
    d = {} # mutable container to store the default between evaluations
    @wraps(func)
    def wrapper(*args, **kwargs):
        if not d:
            # First evaluation: must succeed so we can derive the default.
            try:
                result = func(*args, **kwargs)
                d["default"] = nans_like(result)
            except Exception, exc:
                msg = "%s failed on first evaluation, "
                msg += "or result could not be interpreted as array of float. "
                msg += "args = %s, kwargs = %s"
                logger.exception(msg, func, args, kwargs)
                # No default exists yet, so the failure must propagate.
                raise
        else:
            # Not first evaluation, so default is defined
            try:
                result = func(*args, **kwargs)
            except Exception, exc:
                msg = "Failure in %s. Default: %s. args = %s, kwargs = %s"
                logger.exception(msg, func, d["default"], args, kwargs)
                result = d["default"]
        return result
    return wrapper
def failwithnan_asfor(*args, **kwargs):
    """
    Like :func:`failwith`, but the default is set to `nans_like(func(*args, **kwargs))`.

    >>> @failwithnan_asfor(2.0, 3)
    ... def f(value, length):
    ...     return [value] * length
    >>> f()
    array([ nan,  nan,  nan])
    """
    def decorator(func):
        # Probe the function once with the sample arguments; the nan-shaped
        # result becomes the fallback value on later failures.
        fallback = nans_like(func(*args, **kwargs))
        return failwith(fallback)(func)
    return decorator
def failwithdefault_asfor(*args, **kwargs):
    """
    Like :func:`failwith`, but the default is set to `func(*args, **kwargs)`.

    >>> @failwithdefault_asfor(2, 3)
    ... def f(value, length):
    ...     return [value] * length
    >>> f()
    [2, 2, 2]
    """
    def decorator(func):
        # One probe call with the sample arguments yields the fallback.
        fallback = func(*args, **kwargs)
        return failwith(fallback)(func)
    return decorator
if __name__ == "__main__":
    # Run the doctests above; whitespace normalization and '...' ellipses are
    # required because the expected outputs include addresses and tracebacks.
    import doctest
    doctest.testmod(optionflags=doctest.NORMALIZE_WHITESPACE | doctest.ELLIPSIS)
| [
"jonovik@gmail.com"
] | jonovik@gmail.com |
ece95817e254bad4af662002d4394d82856327f1 | 4fc3604c2e2f5e54b7cf19578c7013f0525e2ee7 | /generators/utils/3bitPathDist.py | 056413a2dc87b7de99080b49067fb9dd9813c160 | [] | no_license | nbermudezs/bracketology | d4fd4aad1052505dd3287310cdd5b9df97b75705 | 86ef2d495842350120e015260fe89cf0f12a392c | refs/heads/master | 2020-05-05T05:57:27.290588 | 2019-07-08T14:39:40 | 2019-07-08T14:39:40 | 179,770,315 | 0 | 0 | null | 2019-06-30T02:38:39 | 2019-04-06T00:06:35 | Python | UTF-8 | Python | false | false | 3,639 | py | from __future__ import print_function
__author__ = "Nestor Bermudez"
__license__ = "MIT"
__version__ = "1.0.0"
__email__ = "nab6@illinois.edu"
__status__ = "Development"
import json
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import rpy2
import rpy2.robjects as robjects
import seaborn as sns
from scipy.stats import chisquare
# Bit indices (within each 15-bit regional bracket vector) forming a 3-game
# "path": a first-round game, the following round's game, and the next game
# after that. NOTE(review): exact round semantics inferred from the index
# layout (0-7 round 1, 8-11 round 2, 12-13 round 3, 14 regional final) —
# confirm against the bracket vector encoding.
paths = {
    'P_S1': [0, 8, 12],
    'P_S8': [1, 8, 12],
    'P_S5': [2, 9, 12],
    'P_S4': [3, 9, 12],
    'P_S6': [4, 10, 13],
    'P_S3': [5, 10, 13],
    'P_S7': [6, 11, 13],
    'P_S2': [7, 11, 13],
    'P_R2_1': [8, 12, 14],
    'P_R2_2': [9, 12, 14],
    'P_R2_3': [10, 13, 14],
    'P_R2_4': [11, 13, 14]
}

# Global plotting defaults for every figure produced below.
plt.style.use('seaborn-white')
sns.set_palette('colorblind')
def load_brackets(fmt='TTT'):
    """Read the historical bracket pool for the given bit-format from disk."""
    with open('allBrackets{}.json'.format(fmt)) as handle:
        return json.load(handle)['brackets']
def observed_dist(brackets, year, bits):
    """Empirical counts of each 3-bit outcome at positions `bits`.

    Pools all regional 15-bit vectors (four per bracket, first 60 bits)
    from tournaments strictly before `year`, and returns a dict mapping
    every triplet label '000'..'111' to its count (absent combinations
    are filled in with 0).
    """
    history = np.array(
        [list(b['bracket']['fullvector'])
         for b in brackets
         if int(b['bracket']['year']) < year],
        dtype=int,
    )
    # First 60 bits = the four regional sub-brackets, 15 games each.
    regions = history[:, :60].reshape(-1, 15)
    picks = regions[:, bits]
    uniq, freq = np.unique(picks, axis=0, return_counts=True)
    labels = np.apply_along_axis(''.join, 1, uniq.astype(str))
    # Pad so every possible triplet appears in the distribution.
    for combo in ['000', '001', '010', '011', '100', '101', '110', '111']:
        if combo not in labels:
            labels = np.append(labels, combo)
            freq = np.append(freq, 0)
    return {label: count for label, count in zip(labels, freq)}
def expected_dist(brackets, year, bits):
    """Expected triplet counts under an independence model.

    Each of the three bits is modeled as an independent Bernoulli with its
    empirical marginal probability; counts are scaled to the number of
    regional vectors observed before `year` (four per tournament).
    """
    history = np.array(
        [list(b['bracket']['fullvector'])
         for b in brackets
         if int(b['bracket']['year']) < year],
        dtype=int,
    )
    regions = history[:, :60].reshape(-1, 15)
    picks = regions[:, bits]
    marginal_one = np.mean(picks, axis=0)
    # marginal[v][i] = P(bit i == v) under the empirical marginals.
    marginal = [1 - marginal_one, marginal_one]
    n_regions = (year - 1985) * 4
    expected = {}
    for combo in ['000', '001', '010', '011', '100', '101', '110', '111']:
        bit_vals = [int(ch) for ch in combo]
        prob = np.prod([marginal[bit_vals[i]][i] for i in range(3)])
        expected[combo] = n_regions * prob
    return expected
def uniformity_check(observed, expected):
    """Chi-square goodness-of-fit of `observed` against `expected`; prints p."""
    _, p_value = chisquare(observed, expected)
    print('Uniformity chi-square test p-value', p_value)
def plot_dist(brackets, year, bits, name):
    """Plot observed vs independence-expected triplet counts for one path,
    save the bar chart, and print independence/uniformity test p-values."""
    observed = observed_dist(brackets, year, bits)
    expected = expected_dist(brackets, year, bits)
    data = {'Observed': observed, 'Expected (ind)': expected}
    df = pd.DataFrame.from_dict(data)
    df.plot.bar(rot=0)
    # plt.show()
    plt.title('3-bit path value distribution - {}'.format(name))
    plt.savefig('DistPlots/TTT/3bit_path-{}.png'.format(name))
    plt.cla()
    plt.clf()
    values = list(observed.values())
    keys = list(observed.keys())
    # Build an R 2x2x2 contingency array (counts ordered by triplet label)
    # and run an exact independence test via the R 'hypergea' package (rpy2).
    arr = 'array(c{}, dim=c(2, 2, 2))'.format(
        tuple(np.array(values)[np.argsort(keys)].astype(int).tolist()))
    res = robjects.r('library(hypergea); hypergeom.test(' + arr + ")['p.value']")
    p_value = np.array(res[0])[0]
    print('Independence Fisher exact test p-value', p_value)
    # Uniform baseline: 4 regional vectors per past year spread evenly over
    # the 8 possible triplets.
    uniformity_check(list(observed.values()), np.repeat((year - 1985) * 4 / 8, 8))
    print()
    # print('m = array(c{}, dim=c(2, 2, 2))'.format(tuple(np.array(list(observed.values()))[np.argsort(observed.keys())].astype(int).tolist())))
if __name__ == '__main__':
    # Analyze every 3-bit path against brackets from all years before 2019.
    brackets = load_brackets()
    for name, bits in paths.items():
        print('path {}'.format(name))
        plot_dist(brackets, 2019, bits, name)
| [
"nestor.bermudez@agilityfeat.com"
] | nestor.bermudez@agilityfeat.com |
5ae4b198d2a7269a72cc1d693548079756c4fb9b | e16d7d8f60145c68640b25aa7c259618be60d855 | /django_test/webtest/testapp/urls.py | 32d935549071646c1d172c99ccd6ba69db2bd72b | [] | no_license | zongqiqi/mypython | bbe212223002dabef773ee0dbeafbad5986b4639 | b80f3ce6c30a0677869a7b49421a757c16035178 | refs/heads/master | 2020-04-21T07:39:59.594233 | 2017-12-11T00:54:44 | 2017-12-11T00:54:44 | 98,426,286 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 115 | py | from django.conf.urls import url
from . import views
# Route the app root ('' after the include prefix) to the index view.
urlpatterns = [
    url(r'^$', views.index, name='index'),
]
| [
"544136329@qq.com"
] | 544136329@qq.com |
c7cc769036318b5263632ef6db922b0a4ffa72cf | 0533d0ceb5966f7327f40d54bbd17e08e13d36bf | /python/HashMap/Maximum Number of Balloons/Maximum Number of Balloons.py | 485eee52af17f72c857b5f35d3beacd6b25b3591 | [] | no_license | danwaterfield/LeetCode-Solution | 0c6178952ca8ca879763a87db958ef98eb9c2c75 | d89ebad5305e4d1a185b0c6f101a88691602b523 | refs/heads/master | 2023-03-19T01:51:49.417877 | 2020-01-11T14:17:42 | 2020-01-11T14:17:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 262 | py | from collections import Counter
class Solution(object):
    def maxNumberOfBalloons(self, text):
        """Return how many times the word "balloon" can be assembled from
        the letters of `text` (each letter usable once).

        :type text: str
        :rtype: int
        """
        available = Counter(text)
        needed = Counter("balloon")  # b:1, a:1, l:2, o:2, n:1
        # The word count is limited by the scarcest required letter.
        return min(available[ch] // cnt for ch, cnt in needed.items())
| [
"zjuzjj@gmail.com"
] | zjuzjj@gmail.com |
0710b3ada6e27cc9d82d7532b608ce1ab6c6bad7 | f71f9c4736f5279ade23382fc39a6b4f6243bc42 | /finalsalad/wsgi.py | 51de96e59fb786703a467b376f3cc535698f0cfb | [] | no_license | Pancakem/saladmaster | b62143fb41a00e78a0fb6f68c64c8af72956cb89 | 234b388addc45cb74b1021608f888e1efd5d9292 | refs/heads/master | 2020-03-25T08:29:36.398546 | 2018-10-08T10:51:07 | 2018-10-08T10:51:07 | 142,886,081 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 171 | py | import os
from django.core.wsgi import get_wsgi_application
# Point Django at the project settings before constructing the application.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "finalsalad.settings")

# Module-level WSGI callable used by application servers (gunicorn, uwsgi, ...).
application = get_wsgi_application()
| [
"pancakesdeath@protonmail.com"
] | pancakesdeath@protonmail.com |
3bf31dc5541a71a9313fdcce75a2fbb612fe9083 | 41941d61fdb1c2f6b50613de20376ca882678946 | /restaurant/migrations/0038_auto_20200903_1340.py | 0977001525108cdcddd8cf1b3672d34d74571759 | [] | no_license | ankitgadewal/saleor_server | 1000ac6dbfa9fb8fa6b8172a03ceb82a86002f0a | 0c743545ea3567eba7bfc402f53abe5413a564b6 | refs/heads/master | 2022-12-11T01:31:28.518059 | 2020-09-09T09:16:19 | 2020-09-09T09:16:19 | 293,053,275 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 368 | py | # Generated by Django 3.1 on 2020-09-03 08:10
from django.db import migrations
class Migration(migrations.Migration):
    """Rename the Order model's `payment_id` field to `payment`."""

    # Must be applied after the previous restaurant-app migration.
    dependencies = [
        ('restaurant', '0037_auto_20200903_1304'),
    ]

    operations = [
        migrations.RenameField(
            model_name='order',
            old_name='payment_id',
            new_name='payment',
        ),
    ]
| [
"ankitgadewal@hotmail.com"
] | ankitgadewal@hotmail.com |
3b5771126a3de74c7f3d369f13baba24a89456bb | 1b300019417ea1e25c59dd6f00fbffb60ec5a123 | /python/example/run_demo.py | 1bcd9cc24764d75872a145135941ce238fefc7d5 | [
"MIT"
] | permissive | Wendong-Huo/diff_stokes_flow | 9176210b162e9a8c7b9910274fe4c699814fa7d7 | 55eb7c0f3a9d58a50c1a09c2231177b81e0da84e | refs/heads/master | 2023-03-16T13:16:17.028974 | 2020-12-11T03:55:44 | 2020-12-11T03:55:44 | 576,797,332 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,729 | py | import sys
sys.path.append('../')
from pathlib import Path
import numpy as np
from importlib import import_module
import scipy.optimize
import time
import matplotlib.pyplot as plt
from tqdm import tqdm
import pickle
import os
from py_diff_stokes_flow.common.common import print_info, print_ok, print_error, print_warning, ndarray
from py_diff_stokes_flow.common.grad_check import check_gradients
from py_diff_stokes_flow.common.display import export_gif
# Update this dictionary if you would like to add new demos.
# Maps a CLI demo name to the environment implementing it under
# py_diff_stokes_flow.env.
all_demo_names = {
    # ID: (module name, class name).
    'amplifier': ('amplifier_env_2d', 'AmplifierEnv2d'),
    'flow_averager': ('flow_averager_env_3d', 'FlowAveragerEnv3d'),
    'superposition_gate': ('superposition_gate_env_3d', 'SuperpositionGateEnv3d'),
    'funnel': ('funnel_env_3d', 'FunnelEnv3d'),
    'fluidic_twister': ('fluidic_twister_env_3d', 'FluidicTwisterEnv3d'),
    'fluidic_switch': ('fluidic_switch_env_3d', 'FluidicSwitchEnv3d'),
}
if __name__ == '__main__':
    # Pipeline: parse config -> random global search -> L-BFGS local
    # optimization -> plot progress -> render/export animations.

    # Input check.
    if len(sys.argv) != 2:
        print_error('Usage: python run_demo.py [demo_name]')
        sys.exit(0)
    demo_name = sys.argv[1]
    assert demo_name in all_demo_names

    # Hyperparameters which are loaded from the config file
    # (one 'key: value' pair per line).
    config_file_name = 'config/{}.txt'.format(demo_name)
    config = {}
    with open(config_file_name, 'r') as f:
        lines = f.readlines()
        for line in lines:
            key, val = line.strip().split(':')
            key = key.strip()
            val = val.strip()
            config[key] = val
    seed = int(config['seed'])
    sample_num = int(config['sample_num'])
    solver = config['solver']
    rel_tol = float(config['rel_tol'])
    max_iter = int(config['max_iter'])
    enable_grad_check = config['enable_grad_check'] == 'True'
    spp = int(config['spp'])
    fps = int(config['fps'])

    # Load class.
    module_name, env_name = all_demo_names[demo_name]
    Env = getattr(import_module('py_diff_stokes_flow.env.{}'.format(module_name)), env_name)
    env = Env(seed, demo_name)

    # Global search: randomly sample initial guesses and pick the best.
    samples = []
    losses = []
    best_sample = None
    best_loss = np.inf
    print_info('Randomly sampling initial guesses...')
    for _ in tqdm(range(sample_num)):
        x = env.sample()
        loss, _ = env.solve(x, False, { 'solver': solver })
        losses.append(loss)
        samples.append(ndarray(x).copy())
        if loss < best_loss:
            best_loss = loss
            best_sample = np.copy(x)
    # Mean sample loss is used to normalize the objective during optimization.
    unit_loss = np.mean(losses)
    pickle.dump((losses, samples, unit_loss, best_sample), open('{}/sample.data'.format(demo_name), 'wb'))

    # Load from file.
    losses, _, unit_loss, best_sample = pickle.load(open('{}/sample.data'.format(demo_name), 'rb'))
    print_info('Randomly sampled {:d} initial guesses.'.format(sample_num))
    print_info('Loss (min, max, mean): ({:4f}, {:4f}, {:4f}).'.format(
        np.min(losses), np.max(losses), np.mean(losses)
    ))
    print_info('Normalized loss (min, max, mean): ({:4f}, {:4f}, {:4f}).'.format(
        np.min(losses) / unit_loss, np.max(losses) / unit_loss, 1
    ))

    # Local optimization: run L-BFGS from best_sample.
    x_init = np.copy(best_sample)
    bounds = scipy.optimize.Bounds(env.lower_bound(), env.upper_bound())
    def loss_and_grad(x):
        # Objective for scipy: normalized loss and gradient at x.
        t_begin = time.time()
        loss, grad, _ = env.solve(x, True, { 'solver': solver })
        # Normalize loss and grad.
        loss /= unit_loss
        grad /= unit_loss
        t_end = time.time()
        print('loss: {:3.6e}, |grad|: {:3.6e}, time: {:3.6f}s'.format(loss, np.linalg.norm(grad), t_end - t_begin))
        return loss, grad
    if enable_grad_check:
        print_info('Checking gradients...')
        # Sanity check gradients.
        success = check_gradients(loss_and_grad, x_init)
        if success:
            print_ok('Gradient check succeeded.')
        else:
            print_error('Gradient check failed.')
            sys.exit(0)
    # File index + 1 = len(opt_history).
    loss, grad = loss_and_grad(x_init)
    opt_history = [(x_init.copy(), loss, grad.copy())]
    pickle.dump(opt_history, open('{}/{:04d}.data'.format(demo_name, 0), 'wb'))
    def callback(x):
        # Called by scipy after each iteration: record and checkpoint history.
        loss, grad = loss_and_grad(x)
        global opt_history
        cnt = len(opt_history)
        print_info('Summary of iteration {:4d}'.format(cnt))
        opt_history.append((x.copy(), loss, grad.copy()))
        print_info('loss: {:3.6e}, |grad|: {:3.6e}, |x|: {:3.6e}'.format(
            loss, np.linalg.norm(grad), np.linalg.norm(x)))
        # Save data to the folder.
        pickle.dump(opt_history, open('{}/{:04d}.data'.format(demo_name, cnt), 'wb'))
    results = scipy.optimize.minimize(loss_and_grad, x_init.copy(), method='L-BFGS-B', jac=True, bounds=bounds,
        callback=callback, options={ 'ftol': rel_tol, 'maxiter': max_iter})
    if not results.success:
        print_warning('Local optimization fails to reach the optimal condition and will return the last solution.')
    print_info('Data saved to {}/{:04d}.data.'.format(demo_name, len(opt_history) - 1))

    # Load results from demo_name: scan for the highest-numbered checkpoint.
    cnt = 0
    while True:
        data_file_name = '{}/{:04d}.data'.format(demo_name, cnt)
        if not os.path.exists(data_file_name):
            cnt -= 1
            break
        cnt += 1
    data_file_name = '{}/{:04d}.data'.format(demo_name, cnt)
    print_info('Loading data from {}.'.format(data_file_name))
    opt_history = pickle.load(open(data_file_name, 'rb'))

    # Plot the optimization progress (loss and gradient norm per iteration).
    plt.rc('pdf', fonttype=42)
    plt.rc('font', size=18)
    plt.rc('axes', titlesize=18)
    plt.rc('axes', labelsize=18)
    fig = plt.figure(figsize=(18, 12))
    ax_loss = fig.add_subplot(121)
    ax_grad = fig.add_subplot(122)
    ax_loss.set_position((0.12, 0.2, 0.33, 0.6))
    iterations = np.arange(len(opt_history))
    ax_loss.plot(iterations, [l for _, l, _ in opt_history], color='tab:red')
    ax_loss.set_xlabel('Iteration')
    ax_loss.set_ylabel('Loss')
    ax_loss.set_yscale('log')
    ax_loss.grid(True, which='both')
    ax_grad.set_position((0.55, 0.2, 0.33, 0.6))
    # eps keeps the log-scale plot finite when the gradient vanishes.
    ax_grad.plot(iterations, [np.linalg.norm(g) + np.finfo(np.float).eps for _, _, g in opt_history],
        color='tab:green')
    ax_grad.set_xlabel('Iteration')
    ax_grad.set_ylabel('|Gradient|')
    ax_grad.set_yscale('log')
    ax_grad.grid(True, which='both')
    plt.show()
    fig.savefig('{}/progress.pdf'.format(demo_name))

    # Render the results, interpolating `fps` frames between iterates.
    print_info('Rendering optimization history in {}/'.format(demo_name))
    # 000k.png renders opt_history[k], which is also the last element in 000k.data.
    cnt = len(opt_history)
    for k in range(cnt - 1):
        xk0, _, _ = opt_history[k]
        xk1, _, _ = opt_history[k + 1]
        for i in range(fps):
            t = i / fps
            xk = (1 - t) * xk0 + t * xk1
            env.render(xk, '{:04d}.png'.format(k * fps + i), { 'solver': solver, 'spp': spp })
            print_info('{}/mode_[0-9]*/{:04d}.png is ready.'.format(demo_name, k * fps + i))
    env.render(opt_history[-1][0], '{:04d}.png'.format((cnt - 1) * fps), { 'solver': solver, 'spp': spp })
    print_info('{}/mode_[0-9]*/{:04d}.png is ready.'.format(demo_name, (cnt - 1) * fps))

    # Get mode number: export one gif per rendered mode folder.
    mode_num = 0
    while True:
        mode_folder = Path(demo_name) / 'mode_{:04d}'.format(mode_num)
        if not mode_folder.exists():
            break
        export_gif(mode_folder, '{}_{:04d}.gif'.format(demo_name, mode_num), fps=fps)
        print_info('Video {}_{:04d}.gif is ready.'.format(demo_name, mode_num))
mode_num += 1 | [
"taodu@csail.mit.edu"
] | taodu@csail.mit.edu |
4c005fbd4e54f24c9b1a2f8d6364a336338e0c60 | 0fd5793e78e39adbfe9dcd733ef5e42390b8cc9a | /python3/19_Concurrency_and_Parallel_Programming/02_multiprocessing/example2.py | b775c924c64d2785f7f994c9c8c606e50a2ae97e | [] | no_license | udhayprakash/PythonMaterial | 3ea282ceb4492d94d401e3bc8bad9bf6e9cfa156 | e72f44e147141ebc9bf9ec126b70a5fcdbfbd076 | refs/heads/develop | 2023-07-08T21:07:33.154577 | 2023-07-03T10:53:25 | 2023-07-03T10:53:25 | 73,196,374 | 8 | 5 | null | 2023-05-26T09:59:17 | 2016-11-08T14:55:51 | Jupyter Notebook | UTF-8 | Python | false | false | 1,073 | py | import collections
import multiprocessing as mp
Msg = collections.namedtuple("Msg", ["event", "args"])
class BaseProcess(mp.Process):
"""A process backed by an internal queue for simple one-way message passing."""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.queue = mp.Queue()
def send(self, event, *args):
"""Puts the event and args as a `Msg` on the queue"""
msg = Msg(event, args)
self.queue.put(msg)
def dispatch(self, msg):
event, args = msg
handler = getattr(self, "do_%s" % event, None)
if not handler:
raise NotImplementedError("Process has no handler for [%s]" % event)
handler(*args)
def run(self):
while True:
msg = self.queue.get()
self.dispatch(msg)
# usage
class MyProcess(BaseProcess):
def do_helloworld(self, arg1, arg2):
print(arg1, arg2)
if __name__ == "__main__":
process = MyProcess()
process.start()
process.send("helloworld", "hello", "world")
| [
"uday3prakash@gmail.com"
] | uday3prakash@gmail.com |
bacebe743480b18f6233b694f509ca0713abd2d4 | 0e61801612672a7e302d0996eea4587d68127b54 | /src/grid_inference.py | d4b8ec4587e0cd8207b4f2880644663186bd6bfa | [] | no_license | DongjoonLim/EvoLSTM | a2c501c2842958cf62b0017b4d2415a5558e0975 | c192488eb30131f77c3111c638b6cf683a0185ca | refs/heads/master | 2023-06-29T23:09:59.928447 | 2023-06-13T20:34:49 | 2023-06-13T20:34:49 | 237,307,918 | 4 | 0 | null | 2022-12-08T11:38:59 | 2020-01-30T21:20:05 | Jupyter Notebook | UTF-8 | Python | false | false | 10,519 | py |
# coding: utf-8
# In[1]:
# !pip3 install -U scikit-learn
# !pip3 install keras
# !pip3 install cudnnenv
# !pip3 install tensorflow-gpu
# !pip3 install matplotlib
# !conda uninstall -c anaconda cudatoolkit
#!nvidia-smi
from keras.utils.vis_utils import plot_model
import numpy as np
from keras.models import Sequential
from keras.layers import Activation, LSTM, TimeDistributed, Dense, RepeatVector, CuDNNLSTM, GRU, Bidirectional, Input, CuDNNGRU
from keras.utils import np_utils
from keras.callbacks import TensorBoard
import tensorflow as tf
import os
from keras import backend as K
from keras.models import Model
from keras.layers.core import Dense, Reshape
from keras.layers.wrappers import TimeDistributed
from keras.layers import concatenate
import difflib
from keras.models import load_model
import keras
from keras import losses
import matplotlib.pyplot as plt
import random
from random import choice
import re
import pickle
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split
import math
# Length of each decoded sequence produced by the seq2seq sampler.
seq_length = 10
# Number of held-out examples used for testing.
test_size = 50000
# Number of examples reserved for validation.
val_size = 30000
# Index-to-symbol alphabet: padding marker, the four bases, and the gap.
nucleotide = ['0', 'A', 'C', 'G', 'T', '-']
#model5 = load_model('model/seq2seq_nogap_camFam3_1mut.h5')
def decoder(array):
    """Translate a (length, 6) one-hot matrix into its nucleotide string.

    Rows matching none of the six one-hot codes contribute nothing,
    mirroring the original if/elif chain.
    """
    one_hot_to_char = {
        (1, 0, 0, 0, 0, 0): "0",
        (0, 1, 0, 0, 0, 0): "A",
        (0, 0, 1, 0, 0, 0): "C",
        (0, 0, 0, 1, 0, 0): "G",
        (0, 0, 0, 0, 1, 0): "T",
        (0, 0, 0, 0, 0, 1): "-",
    }
    return "".join(one_hot_to_char.get(tuple(row.tolist()), "") for row in array)
#model5 = load_model('model/seq2seq_nogap_camFam3_1mut.h5')
def decoderY(array):
    """Translate a single length-6 one-hot vector into its nucleotide
    character ('' if the vector matches no code)."""
    one_hot_to_char = {
        (1, 0, 0, 0, 0, 0): "0",
        (0, 1, 0, 0, 0, 0): "A",
        (0, 0, 1, 0, 0, 0): "C",
        (0, 0, 0, 1, 0, 0): "G",
        (0, 0, 0, 0, 1, 0): "T",
        (0, 0, 0, 0, 0, 1): "-",
    }
    return one_hot_to_char.get(tuple(array.tolist()), "")
def printHitMiss(a, b):
    """Label a prediction: 'Hit' when it equals the target, else 'Miss'."""
    return 'Hit' if a == b else 'Miss'
def accuracy(a, b):
    """Fraction of positions where a and b agree (len(a) is the denominator)."""
    matches = sum(1 for i in range(len(a)) if a[i] == b[i])
    return matches / len(a)
def accuracy2(a, b, c):
    """Accuracy restricted to mutated positions.

    Among positions where input `a` differs from truth `c`, return the
    fraction where prediction `b` matches `c`. (Raises ZeroDivisionError
    when no position mutated, as the original did.)
    """
    mutated = [i for i in range(len(a)) if a[i] != c[i]]
    corrected = sum(1 for i in mutated if b[i] == c[i])
    return corrected / len(mutated)
def isMutation(a, b):
    """Print a 'mutation' marker when the two symbols differ."""
    if a == b:
        return
    print("mutation")
def decode_sequence(input_seq, model, encoder_model, decoder_model):
    """Greedy autoregressive decoding of one input sequence.

    At each step the decoder is fed the next encoder input column
    concatenated with the one-hot of the previously emitted symbol, and the
    argmax symbol is appended. Stops after `seq_length` characters.
    NOTE(review): `model` is unused here — presumably kept for signature
    parity with other decode helpers; confirm.
    """
    nucleotide = ['0', 'A', 'C', 'G', 'T', '-']
    # Encode the input as state vectors.
    #print(input_seq[0,0])
    index = 0
    states_value = encoder_model.predict(input_seq)
    #print(len(states_value))
    #print(states_value)
    # Generate empty target sequence of length 1.
    # 12 = 6 (current input one-hot) + 6 (previous output one-hot).
    target_seq = np.zeros((1, 1, 12))
    target_seq[0][0]= np.hstack((input_seq[0,index], np.array([1,0,0,0,0,0])))
    #print(target_seq)
    # Populate the first character of target sequence with the start character.
    # Sampling loop for a batch of sequences
    # (to simplify, here we assume a batch of size 1).
    stop_condition = False
    decoded_sentence = ''
    probability = 1
    while not stop_condition:
        index = index +1
        output_tokens, h, c = decoder_model.predict(
            [target_seq] + states_value)
        # Sample a token (greedy: argmax over the 6 symbols).
        sampled_token_index = np.argmax(output_tokens[0, -1, :])
        print(output_tokens)
        #sampled_token_index = np.random.choice(6, 1, p=output_tokens[0, -1, :])[0]
        #print(output_tokens[0, -1, :])
        sampled_char = nucleotide[sampled_token_index]
        decoded_sentence += sampled_char
        #print(decoded_sentence)
        # Exit condition: either hit max length
        # or find stop character.
        if (len(decoded_sentence) == seq_length):
            break
        # Update the target sequence (of length 1).
        target_seq = np.zeros((1, 1, 12))
        temp = np.zeros((6))
        temp[sampled_token_index] = 1
        target_seq[0][0]= np.hstack((input_seq[0, index], temp))
        # target_seq[0, 0, sampled_token_index] = 1
        # Update states
        states_value = [h, c]
    return decoded_sentence
def get_prob(input_seq, target, model, encoder_model, decoder_model):
    """Teacher-forced scoring of a known target sequence.

    Walks the decoder along `target` (instead of its own predictions) and
    records, per position, the model probability assigned to the true
    symbol. Returns (decoded string of the target path, list of per-step
    probabilities). NOTE(review): `model` is unused here; confirm.
    """
    nucleotide = ['0', 'A', 'C', 'G', 'T', '-']
    # Encode the input as state vectors.
    index = 0
    states_value = encoder_model.predict(input_seq)
    target_seq = np.zeros((1, 1, 12))
    target_seq[0][0]= np.hstack((input_seq[0,index], np.array([1,0,0,0,0,0])))
    stop_condition = False
    decoded_sentence = ''
    probability = []
    while not stop_condition:
        index = index +1
        output_tokens, h, c = decoder_model.predict(
            [target_seq] + states_value)
        # Sample a token — forced to the ground-truth symbol at this step.
        #print(output_tokens[0, -1, :])
        sampled_token_index = np.argmax(target[index-1])
        #sampled_token_index = np.random.choice(6, 1, p=output_tokens[0, -1, :])[0]
        #probability[index-1] = probability[index-1] * output_tokens[0, -1, :][sampled_token_index]
        probability.append(output_tokens[0, -1, :][sampled_token_index])
        #print(output_tokens[0, -1, :])
        sampled_char = nucleotide[sampled_token_index]
        decoded_sentence += sampled_char
        #print(decoded_sentence)
        # Exit condition: either hit max length
        # or find stop character.
        if (len(decoded_sentence) == seq_length):
            break
        # Update the target sequence (of length 1).
        target_seq = np.zeros((1, 1, 12))
        temp = np.zeros((6))
        temp[sampled_token_index] = 1
        target_seq[0][0]= np.hstack((input_seq[0, index], temp))
        # target_seq[0, 0, sampled_token_index] = 1
        # Update states
        states_value = [h, c]
    return decoded_sentence, probability
def diffList(a, b):
    """Count positions (over len(a)) where sequences a and b differ."""
    return sum(1 for i in range(len(a)) if a[i] != b[i])
#for seq_index in range(1):
def predict2(X_test, y_test, model, encoder_model, decoder_model, gru=False):
    """Decode every test example, print per-example comparisons, and report
    aggregate probability/accuracy statistics.

    NOTE(review): when gru=True this calls `decode_gru`, which is not
    defined in this file — presumably provided elsewhere or dead; confirm.
    """
    x_true =[]
    y_hat =[]
    y_true =[]
    probList=[]
    # Running sum of per-position log-probabilities across examples.
    productProb = [0]*seq_length
    for seq_index in range(len(X_test)):
        input_seq = X_test[seq_index: seq_index + 1]
        #print(input_seq[0])
        if gru:
            decoded_sentence = decode_gru(input_seq, model, encoder_model, decoder_model)
        else :
            decoded_sentence = decode_sequence(input_seq, model, encoder_model, decoder_model)
        # Teacher-forced probabilities of the true target under the model.
        _, prob = get_prob(input_seq, y_test[seq_index], model, encoder_model, decoder_model)
        probList.append(prob)
        prob = [math.log(x) for x in prob]
        productProb = [sum(x) for x in zip(productProb, prob)]
        input_sen = decoder(input_seq[0])
        print(input_sen, ' -> ',
            decoded_sentence, 'True:', decoder(y_test[seq_index]),
            printHitMiss(decoded_sentence, decoder(y_test[seq_index])),
            diffList(input_sen, decoded_sentence)
            )
        print(input_sen, ' -> ',
            decoder(y_test[seq_index]), 'True:', decoder(y_test[seq_index]),
            prob,
            printHitMiss(decoded_sentence, decoder(y_test[seq_index])),
            diffList(input_sen, decoded_sentence)
            )
        print()
        x_true.append(input_sen)
        y_hat.append(decoded_sentence)
        y_true.append(decoder(y_test[seq_index]))
    # Average the summed log-probabilities over the nominal test-set size.
    productProb = [x/test_size for x in productProb]
    print("Mean and std of probabilities : {} , {} ".format(np.mean(probList), np.std(probList)))
    print("Sum of log probabilities : {}".format(productProb))
    print("Percentage of target and prediction being identical: {}".format(accuracy(y_hat, y_true)))
    print("Percentage of training and prediction being identical: {}".format(accuracy(y_hat, x_true)))
    print("Accuracy given mutation happened : {}".format(accuracy2(x_true, y_hat, y_true)))
    #print("Test loss : {}".format(keras.losses.categorical_crossentropy(y_true, y_hat)))
    #return x_true, y_hat, y_true
def grid_predict(train_size, half, epoch, X_test, y_test):
    """Load the saved full/encoder/decoder model trio for one grid cell
    (train size, hidden size, epoch count) and run evaluation on it."""
    trained = load_model("models/{}_{}_{}.h5".format(train_size, half, epoch))
    encoder_net = load_model("models/E{}_{}_{}.h5".format(train_size, half, epoch))
    decoder_net = load_model("models/D{}_{}_{}.h5".format(train_size, half, epoch))
    predict2(X_test, y_test, trained, encoder_net, decoder_net, gru=False)
# In[3]:
def concat(input1, input2):
result = []
for x, y in zip(input1, input2):
result.append(np.hstack((x, y)))
return np.array(result)
def get_data(trainInd, valInd, testInd):
X_train=np.load('prepData/X_train_camFam3_1mut_v3_chr2.npy')[:trainInd]
X_val=np.load('prepData/X_val_camFam3_1mut_v3_chr2.npy')[:valInd]
X_test=np.load('prepData/X_test_camFam3_1mut_v3_chr2.npy')[:testInd]
y_train=np.load('prepData/y_train_camFam3_1mut_v3_chr2.npy')[:trainInd]
y_val=np.load('prepData/y_val_camFam3_1mut_v3_chr2.npy')[:valInd]
y_test=np.load('prepData/y_test_camFam3_1mut_v3_chr2.npy')[:testInd]
y_train1 = np.load('prepData/y_train1_camFam3_1mut_v3_chr2.npy')[:trainInd]
y_val1 = np.load('prepData/y_val1_camFam3_1mut_v3_chr2.npy')[:valInd]
y_test1 = np.load('prepData/y_test1_camFam3_1mut_v3_chr2.npy')[:testInd]
y_train1 = concat(X_train, y_train1)
y_val1 = concat(X_val, y_val1)
y_test1 = concat(X_test, y_test1)
return X_test, y_test
train_size = 0
hidden = [16,32, 64,128,256,512]
epoch = [5, 5, 5, 5, 5, 5]
X_test, y_test = get_data(train_size, val_size, test_size)
for h, e in zip(hidden, epoch):
print("Train size = {}, hidden_size = {}, epoch = {}".format(train_size, h, e))
grid_predict(train_size, h, e, X_test, y_test)
print("The end of Train size = {}, hidden_size = {}, epoch = {}".format(train_size, h, e))
| [
"noreply@github.com"
] | noreply@github.com |
4d1729f71d00955ffc428891f9ef09e4d06f2ef4 | 8221fc863fa51c2723a78888e9af010d056ad7a2 | /WebServer.py | 446cd65e94d061aa6ad9f258fe26bebcb381ef05 | [] | no_license | hxyalee/CS3331 | 87021ef2c5862b4b86b4039bb52d2fe144db8e67 | bcf5082870a082fc4907e684687aa2ffcca02067 | refs/heads/master | 2023-01-23T09:02:20.769727 | 2020-12-05T17:16:21 | 2020-12-05T17:16:21 | 306,234,626 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,108 | py | #!/usr/bin/env python3
import sys
from socket import *
# No valid arguments
try:
if(len(sys.argv) != 2):
raise Exception()
tmp = int(sys.argv[1])
if(type(tmp) != int):
raise Exception()
except Exception:
print("Usage: ./Webserver.py {port}")
exit()
# Extract host and port
server_host = 'localhost'
server_port = int(sys.argv[1])
# Set server's TCP socket
serverSocket = socket(AF_INET, SOCK_STREAM)
serverSocket.bind((server_host, server_port))
serverSocket.listen(1)
# Display that the server is ready
print("--- The server is ready to receive --- \n")
while True:
connectionSocket, addr = serverSocket.accept()
# Get and parse the request; assuming only GET requests
request = connectionSocket.recv(2048).decode('utf-8')
print(request)
request_list = request.split(' ')
# If the method is not a GET request, then return error message
method = request_list[0]
resource = request_list[1]
if(method != "GET"):
header = "HTTP/1.1 405 Method Not Allowed\nContent-Type: " + "text/html" + "\n\n"
message = """<html>\n\t<body>\n\t<h3>Error 405: Method Not Allowed</h3>\n\t</body>\n</html>"""
response = header + message
response = request.encode('utf-8')
connectionSocket.send(response)
connectionSocket.close()
continue
# Parse the requested resource
if(resource == "/"):
resource = 'index.html'
else:
resource = resource[1:]
try:
# Build header for success case
header = "HTTP/1.1 200 OK\n"
# Check the type of the resource
if '.png' in resource.split():
mimetype = 'image/png'
else:
mimetype = 'text/html'
header += "Content-Type " + mimetype + '\n\n'
# Read requested resource to memory
file = open(resource, 'rb')
message = file.read()
file.close()
except Exception:
# File is not found
header = "HTTP/1.1 404 Not Found\nContent-Type: " + "text/html" + "\n\n"
message = """<html>\n<body>\t\n\t\t\n<h3>Error 404: Not Found</h3>\n\t</body>\n</html>""".encode('utf-8')
finally:
response = header.encode('utf-8') + message
connectionSocket.send(response)
connectionSocket.close()
# Close the TCP socket
serverSocket.close() | [
"z5226463@ad.unsw.edu.au"
] | z5226463@ad.unsw.edu.au |
f10cdd86dd40f18b8d7c02cf3eabfd28b6204cf2 | 9f61f361a545825dd6ff650c2d81bc4d035649bd | /tests/test_document.py | e95f63d807e367b91125f2d53fc4b1218a64b17d | [
"MIT"
] | permissive | cassj/dexy | 53c9e7ce3f601d9af678816397dcaa3a111ba670 | fddfeb4db68c362a4126f496dbd019f4639d07ba | refs/heads/master | 2020-12-25T11:52:35.144908 | 2011-06-05T20:52:52 | 2011-06-05T20:52:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,699 | py | from dexy.controller import Controller
from dexy.document import Document
from dexy.artifacts.file_system_json_artifact import FileSystemJsonArtifact
import os
def setup_controller():
controller = Controller()
controller.artifacts_dir = 'artifacts'
if not os.path.isdir(controller.artifacts_dir):
os.mkdir(controller.artifacts_dir)
controller.artifact_class = FileSystemJsonArtifact
controller.allow_remote = True
controller.config = {
'tests/data' : {
"@simple.py|pyg" : {
"contents" : "x = 5\nx^2"
}
}
}
controller.setup_and_run()
return controller
def setup_doc():
controller = setup_controller()
doc = controller.members['tests/data/simple.py|pyg']
assert isinstance(doc, Document)
return doc
def setup_artifact():
doc = setup_doc()
return doc.final_artifact()
def test_artifact_hash_dict():
artifact = setup_artifact()
hash_dict = artifact.hash_dict()
for k in hash_dict.keys():
assert k in artifact.HASH_WHITELIST
# hashstring shouldn't change
hashstring = artifact.hashstring
artifact.set_hashstring
assert artifact.hashstring == hashstring
def test_init():
"""document: filters should be processed correctly"""
doc = Document(FileSystemJsonArtifact, "data/test.py|abc")
assert doc.name == "data/test.py"
assert doc.filters == ['abc']
doc.filters += ['def', 'xyz']
assert doc.filters == ['abc', 'def', 'xyz']
assert doc.key() == "data/test.py|abc|def|xyz"
def test_complete():
"""document: after controller has run"""
doc = setup_doc()
assert doc.key() == "tests/data/simple.py|pyg"
| [
"ana@ananelson.com"
] | ana@ananelson.com |
dbbab268c0f12ac2bcfab7eab23967dd84e060e4 | 0252a277036b9ac7f95e5db3cad6c1a94b89c4ef | /eaif4_ws/build/turtlebot_apps/turtlebot_rapps/catkin_generated/pkg.installspace.context.pc.py | 5910a9329ceee27595e6b34e8f4452ce3011c710 | [] | no_license | maxwelldc/lidar_slam | 1e5af586cd2a908474fa29224b0d9f542923c131 | 560c8507ea1a47844f9ce6059f48937b0627967b | refs/heads/master | 2020-07-01T03:15:42.877900 | 2019-08-07T10:25:27 | 2019-08-07T10:25:27 | 201,025,062 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 378 | py | # generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "turtlebot_rapps"
PROJECT_SPACE_DIR = "/home/wenhou/eaif4_ws/install"
PROJECT_VERSION = "2.3.7"
| [
"374931377@qq.com"
] | 374931377@qq.com |
c42484aa0e251a858cba80f1b7cbda8c5b61ad40 | b6fa182321756b891b84958e2b2c01e63b3f88b2 | /stepik/product _of_numbers.py | 61d44cf81b636fd8b2f1484dd3cedb783f9c8444 | [] | no_license | carden-code/python | 872da0dff5466070153cf945c428f1bc8309ea2b | 64e4df0d9893255ad362a904bb5d9677a383591c | refs/heads/master | 2023-07-05T05:14:16.479392 | 2021-08-22T21:27:36 | 2021-08-22T21:27:36 | 305,476,509 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,546 | py | # Напишите программу для определения, является ли число произведением двух чисел из данного набора,
# выводящую результат в виде ответа «ДА» или «НЕТ».
#
# Формат входных данных
# В первой строке подаётся число n, (0 < n < 1000) – количество чисел в наборе.
# В последующих n строках вводятся целые числа, составляющие набор (могут повторяться).
# Затем следует целое число, которое является или не является произведением двух каких-то чисел из набора.
#
# Формат выходных данных
# Программа должна вывести «ДА» или «НЕТ» в соответствии с условием задачи.
#
# Примечание.
# Само на себя число из набора умножиться не может, другими словами, два множителя должны иметь разные номера в наборе.
amount_numbers = int(input())
numbers_list = [int(input()) for _ in range(amount_numbers)]
product = int(input())
yes = False
for index, num in enumerate(numbers_list):
for i, n in enumerate(numbers_list):
if index != i and num * n == product:
yes = True
print('ДА' if yes else 'НЕТ')
| [
"carden.ruby@gmail.com"
] | carden.ruby@gmail.com |
c3003bd895edb9bdabce1c019fd28d0ab153b7af | 5e255845c19689f598d9221c7542df60e85f5923 | /setup.py | 01108ddb79b16904ea2ca5f26ffbb9cf402398ee | [
"MIT"
] | permissive | hulsmeier/best_voxelnet_ever | 48023522e109fa263622536b343ee6b310057856 | aeefd32711a5c986c6099d53c5a2efdf9e01ea48 | refs/heads/master | 2021-06-13T03:27:49.944036 | 2020-06-03T18:35:47 | 2020-06-03T18:35:47 | 254,420,901 | 0 | 0 | MIT | 2020-04-09T16:16:11 | 2020-04-09T16:16:09 | null | UTF-8 | Python | false | false | 427 | py | #!/usr/bin/env python
# -*- coding:UTF-8 -*-
# File Name : setup.py
# Purpose :
# Creation Date : 11-12-2017
# Last Modified : Sat 23 Dec 2017 03:18:37 PM CST
# Created By : Jeasine Ma [jeasinema[at]gmail[dot]com]
from distutils.core import setup
from Cython.Build import cythonize
import numpy
setup(
name='box overlaps',
ext_modules=cythonize('./utils/box_overlaps.pyx'),
include_dirs=[numpy.get_include()]
)
| [
"fschaeffler@gmx.de"
] | fschaeffler@gmx.de |
0d87632a4b2c03e675bb8726a5f7622be7f35e49 | 06e897ed3b6effc280eca3409907acc174cce0f5 | /plugins/pelican_unity_webgl/config.py | d7250678123196715136c46cfa982901234d38d6 | [
"LicenseRef-scancode-other-permissive",
"MIT",
"AGPL-3.0-only"
] | permissive | JackMcKew/jackmckew.dev | ae5a32da4f1b818333ae15c6380bca1329d38f1e | b5d68070b6f15677a183424c84e30440e128e1ea | refs/heads/main | 2023-09-02T14:42:19.010294 | 2023-08-15T22:08:19 | 2023-08-15T22:08:19 | 213,264,451 | 15 | 8 | MIT | 2023-02-14T21:50:28 | 2019-10-07T00:18:15 | JavaScript | UTF-8 | Python | false | false | 201 | py | # unity webgl options
DEFAULT_WIDTH = 960
DEFAULT_HEIGHT = 600
DEFAULT_ALIGN = "center"
# paths
GAMES_ROOT_DIR = "/games" # directory with games
TEMPLATE_PATH = "/games/utemplate" # template path
| [
"jackmckew2@gmail.com"
] | jackmckew2@gmail.com |
ad614ea6517177899ac56fa3ee0f5c97ebe6eaed | 20e4eb529af631faed63ce213938a3d08c4c4533 | /maxsubarray.py | 87bdfc8a9df0415b413842f59ca45967c401c2a9 | [] | no_license | mmarat01/leet | 0c4ac88bcec5fc9c35769f151a10a0d588e30ed3 | fb7a41524b51f5fd08460acdde9a5fc44713583e | refs/heads/master | 2023-04-09T05:08:34.948408 | 2021-04-03T16:14:30 | 2021-04-03T16:14:30 | 319,825,195 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,102 | py | from typing import List
# Given an integer array nums, find the contiguous subarray (containing at least one number) which has the largest sum and return its sum.
class Solution:
def maxSubArray(self, nums: List[int]) -> int:
'''
traverse list starting from second element because
first element added to sum is the first element in array
we keep a moving sum ("up to this point"), defined as the maximum
of the previous moving sum + current number OR the current number alone.
the reason why we'd reset to the current number alone is because by
keeping track of the moving contiguous sum, if a new element is greater than
the whole thing, that must be the start of the new max subarray.
with negative numbers, there could potentially be a scenario where both
"curr_sum + curr_num" or "curr_num" are smaller than the sum that had been
found before.
* say you have a current sum of "-2", and the next number is "-4"
-2 + -4 = -6
so -4 it is --> new current sum, and smaller than the prev
that's why we want to keep track of the greatest "current sum"! if it
were all positives this would be trivial; you'd add them all up.
'''
if len(nums) == 1:
return nums[0]
curr_sum = max_sum = nums[0]
'''
cool syntax i should prob use
for i in nums[1:]:
curr_sum = max(curr_sum + i, i)
max_sum = max(max_sum, curr_sum)
'''
for i in range(1, len(nums)):
if curr_sum + nums[i] < nums[i]:
curr_sum = nums[i]
else:
curr_sum += nums[i]
if curr_sum > max_sum:
max_sum = curr_sum
return max_sum
s = Solution()
print(s.maxSubArray([-2,1,-3,4,-1,2,1,-5,4])) # 6
print(s.maxSubArray([0])) # 0
print(s.maxSubArray([-1])) # -1
print(s.maxSubArray([-2147483647])) # -2147483647 | [
"66384102+mmarat01@users.noreply.github.com"
] | 66384102+mmarat01@users.noreply.github.com |
cca5b0ff8e93a51f7a513b999bef95ba4627b04e | 9cce59649373fa58104641c71cd31c350dd93836 | /server.py | da1a36dfba16ed41a0df4fb24b1a2fec0b46efe4 | [] | no_license | ravi-oli/hackernews | 21a1f8230367a9fbc556d7c3813186c12da750c4 | fd8ac029ca8536fb94be7853430953e1eb72b4a4 | refs/heads/master | 2022-11-30T22:36:30.030884 | 2020-08-06T14:04:12 | 2020-08-06T14:04:12 | 285,585,600 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,199 | py | # Import libraries
import os
import flask
from flask import request
from google.cloud import bigquery
# Initialize flask application
app_flask = flask.Flask(__name__,
static_url_path="/",
static_folder="./interface")
# Define API route
@app_flask.route("/")
def root():
return app_flask.send_static_file("index.html")
@app_flask.route("/story-details")
def fetch_story_details(methods=['GET']):
# Fetch query parameter
query_params = request.args
story_id = query_params["storyid"]
# Fetch details from DB
# 1. Establish credentials
os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = "provide-path-to-service-account-credentials.json"
# 2. Establish BQ client
client = bigquery.Client()
# 3. Query
sql_query = """
SELECT
A.id,
A.by,
A.score,
A.title
FROM
`dev-mantarays.HACKERNEWS.stories` as A
WHERE
A.id = {story_id}
"""
# 4. Fetch results
result = list(client.query(sql_query.format(story_id = story_id)))
print(result)
# Return response to
return "Story Id: {}, Published by: {}, Score: {}, Title: {}".format(result[0]['id'],
result[0]['by'], result[0]['score'], result[0]['title']), 200
app_flask.run(port=8000, host='0.0.0.0')
| [
"omrrjcravi@gmail.com"
] | omrrjcravi@gmail.com |
504f0658b7b7f9ae808c5d819daa2760c8f38d06 | f953784405bc32ea61c770664edeb534459f33d9 | /Shmup/main.py | d35d8c9c1ed44df5f28031ea5a63ccdd1bd83728 | [] | no_license | JLew15/Python | 7ba060baf71cefcf20fe0566b7a6f380134be550 | e735bacb2de433788bf173e9e8d50a187159e1c3 | refs/heads/master | 2023-04-28T06:36:55.734871 | 2021-05-11T15:47:49 | 2021-05-11T15:47:49 | 293,847,388 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,275 | py | import pygame as pg
import random as r
import math
from os import *
# Code written by Jaiden Lewis
# Artwork Credit Kenney.nl or www.kenney.nl
class Explosion(pg.sprite.Sprite):
def __init__(self, center):
super(Explosion, self).__init__()
self.image = explosionAnimation["lg"][0]
self.rect = self.image.get_rect()
self.rect.center = center
self.frame = 0
self.lastUpdate = pg.time.get_ticks()
self.frameRate = 50
def update(self):
now = pg.time.get_ticks()
if now - self.lastUpdate > self.frameRate:
self.lastUpdate = now
self.frame += 1
if self.frame == len(explosionAnimation["lg"]):
self.kill()
else:
center = self.rect.center
self.image = explosionAnimation["lg"][self.frame]
self.rect = self.image.get_rect()
self.rect.center = center
class Projectile(pg.sprite.Sprite):
def __init__(self, x, y):
super(Projectile, self).__init__()
# self.image = pg.Surface((5, 5))
# self.image.fill(WHITE)
self.image = bulletImg
self.image = pg.transform.scale(bulletImg, (5, 10))
self.rect = self.image.get_rect()
self.rect.centerx = x
self.rect.bottom = y - 1
self.speedY = -5
def update(self):
self.rect.y += self.speedY
if self.rect.bottom < 0:
self.kill()
class Player(pg.sprite.Sprite):
def __init__(self):
super(Player, self).__init__()
# self.image = pg.Surface((50, 40))
# self.image.fill(GREEN)
self.image = playerImg
self.image = pg.transform.scale(playerImg, (50, 40))
self.rect = self.image.get_rect()
self.rect.centerx = (WIDTH / 2)
self.rect.bottom = (HEIGHT - (HEIGHT * .05))
self.speedX = 0
self.shootDelay = 250
self.lastShot = pg.time.get_ticks()
def update(self):
self.speedX = 0
if self.rect.left < 0:
self.rect.left = 0
if self.rect.right > WIDTH:
self.rect.right = WIDTH
keystate = pg.key.get_pressed()
if keystate[pg.K_LEFT] or keystate[pg.K_a]:
self.speedX = -3
if keystate[pg.K_RIGHT] or keystate[pg.K_d]:
self.speedX = 3
if keystate[pg.K_SPACE]:
self.shoot()
self.rect.x += self.speedX
def shoot(self):
now = pg.time.get_ticks()
if now - self.lastShot > self.shootDelay:
self.lastShot = now
shootAudio.play()
bullet = Projectile(self.rect.centerx, self.rect.top)
projectileGroup.add(bullet)
allSprites.add(bullet)
class Mob(pg.sprite.Sprite):
def __init__(self):
super(Mob, self).__init__()
# self.image = pg.Surface((25, 25))
# self.image.fill(RED)
self.imageO = mobImg
self.imageO = pg.transform.scale(mobImg, (25, 25))
self.image = self.imageO.copy()
self.rect = self.image.get_rect()
self.rect.centerx = r.randint(13, WIDTH - 13)
self.rect.top = 0
self.speedY = r.randint(1, 10)
self.speedX = r.randint(-3, 3)
self.last_update = pg.time.get_ticks()
self.rot = 0
self.rotSpeed = r.randint(-8, 8)
def rotate(self):
now = pg.time.get_ticks()
if now - self.last_update > 60:
self.last_update = now
self.rot = (self.rot + self.rotSpeed) % 360
newImage = pg.transform.rotate(self.imageO, self.rot)
oldCenter = self.rect.center
self.image = newImage
self.rect = self.image.get_rect()
self.rect.center = oldCenter
def update(self):
self.rotate()
if self.rect.top > HEIGHT:
self.rect.top = 0
self.rect.centerx = r.randint(13, WIDTH - 13)
self.speedY = r.randint(1, 10)
self.speedX = r.randint(-3, 3)
self.rect.x += self.speedX
self.rect.y += self.speedY
def spawnNPC(self):
npc = Mob()
mobGroup.add(npc)
allSprites.add(npc)
# Game Constants
#################################
HEIGHT = 600
WIDTH = 300
FPS = 60
TITLE = "Shoot Em Up"
playerLives = 5
playerScore = 0
fontName = pg.font.match_font("arial")
# COLORS
BLACK = (0, 0, 0)
WHITE = (255, 255, 255)
RED = (255, 0, 0)
GREEN = (0, 255, 0)
BLUE = (0, 0, 255)
YELLOW = (255, 255, 0)
PURPLE = (255, 0, 255)
TURQUOISE = (0, 255, 255)
SKYBLUE = (123, 255, 255)
PASTELGREEN = (123, 255, 123)
#################################
# Init Folders
#################################
gameFolder = path.dirname(__file__)
imgs = path.join(gameFolder, "img")
saveData = path.join(gameFolder, "data")
aud = path.join(gameFolder, "aud")
playerimgs = path.join(imgs, "player")
mobimgs = path.join(imgs, "mob")
bgimg = path.join(imgs, "bg")
anim = path.join(imgs, "ani")
print(imgs)
#################################
# Init pygame and create window
#################################
pg.init()
pg.mixer.init()
screen = pg.display.set_mode((WIDTH, HEIGHT))
pg.display.set_caption(TITLE)
clock = pg.time.Clock()
#################################
# Load images
#################################
bg = pg.image.load(bgimg + "/bg.png")
bgRect = bg.get_rect()
playerImg = pg.image.load(playerimgs + "/shooter.png")
playerRect = playerImg.get_rect()
bulletImg = pg.image.load(playerimgs + "/shot.png")
bulletRect = bulletImg.get_rect()
mobImg = pg.image.load(mobimgs + "/shooting.png")
mobRect = mobImg.get_rect()
explosionAnimation = {"lg": []}
for i in range(0, 8):
fn = "regularExplosion0{}.png".format(i)
img = pg.image.load(path.join(anim, fn)).convert()
img.set_colorkey(BLACK)
img = pg.transform.scale(img, (40, 40))
explosionAnimation["lg"].append(img)
#################################
# Sprite Groups
#################################
allSprites = pg.sprite.Group()
playerGroup = pg.sprite.Group()
mobGroup = pg.sprite.Group()
projectileGroup = pg.sprite.Group()
#################################
# Create Game Obj
#################################
player1 = Player()
mob1 = Mob()
for i in range(10):
mob1.spawnNPC()
#################################
# Add Obj to Sprite Groups
#################################
player1.add(playerGroup)
mob1.add(mobGroup)
for sprite in playerGroup:
sprite.add(allSprites)
for sprite in mobGroup:
sprite.add(allSprites)
#################################
shootAudio = pg.mixer.Sound(aud + "/sfx_wpn_laser2.wav")
def drawText(surf, text, size, x, y):
font = pg.font.Font(fontName, size)
txtSurface = font.render(text, True, WHITE)
textRect = txtSurface.get_rect()
textRect.midtop = (x, y)
surf.blit(txtSurface, textRect)
def drawHB(surf, x, y, pct):
if pct < 0:
pct = 0
barLength = 100
barHeight = 10
fill = (pct/100) * barLength
# Game loop
#################################
running = True
while running:
# Timing
#######
clock.tick(FPS)
#######
# Input
#######
for event in pg.event.get():
if event.type == pg.KEYDOWN:
if event.key == pg.K_ESCAPE:
running = False
if event.type == pg.QUIT:
running = False
#######
# Updates
#######
allSprites.update()
hits = pg.sprite.spritecollide(player1, mobGroup, True)
for hit in hits:
print("Player hit")
mob1.spawnNPC()
playerLives -= 1
playerScore -= 10
exp = Explosion(hit.rect.center)
allSprites.add(exp)
if playerLives <= 0:
player1.kill()
hits = pg.sprite.groupcollide(projectileGroup, mobGroup, True, True)
for hit in hits:
mob1.spawnNPC()
exp = Explosion(hit.rect.center)
allSprites.add(exp)
playerScore += 5
if playerScore % 100 == 0:
playerLives += 1
print("LIFE GAINED")
#######
# Render
#######
screen.fill(BLACK)
screen.blit(bg, bgRect)
allSprites.draw(screen)
drawText(screen,"Score: " + str(playerScore), 18, WIDTH/2, 10)
pg.display.flip()
#######
pg.quit()
#################################
| [
"zombears@icloud.com"
] | zombears@icloud.com |
1292e503e8b05cd9f288de556289ca29880a41cc | f31fda8014ecadf6af7d4e3392fb917c49e0352a | /HeavyIonsAnalysis/JetAnalysis/python/jets/akVs4CaloJetSequence_PbPb_jec_cff.py | 1138081e341c23d6cf41f8774dfe47930fc4f528 | [] | no_license | jniedzie/lightbylight | acea5051f053c49824a49a0b78bac3a2247ee75f | f5a4661fcf3fd3c0e9ccd8893a46a238e30c2aa8 | refs/heads/master | 2020-03-18T12:24:31.970468 | 2018-02-09T15:50:00 | 2018-02-09T15:50:00 | 134,724,759 | 0 | 1 | null | 2018-05-24T14:11:12 | 2018-05-24T14:11:12 | null | UTF-8 | Python | false | false | 14,330 | py |
import FWCore.ParameterSet.Config as cms
from HeavyIonsAnalysis.JetAnalysis.patHeavyIonSequences_cff import patJetGenJetMatch, patJetPartonMatch, patJetCorrFactors, patJets
from HeavyIonsAnalysis.JetAnalysis.inclusiveJetAnalyzer_cff import *
from HeavyIonsAnalysis.JetAnalysis.bTaggers_cff import *
from RecoJets.JetProducers.JetIDParams_cfi import *
from RecoJets.JetProducers.nJettinessAdder_cfi import Njettiness
akVs4Calomatch = patJetGenJetMatch.clone(
src = cms.InputTag("akVs4CaloJets"),
matched = cms.InputTag("ak4HiSignalGenJets"),
resolveByMatchQuality = cms.bool(True),
maxDeltaR = 0.4
)
akVs4CalomatchGroomed = patJetGenJetMatch.clone(
src = cms.InputTag("ak4HiGenJets"),
matched = cms.InputTag("ak4HiSignalGenJets"),
resolveByMatchQuality = cms.bool(True),
maxDeltaR = 0.4
)
akVs4Caloparton = patJetPartonMatch.clone(src = cms.InputTag("akVs4CaloJets")
)
akVs4Calocorr = patJetCorrFactors.clone(
useNPV = cms.bool(False),
useRho = cms.bool(False),
# primaryVertices = cms.InputTag("hiSelectedVertex"),
levels = cms.vstring('L2Relative','L3Absolute'),
src = cms.InputTag("akVs4CaloJets"),
payload = "AK4Calo_offline"
)
akVs4CaloJetID= cms.EDProducer('JetIDProducer', JetIDParams, src = cms.InputTag('akVs4CaloJets'))
#akVs4Caloclean = heavyIonCleanedGenJets.clone(src = cms.InputTag('ak4HiSignalGenJets'))
akVs4CalobTagger = bTaggers("akVs4Calo",0.4)
#create objects locally since they dont load properly otherwise
#akVs4Calomatch = akVs4CalobTagger.match
akVs4Caloparton = patJetPartonMatch.clone(src = cms.InputTag("akVs4CaloJets"), matched = cms.InputTag("hiSignalGenParticles"))
akVs4CaloPatJetFlavourAssociationLegacy = akVs4CalobTagger.PatJetFlavourAssociationLegacy
akVs4CaloPatJetPartons = akVs4CalobTagger.PatJetPartons
akVs4CaloJetTracksAssociatorAtVertex = akVs4CalobTagger.JetTracksAssociatorAtVertex
akVs4CaloJetTracksAssociatorAtVertex.tracks = cms.InputTag("highPurityTracks")
akVs4CaloSimpleSecondaryVertexHighEffBJetTags = akVs4CalobTagger.SimpleSecondaryVertexHighEffBJetTags
akVs4CaloSimpleSecondaryVertexHighPurBJetTags = akVs4CalobTagger.SimpleSecondaryVertexHighPurBJetTags
akVs4CaloCombinedSecondaryVertexBJetTags = akVs4CalobTagger.CombinedSecondaryVertexBJetTags
akVs4CaloCombinedSecondaryVertexV2BJetTags = akVs4CalobTagger.CombinedSecondaryVertexV2BJetTags
akVs4CaloJetBProbabilityBJetTags = akVs4CalobTagger.JetBProbabilityBJetTags
akVs4CaloSoftPFMuonByPtBJetTags = akVs4CalobTagger.SoftPFMuonByPtBJetTags
akVs4CaloSoftPFMuonByIP3dBJetTags = akVs4CalobTagger.SoftPFMuonByIP3dBJetTags
akVs4CaloTrackCountingHighEffBJetTags = akVs4CalobTagger.TrackCountingHighEffBJetTags
akVs4CaloTrackCountingHighPurBJetTags = akVs4CalobTagger.TrackCountingHighPurBJetTags
akVs4CaloPatJetPartonAssociationLegacy = akVs4CalobTagger.PatJetPartonAssociationLegacy
akVs4CaloImpactParameterTagInfos = akVs4CalobTagger.ImpactParameterTagInfos
akVs4CaloImpactParameterTagInfos.primaryVertex = cms.InputTag("offlinePrimaryVertices")
akVs4CaloJetProbabilityBJetTags = akVs4CalobTagger.JetProbabilityBJetTags
akVs4CaloSecondaryVertexTagInfos = akVs4CalobTagger.SecondaryVertexTagInfos
akVs4CaloSimpleSecondaryVertexHighEffBJetTags = akVs4CalobTagger.SimpleSecondaryVertexHighEffBJetTags
akVs4CaloSimpleSecondaryVertexHighPurBJetTags = akVs4CalobTagger.SimpleSecondaryVertexHighPurBJetTags
akVs4CaloCombinedSecondaryVertexBJetTags = akVs4CalobTagger.CombinedSecondaryVertexBJetTags
akVs4CaloCombinedSecondaryVertexV2BJetTags = akVs4CalobTagger.CombinedSecondaryVertexV2BJetTags
akVs4CaloSecondaryVertexNegativeTagInfos = akVs4CalobTagger.SecondaryVertexNegativeTagInfos
akVs4CaloNegativeSimpleSecondaryVertexHighEffBJetTags = akVs4CalobTagger.NegativeSimpleSecondaryVertexHighEffBJetTags
akVs4CaloNegativeSimpleSecondaryVertexHighPurBJetTags = akVs4CalobTagger.NegativeSimpleSecondaryVertexHighPurBJetTags
akVs4CaloNegativeCombinedSecondaryVertexBJetTags = akVs4CalobTagger.NegativeCombinedSecondaryVertexBJetTags
akVs4CaloPositiveCombinedSecondaryVertexBJetTags = akVs4CalobTagger.PositiveCombinedSecondaryVertexBJetTags
akVs4CaloNegativeCombinedSecondaryVertexV2BJetTags = akVs4CalobTagger.NegativeCombinedSecondaryVertexV2BJetTags
akVs4CaloPositiveCombinedSecondaryVertexV2BJetTags = akVs4CalobTagger.PositiveCombinedSecondaryVertexV2BJetTags
akVs4CaloSoftPFMuonsTagInfos = akVs4CalobTagger.SoftPFMuonsTagInfos
akVs4CaloSoftPFMuonsTagInfos.primaryVertex = cms.InputTag("offlinePrimaryVertices")
akVs4CaloSoftPFMuonBJetTags = akVs4CalobTagger.SoftPFMuonBJetTags
akVs4CaloSoftPFMuonByIP3dBJetTags = akVs4CalobTagger.SoftPFMuonByIP3dBJetTags
akVs4CaloSoftPFMuonByPtBJetTags = akVs4CalobTagger.SoftPFMuonByPtBJetTags
akVs4CaloNegativeSoftPFMuonByPtBJetTags = akVs4CalobTagger.NegativeSoftPFMuonByPtBJetTags
akVs4CaloPositiveSoftPFMuonByPtBJetTags = akVs4CalobTagger.PositiveSoftPFMuonByPtBJetTags
akVs4CaloPatJetFlavourIdLegacy = cms.Sequence(akVs4CaloPatJetPartonAssociationLegacy*akVs4CaloPatJetFlavourAssociationLegacy)
#Not working with our PU sub, but keep it here for reference
#akVs4CaloPatJetFlavourAssociation = akVs4CalobTagger.PatJetFlavourAssociation
#akVs4CaloPatJetFlavourId = cms.Sequence(akVs4CaloPatJetPartons*akVs4CaloPatJetFlavourAssociation)
akVs4CaloJetBtaggingIP = cms.Sequence(akVs4CaloImpactParameterTagInfos *
(akVs4CaloTrackCountingHighEffBJetTags +
akVs4CaloTrackCountingHighPurBJetTags +
akVs4CaloJetProbabilityBJetTags +
akVs4CaloJetBProbabilityBJetTags
)
)
akVs4CaloJetBtaggingSV = cms.Sequence(akVs4CaloImpactParameterTagInfos
*
akVs4CaloSecondaryVertexTagInfos
* (akVs4CaloSimpleSecondaryVertexHighEffBJetTags+
akVs4CaloSimpleSecondaryVertexHighPurBJetTags+
akVs4CaloCombinedSecondaryVertexBJetTags+
akVs4CaloCombinedSecondaryVertexV2BJetTags
)
)
akVs4CaloJetBtaggingNegSV = cms.Sequence(akVs4CaloImpactParameterTagInfos
*
akVs4CaloSecondaryVertexNegativeTagInfos
* (akVs4CaloNegativeSimpleSecondaryVertexHighEffBJetTags+
akVs4CaloNegativeSimpleSecondaryVertexHighPurBJetTags+
akVs4CaloNegativeCombinedSecondaryVertexBJetTags+
akVs4CaloPositiveCombinedSecondaryVertexBJetTags+
akVs4CaloNegativeCombinedSecondaryVertexV2BJetTags+
akVs4CaloPositiveCombinedSecondaryVertexV2BJetTags
)
)
akVs4CaloJetBtaggingMu = cms.Sequence(akVs4CaloSoftPFMuonsTagInfos * (akVs4CaloSoftPFMuonBJetTags
+
akVs4CaloSoftPFMuonByIP3dBJetTags
+
akVs4CaloSoftPFMuonByPtBJetTags
+
akVs4CaloNegativeSoftPFMuonByPtBJetTags
+
akVs4CaloPositiveSoftPFMuonByPtBJetTags
)
)
akVs4CaloJetBtagging = cms.Sequence(akVs4CaloJetBtaggingIP
*akVs4CaloJetBtaggingSV
*akVs4CaloJetBtaggingNegSV
# *akVs4CaloJetBtaggingMu
)
akVs4CalopatJetsWithBtagging = patJets.clone(jetSource = cms.InputTag("akVs4CaloJets"),
genJetMatch = cms.InputTag("akVs4Calomatch"),
genPartonMatch = cms.InputTag("akVs4Caloparton"),
jetCorrFactorsSource = cms.VInputTag(cms.InputTag("akVs4Calocorr")),
JetPartonMapSource = cms.InputTag("akVs4CaloPatJetFlavourAssociationLegacy"),
JetFlavourInfoSource = cms.InputTag("akVs4CaloPatJetFlavourAssociation"),
trackAssociationSource = cms.InputTag("akVs4CaloJetTracksAssociatorAtVertex"),
useLegacyJetMCFlavour = True,
discriminatorSources = cms.VInputTag(cms.InputTag("akVs4CaloSimpleSecondaryVertexHighEffBJetTags"),
cms.InputTag("akVs4CaloSimpleSecondaryVertexHighPurBJetTags"),
cms.InputTag("akVs4CaloCombinedSecondaryVertexBJetTags"),
cms.InputTag("akVs4CaloCombinedSecondaryVertexV2BJetTags"),
cms.InputTag("akVs4CaloJetBProbabilityBJetTags"),
cms.InputTag("akVs4CaloJetProbabilityBJetTags"),
#cms.InputTag("akVs4CaloSoftPFMuonByPtBJetTags"),
#cms.InputTag("akVs4CaloSoftPFMuonByIP3dBJetTags"),
cms.InputTag("akVs4CaloTrackCountingHighEffBJetTags"),
cms.InputTag("akVs4CaloTrackCountingHighPurBJetTags"),
),
jetIDMap = cms.InputTag("akVs4CaloJetID"),
addBTagInfo = True,
addTagInfos = True,
addDiscriminators = True,
addAssociatedTracks = True,
addJetCharge = False,
addJetID = False,
getJetMCFlavour = True,
addGenPartonMatch = True,
addGenJetMatch = True,
embedGenJetMatch = True,
embedGenPartonMatch = True,
# embedCaloTowers = False,
# embedPFCandidates = True
)
akVs4CaloNjettiness = Njettiness.clone(
src = cms.InputTag("akVs4CaloJets"),
R0 = cms.double( 0.4)
)
akVs4CalopatJetsWithBtagging.userData.userFloats.src += ['akVs4CaloNjettiness:tau1','akVs4CaloNjettiness:tau2','akVs4CaloNjettiness:tau3']
akVs4CaloJetAnalyzer = inclusiveJetAnalyzer.clone(jetTag = cms.InputTag("akVs4CalopatJetsWithBtagging"),
genjetTag = 'ak4HiGenJets',
rParam = 0.4,
matchJets = cms.untracked.bool(False),
matchTag = 'patJetsWithBtagging',
pfCandidateLabel = cms.untracked.InputTag('particleFlowTmp'),
trackTag = cms.InputTag("hiGeneralTracks"),
fillGenJets = True,
isMC = True,
doSubEvent = True,
useHepMC = cms.untracked.bool(False),
genParticles = cms.untracked.InputTag("genParticles"),
eventInfoTag = cms.InputTag("generator"),
doLifeTimeTagging = cms.untracked.bool(True),
doLifeTimeTaggingExtras = cms.untracked.bool(False),
bTagJetName = cms.untracked.string("akVs4Calo"),
jetName = cms.untracked.string("akVs4Calo"),
genPtMin = cms.untracked.double(5),
hltTrgResults = cms.untracked.string('TriggerResults::'+'HISIGNAL'),
doTower = cms.untracked.bool(True),
doSubJets = cms.untracked.bool(False),
doGenSubJets = cms.untracked.bool(False),
subjetGenTag = cms.untracked.InputTag("ak4GenJets"),
doGenTaus = True
)
akVs4CaloJetSequence_mc = cms.Sequence(
#akVs4Caloclean
#*
akVs4Calomatch
#*
#akVs4CalomatchGroomed
*
akVs4Caloparton
*
akVs4Calocorr
*
#akVs4CaloJetID
#*
akVs4CaloPatJetFlavourIdLegacy
#*
#akVs4CaloPatJetFlavourId # Use legacy algo till PU implemented
*
akVs4CaloJetTracksAssociatorAtVertex
*
akVs4CaloJetBtagging
*
akVs4CaloNjettiness
*
akVs4CalopatJetsWithBtagging
*
akVs4CaloJetAnalyzer
)
akVs4CaloJetSequence_data = cms.Sequence(akVs4Calocorr
*
#akVs4CaloJetID
#*
akVs4CaloJetTracksAssociatorAtVertex
*
akVs4CaloJetBtagging
*
akVs4CaloNjettiness
*
akVs4CalopatJetsWithBtagging
*
akVs4CaloJetAnalyzer
)
akVs4CaloJetSequence_jec = cms.Sequence(akVs4CaloJetSequence_mc)
akVs4CaloJetSequence_mb = cms.Sequence(akVs4CaloJetSequence_mc)
akVs4CaloJetSequence = cms.Sequence(akVs4CaloJetSequence_jec)
akVs4CaloJetAnalyzer.genPtMin = cms.untracked.double(1)
akVs4CaloJetAnalyzer.jetPtMin = cms.double(1)
| [
"rchudasa@cern.ch"
] | rchudasa@cern.ch |
a56cc49b7af3847ff824b21c622bd6cfb1a5aba0 | 9c6c92f1df99b1cd996b99defda2a68b8f672215 | /detection.py | 28322274600d4f5cd2ada7e0072d405322d10835 | [] | no_license | oanders/Object_detection | 98d27630400bc174dc693afd20d11d27014beca4 | 328d0f2c3004e48c4b3289bb81ea4a5a2076ef1b | refs/heads/master | 2021-01-10T05:50:29.449343 | 2016-04-10T19:14:49 | 2016-04-10T19:14:49 | 54,318,149 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,865 | py | import cv2
import numpy as np
from matplotlib import pyplot as plot
import os
#from os import listdir, makedirs
#from os.path import isfile, join, isdir
import copy
from class_sift import Sift
from class_akaze import AKaze
from class_orb import ORB
from class_detector import Detector
MIN_MATCH_COUNT = 10
def main():
    """Interactive entry point: run a single test (1) or the whole suite (2)."""
    selection = int(raw_input("To run a single tests, press (1). To run all test, press (2). : "))
    if selection == 2:
        m2()
    elif selection == 1:
        m1()
    else:
        main()  # unrecognised answer: prompt again
#Main method, it calls other functions and
#tests different detection algortithms on images
def m1():
    """Run all detection algorithms on one user-chosen object/test folder."""
    #Ask wich object we are running tests on
    folder = choose_folder()
    test = choose_test(folder)
    #Path for a result directory to be created
    directory = 'test_results_kaze_2/' + folder + '/' + test #Quick test, remove 2 later
    #Path for Training image
    path = 'Images/' + folder + '/' + test
    img1 = path + '/' + 'tr.jpg'
    print('using training image: ' + img1)
    tr_img, tr_grey = load__greyScale(img1)
    # Test images are t1.jpg .. t(nr-1).jpg; tr.jpg accounts for the remaining
    # file counted by read_nr_images, hence the strict '<' below.
    nr = read_nr_images(path)
    i = 1
    while i < nr:
        img2 = path + '/' + 't' + str(i) + '.jpg'
        print('using test image: ' + img2)
        test_img, test_grey = load__greyScale(img2)
        #Run algorithms (SIFT, KAZE, AKAZE and ORB in one pass)
        res_sift_img, good_matches_sift, time_sift, res_kaze_img, good_matches_kaze, time_kaze, res_akaze_img, good_matches_akaze, time_akaze, res_orb_img, good_matches_orb, time_orb = run_test_algorithms(tr_grey, test_grey, tr_img, test_img)
        #Table image listing the detection time of each algorithm
        table_img = create_table(res_orb_img, good_matches_sift, time_sift, good_matches_akaze, time_akaze, good_matches_orb, time_orb)
        #Draw plots for the resulting images and save the figure to `directory`
        draw_plots(res_sift_img, res_kaze_img, res_akaze_img, res_orb_img, table_img, i, directory, folder, test)
        #go to next image in the folder
        i = i+1
#Runs all tests
def m2():
    """Run every test of every object folder under Images/ (batch mode)."""
    folders = read_folders('Images')
    nr_folders = len(folders)
    curr_folder = 0
    for folder in folders:
        tests = read_folders('Images/' + folder)
        nr_tests = len(tests)
        curr_test = 1
        curr_folder = curr_folder + 1
        for test in tests:
            # Progress banner: which object/test combination is running.
            print('----Working on object ' + str(curr_folder) + ' out of ' + str(nr_folders) + ' and test ' + str(curr_test) + ' out of ' + str(nr_tests) + '----')
            curr_test = curr_test + 1
            #Path for a result directory to be created
            directory = 'test_results_kaze_2/' + folder + '/' + test #Quick test, remove 2 later
            #Path for Training image
            path = 'Images/' + folder + '/' + test
            img1 = path + '/' + 'tr.jpg'
            print('using training image: ' + img1)
            tr_img, tr_grey = load__greyScale(img1)
            # Test images are t1.jpg .. t(nr-1).jpg (tr.jpg is the nr-th file).
            nr = read_nr_images(path)
            i = 1
            while i < nr:
                img2 = path + '/' + 't' + str(i) + '.jpg'
                print('using test image: ' + img2)
                test_img, test_grey = load__greyScale(img2)
                #Run algorithms
                res_sift_img, good_matches_sift, time_sift, res_kaze_img, good_matches_kaze, time_kaze, res_akaze_img, good_matches_akaze, time_akaze, res_orb_img, good_matches_orb, time_orb = run_test_algorithms(tr_grey, test_grey, tr_img, test_img)
                #Table containing the number of matches for each algorithm
                table_img = create_table(res_orb_img, good_matches_sift, time_sift, good_matches_akaze, time_akaze, good_matches_orb, time_orb)
                #Draw plots for the resulting images
                draw_plots(res_sift_img, res_kaze_img, res_akaze_img, res_orb_img, table_img, i, directory, folder, test)
                #go to next image in the folder
                i = i+1
# Interactive selection of one of the object folders under Images/.
def choose_folder():
    """List the available object folders and return the one the user picks."""
    print('\nAvailable objects:\n')
    folders = read_folders('Images')
    for number, name in enumerate(folders, 1):
        print('(' + str(number) + '): ' + name)
    choice = int(raw_input('\nChoose a folder nr: '))
    return folders[choice-1]
#Read a folder and return the names of all entries it contains
def read_folders(path):
    """Return the names of all entries (files and sub-folders) in *path*.

    os.listdir already returns a fresh list, so the original's wrapping
    list comprehension was a no-op and has been removed.
    """
    return os.listdir(path)
# Interactive selection of one test folder inside the chosen object folder.
def choose_test(folder):
    """List the tests available for *folder* and return the one chosen."""
    print('\nAvailable tests:\n')
    tests = read_folders('Images/' + folder)
    for number, name in enumerate(tests, 1):
        print('(' + str(number) + '): ' + name)
    chosen_test = int(raw_input('\nChoose test nr: '))
    return tests[chosen_test-1]
#Returns the number of images of the given path/folder
def read_nr_images(path):
    """Count the regular files (images) that live directly inside *path*."""
    return sum(1 for entry in os.listdir(path)
               if os.path.isfile(os.path.join(path, entry)))
#Read an image and return its grey picture
def load__greyScale(image):
    """Load *image* from disk and return (BGR image, greyscale image)."""
    print('Loading: ' + image)
    img = cv2.imread(image)
    # cv2.imread returns None for unreadable paths; unpacking .shape then
    # fails immediately, which doubles as an implicit load check.
    h, w, d = img.shape
    grey = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    return img, grey
#Runs all 3 algorithms on the same object and test.
#Returns 1 image for the result of each algorithm tested.
def run_test_algorithms(tr_grey, test_grey, tr_img, test_img):
    """Match training vs. test image with SIFT, KAZE, AKAZE and ORB.

    Returns, for every algorithm: its rendered result image, its list of
    good matches and its detection time (12 values in total).
    """
    #Create detectors
    sift_algo = cv2.xfeatures2d.SIFT_create()
    kaze_algo = cv2.KAZE_create()
    akaze_algo = cv2.AKAZE_create()
    orb_algo = cv2.ORB_create()
    # Second Detector argument presumably selects the matcher norm; ORB's
    # binary descriptors use NORM_HAMMING -- confirm in class_detector.
    sift = Detector(sift_algo, 'N')
    kaze = Detector(kaze_algo, 'N')
    akaze = Detector(akaze_algo, 'N')
    orb = Detector(orb_algo, 'NORM_HAMMING')
    #Call sift
    kpAS, descAS, kpBS, descBS, good_matches_sift, time_sift = sift.match(tr_grey, test_grey)
    #Call Kaze
    kpAK, descAK, kpBK, descBK, good_matches_kaze, time_kaze = kaze.match(tr_grey, test_grey)
    #Call Akaze class
    kpAAK, descAAK, kpBAK, descBAK, good_matches_akaze, time_akaze = akaze.match(tr_grey, test_grey)
    #Call ORB class
    kpAorb, descAorb, kpBorb, descBorb, good_matches_orb, time_orb = orb.match(tr_grey, test_grey)
    # Homography inlier mask and projected object outline per algorithm.
    maskS, dtsS = location_extraction(kpAS, kpBS, good_matches_sift,tr_img)
    maskK, dtsK = location_extraction(kpAK, kpBK, good_matches_kaze,tr_img)
    maskAK, dtsAK = location_extraction(kpAAK, kpBAK, good_matches_akaze,tr_img)
    maskorb, dtsorb = location_extraction(kpAorb, kpBorb, good_matches_orb,tr_img)
    #Copy of image so that lines from first method do not last to second.
    # NOTE(review): SIFT draws directly onto the caller's test_img below,
    # while the other three get copies -- confirm that mutation is intended.
    tmp_img1 = copy.copy(test_img)
    tmp_img2 = copy.copy(test_img)
    tmp_img3 = copy.copy(test_img)
    res_sift_img = create_results(tr_img, test_img, kpAS, kpBS, dtsS,
                                  maskS, good_matches_sift)
    res_kaze_img = create_results(tr_img, tmp_img1, kpAK, kpBK, dtsK,
                                  maskK, good_matches_kaze)
    res_orb_img = create_results(tr_img, tmp_img2, kpAorb, kpBorb, dtsorb,
                                 maskorb, good_matches_orb)
    res_akaze_img = create_results(tr_img, tmp_img3, kpAAK, kpBAK, dtsAK,
                                   maskAK, good_matches_akaze)
    return res_sift_img, good_matches_sift, time_sift, res_kaze_img, good_matches_kaze, time_kaze, res_akaze_img, good_matches_akaze, time_akaze, res_orb_img, good_matches_orb, time_orb
#Takes keypoint descriptor and extracts its location
def location_extraction(kpA, kpB, good_matches, tr_img):
    """Estimate the object's outline in the test image via a homography.

    Returns (matchesMask, dst): the RANSAC inlier mask and the projected
    corners of the training image, or (None, None) when there are too few
    good matches (module constant MIN_MATCH_COUNT).
    """
    if len(good_matches) > MIN_MATCH_COUNT:
        # Pixel coordinates of the matched keypoints in both images.
        src_pts = np.float32([kpA[m.queryIdx].pt for m in good_matches]).reshape(-1,1,2)
        dst_pts = np.float32([kpB[m.trainIdx].pt for m in good_matches]).reshape(-1,1,2)
        # RANSAC with a 5-pixel reprojection threshold.
        M,mask = cv2.findHomography(src_pts,dst_pts, cv2.RANSAC, 5.0)
        matchesMask = mask.ravel().tolist()
        # Project the training image's four corners into the test image.
        h,w,d = tr_img.shape
        pts = np.float32( [ [0,0], [0,h-1], [w-1,h-1],[w-1,0] ] ).reshape(-1,1,2)
        dst = cv2.perspectiveTransform(pts,M)
        return matchesMask, dst
    else:
        print("Not enough matches")
        matchesMask = None
        return matchesMask, None
#Create a result image showing the detected matches if the algorithm
#successfully found the object. Otherwise it presents an image with
#all the detected keypoints
def create_results(tr_img, tmp_img, kpA, kpB, dst, matchesMask, good_matches):
    """Render one algorithm's result: inlier matches plus the projected
    object outline on success, or the raw keypoints side by side on failure.
    """
    # BUGFIX: on success `dst` is a NumPy array, so the original `dst != None`
    # performed an elementwise comparison whose truth value is ambiguous on
    # modern NumPy; an identity check is the correct None test.
    if dst is not None:
        # Draw the projected outline, then the inlier matches only.
        tmp_img = cv2.polylines(tmp_img,[np.int32(dst)],True, 255,3,cv2.LINE_AA)
        draw_params = dict(matchColor = (0, 255, 0),
                           singlePointColor = None,
                           matchesMask = matchesMask, # draw only inliers
                           flags = 2)
        res_img = cv2.drawMatches(tr_img,kpA,tmp_img,kpB,good_matches,None,**draw_params)
        return res_img
    else:
        #Draw keypoints that were detected for each picture
        res_img1 = cv2.drawKeypoints(tr_img, kpA, None, flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
        res_img2 = cv2.drawKeypoints(tmp_img, kpB, None, flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
        #Get size of both pictures
        h1, w1, d1 = res_img1.shape
        h2, w2, d2 = res_img2.shape
        # Canvas tall enough for the larger image, wide enough for both.
        res_img = np.zeros((max(h1,h2), w1+w2, d1), np.uint8)
        res_img[:h1, :w1] = res_img1
        res_img[:h2, w1:w1+w2] = res_img2
        return res_img
# Builds a black image listing each algorithm's detection time as text.
def create_table(res_orb_img, good_matches_sift, time_sift, good_matches_akaze, time_akaze, good_matches_orb, time_orb):
    """Return a text panel (same size as the ORB result) with the timings."""
    h, w, d = res_orb_img.shape
    table_img = np.zeros((h, w, d), np.uint8)
    font = cv2.FONT_HERSHEY_SIMPLEX
    rows = [
        ('Time of detection SIFT: ' + str(time_sift), 100),
        ('Time of detection KAZE: ' + str(time_akaze), 200),
        ('Time of detection ORB: ' + str(time_orb), 300),
    ]
    for label, y in rows:
        cv2.putText(table_img, label, (100, y), font, 1, (255,255,255), 2, cv2.LINE_AA)
    return table_img
def draw_plots(sift, kaze, akaze, orb, table, index, directory, folder, test):
plot.subplot(221), plot.imshow(sift), plot.title('Sift')
plot.subplot(222), plot.imshow(kaze), plot.title('KAZE')
plot.subplot(223), plot.imshow(akaze), plot.title('AKAZE')
plot.subplot(224), plot.imshow(orb), plot.title('ORB')
if not os.path.isdir(directory):
os.makedirs(directory)
number = index
fig_name = directory + '/' + folder+'_'+test+'_'+str(index) + '.png'
plot.savefig(fig_name, format ='png', dpi = 600)
# Script entry point: prompt the user and run the selected benchmark.
main()
| [
"oanders@kth.se"
] | oanders@kth.se |
73dde30ee3e5e9b336b4af24f9c38c43d0e0cf60 | a5698f82064aade6af0f1da21f504a9ef8c9ac6e | /huaweicloud-sdk-cce/huaweicloudsdkcce/v3/region/cce_region.py | 8075aff2ddabc7a62cba30087f4176a99207fa16 | [
"Apache-2.0"
] | permissive | qizhidong/huaweicloud-sdk-python-v3 | 82a2046fbb7d62810984399abb2ca72b3b47fac6 | 6cdcf1da8b098427e58fc3335a387c14df7776d0 | refs/heads/master | 2023-04-06T02:58:15.175373 | 2021-03-30T10:47:29 | 2021-03-30T10:47:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,907 | py | # coding: utf-8
import types
from huaweicloudsdkcore.region.region import Region
class CceRegion:
    """Static registry of CCE service endpoints, keyed by region id."""

    def __init__(self):
        # The class is used purely as a namespace for the region constants;
        # instances carry no state.
        pass

    CN_NORTH_1 = Region(id="cn-north-1", endpoint="https://cce.cn-north-1.myhuaweicloud.com")
    CN_NORTH_4 = Region(id="cn-north-4", endpoint="https://cce.cn-north-4.myhuaweicloud.com")
    CN_SOUTH_1 = Region(id="cn-south-1", endpoint="https://cce.cn-south-1.myhuaweicloud.com")
    CN_EAST_2 = Region(id="cn-east-2", endpoint="https://cce.cn-east-2.myhuaweicloud.com")
    CN_EAST_3 = Region(id="cn-east-3", endpoint="https://cce.cn-east-3.myhuaweicloud.com")
    CN_SOUTHWEST_2 = Region(id="cn-southwest-2", endpoint="https://cce.cn-southwest-2.myhuaweicloud.com")
    AP_SOUTHEAST_1 = Region(id="ap-southeast-1", endpoint="https://cce.ap-southeast-1.myhuaweicloud.com")
    AP_SOUTHEAST_2 = Region(id="ap-southeast-2", endpoint="https://cce.ap-southeast-2.myhuaweicloud.com")
    AP_SOUTHEAST_3 = Region(id="ap-southeast-3", endpoint="https://cce.ap-southeast-3.myhuaweicloud.com")
    AF_SOUTH_1 = Region(id="af-south-1", endpoint="https://cce.af-south-1.myhuaweicloud.com")

    # Read-only mapping: callers cannot mutate the region table.
    static_fields = types.MappingProxyType({
        "cn-north-1": CN_NORTH_1,
        "cn-north-4": CN_NORTH_4,
        "cn-south-1": CN_SOUTH_1,
        "cn-east-2": CN_EAST_2,
        "cn-east-3": CN_EAST_3,
        "cn-southwest-2": CN_SOUTHWEST_2,
        "ap-southeast-1": AP_SOUTHEAST_1,
        "ap-southeast-2": AP_SOUTHEAST_2,
        "ap-southeast-3": AP_SOUTHEAST_3,
        "af-south-1": AF_SOUTH_1,
    })

    @staticmethod
    def value_of(region_id, static_fields=static_fields):
        """Return the Region for *region_id*; raise KeyError for empty or
        unknown ids.

        The default argument binds the mapping at class-creation time so the
        static method can reach it without naming the class.
        """
        if region_id is None or len(region_id) == 0:
            raise KeyError("Unexpected empty parameter: region_id.")
        if not static_fields.get(region_id):
            raise KeyError("Unexpected region_id: " + region_id)
        return static_fields.get(region_id)
| [
"hwcloudsdk@huawei.com"
] | hwcloudsdk@huawei.com |
e911275049761c3d34e3f24d5c1a7c806c6d85e4 | a70ff84caceb693723846542c19d300bea321deb | /coins_test/settings.py | 53bc0e9900f1b3562edf8ec612055aea94864f58 | [] | no_license | Greyvend/coins_test | 10420bffa7d4510de20bca9fe7b71bd7091c01b1 | a259907a160d482bc0217aeecbb97c1665db62dd | refs/heads/master | 2021-04-12T10:13:33.417998 | 2016-08-14T16:42:13 | 2016-08-14T16:42:13 | 65,676,097 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,245 | py | """
Django settings for coins_test project.
Generated by 'django-admin startproject' using Django 1.10.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to version control; rotate it and load
# it from the environment before any production deployment.
SECRET_KEY = '0)8vo9&t7i_=0ori8xz#*nfwohp#vusboee1h6pe_+@@5utgnp'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

# Empty is acceptable while DEBUG is True; real host names are required in
# production.
ALLOWED_HOSTS = []

# Application definition
# Apps are grouped so the project's own apps stay separate from Django and
# third-party ones.
DJANGO_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
]
THIRD_PARTY_APPS = [
    'rest_framework',  # Django REST Framework for the HTTP API
]
LOCAL_APPS = [
    'payments'
]
INSTALLED_APPS = DJANGO_APPS + THIRD_PARTY_APPS + LOCAL_APPS
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'coins_test.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,  # templates are discovered inside each app
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'coins_test.wsgi.application'

# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
# SQLite file next to manage.py -- the development default.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}

# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]

# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
| [
"svmosin@gmail.com"
] | svmosin@gmail.com |
9175323c790049f9662192795528961fbfa3ae6f | 0256a449bd686479aa91905a1763973548d9923c | /two_sum.py | 35b7d25d54872dd0bc05ae114b9aa529adf58ac5 | [] | no_license | zjgwhcn/StartCodingNow | 2aa776865788bec1c8d11e14bb4ef0c97280a68d | b235eb280cc11082c680563dc2a261dfc2f2cdce | refs/heads/master | 2021-06-13T03:47:31.435068 | 2017-02-19T13:25:34 | 2017-02-19T13:25:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 758 | py | #!/usr/bin/env python
# -*-coding:utf-8 -*-
# Time:16/9/2016
# 1
class Solution(object):
    """Brute-force two-sum: O(n^2) scan over every index pair."""
    def twoSum(self, nums, target):
        # Try each pair (i, j) with i < j; return the first pair that hits
        # the target sum (implicitly None when no pair exists).
        for i, first in enumerate(nums):
            for j in range(i + 1, len(nums)):
                if first + nums[j] == target:
                    return [i, j]
#2
'''
num = [1,5,8,2,7]
target = 13
dict = {}
print dict
for i in xrange(len(num)):
x = num[i]
if target-x in dict:
print [dict[target-x], i]
dict[x] = i
print dict[x]
print dict
print dict[8]
'''
class Solution(object):
    """Hash-map two-sum: O(n) time, O(n) extra space.

    This second definition shadows the brute-force Solution above, so it is
    the implementation callers of this module actually get.
    """
    def twoSum(self, num, target):
        """Return (i, j), i < j, of the first pair with num[i] + num[j] == target,
        or None when no such pair exists.
        """
        # `seen` maps value -> index of its first occurrence (the original
        # local was called `dict`, shadowing the builtin).
        seen = {}
        # enumerate() replaces the Python-2-only xrange() indexing, making
        # this consistent with the sibling class, which already uses range().
        for i, x in enumerate(num):
            if target - x in seen:
                return (seen[target - x], i)
            seen[x] = i
| [
"louchaooo@qq.com"
] | louchaooo@qq.com |
8418693b0b7f600bc206c9513a976a8685d46f52 | 7a7ed5656b3a162523ba0fd351dd551db99d5da8 | /x11/library/wayland/actions.py | 73d6d6bb7fcd4510e4e0b35f12090d0231dd9fe0 | [] | no_license | klaipedetis/PisiLinux | acd4953340ebf14533ea6798275b8780ad96303b | 3384e5dfa1acd68fa19a26a6fa1cf717136bc878 | refs/heads/master | 2021-01-24T22:59:30.055059 | 2013-11-08T21:43:39 | 2013-11-08T21:43:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 755 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2010 TUBITAK/BILGEM
# Licensed under the GNU General Public License, version 2.
# See the file http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt
from pisi.actionsapi import shelltools
from pisi.actionsapi import autotools
from pisi.actionsapi import pisitools
from pisi.actionsapi import get
# Target library dir: 32-bit multilib (emul32) builds install under /usr/lib32.
# NOTE(review): Libdir is not referenced elsewhere in this file -- confirm it
# is consumed by the packaging machinery before removing it.
Libdir = "/usr/lib32" if get.buildTYPE() == "emul32" else "/usr/lib"
def setup():
    # Regenerate the autotools build system, then configure without
    # documentation or static libraries.
    autotools.autoreconf("-vif")
    autotools.configure("--disable-documentation --disable-static")
def build():
    # Compile with make.
    autotools.make()
def install():
    """Install into the package root; native builds also ship the docs."""
    autotools.rawInstall("DESTDIR=%s" % get.installDIR())
    # Documentation is only packaged for the primary (non-emul32) build.
    if get.buildTYPE() != "emul32":
        pisitools.dodoc("COPYING", "TODO", "README")
"namso-01@hotmail.it"
] | namso-01@hotmail.it |
9eb758de63ec95f04758e32b359b987bccabd53c | 10a79a489ae800b25332c12ec829f99f0480c6cf | /floreria/settings.py | 94470c918c589eb73cf0e8a9ca339410226fcef1 | [] | no_license | taamfernandez/floreria | 2af0855ee413fbd617926c24a55d95a5894ee06e | 7f04b85a1dc4cf186dbe7a5f1e367f97116bd232 | refs/heads/master | 2020-09-26T04:13:23.938569 | 2019-12-18T16:42:41 | 2019-12-18T16:42:41 | 226,162,344 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,582 | py | """
Django settings for floreria project.
Generated by 'django-admin startproject' using Django 3.0.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to the repository; rotate it and read
# it from the environment before deploying.
SECRET_KEY = '723_q8l5(pd=u@#jgw@_25cho=a)th0q2=(1k)$nami@v*9e_*'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

ALLOWED_HOSTS = []

# After login/logout, send the user back to the site root.
LOGIN_REDIRECT_URL = '/'
LOGOUT_REDIRECT_URL = '/'

# Facebook OAuth2 credentials used by social-auth.
# NOTE(review): the app secret is committed here -- move it out of source.
SOCIAL_AUTH_FACEBOOK_KEY = '826344021127651'
SOCIAL_AUTH_FACEBOOK_SECRET = 'dd6e96795ccda4ab50a60a9d1c5723f7'

# Development e-mail backend: messages are printed to the console.
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'

# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'core.apps.CoreConfig',  # project app
    'crispy_forms',          # Bootstrap form rendering
    'rest_framework',
    'social_django',         # social login (Facebook)
    'pwa',                   # progressive-web-app support
    'fcm_django',            # Firebase Cloud Messaging
]
CRISPY_TEMPLATE_PACK = 'bootstrap4'
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
    'social_django.middleware.SocialAuthExceptionMiddleware',
]
ROOT_URLCONF = 'floreria.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
                'social_django.context_processors.backends',
                'social_django.context_processors.login_redirect',
            ],
        },
    },
]
WSGI_APPLICATION = 'floreria.wsgi.application'

# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}

# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]

# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'es-mx'
TIME_ZONE = 'America/Santiago'
USE_I18N = True
USE_L10N = True
USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'

# URL under which already-uploaded media files are served.
MEDIA_URL = "/media/"
# Filesystem directory where uploaded images are stored.
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')

# Try Facebook OAuth2 first, then Django's regular model-backed auth.
AUTHENTICATION_BACKENDS = (
    'social_core.backends.facebook.FacebookOAuth2',
    'django.contrib.auth.backends.ModelBackend',
)
PWA_SERVICE_WORKER_PATH = os.path.join(BASE_DIR, 'serviceworker.js')
FCM_DJANGO_SETTINGS = {
"APP_VERBOSE_NAME": "floreria",
# default: _('FCM Django')
"FCM_SERVER_KEY": "AIzaSyBYdNgB6IL05qJIOI7T7eQCfETWeaR8-5Q",
# true if you want to have only one active device per registered user at a time
# default: False
"ONE_DEVICE_PER_USER": False,
# devices to which notifications cannot be sent,
# are deleted upon receiving error response from FCM
# default: False
"DELETE_INACTIVE_DEVICES": True,
} | [
"ta.fernandeza@alumnos.duoc.cl"
] | ta.fernandeza@alumnos.duoc.cl |
107f09e9df2f036798d34862b440c2ebd70a7a7a | a9117d287019a6860693e8f6dbfac152f5e92a75 | /fe/lda_fe.py | 3467205482cca57aa79025515bb32b7db3f508ca | [] | no_license | backonhighway/kaggle_elo | 48a2e4eda9ef5025665564b22f99bfe2cf296dc6 | 636c1ecc64d70c9f1375653687679b745b8bf6db | refs/heads/master | 2020-04-29T01:26:23.269169 | 2019-02-28T15:29:39 | 2019-02-28T15:29:39 | 175,730,137 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,351 | py | import numpy as np
import pandas as pd
import gc
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.decomposition import LatentDirichletAllocation
from typing import List, Tuple
from multiprocessing.pool import Pool
from functools import partial
import itertools
from concurrent import futures
class GoldenLDA:
    """Builds per-card LDA topic features from transaction columns.

    For each requested column, the values belonging to one card are joined
    into a pseudo-document; an online LDA over those documents yields
    `width` latent topics per column, emitted as feature columns named
    "<name>-<column>-<k>".
    """

    def __init__(self, timer, name=None):
        self.timer = timer  # progress timer exposing a .time(msg) method
        self.width = 5      # number of LDA topics kept per column
        # TODO(review): the `name` argument is accepted but ignored -- the
        # feature prefix is always "lda"; either honor it or drop it.
        self.name = "lda"

    def create_document_term_matrix(self, df, col2):
        """Vectorize the per-card word lists of column *col2* into counts."""
        word_list = self.create_word_list(df, col2)
        vectorizer = CountVectorizer()
        return vectorizer.fit_transform(word_list)

    def compute_latent_vectors(self, col2, df) -> np.ndarray:
        """Fit a 5-topic online LDA for *col2* and return the document-topic
        matrix (rows follow create_word_list's ordering)."""
        document_term_matrix = self.create_document_term_matrix(df, col2)
        transformer = LatentDirichletAllocation(n_components=5, learning_method="online", random_state=99)
        return transformer.fit_transform(document_term_matrix)

    def create_features(self, df, target_cols) -> pd.DataFrame:
        """Compute latent vectors for every target column in parallel and
        merge them into a single per-card feature frame.

        (Removed a no-op `target_cols = target_cols` self-assignment and the
        dead, commented-out multiprocessing.Pool variant.)
        """
        col2s = []
        latent_vectors = []
        future_list = list()
        # One worker per column: each LDA fit is independent of the others.
        with futures.ProcessPoolExecutor(max_workers=len(target_cols)) as executor:
            for c in target_cols:
                col2s.append(c)
                future_list.append(executor.submit(self.compute_latent_vectors, c, df))
            future_results = [f.result() for f in future_list]
            for res in future_results:
                latent_vectors.append(res.astype(np.float32))
        self.timer.time("done lda ")
        gc.collect()
        return self.get_feature(df, col2s, latent_vectors)

    def get_feature(self, df: pd.DataFrame, cs2: List[str], vs: List[np.ndarray]) -> pd.DataFrame:
        """Assemble the latent vectors into one DataFrame keyed by card_id."""
        card_set = list(set(df["card_id"]))
        features = np.zeros(shape=(len(card_set), len(cs2) * self.width), dtype=np.float32)
        columns = list()
        for i, (col2, latent_vector) in enumerate(zip(cs2, vs)):
            offset = i * self.width
            for j in range(self.width):
                columns.append(self.name + '-' + col2 + '-' + str(j))
            # NOTE(review): indexes the latent matrix by the card id *value*,
            # which assumes card ids are valid 0..n-1 row positions matching
            # create_word_list's list(set(...)) ordering -- confirm.
            for j, val1 in enumerate(card_set):
                features[j, offset:offset + self.width] = latent_vector[val1]
        ret_df = pd.DataFrame(data=features, columns=columns)
        ret_df["card_id"] = card_set
        return ret_df

    @staticmethod
    def create_word_list(df: pd.DataFrame, col2: str) -> List[str]:
        """Return one space-joined 'document' per card: the card's *col2*
        values shifted by +10 (values 1-9 would be vectorizer stop words)."""
        card_set = list(set(df["card_id"]))
        col2_list = list()
        for val1 in card_set:
            _df = df[df["card_id"] == val1]
            col2_list.append(list(_df[col2]+10))  # add 10 to avoid stop-word
        return [' '.join(map(str, a_list)) for a_list in col2_list]
"shota.okubo@dena.com"
] | shota.okubo@dena.com |
aeb31cb150012236b10aba55815c798a1a949273 | f559186ea67099b0a58a0e99c17aec291fd941e6 | /inscription/models/Contacts.py | ed32fa0c89f0980aaaca7c689f096372e10adb0b | [] | no_license | JairoDuarte/inscriptionLP | 611f17e9a03d1a0f25d862803d924622a95be501 | 2312d79b9f3f952691a7a529257e5f45175838e5 | refs/heads/master | 2020-05-25T18:16:02.494195 | 2017-09-09T10:52:10 | 2017-09-09T10:52:10 | 84,953,040 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,076 | py | from __future__ import unicode_literals
from django.db import models
from django.utils.translation import ugettext as _
from .Candidat import Candidat
class Contacts(models.Model):
    """Contact details attached one-to-one to a Candidat."""

    # Deleting the Candidat cascades to its contact record.
    candidat = models.OneToOneField(
        Candidat,
        on_delete=models.CASCADE,
        verbose_name=_('Candidatinfo')
    )
    # unique=True: one contact record per e-mail address.
    email = models.EmailField(
        _('Email'),
        max_length=80,
        unique=True)
    # Mobile number, also unique across candidates.
    portable_phone = models.CharField(
        _('Telephone portable'),
        max_length=20,
        unique=True)
    # Landline is optional.
    fixe_phone = models.CharField(
        _('Telephone fixe'),
        max_length=20,
        blank=True)
    adresse = models.CharField(
        _('adresse de résidence'),
        max_length=255)
    ville = models.CharField(
        _('Ville de résidence'),
        max_length=100)
    pays = models.CharField(
        _('pays de résidence'),
        max_length=100)

    def __str__(self):
        # NOTE(review): there is no separator between adresse and ville, so
        # the two run together in listings; confirm whether
        # "adresse ville - pays" was intended (display-only behavior).
        return self.adresse + self.ville + "-" + self.pays

    class Meta:
        verbose_name_plural = 'contacts'
        db_table = "contacts"
| [
"alfredojairo17@hotmail.com"
] | alfredojairo17@hotmail.com |
e87625a78f32a96dadb585f31cd7212e2872e95d | 2cd2746c16e0435d57282cac463da4969dc098ac | /metricas.py | 16dcd91c8c1a80470f05b747cbfaf8f813e9a8d0 | [] | no_license | joseallones/Flex | 06a4a1bad454eab28e841dbfe027b2f0c7751e9b | 363a185d6359c05452bbb203781c14fd387066df | refs/heads/master | 2023-03-11T21:48:09.491721 | 2021-02-16T19:11:04 | 2021-02-16T19:11:04 | 294,165,187 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,968 | py |
import os
#Do traducido automáticamente mira canto é de wordnet e canto de mymemmory
termos = 0
num_total_traducidos_gl_wordnet = 0
num_total_traducidos_pt_wordnet = 0
num_total_traducidos_gl_mymemmory = 0
num_total_traducidos_pt_mymemmory = 0
def obtenInfoPaqueteDoCsv(path_file):
global termos
global num_total_traducidos_gl_wordnet
global num_total_traducidos_pt_wordnet
global num_total_traducidos_gl_mymemmory
global num_total_traducidos_pt_mymemmory
f = open(path_file, encoding='utf-8')
for line in f:
print("\nLine: " + line.strip())
if("ili\tlema" in line):
continue
termos += 1
data_line = line.rstrip().split('\t')
print("data_line: " + str(data_line))
if(data_line[2]!='[]'):
num_total_traducidos_pt_wordnet += 1
if (data_line[3] != '[]'):
num_total_traducidos_gl_wordnet += 1
if (data_line[4] != '[]'):
print("data_line: " + data_line[4])
num_total_traducidos_pt_mymemmory += 1
if (data_line[5] != '[]'):
num_total_traducidos_gl_mymemmory += 1
rutaDirectorio = "/home/jose/PycharmProjects/Flex/paquete/output/" #RUTA SALIDA
if(os.path.isdir(rutaDirectorio)):
for file in os.listdir(rutaDirectorio):
if(file.endswith("servizoweb.xlsx")):
print(file)
rutaFichero = os.path.join(rutaDirectorio, file)
infoPaquete = obtenInfoPaqueteDoCsv(rutaFichero)
print(termos)
print(num_total_traducidos_pt_wordnet)
print(num_total_traducidos_gl_wordnet)
print(num_total_traducidos_pt_mymemmory)
print(num_total_traducidos_gl_mymemmory)
print(num_total_traducidos_pt_wordnet + num_total_traducidos_pt_mymemmory)
print(num_total_traducidos_gl_wordnet + num_total_traducidos_gl_mymemmory)
print('\n\nTotal')
print("num_total_termos " + str(termos))
print("num_total_traducidos_pt_wordnet " + str(num_total_traducidos_pt_wordnet ) + "\t" + str(num_total_traducidos_pt_wordnet * 100 / termos))
print("num_total_traducidos_gl_wordnet " + str(num_total_traducidos_gl_wordnet) + "\t" + str(num_total_traducidos_gl_wordnet * 100 / termos))
print("num_total_traducidos_pt_mymemmory " + str(num_total_traducidos_pt_mymemmory) + "\t" + str(num_total_traducidos_pt_mymemmory * 100 / termos))
print("num_total_traducidos_gl_mymemmory " + str(num_total_traducidos_gl_mymemmory) + "\t" + str(num_total_traducidos_gl_mymemmory * 100 / termos))
print("num_total_traducidos_pt " + str(num_total_traducidos_pt_wordnet + num_total_traducidos_pt_mymemmory) + "\t" + str((num_total_traducidos_pt_wordnet + num_total_traducidos_pt_mymemmory) * 100 / termos))
print("num_total_traducidos_gl " + str(num_total_traducidos_gl_wordnet + num_total_traducidos_gl_mymemmory) + "\t" + str((num_total_traducidos_gl_wordnet + num_total_traducidos_gl_mymemmory) * 100 / termos))
| [
"joseallones87@gmail.com"
] | joseallones87@gmail.com |
ec6863f6fad89f0a79981b9ebe0b04003f60a4e1 | 38fa69b9334acd23a076372b340b8c1230265b05 | /Console.py | 5a713642b55f572d87502da149577781dccff197 | [
"Apache-2.0"
] | permissive | gauravssnl/IPViewer | c880a098e2300a95b63b8d73f6373d9491fed2ba | 3a04711aa3ba79a961e44e163a479e788de1d7bf | refs/heads/master | 2021-01-22T05:00:49.588000 | 2017-09-10T15:01:16 | 2017-09-10T15:01:16 | 81,607,532 | 6 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,955 | py |
#Console.py script for PyS60
import sys
import e32
import appuifw
def ru(text):
    """Decode *text* (UTF-8 bytes) to unicode, dropping undecodable bytes.

    Replaces the original lambda assigned to a name (PEP 8 E731): a def
    gives the helper a proper name in tracebacks.
    """
    return text.decode('utf-8', 'ignore')
class Console :
    """Redirects sys.stdin/stdout/stderr to an on-screen appuifw.Text
    control -- an interactive console for PyS60 (Symbian, Python 2)."""
    __module__ = __name__
    def __init__(self, logger = False):
        # logger=True batches writes and flushes them through an ao_callgate
        # so non-UI threads can print safely.
        self.logger = logger
        from e32 import Ao_lock as Ao_lock
        from key_codes import EKeyEnter as EKeyEnter
        self.input_wait_lock = Ao_lock()
        self.input_stopped = False
        self.control = self.text = appuifw.Text()
        self.text.font = ('title', 16, None)
        self.text.color = 0
        # Keep the real std streams so __del__ can restore them.
        self.savestderr = sys.stderr
        self.savestdout = sys.stdout
        self.savestdin = sys.stdin
        sys.stderr = self
        sys.stdout = self
        sys.stdin = self
        self.writebuf = []
        # NOTE(review): clear() returns None, so both attributes start as
        # None; the logger branch below installs the real flushers.
        self._doflush = self.clear()
        self._flushgate = self.clear()
        if self.logger :
            def make_flusher(text, buf):
                # Closure appending the buffered text at the end of the control.
                def doflush():
                    text.set_pos(text.len())
                    text.add(ru(''.join(buf)))
                    del buf[:]
                return doflush
            self._doflush = make_flusher(self.text, self.writebuf)
            # ao_callgate lets background threads schedule the flush on the
            # UI thread.
            self._flushgate = e32.ao_callgate(self._doflush)
        else :
            self.logger = False
            self.clear()
        return None
    def __del__(self):
        # Restore the saved standard streams and release the UI control.
        sys.stderr = self.savestderr
        sys.stdout = self.savestdout
        sys.stdin = self.savestdin
        self.control = self.text = None
        return None
    def stop_input(self):
        # Exit-key handler: abort a pending readline().
        self.input_stopped = True
        self.input_wait_lock.signal()
    def clear(self):
        self.text.clear()
    def write(self, obj):
        self.writebuf.append(obj)
        self.flush()
    def writelines(self, list):
        # NOTE: the parameter name shadows the builtin `list`; kept for
        # signature compatibility.
        self.write(''.join(list))
    def flush(self):
        # Flush directly on the UI thread, or via the callgate otherwise.
        if len(self.writebuf) > 0 :
            if e32.is_ui_thread() :
                self._doflush()
            else :
                self._flushgate()
        pass
    def readline(self):
        """Block until the user finishes a line in the Text control and
        return it UTF-8 encoded; raises EOFError when input is aborted."""
        if not (e32.is_ui_thread()) :
            raise IOError('Cannot call readline from non-UI thread')
        pos = self.text.get_pos()
        len = self.text.len()   # NOTE: shadows builtin len() within this method
        # Temporarily hijack the exit key so it cancels the pending read.
        save_exit_key_handler = appuifw.app.exit_key_handler
        appuifw.app.exit_key_handler = self.stop_input
        self.input_wait_lock.wait()
        appuifw.app.exit_key_handler = save_exit_key_handler
        if self.input_stopped :
            self.text.add(u'\n')
            self.input_stopped = False
            raise EOFError
        new_pos = self.text.get_pos()
        new_len = self.text.len()
        # NOTE(review): `|` is bitwise OR and binds tighter than the
        # comparisons, so this does NOT evaluate as two or-ed conditions;
        # the intent was probably
        #   new_pos <= pos or (new_len - len) != (new_pos - pos)
        # Preserved unchanged here -- confirm before fixing.
        if (new_pos <= pos | (new_len - len) != (new_pos - pos)) :
            new_pos = self.text.len()
            self.text.set_pos(new_pos)
            self.text.add(u'\n')
            user_input = ''
        else :
            user_input = self.text.get(pos, ((new_pos - pos) - 1))
        return user_input.encode('utf8')
| [
"noreply@github.com"
] | noreply@github.com |
8367d61559cdf696618b6d909051533b8def93b0 | 97899228dbe6c0811783f7c830212febfc54f4c3 | /algorithm_PS/BEAKJOON/String/1157.py | 68937ea53be20eecbe1ef6030f14cc57285ba17d | [] | no_license | ksy37667/Algorithm-study | f977abcd5c44582b71f78e589f4a8a174d35a8f0 | 51680e236cf6bba09a2e0824ec72536ee23bba31 | refs/heads/master | 2021-07-12T20:45:50.849105 | 2021-03-27T06:44:18 | 2021-03-27T06:44:18 | 241,117,162 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 240 | py | string = input().upper()
# BOJ 1157: print the most frequent letter of `string` (already upper-cased),
# or '?' when the maximum is shared by several letters.
from collections import Counter

counts = Counter(string)
top = max(counts.values())
winners = [ch for ch, n in counts.items() if n == top]
# BUGFIX: the original printed the intermediate index list (debug output),
# which corrupts the single-line answer the judge expects.
if len(winners) > 1:
    print("?")
else:
    print(winners[0])
| [
"ksy37667@gmail.com"
] | ksy37667@gmail.com |
a88be55fbce8783a3e8e5780a1ad3fe6b790e992 | a4ab53aad0a6e1780f9eabd978c4d16f4822e38f | /Hexagon/Handlers/AcceptChallenge.py | ee4d575d8fc81f526b9c4c3e199ed1cc11f61d87 | [] | no_license | rywit/Hex | 8fc6af035bf37204cde8d2b8ede0b4590b1bf5ed | bdf43c53e491fcfa951013d10c29d9186877af9e | refs/heads/master | 2020-05-18T12:14:12.220213 | 2012-07-07T01:27:52 | 2012-07-07T01:27:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 771 | py | from Handlers.BaseHandler import BaseHandler
from Views.GameView import GameView
class AcceptChallenge( BaseHandler ):
    """Handler that lets a logged-in user accept a pending game challenge."""

    def get(self):
        """Activate the game named by ?gameid= and return to the home page."""
        # Only authenticated users may accept a challenge.
        if not self.user:
            self.redirect( "/login" )
            return
        game_id = self.request.get( "gameid" )
        if not game_id:
            self.render( "error.html", message = "Could not find game ID in request" )
            return
        # Flip the challenged game into the active state and go home.
        GameView( gameid = game_id ).update_status( "ACTIVE" )
        self.redirect( "/home" )
| [
"witko.ryan@gmail.com"
] | witko.ryan@gmail.com |
4f5c3e036dff53545e0841fdd9c6bd81bce99281 | 56e1844ca3e1c7bcc8f1d2a8c1465bf7120b3c08 | /Hypothesis_Candy.py | c8aa330e8e736dd7a36bae62d4fc8a82f052bd38 | [] | no_license | rl9703/Artificial-Intelligence | d95c32ffcd83a63cdc5b0461842dea6366c65838 | e39736ba6c33136b5287968df4612be6044770d6 | refs/heads/master | 2022-02-14T23:57:30.674789 | 2022-01-30T08:13:58 | 2022-01-30T08:13:58 | 208,346,041 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,287 | py | '''
Author: Rishab Lalwani
Title: Q1 Generate a data set of length 100 and
plot the cor- responding graphs for P(hi|d1,...,dN) and P(DN+1=lime|d1,...,dN)
'''
import random as rand
import matplotlib.pyplot as plt
def P(x):
    """Run 10 Bayesian updates of P(h_i | data) for the five candy-bag
    hypotheses, plot one posterior curve per hypothesis, then hand the
    final posterior to pred().

    :param x: list of five bags (lists of 'Cherry'/'Lime'), one per hypothesis
    :return: None (shows a matplotlib plot)
    """
    posterior = [0.1, 0.2, 0.4, 0.2, 0.1]
    curves = [[] for _ in range(5)]
    alpha = 1
    normalizers = []  # kept for parity with the original bookkeeping list
    for _ in range(10):
        updated = []
        normalizers.append(alpha)
        for i in range(len(x)):
            # Likelihood of drawing a lime from bag i, weighted by the prior.
            lime_count = x[i].count('Lime')
            updated.append((((alpha * lime_count) / len(x[i])) * posterior[i]))
        alpha = 1 / sum(updated)
        posterior = updated
        for i in range(5):
            curves[i].append(updated[i])
    number_of_obs = list(range(1, 11))
    # One curve per hypothesis, plotted in h1..h5 order.
    for curve in curves:
        plt.plot(number_of_obs, curve)
    plt.show()
    pred(x, posterior)
def pred(x, post):
    """Compute and plot P(next draw = Cherry) over 10 observations.

    NOTE(review): the incoming ``post`` is immediately replaced by the
    prior, so the parameter is effectively unused (kept for signature
    compatibility) — confirm this matches the assignment's intent.

    :param x: list of five candy bags, one per hypothesis
    :param post: ignored (see note above)
    """
    predictions = []
    post = [0.1, 0.2, 0.4, 0.2, 0.1]
    for _ in range(10):
        weighted = []
        for i in range(len(x)):
            lime_fraction = x[i].count("Lime") / len(x[i])
            weighted.append(lime_fraction * post[i])
        # P(cherry) = 1 - sum_i P(lime | h_i) * P(h_i)
        predictions.append(1 - sum(weighted))
        post = weighted
    print(predictions)
    plt.plot([1, 2, 3, 4, 5, 6, 7, 8, 9, 10], predictions)
    plt.show()
def main():
    """Build the five candy-bag hypotheses and run the Bayesian update P().

    Bags run from all-cherry (h1) to all-lime (h5); the mixed bags are
    shuffled so candy order is random.
    """
    cherry_counts = (100, 75, 50, 25, 0)
    bags = [['Cherry'] * c + ['Lime'] * (100 - c) for c in cherry_counts]
    # Only the mixed bags need shuffling; pure bags are order-invariant.
    for bag in bags[1:4]:
        rand.shuffle(bag)
    P(bags)
if __name__ == '__main__':
main() | [
"noreply@github.com"
] | noreply@github.com |
8581dd3dea171c93ad4e0fc231e5f70bdd96d430 | 0ca60d532c0a77c261f2b107c0e697e82af6000d | /svm/svmdemo1.py | 7cb5f27dd62519e6a8e909c08ad182c2ad32519c | [] | no_license | bnhalder/basic_ml | 6b3ad193622b0632fe449a83cc17a0a5211bdd5f | 91579c34adee090e75eb992f439fefd212741168 | refs/heads/master | 2020-03-10T01:35:09.090807 | 2018-04-11T15:29:41 | 2018-04-11T15:29:41 | 129,113,702 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,523 | py | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Mon Sep 18 22:54:30 2017
@author: jabong
"""
import numpy as np
import pylab as pl
# Load the preprocessed iris data; column 4 is the integer class label.
iris = np.loadtxt('../data/iris_proc.data', delimiter=',')
imax = np.concatenate((iris.max(axis=0)*np.ones((1,5)),iris.min(axis=0)*np.ones((1,5))),axis=0).max(axis=0)
# Build one-vs-rest targets: +1 for the class column, -1 elsewhere.
target = -np.ones((np.shape(iris)[0],3),dtype=float);
indices = np.where(iris[:,4]==0)
target[indices,0] = 1.
indices = np.where(iris[:,4]==1)
target[indices,1] = 1.
indices = np.where(iris[:,4]==2)
target[indices,2] = 1.
# Even rows train, odd rows test (all four features).
train = iris[::2,0:4]
traint = target[::2]
test = iris[1::2,0:4]
testt = target[1::2]
output = np.zeros((np.shape(test)[0],3))
import svm
reload(svm)
# Learn the full data
# One RBF SVM per class (one-vs-rest) on all four features.
#svm0 = svm.svm(kernel='linear')
#svm0 = svm.svm(kernel='poly',C=0.1,degree=3)
svm0 = svm.svm(kernel='rbf')
svm0.train_svm(train,np.reshape(traint[:,0],(np.shape(train[:,:2])[0],1)))
output[:,0] = svm0.classifier(test,soft=True).T
#svm1 = svm.svm(kernel='linear')
#svm1 = svm.svm(kernel='poly',C=0.1,degree=3)
svm1 = svm.svm(kernel='rbf')
svm1.train_svm(train,np.reshape(traint[:,1],(np.shape(train[:,:2])[0],1)))
output[:,1] = svm1.classifier(test,soft=True).T
#svm2 = svm.svm(kernel='linear')
#svm2 = svm.svm(kernel='poly',C=0.1,degree=3)
svm2 = svm.svm(kernel='rbf')
svm2.train_svm(train,np.reshape(traint[:,2],(np.shape(train[:,:2])[0],1)))
output[:,2] = svm2.classifier(test,soft=True).T
# Predicted class = the one-vs-rest machine with the largest soft margin.
bestclass = np.argmax(output,axis=1)
print bestclass
print iris[1::2,4]
err = np.where(bestclass!=iris[1::2,4])[0]
print err
print float(np.shape(testt)[0] - len(err))/ (np.shape(testt)[0]) , "test accuracy"
# Plot 2D version is below
# Retrain on only the first two features so the decision regions can be drawn.
#svm0 = svm.svm(kernel='linear')
svm0 = svm.svm(kernel='poly',degree=3)
#svm0 = svm.svm(kernel='rbf')
svm0.train_svm(train[:,:2],np.reshape(traint[:,0],(np.shape(train[:,:2])[0],1)))
output[:,0] = svm0.classifier(test[:,:2],soft=True).T
#svm1 = svm.svm(kernel='linear')
svm1 = svm.svm(kernel='poly',degree=3)
#svm1 = svm.svm(kernel='rbf')
svm1.train_svm(train[:,:2],np.reshape(traint[:,1],(np.shape(train[:,:2])[0],1)))
output[:,1] = svm1.classifier(test[:,:2],soft=True).T
#svm2 = svm.svm(kernel='linear')
svm2 = svm.svm(kernel='poly',degree=3)
#svm2 = svm.svm(kernel='rbf')
svm2.train_svm(train[:,:2],np.reshape(traint[:,2],(np.shape(train[:,:2])[0],1)))
output[:,2] = svm2.classifier(test[:,:2],soft=True).T
# Make a decision about which class
# Pick the one with the largest margin
bestclass = np.argmax(output,axis=1)
print bestclass
print iris[1::2,4]
err = np.where(bestclass!=iris[1::2,4])[0]
print err
# NOTE(review): this prints the error rate, yet is labelled "test accuracy"
# (the 4-feature run above printed the true accuracy) — confirm intent.
print float(len(err))/ (np.shape(testt)[0]) , "test accuracy"
# Make a plot
# Evaluate each machine on a dense grid and draw the argmax regions.
pl.figure()
step=0.01
f0,f1 = np.meshgrid(np.arange(np.min(train[:,0])-0.5, np.max(train[:,0])+0.5, step), np.arange(np.min(train[:,1])-0.5, np.max(train[:,1])+0.5, step))
out = np.zeros((np.shape(f0.ravel())[0],3))
out[:,0] = svm0.classifier(np.c_[np.ravel(f0), np.ravel(f1)],soft=True).T
out[:,1] = svm1.classifier(np.c_[np.ravel(f0), np.ravel(f1)],soft=True).T
out[:,2]= svm2.classifier(np.c_[np.ravel(f0), np.ravel(f1)],soft=True).T
out = np.argmax(out[:,:3],axis=1)
print out
out = out.reshape(f0.shape)
pl.contourf(f0, f1, out, cmap=pl.cm.Paired)
#pl.axis('off')
# Plot also the training points
# Support vectors of the first machine are circled in red.
#traint = np.where(traint==-1,0,1)
pl.plot(train[svm0.sv,0],train[svm0.sv,1],'o',markerfacecolor=None,markeredgecolor='r',markeredgewidth=3)
pl.scatter(train[:, 0], train[:, 1], c=iris[::2,4], cmap=pl.cm.Paired)
#pl.plot(train[:, 0], train[:, 1],'o', c=traint, cmap=pl.cm.Paired)
| [
"biswanath.halder@jabong.com"
] | biswanath.halder@jabong.com |
1c7c615a5d5f6ffd5521a19f65c1f52ac3bc6411 | ee6b18145acdd00821b4a7578882ab7bac07b237 | /lab10/main.py | d396d90a7bc35119a4c779f98b3a6ca2dd319b45 | [] | no_license | Szymek13/Wizualizacja_danych | ec5238f074c686479fe1f318b2a87ddf7afcd7a8 | 01051f37cc863eb82dc6ee1a0364a79da217e1f7 | refs/heads/main | 2023-05-28T03:03:55.121856 | 2021-05-30T14:22:20 | 2021-05-30T14:22:20 | 343,164,922 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,415 | py | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# Task 1: plot f(x) = 1/x on [20, 40] as a solid blue line.
x = np.arange(20, 40, 1)
y = (1/x)
plt.plot(x, y, 'b-', label='f(x)')
plt.xlabel('x')
plt.ylabel('f(x)')
plt.legend()
plt.axis([20, 40, 0.02, 0.05])
plt.title('Wykres funkcji f(x)')
plt.show()
# Task 2: same curve drawn with circle markers and a dashed line.
x = np.arange(20, 40, 1)
y = (1/x)
plt.plot(x, y, 'bo--', label='f(x)')
plt.xlabel('x')
plt.ylabel('f(x)')
plt.legend()
plt.axis([20, 40, 0.02, 0.05])
plt.title('Wykres funkcji f(x)')
plt.show()
# Task 3: sin(x) and cos(x) on [0, 45] in one figure.
x1 = np.arange(0, 45, 0.1)
x2 = np.arange(0, 45, 0.1)
y1 = np.sin(x1)
y2 = np.cos(x2)
plt.plot(x1, y1, '-', label='sin(x)')
plt.plot(x2, y2, '--', label='cos(x)')
plt.axis([0, 45, -1, 1])
plt.xlabel('x')
plt.ylabel('f(x)')
plt.legend(loc='lower right')
plt.show()
# Task 4: phase-shifted sine and a sine offset by +2.
x1 = np.arange(0, 45, 0.1)
x2 = np.arange(0, 45, 0.1)
y1 = np.sin(x1+np.pi)
y2 = np.sin(x2)+2
plt.plot(x1, y1, '-', label='sin(x)')
plt.plot(x2, y2, '--', label='sin(x)')
plt.xlabel('x')
plt.ylabel('f(x)')
plt.legend(loc='lower right')
plt.show()
# Task 5: scatter of the first two iris columns; marker size is the
# absolute difference of the two features, colors are random.
pliczek = pd.read_csv('iris.data', header=None, sep=',', decimal='.')
wykres = {'c': np.random.randn(150),
          'x': pliczek[0],
          'y': pliczek[1],
          's': abs(pliczek[0] - pliczek[1])}
plt.scatter('x', 'y', c='c', s='s', data=wykres)
plt.xlabel('sepal length')
plt.ylabel('sepal width')
plt.show()
| [
"noreply@github.com"
] | noreply@github.com |
85d65df06168b2114299f77d388cbe712b4b7085 | 458c487a30df1678e6d22ffdb2ea426238197c88 | /ubcsp/add_gc.py | e6be0db995f628f5c02f95391bfd50d28fde12ec | [
"MIT"
] | permissive | pluck992/ubc | 04062d2cdeef8d983da1bfaa0ff640a3b25c72c2 | 54fc89ae6141775321d5ea770e973ff09be51c0c | refs/heads/master | 2023-02-19T05:01:42.401329 | 2021-01-21T06:32:15 | 2021-01-21T06:32:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 939 | py | import pp
from gdslib import plot_circuit
from simphony.library import siepic
from simphony.netlist import Subcircuit
def add_gc_te(circuit, gc=siepic.ebeam_gc_te1550):
    """Wrap `circuit` with TE grating couplers on its `input`/`output` pins.

    Args:
        circuit: component exposing `input` and `output` pins.
        gc: grating coupler component (or factory) used on both sides.

    Returns:
        A Subcircuit whose `input`/`output` pins are the free grating ports.
    """
    coupler = pp.call_if_func(gc)
    wrapped = Subcircuit(f"{circuit}_gc")
    wrapped.add([(coupler, "gci"), (coupler, "gco"), (circuit, "circuit")])
    wrapped.connect_many([
        ("gci", "n1", "circuit", "input"),
        ("gco", "n1", "circuit", "output"),
    ])
    # Expose the unconnected grating ports as the wrapper's own I/O.
    wrapped.elements["gci"].pins["n2"] = "input"
    wrapped.elements["gco"].pins["n2"] = "output"
    return wrapped
if __name__ == "__main__":
    # Demo: wrap an MZI with grating couplers and plot its response.
    import matplotlib.pyplot as plt
    from ubc.cm.mzi import mzi

    c1 = mzi()
    c2 = add_gc_te(c1)
    plot_circuit(c2)
    plt.show()
| [
"j"
] | j |
88c6be80cd7eb38f457df5fa97ae76cdc8bb9cce | 2f3f48b87266a62a17d54c090f4b40f11d778f26 | /lib/utils/misc.py | 44179919e604f85ff607872d718a6e66a15eaf07 | [
"MIT"
] | permissive | DarioRugg/Self-Supervised_Pearson_Search | b9f78a2e61946f8709399bfbdeaabe1f303ff960 | 731ff0888b336076ec42c26808417809fb78e3cf | refs/heads/main | 2023-07-15T22:05:37.207853 | 2021-07-11T10:15:16 | 2021-07-11T10:15:16 | 379,900,903 | 0 | 0 | MIT | 2021-06-24T18:09:00 | 2021-06-24T11:24:10 | Python | UTF-8 | Python | false | false | 5,164 | py | # -*- coding: utf-8 -*-
# Reference:
# https://github.com/pytorch/vision/blob/fe3b4c8f2c/references/detection/utils.py
import argparse
import sys
import torch
import huepy as hue
from .serialization import read_json, write_json
class Nestedspace(argparse.Namespace):
    """argparse Namespace supporting dotted attribute paths (``a.b.c``)."""

    def __setattr__(self, name, value):
        # 'a.b' creates (or reuses) a child Nestedspace under 'a'.
        if '.' in name:
            group, name = name.split('.', 1)
            ns = getattr(self, group, Nestedspace())
            setattr(ns, name, value)
            self.__dict__[group] = ns
        else:
            self.__dict__[name] = value

    def __getattr__(self, name):
        # Only reached for names missing from __dict__; resolve dotted paths.
        if '.' in name:
            group, name = name.split('.', 1)
            try:
                ns = self.__dict__[group]
            except KeyError:
                raise AttributeError
            return getattr(ns, name)
        else:
            raise AttributeError

    def to_dict(self, args=None, prefix=None):
        """Flatten to a dict with dotted keys, e.g. ``{'a.b.c': 1}``."""
        out = {}
        args = self if args is None else args
        for k, v in args.__dict__.items():
            key = k if prefix is None else prefix + '.' + k
            if isinstance(v, Nestedspace):
                # BUGFIX: propagate the accumulated prefix so namespaces
                # nested more than two levels deep keep their full dotted
                # path (the original recursed with only the innermost key).
                out.update(self.to_dict(v, prefix=key))
            else:
                out.update({key: v})
        return out

    def from_dict(self, dic):
        """Populate attributes from a dict of (possibly dotted) keys."""
        for k, v in dic.items():
            self.__setattr__(k, v)

    def export_to_json(self, file_path):
        write_json(self.to_dict(), file_path)

    def load_from_json(self, file_path):
        self.from_dict(read_json(file_path))
def lazy_arg_parse(parser):
    """Parse only the flags actually given on the command line.

    Unlike ``parser.parse_args`` this does not populate defaults for absent
    options; the result is a :class:`Nestedspace`.  Unrecognized arguments
    abort via ``parser.error``.
    """
    def parse_known_args():
        args = sys.argv[1:]
        namespace = Nestedspace()
        try:
            namespace, args = parser._parse_known_args(args, namespace)
            if hasattr(namespace, '_unrecognized_args'):
                args.extend(getattr(namespace, '_unrecognized_args'))
                delattr(namespace, '_unrecognized_args')
            return namespace, args
        except argparse.ArgumentError:
            err = sys.exc_info()[1]
            parser.error(str(err))

    args, argv = parse_known_args()
    if argv:
        # BUGFIX: the original called the gettext alias ``_`` (copied from
        # argparse internals) which is undefined here and raised NameError
        # instead of reporting the offending arguments.
        parser.error('unrecognized arguments: %s' % ' '.join(argv))
    return args
def ship_data_to_cuda(batch, device):
    """Move a batch of (image, target) pairs to `device`.

    Returns a tuple of columns: ([images], [targets]); empty batch -> ().
    """
    pairs = [ship_data_to_cuda_singe_sample(img, tgt, device=device)
             for img, tgt in batch]
    return tuple(list(column) for column in zip(*pairs))
def ship_data_to_cuda_singe_sample(img, target, device):
    """Transfer one image and (optionally) its target dict to `device`.

    Mutates `target` in place; 'heatmaps' is only moved when present.
    """
    moved_img = img.to(device)
    if target is None:
        return moved_img, None
    for key in ('boxes', 'labels'):
        target[key] = target[key].to(device)
    if 'heatmaps' in target:
        target['heatmaps'] = target['heatmaps'].to(device)
    return moved_img, target
def resume_from_checkpoint(args, model, optimizer=None, lr_scheduler=None):
    """Restore model (and optionally optimizer/scheduler) state from args.resume.

    Mutates args.train.start_epoch to the stored epoch and returns
    (args, model, optimizer, lr_scheduler).
    """
    load_name = args.resume
    # Checkpoint is a dict with 'epoch', 'model', 'optimizer', 'lr_scheduler'.
    checkpoint = torch.load(load_name)
    args.train.start_epoch = checkpoint['epoch']
    model.load_state_dict(checkpoint['model'])
    if optimizer is not None:
        optimizer.load_state_dict(checkpoint['optimizer'])
    if lr_scheduler is not None:
        lr_scheduler.load_state_dict(checkpoint['lr_scheduler'])
    print(hue.good('loaded checkpoint %s' % (load_name)))
    print(hue.info('model was trained for %s epochs' % (args.train.start_epoch)))
    return args, model, optimizer, lr_scheduler
def get_optimizer(args, model):
    """Build an SGD optimizer with one parameter group per trainable tensor.

    Bias parameters use ``lr * (double_bias + 1)`` and keep weight decay
    only when ``bias_decay`` is set; every other parameter uses the base
    lr and ``weight_decay``.
    """
    base_lr = args.train.lr
    groups = []
    for name, param in model.named_parameters():
        if not param.requires_grad:
            continue
        if 'bias' in name:
            decay = args.train.weight_decay if args.train.bias_decay else 0
            groups.append({'params': [param],
                           'lr': base_lr * (args.train.double_bias + 1),
                           'weight_decay': decay})
        else:
            groups.append({'params': [param],
                           'lr': base_lr,
                           'weight_decay': args.train.weight_decay})
    return torch.optim.SGD(groups, momentum=args.train.momentum)
def get_lr_scheduler(args, optimizer):
    """Return MultiStepLR when explicit milestones are configured,
    otherwise a fixed-interval StepLR."""
    cfg = args.train
    if cfg.lr_decay_milestones is None:
        return torch.optim.lr_scheduler.StepLR(
            optimizer, step_size=cfg.lr_decay_step,
            gamma=cfg.lr_decay_gamma)
    return torch.optim.lr_scheduler.MultiStepLR(
        optimizer, milestones=cfg.lr_decay_milestones,
        gamma=cfg.lr_decay_gamma)
def warmup_lr_scheduler(optimizer, warmup_iters, warmup_factor):
    """LambdaLR ramping the lr factor linearly from `warmup_factor` to 1
    over the first `warmup_iters` steps, then holding it at 1."""
    def warmup_factor_at(step):
        if step < warmup_iters:
            progress = float(step) / warmup_iters
            # Linear interpolation: warmup_factor -> 1.
            return warmup_factor * (1 - progress) + progress
        return 1
    return torch.optim.lr_scheduler.LambdaLR(optimizer, warmup_factor_at)
def lucky_bunny(i):
    """Print a bunny banner announcing training epoch `i` (colored via huepy)."""
    print('')
    print('| ̄ ̄ ̄ ̄ ̄ ̄ ̄ ̄|')
    print('| TRAINING |')
    print('| epoch |')
    print('| ' + hue.bold(hue.green(str(i))) + ' |')
    print('| ________|')
    print(' (\__/) ||')
    print(' (•ㅅ•) || ')
    print(' / づ')
    print('')
| [
"munjalbharti@gmail.com"
] | munjalbharti@gmail.com |
6b757a2650f5ee9d6947c57165ed1c0576b47485 | b628d0cf1717c466f193c90ce6a63fd40a1c3888 | /test.py | 5964dbe14b4c6bcc15952d1dbebe5e841e89d60a | [] | no_license | RduMarais/pioupiou | 8193cb00536e8479ac31b005916bd699baaa8636 | eafccc0dd3fa1071428a46b8d3680b1f75cf427d | refs/heads/master | 2020-06-02T15:34:48.038832 | 2019-06-10T18:21:53 | 2019-06-10T18:21:53 | 191,210,534 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,623 | py | #!/bin/python3
import datetime
import cryptography
from Crypto import Random
from Crypto.Cipher import AES as AES
from Crypto.Cipher import DES as DES
from Crypto.Cipher import DES3 as DES3
from Crypto.PublicKey import RSA as RSA
from Crypto.Cipher import Blowfish as Blowfish
# from cryptography.hazmat.primitives.asymmetric.ed25519 import Ed25519PrivateKey as X25519
import matplotlib.pyplot as plt
def main():
    """Benchmark every cipher at doubling message sizes and plot the results."""
    results = []
    size = 32
    while size <= 16384:
        results.extend(tester(size))
        size *= 2
    # print(results)
    faire_graphe(results)
#genere le graphique
def faire_graphe(resultats):
    """Group (algorithm, micros) tuples per algorithm and save the plot.

    `resultats` is a flat list produced by tester() for every message size.
    """
    rsa = []
    # x25519 = []
    aes = []
    blow = []
    des = []
    des3 = []
    for res in resultats:
        (label,value) = res
        if(label=='RSA'):
            # RSA timings may be None when the message was too large.
            if(value):
                rsa.append(value)
            else:
                rsa.append(0)
        # if(label=='X25519'):
        #     x25519.append(value)
        if(label=='AES'):
            aes.append(value)
        if(label=='DES'):
            des.append(value)
        if(label=='triple DES'):
            des3.append(value)
        if(label=='Blowfish'):
            blow.append(value)
    # Message sizes in bytes, matching the order used by main().
    axe = [32,64,128,256,512,1024,2048,4096,8192,16384]
    # plt.plot(axe,aes,'bs',axe,des3,'gs',axe,des,'g--',axe,blow,'b--',axe[:3],rsa[:3],'r^',axe,x25519,'r--')
    # The next line draws the chart:
    # AES as blue squares
    # DES as green dashes
    # 3DES as green squares
    # Blowfish as blue dashes
    # RSA (first 3 sizes only) as red triangles
    plt.plot(axe,aes,'bs',axe,des3,'gs',axe,des,'g--',axe,blow,'b--',axe[:3],rsa[:3],'r^')
    plt.title('Vitesse des algorithmes en fonction de la taille du message à chiffrer')
    # plt.show()
    plt.savefig('resultats_tpe.jpg')
def tester(msgSize):
    """Time each cipher on one random message of `msgSize` bytes.

    Returns a list of (algorithm_label, elapsed_microseconds) tuples.
    """
    resultats = []
    input_msg = Random.new().read(msgSize)
    # print(input_msg)
    # prepare AES (CBC with a random key and IV)
    aes_IV = Random.new().read(AES.block_size )
    aes_symKey = Random.new().read(AES.block_size )
    cipher_aes = AES.new(aes_symKey,AES.MODE_CBC,aes_IV)
    # prepare RSA
    rsa_privKey = RSA.generate(1024, Random.new().read) #generate pub and priv key
    rsa_publicKey = rsa_privKey.publickey() # pub key export for exchange
    # prepare DES (OFB)
    des_symKey = b'8bytekey'
    des_IV = Random.new().read(DES.block_size )
    cipher_des = DES.new(des_symKey,DES.MODE_OFB,des_IV)
    # prepare 3DES (OFB)
    des3_symKey = b'Sixteen byte key'
    des3_IV = Random.new().read(DES3.block_size )
    cipher_3des = DES3.new(des3_symKey,DES3.MODE_OFB,des3_IV)
    # prepare Blowfish (CBC, variable-length key)
    blow_symKey = b'une cle de taille arbitraire'
    blow_IV = Random.new().read(Blowfish.block_size)
    cipher_blow = Blowfish.new(blow_symKey, Blowfish.MODE_CBC,blow_IV)
    # prepare X25519 (disabled)
    # x25519_privKey = X25519.generate()
    # x25519_pubKey = x25519_privKey.public_key()
    resultats.append(chronometrer_rsa_1('RSA',rsa_privKey,rsa_publicKey, input_msg))
    # resultats.append(chronometrer_x25519('X25519',x25519_privKey,x25519_pubKey,input_msg))
    resultats.append(chronometrer_sym('AES', cipher_aes, input_msg))
    resultats.append(chronometrer_sym('Blowfish',cipher_blow, input_msg))
    resultats.append(chronometrer_sym('DES',cipher_des, input_msg))
    resultats.append(chronometrer_sym('triple DES',cipher_3des, input_msg))
    return resultats
### FONCTIONS POUR CHIFFRER ET CHRONOMETRER
# def chronometrer_x25519(algo,privKey,publicKey,input_msg):
# print(algo)
# chrono = None
# try:
# tstart = datetime.datetime.now()
# message = privKey.sign(input_msg)
# tfinish = datetime.datetime.now()
# chrono = (tfinish - tstart).microseconds
# except ValueError as e:
# print('### ERREUR : '+str(e)+' = (en français) : Le message à chiffrer est trop large')
# print(algo+' : fini en : '+str(chrono))
# return(algo, chrono)
def chronometrer_rsa_1(algo, privKey,publicKey, input_msg):
    """Time one RSA private-key signature over `input_msg`.

    Returns (algo, elapsed_microseconds); elapsed stays None when the
    library rejects the message as too large (ValueError).
    """
    print(algo)
    chrono = None
    try:
        tstart = datetime.datetime.now()
        # message = publicKey.encrypt(input_msg,32)
        message = privKey.sign(input_msg,32)
        tfinish = datetime.datetime.now()
        # BUGFIX: timedelta.microseconds is only the sub-second component
        # (it wraps every second); measure the full elapsed time instead.
        chrono = int((tfinish - tstart) / datetime.timedelta(microseconds=1))
    except ValueError as e:
        print('### ERREUR : '+str(e)+' = (en français) : Le message à chiffrer est trop large')
    print(algo +' : fini en : '+str(chrono))
    return(algo, chrono)
def chronometrer_sym(algo,cipher, input_msg):
    """Time one symmetric encryption of `input_msg` with `cipher`.

    Returns (algo, elapsed_microseconds).
    """
    print(algo)
    tstart = datetime.datetime.now()
    message = cipher.encrypt(input_msg)
    # print(message)
    tfinish = datetime.datetime.now()
    # BUGFIX: timedelta.microseconds wraps every second; use the full
    # elapsed time in microseconds instead.
    chrono = int((tfinish - tstart) / datetime.timedelta(microseconds=1))
    print(algo +' : fini en : '+str(chrono))
    return(algo, chrono)
## FOR ROMAIN ONLY (script entry point)
if __name__ == '__main__':
    main()
| [
"rmichon@telecom-paristech.fr"
] | rmichon@telecom-paristech.fr |
b3c1357b284e6b73b5f72802de36f0a28ddb3683 | ce965cb69fd1f071dfae85e926b70a9a82eb560b | /main.py | d117bb7f83c82eef07f3af0121e6cee6a6ad88e0 | [] | no_license | tk14shiina/learningtool | c7a2c5ee1ce1ddb216534505df39e41426df2674 | e0bab8ce0c96d30683e01996e896b0e4ebfcbffa | refs/heads/main | 2023-04-29T14:39:22.443660 | 2021-05-18T20:48:36 | 2021-05-18T20:48:36 | 368,662,566 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,568 | py | from dbhelper import DBHelper
from tkinter import *
# Database helper and main window setup.
db = DBHelper()
root = Tk()
root.title('Quizlett')
root.geometry("700x550")
#root.resizable(width = False, height = False)
# --- Left column: study-set CRUD entries (add / delete / rename) ---
tk_ss = Entry(root, width = 30)
tk_ss.grid(row = 0, column = 1, padx = 20, pady = 5)
tk_ssLabel = Label(root, text = "New study set")
tk_ssLabel.grid(row = 0, column = 0)
tk_ssIdDel = Entry(root, width = 30)
tk_ssIdDel.grid(row = 2, column = 1, pady = 5)
tk_ssIdDelLabel = Label(root, text = "Study set ID")
tk_ssIdDelLabel.grid(row = 2, column = 0)
tk_ssIdUpd = Entry(root, width = 30)
tk_ssIdUpd.grid(row = 4, column = 1)
tk_ssIdUpdLabel = Label(root, text = "Study set ID")
tk_ssIdUpdLabel.grid(row = 4, column = 0)
tk_newSs = Entry(root, width = 30)
tk_newSs.grid(row = 5, column = 1, pady = 5)
tk_newSsLabel = Label(root, text = "Rename")
tk_newSsLabel.grid(row = 5, column = 0)
# --- Right column: word CRUD entries (add / delete / update) ---
tk_ssword = Entry(root, width = 30)
tk_ssword.grid(row = 0, column = 4, padx = 20, pady = 5)
tk_sswordLabel = Label(root, text = "Study set Id")
tk_sswordLabel.grid(row = 0, column = 3)
tk_word = Entry(root, width = 30)
tk_word.grid(row = 1, column = 4, pady = 5)
tk_wordLabel = Label(root, text = "Word")
tk_wordLabel.grid(row = 1, column = 3)
tk_def = Entry(root, width = 30)
tk_def.grid(row = 2, column = 4, pady = 5)
tk_defLabel = Label(root, text = "Definition")
tk_defLabel.grid(row = 2, column = 3)
tk_sswordIdDel = Entry(root, width = 30)
tk_sswordIdDel.grid(row = 4, column = 4, pady = 20)
tk_sswordIdDelLabel = Label(root, text = "Study set ID")
tk_sswordIdDelLabel.grid(row = 4, column = 3)
tk_wordIdDel = Entry(root, width = 30)
tk_wordIdDel.grid(row = 5, column = 4, pady = 5)
tk_wordIdDelLabel = Label(root, text = "Word ID")
tk_wordIdDelLabel.grid(row = 5, column = 3)
tk_sswordIdUpd = Entry(root, width = 30)
tk_sswordIdUpd.grid(row = 7, column = 4)
tk_sswordIdUpdLabel = Label(root, text = "Study set ID")
tk_sswordIdUpdLabel.grid(row = 7, column = 3)
tk_wordIdUpd = Entry(root, width = 30)
tk_wordIdUpd.grid(row = 8, column = 4, pady = 5)
tk_wordIdUpdLabel = Label(root, text = "Word ID")
tk_wordIdUpdLabel.grid(row = 8, column = 3)
tk_newWord = Entry(root, width = 30)
tk_newWord.grid(row = 9, column = 4, pady = 5)
tk_newWordLabel = Label(root, text = "New word")
tk_newWordLabel.grid(row = 9, column = 3)
tk_newDef = Entry(root, width = 30)
tk_newDef.grid(row = 10, column = 4, pady = 5)
tk_newDefLabel = Label(root, text = "New definition")
tk_newDefLabel.grid(row = 10, column = 3)
def submitAdd1():
    """Create a new study set titled with the tk_ss entry text."""
    print(tk_ss.get())
    db.insert_studySet(tk_ss.get())
    tk_ss.delete(0, END)
def submitDel1():
    """Delete the study set whose ID is typed in tk_ssIdDel."""
    print(tk_ssIdDel.get())
    db.delete_studySet(tk_ssIdDel.get())
    tk_ssIdDel.delete(0, END)
def submitUpd1():
    """Rename study set tk_ssIdUpd to the text entered in tk_newSs."""
    print(tk_ssIdUpd.get())
    print(tk_newSs.get())
    db.update_studySet(tk_ssIdUpd.get(), tk_newSs.get())
    tk_ssIdUpd.delete(0, END)
    tk_newSs.delete(0, END)
def submitShow1():
    """Open a scrollable popup listing every study set (ID and title)."""
    print("all")
    res = db.fetch_all_studySet()
    id = 0  # NOTE(review): unused, and shadows the builtin `id`
    top = Toplevel()
    top.geometry("200x200")
    w = Label(top, text ='List of study sets', font = "50")
    w.pack()
    scroll_bar = Scrollbar(top)
    scroll_bar.pack( side = RIGHT, fill = Y)
    mylist = Listbox(top, yscrollcommand = scroll_bar.set)
    # Rows are (id, title) tuples from the database.
    for r in res:
        mylist.insert(END, "Id: "+ str(r[0]))
        mylist.insert(END, "Title: "+ r[1])
        mylist.insert(END, "------------")
    mylist.pack(side = LEFT, fill = BOTH)
    scroll_bar.config(command = mylist.yview)
#>>>>>
def submitAdd2():
    """Add a word/definition pair to the study set ID given in tk_ssword."""
    print(tk_ssword.get())
    print(tk_word.get())
    print(tk_def.get())
    db.insert_word(tk_ssword.get(), tk_word.get(), tk_def.get())
    tk_ssword.delete(0, END)
    tk_word.delete(0, END)
    tk_def.delete(0, END)
def submitDel2():
    """Delete one word, addressed by study-set ID and word ID."""
    print(tk_sswordIdDel.get())
    print(tk_wordIdDel.get())
    db.delete_word(tk_sswordIdDel.get(), tk_wordIdDel.get())
    tk_sswordIdDel.delete(0, END)
    tk_wordIdDel.delete(0, END)
def submitUpd2():
    """Replace a word and its definition, addressed by set ID + word ID."""
    print(tk_sswordIdUpd.get())
    print(tk_wordIdUpd.get())
    print(tk_newWord.get())
    print(tk_newDef.get())
    db.update_word(tk_sswordIdUpd.get(), tk_wordIdUpd.get(), tk_newWord.get(), tk_newDef.get())
    tk_sswordIdUpd.delete(0, END)
    tk_wordIdUpd.delete(0, END)
    tk_newWord.delete(0, END)
    tk_newDef.delete(0, END)
def submitShow2():
    """Open a scrollable popup listing every word with its IDs and definition."""
    print("all")
    res = db.fetch_all_word()
    print(res)
    top = Toplevel()
    top.geometry("200x200")
    w = Label(top, text ='Wordlist', font = "50")
    w.pack()
    scroll_bar = Scrollbar(top)
    scroll_bar.pack(side = RIGHT, fill = Y)
    mylist = Listbox(top, yscrollcommand = scroll_bar.set)
    # Rows are (word_id, study_set_id, word, definition) tuples.
    for r in res:
        mylist.insert(END, "ID: "+ str(r[0]))
        mylist.insert(END, "Study Set Id: "+ str(r[1]))
        mylist.insert(END, "Word: "+ r[2])
        mylist.insert(END, "Definition: "+ r[3])
        mylist.insert(END, "------------")
    mylist.pack( side = LEFT, fill = BOTH)
    scroll_bar.config(command = mylist.yview)
# Wire the CRUD buttons: blue column = study sets, pink column = words.
addButton1 = Button(root, text = "Add", command = submitAdd1, bg = '#3399FF', fg = 'white')
addButton1.grid(row = 1, column = 1, columnspan = 2, ipadx = 20)
deleteButton1 = Button(root, text = "Delete", command = submitDel1, bg = '#3399FF', fg = 'white')
deleteButton1.grid(row = 3, column = 1, columnspan = 2, ipadx = 20)
updateButton1 = Button(root, text = "Update", command = submitUpd1, bg = '#3399FF', fg = 'white')
updateButton1.grid(row = 6, column = 1, columnspan = 2, padx = 10, ipadx = 20)
showButton1 = Button(root, text = "Show all", command = submitShow1, bg = '#3399FF', fg = 'white')
showButton1.grid(row = 7, column = 1, columnspan = 2, pady = 5, padx = 10, ipadx = 20)
addButton2 = Button(root, text = "Add", command = submitAdd2, bg = '#FF6699', fg = 'white')
addButton2.grid(row = 3, column = 4, columnspan = 2, ipadx = 20)
deleteButton2 = Button(root, text = "Delete", command = submitDel2, bg = '#FF6699', fg = 'white')
deleteButton2.grid(row = 6, column = 4, columnspan = 2, pady = 5, ipadx = 20)
updateButton2 = Button(root, text = "Update", command = submitUpd2, bg = '#FF6699', fg = 'white')
updateButton2.grid(row = 11, column = 4, columnspan = 2, pady = 5, padx = 10, ipadx = 20)
showButton2 = Button(root, text = "Show all", command = submitShow2, bg = '#FF6699', fg = 'white')
showButton2.grid(row = 12, column = 4, columnspan = 2, pady = 5, padx = 10, ipadx = 20)
# Entry used by the "Learn" quiz mode to pick a study set by ID.
tk_topic = Entry(root, width = 30)
tk_topic.grid(row = 9, column = 1, pady = 5)
tk_topicLabel = Label(root, text = "Choose study set")
tk_topicLabel.grid(row = 9, column = 0)
def submitLearn():
    """Open a quiz window: show a word, hide its answer below blank rows.

    NOTE(review): int(tk_topic.get()) raises ValueError on non-numeric
    input — there is no validation; confirm whether that is acceptable.
    """
    top = Toplevel()
    top.title('Quiz')
    top.geometry("100x100")
    top.resizable(width = False, height = False)
    data = db.fetch_one_word(int(tk_topic.get()))
    qs = data[0]
    ans = data[1]
    w = Label(top, text = qs, font = "30")
    w.pack()
    scroll_bar = Scrollbar(top)
    scroll_bar.pack( side = RIGHT, fill = Y)
    mylist = Listbox(top, yscrollcommand = scroll_bar.set)
    # Pad with blank rows so the answer is only revealed by scrolling.
    for i in range (0, 10):
        mylist.insert(END, " ")
    mylist.insert(END, " " + ans)
    mylist.pack(side = LEFT, fill = BOTH)
    scroll_bar.config(command = mylist.yview)
# Orange "Learn" button launches the quiz popup.
learnButton = Button(root, text = "Learn", command = submitLearn, bg = '#FF9900', fg = 'white')
learnButton.grid(row = 10, column = 1, columnspan = 1, pady = 5, ipadx = 50)
mainloop() | [
"noreply@github.com"
] | noreply@github.com |
f1404f7a787c775f3ce768b273fdb666e2071008 | 2d3dc770005c152f459be6f59062b736dc00aa69 | /2048/code/tkinter_event.py | 3d237a061bb54630c553e75a0c527c7db1110946 | [] | no_license | s-python-git/Python-The-code-base | 42394f9459cbcc6544486fb98ab04352eecad568 | b85052af64e5909d38649ee4ab576e608607ba50 | refs/heads/master | 2020-04-27T23:48:30.650696 | 2019-11-25T09:29:52 | 2019-11-25T09:29:52 | 174,791,058 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 609 | py | # tkinter_event.py
import tkinter
# Demo: log keyboard and mouse events on the root window.
root = tkinter.Tk()
def onKeyDown(event):
    # Fired for every key press; event carries keycode/keysym/char.
    print("有键盘按键被按下:event=", event)
    print(event.keycode, event.keysym, event.char)
def onKeyUp(event):
    # Fired when a key is released.
    print("有键盘按键抬起!", event)
root.bind('<KeyPress>', onKeyDown)
root.bind('<KeyRelease>', onKeyUp)
def mouseDown(e):
    # x/y are window-relative, x_root/y_root are screen coordinates.
    print("有鼠标按键按下在", e.x, e.y, e.x_root, e.y_root)
root.bind('<Button>', mouseDown)
def mouseUp(e):
    # e.num == 2 is the middle button.
    print("有鼠标按键抬起", e.x, e.y)
    if e.num == 2:
        print("中间键抬起!!!")
root.bind("<ButtonRelease>", mouseUp)
root.mainloop()
| [
"noreply@github.com"
] | noreply@github.com |
4b0d73f1c9d3600690df7eb5b54d526f2b7a0427 | 2cf2df2807fff90d4c82c1cbbbece272a4b469c2 | /gplib/core/__init__.py | 1038489a4cd5070d90c14b2d9f7b42cb4ce8684d | [] | no_license | marcpalaci689/gplib | 826466f42da085b91d37297631fcc709c00edc3a | 859ed08d7b77b1a4f4ed3f0cdb5db8930ee95465 | refs/heads/master | 2021-09-01T12:19:30.683620 | 2017-12-27T00:11:02 | 2017-12-27T00:11:02 | 115,462,968 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 20 | py | from .gp import GP
| [
"marcpalaci689@gmail.com"
] | marcpalaci689@gmail.com |
a05cf8f5a0bbfbc66fc6366b15421295bcad5546 | 68e66947c2b2a2f1a1cac52a99363ce37f33e9bb | /getdata.py | 2eb211b79b5f329ebe7dad60d8c9b3cf6781e662 | [] | no_license | trmcdade/Manifesto | 30c424127bf734a63f9af13bb25671769077926a | 9a0d150783485492e8c45ebeb37ee41c9c31b253 | refs/heads/master | 2020-06-11T09:31:32.493158 | 2019-06-28T17:14:09 | 2019-06-28T17:14:09 | 193,917,576 | 1 | 0 | null | 2019-06-26T19:35:28 | 2019-06-26T14:10:24 | R | UTF-8 | Python | false | false | 950 | py | import urllib.request, json, ssl
# SECURITY: TLS certificate verification is disabled below, which allows
# man-in-the-middle attacks — prefer a properly verified context.
#bypass SSL verification
context = ssl._create_unverified_context()
# SECURITY: the API key is hard-coded in the URL; move it to an env var
# and rotate the exposed key.
#you will need your own API. play around with key= parm also
with urllib.request.urlopen("https://manifesto-project.wzb.eu/tools/api_get_core.json?api_key=d00c54e1a64ef97f7a032c91ff45a627&key=MPDS2018b", context=context) as url:
    cmp_test = json.loads(url.read().decode())
#returns a list
#basic packages
import pandas as pd
import numpy as np
#create index col
index= list(range(len(cmp_test)))
# First row of the API response holds the column names.
#turn imported data into pd dataframe
d = pd.DataFrame(data = cmp_test,columns=cmp_test[0], index=index)
#checks:
len(d) #this year N=3925
#say we want 2/3 to be the training set:
#train_amount = round(len(d)*2/3)
#and the rest to be the validation set:
#val_amount = 1- train_amount
#slice data (non-random):
#train_set = d.iloc[[1,train_amount],:]
#val_set = d.iloc[[train_amount + 1, len(d) - 1],:]
#carry on with the actual training
| [
"kylechan@unc.edu"
] | kylechan@unc.edu |
291e37a89529fee6456f713f03a74745d05ca459 | ef905b3f490049212ea7edf777f82eba85328741 | /hist_nsepy.py | 5ecf1a9717dedea68167734c50dd3c3d462a1bc8 | [] | no_license | ghoshsudipto/DA | 98d144541a7355efddd783bc304af396548af5e9 | dbd8725c64bfa9a7e55ff75ee5fa2e8fa270719a | refs/heads/master | 2023-08-18T04:58:12.607293 | 2023-07-27T09:11:07 | 2023-07-27T09:11:07 | 217,709,954 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,284 | py | import pandas as pd
from datetime import date
from nsepy import get_history
scrip = input('Scrip:')
series = date(2004, 1, 29), date(2004, 2, 26), date(2004, 3, 25), date(2004, 4, 29), date(2004, 5, 27), date(2004, 6, 24), date(2004, 7, 29),\
date(2004, 8, 26), date(2004, 9, 30), date(2004, 10, 28), date(2004, 11, 25), date(2004, 12, 30), date(2005, 1, 27), date(2005, 2, 24),\
date(2005, 3, 31), date(2005, 4, 28), date(2005, 5, 26), date(2005, 6, 30), date(2005, 7, 28), date(2005, 8, 25), date(2005, 9, 29),\
date(2005, 10, 27), date(2005, 11, 24), date(2005, 12, 29), date(2006, 1, 25), date(2006, 2, 23), date(2006, 3, 30), date(2006, 4, 27),\
date(2006, 5, 25), date(2006, 6, 29), date(2006, 7, 27), date(2006, 8, 31), date(2006, 9, 28), date(2006, 10, 26), date(2006, 11, 30),\
date(2006, 12, 28), date(2007, 1, 25), date(2007, 2, 22), date(2007, 3, 29), date(2007, 4, 26), date(2007, 5, 31), date(2007, 6, 28),\
date(2007, 7, 26), date(2007, 8, 30), date(2007, 9, 27), date(2007, 10, 25), date(2007, 11, 29), date(2007, 12, 27), date(2008, 1, 31), \
date(2008, 2, 28), date(2008, 3, 27), date(2008, 4, 24), date(2008, 5, 29), date(2008, 6, 26), date(2008, 7, 31), date(2008, 8, 28),\
date(2008, 9, 25), date(2008, 10, 29), date(2008, 11, 27), date(2008, 12, 25), date(2009, 1, 29), date(2009, 2, 26), date(2009, 3, 26),\
date(2009, 4, 30), date(2009, 5, 28), date(2009, 6, 25), date(2009, 7, 30), date(2009, 8, 27), date(2009, 9, 24), date(2009, 10, 29),\
date(2009, 11, 26), date(2009, 12, 31), date(2010, 1, 28), date(2010, 2, 25), date(2010, 3, 25), date(2010, 4, 29), date(2010, 5, 27),\
date(2010, 6, 24), date(2010, 7, 29), date(2010, 8, 26), date(2010, 9, 30), date(2010, 10, 28), date(2010, 11, 25), date(2010, 12, 30),\
date(2011, 1, 27), date(2011, 2, 24), date(2011, 3, 31), date(2011, 4, 28), date(2011, 5, 26), date(2011, 6, 30), date(2011, 7, 28),\
date(2011, 8, 25), date(2011, 9, 29), date(2011, 10, 25), date(2011, 11, 24), date(2011, 12, 29), date(2012, 1, 25), date(2012, 2, 23),\
date(2012, 3, 29), date(2012, 4, 26), date(2012, 5, 31), date(2012, 6, 28), date(2012, 7, 26), date(2012, 8, 30), date(2012, 9, 27),\
date(2012, 10, 25), date(2012, 11, 29), date(2012, 12, 27), date(2013, 1, 31), date(2013, 2, 28), date(2013, 3, 28), date(2013, 4, 25),\
date(2013, 5, 30), date(2013, 6, 27), date(2013, 7, 25), date(2013, 8, 29), date(2013, 9, 26), date(2013, 10, 31), date(2013, 11, 28),\
date(2013, 12, 26), date(2014, 1, 30), date(2014, 2, 26), date(2014, 3, 27), date(2014, 4, 24), date(2014, 5, 29), date(2014, 6, 26),\
date(2014, 7, 31), date(2014, 8, 28), date(2014, 9, 25), date(2014, 10, 30), date(2014, 11, 27), date(2014, 12, 24), date(2015, 1, 29),\
date(2015, 2, 26), date(2015, 3, 26), date(2015, 4, 30), date(2015, 5, 28), date(2015, 6, 25), date(2015, 7, 30), date(2015, 8, 27),\
date(2015, 9, 24), date(2015, 10, 29), date(2015, 11, 26), date(2015, 12, 31), date(2016, 1, 28), date(2016, 2, 25), date(2016, 3, 31),\
date(2016, 4, 28), date(2016, 5, 26), date(2016, 6, 30), date(2016, 7, 28), date(2016, 8, 25), date(2016, 9, 29), date(2016, 10, 27),\
date(2016, 11, 24), date(2016, 12, 29), date(2017, 1, 25), date(2017, 2, 23), date(2017, 3, 30), date(2017, 4, 27), date(2017, 5, 25),\
date(2017, 6, 29), date(2017, 7, 27), date(2017, 8, 31), date(2017, 9, 28), date(2017, 10, 26), date(2017, 11, 30), date(2017, 12, 28),\
date(2018, 1, 25), date(2018, 2, 22), date(2018, 3, 28), date(2018, 4, 26), date(2018, 5, 31), date(2018, 6, 28), date(2018, 7, 26),\
date(2018, 8, 30), date(2018, 9, 27), date(2018, 10, 25), date(2018, 11, 29), date(2018, 12, 27), date(2019, 1, 31), date(2019, 2, 28),\
date(2019, 3, 28), date(2019, 4, 25), date(2019, 5, 30), date(2019, 6, 27), date(2019, 7, 25), date(2019, 8, 29), date(2019, 9, 26), \
date(2019, 10, 31), date(2019, 11, 28), date(2019, 12, 26)
# Empty frame whose only purpose is to emit the CSV header row.
col = pd.DataFrame({'Date': [], 'Symbol': [], 'Expiry': [], 'Open': [], 'High': [], 'Low': [], 'Close': [], 'LTP': [], 'Settlement Price': [],
                    'Number of Contracts': [], 'Turnover': [], 'Open Interest': [], 'Change in OI': [], 'Underlying': []})
# to_csv's default 'w' mode already creates/truncates the file, so the
# original bare open(..., 'w') call was redundant and leaked an unclosed
# file handle; it has been removed.
col.to_csv(f'D:\homework\dataset\{scrip}.csv', header=True, index=False)
# Append one expiry's futures history per iteration (header written once above).
for i in series:
    df_fut = get_history(symbol=scrip, start=date(2000, 1, 1), end=date(2020, 1, 20), futures=True, expiry_date=i)
    df_fut.to_csv(f'D:\homework\dataset\{scrip}.csv', mode='a', header=False)
# NOTE(review): the following Excel VBA macro was pasted verbatim into this
# Python module, which made the whole file a SyntaxError. It is preserved
# below as a comment so the Python code above can actually run; move it into
# the workbook's VBA editor (or a .bas module) where it belongs.
#
# Sub format()
# Range("A1:Z5000").Find(what:="Open Interest").Offset(1).Select
#     Range(Selection, Selection.End(xlDown)).Select
#     Selection.Replace what:=",", Replacement:="", LookAt:=xlPart, _
#         SearchOrder:=xlByRows, MatchCase:=True, SearchFormat:=False, _
#         ReplaceFormat:=False
# Range("A1:Z5000").Find(what:="open Interest Int").Offset(1).Select
#     ActiveCell.FormulaR1C1 = "=INT(RC[-3])"
#     ActiveCell.Offset(0, -1).Range("A1").Select
#     Selection.End(xlDown).Select
#     ActiveCell.Offset(0, 1).Range("A1").Select
#     Range(Selection, Selection.End(xlUp)).Select
#     Selection.FillDown
#     ActiveSheet.Calculate
# Range("A1:Z5000").Find(what:="open Interest Int").Offset(1).Select
# End Sub
| [
"noreply@github.com"
] | noreply@github.com |
2f7739039404274a4296d6d5ede3692379b78d93 | c72069c173dcbc475d051ac23dde5c69017604f3 | /testcase/test02_home_search.py | 36eba48b20cf9a3e777424a0d53d671f226d81f5 | [] | no_license | NamedWu/test1 | de454907bbd2455ed1a45cc1b240b6525b3c5512 | db0f5d1f5737e4975cfe5d8f704f26aad84e169a | refs/heads/master | 2023-01-08T23:43:16.536079 | 2020-11-14T12:20:38 | 2020-11-14T12:20:38 | 312,811,866 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,130 | py | import pytest
from selenium import webdriver
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.select import Select
from selenium.webdriver.common.by import By
import time
def test_select():
    """Open the hibobi home page and pick 'Boy' from the header search widget.

    NOTE(review): the element located below is an <input>; selenium's
    Select() wrapper only works on <select> tags, so this raises
    UnexpectedTagNameException unless the page markup differs -- verify.
    """
    driver = webdriver.Chrome()
    path = 'https://www.hibobi.com/'
    driver.get(path)
    # Use an explicit wait to confirm the element has appeared.
    WebDriverWait(driver, 10).until(EC.visibility_of_element_located((By.XPATH, "//*[@id='c-header']/div[2]/div[3]/div/input")))
    select = driver.find_element_by_xpath('//*[@id="c-header"]/div[2]/div[3]/div/input')
    Select(select).select_by_visible_text('Boy')
    time.sleep(2)
if __name__ == '__main__':
    pytest.main()
# # 根据下标进行选择,从0开始
# Select(select).select_by_index(1)
# time.sleep(2)
# # 根据value的值选择
# Select(select).select_by_value('daily')
# time.sleep(2)
# # 根基text选择
#
#
# # 判断选择是否预期
# WebDriverWait(driver,20).until(EC.element_located_to_be_selected((By.XPATH,'//*[contains(text(),"关注了")]'))) | [
"dengqingqign@duiba.com.cn"
] | dengqingqign@duiba.com.cn |
c4b5c68f3abcb5ab6cc0ae450634d6e199046023 | 060a1f91c43e8931a8e2b5e023f6325f4c612862 | /excel_main.py | f7b56bc153bfbe4451309c9162a84fc8e3ce9a25 | [] | no_license | Dianuma/excel | 2ac8ee81f4c41fca007cb6eaa0eacd749ca378b1 | 5a9cf5e22d3b4d15f5c32b0b0326815fa249beab | refs/heads/main | 2023-04-25T03:40:46.789204 | 2021-05-11T10:50:40 | 2021-05-11T10:50:40 | 365,394,033 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 27,077 | py | try:
import Tkinter as tk
except:
import tkinter as tk
import tkinter.messagebox
import tkinter.ttk as ttk
import sys
import os
import numpy as np
import time
import openpyxl
import pyexcel as p
import pyexcel_xls
import pyexcel_xlsx
import pyexcel_xlsxw
import math
#font=("UD Digi Kyokasho N-B", 20, "bold")
# Default (family, size) tuple applied to the per-file checkboxes on each page.
font=("TkDefaultFont",10)
class SampleApp(tk.Tk):
    """Root window of the Excel converter.

    Hosts exactly one page frame at a time and carries the state the pages
    share: ``temp`` (files picked on the start page), ``temp_2``/``temp_3``
    (filenames and their loaded cell values, filled by PageOne),
    ``temp_4``/``selec_temp`` (conversion queue and per-file selections).
    """
    def __init__(self):
        tk.Tk.__init__(self)
        self.title("엑셀 변환기")
        self.geometry("750x530+100+100")
        self.resizable(False, False)
        # No page is mounted yet; switch_frame() fills this in.
        self._frame = None
        # Shared state mutated by the page frames.
        self.temp = []
        self.temp_2 = []
        self.temp_3 = {}
        self.temp_4 = []
        self.selec_temp = {}
        self.ID_Number = 0
        self.switch_frame(StartPage)

    def switch_frame(self, frame_class):
        """Replace the currently shown page with a fresh ``frame_class`` instance."""
        replacement = frame_class(self)
        if self._frame is not None:
            self._frame.destroy()
        self._frame = replacement
        self._frame.pack()

    def _exit(self):
        """Terminate the whole application."""
        sys.exit()
class StartPage(tk.Frame):
    """First page: list every .xls/.xlsx in the input folder and let the
    user tick the files to convert."""
    def __init__(self, master):
        tk.Frame.__init__(self, master)
        self.master=master
        # All entries of the input directory ('엑셀 넣는 곳' = "put Excel here").
        self.file_name=os.listdir('엑셀 넣는 곳')
        self.all_values = {}
        self.key=self.key_sort()
        tk.Button(self, text="EXIT", font=("TkDefaultFont",15,"bold"),command=lambda: self.master._exit()).pack(side="bottom",anchor="e")
        self.get_item()
    def get_item(self):
        """Build the scrollable checkbox list of candidate files."""
        tk.Label(self, text="\n변환할 파일을 모두 선택해주세요",font=("TkDefaultFont",13,"bold")).pack()
        frame_1=tk.Frame(self,width=600,height=200)
        frame_1.pack()
        scrollbar = tk.Scrollbar(frame_1,orient=tk.HORIZONTAL)
        scrollbar_2 = tk.Scrollbar(frame_1)
        # Checkbuttons are embedded into a Text widget so both axes scroll.
        text = tk.Text(frame_1,relief="flat",xscrollcommand=scrollbar.set,yscrollcommand=scrollbar_2.set,borderwidth=0)
        scrollbar.config(command=text.xview,)
        scrollbar.pack(side="bottom",fill="x")
        scrollbar_2.config(command=text.yview)
        scrollbar_2.pack(side="right",fill="y")
        text.pack(side="top",fill="both",expand=True)
        self.Var=[]
        self.check_box=[]
        for i in range(len(self.key)):
            self.Var.append(tk.IntVar())
            cb = tk.Checkbutton(frame_1,text=self.key[i],variable=self.Var[i], font=font,padx=0,pady=0,bd=0,bg="white",borderwidth =0)
            self.check_box.append(cb)
            text.window_create("end", window=cb)
            text.insert("end", "\n")
        scrollbar["command"]=text.xview
        scrollbar_2["command"]=text.yview
        tk.Button(self,text='모두 선택',command=self.set_all).pack()
        tk.Button(self,text='모두 선택 취소',command=self.deselect_all).pack()
        tk.Button(self,text='선택 완료',command=self.item).pack()
    def key_sort(self):
        """Return the Excel filenames: numeric stems first (sorted as ints),
        then the rest alphabetically. Non-Excel files are dropped."""
        key_int=[]
        key_str=[]
        for i in self.file_name:
            # NOTE(review): i.split('.')[1] assumes exactly one dot in the
            # filename -- names with extra dots misclassify; no dot raises.
            if i.split('.')[1]=="xlsx" or i.split('.')[1]=="xls":
                try :
                    int(i.split('.')[0])
                    key_int.append(i)
                except :
                    key_str.append(i)
        key=sorted(key_int,key=lambda fname: int(fname.split('.')[0]))+sorted(key_str)
        return key
    def item(self):
        """Store the checked filenames on the root window and go to PageOne."""
        self.master.temp=[self.key[i] for i in range(len(self.Var)) if self.Var[i].get()==1]
        self.master.switch_frame(PageOne)
    def set_all(self):
        # Tick every checkbox.
        [i.select() for i in self.check_box]
    def deselect_all(self):
        # Untick every checkbox.
        [i.deselect() for i in self.check_box]
class PageOne(tk.Frame):
    """Second page: load and clean each selected workbook, then let the user
    pick which files to convert via the counterpart account number.

    Files whose sheets lack the expected layout, or that contain no group of
    rows sharing an account number, are silently dropped (see the blue label
    text below).
    """
    def __init__(self, master):
        self.master=master
        tk.Frame.__init__(self, master)
        tk.Frame.configure(self)
        self.worksheet_CP=[]
        self.worksheet_NH=[]
        self.worksheet_SH=[]
        # Per-layout (row, column) offsets; which entry applies is chosen in
        # load_excel based on marker cells -- presumably one entry per bank
        # export format (TODO confirm).
        self.serial_number=[[7,5],[8,6],[2,4]]
        self.serial_count=0
        self.load_excel()
        tk.Label(self, text="\n상대 계좌 번호 항목이 존재하지 않거나\n상대 계좌 번호가 같은 건이 존재하지 않는 파일의 경우 자동으로 무시됩니다.",font=("TkDefaultFont",13,"bold"),fg="blue").pack(side="bottom")
        tk.Button(self, text="EXIT", font=("TkDefaultFont",15,"bold"),command=lambda: self.master._exit()).pack(side="bottom",anchor="e")
        # Drop files that yield no convertible groups; keep the rest in
        # master.selec_temp for PageTwo.
        temp_temp=list(self.master.temp_2)
        while len(temp_temp)>=1:
            self.pre=temp_temp.pop(0)
            self.all_values=self.master.temp_3[self.pre]
            self.data=np.array(self.all_values)[6:]
            self.selec=self.change_second()
            if len(self.selec.keys())>=1:
                self.master.selec_temp[self.pre]=self.selec
            else:
                self.master.temp_2.remove(self.pre)
        self.get_item()
    def change_second(self):
        """Group rows that share column 6 (account number) but carry more
        than one distinct column-5 description.

        Returns {shortest description: {description: [row indices]}}, with
        keys ordered by length via dictionary_sort.
        """
        set_list=list(set(self.data[:,6]))
        if self.data[len(self.data)-1,5]==None:
            length=len(self.data)-1
        elif self.data[len(self.data)-1,5]!=None:length=len(self.data)
        total=0
        case_by_total=0
        selec=[]
        for number in set_list:
            if number!=None:
                business_number=[]
                for count in range(length):
                    if self.data[count,5]!=None:
                        if self.data[count,6]==number:
                            business_number.append(count)
                    count+=1
                business_name=[]
                for i in business_number:
                    business_name.append(self.data[i,5])
                set_business_name=list(set(business_name))
                # Only groups with at least two distinct names need review.
                if len(set_business_name)>=1.5:
                    case_by_total+=len(business_number)
                    selec.append([business_name,business_number])
                total+=len(business_number)
        selection={}
        for i in selec:
            temp_selec={}
            for j in range(len(i[0])):
                temp_selec[i[0][j]]=[]
            for j in range(len(i[0])):
                temp_selec[i[0][j]].append(i[1][j])
            temp_selec=self.dictionary_sort(temp_selec)
            selection[min(i[0],key=len)]=temp_selec
        selection=self.dictionary_sort(selection)
        return selection
    def dictionary_sort(self,dic):
        """Return a copy of ``dic`` with keys ordered by ascending length."""
        A=sorted(dic.keys(),key=len)
        B={}
        for i in A:
            B[i]=dic[i]
        return B
    def get_item(self):
        """Build the checkbox list of files eligible for conversion."""
        tk.Label(self, text="\n상대 계좌 번호를 이용해 변환할 파일을 모두 선택해주세요",font=("TkDefaultFont",13,"bold")).pack()
        frame_1=tk.Frame(self,width=600,height=200)
        frame_1.pack(side="left")
        scrollbar = tk.Scrollbar(frame_1,orient=tk.HORIZONTAL)
        scrollbar_2 = tk.Scrollbar(frame_1)
        # Checkbuttons embedded into a Text widget so both axes scroll.
        text = tk.Text(frame_1,relief="flat",xscrollcommand=scrollbar.set,yscrollcommand=scrollbar_2.set,borderwidth=0)
        scrollbar.config(command=text.xview,)
        scrollbar.pack(side="bottom",fill="x")
        scrollbar_2.config(command=text.yview)
        scrollbar_2.pack(side="right",fill="y")
        text.pack(side="top",fill="both",expand=True)
        self.Var=[]
        self.check_box=[]
        for i in range(len(self.master.temp_2)):
            self.Var.append(tk.IntVar())
            cb = tk.Checkbutton(frame_1,text=self.master.temp_2[i],variable=self.Var[i], font=font,padx=0,pady=0,bd=0,bg="white",borderwidth =0)
            self.check_box.append(cb)
            text.window_create("end", window=cb)
            text.insert("end", "\n")
        scrollbar["command"]=text.xview
        scrollbar_2["command"]=text.yview
        tk.Button(self,text='모두 선택',command=self.set_all).pack()
        tk.Button(self,text='모두 선택 취소',command=self.deselect_all).pack()
        tk.Button(self,text='선택 완료',command=self.item).pack()
    def item(self):
        """Queue the checked files for conversion and advance to PageTwo."""
        self.master.temp_4=[self.master.temp_2[i] for i in range(len(self.Var)) if self.Var[i].get()==1]
        self.master.switch_frame(PageTwo)
    def set_all(self):
        # Tick every checkbox.
        [i.select() for i in self.check_box]
    def deselect_all(self):
        # Untick every checkbox.
        [i.deselect() for i in self.check_box]
    def load_excel(self):
        """Load every workbook selected on the start page, normalise the
        description column using the word-list text files next to the
        program, and save a cleaned copy into the output folder.

        .xls files are converted to .xlsx via pyexcel first. Files whose
        first sheet shows the expected marker text are also queued in
        master.temp_2 / master.temp_3 for the account-number workflow.
        """
        for pre in self.master.temp:
            if (pre).split(".")[1]=="xls":
                try:
                    # Convert to .xlsx in the working directory, load it,
                    # then delete the temporary copy.
                    p.save_book_as(file_name='엑셀 넣는 곳\\'+pre, dest_file_name=pre+'x')
                    pre_save=pre+'x'
                    workbook=openpyxl.load_workbook(pre+'x')
                    worksheed=workbook[workbook.sheetnames[0]]
                    os.remove(pre+'x')
                except:
                    tk.messagebox.showerror("오류","엑셀파일이 제대로 된 파일인지 확인해 주세요.")
            elif (pre).split(".")[1]=="xlsx":
                try:
                    workbook=openpyxl.load_workbook('엑셀 넣는 곳\\'+pre)
                    pre_save=pre
                    worksheed=workbook[workbook.sheetnames[0]]
                except:
                    tk.messagebox.showerror("오류","엑셀파일이 제대로 된 파일인지 확인해 주세요.")
                # NOTE(review): the rest of this branch is indented under the
                # "xlsx" case, so .xls files reuse state from the previous
                # iteration (or raise NameError on the first) -- confirm.
                all_values = []
                serial_number=[[7,5],[8,6],[2,4]]
                for row in worksheed.rows:
                    row_value = []
                    for cell in row:
                        row_value.append(cell.value)
                    all_values.append(row_value)
                # Pick the layout by marker cell; layout 0 also enables the
                # account-number workflow on later pages.
                if all_values[5][5]=="거래내용":
                    self.master.temp_2.append(pre)
                    self.serial_count=0
                elif all_values[6][6]=="거래기록사항":
                    self.serial_count=1
                elif all_values[0][4]=="내용":
                    self.serial_count=2
            # Copy the original description column into a backup column at
            # the right edge of the sheet before cleaning it in place.
            count=self.serial_number[self.serial_count][0]
            after_cell="%s%d"%(chr(ord("A")+len(all_values[self.serial_number[self.serial_count][0]-2])),count)
            before_cell="%s%d"%(chr(ord("A")+self.serial_number[self.serial_count][1]),count)
            for i in range(len(all_values)-self.serial_number[self.serial_count][0]+1):
                worksheed[after_cell].value=worksheed[before_cell].value
                count+=1
                after_cell="%s%d"%(chr(ord("A")+len(all_values[self.serial_number[self.serial_count][0]-2])),count)
                before_cell="%s%d"%(chr(ord("A")+self.serial_number[self.serial_count][1]),count)
            # Re-read the sheet now that the backup column exists.
            all_values = []
            for row in worksheed.rows:
                row_value = []
                for cell in row:
                    row_value.append(cell.value)
                all_values.append(row_value)
            data=np.array(all_values)[self.serial_number[self.serial_count][0]-1:]
            # Strip unneeded substrings from the description column.
            num=['1','2','3','4','5','6','7','8','9','0','1','2','3','4','5','6','7','8','9','0']
            mon=['상','하','월']
            # User-editable word lists: deletions, whole-cell conversions,
            # substring replacements, and on/off switches for the built-ins.
            delete_file=open("삭제 단어 목록.txt", encoding='UTF8')
            delete_=delete_file.read()
            dele=[]
            if delete_:
                dele=list((map(str,delete_.split("\n"))))
            change_file=open("변환 단어 목록.txt", encoding='UTF8')
            change_=change_file.read()
            chan=[]
            if change_:
                chan=[[i.split("//")[0],i.split("//")[1]] for i in list((map(str,change_.split("\n"))))]
            replace_file=open("수정 단어 목록.txt", encoding='UTF8')
            replace_=replace_file.read()
            repl=[]
            if replace_:
                repl=[[i.split("//")[0],i.split("//")[1]] for i in list((map(str,replace_.split("\n"))))]
            judge_file=open("기본 적용.txt", encoding='UTF8')
            judge=[str(i.split("=")[1]).replace(" ","") for i in list((map(str,judge_file.read().split("\n"))))]
            deli=[]
            # Year prefix taken from whichever column holds the timestamp.
            if all_values[self.serial_number[self.serial_count][0]-2][0]=="거래일시":
                YY=data[0,0][:4]
            elif all_values[self.serial_number[self.serial_count][0]-2][1]=="거래일시":
                YY=data[0,1][:4]
            for i in num:
                for j in mon:
                    deli.append(i+j)
            count=0
            for i in data[:,self.serial_number[self.serial_count][1]]:
                if i!=None:
                    if judge[1]=='1':
                        for j in deli:
                            i=i.replace(j,'')
                    if judge[0]=='1':
                        for j in num:
                            i=i.replace(j,'')
                # NOTE(review): the loops below sit outside the None check
                # above, so a None cell would raise here -- confirm intended.
                for j in dele:
                    i=i.replace(j,'')
                if judge[2]=='1':
                    if ("년결산" or "년 결산") in i:
                        i=YY+"년결산"
                if judge[3]=='1':
                    # Normalise every "(주)"/"주식회사" spelling variant.
                    for j in ['(주)','주)','(주','(주)','㈜','주식회사)','주식)','주)','(주식회사','(주식','(주','주식회사','주식회','주식']:
                        if j in i:
                            i="㈜"+i.replace(j,'')
                if "㈜" in i:
                    i=i.replace("㈜","(주)")
                for j in range(len(chan)):
                    if chan[j][0] in i:
                        i=chan[j][1]
                for j in range(len(repl)):
                    i=i.replace(repl[j][0],repl[j][1])
                if "()" in i:
                    i=i.replace("()","")
                data[count,self.serial_number[self.serial_count][1]]=i
                count+=1
            # Write the cleaned descriptions back into the row list.
            for i in range(len(data[:,self.serial_number[self.serial_count][1]])):
                if data[i,self.serial_number[self.serial_count][1]] != None:
                    k=self.serial_number[self.serial_count][0]+i-1
                    all_values[k][self.serial_number[self.serial_count][1]]=data[i,self.serial_number[self.serial_count][1]]
            if self.serial_count==0:
                self.master.temp_3[pre]=all_values
            # Save a fresh workbook containing the cleaned values.
            wb=openpyxl.Workbook()
            ws=wb.active
            for i in all_values:
                ws.append(i)
            wb.save('엑셀 나오는 곳\\수정 후_'+pre_save)
class PageTwo(tk.Frame):
    """Interactive page for one workbook from the conversion queue.

    Layout: frame1 (left) lists the account-number groups; frame2 (right)
    lists the candidate descriptions inside the selected group; frame3
    (bottom-left) previews the rows behind a description; frame4
    (bottom-right) holds the action buttons. The chosen canonical name per
    group is the dict key of ``self.selec``; save_excel writes it back.
    """
    def __init__(self, master):
        self.master=master
        tk.Frame.__init__(self, master)
        tk.Frame.configure(self)
        # Take the next queued file; its values/selections were prepared by PageOne.
        self.pre=self.master.temp_4.pop(0)
        self.all_values=self.master.temp_3[self.pre]
        self.data=np.array(self.all_values)[6:]
        self.selec=self.master.selec_temp[self.pre]
        tk.Label(self,text=self.pre,font=("TkDefaultFont",15,"bold")).pack()
        self.top_frame=tk.Frame(self, relief="sunken", bd=2)
        self.top_frame.pack(side="top",fill="both",expand=True)
        self.bottom_frame=tk.Frame(self, relief="sunken", bd=2)
        self.bottom_frame.pack(side="bottom",fill="both",expand=True)
        self.frame1=None
        self.frame2=None
        self.frame3=None
        self.frame4=None
        # Currently selected group key / description key (None = nothing picked).
        self.jud_frame2=None
        self.jud_frame3=None
        self.listbox1=None
        self.listbox2=None
        self.Frame1()
        self.Frame2()
        self.Frame3()
        self.Frame4()
    def delete_Frame(self):
        """Destroy all sub-frames and this page; any error is swallowed."""
        try :
            self.frame1.destroy()
            self.frame2.destroy()
            self.frame3.destroy()
            self.frame4.destroy()
            self.destroy()
        except :
            k=None
    def Frame1(self):
        """(Re)build the left list of account-number groups."""
        try :
            self.frame1.destroy()
            self.frame1=tk.Frame(self.top_frame, relief="sunken", bd=2, bg='white')
            self.frame1.pack(side="left",fill="both",expand=True)
        except :
            self.frame1=tk.Frame(self.top_frame, relief="sunken", bd=2, bg='white')
            self.frame1.pack(side="left",fill="both",expand=True)
        scrollbar=tk.Scrollbar(self.frame1)
        scrollbar.pack(side="right",fill="y")
        scrollbar_2=tk.Scrollbar(self.frame1,orient=tk.HORIZONTAL)
        scrollbar_2.pack(side="bottom",fill="x")
        self.listbox1=tk.Listbox(self.frame1, width=25,height=18, selectmode="extended", xscrollcommand=scrollbar_2.set,yscrollcommand=scrollbar.set,font=("TkDefaultFont",10))
        for i in range(len(self.selec.keys())):
            self.listbox1.insert(i,list(self.selec.keys())[i])
        self.listbox1.bind('<Double-1>',self.Frame1_clickevent)
        self.listbox1.pack(fill="both",expand=True)
        scrollbar["command"]=self.listbox1.yview
        scrollbar_2["command"]=self.listbox1.xview
    def Frame1_clickevent(self,event):
        """Double-click on a group: show its descriptions, clear the preview."""
        self.jud_frame2=str(self.listbox1.selection_get())
        self.jud_frame3=None
        self.Frame2()
        self.Frame3()
    def Frame2(self):
        """(Re)build the right list of descriptions for the selected group
        (empty listbox when no group is selected)."""
        try :
            self.frame2.destroy()
            self.frame2=tk.Frame(self.top_frame, relief="sunken", bd=2, bg='white')
            self.frame2.pack(side="right",fill="both",expand=True)
        except :
            self.frame2=tk.Frame(self.top_frame, relief="sunken", bd=2, bg='white')
            self.frame2.pack(side="right",fill="both",expand=True)
        if self.jud_frame2!=None:
            scrollbar=tk.Scrollbar(self.frame2)
            scrollbar.pack(side="right",fill="y")
            scrollbar_2=tk.Scrollbar(self.frame2,orient=tk.HORIZONTAL)
            scrollbar_2.pack(side="bottom",fill="x")
            self.listbox2=tk.Listbox(self.frame2, width=75,height=18, selectmode="extended", xscrollcommand=scrollbar_2.set,yscrollcommand=scrollbar.set,font=("TkDefaultFont",10))
            for i in range(len(self.selec[self.jud_frame2].keys())):
                self.listbox2.insert(i,list(self.selec[self.jud_frame2].keys())[i])
            self.listbox2.bind('<Double-1>',self.Frame2_clickevent)
            self.listbox2.pack(fill="both",expand=True)
            scrollbar["command"]=self.listbox2.yview
            scrollbar_2["command"]=self.listbox2.xview
        elif self.jud_frame2==None:
            scrollbar=tk.Scrollbar(self.frame2)
            scrollbar.pack(side="right",fill="y")
            scrollbar_2=tk.Scrollbar(self.frame2,orient=tk.HORIZONTAL)
            scrollbar_2.pack(side="bottom",fill="x")
            self.listbox2=tk.Listbox(self.frame2, width=75,height=18, selectmode="extended", xscrollcommand=scrollbar_2.set,yscrollcommand=scrollbar.set,font=("TkDefaultFont",10))
            self.listbox2.pack(fill="both",expand=True)
            scrollbar["command"]=self.listbox2.yview
            scrollbar_2["command"]=self.listbox2.xview
    def Frame2_clickevent(self,event):
        """Double-click on a description: make it the group's canonical name
        (re-key self.selec) and refresh the group list."""
        self.jud_frame3=str(self.listbox2.selection_get())
        self.selec[self.jud_frame3]=self.selec.pop(self.jud_frame2)
        self.jud_frame2=self.jud_frame3
        self.Frame1()
    def Frame3(self):
        """(Re)build the bottom preview: a Treeview of the rows behind the
        selected description, with column widths estimated per character."""
        try :
            self.frame3.destroy()
            self.frame3=tk.Frame(self.bottom_frame, width=650,height=200, relief="sunken", bd=2, bg='white')
            self.frame3.pack(side="left",fill="both",expand=True)
        except :
            self.frame3=tk.Frame(self.bottom_frame, width=650,height=200, relief="sunken", bd=2, bg='white')
            self.frame3.pack(side="left",fill="both",expand=True)
        if self.jud_frame3!=None:
            # Header row (row 5 of the sheet) supplies the column titles.
            column=[]
            column_name=[]
            for i in range(len(self.all_values[5])):
                column.append(i+1)
                column_name.append(self.all_values[5][i])
            if column_name[len(column_name)-1]==None:
                column_name[len(column_name)-1]="백업 내용"
            # Longest cell string per column among the previewed rows.
            len_treelist=[]
            for i in range(len((self.selec[self.jud_frame2])[self.jud_frame3])):
                j=list(self.data[((self.selec[self.jud_frame2])[self.jud_frame3])[i]])
                for k in range(len(j)):
                    if len(len_treelist)<len(j):
                        len_treelist.append(str(j[k]))
                    else:
                        len_treelist[k]=str(max([len_treelist[k],str(j[k])],key=len))
            # Approximate pixel widths: 9px for ascii digits/lowercase,
            # 12px for uppercase, 17px for everything else (e.g. Hangul).
            len_treeview=[]
            for i in len_treelist:
                lenght=0
                for j in str(i):
                    if (ord("a")<=ord(j) and ord("z")>=ord(j)) or (ord("0")<=ord(j) and ord("9")>=ord(j)) or ord(j)==45 or ord(j)==58:
                        lenght+=9
                    elif (ord("A")<=ord(j) and ord("Z")>=ord(j)):
                        lenght+=12
                    else : lenght+=17
                len_treeview.append(lenght)
            column_name_len_treeview=[]
            for i in column_name:
                lenght=0
                for j in str(i):
                    if (ord("a")<=ord(j) and ord("z")>=ord(j)) or (ord("0")<=ord(j) and ord("9")>=ord(j)) or ord(j)==45 or ord(j)==58:
                        lenght+=9
                    elif (ord("A")<=ord(j) and ord("Z")>=ord(j)):
                        lenght+=12
                    else : lenght+=17
                column_name_len_treeview.append(lenght)
            for i in range(len(column_name_len_treeview)):
                len_treeview[i]=max([len_treeview[i],column_name_len_treeview[i]])
            treeview=ttk.Treeview(self.frame3, columns=column, displaycolumns=column)
            scroll_x = ttk.Scrollbar(self.frame3, orient="horizontal", command=treeview.xview)
            scroll_x.pack(side='bottom', fill='x')
            treeview.configure(xscrollcommand=scroll_x.set)
            scroll_y = ttk.Scrollbar(self.frame3, orient="vertical", command=treeview.yview)
            scroll_y.pack(side='right', fill='y')
            treeview.configure(yscrollcommand=scroll_y.set)
            treeview.column("#0", width=40, anchor="center")
            treeview.heading("#0", text="index", anchor="center")
            treeview.pack()
            for i in range(len(column)):
                treeview.column("#%d"%(i+1), width=len_treeview[i], anchor="center")
                treeview.heading(i+1, text=column_name[i], anchor="center")
            for i in range(len((self.selec[self.jud_frame2])[self.jud_frame3])):
                treeview.insert('', 'end', text=i, values=list(self.data[((self.selec[self.jud_frame2])[self.jud_frame3])[i]]), iid=str(i)+"번")
        elif self.jud_frame3==None:
            # Nothing selected yet: show an empty placeholder listbox.
            scrollbar=tk.Scrollbar(self.frame3)
            scrollbar.pack(side="right",fill="y")
            scrollbar_2=tk.Scrollbar(self.frame3,orient=tk.HORIZONTAL)
            scrollbar_2.pack(side="bottom",fill="x")
            listbox=tk.Listbox(self.frame3, width=90,height=20, selectmode="extended", xscrollcommand=scrollbar_2.set,yscrollcommand=scrollbar.set,font=("TkDefaultFont",10))
            listbox.bind('<Double-1>')
            listbox.pack(fill="both",expand=True)
            scrollbar["command"]=listbox.yview
            scrollbar_2["command"]=listbox.xview
    def Frame4(self):
        """(Re)build the action-button column; '다음으로' (next) only appears
        while further files remain in the queue."""
        try :
            self.frame4.destroy()
            self.frame4=tk.Frame(self.bottom_frame, width=150,height=200, relief="sunken", bd=2)
            self.frame4.pack(side="right",fill="both",expand=True)
        except :
            self.frame4=tk.Frame(self.bottom_frame, width=150,height=200, relief="sunken", bd=2)
            self.frame4.pack(side="right",fill="both",expand=True)
        tk.Button(self.frame4, text="정보확인",font=("TkDefaultFont",15,"bold"),command=lambda:self.checking()).pack()
        tk.Button(self.frame4, text="넘어가기",font=("TkDefaultFont",15,"bold"),command=lambda:self.deleting()).pack()
        tk.Button(self.frame4, text="짧게변경",font=("TkDefaultFont",15,"bold"),command=lambda:self.all_setting_short()).pack()
        tk.Button(self.frame4, text="길게변경",font=("TkDefaultFont",15,"bold"),command=lambda:self.all_setting_long()).pack()
        if len(self.master.temp_4)>=1:
            tk.Button(self.frame4, text="다음으로",font=("TkDefaultFont",15,"bold"),command=lambda: self.next()).pack()
        tk.Button(self.frame4, text="EXIT", font=("TkDefaultFont",15,"bold"),command=lambda: self._exit()).pack()
    def _exit(self):
        """Ask whether to save the current file, then terminate."""
        MsgBox = tk.messagebox.askquestion ('종료','저장하시겠습니까?\n("예" 를 누르시면 현재까지 진행된 내용이 바뀝니다.)')
        if MsgBox=="yes":
            self.save_excel()
        sys.exit()
    def next(self):
        """Optionally save this file, then rebuild the page for the next one."""
        MsgBox = tk.messagebox.askquestion ('다음','저장하시겠습니까?\n("예" 를 누르시면 변경된 내용이 저장된 후 다음 파일로 넘어갑니다.)')
        if MsgBox=="yes":
            self.save_excel()
            self.delete_Frame()
            self.master.switch_frame(PageTwo)
        elif MsgBox=="no":
            self.delete_Frame()
            self.master.switch_frame(PageTwo)
    def save_excel(self):
        """Write the chosen canonical names back into column 5 of every
        grouped row and save the workbook copy into the output folder."""
        for i in list(self.selec.keys()):
            for j in list(self.selec[str(i)].keys()):
                for k in (self.selec[str(i)])[str(j)]:
                    self.data[k,5]=i
        # data starts at sheet row 6, hence the +6 offset into all_values.
        for i in range(len(self.data[:,5])):
            if self.data[i,5] != None:
                k=6+i
                self.all_values[k][5]=self.data[i,5]
        wb=openpyxl.Workbook()
        ws=wb.active
        for i in self.all_values:
            ws.append(i)
        wb.save('엑셀 나오는 곳\\수정 후_'+self.pre.split(".")[0]+".xlsx")
    def checking(self):
        """Preview the rows of the description currently selected in listbox2."""
        self.jud_frame3=self.listbox2.selection_get()
        self.Frame3()
    def deleting(self):
        """After confirmation, drop the selected group from the change list."""
        if self.jud_frame2!=None:
            MsgBox = tk.messagebox.askquestion ('삭제','정말로 넘어가시겠습니까?\n("예" 를 누르시면 변경 목록에서 삭제됩니다)')
            if MsgBox=="yes":
                self.selec.pop(str(self.jud_frame2))
                self.jud_frame2=None
                self.jud_frame3=None
                self.Frame1()
                self.Frame2()
                self.Frame3()
    def all_setting_short(self):
        """After confirmation, re-key every group to its shortest description."""
        MsgBox = tk.messagebox.askquestion ('일괄변경','정말로 일괄변경 하시겠습니까?\n"예" 를 누르시면 가장 짧은 내용이 선택됩니다.')
        if MsgBox=="yes":
            self.jud_frame2=None
            self.jud_frame3=None
            new={}
            for i in self.selec.keys():
                new[str(min(list(self.selec[str(i)].keys()),key=len))]=self.selec[str(i)]
            self.selec=new
            self.Frame1()
            self.Frame2()
            self.Frame3()
    def all_setting_long(self):
        """After confirmation, re-key every group to its longest description."""
        MsgBox = tk.messagebox.askquestion ('일괄변경','정말로 일괄변경 하시겠습니까?\n"예" 를 누르시면 가장 긴 내용이 선택됩니다.')
        if MsgBox=="yes":
            self.jud_frame2=None
            self.jud_frame3=None
            new={}
            for i in self.selec.keys():
                new[str(max(list(self.selec[str(i)].keys()),key=len))]=self.selec[str(i)]
            self.selec=new
            self.Frame1()
            self.Frame2()
            self.Frame3()
if __name__ == "__main__":
    # Build the root window and hand control to Tk's event loop.
    app = SampleApp()
    app.mainloop()
"dltmdgus1208@gmail.com"
] | dltmdgus1208@gmail.com |
18735fd8a03cafe6b5b2fa7d241a8f0e31a492b3 | 51d504622c8bde5096d954bf9b38789d48ba4ff7 | /Python/flask_mysql/crud/users/flask_app/__init__.py | f2767c96c43e0e7e60569fcd53ccee202e799e37 | [] | no_license | BLee1126/Dojo-Assignments | 36c8fb2294c5cd6a04c065415aae12225c0eb483 | d40d8f6569b1f504d1785d8f34d27c58eab406c8 | refs/heads/main | 2023-07-10T16:00:30.863709 | 2021-08-20T16:42:10 | 2021-08-20T16:42:10 | 368,209,920 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 91 | py | # __init__.py
from flask import Flask
app = Flask(__name__)
app.secret_key = "shhhhhh"
| [
"blee1126@gmail.com"
] | blee1126@gmail.com |
7e7ca2bb4d9720d03335e98c0747b14c5fab0583 | cc6c9996e8601c28dc0d00bad1daf7280dd8338c | /python/qa_zcz_despreading.py | f4eb192b2e226828cf919e6b466340698b08add4 | [] | no_license | eokeeffe/gr-spreading | 5fad5c6e2db6637bbbf182a93bc45295d1d1246a | 886e1ebea61fa0c19688aff399aefe8339c1a097 | refs/heads/master | 2020-04-07T20:37:06.621572 | 2018-11-22T12:31:13 | 2018-11-22T12:31:13 | 158,696,392 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,239 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2018 <+YOU OR YOUR COMPANY+>.
#
# This is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, gr_unittest
from gnuradio import blocks
import spreading_swig as spreading
class qa_zcz_despreading (gr_unittest.TestCase):
def setUp (self):
self.tb = gr.top_block ()
def tearDown (self):
self.tb = None
def test_001_t (self):
# set up fg
self.tb.run ()
# check data
if __name__ == '__main__':
gr_unittest.run(qa_zcz_despreading, "qa_zcz_despreading.xml")
| [
"evan.o-keeffe@ucdconnect.ie"
] | evan.o-keeffe@ucdconnect.ie |
33dfd9eb9fc3006a8361ccc098a0c07b7feb98b1 | b106f6d4b3776b14fca34864bee4ccf3fb5186a9 | /OLS(F, M).py | effaf45acf0a63c0e1e6b56d04e9b9acec62eb90 | [] | no_license | KalyakulinaAnastasiya/DNA | 935bdacb3d5a723abd1657008f63e50696ca4679 | 171287d9c9eba2e9140851a6f73982691f6febbd | refs/heads/master | 2020-09-09T15:18:30.442332 | 2020-05-05T18:51:32 | 2020-05-05T18:51:32 | 221,481,882 | 0 | 0 | null | 2019-11-13T14:46:44 | 2019-11-13T14:46:43 | null | UTF-8 | Python | false | false | 1,529 | py | import numpy as np
import statsmodels.api as sm
import pickle
import matplotlib.pyplot as plt
file = open('observables.txt', 'r')
age_key = 'age'
#pers_key = 'geo_accession'
gender_key = 'gender'
line = file.readline().rstrip()
line_list = line.split('\t')
#pers_id = line_list.index(pers_key)
age_id = line_list.index(age_key)
gender_id = line_list.index(gender_key)
line_age = []
line_m = []
line_f = []
age_m = []
age_f = []
i = 0
for line in file:
line_list = line.rstrip().split('\t')
line_age.append(int(line_list[age_id]))
if line_list[gender_id] == 'M':
line_m.append(i)
else:
line_f.append(i)
i += 1
file.close()
for i in line_m:
age_m.append(line_age[i])
for i in line_f:
age_f.append(line_age[i])
with open('gene_row', 'rb') as handle:
gene_row = pickle.load(handle)
gene_id = gene_row['ELOVL2']
data = np.load('gene_npz.txt.npz')
betas = data['arr_0']
cpg_betas = betas[gene_id]
betas_m = []
betas_f = []
for i in line_m:
betas_m.append(cpg_betas[i])
for i in line_f:
betas_f.append(cpg_betas[i])
X = sm.add_constant(age_m)
model = sm.OLS(betas_m, X)
results_m = model.fit()
Y = sm.add_constant(age_f)
model = sm.OLS(betas_f, Y)
results_f = model.fit()
plt.scatter(age_m, betas_m, label='', color='c', s=8)
plt.scatter(age_f, betas_f, label='', color='m', s=8)
plt.plot(X, results_m.predict(X), color='blue', linewidth=2)
plt.plot(Y, results_f.predict(Y), color='red', linewidth=2)
plt.title('ELOVL2')
plt.xlabel('age')
plt.ylabel('betas')
plt.show() | [
"aaron.blare@mail.ru"
] | aaron.blare@mail.ru |
42a05627e3a98cc0ead95e20b03d32f2cefee727 | 67d99eaf3e2355664d6b476e0cdfb4a376f5ace3 | /setup.py | 931fc78bd4758db172c223643c75612eea8bf75f | [] | no_license | Jef808/TicTacToePython | 7e9868b2efd28f1c2714ddce2db7eaae2afda9ea | c60f40bb3f4dd4beeee4533cb2d05a0b9251e98e | refs/heads/master | 2022-12-11T06:25:27.343644 | 2020-08-26T01:18:10 | 2020-08-26T01:18:10 | 290,363,299 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 119 | py | from setuptools import setup, find_packages
# Minimal packaging config; find_packages() auto-discovers every package in the tree.
setup(name='TicTacToePython', version='2.0', packages=find_packages())
| [
"jf.arbour@gmail.com"
] | jf.arbour@gmail.com |
e62ac7a0915b7c7f70a113110172c99d24e59e8f | 81638739f723dbca5e662e572a9cef790319a430 | /conductr_cli/test/test_sandbox_logs.py | 56d88dbe3bb0f4c75921fbf7daebfa183a99c4ba | [
"LicenseRef-scancode-unknown-license-reference",
"JSON",
"Apache-2.0"
] | permissive | typesafehub/conductr-cli | 2afe5909720a1bc6eae24dd7677ac66ec2c9822d | 0ed890284228ec8acc894d49a2ea2a598f16e130 | refs/heads/master | 2023-06-19T18:59:16.175762 | 2018-02-07T16:14:56 | 2018-02-07T16:14:56 | 28,919,275 | 14 | 20 | NOASSERTION | 2022-11-04T02:29:09 | 2015-01-07T15:04:13 | Python | UTF-8 | Python | false | false | 1,043 | py | from conductr_cli.test.cli_test_case import CliTestCase
from conductr_cli import sandbox_logs
from unittest.mock import MagicMock
import io
import tempfile
class TestSandboxLogs(CliTestCase):
    """Tests for sandbox_logs: log-file path derivation and tailing."""

    def test_log_files_is_correct(self):
        """log_files builds the core/agent log paths from args.image_dir."""
        self.assertEqual(
            ['/image/dir/core/logs/conductr.log', '/image/dir/agent/logs/conductr-agent.log'],
            sandbox_logs.log_files(MagicMock(**{'image_dir': '/image/dir'}))
        )

    def test_tail_reads_files(self):
        """tail reads both files to completion and writes their contents,
        in order, to the output stream."""
        # Bug fix: the original called tempfile.mkstemp() and then re-opened
        # the paths with open(), leaking the raw file descriptors mkstemp
        # returned. NamedTemporaryFile(delete=False) gives a managed file
        # object (closed by the with-block) whose path survives for tail().
        with tempfile.NamedTemporaryFile(mode='w', delete=False) as one_file:
            one_path = one_file.name
            one_file.write("line 1\nline 2\nline 3\n")
        with tempfile.NamedTemporaryFile(mode='w', delete=False) as two_file:
            two_path = two_file.name
            two_file.write("line a\nline b\nline c\n")
        output = io.StringIO()
        sandbox_logs.tail([one_path, two_path], False, output, 8, 0.25)
        self.assertEqual(
            "line 1\nline 2\nline 3\nline a\nline b\nline c\n",
            output.getvalue()
        )
| [
"longshorej@gmail.com"
] | longshorej@gmail.com |
1fa1a301a80606168abdda73ff6ba0c7c75eb089 | 0c6c7365d6ff8b694bc906ec5f74c741e8bb0d37 | /Algorithms/1-Two-Sum.py | 5a8065a1ff799e35aa89d3fd7283348dbcfd26ad | [] | no_license | XiongQiuQiu/leetcode-slove | d58ab90caa250c86b7a1ade8b60c669821d77995 | 60f0da57b8ea4bfb937e2fe0afe3caea719cd7e4 | refs/heads/master | 2021-01-23T11:21:15.069080 | 2019-07-08T15:42:48 | 2019-07-08T15:42:48 | 93,133,558 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 696 | py | '''
Given an array of integers, return indices of the two numbers such that they add up to a specific target.
You may assume that each input would have exactly one solution, and you may not use the same element twice.
Example:
Given nums = [2, 7, 11, 15], target = 9,
Because nums[0] + nums[1] = 2 + 7 = 9,
return [0, 1].
'''
class Solution(object):
    def twoSum(self, nums, target):
        """
        Return the indices of the two entries of ``nums`` that sum to
        ``target``. Exactly one such pair is assumed to exist and the
        same element may not be used twice.

        :type nums: List[int]
        :type target: int
        :rtype: List[int]
        """
        # One O(n) pass: map each value already seen to its index; when the
        # complement of the current value was seen earlier, the answer is
        # (earlier index, current index).
        seen = {}
        # Bug fix: the original used xrange, which does not exist on
        # Python 3; enumerate works on both and avoids manual indexing.
        for i, num in enumerate(nums):
            complement = target - num
            if complement in seen:
                # Return a list, matching the documented List[int] rtype
                # (the original returned a tuple).
                return [seen[complement], i]
            seen[num] = i
| [
"zjw2goo@gmail.com"
] | zjw2goo@gmail.com |
c808420814784eb74158420818d1e193c2cff1fe | eed5c6267fe9ac9031c21eae6bc53010261505ac | /tests/metrics/test_default_metrics.py | 9609d6d88e052ff6a942b412ee06c84c93ff3b82 | [
"MIT"
] | permissive | voxmedia/thumbor | 3a07ae182143b5a850bf63c36887a1ee8e3ad617 | 29b92b69e4c241ddd5ba429f8269d775a1508e70 | refs/heads/master | 2022-08-25T13:07:12.136876 | 2022-08-18T16:15:00 | 2022-08-18T16:15:00 | 22,433,808 | 6 | 0 | MIT | 2019-09-13T18:05:03 | 2014-07-30T15:33:42 | Python | UTF-8 | Python | false | false | 1,049 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
# thumbor imaging service
# https://github.com/thumbor/thumbor/wiki
# Licensed under the MIT license:
# http://www.opensource.org/licenses/mit-license
# Copyright (c) 2011 globo.com thumbor@googlegroups.com
import mock
from preggy import expect
import thumbor.metrics
from thumbor.importer import Importer
from tests.base import TestCase
class DefaultMetricsTestCase(TestCase):
    """Verifies that the default (logger-based) metrics backend is wired up."""

    def get_importer(self):
        """Build an Importer for the test config with all modules loaded."""
        result = Importer(self.config)
        result.import_modules()
        return result

    def test_can_create_context_with_default_metrics(self):
        """A freshly created context exposes the logger metrics backend."""
        context = self.context
        expect(context).not_to_be_null()
        expect(context.metrics).to_be_instance_of(
            thumbor.metrics.logger_metrics.Metrics)

    @mock.patch('thumbor.metrics.BaseMetrics.initialize')
    def test_can_initizalize_when_request_comes(self, mocked_initialize):
        """Metrics are initialized exactly once per handled request."""
        expect(mocked_initialize.call_count).to_equal(0)
        self.fetch('/unsafe/smart/image.jpg')
        expect(mocked_initialize.call_count).to_equal(1)
| [
"rflorianobr@gmail.com"
] | rflorianobr@gmail.com |
8792f9fb40411dda7586be8db31e4e63b961154c | 2dd814284a1408706459e7dd6295a4575617c0c6 | /cupyx/scipy/special/digamma.py | af54d2a7fd9ec2e5072f91abcaa7fd7cf6a903c3 | [
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] | permissive | dendisuhubdy/cupy | 4e31c646fa697f69abbb07f424910cc8e5f0e595 | b612827e858b8008455a76e8d9b396386c1e4467 | refs/heads/master | 2021-01-23T10:56:45.639699 | 2018-07-12T17:41:26 | 2018-07-12T17:41:26 | 93,111,021 | 0 | 0 | MIT | 2019-12-09T06:55:54 | 2017-06-02T00:31:07 | Python | UTF-8 | Python | false | false | 4,681 | py | # This source code contains SciPy's code.
# https://github.com/scipy/scipy/blob/master/scipy/special/cephes/psi.c
#
#
# Cephes Math Library Release 2.8: June, 2000
# Copyright 1984, 1987, 1992, 2000 by Stephen L. Moshier
#
#
# Code for the rational approximation on [1, 2] is:
#
# (C) Copyright John Maddock 2006.
# Use, modification and distribution are subject to the
# Boost Software License, Version 1.0. (See accompanying file
# LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
import cupy
from cupy import core
_digamma_kernel = None
polevl_definition = '''
template<int N> static __device__ double polevl(double x, double coef[])
{
double ans;
double *p;
p = coef;
ans = *p++;
for (int i = 0; i < N; ++i){
ans = ans * x + *p++;
}
return ans;
}
'''
psi_definition = '''
__constant__ double A[] = {
8.33333333333333333333E-2,
-2.10927960927960927961E-2,
7.57575757575757575758E-3,
-4.16666666666666666667E-3,
3.96825396825396825397E-3,
-8.33333333333333333333E-3,
8.33333333333333333333E-2
};
__constant__ double PI = 3.141592653589793;
__constant__ double EULER = 0.5772156649015329;
__constant__ float Y = 0.99558162689208984f;
__constant__ double root1 = 1569415565.0 / 1073741824.0;
__constant__ double root2 = (381566830.0 / 1073741824.0) / 1073741824.0;
__constant__ double root3 = 0.9016312093258695918615325266959189453125e-19;
__constant__ double P[] = {
-0.0020713321167745952,
-0.045251321448739056,
-0.28919126444774784,
-0.65031853770896507,
-0.32555031186804491,
0.25479851061131551
};
__constant__ double Q[] = {
-0.55789841321675513e-6,
0.0021284987017821144,
0.054151797245674225,
0.43593529692665969,
1.4606242909763515,
2.0767117023730469,
1.0
};
static __device__ double digamma_imp_1_2(double x)
{
/*
* Rational approximation on [1, 2] taken from Boost.
*
* Now for the approximation, we use the form:
*
* digamma(x) = (x - root) * (Y + R(x-1))
*
* Where root is the location of the positive root of digamma,
* Y is a constant, and R is optimised for low absolute error
* compared to Y.
*
* Maximum Deviation Found: 1.466e-18
* At double precision, max error found: 2.452e-17
*/
double r, g;
g = x - root1 - root2 - root3;
r = polevl<5>(x - 1.0, P) / polevl<6>(x - 1.0, Q);
return g * Y + g * r;
}
static __device__ double psi_asy(double x)
{
double y, z;
if (x < 1.0e17) {
z = 1.0 / (x * x);
y = z * polevl<6>(z, A);
}
else {
y = 0.0;
}
return log(x) - (0.5 / x) - y;
}
double __device__ psi(double x)
{
double y = 0.0;
double q, r;
int i, n;
if (isnan(x)) {
return x;
}
else if (isinf(x)){
if(x > 0){
return x;
}else{
return nan("");
}
}
else if (x == 0) {
return -1.0/0.0;
}
else if (x < 0.0) {
/* argument reduction before evaluating tan(pi * x) */
r = modf(x, &q);
if (r == 0.0) {
return nan("");
}
y = -PI / tan(PI * r);
x = 1.0 - x;
}
/* check for positive integer up to 10 */
if ((x <= 10.0) && (x == floor(x))) {
n = (int)x;
for (i = 1; i < n; i++) {
y += 1.0 / i;
}
y -= EULER;
return y;
}
/* use the recurrence relation to move x into [1, 2] */
if (x < 1.0) {
y -= 1.0 / x;
x += 1.0;
}
else if (x < 10.0) {
while (x > 2.0) {
x -= 1.0;
y += 1.0 / x;
}
}
if ((1.0 <= x) && (x <= 2.0)) {
y += digamma_imp_1_2(x);
return y;
}
/* x is large, use the asymptotic series */
y += psi_asy(x);
return y;
}
'''
def _get_digamma_kernel():
    """Build (once) and return the cached elementwise digamma kernel.

    The kernel applies the CUDA ``psi`` function from the preamble to
    every element; the compiled kernel is memoized in the module-level
    ``_digamma_kernel`` so compilation happens only on first use.
    """
    global _digamma_kernel
    if _digamma_kernel is None:
        _digamma_kernel = core.ElementwiseKernel(
            'T x', 'T y',
            """
            y = psi(x)
            """,
            'digamma_kernel',
            preamble=polevl_definition+psi_definition
        )
    return _digamma_kernel
def digamma(x):
    """The digamma function.

    Args:
        x (cupy.ndarray): The input of digamma function.

    Returns:
        cupy.ndarray: Computed value of digamma function.

    .. seealso:: :data:`scipy.special.digamma`
    """
    # The kernel is only defined for floating-point types: promote bool
    # and narrow integers to float32, wider integers to float64.
    kind = x.dtype.char
    if kind in '?ebBhH':
        x = x.astype(cupy.float32)
    elif kind in 'iIlLqQ':
        x = x.astype(cupy.float64)
    out = cupy.zeros_like(x)
    kernel = _get_digamma_kernel()
    kernel(x, out)
    return out
| [
"yoshikawa@preferred.jp"
] | yoshikawa@preferred.jp |
ad7ed55ad4a24f93212d491b8b82e0afff803c67 | 6a389811490cf5c4bbb8bb46cacf37b5992f9f22 | /Villordo_Cristian/001/001.py | dcd18937829e5283fa9ebd6c61418b22424defe1 | [] | no_license | informatorio2020com07/actividades | 40e2ba899e1a9d8ea6ac312ed7a480218ee6b79a | b9b76a5f8bc0a7da17ff378f954b38564f41fa66 | refs/heads/master | 2022-12-20T05:02:21.567549 | 2020-10-06T22:41:03 | 2020-10-06T22:41:03 | 285,361,906 | 2 | 26 | null | 2020-10-06T22:41:31 | 2020-08-05T17:39:32 | Python | UTF-8 | Python | false | false | 327 | py | # 001-
# Programa que pregunte al usuario, el precio del producto y la cantidad de unidades vendidas.
# Informar el total de ventas.
print("PRECIO DE PRODUCTOS")
producto=float(input())
print("CANTIDAD DE UNDIDADES VENDIDAS EN EL MES")
unidades=float(input())
print("el total de las ventas del mes es ", producto*unidades) | [
"cristianndvillordo11@gmail.com"
] | cristianndvillordo11@gmail.com |
67488237676831d1edf9d909b0ec985b4bd2e48b | 4959dcaf240badfcde1bb3de484dad7bf6dd88c6 | /Python/fun.py | a3d55d7cbb60a3e1db41d539ec80ba23962f5a89 | [] | no_license | awoltman/Computer_Science | 1025540f5cd087b8e5188e84557b9b276549a8de | 412de1d7519edea2e5ee0759ab3033d6ddfc81f5 | refs/heads/master | 2020-06-17T07:48:21.603061 | 2019-08-12T13:02:18 | 2019-08-12T13:02:18 | 195,849,350 | 0 | 0 | null | 2019-07-17T20:10:16 | 2019-07-08T16:27:34 | C++ | UTF-8 | Python | false | false | 60 | py | #first Hello World
message="This is Bob!"
print(message)
| [
"woltmanap@gmail.com"
] | woltmanap@gmail.com |
884ab6fcd860c6c1708f1f0f92d0a0b46f140e7f | a8fd25898010c2bfaa3e5c07782b6ec8738b6806 | /venv/Scripts/easy_install-script.py | d7ea1e7cc8f97577707b83df3e04f2a8694b7f18 | [] | no_license | manasarora98/FatiguedOperatorDetectionAlarming_PythonOpenCV | 2b9a312116dcdf99a12e760dd012b81bf0067e34 | 05b6949b6c6131a08d71aef377223662aca7791d | refs/heads/master | 2022-04-17T19:28:07.155742 | 2020-03-30T18:24:14 | 2020-03-30T18:24:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 467 | py | #!"C:\Users\Manas Arora\PycharmProjects\FatigueDetection\venv\Scripts\python.exe"
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==40.8.0','console_scripts','easy_install'
__requires__ = 'setuptools==40.8.0'
import re
import sys
from pkg_resources import load_entry_point
# Auto-generated setuptools console-script shim: do not edit by hand.
if __name__ == '__main__':
    # Strip any '-script.py'/'.exe' suffix from argv[0] so setuptools sees
    # the plain command name, then dispatch to the easy_install entry
    # point and propagate its return value as the process exit code.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('setuptools==40.8.0', 'console_scripts', 'easy_install')()
    )
| [
"manasarora98@gmail.com"
] | manasarora98@gmail.com |
699cc4b622f8e16c0d1b318294e129f9b6a4884c | cd8f1c6ceac85a870de457f237ea74509903cad3 | /tests/python/test_services/test_2/test.py | 754fecab92c98b3edb529bb68095be05eb69f7d2 | [] | no_license | HugoCornelis/sspy | 379cbfc08dc572c40f9dfba75662fb7ffa8c0d50 | e63e23a12add054c3fe98258877808b37ab76287 | refs/heads/master | 2020-06-04T17:06:07.893013 | 2019-06-16T15:25:47 | 2019-06-16T15:25:47 | 192,116,505 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,382 | py |
#*************************** Begin Service ********************************
class Service:
    """Minimal stub service used by the service-loading tests.

    It records its name and plugin handle; ``arguments`` is accepted but
    never stored, so ``GetArguments`` always returns ``None``.
    """

    def __init__(self, name=None, plugin=None, arguments=None, verbose=False):
        """Record the identifying data for this stub service."""
        self._name = name
        self._plugin_data = plugin
        self._module_name = ""
        self._arguments = None
        # NOTE(review): 'arguments' is accepted but deliberately(?) not
        # stored -- confirm whether the stub should forward it.
        if arguments is not None:
            pass

    def GetObject(self):
        """This stub manages no underlying object."""
        return None

    def GetName(self):
        """Return the service's configured name."""
        return self._name

    def GetPluginName(self):
        """Return the name reported by the plugin handle."""
        return self._plugin_data.GetName()

    def GetType(self):
        """Return the service type, which for this stub is its plugin name."""
        return self._plugin_data.GetName()

    def GetModuleName(self):
        """Return the module name (always the empty string in this stub)."""
        return self._module_name

    def GetArguments(self):
        """Return the stored arguments (always None in this stub)."""
        return self._arguments
#******************************* End Service ********************************
| [
"mandorodriguez@gmail.com"
] | mandorodriguez@gmail.com |
6072d7b1aeea4e91805c782132b00e67448bf944 | 303041f23462a2095fa107976269c09618d1642f | /source/check_source.py | c28c64f71c2bea8891142c4f6dfbeaa0aba8225e | [
"MIT"
] | permissive | billyquith/GWork | 99a890e75713040739ef4f3393cdb27d9507263a | ea2d70137c97701e5b9eb6114a80d5ecd2aaaff8 | refs/heads/gwork | 2021-04-18T19:17:14.936917 | 2019-07-06T14:27:46 | 2019-07-06T14:27:46 | 7,449,996 | 216 | 54 | NOASSERTION | 2019-06-26T21:30:51 | 2013-01-05T01:20:46 | C++ | UTF-8 | Python | false | false | 1,532 | py | #!/bin/env python
"""
Check source code follows the rules!
"""
import os
# Extensions and exact file names that get checked; entries in the
# IGNORE_* tuples are skipped (directories by exact path, files by name).
CHECK_EXTS = ('.h', '.cpp')
CHECK_NAMES = ('CMakeLists.txt',)
IGNORE_DIRS = ('./platform/include/Gwork/External',)
IGNORE_FILES = ('DebugBreak.h', 'FontData.h')
class Stats:
    """Counters accumulated across a whole run of the checker."""

    def __init__(self):
        # Files scanned / lines scanned / problems reported so far.
        self.files = self.lines = self.problems = 0
# Module-wide singleton that every checker function updates in place.
STATS = Stats()
def report(fpath, linenb, line, msg):
    """Print one problem report for *line* (1-based *linenb*) of *fpath*
    and bump the global problem counter."""
    # Bug fix: the original formatted (fpath, line, line), printing the
    # line's text where its number belonged.  Single-argument print with
    # parentheses is valid on both Python 2 and 3.
    print('[{}:{}] {}'.format(fpath, linenb, line))
    print('\t{}'.format(msg))
    STATS.problems += 1
def check_line(fpath, line, linenb):
    """Apply the per-line style rules to *line* and count it."""
    STATS.lines += 1
    # Rule: no hard tabs anywhere in the source.
    if line.find('\t') != -1:
        report(fpath, linenb, line, 'Tab used. Use spaces to indent.')
    # Line-length rule, intentionally disabled in the original:
    # if len(line) > 100:
    #     report(fpath, linenb, line, 'Line too long.')
def check_file(fpath):
    """Check every line of the file at *fpath*, updating the global stats."""
    STATS.files += 1
    with open(fpath, 'rb') as fh:
        # Iterate the file object directly (no readlines() copy of the
        # whole file) and let enumerate supply 1-based line numbers.
        for linenb, line in enumerate(fh, 1):
            check_line(fpath, line, linenb)
def check():
    """Walk the source tree from '.' and report all style problems found."""
    # os.walk replaces the Python-2-only os.path.walk (removed in py3);
    # behaviour matches, except only real files (not directory names) are
    # considered for checking.
    for dirname, _subdirs, filenames in os.walk('.'):
        if dirname in IGNORE_DIRS:
            continue
        for fname in filenames:
            if fname in IGNORE_FILES:
                # Bug fix: the original 'return'ed from the visitor here,
                # silently skipping every remaining file in the directory.
                continue
            _name, ext = os.path.splitext(fname)
            if (ext in CHECK_EXTS) or (fname in CHECK_NAMES):
                check_file(os.path.join(dirname, fname))
    print('{} files checked.'.format(STATS.files))
    print('{} lines.'.format(STATS.lines))
    print('{} problems were found.'.format(STATS.problems))
# Run the checker immediately when this script is executed.
check()
| [
"chinbillybilbo@gmail.com"
] | chinbillybilbo@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.