| Column | Dtype | Stats |
|---|---|---|
| blob_id | stringlengths | 40 to 40 |
| directory_id | stringlengths | 40 to 40 |
| path | stringlengths | 2 to 616 |
| content_id | stringlengths | 40 to 40 |
| detected_licenses | listlengths | 0 to 69 |
| license_type | stringclasses | 2 values |
| repo_name | stringlengths | 5 to 118 |
| snapshot_id | stringlengths | 40 to 40 |
| revision_id | stringlengths | 40 to 40 |
| branch_name | stringlengths | 4 to 63 |
| visit_date | timestamp[us] | |
| revision_date | timestamp[us] | |
| committer_date | timestamp[us] | |
| github_id | int64 | 2.91k to 686M, nullable |
| star_events_count | int64 | 0 to 209k |
| fork_events_count | int64 | 0 to 110k |
| gha_license_id | stringclasses | 23 values |
| gha_event_created_at | timestamp[us] | |
| gha_created_at | timestamp[us] | |
| gha_language | stringclasses | 213 values |
| src_encoding | stringclasses | 30 values |
| language | stringclasses | 1 value |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 2 to 10.3M |
| extension | stringclasses | 246 values |
| content | stringlengths | 2 to 10.3M |
| authors | listlengths | 1 to 1 |
| author_id | stringlengths | 0 to 212 |
228c37ece0125c92fb794abdbe15e5d3e7549659
|
fcbdf75b7ed7a07eddbbf76f4f372ce86b063dff
|
/pywal/post-wal-hook.py
|
5d7f40d53fab1f9e6d4625d07172ed89ca20293c
|
[] |
no_license
|
jcpetkovich/etc
|
b23cdfc0d09e0e6c58331988facf33ecebdec8c9
|
849c6821c3e15f0ffdab11f846f7cf16366ffb8f
|
refs/heads/master
| 2021-04-09T17:50:12.743242
| 2019-05-19T13:48:55
| 2019-05-19T13:48:55
| 2,250,102
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 713
|
py
|
#!/usr/bin/env python
import os
import pywal
import subprocess
wallpaper = pywal.wallpaper.get()
colours = pywal.colors.get(wallpaper)
homedir = os.path.expanduser("~")
template = os.path.join(
    homedir,
    "etc",
    "pywal",
    "stresources"
)
with open(template, "r") as f:
    template = f.read()
template = template.format(
    foreground = colours["special"]["foreground"],
    background = colours["special"]["background"],
    cursor = colours["special"]["cursor"]
)
stresources = os.path.join(homedir, ".cache/wal/stresources")
with open(stresources, "w") as f:
    f.write(template)
pywal.reload.xrdb(stresources)
subprocess.check_call(os.path.join(homedir, ".config", "bspwm", "theme"))
|
[
"jcpetkovich@gmail.com"
] |
jcpetkovich@gmail.com
|
ec28931ea5a08543eec3a1e6c60e61002578a4b5
|
f375f786dc428aaff33ee03d119fc12206d64090
|
/sprd/vowifi/tmtc_ut/sample/selecttest.py
|
21c876317f721078a05a11dabff09e389c9a2e83
|
[
"MIT"
] |
permissive
|
deevarvar/myLab
|
1b77773d5bfa334ee0331cf1926ebbb141c82984
|
7a5019f5f7fc11e173d350e6e2a7d2c80504782d
|
refs/heads/master
| 2021-04-15T15:39:39.248990
| 2020-02-12T13:18:53
| 2020-02-12T13:18:53
| 21,227,445
| 0
| 3
|
MIT
| 2019-12-19T09:47:29
| 2014-06-26T03:58:19
|
Python
|
UTF-8
|
Python
| false
| false
| 1,195
|
py
|
#-*- coding=utf-8 -*-
#author: zhihua.ye@spreadtrum.com
import os
import select, sys, subprocess
import shlex
import time
netstat="netstat -antpu"
vmstat = subprocess.Popen(shlex.split(netstat), shell=True, bufsize=1024,
                          stdout=subprocess.PIPE)
vmstat_pipe = vmstat.stdout
iostat_pipe = subprocess.Popen('top', shell=True, bufsize=1024,
                               stdout=subprocess.PIPE).stdout
pipe_dict = {vmstat_pipe.fileno():vmstat_pipe, iostat_pipe.fileno():iostat_pipe}
p = select.poll()
p.register(vmstat_pipe, select.POLLIN|select.POLLERR|select.POLLHUP)
p.register(iostat_pipe, select.POLLIN|select.POLLERR|select.POLLHUP)
while 1:
    result = p.poll(5000)
    if len(result) != 0:
        for m in result:
            # Polls the set of registered file descriptors, and returns a possibly-empty list containing (fd, event)
            if m[1] & select.POLLHUP and m[0] == iostat_pipe.fileno():
                print 'Get HUPUP from pipe', m[0]
                exit()
            if m[1] & select.POLLIN:
                #print "Get", pipe_dict[m[0]].readline(), "from pipe", m[0]
                print "Get", pipe_dict[m[0]].readline(), "from pipe", m[0]
|
[
"zhihua.ye@spreadtrum.com"
] |
zhihua.ye@spreadtrum.com
|
bf9588ee2a4af4d9357a51e13ab14a30f1f6561b
|
fe6775ca8c5b42710785e3a923974ae079f92c8f
|
/剑指offer/剑指 Offer 34. 二叉树中和为某一值的路径.py
|
d2c0d8e5407fb27e278426b603c91c3daad77a31
|
[] |
no_license
|
AiZhanghan/Leetcode
|
41bda6676fa1a25fa19e393553c1148ed51fdf72
|
101bce2fac8b188a4eb2f5e017293d21ad0ecb21
|
refs/heads/master
| 2021-06-28T10:48:07.865968
| 2020-11-20T09:45:15
| 2020-11-20T09:45:15
| 188,155,059
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 902
|
py
|
# Definition for a binary tree node.
class TreeNode:
    def __init__(self, x):
        self.val = x
        self.left = None
        self.right = None
class Solution:
    def pathSum(self, root, target):
        """
        Args:
            root: TreeNode
            target: int
        Return:
            list[list[int]]
        """
        self.res = []
        self.dfs(root, [], target)
        return self.res
    def dfs(self, root, path, target):
        """
        Args:
            root: TreeNode
            path: list[int]
            target: int
        """
        if not root:
            return
        path.append(root.val)
        target -= root.val
        if target == 0 and not root.left and not root.right:
            self.res.append(path[:])
        self.dfs(root.left, path, target)
        self.dfs(root.right, path, target)
        path.pop()
|
[
"35103759+AiZhanghan@users.noreply.github.com"
] |
35103759+AiZhanghan@users.noreply.github.com
|
3d96ef785703ba706cef3debfff21cb923ef6a21
|
8aa473c740acae28c2f2392208335f7463cedb70
|
/scripts/new_summary.py
|
8ca66c2022577f20430c02d6e822b365db3be5cc
|
[] |
no_license
|
nikvaessen/deep-learning-papers
|
74ad10c687421561df84e74e451bf3c5633d60f0
|
0892e053fb30d8e9f90bbcbea2fef4af6028db4a
|
refs/heads/master
| 2020-12-11T10:52:15.966968
| 2020-01-23T07:46:47
| 2020-01-23T07:46:47
| 233,828,831
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,405
|
py
|
################################################################################
# This script will ask the user for some information regarding the soon-to-be
# read paper before generating the starting point for the summary according to
# a summary template.
#
# Usage: python new_summary.py
#
# Author: Nik Vaessen
################################################################################
import os
import re
import datetime
import json
from pathlib import Path
################################################################################
# Implement functionality of script
title_template_str = "---title---"
estimated_minutes_template_str = "---time---"
url_template_str = "---url---"
date_template_str = "---date---"
topics_template_str = "---topic---"
meta_json_template_str = "---json---"
def main():
    # set working directory to root of path
    script_path = Path(os.path.abspath(__file__))
    root_dir = script_path.parent.parent.as_posix()
    os.chdir(root_dir)
    # Query essential information for creating new summary document
    title = input("What is the title of the article?: ")
    filename = input("What should the name of the summary markdown file be?: ")
    url = input("What is the URL of of the article?: ")
    estimated_minutes = input("How many minutes do you expect to read this paper?: ")
    topics = input("Give a comma-separated list of covered topic(s): ")
    date = datetime.datetime.now().strftime("%Y-%m-%d")
    meta_obj = json.dumps({
        "title": title,
        "url": url,
        "topics": topics,
        "date": date,
        "estimated_minutes": estimated_minutes
    })
    # Insert the information into the template file
    file = Path("summary_template.md").read_text()
    file = re.sub(title_template_str, title, file)
    file = re.sub(estimated_minutes_template_str, estimated_minutes, file)
    file = re.sub(url_template_str, url, file)
    file = re.sub(date_template_str, date, file)
    file = re.sub(topics_template_str, topics, file)
    file = re.sub(meta_json_template_str, meta_obj, file)
    # Create the new summary file
    summaries_dir = os.path.join(root_dir, "summaries")
    if not os.path.exists(summaries_dir):
        os.mkdir(summaries_dir)
    new_fn = os.path.join(summaries_dir, f"{filename}.md")
    with open(new_fn, 'w') as f:
        f.write(file)
if __name__ == '__main__':
    main()
|
[
"nikvaes@gmail.com"
] |
nikvaes@gmail.com
|
137bfd132fed47b27d3068a21b99a6a95fa99caa
|
ab25d838bd9904d6fb7eb6d5259d30bab63f4ec5
|
/python scripts/temp_meas.py
|
5c0aeab89d07484341f1d0fa052cf70937ebe3ce
|
[] |
no_license
|
crilleman/sensors_rust_rtic
|
adc83b09f7998e4934db0547476421e0efd68e9d
|
47012d7e26b735de8a28ea32ffecffaa440f4f9c
|
refs/heads/main
| 2023-05-02T08:31:46.824640
| 2021-05-24T10:00:22
| 2021-05-24T10:00:22
| 370,303,142
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,864
|
py
|
import matplotlib.pyplot as plt
import matplotlib.animation as animation
import serial
import datetime
# initialize serial port
ser = serial.Serial()
ser.port = '/dev/ttyACM0'
ser.baudrate = 115200
ser.timeout = 100 # specify timeout when using readline()
ser.open()
ser.flush()
if ser.is_open == True:
    print("\nAll right, serial port now open. Configuration:\n")
    print(ser, "\n") # print serial parameters
else:
    exit(1)
t_delta1 = []
t_delta2 = []
t_delta1.append(datetime.datetime.now()) # Time difference for the last 100 readings
t_delta2.append(datetime.datetime.now()) # Time difference for the last 100 readings
t_flag = 0
# Parameters
x_len = 5000 # Number of points to display
y_range = [0, 30] # Range of possible Y values to display
# Create figure for plotting
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
xs = list(range(0, x_len))
ys = [0] * x_len
ax.set_ylim(y_range)
# Initialize communication with TMP102
#tmp102.init()
# Create a blank line. We will update the line in animate
line, = ax.plot(xs, ys)
# Add labels
plt.title('Temperature over Time')
plt.xlabel('Samples')
plt.ylabel('Temperature (°C)')
# This function is called periodically from FuncAnimation
def animate(i, ys,):
    # Read temperature (Celsius) from TMP102
    #temp_c = round(tmp102.read_temp(), 2)
    ser_bytes = ser.readline()
    #print(ser_bytes)
    decoded_bytes = float(ser_bytes[0:len(ser_bytes)].decode("utf-8"))
    #print(decoded_bytes)
    #decoded_bytes = 11;
    # Add y to list
    ys.append(decoded_bytes)
    # Limit y list to set number of items
    ys = ys[-x_len:]
    # Update line with new Y values
    line.set_ydata(ys)
    return line,
# Set up plot to call animate() function periodically
ani = animation.FuncAnimation(fig,
                              animate,
                              fargs=(ys, ),
                              interval=50,
                              blit=True)
plt.show()
|
[
"crille.nilsson98@outlook.com"
] |
crille.nilsson98@outlook.com
|
8c95ca238969c6cc4e144442f02d17a785ad82f5
|
d668209e9951d249020765c011a836f193004c01
|
/tools/pnnx/tests/test_nn_Dropout3d.py
|
4e6564745ef606264205fa1908bb385bd9700f5b
|
[
"BSD-3-Clause",
"Zlib",
"BSD-2-Clause"
] |
permissive
|
Tencent/ncnn
|
d8371746c00439304c279041647362a723330a79
|
14b000d2b739bd0f169a9ccfeb042da06fa0a84a
|
refs/heads/master
| 2023-08-31T14:04:36.635201
| 2023-08-31T04:19:23
| 2023-08-31T04:19:23
| 95,879,426
| 18,818
| 4,491
|
NOASSERTION
| 2023-09-14T15:44:56
| 2017-06-30T10:55:37
|
C++
|
UTF-8
|
Python
| false
| false
| 1,694
|
py
|
# Tencent is pleased to support the open source community by making ncnn available.
#
# Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
#
# Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# https://opensource.org/licenses/BSD-3-Clause
#
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
import torch
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
    def __init__(self):
        super(Model, self).__init__()
        self.dropout_0 = nn.Dropout3d()
        self.dropout_1 = nn.Dropout3d(p=0.7)
    def forward(self, x, y):
        x = self.dropout_0(x)
        y = self.dropout_1(y)
        return x, y
def test():
    net = Model()
    net.eval()
    torch.manual_seed(0)
    x = torch.rand(1, 12, 6, 8, 16)
    y = torch.rand(1, 3, 4, 5, 6)
    a0, a1 = net(x, y)
    # export torchscript
    mod = torch.jit.trace(net, (x, y))
    mod.save("test_nn_Dropout3d.pt")
    # torchscript to pnnx
    import os
    os.system("../src/pnnx test_nn_Dropout3d.pt inputshape=[1,12,6,8,16],[1,3,4,5,6]")
    # pnnx inference
    import test_nn_Dropout3d_pnnx
    b0, b1 = test_nn_Dropout3d_pnnx.test_inference()
    return torch.equal(a0, b0) and torch.equal(a1, b1)
if __name__ == "__main__":
    if test():
        exit(0)
    else:
        exit(1)
|
[
"noreply@github.com"
] |
Tencent.noreply@github.com
|
1180029814716f39bfe32a649fda8713b18156e3
|
310f39f99a975bb30fe7ec0231fe1ee75e96bb00
|
/Insertion_Sort/Insertion_Sort_Nested_ForLoop.py
|
e3eb2d3a8fbd7b84790c4c54fc95281fe7af5325
|
[] |
no_license
|
saurabhchris1/Algorithm-and-Data-Structure-Python
|
8ffc8b52b112f741758f6a5965c8c69cb2172745
|
152a217bb2c9f06cd1522516d3b9613d3dd07b57
|
refs/heads/master
| 2020-08-23T11:35:09.410942
| 2019-12-28T06:27:14
| 2019-12-28T06:27:14
| 216,606,904
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 530
|
py
|
# Insertion sort is a simple sorting algorithm that works the way
# we sort playing cards in our hands
def insertion_sort(arr):
    for i in range(1, len(arr)):
        key = arr[i]
        for j in range(i - 1, -1, -1):
            if key < arr[j]:
                arr[j + 1] = arr[j]
                arr[j] = key
            else:
                break
    return arr
if __name__ == '__main__':
    num = [2, 11, 6, 4, 7, 8]
    sorted_arr = insertion_sort(num)
    print ("The sorted array is : " + str(sorted_arr))
|
[
"saurabhchris1@gmail.com"
] |
saurabhchris1@gmail.com
|
18eb874b1c5193fb3f259bf045d9037e84ec371f
|
3b8f3c7e9b4dde71f96a4b98566937d0d1a912d3
|
/part3/NYT/Code/stemming.py
|
4c96b5ac1eafdf32ea33af83df6f45fce596e3ff
|
[] |
no_license
|
vijayjag-repo/Big-Data-Analysis
|
3951261f04943b20e550829a70eba454d41dbe36
|
273047cb42d2204883b4c721cd649987f0370d31
|
refs/heads/master
| 2020-06-17T09:00:53.278323
| 2019-07-08T19:16:33
| 2019-07-08T19:16:33
| 195,871,633
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,508
|
py
|
import nltk
import csv
import re
import stemming
from nltk.corpus import stopwords,wordnet
from nltk.stem import PorterStemmer, WordNetLemmatizer
from nltk.tokenize import word_tokenize
def main():
    stop_words = stopwords.words('english') + ['advertisement','support']
    print(stop_words)
    #stop_words.append('said')
    #stop_words.append('would')
    #stop_words.append('s')
    #raise NotImplementedError
    ps = PorterStemmer()
    wnl = WordNetLemmatizer()
    for value in range(1,101):
        text = ""
        with open("./NHL/NHL_File_" + str(value)+ ".txt",'r') as f:
            print("Running")
            for line in f:
                #print(para)
                #para = ["runner","running","run"]
                line = line.lower()
                line = re.sub(r"[^A-Za-z]+",' ',line)
                word_tokens = word_tokenize(line)
                #word_tokens = ["runner","running","run"]
                filtered_sentence = []
                stemmed_words = ""
                for w in word_tokens:
                    if w not in stop_words:
                        filtered_sentence.append(w)
                for w in filtered_sentence:
                    if w.endswith('e') or w.endswith('s') or w.endswith('y') or w.endswith('l'):
                        #stemmed_words.append(w +' : '+wnl.lemmatize(w))
                        stemmed_words = stemmed_words + wnl.lemmatize(w) + " "
                    else:
                        #stemmed_words.append(w+' : '+ps.stem(w))
                        stemmed_words = stemmed_words + ps.stem(w) + " "
                text = text + stemmed_words
        #print(text)
        text_file = open("./NHL_STEM/NHL_STEM_" +str(value) +".txt", "w")
        text_file.write(text)
        text_file.close()
        #raise NotImplementedError
if __name__ == "__main__":
    main()
|
[
"vijayjag@Vijays-MacBook-Pro.local"
] |
vijayjag@Vijays-MacBook-Pro.local
|
68c7b4afa78595c8214c5f807fe0d4ac67aa3bdf
|
ce772fdcd3a84a9c00667051f008a3e0a7a62135
|
/voice.py
|
f1a3bb834a79e86928a7074ec7eb9010130a5ef4
|
[] |
no_license
|
mugeshk97/generative-chatbot
|
d8e3bd3cc22a8c8e9e9a3b39896959ffd612a0ce
|
d52f17c88b6471dcbd1937258ceed127f768db21
|
refs/heads/master
| 2022-12-07T10:12:59.535612
| 2020-08-28T09:17:53
| 2020-08-28T09:17:53
| 291,003,359
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 922
|
py
|
import speech_recognition as sr
import os
import playsound
import pyttsx3
from gtts import gTTS
r = sr.Recognizer()
def get_audio():
    with sr.Microphone(sample_rate=48000, chunk_size=2048) as source:
        r.adjust_for_ambient_noise(source)
        playsound.playsound(f'Asset/Audio/start_sound.mp3')
        audio = r.listen(source)
        try:
            text = r.recognize_google(audio)
        except sr.UnknownValueError:
            print("Google Speech Recognition could not understand audio")
        except sr.RequestError as e:
            print("Could not request results from Google Speech Recognition service; {0}".format(e))
        return text
def put_audio(decoded_translation):
    engine = pyttsx3.init()
    engine.setProperty('rate', 120)
    voices = engine.getProperty('voices')
    engine.setProperty('voice', voices[0].id)
    engine.say(decoded_translation)
    return engine.runAndWait()
|
[
"noreply@github.com"
] |
mugeshk97.noreply@github.com
|
e8f1cc28656377cae6863757ccfa1b6b8e74f65f
|
5384b403a64c51c658d0b1f93d1998ec41857bd2
|
/python/DES/HW4.py
|
57517fe19077d2562814fd3d61862384dfe3b31b
|
[] |
no_license
|
BryanColeman/Projects
|
741850b9c996c2ea9cda208b0960fd4dd418080b
|
0ead1eaaafdd7504f52a68479fe4285cd95d958c
|
refs/heads/master
| 2021-01-01T07:19:09.523771
| 2020-05-05T14:36:53
| 2020-05-05T14:36:53
| 239,166,023
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,221
|
py
|
'''
Bryan Coleman
BJC18BV
The program in this file is the individual work of Bryan Coleman
'''
from random import seed,randint
from datetime import datetime
import string
'''
All the boxes that we will need to permute values
'''
shift = [1,1,2,2,2,2,2,2,
1,2,2,2,2,2,2,1]
PC = [14, 17, 11, 24, 1, 5,
3, 28, 15, 6, 21, 10,
23, 19, 12, 4, 26, 8,
16, 7, 27, 20, 13, 2,
41, 52, 31, 37, 47, 55,
30, 40, 51, 45, 33, 48,
44, 49, 39, 56, 34, 53,
46, 42, 50, 36, 29, 32]
initial_perm = [58, 50, 42, 34, 26, 18, 10, 2,
60, 52, 44, 36, 28, 20, 12, 4,
62, 54, 46, 38, 30, 22, 14, 6,
64, 56, 48, 40, 32, 24, 16, 8,
57, 49, 41, 33, 25, 17, 9, 1,
59, 51, 43, 35, 27, 19, 11, 3,
61, 53, 45, 37, 29, 21, 13, 5,
63, 55, 47, 39, 31, 23, 15, 7]
expand = [32, 1 , 2 , 3 , 4 , 5 , 4 , 5,
6 , 7 , 8 , 9 , 8 , 9 , 10, 11,
12, 13, 12, 13, 14, 15, 16, 17,
16, 17, 18, 19, 20, 21, 20, 21,
22, 23, 24, 25, 24, 25, 26, 27,
28, 29, 28, 29, 30, 31, 32, 1 ]
sbox = [['1110','0100','1101','0001','0010','1111','1011','1000','0011','1010','0110','1100','0101','1001','0000','0111'],
['0000','1111','0111','0100','1110','0010','1101','0001','1010','0110','1100','1011','1001','0101','0011','1000'],
['0100','0001','1110','1000','1101','0110','0010','1011','1111','1100','1001','0111','0011','1010','0101','0000'],
['1111','1100','1000','0010','0100','1001','0001','0111','0101','1011','0011','1110','1010','0000','0110','1101']]
inter_perm = [16, 7, 20, 21, 29, 12, 28, 17,
1, 15, 23, 26, 5, 18, 31, 10,
2, 8, 24, 14, 32, 27, 3, 9,
19, 13, 30, 6, 22, 11, 4, 25]
final_perm = [40, 8, 48, 16, 56, 24, 64, 32,
39, 7, 47, 15, 55, 23, 63, 31,
38, 6, 46, 14, 54, 22, 62, 30,
37, 5, 45, 13, 53, 21, 61, 29,
36, 4, 44, 12, 52, 20, 60, 28,
35, 3, 43, 11, 51, 19, 59, 27,
34, 2, 42, 10, 50, 18, 58, 26,
33, 1, 41, 9, 49, 17, 57, 25]
def split_string_and_make_binary(plain_text):
    '''
    split the text into lengths of 8
    if the last string is not of length 8 note that
    then format using 08b, which makes give it the format 00000000 for 0
    then if we had a length of less then 8 for the last string buff with zeros
    :params:
        plain_text: string that we are breaking up
    return:
        binary_list:list of 64 bit binary numbers
    '''
    splits = len(plain_text) // 8
    extra = len(plain_text) % 8
    split_list = [plain_text[i*8:(i+1)*8] for i in range(splits)]
    if extra > 0:
        split_list.append(plain_text[splits*8:])
    binary_list = [(''.join(format(ord(c), '08b') for c in i)) for i in split_list]
    if extra > 0:
        temp = ''
        for _ in range(8 - extra):
            temp = temp + '00000000'
        temp = temp + binary_list[-1]
        binary_list[-1] = temp
    return binary_list
def encrypt(plain_text, key):
    '''
    take in a message and encrypt it
    :params:
        plain_text: message we want to encrypt
        key: key that we will use for encrypt and decrypt, random for each string
    :return:
        cipher_text: ciphered text
    '''
    split_list = split_string_and_make_binary(plain_text)
    cipher_text = ''
    for i in split_list:
        current = DES(i,key)
        for j in range(8):
            temp = current[j*8:j*8+8]
            cipher_text = cipher_text + chr(int(temp,2))
    return cipher_text
def decrypt(cipher_text, key):
    '''
    take in a ciphered message and decrypt it
    :params:
        cipher_text: message we want to decrypt
        key: key that we will use for encrypt and decrypt, random for each string
    :return:
        plain_text: decrypted message
    '''
    split_list = split_string_and_make_binary(cipher_text)
    plain_text = ''
    for i in split_list:
        current = DES(i,key)
        for j in range(8):
            temp = current[j*8:j*8+8]
            plain_text = plain_text + chr(int(temp,2))
    return plain_text
def DES(number, key):
    '''
    :params:
        number: a 64 bit binary number that represents either a plain_text or ciphered_text
        key: key that we will use for encrypt and decrypt, random for each string
    :return:
    '''
    first_perm = ''
    for i in range(64):
        first_perm = first_perm + number[initial_perm[i] - 1]
    left = first_perm[:32]
    right = first_perm[32:]
    for i in range(16):
        key = key[shift[i]::] + key[:shift[i]:]
        bit_key = ''
        for j in range(48):
            bit_key = bit_key + key[PC[j] - 1]
        expand_right = ''
        for j in range(48):
            expand_right = expand_right + right[expand[j] - 1]
        xor = int(expand_right,2) ^ int(bit_key,2)
        right_xor_key = '{0:0{1}b}'.format(xor,len(expand_right))
        collapse_right = ''
        for j in range(8):
            temp = right_xor_key[j*6:j*6+6]
            find_row = temp[0] + temp[-1]
            row = int(find_row, 2)
            find_col = temp[1:5]
            col = int(find_col, 2)
            res = sbox[row][col]
            collapse_right = collapse_right + res
        permute_right = ''
        for j in range(32):
            permute_right = permute_right + collapse_right[inter_perm[j] - 1]
        xor = int(left,2) ^ int(right,2)
        result = '{0:0{1}b}'.format(xor,len(left))
        left = result
        if i != 15:
            left,right = right,left
    bring_back = left + right
    last_perm = ''
    for i in range(64):
        last_perm = last_perm + bring_back[final_perm[i] - 1]
    return last_perm
def main():
    print('DES Implementation:')
    plain_text = input('Enter text to encrypt (\"Exit\" to quit): ')
    while(plain_text.lower() != 'exit'):
        key = [str(randint(0,1)) for _ in range(56)]
        print(f'Encrypted text: {encrypt(plain_text,key)}')
        print(f'Decrypted text: {decrypt(encrypt(plain_text,key),key)}')
        plain_text = input('Next text: ')
if __name__ == '__main__':
    seed(datetime.now())
    main()
|
[
"colemanbryanj@gmail.com"
] |
colemanbryanj@gmail.com
|
d16d1e28a015971cd399228d22c3657e3dc0d5c5
|
df4d186b49cfe42d9a38e61084eea4b3c8efa66a
|
/photoslider/admin.py
|
53ded041d5327a894b85b102bab04a4695759f85
|
[] |
no_license
|
kaczor3213/Django-Practice
|
b2858e2bcf3dc5fb4fef42cfa8656f62de02781a
|
369e3386e58725c7e5fe1a54b9e52ad2bc74c699
|
refs/heads/master
| 2020-08-07T13:39:18.283363
| 2019-10-11T16:35:39
| 2019-10-11T16:35:39
| 213,473,118
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 110
|
py
|
from django.contrib import admin
from .models import *
# Register your models here.
admin.site.register(Photo)
|
[
"przemyslaw.markiewicz@besidethepark.com"
] |
przemyslaw.markiewicz@besidethepark.com
|
7286f58bf6438bc71997980f9fe2c6195b5975be
|
12a85f18bd3966672e6a009f7a424880983a98c0
|
/base/migrations/0004_auto_20201230_1113.py
|
7c50c6fd96c09c61c8009c1f5cba10b3a8accbf8
|
[] |
no_license
|
strange-hawk/portfolio_template
|
266b5198fb5cde4c8b47ac118f3c06cdb27f3724
|
d61b942328c3f15a99510f331841ee91c525314c
|
refs/heads/master
| 2023-02-08T16:47:54.297135
| 2020-12-30T12:32:51
| 2020-12-30T12:32:51
| 325,543,984
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 406
|
py
|
# Generated by Django 3.1.2 on 2020-12-30 11:13
import ckeditor.fields
from django.db import migrations
class Migration(migrations.Migration):
    dependencies = [
        ('base', '0003_post_slug'),
    ]
    operations = [
        migrations.AlterField(
            model_name='post',
            name='body',
            field=ckeditor.fields.RichTextField(blank=True, null=True),
        ),
    ]
|
[
"201852004@iiitvadodara.ac.in"
] |
201852004@iiitvadodara.ac.in
|
fc7a71b09ff372d2f6c8f55f2101389855085b67
|
896e21e59afe46d8cd1885eb23af6e5b172ad67c
|
/en/src/connective.py
|
2e3b284deadb120c0093eb19cf258265d802105e
|
[] |
no_license
|
lzswangjian/conll2016
|
b8884567901bb2a4576149b1720da4ee42396229
|
5731a4094bf16c9be64727c7ea127105759758c7
|
refs/heads/master
| 2021-01-20T02:54:36.118646
| 2016-06-20T08:34:19
| 2016-06-20T08:34:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,780
|
py
|
# -*- coding: utf-8 -*-
import sys
import os
from common import *
from corpus import Corpus
logs = sys.stderr
FILE_PATH = os.path.dirname(os.path.abspath(__file__))
class Connective():
    """
    Connective Identification Component
    """
    def __init__(self):
        self.train_file = FILE_PATH + '/../data/conll.conn.train'
        self.train_vec_file = FILE_PATH + '/../data/conll.conn.train.vec'
        self.feat_map_file = FILE_PATH + '/../data/conll.conn.map'
        self.test_file = FILE_PATH + '/../data/conll.conn.test'
        self.test_vec_file = FILE_PATH + '/../data/conll.conn.test.vec'
        self.model_file = FILE_PATH + '/../data/conll.conn.model'
        self.predicted_file = FILE_PATH + '/../data/conll.conn.test.predicted'
    def train(self):
        # to_file = open(self.train_file, 'w')
        # self.prepare_data(TRAIN_PARSE_PATH, TRAIN_REL_PATH, 'train', to_file)
        # to_file.close()
        Corpus.train_with_opennlp(self.train_file, self.model_file)
        # gen_svm_train(self.train_file, self.train_vec_file, self.feat_map_file)
        # svm_learn(self.train_vec_file, self.model_file)
    def test(self):
        # to_file = open(self.test_file, 'w')
        # self.prepare_data(DEV_PARSE_PATH, DEV_REL_PATH, 'test', to_file)
        # to_file.close()
        Corpus.test_with_opennlp(self.test_file, self.model_file, self.predicted_file)
        # feat_map = read_svm_map(self.feat_map_file)
        # gen_svm_test(self.test_file, feat_map, self.test_vec_file)
        # svm_classify(self.test_vec_file, self.model_file, self.predicted_file)
    def prepare_data(self, parse_path, rel_path, which, to_file):
        rel_dict = Corpus.read_relations(rel_path)
        for art in Corpus.read_parses(parse_path, rel_dict):
            for rel in art.relations:
                if rel.rel_type != 'Explicit':
                    continue
                rel.article = art
                rel.get_conn_leaves()
            self.print_features(art, which, to_file)
    def eval_data(self, stand_data, predicted_data):
        stand = [x.strip().split()[-1] for x in open(stand_data)]
        predicted = [x.strip().split()[-1] for x in open(predicted_data)]
        # predicted = [float(x.strip().split()[-1]) for x in open(predicted_data)]
        # tmp = []
        # for pred in predicted:
        #     if pred > 0:
        #         tmp.append('1')
        #     else:
        #         tmp.append('0')
        # predicted = tmp
        true_positive = true_negative = false_positive = false_negative = 0
        for i in range(len(stand)):
            if stand[i] == '1' and predicted[i] == '1':
                true_positive += 1
            elif stand[i] == '1' and predicted[i] == '0':
                false_negative += 1
            elif stand[i] == '0' and predicted[i] == '1':
                false_positive += 1
            else:
                true_negative += 1
        precision = true_positive*100.0/(true_positive+false_positive)
        recall = true_positive*100.0/(true_positive+false_negative)
        f1 = 2*precision*recall/(precision+recall)
        acc = (true_positive+true_negative)*100.0/(true_positive+
                                                   true_negative+
                                                   false_positive+
                                                   false_negative)
        print '====================result==================='
        print 'precision:'+str(precision)
        print 'recall:'+str(recall)
        print 'F1:'+str(f1)
        print 'accuracy:'+str(acc)
        print '============================================='
        return [precision, recall, f1, acc]
    def print_features(self, article, which, to_file):
        checked_conns = []
        for sentence in article.sentences:
            all_conns = sentence.check_connectives()
            checked_conns += all_conns
            for conn in all_conns:
                conn_str = '_'.join(n.value for n in conn)
                to_file_line = ''
                to_file_line += 'conn_lc:'+conn_str.lower()+' '
                to_file_line += 'conn:'+conn_str+' '
                conn_pos = '_'.join([x.parent_node.value for x in conn])
                to_file_line += 'lexsyn:conn_POS:'+conn_pos+' '
                prev_leaf = Corpus.get_other_leaf(conn[0], -1, article)
                if prev_leaf is not None:
                    to_file_line += 'lexsyn:with_prev_full:'+prev_leaf.value+'_'+conn_str+' '
                    prev_pos = prev_leaf.parent_node.value
                    to_file_line += 'lexsyn:prev_POS:'+prev_pos+' '
                    to_file_line += 'lexsyn:with_prev_POS:'+prev_pos+'_'+conn_pos.split('_')[0]+' '
                    to_file_line += 'lexsyn:with_prev_POS_full:'+prev_pos+'_'+conn_pos+' '
                next_leaf = Corpus.get_other_leaf(conn[-1], 1, article)
                if next_leaf is not None:
                    to_file_line += 'lexsyn:with_next_full:'+conn_str+'_'+next_leaf.value+' '
                    next_pos = next_leaf.parent_node.value
                    to_file_line += 'lexsyn:next_POS:'+next_pos+' '
                    to_file_line += 'lexsyn:with_next_POS:'+conn_pos.split('_')[-1]+'_'+next_pos+' '
                    to_file_line += 'lexsyn:with_next_POS_full:'+conn_pos+'_'+next_pos+' '
                # Pitler & Nenkova (ACL 09) features:
                # self_cat, parent_cat, left_cat, right_cat, right_VP, right_trace
                res = sentence.get_connective_categories(conn)
                res2 = ['selfCat:'+res[0],
                        'parentCat:'+res[1],
                        'leftCat:'+res[2],
                        'rightCat:'+res[3]]
                if res[4]:
                    res2.append('rightVP')
                if res[5]:
                    res2.append('rightTrace')
                for e in res2:
                    to_file_line += 'syn:'+e+' '
                for e in res2:
                    to_file_line += 'conn-syn:'+'conn:'+conn_str+'-'+e+' '
                for j in range(0, len(res2)):
                    for pair in res2[j+1:]:
                        to_file_line += 'syn-syn:'+res2[j]+'-'+pair+' '
                res3 = sentence.get_syntactic_features(*res[6])
                to_file_line += 'path-self>root:'+res3[0]+' '
                to_file_line += 'path-self>root2:'+res3[1]+' '
                label = '0'
                if conn in article.disc_connectives:
                    label = '1'
                to_file.write(to_file_line+' '+label+'\n')
        return checked_conns
if __name__ == '__main__':
    handler = Connective()
    handler.train()
    handler.test()
    handler.eval_data(handler.test_file, handler.predicted_file)
|
[
"qcl6355@gmail.com"
] |
qcl6355@gmail.com
|
7514df890190e4719f61257b18916137929a571d
|
bc2aec84f43df1918fd43f6a5adaea6280ae1282
|
/movement.py
|
39532c569e05d137ae790913ee0f447677176e54
|
[
"MIT"
] |
permissive
|
pennz/pi-zero-mouse
|
0e2e7ad7dbf250e4817bfa079b05fb57b5ed4168
|
7de6683430b7fecbe8fdf6b21d9818c2305c599c
|
refs/heads/master
| 2020-05-23T11:39:31.956667
| 2020-03-29T11:53:40
| 2020-03-29T11:53:40
| 186,741,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,682
|
py
|
import numpy as np
from numpy.random import normal
import time
import math
class operator():
def __init__(self, g_n='/dev/hidg0'):
self.gadget_name = g_n
self.dev_pf = open(self.gadget_name, "wb", buffering=0)
self._pointer_accuracy = 0 # for debug
self.screen_x_len = 1080
self.screen_y_len = 2340 # not pixel perfect, don't bother
self.x_max=127
self.x_min=-127
self.y_max=127
self.y_min=-127
self.x_pos = -1
self.y_pos = -1
self.button_state = 0
self._blank_state = self._mouse_op_to_binary(0,0, button_click=-1)
self._pasue_time_table_init()
self._debug_pause = 30/1000
self._init_pause = 200/1000
self._pointer_pos_init()
@staticmethod
def _clip(v,min_v, max_v):
if v > max_v:
return max_v
if v < min_v:
return min_v
return v
def _pasue_time_table_init(self):
init_pause_time = 20 # ms
end_pause_time = 30 # ms
mid_pause_time = 0 # fatest
mid_place = 70 # /100
end_place = 100
self._pause_time_table = np.array([
(init_pause_time*(mid_place-i) + mid_pause_time*i)/mid_place \
if i < mid_place else \
(mid_pause_time*(end_place-i) + end_pause_time*(i-mid_place))/(end_place-mid_place) \
for i in range(end_place)])
def _pointer_pos_init(self):
# go to middle to the right (right thrumb)
# first to to right
# then to the bottom
#
# then go middle and left a little
self._to_b_r(self._init_pause)
#time.sleep(1)
self.move_relative(-self.screen_x_len/4, -self.screen_y_len/2, self._init_pause)
#time.sleep(1)
def _update_pos(self,x,y, rel=True):
if rel:
x += self.x_pos
y += self.y_pos
self.x_pos = self._clip(x,0,self.screen_x_len)
self.y_pos = self._clip(y,0,self.screen_y_len)
def _to_b_r(self, pause_time):
for _ in range(19):
self._write_move_relative(self.x_max, self.y_max, sleep_time=pause_time)
self.x_pos=self.screen_x_len
self.y_pos=self.screen_y_len
def move_relative(self,x,y, pause_time):
x_sign = 1 if x > 0 else -1
y_sign = 1 if y > 0 else -1
x_abs = abs(x)
y_abs = abs(y)
x_steps = int(x_abs // self.x_max) # assume x_max and x_min same magnitude.
y_steps = int(y_abs // self.y_max)
if (x_steps < 1 and y_steps < 1):
self._write_move_relative(x, y)
self._update_pos(x,y)
return
x_remain = int(x_abs % self.x_max)
y_remain = int(y_abs % self.y_max)
for _ in range(min(x_steps, y_steps)):
self._write_move_relative(x_sign*self.x_max, y_sign*self.y_max, sleep_time=pause_time)
if x_steps >= y_steps: # go x, still need to x
y_extra_sign = 0
else: # go y
y_extra_sign = 1
for _ in range(abs(x_steps - y_steps)):
self._write_move_relative((1-y_extra_sign)*x_sign*self.x_max, y_extra_sign*y_sign*self.y_max, sleep_time=pause_time)
self._write_move_relative(x_remain*x_sign, y_remain*y_sign, sleep_time=pause_time)
self._update_pos(x,y)
def _mouse_op_to_binary(self,x,y,button_click=-1, button_release=False):
bytes_3 = bytearray()
if button_release:
self.button_state = 0 # reset state
else: # normal click or simply not click
if (button_click >= 0 and button_click < 3):
self.button_state = self.button_state | 1 << button_click
#bytes_3.append(button_report.to_bytes(1,'big',signed=False))
bytes_3.append(self.button_state)
x_report = self._clip(x, self.x_min, self.x_max)
y_report = self._clip(y, self.y_min, self.y_max)
bytes_3.append(x_report.to_bytes(1,'big',signed=True)[0])
bytes_3.append(y_report.to_bytes(1,'big',signed=True)[0])
return bytes_3
def close(self):
if self.dev_pf: self.dev_pf.close()
def _write_move_relative(self, x,y, sleep_time=0):
if sleep_time > 0: time.sleep(sleep_time)
#print( (x,y) )
binary_24_bits = self._mouse_op_to_binary(x,y)
self.dev_pf.write(bytes(binary_24_bits))
# reference https://www.codeproject.com/Tips/759391/Emulate-Human-Mouse-Input-with-Bezier-Curves-and-G
def move_click(self, x, y, button, pause=False, rel=False):
"""
move_click not relative for x,y here
"""
# target x and y, more nature, you can't be that accurate
# so need to consider the boxing box when try to click
x += normal()*self._pointer_accuracy
y += normal()*self._pointer_accuracy
self.move_along_bezier_curve(x,y, pause)
self.click(button)
def move_along_bezier_curve(self, x,y, pause=False, rel=False):
_x = x
_y = y
if rel:
x += self.x_pos
y += self.y_pos
orig_x = self.x_pos
orig_y = self.y_pos
mid_point_x = (x - orig_x)/2
mid_point_y = (y - orig_y)/2
mid_distance = math.sqrt(mid_point_x*mid_point_x+mid_point_y*mid_point_y)
#Find a co-ordinate normal to the straight line between start and end point, starting at the midpoint and normally distributed
#This is reduced by a factor of 4 to model the arc of a right handed user.
bezier_mid_x = int(mid_distance/4 * normal())+mid_point_x+orig_x
bezier_mid_y = int(mid_distance/4 * normal())+mid_point_y+orig_y
l_pause = len(self._pause_time_table)
num_data_points = int(30 * mid_distance*2/700)
num_data_points = self._clip(num_data_points,0, l_pause+1) # trace will minus 1
trace = beizier_curve_quad(orig_x,orig_y,bezier_mid_x, bezier_mid_y,
x, y, n=num_data_points)
trace = [self._clip_in_screen( ( int(round(c[0])),
int(round(c[1])) ) ) for c in trace]
trace = [(trace[i+1][0]-trace[i][0],
trace[i+1][1]-trace[i][1]) for i in range(len(trace)-1)]
if pause:
pause_counts = 20
pause_counts = self._clip(pause_counts,0, num_data_points)
#step_length_for_pause = num_data_points // pause_counts # clipped
# for pause time (speed), just triangle,(easier)(in fact, could also
# try bezier. just use easier one for test
pre_p_t = self._pause_time_table[
np.linspace(0, l_pause-1, pause_counts, endpoint=True, dtype=int)]
pause_time_sum = self._get_pause_time_sum(_x,_y,rel)
sleep_time_normalized = pause_time_sum/1000 * pre_p_t / pre_p_t.sum()
p_t_idx = np.linspace(0, num_data_points-1, pause_counts, endpoint=True, dtype=int)
sleep_time = np.zeros( (num_data_points,) )
for pre_i,idx in enumerate(p_t_idx):
sleep_time[idx] = sleep_time_normalized[pre_i]
for i,c in enumerate(trace):
self._write_move_relative(c[0],c[1], sleep_time=self._debug_pause+sleep_time[i])
#if sleep_time[i] > 0: time.sleep(sleep_time[i]) # sleep more
else:
for c in trace:
self._write_move_relative(c[0],c[1], sleep_time=self._debug_pause)
self._update_pos(_x,_y, rel)
def press_down(self, button):
binary_24_bits = self._mouse_op_to_binary(0,0, button) # button down, up use release_all_buttons
self.dev_pf.write(bytes(binary_24_bits))
def release_all_buttons(self):
binary_24_bits = self._mouse_op_to_binary(0,0,button_release=True) # button down, up use release_all_buttons
self.dev_pf.write(bytes(binary_24_bits))
def click(self, button):
self.press_down(button)
self.release_all_buttons()
def _get_pause_time_sum(self, x,y,rel=True):
if not rel:
x -= self.x_pos
y -= self.y_pos
return math.sqrt(x*x+y*y)/200 * 150 # ms, for 200 pixels
def _clip_in_screen(self, c):
return (self._clip(c[0], 0, self.screen_x_len),
self._clip(c[1], 0, self.screen_y_len))
def beizier_curve_quad(orig_x,orig_y,bezier_mid_x, bezier_mid_y, target_x, target_y, n=100):
#np.array
ts = np.linspace(0,1,n,endpoint=True)
return [( (1-t)**2*orig_x+2*(1-t)*t*bezier_mid_x+t*t*target_x,
(1-t)**2*orig_y+2*(1-t)*t*bezier_mid_y+t*t*target_y ) for t in ts]
if __name__ == "__main__":
o = operator()
o.move_along_bezier_curve(500, 500)
|
[
"fireflysuccess@gmail.com"
] |
fireflysuccess@gmail.com
|
a666d88336202c9bc0120035e3080b32cbd3505a
|
b4b900170acb702b12aa77c2708f59ec2107fa49
|
/blast/RNA_vs_rRNA.py
|
877d165b9820f2247f7c7d4451191c698b9dfe18
|
[] |
no_license
|
wegnerce/python_scripting
|
cf6435d0fa2ad41615c6ff06d44f28449904272c
|
6230d3f0489c67af18db5b13e48e888018b2cceb
|
refs/heads/main
| 2023-04-11T23:28:39.990998
| 2021-04-28T06:43:17
| 2021-04-28T06:43:17
| 362,132,027
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,804
|
py
|
'''
Created on June 18, 2012
@author: Carl-Eric - AG Liesack - Molecular Ecology - MPI Marburg
The given script compares BLAST outputs from PRINSEQ processed .fasta files, which have
been BLASTed against the latest release of SILVA NR SSU/LSU. Based on the comparison the
script generates three output files:
(1) sequence reads assigned to SILVA SSU - XYZ_to_SSU.fasta
(2) sequence reads assigned to SILVA LSU - XYZ_to_LSU.fasta
(3) sequence reads assigned to putative mRNA - XYZ_to_non_rRNA
'''
from Bio.Blast import NCBIXML
from Bio import SeqIO
blastLSU = NCBIXML.parse(open("/home/calle/Desktop/RNA_Seq_20130827/BS7dB_to_LSU.xml","rU")) #xml searched against LSU
blastSSU = NCBIXML.parse(open("/home/calle/Desktop/RNA_Seq_20130827/BS7dB_to_SSU.xml","rU")) #xml searched against SSU
output_LSU = open("/home/calle/Desktop/RNA_Seq_20130827/BS7dB_to_LSU.fasta","w") #seqs assigned to LSU rRNA
output_SSU = open("/home/calle/Desktop/RNA_Seq_20130827/BS7dB_to_SSU.fasta","w") #seqs assigned to SSU rRNA
output_mRNA = open("/home/calle/Desktop/RNA_Seq_20130827/BS7dB_to_non_rRNA.fasta","w") #seqs assigned to mRNA
fasta_handle=open("/home/calle/Desktop/RNA_Seq_20130827/BS7d_controlB_prinseq_good.fasta","rU") #fasta used in BLAST search
i=0
ii=0
seq_list0=[]
bits_against_LSU=[]
## Sequences have matches in LSU
for record0 in blastLSU: ## LSU
if record0.alignments:
ab=record0.query
cd=ab[0:14]
seq_list0.append(cd)
bits_against_LSU.append(record0.alignments[0].hsps[0].bits)
i=i+1
else:
ii=ii+1
print "LSU:",i, ii
## Sequences have matches in SSU
j=0
jj=0
seq_list1=[]
bits_against_SSU=[]
ab=[]
for record1 in blastSSU: ## SSU
if record1.alignments:
ab=record1.query
cd=ab[0:14]
seq_list1.append(cd)
bits_against_SSU.append(record1.alignments[0].hsps[0].bits)
j=j+1
else:
jj=jj+1
print "SSU:",j,jj
count_LSU_SSU=0
count_SSU_LSU=0
count_only_LSU=0
count_SSU=0
p=0
LSU=[]
SSU=[]
for name in seq_list0:
# print name
if name in seq_list1:
x=seq_list1.index(name)
if bits_against_LSU[p] > bits_against_SSU[x]:
count_LSU_SSU = count_LSU_SSU + 1
LSU.append(name)
else:
count_SSU_LSU = count_SSU_LSU + 1
else:
count_only_LSU=count_only_LSU+1
LSU.append(name)
p=p+1
print "##########################################################################"
print "LSU:", p
print "LSU > SSU:", count_LSU_SSU
print "SSU > LSU:", count_SSU_LSU
print "only found in LSU:", count_only_LSU
count_LSU_SSU=0
count_SSU_LSU=0
count_only_SSU=0
count_SSU=0
p=0
for name in seq_list1:
if name in seq_list0:
x=seq_list0.index(name)
if bits_against_SSU[p] >= bits_against_LSU[x]:
count_SSU_LSU = count_SSU_LSU + 1
SSU.append(name)
else:
count_LSU_SSU = count_LSU_SSU + 1
else:
count_only_SSU=count_only_SSU+1
SSU.append(name)
p=p+1
print "########################################################################"
print "SSU:", p
print "SSU > LSU:", count_SSU_LSU
print "LSU > SSU:", count_LSU_SSU
print "only found in SSU:", count_only_SSU
print len(LSU)
print len(SSU)
z=0
LSU_rRNA=[]
SSU_rRNA=[]
non_rRNA=[]
for record in SeqIO.parse(fasta_handle,"fasta"):
if record.id in LSU:
LSU_rRNA.append(record)
elif record.id in SSU:
SSU_rRNA.append(record)
else:
non_rRNA.append(record)
SeqIO.write(LSU_rRNA, output_LSU, "fasta")
SeqIO.write(SSU_rRNA, output_SSU, "fasta")
SeqIO.write(non_rRNA, output_mRNA, "fasta")
print "Found %i LSU rRNA" % len(LSU_rRNA)
print "Found %i SSU rRNA" % len(SSU_rRNA)
print "Found %i non rRNA" % len(non_rRNA)
blastLSU.close()
blastSSU.close()
output_LSU.close()
output_SSU.close()
output_mRNA.close()
xml_handle = open("/home/calle/Desktop/RNA_Seq_20130827/BS7dB_to_SSU.xml","rU") #XML blasted against SILVA SSU
output_handle0=open("/home/calle/Desktop/RNA_Seq_20130827/BS7dB_to_SSU_ribotag.xml","w") #new XML assigned to SSU rRNA
handle0 = open("/home/calle/Desktop/RNA_Seq_20130827/BS7dB_to_SSU.fasta","rU") #seq of SSU rRNA
target_0=[]
archaea_seq=[]
n=0
m=0
str_list=[]
query_id=[]
#########################################################################
# import target sequences as fasta files in which we are interested #
# put sequence id in list(target_no) #
#########################################################################
no_target_seq0=0
for record in SeqIO.parse(handle0,"fasta"):
target_0.append(record.id)
no_target_seq0=no_target_seq0+1
no_target_seq1=0
print "No. of target sequence 0:", no_target_seq0
########################################################################
# write blastout of target sequences #
########################################################################
no_query_in_xml=0
name=[]
only_id={}
no_target_0=0
#no_target_1=0
#no_target_2=0
i=0
judge=3
for line in xml_handle.readlines():
str_line=line.encode('ascii','ignore')
if i < 22:
output_handle0.write(str_line)
i=i+1
str_list=str_line.split(">")
if str_list[0] == " <Iteration_query-def":
query_id=str_list[1].split("<")
name=query_id[0]
only_id=name[0:14]
no_query_in_xml=no_query_in_xml+1
if only_id in target_0:
judge = 0
no_target_0=no_target_0+1
else:
judge = 3
if judge == 0:
output_handle0.write(str_line)
print "no_of_queries_in_original xml output:", no_query_in_xml
print "no. of query in target sequence files:", no_target_0
handle0.close()
xml_handle.close()
output_handle0.close()
|
[
"noreply@github.com"
] |
wegnerce.noreply@github.com
|
e91b8bf18bf8bb434bac931c42dd942974fb28cc
|
273e20078706cfd80822144283f0f0da5e347239
|
/douban/items.py
|
336cbf404b37855678af2ea84f6432a917532c8f
|
[] |
no_license
|
lvshaobo/scrapy_cookie
|
00db97399b0c5622fa77e15578f406be94e86c87
|
a4c298c00a41d408fc7ba0e6b7e9aa159dc99ac3
|
refs/heads/master
| 2021-01-17T22:20:53.330544
| 2016-09-21T06:00:06
| 2016-09-21T06:00:06
| 68,784,051
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 544
|
py
|
# -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html
import scrapy
class DoubanItem(scrapy.Item):
# define the fields for your item here like:
title = scrapy.Field() # Web Title
name = scrapy.Field() # Job Title
corp = scrapy.Field() # Company Name
addr = scrapy.Field() # Address
salary = scrapy.Field() # Salary
popu = scrapy.Field() # Recruiting Numbers
|
[
"lvshaoboftd@gmail.com"
] |
lvshaoboftd@gmail.com
|
bbea56d9d779d079e4776b06ad59756cdf7c935a
|
f60cc85e3f150723e6589d100618df41ba2c1c3b
|
/LAB2/queensS.py
|
1e812e4d931832c38027b0b0e0dc4533ded9e9d2
|
[] |
no_license
|
GustafWallstrom/TNM096-Labs
|
b4d0d1a143bc3fe374a4ee37600a2e4b4f39c4ed
|
0de1608ac3aec9f4d3a7ef843c1bb25490124409
|
refs/heads/master
| 2021-05-20T20:35:54.758637
| 2020-05-13T09:44:05
| 2020-05-13T09:44:05
| 252,408,856
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 720
|
py
|
# Solve the nQueens problem by using depth-first search
from time import time
from datetime import timedelta
from aima.search import depth_first_tree_search, NQueensProblem, Problem
def secondsToStr(t):
    return str(timedelta(seconds=t))
def now():
    return secondsToStr(time())
# 1. Set up the problem and set the starting time
n = 30
print "\nStarting at at "+now()[12:20]
print "problem with n =",n
start = time()
# 2. Solve the NQueens problem with depth-first search
solution = depth_first_tree_search(NQueensProblem(n))
sol = str(solution)
print "Solution: ", sol[5:len(sol)-1]
# 3. Print time elapsed
end = time()
elapsed = end-start
print "\nElapsed time ", secondsToStr(elapsed)[0:15], "\n"
|
[
"46966247+GustafWallstrom@users.noreply.github.com"
] |
46966247+GustafWallstrom@users.noreply.github.com
|
b5fcaefc1ee465b5d6d8da03f4dcbeaf6a246b6a
|
da73042e63a17f2288747d426b718d6e516da9d4
|
/fast_genetic_algorithm.py
|
8267dfd653d142afe27f60084d0e1837080e12d5
|
[] |
no_license
|
AntipovDen/XdivK
|
002ee3fdf5cbbad7a6e1428008aadfe3ff5a29c3
|
2ce70f7151e302c9e1dded83e12d184f49c3e6b3
|
refs/heads/master
| 2021-01-20T02:08:13.593075
| 2018-11-08T10:39:15
| 2018-11-08T10:39:15
| 89,378,193
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,233
|
py
|
from math import factorial
from random import random
from sys import argv
from scipy.stats import rv_discrete
from numpy import arange, mean
# from matplotlib import pyplot as plt
RUNS = 100
run_number = 0
logfile = None
def get_statistics_fga(n, k, beta):
    global run_number
    probabilities = 1 / arange(1, (n + 1) // 2) ** beta
    probabilities /= probabilities.sum()
    dist = rv_discrete(values=(range(1, (n + 1) // 2), probabilities)).rvs
    run_number = 1
    return mean([run(n, k, dist) for _ in range(RUNS)])
def get_statistics_opo(n, k):
    global run_number
    opt_prob = factorial(k) ** (1/k)
    run_number = 1
    return mean([run(n, k, lambda: opt_prob) for _ in range(RUNS)])
def run(n, k, dist): #seems like it works
    global logfile, run_number
    iterations = 0
    sum_x = n - k
    logfile.write('run {}\n'.format(run_number))
    logfile.flush()
    run_number += 1
    while sum_x < n:
        iterations += 1
        if iterations % 1000 == 0:
            logfile.write('{}\n'.format(iterations))
            logfile.flush()
        alpha = dist()
        mutation = 0
        for i in range(n - sum_x): # flipping zeros
            if random() < alpha / n:
                mutation += 1
        for i in range(sum_x):
            if random() < alpha / n: # flipping ones
                mutation -= 1
        if sum_x + mutation >= n - k:
            sum_x += mutation
    return iterations
n = int(argv[1])
if len(argv) > 2:
    beta = float(argv[2])
    # for beta in 1, 1.1, 1.5, 2, 3:
    with open('fga_{}_{:1.1f}.out'.format(n, beta), 'w') as f:
        logfile = open('fga_{}_{:1.1f}.log'.format(n, beta), 'w')
        for k in 2, 3, 5, 10:
            logfile.write('k = {}\n'.format(k))
            logfile.flush()
            f.write(str(get_statistics_fga(n, k, beta)))
            f.write(' ')
            f.flush()
        logfile.close()
else:
    with open('opo_{}.out'.format(n), 'w') as f:
        logfile = open('opo_{}.log'.format(n), 'w')
        for k in 2, 3, 5, 10:
            logfile.write('k = {}\n'.format(k))
            logfile.flush()
            f.write(str((get_statistics_opo(n, k))))
            f.write(' ')
            f.flush()
        logfile.close()
|
[
"antipovden@yandex.ru"
] |
antipovden@yandex.ru
|
8e29269f55422007a8fa6a08128916caecc6a06e
|
f8fa95af388eead9af3c278245592a78b1cddb92
|
/models/Rules.py
|
8816b781a73b660978078d17724137eecab60d5d
|
[] |
no_license
|
Toohk/chess
|
b11403f7d2d0e1c87e245058f546a4e2736ea21f
|
637bdde6801c8ed256f04688902a6d67fc08d650
|
refs/heads/main
| 2023-02-09T02:19:20.305993
| 2020-12-26T13:39:48
| 2020-12-26T13:39:48
| 324,562,678
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 217
|
py
|
class Rules:
    def __init__(self):
        self.num_pieces = {
            'king': 1,
            'queen': 1,
            'bishop': 2,
            'knight': 2,
            'rook': 2,
            'pawn': 8
        }
|
[
"thomasdufayet3@gmail.com"
] |
thomasdufayet3@gmail.com
|
21fdb5bc849341c7bb79fd762910a63ed603dbc4
|
4bf615df3e09532222f1ca024a3a71f490d3c504
|
/4. Colecciones en Python/2_Tuplas.py
|
fd6b23f42f13ef3e2a753b67feac3fd72ebbf76c
|
[] |
no_license
|
ErickTocasca/PROYECTO_MINSUP_UNCP
|
72180c875bd973760fa6ddb3aeed5667de6c303d
|
6647b738d8bd355b7cda4faad8fc58e16e2b453b
|
refs/heads/main
| 2023-04-28T04:32:42.853717
| 2021-05-02T15:57:03
| 2021-05-02T15:57:03
| 332,873,502
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 255
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Sun Jan 24 21:54:33 2021
@author: egt_d
"""
mina = ("geologia", "costos", "perforacion y voladura")
print(mina)
print(mina.count("geologia"))
minalist = list(mina)
minalist.append("metalurgia")
print(minalist)
|
[
"60368138+ErickTocasca@users.noreply.github.com"
] |
60368138+ErickTocasca@users.noreply.github.com
|
f300d8c3ad2c73285efe5a41a77810407745f6ab
|
109d21430a292a9f51c18171bff1b5bdc9c480da
|
/docxcompose-script - Copy.py
|
c1fb6f5730697ae74362582788caadadab72eea6
|
[] |
no_license
|
PY-Venom/ScreenshotToReport
|
0a1f1725e3ebf66a431898f5792960c55505d0ea
|
2800b3b3a790d40dfd62dd279ecaaaca33c691cf
|
refs/heads/master
| 2020-04-30T21:11:40.475432
| 2019-03-22T07:09:29
| 2019-03-22T07:09:29
| 177,088,103
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 483
|
py
|
#!"c:\users\lathan larue\appdata\local\programs\python\python37-32\python.exe"
# EASY-INSTALL-ENTRY-SCRIPT: 'docxcompose==1.0.0a16','console_scripts','docxcompose'
__requires__ = 'docxcompose==1.0.0a16'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('docxcompose==1.0.0a16', 'console_scripts', 'docxcompose')()
    )
|
[
"noreply@github.com"
] |
PY-Venom.noreply@github.com
|
d35885251fdd0e6976cf51d67620328aa0ebf072
|
f69e44d26eafd7293b63de0a5037b4f969bad333
|
/hello_flask_1_1.py
|
295401fc166332c29e48b1f654a941ed37b587d1
|
[] |
no_license
|
astroshima/hiflask
|
ea145e0832022c65f520b4a01f6cc75a3caa0b44
|
e7b4d3491e1e44705a0f5151d1d53b1ac126a365
|
refs/heads/master
| 2020-06-26T14:10:41.034749
| 2019-08-17T00:13:31
| 2019-08-17T00:13:31
| 199,654,478
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 447
|
py
|
from flask import Flask
app=Flask(__name__)
@app.route('/')
def hello():
    return 'Hello world from Flask!'
print('Naziv aktivnog modula je:', __name__)
if __name__=='__main__': # ako je ovaj program startovan komandom python3
    app.run() # pokreni veb server i aplikaciju
"""
Podešavanje promenljive okruženja i startovanje veb servera i aplikacije iz komandne linije:
$ env FLASK_APP=hello_flask_1_1.py flask run
"""
|
[
"noreply@github.com"
] |
astroshima.noreply@github.com
|
0abdf8254d9f4f1e0d295afae5919f7d4f839984
|
fa4b4d30755a9e5a22b590906ff03c48ffbde7e3
|
/UseCases/Airbus/A_1/A_1.py
|
339f18dd38e355fd7bd01f0add374102c3c0f99b
|
[] |
no_license
|
COMPOSELECTOR/Composelector
|
c7f33c6fe805f7ff3d5f97eace5c452ed8b12f97
|
b852a7585e8ff48e93f062f552d514013a3dcf6d
|
refs/heads/master
| 2021-07-09T17:07:54.419237
| 2020-07-16T11:45:43
| 2020-07-16T11:45:43
| 158,224,484
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 14,085
|
py
|
import sys
sys.path.extend(['/home/nitram/Documents/work/MUPIF/mupif'])
from mupif import *
import Pyro4
import logging
log = logging.getLogger()
import time as timeT
import mupif.Physics.PhysicalQuantities as PQ
debug = True
if not debug:
import ComposelectorSimulationTools.MIUtilities as miu
log = logging.getLogger()
nshost = '172.30.0.1'
nsport = 9090
hkey = 'mupif-secret-key'
mul2JobManName='MUL2.JobManager@UseCase1'
class Airbus_Workflow_1(Workflow.Workflow):
def __init__(self, metaData={}):
"""
Initializes the workflow. As the workflow is non-stationary, we allocate individual
applications and store them within a class.
"""
log.info('Setting Workflow basic metadata')
MD = {
'Name': 'Airbus Case',
'ID': '1_2_2',
'Description': 'Simulation of ',
'Model_refs_ID': ['xy', 'xy'],
'Inputs': [
{'Type': 'mupif.Property', 'Type_ID': 'mupif.PropertyID.PID_YoungModulus1', 'Name': 'E_1',
'Description': 'Young modulus 1', 'Units': 'MPa', 'Required': True},
{'Type': 'mupif.Property', 'Type_ID': 'mupif.PropertyID.PID_YoungModulus2', 'Name': 'E_2',
'Description': 'Young modulus 2', 'Units': 'MPa', 'Required': True},
{'Type': 'mupif.Property', 'Type_ID': 'mupif.PropertyID.PID_YoungModulus3', 'Name': 'E_3',
'Description': 'Young modulus 3', 'Units': 'MPa', 'Required': True},
{'Type': 'mupif.Property', 'Type_ID': 'mupif.PropertyID.PID_PoissonRatio12', 'Name': 'nu_12',
'Description': 'Poisson\'s ration 12', 'Units': 'None', 'Required': True},
{'Type': 'mupif.Property', 'Type_ID': 'mupif.PropertyID.PID_PoissonRatio13', 'Name': 'nu_13',
'Description': 'Poisson\'s ration 13', 'Units': 'None', 'Required': True},
{'Type': 'mupif.Property', 'Type_ID': 'mupif.PropertyID.PID_PoissonRatio23', 'Name': 'nu_23',
'Description': 'Poisson\'s ration 23', 'Units': 'None', 'Required': True},
{'Type': 'mupif.Property', 'Type_ID': 'mupif.PropertyID.PID_ShearModulus12', 'Name': 'G_12',
'Description': 'Shear modulus 12', 'Units': 'MPa', 'Required': True},
{'Type': 'mupif.Property', 'Type_ID': 'mupif.PropertyID.PID_ShearModulus13', 'Name': 'G_13',
'Description': 'Shear modulus 13', 'Units': 'MPa', 'Required': True},
{'Type': 'mupif.Property', 'Type_ID': 'mupif.PropertyID.PID_ShearModulus23', 'Name': 'G_23',
'Description': 'Shear modulus 23', 'Units': 'MPa', 'Required': True},
{'Type': 'mupif.Property', 'Type_ID': 'mupif.PropertyID.PID_Density', 'Name': 'Rho',
'Description': 'Density', 'Units': 'ton/mm**2', 'Required': True},
],
'Outputs': [
{'Type': 'mupif.Property', 'Type_ID': 'mupif.PropertyID.PID_Mass', 'Name': 'Mass',
'Description': 'Mass of the structure', 'Units': 'kg'},
{'Type': 'mupif.Property', 'Type_ID': 'mupif.PropertyID.PID_CriticalLoadLevel', 'Name': 'F_crit',
'Description': 'Buckling load of the structure', 'Units': 'kN'},
]
}
super(Airbus_Workflow_1, self).__init__(metaData=MD)
self.updateMetadata(metaData)
#list of recognized input porperty IDs
self.myInputPropIDs = [PropertyID.PID_YoungModulus1,PropertyID.PID_YoungModulus2, PropertyID.PID_YoungModulus3, PropertyID.PID_PoissonRatio12, PropertyID.PID_PoissonRatio13,PropertyID.PID_PoissonRatio23, PropertyID.PID_ShearModulus12, PropertyID.PID_ShearModulus13, PropertyID.PID_ShearModulus23, PropertyID.PID_Density]
# list of compulsory IDs
self.myCompulsoryPropIDs = self.myInputPropIDs
#list of recognized output property IDs
self.myOutPropIDs = [PropertyID.PID_CriticalLoadLevel, PropertyID.PID_Mass]
#dictionary of input properties (values)
self.myInputProps = {}
#dictionary of output properties (values)
self.myOutProps = {}
self.mul2Solver = None
def initialize(self, file='', workdir='', targetTime=PQ.PhysicalQuantity(0., 's'), metaData={}, validateMetaData=True, **kwargs):
#locate nameserver
ns = PyroUtil.connectNameServer(nshost, nsport, hkey)
#connect to JobManager running on (remote) server
self.mul2JobMan = PyroUtil.connectJobManager(ns, mul2JobManName,hkey)
#allocate the Mul2 remote instance
try:
self.mul2Solver = PyroUtil.allocateApplicationWithJobManager( ns, self.mul2JobMan, None, hkey, sshContext=None)
log.info('Created mul2 job')
except Exception as e:
log.exception(e)
else:
if ((self.mul2Solver is not None)):
mul2SolverSignature=self.mul2Solver.getApplicationSignature()
log.info("Working mul2 solver on server " + mul2SolverSignature)
else:
log.debug("Connection to server failed, exiting")
super(Airbus_Workflow_1, self).initialize(file=file, workdir=workdir, targetTime=targetTime, metaData=metaData, validateMetaData=validateMetaData, **kwargs)
# To be sure update only required passed metadata in models
passingMD = {
'Execution': {
'ID': self.getMetadata('Execution.ID'),
'Use_case_ID': self.getMetadata('Execution.Use_case_ID'),
'Task_ID': self.getMetadata('Execution.Task_ID')
}
}
workDir = self.mul2Solver.getWorkDir() +'/'+self.mul2Solver.getJobID()
self.mul2Solver.initialize(metaData=passingMD, workdir = workDir)
def setProperty(self, property, objectID=0):
propID = property.getPropertyID()
if (propID in self.myInputPropIDs):
self.myInputProps[propID]=property
else:
raise APIError.APIError('Unknown property ID')
def getProperty(self, propID, time, objectID=0):
if (propID in self.myOutPropIDs):
return self.myOutProps[propID]
else:
raise APIError.APIError ('Unknown property ID', propID)
def solveStep(self, istep, stageID=0, runInBackground=False):
for cID in self.myCompulsoryPropIDs:
if cID not in self.myInputProps:
raise APIError.APIError (self.getApplicationSignature(), ' Missing compulsory property ', cID)
# mul2
try:
self.mul2Solver.setProperty(self.myInputProps[PropertyID.PID_YoungModulus1])
self.mul2Solver.setProperty(self.myInputProps[PropertyID.PID_YoungModulus2])
self.mul2Solver.setProperty(self.myInputProps[PropertyID.PID_YoungModulus3])
self.mul2Solver.setProperty(self.myInputProps[PropertyID.PID_PoissonRatio12])
self.mul2Solver.setProperty(self.myInputProps[PropertyID.PID_PoissonRatio13])
self.mul2Solver.setProperty(self.myInputProps[PropertyID.PID_PoissonRatio23])
self.mul2Solver.setProperty(self.myInputProps[PropertyID.PID_ShearModulus12])
self.mul2Solver.setProperty(self.myInputProps[PropertyID.PID_ShearModulus13])
self.mul2Solver.setProperty(self.myInputProps[PropertyID.PID_ShearModulus23])
self.mul2Solver.setProperty(self.myInputProps[PropertyID.PID_Density])
except Exception as err:
print ("Setting Mul2 params failed: " + repr(err));
self.terminate()
try:
# solve mul2 part
log.info("Running mul2")
self.mul2Solver.solveStep(None)
## set the desired properties
self.myOutProps[PropertyID.PID_CriticalLoadLevel] = self.mul2Solver.getProperty(PropertyID.PID_CriticalLoadLevel, 0.0)
self.myOutProps[PropertyID.PID_Mass] = self.mul2Solver.getProperty(PropertyID.PID_Mass, 0.0)
except Exception as err:
print ("Error:" + repr(err))
self.terminate()
def getCriticalTimeStep(self):
# determine critical time step
return PQ.PhysicalQuantity(1.0, 's')
def terminate(self):
#self.thermalAppRec.terminateAll()
self.mul2Solver.terminate()
super(Airbus_Workflow_1, self).terminate()
def getApplicationSignature(self):
return "Composelector workflow 1.0"
def getAPIVersion(self):
return "1.0"
def workflow(inputGUID, execGUID):
# Define execution details to export
if not debug:
execPropsToExp = {"ID": "",
"Use case ID": ""}
# Export execution information from database
ExportedExecInfo = miu.ExportData("MI_Composelector", "Modelling tasks workflows executions", execGUID,
execPropsToExp)
execID = ExportedExecInfo["ID"]
useCaseID = ExportedExecInfo["Use case ID"]
# Define properties:units to export
propsToExp = {"Axial Young's modulus": "MPa",
"In-plane Young's modulus": "MPa",
"E3": "MPa",
"In-plane shear modulus": "MPa",
"Transverse shear modulus": "MPa",
"G23": "MPa",
"In-plane Poisson's ratio": "",
"Transverse Poisson's ratio": "",
"NU23": ""
}
# Export data from database
ExportedData = miu.ExportData("MI_Composelector", "Inputs-Outputs", inputGUID, propsToExp, miu.unitSystems.METRIC)
# Assign exported properties to variables
E1 = ExportedData["Axial Young's modulus"]
E2 = ExportedData["In-plane Young's modulus"]
E3 = ExportedData["E3"]
G12 = ExportedData["In-plane shear modulus"]
G13 = ExportedData["Transverse shear modulus"]
G23 = ExportedData["G23"]
nu12 = ExportedData["In-plane Poisson's ratio"]
nu13 = ExportedData["Transverse Poisson's ratio"]
nu23 = ExportedData["NU23"]
else:
E1 = 100.e3
E2 = 6.e3
E3 = 6.e3
G12 = 3.e3
G13 = 3.e3
G23 = 3.e3
nu12 = 0.35
nu13 = 0.35
nu23 = 0.35
rho = 1.58e-9
try:
workflow = Airbus_Workflow_1()
workflowMD = {
'Execution': {
'ID': '1',
'Use_case_ID': '1_1',
'Task_ID': '1'
}
}
workflow.initialize(targetTime=PQ.PhysicalQuantity(1., 's'), metaData=workflowMD)
# set workflow input data
# Submitting new material properties
pE1 = workflow.setProperty(Property.ConstantProperty(E1, PropertyID.PID_YoungModulus1, ValueType.Scalar, 'MPa'))
pE2 = workflow.setProperty(Property.ConstantProperty(E2, PropertyID.PID_YoungModulus2, ValueType.Scalar, 'MPa'))
pE3 = workflow.setProperty(Property.ConstantProperty(E3, PropertyID.PID_YoungModulus3, ValueType.Scalar, 'MPa'))
pnu12 = workflow.setProperty(Property.ConstantProperty(nu12, PropertyID.PID_PoissonRatio12, ValueType.Scalar, PQ.getDimensionlessUnit()))
pnu13 = workflow.setProperty(Property.ConstantProperty(nu13, PropertyID.PID_PoissonRatio13, ValueType.Scalar, PQ.getDimensionlessUnit()))
pnu23 = workflow.setProperty(Property.ConstantProperty(nu23, PropertyID.PID_PoissonRatio23, ValueType.Scalar, PQ.getDimensionlessUnit()))
pG12 = workflow.setProperty(Property.ConstantProperty(G12, PropertyID.PID_ShearModulus12, ValueType.Scalar, 'MPa'))
pG13 = workflow.setProperty(Property.ConstantProperty(G13, PropertyID.PID_ShearModulus13, ValueType.Scalar, 'MPa'))
pG23 = workflow.setProperty(Property.ConstantProperty(G23, PropertyID.PID_ShearModulus23, ValueType.Scalar, 'MPa'))
pRho = workflow.setProperty(Property.ConstantProperty(rho, PropertyID.PID_Density, ValueType.Scalar, 'ton/mm**3'))
# solve workflow
workflow.solve()
# get workflow outputs
time = PQ.PhysicalQuantity(1.0, 's')
# collect MUL2 outputs
#KPI 1-1 weight
weight = workflow.getProperty(PropertyID.PID_Mass, time).inUnitsOf('kg').getValue()
log.info("Requested KPI : Weight: " + str(weight) + ' kg')
#KPI 1-2 buckling load
bucklingLoad = workflow.getProperty(PropertyID.PID_CriticalLoadLevel, time).inUnitsOf('N').getValue()
log.info("Requested KPI : Buckling Load: " + str(bucklingLoad) + ' N')
workflow.terminate()
log.info("Process complete")
if not debug:
# Importing output to database
ImportHelper = miu.Importer("MI_Composelector", "Inputs-Outputs", ["Inputs/Outputs"])
ImportHelper.CreateAttribute("Execution ID", execID, "")
ImportHelper.CreateAttribute("Buckling Load", buckLoad, "N")
return ImportHelper
except APIError.APIError as err:
print ("Mupif API for Scenario error: " + repr(err))
workflow.terminate()
except Exception as err:
print ("Error: " + repr(err))
workflow.terminate()
except:
print ("Unknown error.")
workflow.terminate()
if __name__=='__main__':
workflow(0,0)
|
[
"nitramkaroh@seznam.cz"
] |
nitramkaroh@seznam.cz
|
76cd052ef18943eae6f0c461c344715be186a795
|
3024cafafbfc75193105af7f225d3b12eb2aea46
|
/DjangoProjects/project51/project49/urls.py
|
56c44854fe3856264652ba6f6bf223a7ab1c345e
|
[] |
no_license
|
jaishankarg24/Django-Rest-Framework
|
33266f6825d51abb8a512426baedf59f2ee957c8
|
809ee9208ffbef4202a8f4058a84f5322793af52
|
refs/heads/master
| 2023-03-02T20:56:38.051060
| 2021-02-12T05:37:48
| 2021-02-12T05:37:48
| 338,233,009
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 847
|
py
|
"""project49 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, re_path
from testapp import views
urlpatterns = [
path('admin/', admin.site.urls),
re_path(r'^(?P<pk>\d+)/$', views.BookClass.as_view()),
]
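# Illustrative only: this URLconf imports BookClass from testapp.views, but the
# view itself is not shown in this file. A hypothetical sketch of what such a
# class-based view might look like (names below are assumptions, not taken
# from the repository):
#
#   from rest_framework.generics import RetrieveAPIView
#
#   class BookClass(RetrieveAPIView):
#       queryset = Book.objects.all()
#       serializer_class = BookSerializer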
|
[
"jaishankarg24@gmail.com"
] |
jaishankarg24@gmail.com
|
7f1a854cba3d9439a93ad06764b73667bb5fc299
|
bb04d91ffb856c6660f7066f091c4179ed505456
|
/Elementary/FirstWordSimplified.py
|
bd7559b159197dd17e8d85c97cf15f0f77462719
|
[] |
no_license
|
tirsodelalamo/Checkio-Python
|
b7ca52072a60b6d90d2f39a6577c89528faaf3a0
|
1e791201da223cf1f43271cc502733e74a874f6c
|
refs/heads/master
| 2021-04-03T14:04:54.457640
| 2020-05-14T10:56:52
| 2020-05-14T10:56:52
| 251,434,379
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 505
|
py
|
def first_word(text: str) -> str:
"""
returns the first word in a given text.
"""
# your code here
return text.split()[0]
if __name__ == '__main__':
print("Example:")
print(first_word("Hello world"))
# These "asserts" are used for self-checking and not for an auto-testing
assert first_word("Hello world") == "Hello"
assert first_word("a word") == "a"
assert first_word("hi") == "hi"
print("Coding complete? Click 'Check' to earn cool rewards!")
|
[
"tirsodelalamomartin@gmail.com"
] |
tirsodelalamomartin@gmail.com
|
2e817e3cfa827b9fe6f262cb0092f0009813a60b
|
98dd8499f692775972665752c1e5be5dccc2ea6d
|
/scripts/uknPSF_ANN.py
|
5c4a1758a947036de9fd131fee5e6db5e4680993
|
[
"MIT"
] |
permissive
|
kuntzer/binfind
|
850bc51342a8b36def775e9a683706ddb46e5b11
|
28f9cf9474e6b39a55a1a22d19ca8131a0408c84
|
refs/heads/master
| 2021-01-12T14:45:49.617155
| 2017-05-30T15:02:27
| 2017-05-30T15:02:27
| 72,079,802
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,815
|
py
|
import itertools
import matplotlib.pyplot as plt
import numpy as np
import os
import binfind
import utils as u
import logging
logging.basicConfig(format='%(asctime)s %(levelname)s: %(name)s(%(funcName)s): %(message)s', level=logging.DEBUG)
n_exposures = 4
binfind.plots.figures.set_fancy()
###################################################################################################
### PARAMETERS
## Simulation parameters
# Minimum separation of the stars to be qualified as binaries
crits_angsep = np.linspace(0.001, 0.015, 15)
# Max contrast to be qualified as binaries
crits_contrast = np.linspace(0.1, 1.5, 15)
# Number of times to do the whole analysis
n_training = 25
n_validation = 5
n_test = 15
# Number of stars per field
n_stars = 280
# Bin fraction to reach
bin_fraction = 0.3
# Outdir
outdir = 'data/binfind_percent_meas/ukn_PSF_ann'
## Observables and quality parameters
# Stellar catalogue path and parameters
star_catalogues_path = '/home/kuntzer/workspace/Blending_PSF_Euclid/data/BGM/'
l, b = (180, 15)
# Path to interpolation file
fname_interpolation = 'data/measurements/interpolations.pkl'
# Path to fiducial position in x y of psf file
fname_fiducial = 'psf_fields/psf_ellip_gs.dat'
# Brightest magnitude observable
m_min = 18
# faintest mag observable
m_max = 24.5
## Exposures parameters
# Number of stars per fields
# What are the requirements on the reconstruction for a single star?
ei_max_error = 1e-2 # = 1% error
r2_max_error = 5e-2 # = 5% error
# What is the wanted False Positive Rate ? (in fraction)
thr_fpr = 0.1
recovery_n_inter = 2
recovery_n_neighbour = 10
# Thresholds for the star/multiple star classification
thresholds = np.logspace(-8, 0, 1000)
# Show figures after each criteria ?
show = False
###################################################################################################
### INITIALISATION
if len(crits_angsep) == 1 and len(crits_contrast) == 1:
single_exp = True
else:
f1_per_crit = []
lim_per_crit = []
single_exp = False
criteria = list(itertools.product(*[crits_angsep, crits_contrast]))
#data = blutil.load_bmg(os.path.join(star_catalogues_path, 'star_field_BGM_i_%d_%d_%d' % (l, b, fid)), main_sequence=True)
previous_sep = -1
data = None
psf_positions = np.loadtxt(fname_fiducial)
x_psf = psf_positions[:,0]
y_psf = psf_positions[:,1]
min_x_psf = np.amin(x_psf)
min_y_psf = np.amin(y_psf)
max_x_psf = np.amax(x_psf)
max_y_psf = np.amax(y_psf)
euclid = binfind.simulation.Observations(ei_max_error, r2_max_error, fname_interpolation, fname_fiducial)
for iix, (crit_angsep, crit_contrast) in enumerate(criteria):
mlparams = binfind.classifier.MLParams(name = "{:d}".format(int(crit_angsep * 1e3)),
features = range(15), labels = range(1))
toolparams = binfind.classifier.fannwrapper.FANNParams(name = "{:1.1f}".format(crit_contrast),
hidden_nodes = [15,15,15], max_iterations = 2000)
ml_class = binfind.classifier.ML(mlparams, toolparams, workbasedir=os.path.join(outdir, 'ann'))
results_train = {'ann':[]}
results_test = {'ann':[]}
fids = u.get_id_catalogs(crit_angsep, crit_contrast)
if len(fids) != previous_sep:
previous_sep = len(fids)
data = None
for fid in fids:
fname = os.path.join(star_catalogues_path, 'star_field_BGM_i_%d_%d_%d' % (l, b, fid))
datal = binfind.utils.load_bmg(fname, main_sequence=True)
if data is None:
data = datal
else:
data = np.vstack([data,datal])
print "=" * 60
print "Running experiments on alpha > %0.4f, contrast < %0.1f --- (%d/%d)" % (crit_angsep, crit_contrast, iix+1, len(criteria))
sim_cat = binfind.simulation.Catalog(crit_angsep, crit_contrast)
features = None
###################################################################################################
### CORE OF CODE
"""
for ith_experience in range(n_training):
print '>> REALISATION %d/%d <<' % (ith_experience + 1, n_training)
stars_to_observe, feature, fiducials = u.get_knPSF_realisation(data, sim_cat, euclid, n_exposures, \
m_min, m_max, bin_fraction, return_pos=True, relerr=False)
feature = np.hstack([fiducials, feature])
if features is None:
features = feature
star_char = stars_to_observe
else:
features = np.vstack([features, feature])
star_char = np.vstack([star_char, stars_to_observe])
binary_stars = star_char[:,0]
###############################################################################################
### Training
ml_class.train(binary_stars, features)
# Testing the training, just to get an idea
proba = ml_class.predict(features)
ann_roc_params = binfind.diagnostics.test_thresholds(binary_stars, proba, thresholds)
ann_preds = ml_class.predict(features)
ann_metr = binfind.diagnostics.get_metrics(binary_stars, ann_preds)
auc_ann = binfind.diagnostics.auc(ann_roc_params)
print 'AUC training ANN:', auc_ann
print 'TPR:', ann_metr[0]
print 'FPR:', ann_metr[1]
print 'F1:', ann_metr[2]
results_train["ann"].append(np.concatenate([[crit_angsep, crit_contrast], [0.0], ann_metr, [auc_ann]]))
###############################################################################################
# Validation
for ith_experience in range(n_test):
print '>> REALISATION %d/%d <<' % (ith_experience + 1, n_test)
stars_to_observe, feature, fiducials = u.get_knPSF_realisation(data, sim_cat, euclid, n_exposures, \
m_min, m_max, bin_fraction, return_pos=True, relerr=False)
feature = np.hstack([fiducials, feature])
if features is None:
features = feature
star_char = stars_to_observe
else:
features = np.vstack([features, feature])
star_char = np.vstack([star_char, stars_to_observe])
binary_stars = star_char[:,0]
## Random forest
proba_ann = ml_class.predict_proba(features)
ann_roc_params = binfind.diagnostics.test_thresholds(binary_stars, proba_ann, thresholds)
auc_ann = binfind.diagnostics.auc(ann_roc_params)
ann_preds, _ = ml_class.predict(features)
ann_metr = binfind.diagnostics.get_metrics(binary_stars, ann_preds)
print 'AUC testing ANN:', auc_ann
print 'TPR:', ann_metr[0]
print 'FPR:', ann_metr[1]
print 'F1:', ann_metr[2]
fig = plt.figure()
ax = plt.subplot()
labels = ['ANN']
#for line in acf_rocs:
# print line[:3]
binfind.plots.roc(ax, [ ann_roc_params],
metrics=[ann_roc_params[:,3]],
metrics_label=r"$F_1\ \mathrm{score}$", labels=labels)
figfname = os.path.join(outdir, "figures", "roc_sep{:.0f}_con{:.0f}".format(crit_angsep*1e3, crit_contrast*10))
binfind.plots.figures.savefig(figfname, fig, fancy=True, pdf_transparence=True)
if show: plt.show()
plt.close()
"""
###############################################################################################
## Training with PSF reconstruct
features = None
gnd_truth = None
for ith_experience in range(n_training):
print '>> REALISATION %d/%d <<' % (ith_experience + 1, n_training)
stars_to_observe = u.get_uknPSF_realisation(data, sim_cat, euclid, n_exposures, \
m_min, m_max, n_stars, bin_fraction)
feature = euclid.get_reconstruct_fields(recovery_n_inter, recovery_n_neighbour,
eps=0, truth=stars_to_observe[:,0], return_proba=True, relerr=False)
if features is None:
features = feature
gnd_truth = stars_to_observe[:,0]
else:
features = np.vstack([features, feature])
gnd_truth = np.concatenate([gnd_truth, stars_to_observe[:,0]])
print gnd_truth.shape
print features.shape
ml_class.train(gnd_truth, features)
# Testing the training, just to get an idea
proba = ml_class.predict(features)
ann_roc_params = binfind.diagnostics.test_thresholds(gnd_truth, proba, thresholds)
ann_preds = ml_class.predict(features)
ann_metr = binfind.diagnostics.get_metrics(gnd_truth, ann_preds)
auc_ann = binfind.diagnostics.auc(ann_roc_params)
print 'AUC training ANN:', auc_ann
print 'TPR:', ann_metr[0]
print 'FPR:', ann_metr[1]
print 'F1:', ann_metr[2]
results_train["ann"].append(np.concatenate([[crit_angsep, crit_contrast], [0.5], ann_metr, [auc_ann]]))
###############################################################################################
## Validation
idlims = []
for ith_experience in range(n_validation):
print '>> REALISATION %d/%d <<' % (ith_experience + 1, n_validation)
stars_to_observe = u.get_uknPSF_realisation(data, sim_cat, euclid, n_exposures, \
m_min, m_max, n_stars, bin_fraction)
# ANN
ann_preds, proba_ann = euclid.reconstruct_fields(ml_class, recovery_n_inter, recovery_n_neighbour,
eps=0, truth=stars_to_observe[:,0], return_proba=True, relerr=False)
ann_roc_params = binfind.diagnostics.test_thresholds(stars_to_observe[:,0], proba_ann, thresholds)
idlims.append(binfind.utils.find_nearest(ann_roc_params[:,2], thr_fpr))
print idlims
idlim = int(np.median(idlims))
print idlim
thr = ann_roc_params[idlim, 0]
ml_class.set_threshold(thr)
print ml_class.threshold
###############################################################################################
## Testing
feature = None
ann_res = []
ann_rocs = None
for ith_experience in range(n_test):
print '>> REALISATION %d/%d <<' % (ith_experience + 1, n_test)
stars_to_observe = u.get_uknPSF_realisation(data, sim_cat, euclid, n_exposures, \
m_min, m_max, n_stars, bin_fraction)
# ANN
ann_preds, proba_ann = euclid.reconstruct_fields(ml_class, recovery_n_inter, recovery_n_neighbour,
eps=0, truth=stars_to_observe[:,0], return_proba=True, relerr=False)
ann_roc_params = binfind.diagnostics.test_thresholds(stars_to_observe[:,0], proba_ann, thresholds)
ann_metr = binfind.diagnostics.get_metrics(stars_to_observe[:,0], ann_preds)
auc_ann = binfind.diagnostics.auc(ann_roc_params)
print 'AUC testing ANN:', auc_ann
ann_res.append(np.concatenate([[crit_angsep, crit_contrast], [ml_class.threshold], ann_metr, [auc_ann]]))
if ann_rocs is None:
ann_rocs = ann_roc_params
else:
ann_rocs += ann_roc_params
ann_res = np.array(ann_res)
ann_rocs /= n_test
if n_test > 1:
ann_res = np.mean(ann_res, axis=0)
results_test["ann"].append(ann_res)
### Plotting
fig = plt.figure()
ax = plt.subplot()
labels = ['ANN']
binfind.plots.roc(ax, [ ann_rocs],
metrics=[ann_rocs[:,3]],
metrics_label=r"$F_1\ \mathrm{score}$", labels=labels)
figfname = os.path.join(outdir, "figures", "roc_sep{:.0f}_con{:.0f}".format(crit_angsep*1e3, crit_contrast*10))
binfind.plots.figures.savefig(figfname, fig, fancy=True, pdf_transparence=True)
if show: plt.show()
plt.close()
for key in results_train:
results_train[key] = np.array(results_train[key])
results_test[key] = np.array(results_test[key])
binfind.utils.writepickle([results_train, results_test], os.path.join(outdir, "results_{:d}_{:1.1f}.pkl".format(int(crit_angsep*1e3), crit_contrast)))
|
[
"thibault.kuntzer@epfl.ch"
] |
thibault.kuntzer@epfl.ch
|
e5c91c7fd1bd25cc3f7f768d48043b34c72f92c9
|
1900a250ac0544a1dfa64334d5d84e13d754c657
|
/django_back_end/auto_smart_graph/smart_backend/graph_provider/urls.py
|
5eb2f1ffd9c6f91b163d06cd1bdf8a288c76d846
|
[
"MIT"
] |
permissive
|
hjellison/AutoKnowledge
|
8a4a0e23feb3edc8d5e6419b6b8328a3dd926314
|
561a3703d5be0639ef76d0b4f8866c32bd9d7ec1
|
refs/heads/master
| 2022-02-20T02:19:12.403123
| 2019-01-08T17:19:10
| 2019-01-08T17:19:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 191
|
py
|
# -*- coding: utf-8 -*-
from django.conf.urls import url
import graph_provider.services as services
"""为webservice分配url"""
urlpatterns = [
url(r'^query/$', services.fetch_data),
]
|
[
"guitarmonyz@gmail.com"
] |
guitarmonyz@gmail.com
|
39edc2c652c3cfae8486b41bcca3f97222ca09ff
|
ec690eb923d6fc76acd69f27d5a25f00ac763b7d
|
/Compare the SMT and MILP for Job shop scheduling/Optimization problem/job shop (disjunctive graph model)-Cplex IBM.py
|
6d0f32980e03766f9cb081801a8ec458c01f732e
|
[] |
no_license
|
CTU-IIG/LSP_SMT_Machine_learning
|
60af8bedd49cc7c38becb1031d8c237cdd92ff66
|
4dfa5bad5b7f1625c0bc6a2d2ac67d7978cba192
|
refs/heads/master
| 2020-10-01T23:57:27.138047
| 2020-08-26T13:23:38
| 2020-08-26T13:23:38
| 227,652,141
| 0
| 1
| null | 2019-12-13T14:26:02
| 2019-12-12T16:41:32
| null |
UTF-8
|
Python
| false
| false
| 2,318
|
py
|
from docplex.mp.model import Model
from docplex.util.environment import get_environment
import os
# ----------------------------------------------------------------------------
# Initialize the problem data
# ----------------------------------------------------------------------------
filename = os.path.dirname(os.path.abspath(__file__)) + "/data/jobshop_j100_m10-D.data"
with open(filename, "r") as file:
NB_JOBS, NB_MACHINES = [int(v) for v in file.readline().split()]
JOBS = [[int(v) for v in file.readline().split()] for i in range(NB_JOBS)]
#-----------------------------------------------------------------------------
# Prepare the data for modeling
#-----------------------------------------------------------------------------
MACHINES = [[JOBS[j][2 * s] for s in range(NB_MACHINES)] for j in range(NB_JOBS)]
DURATION = [[JOBS[j][2 * s + 1] for s in range(NB_MACHINES)] for j in range(NB_JOBS)]
A=[(j,o) for j in range(NB_JOBS) for o in range(NB_MACHINES) if MACHINES[j][o]<=NB_MACHINES]
B=[(j,o,jj,oo) for j,o in A for jj,oo in A if (j,o)!=(jj,oo) and (MACHINES[j][o]==MACHINES[jj][oo] or MACHINES[j][o]==NB_MACHINES or MACHINES[jj][oo]==NB_MACHINES) and MACHINES[j][o]<=NB_MACHINES and MACHINES[jj][oo]<=NB_MACHINES]
G=10000
# ----------------------------------------------------------------------------
# Build the model
# ----------------------------------------------------------------------------
mdl = Model('disjunctive_graph')
s= mdl.continuous_var_dict(A,name='s')
y=mdl.binary_var_dict(B,name='y')
c_t=mdl.continuous_var_dict([(j) for j in range(NB_JOBS) if j<NB_JOBS-1],name='completion time')
z=mdl.minimize(mdl.sum(c_t[j] for j in range(NB_JOBS) if j<NB_JOBS-1))
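# Disjunctive ordering: for every conflicting pair of operations in B, exactly
# one precedence direction is selected (either (j,o) before (jj,oo) or vice versa).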
mdl.add_constraints((y[j,o,jj,oo]+y[jj,oo,j,o]==1) for j,o,jj,oo in B)
mdl.add_constraint(mdl.sum(y[j,o,NB_JOBS-1,0] for j,o in A if (j,o)!=(NB_JOBS-1,0))<=0)
mdl.add_constraint(mdl.sum(y[NB_JOBS-1,1,j,o] for j,o in A if (j,o)!=(NB_JOBS-1,1))<=0)
mdl.add_constraints((s[j,o]-s[j,o-1]-DURATION[j][o-1]>=0) for j,o in A if o>0 and j<NB_JOBS-1)
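# Big-M linking: when y[jj,oo,j,o] = 1, operation (j,o) may not start before
# (jj,oo) has finished; the term (1-y)*G relaxes the constraint otherwise.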
mdl.add_constraints((s[j,o]-s[jj,oo]-DURATION[jj][oo]+(1-y[jj,oo,j,o])*G>=0) for j,o,jj,oo in B)
mdl.add_constraints((c_t[j]-s[j,NB_MACHINES-1]-DURATION[j][NB_MACHINES-1]>=0) for j in range(NB_JOBS) if j<NB_JOBS-1)
solution= mdl.solve(log_output=True)
print(solution)
|
[
"Rohanmoh@users.noreply.github.com"
] |
Rohanmoh@users.noreply.github.com
|
168afb92f5871535bc501a9436acf2551621e45e
|
3c821e371b6b079f42710e88b68d967579193315
|
/apps/dashboard/helpers.py
|
dce8147d03ae7b89dd6a6070a8d19492fe1efbfc
|
[] |
no_license
|
prafulbagai/grabhalo
|
883991082043fb06a9b95b22838fd7459c9e2fce
|
374a1dadc8a53621cf01149ee0c0d3e998922770
|
refs/heads/master
| 2021-01-10T19:00:12.851476
| 2013-11-15T04:44:56
| 2013-11-15T04:44:56
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 769
|
py
|
import datetime
from apps.data.models import *
def send_query(request,selected_users,query):
date_time = datetime.datetime.now()
web_query_data = WebQuery.objects.all()
user_id = GrabhaloUser.objects.filter(user_id = request.user.id)[0].id
if not web_query_data:
c_id = 0
else:
for data in web_query_data:
c_id = data.conversation_id
web_query = WebQuery.objects.create(user_id = user_id, sent_to = selected_users,
user_query = query,date_time = date_time, conversation_id = c_id + 1)
web_query.save()
for user in selected_users :
web_reply = WebReply.objects.create(user_id = user_id, sent_to = user, chat = query,
conversation_id = c_id +1, date_time = date_time)
web_reply.save()
|
[
"praful.bagai1991@gmail.com"
] |
praful.bagai1991@gmail.com
|
80337020802ca9fd802f8ae2cfdf0b3637aa4160
|
e05f2ac4d37becd314082d558fc737b85f71d56d
|
/conf.py
|
a45f47df0a4cbbbb7335a4ce79897d2e19875f00
|
[] |
no_license
|
QMickael/abilian-developer-guide
|
bebef4987ecc91a60c9327011e540e3fa4e235fc
|
0f0432f00ccad74aeb4006bc7ec8af3557bedaab
|
refs/heads/master
| 2020-07-09T08:17:32.629372
| 2016-11-17T11:45:49
| 2016-11-17T11:45:49
| 74,018,558
| 0
| 0
| null | 2016-11-17T10:59:38
| 2016-11-17T10:59:38
| null |
UTF-8
|
Python
| false
| false
| 10,985
|
py
|
# -*- coding: utf-8 -*-
#
# Abilian Developer Guide documentation build configuration file, created by
# sphinx-quickstart on Mon Feb 23 23:47:37 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.todo',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Abilian Developer Guide'
copyright = u'2015, Stefane Fermigier'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '2015.3'
# The full version, including alpha/beta/rc tags.
release = '2015.3'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
# on_rtd is whether we are on readthedocs.org, this line of code grabbed from docs.readthedocs.org
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd: # only import and set the theme if we're building docs locally
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# otherwise, readthedocs.org uses their theme by default, so no need to specify it
#import sphinx_readable_theme
#html_theme_path = [sphinx_readable_theme.get_html_theme_path()]
#html_theme = 'readable'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'AbilianDeveloperGuidedoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'AbilianDeveloperGuide.tex', u'Abilian Developer Guide Documentation',
u'Stefane Fermigier', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'abiliandeveloperguide', u'Abilian Developer Guide Documentation',
[u'Stefane Fermigier'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'AbilianDeveloperGuide', u'Abilian Developer Guide Documentation',
u'Stefane Fermigier', 'AbilianDeveloperGuide', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# -- Options for Epub output ----------------------------------------------
# Bibliographic Dublin Core info.
epub_title = u'Abilian Developer Guide'
epub_author = u'Stefane Fermigier'
epub_publisher = u'Stefane Fermigier'
epub_copyright = u'2015, Stefane Fermigier'
# The basename for the epub file. It defaults to the project name.
#epub_basename = u'Abilian Developer Guide'
# The HTML theme for the epub output. Since the default themes are not optimized
# for small screen space, using the same theme for HTML and epub output is
# usually not wise. This defaults to 'epub', a theme designed to save visual
# space.
#epub_theme = 'epub'
# The language of the text. It defaults to the language option
# or en if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()
# A sequence of (type, uri, title) tuples for the guide element of content.opf.
#epub_guide = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files shat should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
# Choose between 'default' and 'includehidden'.
#epub_tocscope = 'default'
# Fix unsupported image types using the PIL.
#epub_fix_images = False
# Scale large images.
#epub_max_image_width = 0
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#epub_show_urls = 'inline'
# If false, no index is generated.
#epub_use_index = True
|
[
"sf@fermigier.com"
] |
sf@fermigier.com
|
2234facacdd73c0c307317c6f99846c445f65e0e
|
583e87b01151fa9cdcf6146ad1cfa0a9be7483f0
|
/test/machine/KNN.py
|
212274ec80ef5ba2da4c4072625a7e3643090d9d
|
[] |
no_license
|
IRH01/snake
|
0f9063da27104b51c9c6400ee0ae0d21061542a6
|
adba7389f340ba1dd3f2d41df6dba8508a809434
|
refs/heads/master
| 2021-01-16T20:37:20.562565
| 2018-10-09T03:58:29
| 2018-10-09T03:58:29
| 62,103,752
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 210
|
py
|
#!/usr/bin/python
# -*- coding: UTF-8 -*-
from numpy import array  # numpy is needed here: the stdlib array module cannot build the 2-D dataset below
def createDataSet():
group = array([[1.0, 1.1], [1.0, 1.0], [0, 0], [0, 0.1]])
labels = ['A', 'A', 'B', 'B']
return group, labels
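# Illustrative sketch (not part of the original fragment): the classic
# k-nearest-neighbours vote this toy dataset is usually fed into; the
# function name and signature below are hypothetical.
def classify0(inX, dataSet, labels, k):
    # Euclidean distance from the query point to every training sample
    distances = (((dataSet - inX) ** 2).sum(axis=1)) ** 0.5
    votes = {}
    for i in distances.argsort()[:k]:  # indices of the k nearest neighbours
        votes[labels[i]] = votes.get(labels[i], 0) + 1
    return max(votes, key=votes.get)  # majority label wins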
|
[
"renh@dtds.com.cn"
] |
renh@dtds.com.cn
|
917c3822abace0c2821005d30b4bad8758f5a716
|
7c35adf38ef567757336db5b17ff3dc7da4785c4
|
/kernelSvm.py
|
901bb5b03b62c7f1bbf956c7f3743eeacb8455e7
|
[] |
no_license
|
lex624/MLclassificationModels
|
00fc577aeab881dd9a0762bbe2e9fabfab26954a
|
e0d37b3c5fb4c332193b811c0deae3496d9f78a8
|
refs/heads/master
| 2022-12-17T14:23:59.988214
| 2020-09-16T11:26:51
| 2020-09-16T11:26:51
| 296,010,319
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,629
|
py
|
# Kernel SVM
# Importing the libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# Importing the dataset
dataset = pd.read_csv('Social_Network_Ads.csv')
X = dataset.iloc[:, [2, 3]].values
y = dataset.iloc[:, -1].values
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.25, random_state = 0)
# Feature Scaling
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)
# Training the Kernel SVM model on the Training set
from sklearn.svm import SVC
classifier = SVC(kernel = 'rbf', random_state = 0)
classifier.fit(X_train, y_train)
# Predicting the Test set results
y_pred = classifier.predict(X_test)
# Making the Confusion Matrix
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(y_test, y_pred)
print(cm)
# Visualising the Training set results
from matplotlib.colors import ListedColormap
X_set, y_set = X_train, y_train
X1, X2 = np.meshgrid(np.arange(start = X_set[:, 0].min() - 1, stop = X_set[:, 0].max() + 1, step = 0.01),
np.arange(start = X_set[:, 1].min() - 1, stop = X_set[:, 1].max() + 1, step = 0.01))
plt.contourf(X1, X2, classifier.predict(np.array([X1.ravel(), X2.ravel()]).T).reshape(X1.shape),
alpha = 0.75, cmap = ListedColormap(('red', 'green')))
plt.xlim(X1.min(), X1.max())
plt.ylim(X2.min(), X2.max())
for i, j in enumerate(np.unique(y_set)):
plt.scatter(X_set[y_set == j, 0], X_set[y_set == j, 1],
c = ListedColormap(('red', 'green'))(i), label = j)
plt.title('Kernel SVM (Training set)')
plt.xlabel('Age')
plt.ylabel('Estimated Salary')
plt.legend()
plt.show()
# Visualising the Test set results
from matplotlib.colors import ListedColormap
X_set, y_set = X_test, y_test
X1, X2 = np.meshgrid(np.arange(start = X_set[:, 0].min() - 1, stop = X_set[:, 0].max() + 1, step = 0.01),
np.arange(start = X_set[:, 1].min() - 1, stop = X_set[:, 1].max() + 1, step = 0.01))
plt.contourf(X1, X2, classifier.predict(np.array([X1.ravel(), X2.ravel()]).T).reshape(X1.shape),
alpha = 0.75, cmap = ListedColormap(('red', 'green')))
plt.xlim(X1.min(), X1.max())
plt.ylim(X2.min(), X2.max())
for i, j in enumerate(np.unique(y_set)):
plt.scatter(X_set[y_set == j, 0], X_set[y_set == j, 1],
c = ListedColormap(('red', 'green'))(i), label = j)
plt.title('Kernel SVM (Test set)')
plt.xlabel('Age')
plt.ylabel('Estimated Salary')
plt.legend()
plt.show()
|
[
"36929005+lex624@users.noreply.github.com"
] |
36929005+lex624@users.noreply.github.com
|
2f2b0aea171d99911f7b4b61ba6b9d5ba00a473f
|
689670446bfacf0dfaaeca072c6328e34bb603b7
|
/solutions/5/guillaume/scheduler_superconvergence_09J.py
|
f3de3f7c7556b0475c2be461a13e0b87e6f578d9
|
[
"MIT"
] |
permissive
|
xiemeigongzi/champs_kaggle
|
edb0aa870b6380f6585ab83cd9f4d3881dae9781
|
fda4f213d02fd5e0138a86c52b4140c9f94fec6e
|
refs/heads/master
| 2022-11-17T15:27:36.662280
| 2020-07-15T13:13:06
| 2020-07-15T13:13:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,853
|
py
|
import math
import pandas as pd
import numpy as np
class CosineAnnealing:
def __init__(self, step_start, step_end, lr_start, lr_end):
self.step_start = step_start
self.step_end = step_end
self.lr_start = lr_start
self.lr_end = lr_end
def get(self, step):
if step >= self.step_start and step <= self.step_end:
lr = (
self.lr_end +
0.5 * (self.lr_start - self.lr_end) *
(1 + math.cos((step - self.step_start) / (self.step_end - self.step_start) * math.pi))
)
else:
lr = None
return lr
def plot(self, **kwargs):
lrs = []
index = []
for step in range(self.step_start, self.step_end):
lr = self.get(step)
if lr is not None:
lrs.append(self.get(step))
index.append(step)
lrs = pd.Series(lrs, index = index, name = 'lr')
lrs.index.name = 'step'
lrs.plot(**kwargs)
class LinearScheduler:
def __init__(self, step_start, step_end, lr_start, lr_end):
self.step_start = step_start
self.step_end = step_end
self.lr_start = lr_start
self.lr_end = lr_end
def get(self, step):
if step >= self.step_start and step <= self.step_end:
lr = self.lr_start + (self.lr_end - self.lr_start) * (step - self.step_start) / (self.step_end - self.step_start)
else:
lr = None
return lr
def plot(self, **kwargs):
lrs = []
index = []
for step in range(self.step_start, self.step_end):
lr = self.get(step)
if lr is not None:
lrs.append(self.get(step))
index.append(step)
lrs = pd.Series(lrs, index = index, name = 'lr')
lrs.index.name = 'step'
lrs.plot(**kwargs)
class MixedScheduler:
def __init__(self, schedulers):
self.schedulers = sorted(schedulers, key = lambda s : s.step_start)
self.step_start = self.schedulers[0].step_start
self.step_end = self.schedulers[-1].step_end
def get(self, step):
lr = None
for scheduler in self.schedulers:
scheduler_lr = scheduler.get(step)
if scheduler_lr is not None:
lr = scheduler_lr
return lr
def plot(self, **kwargs):
lrs = []
index = []
for step in range(self.step_start, self.step_end):
lr = self.get(step)
if lr is not None:
lrs.append(self.get(step))
index.append(step)
lrs = pd.Series(lrs, index = index, name = 'lr')
lrs.index.name = 'step'
lrs.plot(**kwargs)
class ExpScheduler:
def __init__(self, step_start, step_end, lr_start, lr_end):
self.step_start = step_start
self.step_end = step_end
self.lr_start = lr_start
self.lr_end = lr_end
self.factor_delta = self.lr_end / self.lr_start
self.step_delta = self.step_end - self.step_start
self.factor = np.exp(np.log(self.factor_delta) / self.step_delta)
def get(self, step):
if step >= self.step_start and step <= self.step_end:
step_delta = step - self.step_start
lr = self.lr_start * self.factor ** step_delta
else:
lr = None
return lr
def plot(self, **kwargs):
lrs = []
index = []
for step in range(self.step_start, self.step_end):
lr = self.get(step)
if lr is not None:
lrs.append(self.get(step))
index.append(step)
lrs = pd.Series(lrs, index = index, name = 'lr')
lrs.index.name = 'step'
lrs.plot(**kwargs)
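# Illustrative usage sketch (not part of the original module): a
# super-convergence style schedule assembled from the classes above,
# i.e. a linear warm-up followed by cosine annealing. The step counts
# and learning rates are hypothetical.
if __name__ == '__main__':
    warmup = LinearScheduler(step_start=0, step_end=1000, lr_start=1e-4, lr_end=1e-2)
    anneal = CosineAnnealing(step_start=1000, step_end=10000, lr_start=1e-2, lr_end=1e-5)
    schedule = MixedScheduler([warmup, anneal])
    print(schedule.get(500))   # mid warm-up: between 1e-4 and 1e-2
    print(schedule.get(5000))  # on the cosine decay: between 1e-2 and 1e-5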
|
[
"larsbratholm@gmail.com"
] |
larsbratholm@gmail.com
|
f96c7bed2103f0d0226c011abf234a228c1e8afd
|
cd6d278443ac4a64af0954c9b8af3b8f0be06b45
|
/AnimalsShop/wsgi.py
|
682110bb553d97127e594eeb42012c13dc734b09
|
[] |
no_license
|
IlyasNugaev000/HappyPet
|
eecca2656690daa3aab26f4de1a1d92303be873e
|
1c99b3b6d50f38461f8b8007f896a9d66b971350
|
refs/heads/master
| 2023-05-30T21:55:10.015925
| 2021-06-11T07:02:01
| 2021-06-11T07:02:01
| 375,980,120
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 399
|
py
|
"""
WSGI config for AnimalsShop project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'AnimalsShop.settings')
application = get_wsgi_application()
|
[
"ilyas-nugaev@mail.ru"
] |
ilyas-nugaev@mail.ru
|
918361fe7ccc6b8702e3b92faba463d9772ca61f
|
e82b761f53d6a3ae023ee65a219eea38e66946a0
|
/All_In_One/addons/Jet/common_utils.py
|
ebb04ce3493f13c055cc5e8a5ee1b2ea5ac90d8e
|
[] |
no_license
|
2434325680/Learnbgame
|
f3a050c28df588cbb3b14e1067a58221252e2e40
|
7b796d30dfd22b7706a93e4419ed913d18d29a44
|
refs/heads/master
| 2023-08-22T23:59:55.711050
| 2021-10-17T07:26:07
| 2021-10-17T07:26:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,496
|
py
|
import bpy, sys
from . import bl_info
def get_id(object):
if "id" not in object.keys():
object["id"] = str(hash(object))
return object["id"]
def select_obj_exclusive(obj, edit_mode = False):
bpy.context.scene.objects.active = obj
bpy.ops.object.mode_set(mode="OBJECT")
bpy.ops.object.select_all(action='DESELECT')
obj.select = True
if edit_mode: bpy.ops.object.mode_set(mode="EDIT")
def update_progress(job_title, progress, processingObj):
length = 50
block = int(round(length*progress))
msg = "\r{0}: [{1:50s}] {2:3.2f}%".format(job_title, "#"*block + "-"*(length-block), round(progress*100, 2))
if progress < 1:
msg += " -> Obj: {0:50s}".format(processingObj)
else:
msg += "{0:50s}".format("")
msg += ("\n" + job_title + " -> DONE\r\n")
sys.stdout.write(msg)
sys.stdout.flush()
def apply_to_selected(context, func, keep_mode = True, keep_selection = True, keep_active = True, value = None, verbose = False):
sel_objs = context.selected_objects
active_obj = context.active_object
mode = None if active_obj is None or active_obj.type != "MESH" else active_obj.mode
numObjs = len(sel_objs)
if numObjs == 0: return None
if verbose:
count = 1
print("")
if mode == 'EDIT':
func(active_obj) if value is None else func(active_obj, value)
else:
for obj in sel_objs:
try:
func(obj) if value is None else func(obj, value)
except:
break
if verbose:
update_progress(func.__name__, count / numObjs, obj.name)
count = count + 1
bpy.ops.object.mode_set(mode="OBJECT")
#bpy.ops.object.select_all(action='DESELECT')
if keep_selection:
for obj in reversed(sel_objs):
obj.select = True
if keep_active:
if hasattr(context, "scene"):
context.scene.objects.active = active_obj
if keep_mode and mode is not None:
bpy.ops.object.mode_set(mode=('EDIT' if mode=='EDIT' else 'OBJECT'))
def get_mesh_objs_selected(context):
return [obj for obj in context.selected_objects if obj.type == 'MESH']
def any_mesh_obj_selected(context):
return len(get_mesh_objs_selected(context)) > 0
def redraw(context):
if hasattr(context, "area") and context.area is not None:
context.area.tag_redraw()
def redraw():
for area in bpy.context.screen.areas:
if area.type in ['VIEW_3D']:
area.tag_redraw()
def get_addon_name():
return bl_info["name"]
def get_preferences(context):
addon_name = get_addon_name()
return context.user_preferences.addons[addon_name].preferences
def shorten_key_modifier(context, key):
if key == 'LEFTMOUSE':
return 'LMB'
elif key == 'RIGHTMOUSE':
return 'RMB'
elif key == 'MIDDLEMOUSE':
return 'MMB'
elif key == 'SELECTMOUSE':
if context.user_preferences.inputs.select_mouse == 'LEFT':
return 'LMB'
else:
return 'RMB'
elif key == 'ACTIONMOUSE':
if context.user_preferences.inputs.select_mouse == 'LEFT':
return'RMB'
else:
return 'LMB'
else:
return key
def get_hotkey(context, keymap_item):
wm = context.window_manager
item = None
#wm.keyconfigs.active.keymaps['Mesh'].keymap_items
for km in wm.keyconfigs.user.keymaps:
for kmi in km.keymap_items:
if kmi.active and kmi.idname == keymap_item:
item = kmi
break
if item is None:
for km in wm.keyconfigs.addon.keymaps:
for kmi in km.keymap_items:
if kmi.active and kmi.idname == keymap_item:
item = kmi
break
if item is None:
for km in wm.keyconfigs.active.keymaps:
for kmi in km.keymap_items:
if kmi.active and kmi.idname == keymap_item:
item = kmi
break
if item is None:
return ""
hotkey = ""
if item.ctrl:
hotkey = hotkey + "Ctrl+"
if item.alt:
hotkey = hotkey + "Alt+"
if item.shift:
hotkey = hotkey + "Shift+"
if item.oskey:
hotkey = hotkey + "OSkey+"
if item.key_modifier != 'NONE':
hotkey = hotkey + shorten_key_modifier(context, item.key_modifier) + "+"
return hotkey + shorten_key_modifier(context, item.type)
|
[
"root@localhost.localdomain"
] |
root@localhost.localdomain
|
a558cec8621f8f9edd1a20c78659c0cef87aa908
|
b161ebfe06c22a2f18ac27695cf56ad283d037f1
|
/scrimp/provisioner.py
|
45bc62967cfc63c0456607dfd53e8c619075108b
|
[] |
no_license
|
globus-labs/SCRIMP
|
afb0e335959e7c1436ce88a8176a284156cc85fd
|
0aada1fe9fff0827e44771bf983e722eb40b09b6
|
refs/heads/master
| 2021-01-02T22:41:38.919422
| 2017-08-09T20:47:40
| 2017-08-09T20:47:40
| 99,370,330
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 26,117
|
py
|
import psycopg2
import datetime
import calendar
import time
# import sys
from decimal import *
import requests
from scrimp import logger, ProvisionerConfig, tenant, scheduler
from scrimp.cloud import aws
from scrimp.cloud import simaws
from scrimp.scheduler.condor.condor_scheduler import CondorScheduler
from scrimp.scheduler.simfile.sim_scheduler import SimScheduler
class Provisioner(object):
"""
A provisioner for cloud resources.
Cost effectively acquires and manages instances.
"""
def __init__(self):
self.tenants = []
self.drafts_mapping = {'us-east-1a': 'us-east-1e',
'us-east-1b': 'us-east-1d',
'us-east-1c': 'us-east-1a',
'us-east-1d': 'us-east-1b',
'us-east-1e': 'us-east-1c', }
# Read in any config data and set up the database connection
ProvisionerConfig()
def run(self):
"""
Run the provisioner. This should execute periodically and
determine what actions need to be taken.
"""
self.run_iterations = 0
# self.simulate = False
if ProvisionerConfig().simulate:
self.sched = SimScheduler()
ProvisionerConfig().load_instance_types()
self.load_drafts_data()
while True:
self.run_iterations = self.run_iterations + 1
# Load jobs
t1 = datetime.datetime.now()
start_time = datetime.datetime.now()
self.load_tenants_and_jobs()
t2 = datetime.datetime.now()
# Simulate the world (mostly tidy things up and print stats)
ProvisionerConfig().simulator.simulate(self.tenants)
t3 = datetime.datetime.now()
self.sched.process_idle_jobs(self.tenants)
tx = datetime.datetime.now()
# Simulate Condor
ProvisionerConfig().simulator.run_condor(self.tenants)
t4 = datetime.datetime.now()
# Simulate AWS
ProvisionerConfig().simulator.run_aws()
t5 = datetime.datetime.now()
# Check if it should finish executing (e.g. jobs and
# resources all terminated)
if ProvisionerConfig().simulator.check_finished():
break
self.manage_resources()
t6 = datetime.datetime.now()
if ((ProvisionerConfig().simulate_time -
ProvisionerConfig().sim_time).total_seconds() %
ProvisionerConfig().run_rate == 0):
self.provision_resources()
t7 = datetime.datetime.now()
load_time = (t2 - t1).total_seconds()
sim_time = (t3 - t2).total_seconds()
proc_idle_time = (tx - t3).total_seconds()
condor_time = (t4 - tx).total_seconds()
aws_time = (t5 - t4).total_seconds()
manage_time = (t6 - t5).total_seconds()
prov_time = (t7 - t6).total_seconds()
# Otherwise, step through time
ProvisionerConfig().simulate_time = ProvisionerConfig(
).simulate_time + datetime.timedelta(seconds=2)
logger.debug("RUN ID: %s. SIMULATION: advancing time "
"2 second" % ProvisionerConfig().run_id)
logger.debug("SIMULATION times: load (%s), sim (%s),"
" proc_idle (%s), condor (%s), aws (%s),"
" manage (%s), prov (%s)" % (
load_time, sim_time, proc_idle_time,
condor_time,
aws_time, manage_time, prov_time))
else:
self.sched = CondorScheduler()
while True:
self.run_iterations = self.run_iterations + 1
# Get the tenants from the database and process the current
# condor_q. Also assign those jobs to each tenant.
start_time = datetime.datetime.now()
self.load_tenants_and_jobs()
# provisioning will fail if there are no tenants
if len(self.tenants) > 0:
# Handle all of the existing requests. This will cancel
# or migrate excess requests and update the database to
# reflect the state of the environment
self.manage_resources()
# Work out the price for each instance type and acquire
# resources for jobs
self.provision_resources()
# wait "run_rate" seconds before trying again
end_time = datetime.datetime.now()
diff = (end_time - start_time).total_seconds()
logger.debug("SCRIMP (SIMULATION) run loop: "
"%s seconds. Now sleeping %s seconds." % (
diff, ProvisionerConfig().run_rate))
if diff < ProvisionerConfig().run_rate:
time.sleep(ProvisionerConfig().run_rate - diff)
def load_tenants_and_jobs(self):
"""
Get all of the tenants from the database and then read the condor
queue to get their respective jobs.
"""
# Load all of the tenants
# Load all of the jobs from condor and associate them with the tenants.
# This will also remove jobs that should not be processed (e.g. an
# instance has been fulfilled for them already).
if ProvisionerConfig().simulate:
# lets only do this once.
if ProvisionerConfig().relative_time is None:
self.tenants = tenant.load_from_db()
self.sched.only_load_jobs(self.tenants)
else:
self.tenants = tenant.load_from_db()
self.sched.load_jobs(self.tenants)
def manage_resources(self):
"""
Use the resource manager to keep the database up to date and manage
aws requests and resources.
"""
# Build a set of instances and their current spot prices so we don't
# need to keep revisiting the AWS API
if ProvisionerConfig().simulate:
simaws.manager.process_resources(self.tenants)
else:
aws.manager.process_resources(self.tenants)
scheduler.base_scheduler.ignore_fulfilled_jobs(self.tenants)
def load_drafts_data(self):
"""
To speed this up, load in all the drafts data once per
provisioning cycle
"""
cur_time = datetime.datetime.utcnow()
if ProvisionerConfig().simulate:
cur_time = ProvisionerConfig().simulator.get_fake_time()
minus_ten = cur_time - datetime.timedelta(seconds=600)
query = ("select * from drafts_price where timestamp < "
"'%s'::TIMESTAMP and timestamp > '%s'::TIMESTAMP") % (
cur_time.strftime("%Y-%m-%d %H:%M"),
minus_ten.strftime("%Y-%m-%d %H:%M"))
self.drafts_data = []
logger.debug('getting drafts data: ' + query)
rows = ProvisionerConfig().dbconn.execute(query)
for row in rows:
data = {'time': row['time'], 'price': row['price'],
'zone': row['zone'], 'type': row['type']}
self.drafts_data.append(data)
def provision_resources(self):
# This passes tenant[0] (a test tenant with my credentials) to use its
# credentials to query the AWS API for price data
# price data is stored in the Instance objects
for t in self.tenants:
if len(t.idle_jobs) == 0:
continue
if (ProvisionerConfig().DrAFTS or
ProvisionerConfig().DrAFTSProfiles):
if ProvisionerConfig().simulate:
# when simulating only load it every 5 mins.
if ((ProvisionerConfig().simulate_time -
ProvisionerConfig().sim_time).total_seconds() %
300 == 0):
self.load_drafts_data()
else:
if self.run_iterations % 300 == 0:
self.load_drafts_data()
# Get the spot prices for this tenant's AZ's
if ProvisionerConfig().simulate:
simaws.api.get_spot_prices(
ProvisionerConfig().instance_types, t)
else:
aws.api.get_spot_prices(ProvisionerConfig().instance_types,
t)
# Select a request to make for each job
self.select_instance_type(ProvisionerConfig().instance_types)
# Make the requests for the resources
if ProvisionerConfig().simulate:
simaws.api.request_resources(t)
else:
aws.api.request_resources(t)
def get_potential_instances(self, eligible_instances, job, tenant):
"""
Make a list of all <type,zone> and <type,ondemand> pairs then order
them.
"""
# Putting this here so it isn't called every run
# commented out to stop it checking drafts prices
unsorted_instances = []
# Add an entry for each instance type as ondemand, or each spot
# price so we can sort everything and pick the cheapest.
for ins in eligible_instances:
unsorted_instances.append(aws.Request(
ins, ins.type, "", ins.ami, 1, 0, True,
ins.ondemand, ins.ondemand, ins.ondemand, ins.ondemand,
ins.ondemand))
# Don't bother adding spot prices if it is an ondemand request:
if not job.ondemand:
DrAFTS = None
AvgPrice = None
OraclePrice = None
for zone, price in ins.spot.iteritems():
# if zone == 'us-east-1c':
if (ProvisionerConfig().DrAFTS or
ProvisionerConfig().DrAFTSProfiles):
DrAFTS, OraclePrice = self.get_DrAFTS_bid(
ins.type, zone, job, price)
if DrAFTS is None or OraclePrice is None:
# try it again, if it doesn't find them its
# because the price doesn't exist. so add a big
# value to skip it
DrAFTS, OraclePrice = self.get_DrAFTS_bid(
ins.type, zone, job, price)
if DrAFTS is None:
DrAFTS = 1000
if OraclePrice is None:
OraclePrice = 1000
if ProvisionerConfig().DrAFTS:
unsorted_instances.append(aws.Request(
ins, ins.type, zone, ins.ami, 1, 0, False,
ins.ondemand, DrAFTS, 0, 0, 0))
elif ProvisionerConfig().DrAFTSProfiles:
unsorted_instances.append(aws.Request(
ins, ins.type, zone, ins.ami, 1, 0, False,
ins.ondemand, OraclePrice, 0, 0, 0))
else:
unsorted_instances.append(aws.Request(
ins, ins.type, zone, ins.ami, 1, 0, False,
ins.ondemand, price, 0, 0, 0))
logger.debug('%s, %s spot: %s drafts: %s profile: %s' % (
ins.type, zone, price, DrAFTS, OraclePrice))
# Now sort all of these instances by price
sorted_instances = []
# Adding and false here to force it to use the cheapest price for now.
if ProvisionerConfig().DrAFTS:
# This should sort by the drafts price and then by the current
# spot price that way we will get the cheapest AZ at the top of
# the list.
sorted_instances = sorted(unsorted_instances,
key=lambda k: (k.DrAFTS, k.price))
if ProvisionerConfig().DrAFTSProfiles:
sorted_instances = sorted(unsorted_instances,
key=lambda k: (k.OraclePrice, k.price))
else:
sorted_instances = sorted(
unsorted_instances, key=lambda k: k.price)
return sorted_instances
def get_DrAFTS_bid(self, ins, zone, job, cur_price):
"""
Pull the DrAFTS price for this instance type.
This will get the nearest value greater than 1 hour.
"""
# example: http://128.111.84.183/vpc/us-east-1a-c3.2xlarge.pgraph
try:
ret_drafts = None
ret_oracle = None
if ProvisionerConfig().drafts_stored_db:
# clear the tenant's current avg prices
mapped_zone = self.drafts_mapping[zone]
logger.debug('drafts zone: %s' % mapped_zone)
for row in self.drafts_data:
if (row['type'] == ins and mapped_zone == row['zone'] and
float(row['price']) > float(cur_price)):
time = row['time']
cost = row['price']
if ret_drafts is None and float(time) > 1:
ret_drafts = Decimal(str(cost))
if (ret_oracle is None and float(time) >
(float(job.duration) / 3600)):
ret_oracle = Decimal(str(cost))
return ret_drafts, ret_oracle
else:
# use the mapping between AZs to pick a zone name
mapped_zone = self.drafts_mapping[zone]
addr = 'http://128.111.84.183/vpc/%s-%s.pgraph' % (
mapped_zone, ins)
req = requests.get(addr)
output = req.text
# Split the result by line
lines = output.split("\n")
ret_drafts = None
# define these out here so if it goes over the line,
# when the request length is too long, it can use the
# previous ones.
cost = None
time = None
for line in lines:
# Extract the time and cost
try:
time = line.split(" ")[0]
cost = line.split(" ")[1]
except Exception, y:
logger.error("drafts: Failed here: %s %s" % (y, line))
# Split the line in half to get the time and cost
if float(time) > 1:
# this is the one we want to use
ret_drafts = Decimal(str(cost))
break
# now do the oracle ones
ret_oracle = None
last = False
for line in lines:
# Extract the time and cost
try:
if len(line) > 5:
time = line.split(" ")[0]
cost = line.split(" ")[1]
else:
last = True
logger.debug("No prediction long enough in "
"%s, using last one. %s %s" % (addr,
time,
cost))
except Exception, z:
logger.error("oracle: failed here: %s %s" % (z, line))
# Split the line in half to get the time and cost
if last or float(time) > (float(job.duration) / 3600):
# this is the one we want to use
ret_oracle = Decimal(str(cost))
break
return ret_drafts, ret_oracle
except Exception, e:
logger.debug("Failed to find DrAFTS price for %s. %s" % (ins, e))
return None, None
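# Standalone sketch (not called by the provisioner) of the selection rule in
# get_DrAFTS_bid: a pgraph file is assumed to hold "<hours> <price>" lines, and
# the bid is the first price whose predicted duration exceeds the target. The
# sample text and the two-hour target are illustrative assumptions only.
def _pgraph_selection_example():
    from decimal import Decimal
    sample = "0.5 0.031\n1.5 0.034\n6.0 0.041"
    target_hours = 2.0
    for line in sample.split("\n"):
        hours, cost = line.split(" ")
        if float(hours) > target_hours:
            return Decimal(cost)  # -> Decimal('0.041')
    return None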
def print_cheapest_options(self, sorted_instances):
# Print out the top three
logger.info("Top three to select from:")
top_three = 3
for ins in sorted_instances:
if top_three == 0:
break
if ProvisionerConfig().DrAFTS:
logger.info("DrAFTS: %s %s %s %s" % (ins.instance_type,
ins.zone, ins.price, ins.DrAFTS))
if ProvisionerConfig().DrAFTSAvgPrice:
logger.info("DrAFTS Oracle Price: %s %s %s %s" % (ins.instance_type,
ins.zone, ins.price, ins.OraclePrice))
else:
logger.info(" %s %s %s" %
(ins.instance_type, ins.zone, ins.price))
top_three = top_three - 1
def get_timeout_ondemand(self, job, tenant, instances):
"""
Check to see if the job now requires an ondemand instance due to
timing out.
"""
cur_time = datetime.datetime.now()
cur_time = calendar.timegm(cur_time.timetuple())
time_idle = 0
if ProvisionerConfig().simulate:
cur_time = ProvisionerConfig().simulate_time
time_idle = (ProvisionerConfig().simulate_time -
job.req_time).total_seconds()
else:
time_idle = cur_time - int(job.req_time)
res_instance = None
# if the tenant has set a timeout and the job has been idle longer than
# this
if tenant.timeout > 0 and time_idle > tenant.timeout:
# sort the eligible instances by their ondemand price (odp)
sorted_instances = sorted(instances, key=lambda k: k.odp)
logger.debug("Selecting ondemand instance: %s" % str(job.launch))
res_instance = sorted_instances[0]
return res_instance
def check_ondemand_needed(self, tenant, sorted_instances, job):
# Check to see if an ondemand instance is required due to timeout
needed = False
launch_instance = self.get_timeout_ondemand(job, tenant,
sorted_instances)
cheapest = sorted_instances[0]
# check to see if it timed out
if (launch_instance is not None and
launch_instance.odp < tenant.max_bid_price):
job.launch = aws.Request(
launch_instance, launch_instance.type, "", launch_instance.ami,
1, launch_instance.odp, True)
logger.debug("Selected to launch on demand due to timeout: %s" %
str(job.launch))
needed = True
# check if the job is flagged as needing on-demand
elif job.ondemand:
needed = True
# if the cheapest option is ondemand
elif cheapest.ondemand and cheapest.odp < tenant.max_bid_price:
job.launch = cheapest
logger.debug("Selected to launch on demand due to ondemand "
"being cheapest: %s" % repr(cheapest))
needed = True
# or if the cheapest option close in price to ondemand, then use
# ondemand.
elif (cheapest.price >
(ProvisionerConfig().ondemand_price_threshold *
float(cheapest.odp)) and
cheapest.price < tenant.max_bid_price):
job.launch = cheapest
logger.debug("Selected to launch on demand due to spot price "
"being close to ondemand price: %s" %
repr(cheapest))
needed = True
return needed
def select_instance_type(self, instances):
"""
Select the instance to launch for each idle job.
"""
for tenant in self.tenants:
for job in list(tenant.idle_jobs):
if ProvisionerConfig().simulate:
time.sleep(ProvisionerConfig().overhead_time)
# Get the set of instance types that can be used for this job
eligible_instances = self.restrict_instances(job)
if len(eligible_instances) == 0:
logger.error("Failed to find any eligible instances "
"for job %s" % job)
continue
# get all potential pairs and sort them
sorted_instances = self.get_potential_instances(
eligible_instances, job, tenant)
if len(sorted_instances) == 0:
logger.error("Failed to find any sorted instances "
"for job %s" % job)
continue
# work out if an ondemand instance is needed
job.ondemand = self.check_ondemand_needed(tenant,
sorted_instances,
job)
# If ondemand is required, redo the sorted list with only
# ondemand requests and set that to be the launched instance
if job.ondemand:
sorted_instances = self.get_potential_instances(
eligible_instances, job, tenant)
job.launch = sorted_instances[0]
logger.debug("Launching ondemand for this job. %s" %
str(job.launch))
continue
# otherwise we are now looking at launching a spot request
# print out the options we are looking at
self.print_cheapest_options(sorted_instances)
# filter out a job if it has had too many requests made
existing_requests = self.get_existing_requests(tenant, job)
if len(existing_requests) >= ProvisionerConfig().max_requests:
tenant.idle_jobs.remove(job)
continue
# Find the top request that hasn't already been requested
# (e.g. zone+type pair is not in existing_requests)
for req in sorted_instances:
if len(existing_requests) > 0:
# Skip this type if a matching request already
# exists
exists = False
for existing in existing_requests:
if (req.instance_type == existing.instance_type and
req.zone == existing.zone):
exists = True
if exists:
continue
# Launch this type.
# Hmm, this is getting more complicated with
# multiple provisioning models.
if req.price < tenant.max_bid_price:
req.bid = self.get_bid_price(job, tenant, req)
job.launch = req
job.cost_aware = req
break
else:
logger.error(("Unable to launch request %s as "
"the price is higher than max bid "
"%s.") % (str(req),
tenant.max_bid_price))
def get_existing_requests(self, tenant, job):
# Get all of the outstanding requests from the db for this instance
existing_requests = []
try:
rows = ProvisionerConfig().dbconn.execute(
("select instance_request.instance_type, "
"instance_request.request_type, "
"instance_type.type, "
"instance_request.subnet, subnet_mapping.zone "
"from instance_request, subnet_mapping, instance_type "
"where job_runner_id = '%s' and "
"instance_request.tenant = %s and "
"instance_request.instance_type = instance_type.id and "
"subnet_mapping.id = instance_request.subnet") %
(job.id, tenant.db_id))
for row in rows:
existing_requests.append(aws.Request(
None, row['type'],
row['zone'], None, None))
except psycopg2.Error:
logger.exception("Error getting number of outstanding")
return existing_requests
def restrict_instances(self, job):
"""
Filter out instances that do not meet the requirements of a job then
return a list of the eligible instances.
"""
eligible_instances = []
# Check if the instance is viable for the job
instance_types = ProvisionerConfig().instance_types
for instance in instance_types:
if aws.manager.check_requirements(instance, job):
eligible_instances.append(instance)
return eligible_instances
def get_bid_price(self, job, tenant, req):
"""
This function is not totally necessary at the moment, but it could be
expanded to include more complex logic when placing a bid.
Currently it just does bid percent * ondemand price of the resource
and checks it is less than the maximum bid.
"""
if ProvisionerConfig().DrAFTS or ProvisionerConfig().DrAFTSProfiles:
return req.price
bid = float(tenant.bid_percent) / 100 * float(req.odp)
if bid <= tenant.max_bid_price:
return bid
else:
return 0.40
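# Standalone sketch of the bidding rule above (the tenant numbers are
# illustrative assumptions): bid = bid_percent / 100 * on-demand price, and the
# bid is only usable if it stays at or below the tenant's maximum bid price.
def _bid_rule_example(bid_percent=80.0, ondemand_price=0.10, max_bid=0.25):
    bid = float(bid_percent) / 100 * float(ondemand_price)  # 0.08 here
    return bid if bid <= max_bid else None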
|
[
"ryan@chard.co.nz"
] |
ryan@chard.co.nz
|
8e0efa58c0b98131af15ab586df5ead9463701e1
|
7d8ada540b3e17cd471a26d24a7677fa4cf07f3e
|
/pythonds/test.py
|
bd9aeeedbf1827f8fb768550ad6f8d355c3a3bcc
|
[] |
no_license
|
mfy-royce/51cto1
|
bd155b606cd55ac4c01b79bfdb86cd320ee8df06
|
8e35e1412b08261704fc86074f87725e00b55548
|
refs/heads/master
| 2021-01-14T00:26:44.291036
| 2020-02-23T15:24:03
| 2020-02-23T15:24:03
| 242,541,614
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,683
|
py
|
#!/usr/bin/python
# encoding: utf-8
"""
author:Royce
contact:mfy-111@163.com
@file: test.py
@time: 16:43
@welcome to learn ai
"""
import random
def randomList(n):
iList = []
for i in range(n):
iList.append(random.randrange(0,100))
return iList
def sortVerify(times,length,sortFunc):
# iList = randomList(length)
# sortedList = sortFunc(iList,0,len(iList)-1)
equl =True
for i in range(times):
iList = randomList(length)
refiList = sorted(iList)
sortedList = sortFunc(iList, 0, len(iList) - 1)
print("iList is {},id is {}".format(iList,id(iList)))
print("refiList is {},id is {}".format(refiList, id(refiList)))
print("sortedList is {},id is {}".format(sortedList,id(sortedList)))
if refiList != sortedList:
equl=False
print("{}th is {}".format(i, equl))
break
if equl :
print("verify is ok")
def doubleListRemoveOne(iList):
num = random.choice(iList)
cList = iList.copy()
doubleList = iList * 2
doubleList.remove(num)
return doubleList
def randomZero(iList,n):
for i in range(n):
p = random.randint(0,len(iList))
iList.insert(p,0)
def randomMatrix(n):
Matrix = []
for i in range(n):
Matrix.append(randomList(n))
return Matrix
def printMatrix(matrix):
for i in range(len(matrix)):
print(matrix[i])
def printLink(head):
p = head
while p != None :
if p != head:
print("name {},index {},count {}".format(p[4], p[0], p[1]))
p =p[3]
def showLink(head):
p=head.next
while p != None:
if p.next != None:
print("{}-->".format(p.val),end=" ")
else:
print("{}".format(p.val))
p= p.next
class ListNode:
def __init__(self,x,next = None):
self.val = x
self.next = next
def creatLink(iList):
head = ListNode(None)
end = head
for i in iList:
end.next = ListNode(i)
end = end.next
return head
def creatCycleLink(iList):
position = random.choice(range(-1,len(iList)))
if position == -1 :
head = creatLink(iList)
return head,position,None
head = ListNode(None)
end = head
for i,num in enumerate(iList):
end.next = ListNode(num)
end = end.next
if i == position:
crossNode = end
end.next = crossNode
return head,position,end
def showCyscleLink(head,position ,end):
if position ==-1:
showLink(head)
return
p = head.next
while True:
if p != end:
print("{}-->".format(p.val), end=" ")
else:
print("{}".format(p.val))
break
p = p.next
print("end connect {},position is {}".format(p.next.val,position))
class BinaryTreeNode():
def __init__(self,val,left = None,right = None):
self.val= val
self.left =left
self.right = right
def creatBinaryTree(iList,root=0):
if root >=len(iList):
return None
if iList[root] == None:
return None
leftRoot = creatBinaryTree(iList,root*2+1)
rightRoot = creatBinaryTree(iList, root * 2 + 2)
root = BinaryTreeNode(iList[root],leftRoot,rightRoot)
return root
def showBinaryTree(root,count):
if root ==None:
return count-1
print("node {} is {}".format(count,root.val))
count = showBinaryTree(root.left,count+1)
count = showBinaryTree(root.right,count+1)
return count
def showBinaryTreeByLevel(root):
treeLevel = [[root]]
nextLevel = True
while nextLevel:
nextLevel= False
level = []
for i in treeLevel[-1]:
if i != None:
level.append(i.left)
level.append(i.right)
else:
level.append(None)
level.append(None)
continue
if i.left != None or i.right != None:
nextLevel = True
treeLevel.append(level)
# tree = []
# nodeNum =
for i,level in enumerate(treeLevel):
print("level {} is ".format(i),end="")
for node in level:
if node !=None:
print("{} ".format(node.val),end="")
else:
print("{} ".format(node), end="")
print("")
if __name__ == "__main__":
iList = randomList(5)
print(iList)
randomZero(iList,5)
print(iList)
printMatrix(randomMatrix(3))
root = creatBinaryTree([3,9,20,None,None,15,7])
showBinaryTree(root,0)
root = creatBinaryTree([3,9,20,15,None,15,7])
showBinaryTree(root,0)
showBinaryTreeByLevel(root)
|
[
"mfy-111@163.com"
] |
mfy-111@163.com
|
c7b04e2e02ff27de0681a5e583a172e9a2e935fb
|
69c882c678103b182988fb60d3e898d569980f1c
|
/Day 6/day6prog1.py
|
0955d6850a007bffc3233d9711192a9943ab85cc
|
[] |
no_license
|
gittygupta/stcet-python
|
44be9d91cdd6215879d9f04497214819228821be
|
e77456172746ee76b6e2a901ddb0c3dbe457f82a
|
refs/heads/master
| 2022-03-05T11:37:08.720226
| 2019-12-01T00:56:03
| 2019-12-01T00:56:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 288
|
py
|
x = int(input("Enter number:"))  # input() returns a string, so convert it before using it as a count
product = 1
def fact(x):
    # iterative factorial
    pro = 1
    for i in range(1, x + 1):
        pro *= i
    return pro
def factrec(x):
    # recursive factorial; x <= 1 is the base case, so 0 and 1 both return 1
    product = 1
    if x <= 1:
        return 1
    product = product * x * factrec(x - 1)
    return product
product = factrec(x)
print(product)
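# Quick illustrative self-check: the recursion should agree with the iterative
# fact() defined above (5! = 120).
assert fact(5) == factrec(5) == 120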
|
[
"noreply@github.com"
] |
gittygupta.noreply@github.com
|
464271ebe2e0dd530ebb1d568ce07035e5ea6033
|
5261e3c72259991fbdb9737c4c764eb0686860d3
|
/packages/fetchai/skills/simple_data_request/__init__.py
|
e5ceab9b030383f3520c9ad6b711ecfb8cad4d52
|
[
"Apache-2.0"
] |
permissive
|
eorituz/agents-aea
|
45dfb9729718421290c71da91ac4c51f9cc6a608
|
197451196728141a27ec73fd8210c05cb74501f7
|
refs/heads/main
| 2023-03-24T02:40:27.132664
| 2021-03-23T14:42:58
| 2021-03-23T14:42:58
| 350,744,268
| 0
| 0
|
Apache-2.0
| 2021-03-23T14:40:13
| 2021-03-23T14:32:29
| null |
UTF-8
|
Python
| false
| false
| 991
|
py
|
# -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
#
# Copyright 2018-2019 Fetch.AI Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ------------------------------------------------------------------------------
"""This module contains the implementation of the simple_data_request skill."""
from aea.configurations.base import PublicId
PUBLIC_ID = PublicId.from_str("fetchai/simple_data_request:0.8.0")
|
[
"david.minarsch@googlemail.com"
] |
david.minarsch@googlemail.com
|
3af579131f3adede67cd81132bc2b423f4c7162d
|
ba8f410097500a9325714527507af8b567fb7716
|
/WEEK_2/String/program5.py
|
76c1a4a5d90ebd4283cbc6aa4b9c7854de8b700b
|
[] |
no_license
|
GaikwadHarshad/ML-FellowShip-Program
|
56b27434dc738273ac04ba9b0056ff45d871ff90
|
a702db1a726b7c404a4e2dbc39ea336d148e0b28
|
refs/heads/master
| 2020-04-26T04:24:59.883661
| 2019-06-20T11:13:38
| 2019-06-20T11:13:38
| 173,301,157
| 0
| 0
| null | 2019-06-20T11:13:39
| 2019-03-01T12:50:34
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 1,918
|
py
|
""" Write a Python function that takes a list of words and returns the length of the longest one. """
from myprograms.Utility import UtilityDS
class String5:
str1 = []
# perform operation on string
def long_word(self):
while 1:
print("--------------------------------------------------")
print("1.Create List of String""\n""2.Length of longest word.""\n""3.Exit")
try:
choice = int(input("Enter choice :"))
# validate choice
ch = UtilityDS.validate_num(choice)
if ch:
if choice == 1:
print("we are creating list : ")
# number of element to add
element = int(input("How many element you want to add: "))
# validating the number
e = UtilityDS.validate_num(element)
if e:
# if valid then create list
self.str1 = UtilityDS.create_list_all(element)
print("List is created : ", self.str1)
elif choice == 2:
if self.str1.__len__() < 1:
print("Enter string first")
else:
# getting longest word from given list of string
longest = UtilityDS.get_long_word(self.str1)
print("Longest word in list of string is : ", longest)
elif choice == 3:
exit()
else:
print("Invalid choice")
else:
print("Enter choice between 1 - 3")
except Exception as e:
print(e)
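# Standalone version of the task described in the module docstring, independent
# of the UtilityDS helpers (which are not shown here): the length of the longest
# word in a list of words.
def longest_word_length(words):
    return max(len(word) for word in words) if words else 0
# e.g. longest_word_length(["one", "three", "seven"]) == 5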
# instantiation
String5_object = String5()
String5_object.long_word()
|
[
"mr.gaikwad0605@gmail.com"
] |
mr.gaikwad0605@gmail.com
|
597c3fd526e4cd2ec3c0819d9e04516a311d4311
|
a84eeba8ab8ff4711253defa9e63abca130de84b
|
/simple_solution_api.py
|
14fc516248b03322926ab9ef041d40540f85dfba
|
[] |
no_license
|
t-pleasure/productcloud
|
6da2d5eacd8c667034ca91cf426291aec6328835
|
29dd6725df052387eba1c3081fcd973c105a50b5
|
refs/heads/master
| 2016-09-14T02:19:29.577976
| 2016-05-24T17:03:25
| 2016-05-24T17:03:25
| 59,478,722
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,005
|
py
|
#!/usr/bin/python
"""
STAND ALONE SCRIPT TO RUN SINGLE PROCESS SOLUTION.
"""
import json, os, threading
import amazon_products.product_status as pstatus
from amazon_products.product import Product
from data_services.mapstore import KVDiskStore, WordCountDisk
from data_services.queue import KafkaTopicQueue
from algos.heaps import MaxCapacityPQDict
from urllib3 import PoolManager
from flask import Flask, request
app = Flask(__name__)
# DataStore to persist information about status of products and if we've processed them yet
product_status_db = KVDiskStore("product_status_SINGLE",
serialize=lambda p:p.tojson(),
deserialize=pstatus.json2product)
# DataStore to persist product_id -> word_counts
product_worddata_db = KVDiskStore("product_data_SINGLE",
serialize=json.dumps,
deserialize=json.loads)
# Datastore to persist ALL WORD COUNTS
global_wordcount_db = WordCountDisk("global_word_count_SINGLE")
# file to persist top k elements to
persist_top_k_file = None
# MinHeap containing top k most words
K = 100 # default value for K
top_words = MaxCapacityPQDict(K)
global_wordcount_lock = threading.Lock()
def increment_global_wordcount(word, inc_amount):
with global_wordcount_lock:
new_amt = global_wordcount_db.increment(word, inc_amount)
return new_amt
# helper to associate a product id with one of a fixed pool of locks
NLOCKS = 100 # default number of locks
pidlocks = [threading.Lock() for _ in range(NLOCKS)]
def pid2lock(pid):
return pidlocks[hash(pid)%NLOCKS]
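# Minimal illustration of the striping above (the id below is made up, not part
# of the app): the same product id always maps to the same lock, so work on one
# product is serialized, while unrelated products only rarely share a lock.
def _lock_striping_example():
    a = pid2lock("B00EXAMPLE")
    b = pid2lock("B00EXAMPLE")
    assert a is b  # same pid -> same lock
    # a hash collision between different pids only adds a little waiting,
    # never a correctness problem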
#################
# ROUTES # #
#################
@app.route('/', methods=["GET","POST"])
def default():
# if product_url is not specified, simply return the top k words
if "product_url" not in request.args:
return top_words.tojson()
# extract product id from request
purl = request.args['product_url']
pid = Product.url_to_productid(purl)
## critical section:
## lock the following block based on the product id to avoid
## potential race conditions when handling concurrent requests for the same product.
## this ensures that only one thread can be processing a given id at a time
with pid2lock(pid):
# check state to see whether or not product_id has been processed or is being processed
status = product_status_db.get(pid)
if status and status.type in [pstatus.Types.PROCESSING, pstatus.Types.COMPLETED, pstatus.Types.INVALID]:
return json.dumps({"pid": pid,
"status": status.type,
"current_words": dict(top_words.items())})
# if product id is not valid display appropriate message and record in database
if not Product.isvalid_pid(pid):
product_status_db.put(pid, pstatus.InvalidStatus(pid))
return json.dumps({"pid": pid,
"status": pstatus.Types.INVALID,
"current_words": dict(top_words.items())})
# Change state of datastore to indicate this product is currently being processed
product_status_db.put(pid, pstatus.ProcessingStatus(pid))
# obtain product description
product = Product.fetch_product(pid)
# obtain word count for product description
wcount = product.wordcounts
# persist word count for product description
product_worddata_db.put(pid, wcount)
# update global word counts
for (word, inc_amt) in wcount.items():
new_amt = increment_global_wordcount(word, inc_amt)
top_words.pushOrUpdate(word, new_amt)
# update status for product to indicate completion
product_status_db.put(pid, pstatus.CompletedStatus(pid))
# persist top_k words
if persist_top_k_file:
with open(persist_top_k_file, 'w') as f:
f.write(top_words.tojson())
return json.dumps({"pid": pid,
"status": pstatus.Types.COMPLETED,
"current_words": dict(top_words.items())})
@app.route('/product_status')
def info():
return str(product_status_db.items())
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--port', action="store", default=9999, dest='port', help="port to bind to", type=int)
parser.add_argument('--k', action="store", default=100, dest='k', help="number of top largest words to find", type=int)
parser.add_argument('--top-k-file', action="store", default=None, dest='top_k_file', help="file to persist top k largest words")
parser.add_argument('--n-locks', action="store", default=100, dest='n_locks', help="number of locks to have for coordinating product parsing")
parser.add_argument('--debug', action="store", default=False, dest='debug', help="debug flag", type=bool)
args = parser.parse_args()
# update global variables
persist_top_k_file = args.top_k_file
K = args.k
NLOCKS = args.n_locks
# compute top_words
top_words = MaxCapacityPQDict(K)
for (w,c) in global_wordcount_db.items():
top_words.pushOrUpdate(w,c)
# app settings
app.debug = args.debug
app.run(port=args.port)
|
[
"totran@Tonys-MBP-2.lan"
] |
totran@Tonys-MBP-2.lan
|
d432973d55120799261bd375528ec13c063740f9
|
9d19a6b00be95c92f3e32fff51c90ab2e2a76293
|
/chat/tests.py
|
276d4fd1effc528efd993757b116f2af42f55b2e
|
[] |
no_license
|
TobKed/django-channels-chat
|
70d5892dedac44179e3366d5423aa763383aa753
|
f247c3e7195619906bdf7f67680a712cc17e5cf8
|
refs/heads/master
| 2022-12-10T11:19:42.444003
| 2018-12-30T15:12:32
| 2018-12-30T15:12:32
| 163,593,656
| 0
| 0
| null | 2022-12-08T01:30:26
| 2018-12-30T14:32:49
|
Python
|
UTF-8
|
Python
| false
| false
| 3,665
|
py
|
from channels.testing import ChannelsLiveServerTestCase
from selenium import webdriver
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.support.wait import WebDriverWait
from webdriver_manager.chrome import ChromeDriverManager
class ChatTests(ChannelsLiveServerTestCase):
serve_static = True # emulate StaticLiveServerTestCase
@classmethod
def setUpClass(cls):
super().setUpClass()
try:
# NOTE: Requires "chromedriver" binary to be installed in $PATH
cls.driver = webdriver.Chrome(ChromeDriverManager().install())
except:
super().tearDownClass()
raise
@classmethod
def tearDownClass(cls):
cls.driver.quit()
super().tearDownClass()
def test_when_chat_message_posted_then_seen_by_everyone_in_same_room(self):
try:
self._enter_chat_room('room_1')
self._open_new_window()
self._enter_chat_room('room_1')
self._switch_to_window(0)
self._post_message('hello')
WebDriverWait(self.driver, 2).until(lambda _:
'hello' in self._chat_log_value,
'Message was not received by window 1 from window 1')
self._switch_to_window(1)
WebDriverWait(self.driver, 2).until(lambda _:
'hello' in self._chat_log_value,
'Message was not received by window 2 from window 1')
finally:
self._close_all_new_windows()
def test_when_chat_message_posted_then_not_seen_by_anyone_in_different_room(self):
try:
self._enter_chat_room('room_1')
self._open_new_window()
self._enter_chat_room('room_2')
self._switch_to_window(0)
self._post_message('hello')
WebDriverWait(self.driver, 2).until(lambda _:
'hello' in self._chat_log_value,
'Message was not received by window 1 from window 1')
self._switch_to_window(1)
self._post_message('world')
WebDriverWait(self.driver, 2).until(lambda _:
'world' in self._chat_log_value,
'Message was not received by window 2 from window 2')
self.assertTrue('hello' not in self._chat_log_value,
'Message was improperly received by window 2 from window 1')
finally:
self._close_all_new_windows()
# === Utility ===
def _enter_chat_room(self, room_name):
self.driver.get(self.live_server_url + '/chat/')
ActionChains(self.driver).send_keys(room_name + '\n').perform()
WebDriverWait(self.driver, 2).until(lambda _:
room_name in self.driver.current_url)
def _open_new_window(self):
self.driver.execute_script('window.open("about:blank", "_blank");')
self.driver.switch_to_window(self.driver.window_handles[-1])
def _close_all_new_windows(self):
while len(self.driver.window_handles) > 1:
self.driver.switch_to_window(self.driver.window_handles[-1])
self.driver.execute_script('window.close();')
if len(self.driver.window_handles) == 1:
self.driver.switch_to_window(self.driver.window_handles[0])
def _switch_to_window(self, window_index):
self.driver.switch_to_window(self.driver.window_handles[window_index])
def _post_message(self, message):
ActionChains(self.driver).send_keys(message + '\n').perform()
@property
def _chat_log_value(self):
return self.driver.find_element_by_css_selector('#chat-log').get_property('value')
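    # Compatibility sketch (not wired into the tests above): Selenium 4 removed
    # driver.switch_to_window() and find_element_by_css_selector(); against a
    # newer driver the equivalent calls look like this.
    def _switch_to_window_selenium4(self, window_index):
        self.driver.switch_to.window(self.driver.window_handles[window_index])

    def _chat_log_value_selenium4(self):
        from selenium.webdriver.common.by import By
        return self.driver.find_element(By.CSS_SELECTOR, '#chat-log').get_property('value')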
|
[
"tobiaszkedzierski@gmail.com"
] |
tobiaszkedzierski@gmail.com
|
07284a6e486092fdaa4633f81f08fb768c68f944
|
88a8bec01a7f6631714f19a896a715fcee830552
|
/KKK/members/urls.py
|
04f043f97d044c9cff033046b6ec6ddae50827a5
|
[] |
no_license
|
ajeethkumar-2/ImageBlogStateful
|
30a850ed11dadc186bed5d641d0d9f32f8fdc554
|
2175acaae8cd625300624078387c575534bc3e65
|
refs/heads/master
| 2022-12-26T09:50:00.639256
| 2020-10-08T11:53:12
| 2020-10-08T11:53:12
| 302,323,938
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 626
|
py
|
from django.urls import path
from .views import *
urlpatterns = [
path('register', UserRegistration.as_view(), name='register'),
path('edit_login_settings', EditLoginSettings.as_view(), name='edit_login_settings'),
path('password', ChangePassword.as_view(), name='password'),
path('password_success', password_success, name='password_success'),
path('<int:pk>/user_profile', UserProfile.as_view(), name='user_profile'),
path('<int:pk>/edit_user_profile', EditUserProfile.as_view(), name='edit_user_profile'),
path('create_user_profile', CreateUserProfile.as_view(), name='create_user_profile'),
]
|
[
"ajeeethkumar.skr@gmail.com"
] |
ajeeethkumar.skr@gmail.com
|
43e2989d39a42e37d7741d5ca8c6153862f4876d
|
37be91337af68767906a776aaaf8ab2106f8e6a9
|
/dataAPI/dictionary/migrations/0003_auto_20200221_0958.py
|
c6e5566c0b15ea834670bb138399b352fd85f070
|
[] |
no_license
|
KelongZ/mysite
|
7962d64d202e46638e7e8647ddedfcc4855db040
|
e8b507d2c10cf3f7a947bcd3c951103f6db9eaf7
|
refs/heads/master
| 2020-12-08T14:29:44.320592
| 2020-03-31T11:25:14
| 2020-03-31T11:25:14
| 233,005,879
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 396
|
py
|
# Generated by Django 3.0.2 on 2020-02-21 01:58
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('dictionary', '0002_configdata'),
]
operations = [
migrations.AlterField(
model_name='configsrc',
name='port',
field=models.IntegerField(verbose_name='端口号'),
),
]
|
[
"a313974633@163.com"
] |
a313974633@163.com
|
9e92bb4d6e710b562df16db9e0988f4b6e21ed35
|
557cebc2ca2d462bfed042e3d47f25549c7fdb47
|
/apps/ideas/migrations/0005_remove_idea_category.py
|
5a7aab7fe3a932594854d474ba86bd704b521ab0
|
[] |
no_license
|
jmshulett/django_web_development
|
b1964192025476077ca5d8ae12b66928a844ceea
|
3cb311f55624a97ae72702aa28d18aa289336fe1
|
refs/heads/main
| 2023-08-15T02:17:14.011696
| 2021-09-18T21:59:12
| 2021-09-18T21:59:12
| 407,973,834
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 324
|
py
|
# Generated by Django 3.0.14 on 2021-09-18 17:11
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('ideas', '0004_copy_categories'),
]
operations = [
migrations.RemoveField(
model_name='idea',
name='category',
),
]
|
[
"jphulett1@buffS.wtamu.edu"
] |
jphulett1@buffS.wtamu.edu
|
b38c937fd5d4516fab51a890b7ef25abaeca0505
|
36cc6e08580c8085f93555cb74a3f3ae0bca87a5
|
/day8.0.py
|
2a506fa09d372dcf1d1f7ce0df63e86d3d2f9737
|
[
"MIT"
] |
permissive
|
dp1/AoC17
|
262074a46a6d0675e54b7422b9046d130b4c489f
|
7253c34f8d061144534ae21ff4310bce2fdfb5e0
|
refs/heads/master
| 2021-09-01T06:33:35.657942
| 2017-12-25T10:58:42
| 2017-12-25T10:58:42
| 112,944,220
| 4
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 772
|
py
|
with open("day8.txt", "r") as fin:
data = fin.read().strip().split('\n')
regs = {}
for l in data:
ll = l.split(' ')
reg, op, val, target, cmp, cval = ll[0], ll[1], int(ll[2]), ll[4], ll[5], int(ll[6])
if reg not in regs:
regs[reg] = 0
if target not in regs:
regs[target] = 0
ok = False
if cmp == '<' and regs[target] < cval:
ok = True
if cmp == '>' and regs[target] > cval:
ok = True
if cmp == '<=' and regs[target] <= cval:
ok = True
if cmp == '>=' and regs[target] >= cval:
ok = True
if cmp == '==' and regs[target] == cval:
ok = True
if cmp == '!=' and regs[target] != cval:
ok = True
if ok:
if op == 'inc':
regs[reg] += val
else:
regs[reg] -= val
print(max(regs.items(), key=lambda x: x[1]))
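# The six comparison if-statements above can be collapsed into a lookup table of
# operator functions; shown here as an illustrative alternative, not used by the
# loop above.
import operator
CMPS = {'<': operator.lt, '>': operator.gt, '<=': operator.le,
        '>=': operator.ge, '==': operator.eq, '!=': operator.ne}
# ok = CMPS[cmp](regs[target], cval)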
|
[
"dario.pk1@gmail.com"
] |
dario.pk1@gmail.com
|
6f8bddcfb3703b2be846835c4d7059aeb3b7b713
|
13c34615485f841cc3522dde99ccf5090da9a4d6
|
/tests/unit_tests/networks/tf/object_classification/__init__.py
|
db5aa25af4bc14c6f17e9ac6d263f68010c54af2
|
[] |
no_license
|
sallamander/dl-playground
|
3e2dcd88ec3beae67401511912f5a1d202842525
|
e05b2a15dd2925fca5206c2509e1da29c1806834
|
refs/heads/master
| 2021-06-12T13:52:48.302728
| 2019-05-29T15:54:34
| 2019-10-14T19:00:46
| 142,235,475
| 5
| 1
| null | 2019-08-28T21:32:21
| 2018-07-25T02:15:21
|
Python
|
UTF-8
|
Python
| false
| false
| 67
|
py
|
"""Unit tests for applications.tf.object_classification modules"""
|
[
"noreply@github.com"
] |
sallamander.noreply@github.com
|
6d0b522024a5885efbca8ba209c333c47ef8641e
|
bab4c9bebbfadd76c11dd022452c916bf4a3ad75
|
/analysis/region graph.py
|
507d00fa5c548263e0f275a5488e22fb2144e6ef
|
[] |
no_license
|
Y-Hyehye/project_safecovid
|
81aeab383d9674f10fc82aeb59fafe4f8215b8f1
|
d81795df47e90d1905d65c89dd144fe15d3f7e5a
|
refs/heads/master
| 2023-02-15T18:54:25.515100
| 2021-01-18T14:46:33
| 2021-01-18T14:46:33
| 330,680,888
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 32,204
|
py
|
import urllib.request
import json
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import xmltodict
from google.cloud import storage
from firebase import firebase
import os
from os import path
# Apply a Korean font so the Hangul labels render correctly
from matplotlib import font_manager, rc
import matplotlib
font_location=r"C:\Windows\Fonts\malgun.ttf"  # raw string so the backslashes are kept literally
font_name=font_manager.FontProperties(fname=font_location).get_name()
matplotlib.rc('font',family=font_name)
# Helper functions: round a natural number down/up around the hundreds place
def rounddown(val) :
val = int(val / 100) * 100
return val
def roundup(val) :
val = val + 90
val = int(val / 10) * 10
return val
# Helper functions: round a natural number down/up at the ones place (to the nearest ten)
def unit_down(val) :
val = int(val / 10) * 10
return val
def unit_up(val) :
val = val + 9
val = int(val / 10) * 10
return val
# Helper function: round up at the thousands place
def step_up(val) :
val = val + 900
val = int(val / 1000) * 1000
return val
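# Worked examples of the helpers above (illustrative sanity checks only):
assert rounddown(1234) == 1200   # floor to the hundreds
assert roundup(1234) == 1320     # add 90, then floor to the tens
assert unit_down(1234) == 1230   # floor to the tens
assert unit_up(1234) == 1240     # add 9, then floor to the tens
assert step_up(1234) == 2000     # add 900, then floor to the thousands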
# Fetch the COVID-19 confirmed-case status by city/province from the public OpenAPI
url='http://openapi.data.go.kr/openapi/service/rest/Covid19/getCovid19SidoInfStateJson?serviceKey=pg8wihCfmYf9Euwqa0CoiZfui3IMOfk1kliZfV46KSIm3pDqCHQZBNVRyrNCGvbPYUYwsCmB5ULMPvlH2aT4Ag%3D%3D&pageNo=1&numOfRows=&startCreateDt=&endCreateDt=&'
request = urllib.request.Request(url)
response = urllib.request.urlopen(request)
rescode = response.getcode()
response_body = response.read()
data=response_body.decode('utf-8')
dict_data = xmltodict.parse(data)
json_data = json.dumps(dict_data)
result = json.loads(json_data)
rdata=result['response']['body']['items']['item']
# Wrap the response in a DataFrame (df)
df=pd.DataFrame(rdata)
# Preprocess the data (keep only the columns we need)
df_safe=df[['gubun','defCnt','localOccCnt','overFlowCnt','deathCnt','incDec','isolClearCnt','qurRate','createDt']]
df_safe=df_safe.fillna(0)
df_safe['gubun']=df_safe['gubun'].astype(str)
df_safe['createDt']=df_safe['createDt'].astype(str)
df_safe=df_safe[df_safe.gubun != '검역']
df_safe=df_safe[df_safe.gubun != '합계']
# Normalize the date values
date=[]
for one in df_safe['createDt']:
date.append(one[0:10])
df_safe['createDt']=date
df_safe['createDt'] = df_safe['createDt'].str.replace(pat=r'[-]', repl= r'', regex=True)
df_safe['qurRate'] = df_safe['qurRate'].str.replace(pat=r'[-]', repl= r'0', regex=True)
# Type conversions
df_safe['defCnt']=df_safe['defCnt'].astype(int)
df_safe['localOccCnt']=df_safe['localOccCnt'].astype(int)
df_safe['overFlowCnt']=df_safe['overFlowCnt'].astype(int)
df_safe['deathCnt']=df_safe['deathCnt'].astype(int)
df_safe['incDec']=df_safe['incDec'].astype(int)
df_safe['isolClearCnt']=df_safe['isolClearCnt'].astype(int)
df_safe['qurRate']=df_safe['qurRate'].astype(float)
df_safe['createDt']=df_safe['createDt'].astype(int)
# Split the DataFrame into 10 regional DataFrames
df_safe_seoul=df_safe[df_safe['gubun'].str.contains('서울')]
df_safe_gg=df_safe[df_safe['gubun'].str.contains('경기|인천')]
df_safe_chungnam=df_safe[df_safe['gubun'].str.contains('충남|대전|세종')]
df_safe_chungbuk=df_safe[df_safe['gubun'].str.contains('충북')]
df_safe_jeonnam=df_safe[df_safe['gubun'].str.contains('전남|광주')]
df_safe_jeonbuk=df_safe[df_safe['gubun'].str.contains('전북')]
df_safe_gyeongnam=df_safe[df_safe['gubun'].str.contains('경남|부산|울산')]
df_safe_gb=df_safe[df_safe['gubun'].str.contains('경북|대구')]
df_safe_gangwon=df_safe[df_safe['gubun'].str.contains('강원')]
df_safe_jeju=df_safe[df_safe['gubun'].str.contains('제주')]
# Seoul
# Seoul df preprocessing
df_safe_seoul=df_safe_seoul.groupby('createDt', sort=True).head(1)
df_safe_seoul_week=df_safe_seoul.iloc[::7] # weekly samples (used for the cumulative confirmed-case graph)
df_safe_seoul_month=df_safe_seoul_week.head(5) # five weekly samples, i.e. roughly one month (cumulative graph)
df_safe_seoul_day=df_safe_seoul.head(5) # five daily samples (used for the local-occurrence / imported-case graphs)
df_safe_seoul_month=df_safe_seoul_month.sort_values(by='createDt', ascending=True)
df_safe_seoul_day=df_safe_seoul_day.sort_values(by='createDt', ascending=True)
df_safe_seoul_month['createDt']=df_safe_seoul_month['createDt'].astype(str)
# Adjust the yticks for the Seoul cumulative confirmed-case graph
start_num=df_safe_seoul_month.iloc[0,1]
end_num=df_safe_seoul_month.iloc[-1,1]
start_num=rounddown(start_num)-500
end_num=roundup(end_num)+1000
# Basic figure settings (size)
fig = plt.figure(figsize=(8, 6))
fig.subplots_adjust(left=0.1, right=0.9, top=0.94, bottom=0.1)
fig.patch.set_alpha(0)
ax = fig.add_subplot()
# Plot the Seoul cumulative confirmed-case graph
line_plot=ax.plot(df_safe_seoul_month.createDt,df_safe_seoul_month['defCnt'], color='#FF5A5A', linewidth=2, marker='o',markersize=5, alpha=.75 )
line_plot=line_plot[0]
plt.yticks(np.arange(start_num, end_num, step=1000))
plt.xlabel('날짜 (단위: 주)')
plt.ylabel('명', position=(0,1.05), verticalalignment='top', horizontalalignment='left', rotation='horizontal')
for coord in list(line_plot.get_xydata()):
ax.text(coord[0],coord[1]+100 ,f'{int(coord[1])}',fontsize=11, ha='center', color='#FF5A5A')
plt.savefig('images/fig_seoul_1.png')
plt.clf()
# Adjust the yticks for the Seoul local-occurrence / imported-case graph
df_safe_seoul_day_c=df_safe_seoul_day.sort_values(by='localOccCnt', ascending=True)
end_num_idx=df_safe_seoul_day_c['localOccCnt'].idxmax()
end_num_c=(df_safe_seoul_day_c['localOccCnt'][end_num_idx])+50
# Plot the Seoul local-occurrence / imported-case bar graph
x=np.arange(len(df_safe_seoul_day.createDt))
plt.bar(x-0.0, df_safe_seoul_day.localOccCnt, label='지역발생수', width=0.2, color='#ff8a3c', alpha=.75)
plt.bar(x+0.2, df_safe_seoul_day.overFlowCnt, label='해외유입수', width=0.2, color='#3a8d65', alpha=.75)
plt.xticks(x, df_safe_seoul_day.createDt)
plt.yticks(np.arange(0, end_num_c, step=50))
plt.xlabel('날짜 (단위: 일)')
plt.ylabel('명', position=(0,1.05), verticalalignment='top', horizontalalignment='left', rotation='horizontal')
plt.legend()
plt.savefig('images/fig_seoul_2.png')
plt.clf()
# 경기도
# 경기도 df 전처리
df_safe_gg=df_safe_gg.groupby('createDt', sort=True).head(1)
df_safe_gg_week=df_safe_gg.iloc[::7] # 일주일 단위로 데이터 추출 (누적 확진자수 그래프에 사용)
df_safe_gg_month=df_safe_gg_week.head(5) # 일주일 단위로 한달(5주) 데이터 추출 (누적 확진자수 그래프에 사용)
df_safe_gg_day=df_safe_gg.head(5) # 하루 단위로 5일 데이터 추출 (지역발생수, 해외유입수 그래프에 사용)
df_safe_gg_month=df_safe_gg_month.sort_values(by='createDt', ascending=True)
df_safe_gg_day=df_safe_gg_day.sort_values(by='createDt', ascending=True)
df_safe_gg_month['createDt']=df_safe_gg_month['createDt'].astype(str)
# 경기도 확진자수 그래프 yticks 값 조정
start_num=df_safe_gg_month.iloc[0,1]
end_num=df_safe_gg_month.iloc[-1,1]
start_num=roundup(start_num)-100
end_num=rounddown(end_num)+800
# 경기도 확진자수 그래프 출력
ax = fig.add_subplot()
line_plot=ax.plot(df_safe_gg_month.createDt,df_safe_gg_month['defCnt'], color='#FF5A5A', alpha=.75, linewidth=2, marker='o',markersize=5)
line_plot=line_plot[0]
plt.yticks(np.arange(start_num, end_num, step=800))
plt.xlabel('날짜 (단위: 주)')
plt.ylabel('명', position=(0,1.05), verticalalignment='top', horizontalalignment='left', rotation='horizontal')
for coord in list(line_plot.get_xydata()):
ax.text(coord[0],coord[1]+68 ,f'{int(coord[1])}',fontsize=11, ha='center', color='#FF5A5A')
plt.savefig('images/fig_gg_1.png', dpi=300)
plt.clf()
# 경기도 지역발생수, 해외유입수 그래프 yticks 값 조정
df_safe_gg_day_c=df_safe_gg_day.sort_values(by='localOccCnt', ascending=True)
end_num_idx=df_safe_gg_day_c['localOccCnt'].idxmax()
end_num_c=df_safe_gg_day_c['localOccCnt'][end_num_idx]+50
# 경기도 지역발생수, 해외유입수 그래프 출력
x=np.arange(len(df_safe_gg_day.createDt))
plt.bar(x-0.0, df_safe_gg_day.localOccCnt, label='지역발생수', width=0.2, color='#ff8a3c', alpha=.75)
plt.bar(x+0.2, df_safe_gg_day.overFlowCnt, label='해외유입수', width=0.2, color='#3a8d65', alpha=.75)
plt.xticks(x, df_safe_gg_day.createDt)
plt.yticks(np.arange(0, end_num_c, step=40))
plt.xlabel('날짜 (단위: 일)')
plt.ylabel('명', position=(0,1.05), verticalalignment='top', horizontalalignment='left', rotation='horizontal')
plt.legend()
plt.savefig('images/fig_gg_2.png', dpi=300)
plt.clf()
# 충청남도
# 충청남도 df 전처리
df_safe_chungnam=df_safe_chungnam.groupby('createDt', sort=True).head(1)
df_safe_chungnam_week=df_safe_chungnam.iloc[::7] # 일주일 단위로 데이터 추출 (누적 확진자수 그래프에 사용)
df_safe_chungnam_month=df_safe_chungnam_week.head(5) # 일주일 단위로 한달(5주) 데이터 추출 (누적 확진자수 그래프에 사용)
df_safe_chungnam_day=df_safe_chungnam.head(5) # 하루 단위로 5일 데이터 추출 (지역발생수, 해외유입수 그래프에 사용)
df_safe_chungnam_month=df_safe_chungnam_month.sort_values(by='createDt', ascending=True)
df_safe_chungnam_day=df_safe_chungnam_day.sort_values(by='createDt', ascending=True)
df_safe_chungnam_month['createDt']=df_safe_chungnam_month['createDt'].astype(str)
# 충청남도 확진자수 그래프 yticks 값 조정
start_num=df_safe_chungnam_month.iloc[0,1]
end_num=df_safe_chungnam_month.iloc[-1,1]
start_num=roundup(start_num)-100
end_num=roundup(end_num)
# 충청남도 확진자수 그래프 출력
ax = fig.add_subplot()
line_plot=ax.plot(df_safe_chungnam_month.createDt,df_safe_chungnam_month['defCnt'], color='#FF5A5A', alpha=.75, linewidth=2, marker='o',markersize=5)
line_plot=line_plot[0]
plt.yticks(np.arange(start_num, end_num, step=100))
plt.xlabel('날짜 (단위: 주)')
plt.ylabel('명', position=(0,1.05), verticalalignment='top', horizontalalignment='left', rotation='horizontal')
for coord in list(line_plot.get_xydata()):
ax.text(coord[0],coord[1]+9 ,f'{int(coord[1])}',fontsize=11, ha='center', color='#FF5A5A')
plt.savefig('images/fig_chungnam_1.png', dpi=300)
plt.clf()
# 충청남도 지역발생수, 해외유입수 그래프 yticks 값 조정
df_safe_chungnam_day_c=df_safe_chungnam_day.sort_values(by='localOccCnt', ascending=True)
end_num_idx=df_safe_chungnam_day_c['localOccCnt'].idxmax()
end_num_c=df_safe_chungnam_day_c['localOccCnt'][end_num_idx]+10
# 충청남도 지역발생수, 해외유입수 그래프 출력
x=np.arange(len(df_safe_chungnam_day.createDt))
plt.bar(x-0.0, df_safe_chungnam_day.localOccCnt, label='지역발생수', width=0.2, color='#ff8a3c', alpha=.75)
plt.bar(x+0.2, df_safe_chungnam_day.overFlowCnt, label='해외유입수', width=0.2, color='#3a8d65', alpha=.75)
plt.xticks(x, df_safe_chungnam_day.createDt)
plt.yticks(np.arange(0, end_num_c, step=5))
plt.xlabel('날짜 (단위: 일)')
plt.ylabel('명', position=(0,1.05), verticalalignment='top', horizontalalignment='left', rotation='horizontal')
plt.legend()
plt.savefig('images/fig_chungnam_2.png', dpi=300)
plt.clf()
# 충청북도
# 충청북도 df 전처리
df_safe_chungbuk=df_safe_chungbuk.groupby('createDt', sort=True).head(1)
df_safe_chungbuk_week=df_safe_chungbuk.iloc[::7] # 일주일 단위로 데이터 추출 (누적 확진자수 그래프에 사용)
df_safe_chungbuk_month=df_safe_chungbuk_week.head(5) # 일주일 단위로 한달(5주) 데이터 추출 (누적 확진자수 그래프에 사용)
df_safe_chungbuk_day=df_safe_chungbuk.head(5) # 하루 단위로 5일 데이터 추출 (지역발생수, 해외유입수 그래프에 사용)
df_safe_chungbuk_month=df_safe_chungbuk_month.sort_values(by='createDt', ascending=True)
df_safe_chungbuk_day=df_safe_chungbuk_day.sort_values(by='createDt', ascending=True)
df_safe_chungbuk_month['createDt']=df_safe_chungbuk_month['createDt'].astype(str)
# 충청북도 확진자수 그래프 yticks 값 조정
start_num=df_safe_chungbuk_month.iloc[0,1]
end_num=df_safe_chungbuk_month.iloc[-1,1]
start_num=unit_down(start_num)
end_num=unit_up(end_num)+50
# 충청북도 확진자수 그래프 출력
ax = fig.add_subplot()
line_plot=ax.plot(df_safe_chungbuk_month.createDt,df_safe_chungbuk_month['defCnt'], color='#FF5A5A', alpha=.75, linewidth=2, marker='o',markersize=5)
line_plot=line_plot[0]
plt.yticks(np.arange(start_num, end_num, step=50))
plt.xlabel('날짜 (단위: 주)')
plt.ylabel('명', position=(0,1.05), verticalalignment='top', horizontalalignment='left', rotation='horizontal')
for coord in list(line_plot.get_xydata()):
ax.text(coord[0],coord[1]+6.3 ,f'{int(coord[1])}',fontsize=11, ha='center', color='#FF5A5A')
plt.savefig('images/fig_chungbuk_1.png', dpi=300)
plt.clf()
# 충청북도 지역발생수, 해외유입수 그래프 yticks 값 조정
df_safe_chungbuk_day_c=df_safe_chungbuk_day.sort_values(by='localOccCnt', ascending=True)
end_num_idx=df_safe_chungbuk_day_c['localOccCnt'].idxmax()
end_num_c=df_safe_chungbuk_day_c['localOccCnt'][end_num_idx]+5
# 충청북도 지역발생수, 해외유입수 그래프 출력
x=np.arange(len(df_safe_chungbuk_day.createDt))
plt.bar(x-0.0, df_safe_chungbuk_day.localOccCnt, label='지역발생수', width=0.2, color='#ff8a3c', alpha=.75)
plt.bar(x+0.2, df_safe_chungbuk_day.overFlowCnt, label='해외유입수', width=0.2, color='#3a8d65', alpha=.75)
plt.xticks(x, df_safe_chungbuk_day.createDt)
plt.yticks(np.arange(0, end_num_c, step=5))
plt.xlabel('날짜 (단위: 일)')
plt.ylabel('명', position=(0,1.05), verticalalignment='top', horizontalalignment='left', rotation='horizontal')
plt.legend()
plt.savefig('images/fig_chungbuk_2.png', dpi=300)
plt.clf()
# 전라남도
# 전라남도 df 전처리
df_safe_jeonnam=df_safe_jeonnam.groupby('createDt', sort=True).head(1)
df_safe_jeonnam_week=df_safe_jeonnam.iloc[::7] # 일주일 단위로 데이터 추출 (누적 확진자수 그래프에 사용)
df_safe_jeonnam_month=df_safe_jeonnam_week.head(5) # 일주일 단위로 한달(5주) 데이터 추출 (누적 확진자수 그래프에 사용)
df_safe_jeonnam_day=df_safe_jeonnam.head(5) # 하루 단위로 5일 데이터 추출 (지역발생수, 해외유입수 그래프에 사용)
df_safe_jeonnam_month=df_safe_jeonnam_month.sort_values(by='createDt', ascending=True)
df_safe_jeonnam_day=df_safe_jeonnam_day.sort_values(by='createDt', ascending=True)
df_safe_jeonnam_month['createDt']=df_safe_jeonnam_month['createDt'].astype(str)
# 전라남도 확진자수 그래프 yticks 값 조정
start_num=df_safe_jeonnam_month.iloc[0,1]
end_num=df_safe_jeonnam_month.iloc[-1,1]
start_num=unit_down(start_num)
end_num=unit_up(end_num)+60
# 전라남도 확진자수 그래프 출력
ax = fig.add_subplot()
line_plot=ax.plot(df_safe_jeonnam_month.createDt,df_safe_jeonnam_month['defCnt'], color='#FF5A5A', alpha=.75, linewidth=2, marker='o',markersize=5)
line_plot=line_plot[0]
plt.yticks(np.arange(start_num, end_num, step=60))
plt.xlabel('날짜 (단위: 주)')
plt.ylabel('명', position=(0,1.05), verticalalignment='top', horizontalalignment='left', rotation='horizontal')
for coord in list(line_plot.get_xydata()):
ax.text(coord[0],coord[1]+5 ,f'{int(coord[1])}',fontsize=11, ha='center', color='#FF5A5A')
plt.savefig('images/fig_jeonnam_1.png', dpi=300)
plt.clf()
# 전라남도 지역발생수, 해외유입수 그래프 yticks 값 조정
df_safe_jeonnam_day_c=df_safe_jeonnam_day.sort_values(by='localOccCnt', ascending=True)
end_num_idx=df_safe_jeonnam_day_c['localOccCnt'].idxmax()
end_num_c=df_safe_jeonnam_day_c['localOccCnt'][end_num_idx]+4
# 전라남도 지역발생수, 해외유입수 그래프 출력
x=np.arange(len(df_safe_jeonnam_day.createDt))
plt.bar(x-0.0, df_safe_jeonnam_day.localOccCnt, label='지역발생수', width=0.2, color='#ff8a3c', alpha=.75)
plt.bar(x+0.2, df_safe_jeonnam_day.overFlowCnt, label='해외유입수', width=0.2, color='#3a8d65', alpha=.75)
plt.xticks(x, df_safe_jeonnam_day.createDt)
plt.yticks(np.arange(0, end_num_c, step=2))
plt.xlabel('날짜 (단위: 일)')
plt.ylabel('명', position=(0,1.05), verticalalignment='top', horizontalalignment='left', rotation='horizontal')
plt.legend()
plt.savefig('images/fig_jeonnam_2.png', dpi=300)
plt.clf()
# 전라북도
# 전라북도 df 전처리
df_safe_jeonbuk=df_safe_jeonbuk.groupby('createDt', sort=True).head(1)
df_safe_jeonbuk_week=df_safe_jeonbuk.iloc[::7] # 일주일 단위로 데이터 추출 (누적 확진자수 그래프에 사용)
df_safe_jeonbuk_month=df_safe_jeonbuk_week.head(5) # 일주일 단위로 한달(5주) 데이터 추출 (누적 확진자수 그래프에 사용)
df_safe_jeonbuk_day=df_safe_jeonbuk.head(5) # 하루 단위로 5일 데이터 추출 (지역발생수, 해외유입수 그래프에 사용)
df_safe_jeonbuk_month=df_safe_jeonbuk_month.sort_values(by='createDt', ascending=True)
df_safe_jeonbuk_day=df_safe_jeonbuk_day.sort_values(by='createDt', ascending=True)
df_safe_jeonbuk_month['createDt']=df_safe_jeonbuk_month['createDt'].astype(str)
# 전라북도 확진자수 그래프 yticks 값 조정
start_num=df_safe_jeonbuk_month.iloc[0,1]
end_num=df_safe_jeonbuk_month.iloc[-1,1]
start_num=unit_down(start_num)
end_num=unit_up(end_num)+80
# 전라북도 확진자수 그래프 출력
ax = fig.add_subplot()
line_plot=ax.plot(df_safe_jeonbuk_month.createDt,df_safe_jeonbuk_month['defCnt'], color='#FF5A5A', alpha=.75, linewidth=2, marker='o',markersize=5)
line_plot=line_plot[0]
plt.yticks(np.arange(start_num, end_num, step=80))
plt.xlabel('날짜 (단위: 주)')
plt.ylabel('명', position=(0,1.05), verticalalignment='top', horizontalalignment='left', rotation='horizontal')
for coord in list(line_plot.get_xydata()):
ax.text(coord[0],coord[1]+5 ,f'{int(coord[1])}',fontsize=11, ha='center', color='#FF5A5A')
plt.savefig('images/fig_jeonbuk_1.png', dpi=300)
plt.clf()
# 전라북도 지역발생수, 해외유입수 그래프 yticks 값 조정
df_safe_jeonbuk_day_c=df_safe_jeonbuk_day.sort_values(by='localOccCnt', ascending=True)
end_num_idx=df_safe_jeonbuk_day_c['localOccCnt'].idxmax()
end_num_c=df_safe_jeonbuk_day_c['localOccCnt'][end_num_idx]+5
# 전라북도 지역발생수, 해외유입수 그래프 출력
x=np.arange(len(df_safe_jeonbuk_day.createDt))
plt.bar(x-0.0, df_safe_jeonbuk_day.localOccCnt, label='지역발생수', width=0.2, color='#ff8a3c', alpha=.75)
plt.bar(x+0.2, df_safe_jeonbuk_day.overFlowCnt, label='해외유입수', width=0.2, color='#3a8d65', alpha=.75)
plt.xticks(x, df_safe_jeonbuk_day.createDt)
plt.yticks(np.arange(0, end_num_c, step=5))
plt.xlabel('날짜 (단위: 일)')
plt.ylabel('명', position=(0,1.05), verticalalignment='top', horizontalalignment='left', rotation='horizontal')
plt.legend()
plt.savefig('images/fig_jeonbuk_2.png', dpi=300)
plt.clf()
# 경상남도
# 경상남도 df 전처리
df_safe_gyeongnam=df_safe_gyeongnam.groupby('createDt', sort=True).head(1)
df_safe_gyeongnam_week=df_safe_gyeongnam.iloc[::7] # 일주일 단위로 데이터 추출 (누적 확진자수 그래프에 사용)
df_safe_gyeongnam_month=df_safe_gyeongnam_week.head(5) # 일주일 단위로 한달(5주) 데이터 추출 (누적 확진자수 그래프에 사용)
df_safe_gyeongnam_day=df_safe_gyeongnam.head(5) # 하루 단위로 5일 데이터 추출 (지역발생수, 해외유입수 그래프에 사용)
df_safe_gyeongnam_month=df_safe_gyeongnam_month.sort_values(by='createDt', ascending=True)
df_safe_gyeongnam_day=df_safe_gyeongnam_day.sort_values(by='createDt', ascending=True)
df_safe_gyeongnam_month['createDt']=df_safe_gyeongnam_month['createDt'].astype(str)
# 경상남도 확진자수 그래프 yticks 값 조정
start_num=df_safe_gyeongnam_month.iloc[0,1]
end_num=df_safe_gyeongnam_month.iloc[-1,1]
start_num=unit_down(start_num)
end_num=unit_up(end_num)+100
# 경상남도 확진자수 그래프 출력
ax = fig.add_subplot()
line_plot=ax.plot(df_safe_gyeongnam_month.createDt,df_safe_gyeongnam_month['defCnt'], color='#FF5A5A', alpha=.75, linewidth=2, marker='o',markersize=5)
line_plot=line_plot[0]
plt.yticks(np.arange(start_num, end_num, step=100))
plt.xlabel('날짜 (단위: 주)')
plt.ylabel('명', position=(0,1.05), verticalalignment='top', horizontalalignment='left', rotation='horizontal')
for coord in list(line_plot.get_xydata()):
ax.text(coord[0],coord[1]+7 ,f'{int(coord[1])}',fontsize=11, ha='center', color='#FF5A5A')
plt.savefig('images/fig_gyeongnam_1.png', dpi=300)
plt.clf()
# 경상남도 지역발생수, 해외유입수 그래프 yticks 값 조정
df_safe_gyeongnam_day_c=df_safe_gyeongnam_day.sort_values(by='localOccCnt', ascending=True)
end_num_idx=df_safe_gyeongnam_day_c['localOccCnt'].idxmax()
end_num_c=df_safe_gyeongnam_day_c['localOccCnt'][end_num_idx]+10
# 경상남도 지역발생수, 해외유입수 그래프 출력
x=np.arange(len(df_safe_gyeongnam_day.createDt))
plt.bar(x-0.0, df_safe_gyeongnam_day.localOccCnt, label='지역발생수', width=0.2, color='#ff8a3c', alpha=.75)
plt.bar(x+0.2, df_safe_gyeongnam_day.overFlowCnt, label='해외유입수', width=0.2, color='#3a8d65', alpha=.75)
plt.xticks(x, df_safe_gyeongnam_day.createDt)
plt.yticks(np.arange(0, end_num_c, step=10))
plt.xlabel('날짜 (단위: 일)')
plt.ylabel('명', position=(0,1.05), verticalalignment='top', horizontalalignment='left', rotation='horizontal')
plt.legend()
plt.savefig('images/fig_gyeongnam_2.png', dpi=300)
plt.clf()
# 경상북도
# 경상북도 df 전처리
df_safe_gb=df_safe_gb.groupby('createDt', sort=True).head(1)
df_safe_gb_week=df_safe_gb.iloc[::7] # 일주일 단위로 데이터 추출 (누적 확진자수 그래프에 사용)
df_safe_gb_month=df_safe_gb_week.head(5) # 일주일 단위로 한달(5주) 데이터 추출 (누적 확진자수 그래프에 사용)
df_safe_gb_day=df_safe_gb.head(5) # 하루 단위로 5일 데이터 추출 (지역발생수, 해외유입수 그래프에 사용)
df_safe_gb_month=df_safe_gb_month.sort_values(by='createDt', ascending=True)
df_safe_gb_day=df_safe_gb_day.sort_values(by='createDt', ascending=True)
df_safe_gb_month['createDt']=df_safe_gb_month['createDt'].astype(str)
# 경상북도 확진자수 그래프 yticks 값 조정
start_num=df_safe_gb_month.iloc[0,1]
end_num=df_safe_gb_month.iloc[-1,1]
start_num=unit_down(start_num)
end_num=unit_up(end_num)+50
# 경상북도 확진자수 그래프 출력
ax = fig.add_subplot()
line_plot=ax.plot(df_safe_gb_month.createDt,df_safe_gb_month['defCnt'], color='#FF5A5A', alpha=.75, linewidth=2, marker='o',markersize=5)
line_plot=line_plot[0]
plt.yticks(np.arange(start_num, end_num, step=50))
plt.xlabel('날짜 (단위: 주)')
plt.ylabel('명', position=(0,1.05), verticalalignment='top', horizontalalignment='left', rotation='horizontal')
for coord in list(line_plot.get_xydata()):
ax.text(coord[0],coord[1]+4 ,f'{int(coord[1])}',fontsize=11, ha='center', color='#FF5A5A')
plt.savefig('images/fig_gb_1.png', dpi=300)
plt.clf()
# 경상북도 지역발생수, 해외유입수 그래프 yticks 값 조정
df_safe_gb_day_c=df_safe_gb_day.sort_values(by='localOccCnt', ascending=True)
end_num_idx=df_safe_gb_day_c['localOccCnt'].idxmax()
end_num_c=df_safe_gb_day_c['localOccCnt'][end_num_idx]+4
# 경상북도 지역발생수, 해외유입수 그래프 출력
x=np.arange(len(df_safe_gb_day.createDt))
plt.bar(x-0.0, df_safe_gb_day.localOccCnt, label='지역발생수', width=0.2, color='#ff8a3c', alpha=.75)
plt.bar(x+0.2, df_safe_gb_day.overFlowCnt, label='해외유입수', width=0.2, color='#3a8d65', alpha=.75)
plt.xticks(x, df_safe_gb_day.createDt)
plt.yticks(np.arange(0, end_num_c, step=3))
plt.xlabel('날짜 (단위: 일)')
plt.ylabel('명', position=(0,1.05), verticalalignment='top', horizontalalignment='left', rotation='horizontal')
plt.legend()
plt.savefig('images/fig_gb_2.png', dpi=300)
plt.clf()
# 강원도
# 강원도 df 전처리
df_safe_gangwon=df_safe_gangwon.groupby('createDt', sort=True).head(1)
df_safe_gangwon_week=df_safe_gangwon.iloc[::7] # 일주일 단위로 데이터 추출 (누적 확진자수 그래프에 사용)
df_safe_gangwon_month=df_safe_gangwon_week.head(5) # 일주일 단위로 한달(5주) 데이터 추출 (누적 확진자수 그래프에 사용)
df_safe_gangwon_day=df_safe_gangwon.head(5) # 하루 단위로 5일 데이터 추출 (지역발생수, 해외유입수 그래프에 사용)
df_safe_gangwon_month=df_safe_gangwon_month.sort_values(by='createDt', ascending=True)
df_safe_gangwon_day=df_safe_gangwon_day.sort_values(by='createDt', ascending=True)
df_safe_gangwon_month['createDt']=df_safe_gangwon_month['createDt'].astype(str)
# 강원도 확진자수 그래프 yticks 값 조정
start_num=df_safe_gangwon_month.iloc[0,1]
end_num=df_safe_gangwon_month.iloc[-1,1]
start_num=unit_down(start_num)
end_num=unit_up(end_num)+100
# 강원도 확진자수 그래프 출력
ax = fig.add_subplot()
line_plot=ax.plot(df_safe_gangwon_month.createDt,df_safe_gangwon_month['defCnt'], color='#FF5A5A', alpha=.75, linewidth=2, marker='o',markersize=5)
line_plot=line_plot[0]
plt.yticks(np.arange(start_num, end_num, step=100))
plt.xlabel('날짜 (단위: 주)')
plt.ylabel('명', position=(0,1.05), verticalalignment='top', horizontalalignment='left', rotation='horizontal')
for coord in list(line_plot.get_xydata()):
ax.text(coord[0],coord[1]+7.5 ,f'{int(coord[1])}',fontsize=11, ha='center', color='#FF5A5A')
plt.savefig('images/fig_gangwon_1.png', dpi=300)
plt.clf()
# 강원도 지역발생수, 해외유입수 그래프 yticks 값 조정
df_safe_gangwon_day_c=df_safe_gangwon_day.sort_values(by='localOccCnt', ascending=True)
end_num_idx=df_safe_gangwon_day_c['localOccCnt'].idxmax()
end_num_c=df_safe_gangwon_day_c['localOccCnt'][end_num_idx]+8
# 강원도 지역발생수, 해외유입수 그래프 출력
x=np.arange(len(df_safe_gangwon_day.createDt))
plt.bar(x-0.0, df_safe_gangwon_day.localOccCnt, label='지역발생수', width=0.2, color='#ff8a3c', alpha=.75)
plt.bar(x+0.2, df_safe_gangwon_day.overFlowCnt, label='해외유입수', width=0.2,color='#3a8d65', alpha=.75)
plt.xticks(x, df_safe_gangwon_day.createDt)
plt.yticks(np.arange(0, end_num_c, step=4))
plt.xlabel('날짜 (단위: 일)')
plt.ylabel('명', position=(0,1.05), verticalalignment='top', horizontalalignment='left', rotation='horizontal')
plt.legend()
plt.savefig('images/fig_gangwon_2.png', dpi=300)
plt.clf()
# 제주도
# 제주도 df 전처리
df_safe_jeju=df_safe_jeju.groupby('createDt', sort=True).head(1)
df_safe_jeju_week=df_safe_jeju.iloc[::7] # 일주일 단위로 데이터 추출 (누적 확진자수 그래프에 사용)
df_safe_jeju_month=df_safe_jeju_week.head(5) # 일주일 단위로 한달(5주) 데이터 추출 (누적 확진자수 그래프에 사용)
df_safe_jeju_day=df_safe_jeju.head(5) # 하루 단위로 5일 데이터 추출 (지역발생수, 해외유입수 그래프에 사용)
df_safe_jeju_month=df_safe_jeju_month.sort_values(by='createDt', ascending=True)
df_safe_jeju_day=df_safe_jeju_day.sort_values(by='createDt', ascending=True)
df_safe_jeju_month['createDt']=df_safe_jeju_month['createDt'].astype(str)
# 제주도 확진자수 그래프 yticks 값 조정
start_num=df_safe_jeju_month.iloc[0,1]
end_num=df_safe_jeju_month.iloc[-1,1]
start_num=unit_down(start_num)
end_num=unit_up(end_num)+10
# 제주도 확진자수 그래프 출력
ax = fig.add_subplot()
line_plot=ax.plot(df_safe_jeju_month.createDt,df_safe_jeju_month['defCnt'], color='#FF5A5A', alpha=.75, linewidth=2, marker='o',markersize=5)
line_plot=line_plot[0]
plt.yticks(np.arange(start_num, end_num, step=10))
plt.xlabel('날짜 (단위: 주)')
plt.ylabel('명', position=(0,1.05), verticalalignment='top', horizontalalignment='left', rotation='horizontal')
for coord in list(line_plot.get_xydata()):
ax.text(coord[0],coord[1]+0.7 ,f'{int(coord[1])}',fontsize=11, ha='center', color='#FF5A5A')
plt.savefig('images/fig_jeju_1.png', dpi=300)
plt.clf()
# 제주도 지역발생수, 해외유입수 그래프 yticks 값 조정
df_safe_jeju_day_c=df_safe_jeju_day.sort_values(by='localOccCnt', ascending=True)
end_num_idx=df_safe_jeju_day_c['localOccCnt'].idxmax()
end_num_c=df_safe_jeju_day_c['localOccCnt'][end_num_idx]+4
# 제주도 지역발생수, 해외유입수 그래프 출력
x=np.arange(len(df_safe_jeju_day.createDt))
plt.bar(x-0.0, df_safe_jeju_day.localOccCnt, label='지역발생수', width=0.2, color='#ff8a3c', alpha=.75)
plt.bar(x+0.2, df_safe_jeju_day.overFlowCnt, label='해외유입수', width=0.2, color='#3a8d65', alpha=.75)
plt.xticks(x, df_safe_jeju_day.createDt)
plt.yticks(np.arange(0, end_num_c, step=2))
plt.xlabel('날짜 (단위: 일)')
plt.ylabel('명', position=(0,1.05), verticalalignment='top', horizontalalignment='left', rotation='horizontal')
plt.legend()
plt.savefig('images/fig_jeju_2.png', dpi=300)
# Connect to Firebase Storage
os.environ["GOOGLE_APPLICATION_CREDENTIALS"]="mykey.json"
firebase = firebase.FirebaseApplication('https://project-48b89.firebaseio.com/')
client = storage.Client()
bucket = client.get_bucket('project-48b89.appspot.com')
imageBlob = bucket.blob("/")
# Upload the saved graph image files to Storage
# Seoul graphs
imagePath_s1 = "images/fig_seoul_1.png"
imageBlob_s1 = bucket.blob("seoul1.png")
imageBlob_s1.upload_from_filename(imagePath_s1)
imagePath_s2 = "images/fig_seoul_2.png"
imageBlob_s2 = bucket.blob("seoul2.png")
imageBlob_s2.upload_from_filename(imagePath_s2)
# 경기도 그래프
imagePath_g1 = "images/fig_gg_1.png"
imageBlob_g1 = bucket.blob("gyeounggi1.png")
imageBlob_g1.upload_from_filename(imagePath_g1)
imagePath_g2 = "images/fig_gg_2.png"
imageBlob_g2 = bucket.blob("gyeounggi2.png")
imageBlob_g2.upload_from_filename(imagePath_g2)
# 충청남도 그래프
imagePath_cn1 = "images/fig_chungnam_1.png"
imageBlob_cn1 = bucket.blob("chungnam1.png")
imageBlob_cn1.upload_from_filename(imagePath_cn1)
imagePath_cn2 = "images/fig_chungnam_2.png"
imageBlob_cn2 = bucket.blob("chungnam2.png")
imageBlob_cn2.upload_from_filename(imagePath_cn2)
# 충청북도 그래프
imagePath_cb1 = "images/fig_chungbuk_1.png"
imageBlob_cb1 = bucket.blob("chungbuk1.png")
imageBlob_cb1.upload_from_filename(imagePath_cb1)
imagePath_cb2 = "images/fig_chungbuk_2.png"
imageBlob_cb2 = bucket.blob("chungbuk2.png")
imageBlob_cb2.upload_from_filename(imagePath_cb2)
# Jeollanam-do graphs
imagePath_jn1 = "images/fig_jeonnam_1.png"
imageBlob_jn1 = bucket.blob("jeonnam1.png")
imageBlob_jn1.upload_from_filename(imagePath_jn1)
imagePath_jn2 = "images/fig_jeonnam_2.png"
imageBlob_jn2 = bucket.blob("jeonnam2.png")
imageBlob_jn2.upload_from_filename(imagePath_jn2)
# Jeollabuk-do graphs
imagePath_jb1 = "images/fig_jeonbuk_1.png"
imageBlob_jb1 = bucket.blob("jeonbuk1.png")
imageBlob_jb1.upload_from_filename(imagePath_jb1)
imagePath_jb2 = "images/fig_jeonbuk_2.png"
imageBlob_jb2 = bucket.blob("jeonbuk2.png")
imageBlob_jb2.upload_from_filename(imagePath_jb2)
# Gyeongsangnam-do graphs
imagePath_gn1 = "images/fig_gyeongnam_1.png"
imageBlob_gn1 = bucket.blob("gyeongnam1.png")
imageBlob_gn1.upload_from_filename(imagePath_gn1)
imagePath_gn2 = "images/fig_gyeongnam_2.png"
imageBlob_gn2 = bucket.blob("gyeongnam2.png")
imageBlob_gn2.upload_from_filename(imagePath_gn2)
# Gyeongsangbuk-do graphs
imagePath_gb1 = "images/fig_gb_1.png"
imageBlob_gb1 = bucket.blob("gyeongbuk1.png")
imageBlob_gb1.upload_from_filename(imagePath_gb1)
imagePath_gb2 = "images/fig_gb_2.png"
imageBlob_gb2 = bucket.blob("gyeongbuk2.png")
imageBlob_gb2.upload_from_filename(imagePath_gb2)
# Gangwon-do graphs
imagePath_gw1 = "images/fig_gangwon_1.png"
imageBlob_gw1 = bucket.blob("gangwon1.png")
imageBlob_gw1.upload_from_filename(imagePath_gw1)
imagePath_gw2 = "images/fig_gangwon_2.png"
imageBlob_gw2 = bucket.blob("gangwon2.png")
imageBlob_gw2.upload_from_filename(imagePath_gw2)
# Jeju graphs
imagePath_j1 = "images/fig_jeju_1.png"
imageBlob_j1 = bucket.blob("jeju1.png")
imageBlob_j1.upload_from_filename(imagePath_j1)
imagePath_j2 = "images/fig_jeju_2.png"
imageBlob_j2 = bucket.blob("jeju2.png")
imageBlob_j2.upload_from_filename(imagePath_j2)
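# Editor's note: a minimal sketch (not part of the original script) showing how the repetitive
# per-region uploads above could be driven by a single loop. `region_files` is a hypothetical
# mapping from local image paths to blob names and assumes the same `bucket` object as above.
# region_files = {
#     "images/fig_seoul_1.png": "seoul1.png",
#     "images/fig_seoul_2.png": "seoul2.png",
#     # ... remaining regions ...
# }
# for image_path, blob_name in region_files.items():
#     bucket.blob(blob_name).upload_from_filename(image_path)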
|
[
"yang_hyeji1@daum.net"
] |
yang_hyeji1@daum.net
|
c012f9c59f940bc7e0291d153248da24b9b21db3
|
372558f4337c539871e2e25fcaa06c45041a700e
|
/userInterface.py
|
b4733d79c954769bdfd048ac726fb2b350f69af4
|
[] |
no_license
|
kubicius/blockchain
|
452be7990be91f322acceafdfba7f2664cd2311c
|
bf4368975e6f2198d90c0a3b1c8005503b3ae851
|
refs/heads/main
| 2023-04-12T17:42:52.096126
| 2021-05-05T20:44:46
| 2021-05-05T20:44:46
| 364,521,496
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,151
|
py
|
from fastapi.templating import Jinja2Templates
class UserInterface:
"""
Class preparing html templates.
"""
def __init__(self):
self.templates = Jinja2Templates(directory="templates/")
def show_template(self, request, result):
return self.templates.TemplateResponse('default.html', context={'request': request, 'result': result})
def prepare_all_records(self, blockchain, filter, id):
result = ""
if filter:
result += "<div class='row filter'>"
result += "<form method='GET'>"
result += "<div class='col'>"
result += "<label for='id'>Person ID:</label>"
result += "<input type='hidden' name='filter' value='True'>"
result += "<input id='id' name='id' required>"
result += "<input type='submit' value='Filter'>"
result += "</div>"
result += "</form>"
result += "</div>"
result += "<div class='row'>"
for label in ['person_id', 'person_name', 'doctor', 'report', 'medicine']:
result += "<div class='col result__label'>"
result += "<span>" + label + "</span>"
result += "</div>"
result += "</div>"
for block in blockchain.chain:
t = block.transactions
if len(block.transactions) == 1:
if filter == False or block.transactions[0]['person_id'] == id:
result += "<div class='row'>"
for t in block.transactions[0]:
result += "<div class='col result__data'>"
result += "<span>"+str(block.transactions[0][t])+"</span>"
result += "</div>"
result += "</div>"
return result
def prepare_mine_result(self, fired, mined):
result = ""
if fired:
if mined:
result = "<p>Mined block: " + str(mined) + "</p>"
else:
result = "<p>Nothing to mine.</p>"
result += "<button onclick='window.location.href=\"/mine?fired=True\"'>MINE!</button>"
return result
|
[
"kskubicius@gmail.com"
] |
kskubicius@gmail.com
|
d45c64018c5559929a424479f55941ef74fffbfb
|
84379e15e54ba79b7e63c1fceecf712b46f22977
|
/apps/cards/migrations/0052_auto_20200516_1545.py
|
e7249b0638f24725e1ab93e5956e11c91eed3149
|
[] |
no_license
|
CoderEnko007/HearthStoneStationBackend
|
a1d74c324233ebd617ad01df13bc609d1f1aa2f6
|
6cc92cb806f19f2a2a0596645028cfe2fa5895d6
|
refs/heads/master
| 2022-12-11T23:20:24.335737
| 2022-09-18T07:04:08
| 2022-09-18T07:04:08
| 144,392,864
| 0
| 0
| null | 2022-12-08T02:22:42
| 2018-08-11T14:40:48
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 642
|
py
|
# Generated by Django 2.0.4 on 2020-05-16 15:45
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('cards', '0051_hscards_update_time'),
]
operations = [
migrations.AddField(
model_name='hsbattlegroundcards',
name='ename',
field=models.CharField(blank=True, max_length=100, null=True, verbose_name='英文名称'),
),
migrations.AlterField(
model_name='hsbattlegroundcards',
name='name',
field=models.CharField(max_length=100, verbose_name='中文名称'),
),
]
|
[
"yf381966217@163.com"
] |
yf381966217@163.com
|
114faf3399990d6091ca6561a042f283688d53cc
|
085148f472eb07a565df7ea513b90ec84270d40a
|
/petalapp/config.py
|
3623889fee5098390e9b4ced2860cbd81297cf3a
|
[] |
no_license
|
drewverlee/petal
|
165af8073f01afc9230534b73128cc5fb8ccd631
|
cde63d27af7d6059c102a9980aeb442ea21eda22
|
refs/heads/master
| 2021-01-19T10:26:16.627874
| 2014-01-11T16:39:29
| 2014-01-11T16:39:29
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 971
|
py
|
'''
File: config.py
Date: 2012-11
Author: Drew Verlee
Description: configuration setup to handle AWS, Heroku, local machine,
database, etc...
'''
import os
# flask
PORT = int(os.environ.get("PORT", 5000))
basedir = str(os.path.abspath(os.path.dirname(__file__)))
SECRET_KEY = str(os.environ.get("APP_SECRET_KEY"))
DEBUG = str(os.environ.get("DEBUG"))
ALLOWED_EXTENSIONS = str(os.environ.get("ALLOWED_EXTENSIONS"))
CSRF_ENABLED = True
TESTING = os.environ.get("TESTING", False)
#database, TODO add local db?
SQLALCHEMY_DATABASE_URI = str(os.environ.get("DATABASE_URL"))
SQLALCHEMY_MIGRATE_REPO = str(os.path.join(basedir, 'database/db_repository'))
# S3
AWS_ACCESS_KEY_ID = str(os.environ.get("AWS_ACCESS_KEY_ID"))
AWS_SECRET_ACCESS_KEY = str(os.environ.get("AWS_SECRET_ACCESS_KEY"))
S3_BUCKET = str(os.environ.get("S3_BUCKET"))
S3_UPLOAD_DIRECTORY = str(os.environ.get("S3_UPLOAD_DIRECTORY"))
# browser id
BROWERID_LOGIN_URL = "/login"
BROWERID_LOGOUT_URL = "/logout"
|
[
"Drew.verlee@gmail.com"
] |
Drew.verlee@gmail.com
|
292a7efec1a7e17418ca697c6361e6cd703ed8b4
|
621a40fa363dc0c32c96a4c8fdfe9142877e2ff1
|
/ietf/sync/mails.py
|
a58894e3d1cc66becadded97bb3aca98e9b74879
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
omunroe-com/ietfdb2
|
d9c40bebe4b25059f810c70dd1370cca30cb3c36
|
aeaae292fbd55aca1b6043227ec105e67d73367f
|
refs/heads/master
| 2020-04-04T21:05:56.067430
| 2018-11-05T09:08:27
| 2018-11-05T09:08:27
| 156,273,382
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 632
|
py
|
from django.urls import reverse as urlreverse
from django.conf import settings
from ietf.utils.mail import send_mail
from ietf.sync.discrepancies import find_discrepancies
def email_discrepancies(receivers):
sections = find_discrepancies()
send_mail(None,
receivers,
None,
"Datatracker Sync Discrepancies Report",
"sync/discrepancies_report.txt",
dict(sections=sections,
url=settings.IDTRACKER_BASE_URL + urlreverse("ietf.sync.views.discrepancies"),
base_url=settings.IDTRACKER_BASE_URL,
))
|
[
"henrik@levkowetz.com"
] |
henrik@levkowetz.com
|
537854fc42c02d114a9c6a582dd68c42475254ce
|
ec87588d0032c551a114b27c9491582d4191889c
|
/Week_1/1068.py
|
dd4c0bebcc6c207599bd6c772d69a24e3e157332
|
[] |
no_license
|
MurylloEx/Data-Structures-and-Algorithms
|
0f28de436debb25acc1ae36acb88b12d1bd7b140
|
6a9c38ce5e925ac1d221a7cc7405daf5d46b43ed
|
refs/heads/master
| 2021-02-19T17:33:18.288885
| 2020-11-13T15:08:34
| 2020-11-13T15:08:34
| 245,316,916
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 627
|
py
|
while True:
try:
expr = input()
if (expr not in '\n\r'):
raw_expr = str()
for idx in range(0, len(expr)):
if (expr[idx] in '()'):
raw_expr += expr[idx]
if (len(raw_expr) % 2 == 1):
print('incorrect')
else:
strlen = int(len(raw_expr)/2)
for idx in range(0, strlen):
raw_expr = raw_expr.replace('()', '')
print('correct' if raw_expr == '' else 'incorrect')
else:
break
except EOFError:
break
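# Editor's note (sketch, not part of the original submission): the repeated '()' replacement above
# can also be done in a single pass with a depth counter over raw_expr, assuming the same input handling:
#   depth = 0
#   for ch in raw_expr:
#       depth += 1 if ch == '(' else -1
#       if depth < 0:
#           break
#   print('correct' if depth == 0 else 'incorrect')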
|
[
"noreply@github.com"
] |
MurylloEx.noreply@github.com
|
b442671190ebc5c0787131c5aab2a40f6a028ce6
|
3b239e588f2ca6e49a28a63d906dd8dd26173f88
|
/code/play_train_eval_q.py
|
098d7e50fdc67e62c6665a9b62a6477517ed596a
|
[] |
no_license
|
Angi16/deep_learning_and_the_game_of_go
|
3bbf4f075f41359b87cb06fe01b4c7af85837c18
|
ba63d5e3f60ec42fa1088921ecf93bdec641fd04
|
refs/heads/master
| 2020-03-23T16:02:47.431241
| 2018-07-21T02:57:16
| 2018-07-21T02:57:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,494
|
py
|
import argparse
import datetime
import multiprocessing
import os
import random
import shutil
import time
import tempfile
from collections import namedtuple
import h5py
import numpy as np
from dlgo import kerasutil
from dlgo import scoring
from dlgo import rl
from dlgo.goboard_fast import GameState, Player, Point
def load_agent(filename):
with h5py.File(filename, 'r') as h5file:
return rl.load_q_agent(h5file)
COLS = 'ABCDEFGHJKLMNOPQRST'
STONE_TO_CHAR = {
None: '.',
Player.black: 'x',
Player.white: 'o',
}
def avg(items):
if not items:
return 0.0
return sum(items) / float(len(items))
def print_board(board):
for row in range(board.num_rows, 0, -1):
line = []
for col in range(1, board.num_cols + 1):
stone = board.get(Point(row=row, col=col))
line.append(STONE_TO_CHAR[stone])
print('%2d %s' % (row, ''.join(line)))
print(' ' + COLS[:board.num_cols])
class GameRecord(namedtuple('GameRecord', 'moves winner margin')):
pass
def name(player):
if player == Player.black:
return 'B'
return 'W'
def simulate_game(black_player, white_player, board_size):
moves = []
game = GameState.new_game(board_size)
agents = {
Player.black: black_player,
Player.white: white_player,
}
while not game.is_over():
next_move = agents[game.next_player].select_move(game)
moves.append(next_move)
game = game.apply_move(next_move)
print_board(game.board)
game_result = scoring.compute_game_result(game)
print(game_result)
return GameRecord(
moves=moves,
winner=game_result.winner,
margin=game_result.winning_margin,
)
def get_temp_file():
fd, fname = tempfile.mkstemp(prefix='dlgo-train')
os.close(fd)
return fname
def do_self_play(board_size, agent1_filename, agent2_filename,
num_games, temperature,
experience_filename,
gpu_frac):
kerasutil.set_gpu_memory_target(gpu_frac)
random.seed(int(time.time()) + os.getpid())
np.random.seed(int(time.time()) + os.getpid())
agent1 = load_agent(agent1_filename)
agent1.set_temperature(temperature)
agent2 = load_agent(agent2_filename)
agent2.set_temperature(temperature)
collector1 = rl.ExperienceCollector()
color1 = Player.black
for i in range(num_games):
print('Simulating game %d/%d...' % (i + 1, num_games))
collector1.begin_episode()
agent1.set_collector(collector1)
if color1 == Player.black:
black_player, white_player = agent1, agent2
else:
white_player, black_player = agent1, agent2
game_record = simulate_game(black_player, white_player, board_size)
if game_record.winner == color1:
print('Agent 1 wins.')
collector1.complete_episode(reward=1)
else:
print('Agent 2 wins.')
collector1.complete_episode(reward=-1)
color1 = color1.other
experience = rl.combine_experience([collector1])
print('Saving experience buffer to %s\n' % experience_filename)
with h5py.File(experience_filename, 'w') as experience_outf:
experience.serialize(experience_outf)
def generate_experience(learning_agent, reference_agent, exp_file,
num_games, board_size, num_workers, temperature):
experience_files = []
workers = []
gpu_frac = 0.95 / float(num_workers)
games_per_worker = num_games // num_workers
for i in range(num_workers):
filename = get_temp_file()
experience_files.append(filename)
worker = multiprocessing.Process(
target=do_self_play,
args=(
board_size,
learning_agent,
reference_agent,
games_per_worker,
temperature,
filename,
gpu_frac,
)
)
worker.start()
workers.append(worker)
# Wait for all workers to finish.
print('Waiting for workers...')
for worker in workers:
worker.join()
# Merge experience buffers.
print('Merging experience buffers...')
first_filename = experience_files[0]
other_filenames = experience_files[1:]
with h5py.File(first_filename, 'r') as expf:
combined_buffer = rl.load_experience(expf)
for filename in other_filenames:
with h5py.File(filename, 'r') as expf:
next_buffer = rl.load_experience(expf)
combined_buffer = rl.combine_experience([combined_buffer, next_buffer])
print('Saving into %s...' % exp_file)
with h5py.File(exp_file, 'w') as experience_outf:
combined_buffer.serialize(experience_outf)
# Clean up.
for fname in experience_files:
os.unlink(fname)
def train_worker(learning_agent, output_file, experience_file,
lr, batch_size):
learning_agent = load_agent(learning_agent)
with h5py.File(experience_file, 'r') as expf:
exp_buffer = rl.load_experience(expf)
learning_agent.train(exp_buffer, lr=lr, batch_size=batch_size)
with h5py.File(output_file, 'w') as updated_agent_outf:
learning_agent.serialize(updated_agent_outf)
def train_on_experience(learning_agent, output_file, experience_file,
lr, batch_size):
# Do the training in the background process. Otherwise some Keras
# stuff gets initialized in the parent, and later that forks, and
# that messes with the workers.
worker = multiprocessing.Process(
target=train_worker,
args=(
learning_agent,
output_file,
experience_file,
lr,
batch_size
)
)
worker.start()
worker.join()
def play_games(args):
agent1_fname, agent2_fname, num_games, board_size, gpu_frac, temperature = args
kerasutil.set_gpu_memory_target(gpu_frac)
random.seed(int(time.time()) + os.getpid())
np.random.seed(int(time.time()) + os.getpid())
agent1 = load_agent(agent1_fname)
agent1.set_temperature(temperature)
agent2 = load_agent(agent2_fname)
agent2.set_temperature(temperature)
wins, losses = 0, 0
color1 = Player.black
for i in range(num_games):
print('Simulating game %d/%d...' % (i + 1, num_games))
if color1 == Player.black:
black_player, white_player = agent1, agent2
else:
white_player, black_player = agent1, agent2
game_record = simulate_game(black_player, white_player, board_size)
if game_record.winner == color1:
print('Agent 1 wins')
wins += 1
else:
print('Agent 2 wins')
losses += 1
print('Agent 1 record: %d/%d' % (wins, wins + losses))
color1 = color1.other
return (wins, losses)
def evaluate(learning_agent, reference_agent,
num_games, num_workers, board_size,
temperature):
games_per_worker = num_games // num_workers
gpu_frac = 0.95 / float(num_workers)
pool = multiprocessing.Pool(num_workers)
worker_args = [
(
learning_agent, reference_agent,
games_per_worker, board_size, gpu_frac,
temperature,
)
for _ in range(num_workers)
]
game_results = pool.map(play_games, worker_args)
total_wins, total_losses = 0, 0
for wins, losses in game_results:
total_wins += wins
total_losses += losses
print('FINAL RESULTS:')
print('Learner: %d' % total_wins)
print('Refrnce: %d' % total_losses)
pool.close()
pool.join()
return total_wins
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--agent', required=True)
parser.add_argument('--games-per-batch', '-g', type=int, default=1000)
parser.add_argument('--work-dir', '-d')
parser.add_argument('--num-workers', '-w', type=int, default=1)
parser.add_argument('--temperature', '-t', type=float, default=0.0)
parser.add_argument('--board-size', '-b', type=int, default=19)
parser.add_argument('--lr', type=float, default=0.01)
parser.add_argument('--bs', type=int, default=512)
parser.add_argument('--log-file', '-l')
args = parser.parse_args()
logf = open(args.log_file, 'a')
logf.write('----------------------\n')
logf.write('Starting from %s at %s\n' % (
args.agent, datetime.datetime.now()))
temp_decay = 0.98
min_temp = 0.01
temperature = args.temperature
learning_agent = args.agent
reference_agent = args.agent
experience_file = os.path.join(args.work_dir, 'exp_temp.hdf5')
tmp_agent = os.path.join(args.work_dir, 'agent_temp.hdf5')
working_agent = os.path.join(args.work_dir, 'agent_cur.hdf5')
total_games = 0
while True:
print('Reference: %s' % (reference_agent,))
logf.write('Total games so far %d\n' % (total_games,))
generate_experience(
learning_agent, reference_agent,
experience_file,
num_games=args.games_per_batch,
board_size=args.board_size,
num_workers=args.num_workers,
temperature=temperature)
train_on_experience(
learning_agent, tmp_agent, experience_file,
lr=args.lr, batch_size=args.bs)
total_games += args.games_per_batch
wins = evaluate(
learning_agent, reference_agent,
num_games=480,
num_workers=args.num_workers,
board_size=args.board_size,
temperature=temperature)
print('Won %d / 480 games (%.3f)' % (
wins, float(wins) / 480.0))
logf.write('Won %d / 480 games (%.3f)\n' % (
wins, float(wins) / 480.0))
shutil.copy(tmp_agent, working_agent)
learning_agent = working_agent
if wins >= 262:
next_filename = os.path.join(
args.work_dir,
'agent_%08d.hdf5' % (total_games,))
shutil.move(tmp_agent, next_filename)
reference_agent = next_filename
logf.write('New reference is %s\n' % next_filename)
temperature = max(min_temp, temp_decay * temperature)
logf.write('New temperature is %f\n' % temperature)
else:
print('Keep learning\n')
logf.flush()
if __name__ == '__main__':
main()
|
[
"max.pumperla@googlemail.com"
] |
max.pumperla@googlemail.com
|
4515847506698c8aa7f7ffc59048f1339af96ed8
|
9bd8b82a2aa4a126863b497e276e3d54dba56050
|
/pytorch_transformers/modeling_roberta.py
|
87f797d42fd5f9bdb99cc9de8116d336793367f7
|
[
"Apache-2.0"
] |
permissive
|
Nstats/pytorch_senti_analysis_ch
|
d905600f16c168dad9f4a5da9b68dd19cdb5a578
|
bb01cc508c37638670b26259a6ee35c4e857f2b6
|
refs/heads/master
| 2022-12-11T11:40:11.785573
| 2019-11-29T10:03:49
| 2019-11-29T10:03:49
| 208,766,887
| 3
| 0
|
Apache-2.0
| 2022-12-08T06:09:42
| 2019-09-16T09:56:58
|
Python
|
UTF-8
|
Python
| false
| false
| 18,426
|
py
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch RoBERTa model. """
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import logging
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import CrossEntropyLoss, MSELoss
from pytorch_transformers.modeling_bert import (BertConfig, BertEmbeddings,
BertLayerNorm, BertModel,
BertPreTrainedModel, gelu)
from pytorch_transformers.modeling_utils import add_start_docstrings
logger = logging.getLogger(__name__)
ROBERTA_PRETRAINED_MODEL_ARCHIVE_MAP = {
'roberta-base': "https://s3.amazonaws.com/models.huggingface.co/bert/roberta-base-pytorch_model.bin",
'roberta-large': "https://s3.amazonaws.com/models.huggingface.co/bert/roberta-large-pytorch_model.bin",
'roberta-large-mnli': "https://s3.amazonaws.com/models.huggingface.co/bert/roberta-large-mnli-pytorch_model.bin",
}
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'roberta-base': "https://s3.amazonaws.com/models.huggingface.co/bert/roberta-base-bert_config.json",
'roberta-large': "https://s3.amazonaws.com/models.huggingface.co/bert/roberta-large-bert_config.json",
'roberta-large-mnli': "https://s3.amazonaws.com/models.huggingface.co/bert/roberta-large-mnli-bert_config.json",
}
class RobertaEmbeddings(BertEmbeddings):
"""
Same as BertEmbeddings with a tiny tweak for positional embeddings indexing.
"""
def __init__(self, config):
super(RobertaEmbeddings, self).__init__(config)
self.padding_idx = 1
def forward(self, input_ids, token_type_ids=None, position_ids=None):
seq_length = input_ids.size(1)
if position_ids is None:
# Position numbers begin at padding_idx+1. Padding symbols are ignored.
# cf. fairseq's `utils.make_positions`
position_ids = torch.arange(self.padding_idx+1, seq_length+self.padding_idx+1, dtype=torch.long, device=input_ids.device)
position_ids = position_ids.unsqueeze(0).expand_as(input_ids)
return super(RobertaEmbeddings, self).forward(input_ids, token_type_ids=token_type_ids, position_ids=position_ids)
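# Editor's note (illustrative, not from the original file): with padding_idx = 1 and an input of
# shape (1, 4), the default position_ids built in forward() above are torch.arange(2, 6), i.e.
# tensor([[2, 3, 4, 5]]) after unsqueeze/expand, so positions start at padding_idx + 1 rather than 0.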
class RobertaConfig(BertConfig):
pretrained_config_archive_map = ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP
ROBERTA_START_DOCSTRING = r""" The RoBERTa model was proposed in
`RoBERTa: A Robustly Optimized BERT Pretraining Approach`_
by Yinhan Liu, Myle Ott, Naman Goyal, Jingfei Du, Mandar Joshi, Danqi Chen, Omer Levy, Mike Lewis, Luke Zettlemoyer,
Veselin Stoyanov. It is based on Google's BERT model released in 2018.
It builds on BERT and modifies key hyperparameters, removing the next-sentence pretraining
objective and training with much larger mini-batches and learning rates.
This implementation is the same as BertModel with a tiny embeddings tweak as well as a setup for Roberta pretrained
models.
This model is a PyTorch `torch.nn.Module`_ sub-class. Use it as a regular PyTorch Module and
refer to the PyTorch documentation for all matter related to general usage and behavior.
.. _`RoBERTa: A Robustly Optimized BERT Pretraining Approach`:
https://arxiv.org/abs/1907.11692
.. _`torch.nn.Module`:
https://pytorch.org/docs/stable/nn.html#module
Parameters:
config (:class:`~pytorch_transformers.RobertaConfig`): Model configuration class with all the parameters of the
model. Initializing with a config file does not load the weights associated with the model, only the configuration.
Check out the :meth:`~pytorch_transformers.PreTrainedModel.from_pretrained` method to load the model weights.
"""
ROBERTA_INPUTS_DOCSTRING = r"""
Inputs:
**input_ids**: ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:
Indices of input sequence tokens in the vocabulary.
To match pre-training, RoBERTa input sequence should be formatted with [CLS] and [SEP] tokens as follows:
(a) For sequence pairs:
``tokens: [CLS] is this jack ##son ##ville ? [SEP][SEP] no it is not . [SEP]``
(b) For single sequences:
``tokens: [CLS] the dog is hairy . [SEP]``
Fully encoded sequences or sequence pairs can be obtained using the RobertaTokenizer.encode function with
the ``add_special_tokens`` parameter set to ``True``.
RoBERTa is a model with absolute position embeddings so it's usually advised to pad the inputs on
the right rather than the left.
See :func:`pytorch_transformers.PreTrainedTokenizer.encode` and
:func:`pytorch_transformers.PreTrainedTokenizer.convert_tokens_to_ids` for details.
**position_ids**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:
Indices of positions of each input sequence tokens in the position embeddings.
            Selected in the range ``[0, config.max_position_embeddings - 1]``.
**attention_mask**: (`optional`) ``torch.FloatTensor`` of shape ``(batch_size, sequence_length)``:
Mask to avoid performing attention on padding token indices.
Mask values selected in ``[0, 1]``:
``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens.
**head_mask**: (`optional`) ``torch.FloatTensor`` of shape ``(num_heads,)`` or ``(num_layers, num_heads)``:
Mask to nullify selected heads of the self-attention modules.
Mask values selected in ``[0, 1]``:
``1`` indicates the head is **not masked**, ``0`` indicates the head is **masked**.
"""
@add_start_docstrings("The bare RoBERTa Model transformer outputing raw hidden-states without any specific head on top.",
ROBERTA_START_DOCSTRING, ROBERTA_INPUTS_DOCSTRING)
class RobertaModel(BertModel):
r"""
Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
**last_hidden_state**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, hidden_size)``
Sequence of hidden-states at the output of the last layer of the model.
**pooler_output**: ``torch.FloatTensor`` of shape ``(batch_size, hidden_size)``
Last layer hidden-state of the first token of the sequence (classification token)
further processed by a Linear layer and a Tanh activation function. The Linear
layer weights are trained from the next sentence prediction (classification)
objective during Bert pretraining. This output is usually *not* a good summary
of the semantic content of the input, you're often better with averaging or pooling
the sequence of hidden-states for the whole input sequence.
**hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)
of shape ``(batch_size, sequence_length, hidden_size)``:
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
**attentions**: (`optional`, returned when ``config.output_attentions=True``)
list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.
Examples::
tokenizer = RobertaTokenizer.from_pretrained('roberta-base')
model = RobertaModel.from_pretrained('roberta-base')
input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0) # Batch size 1
outputs = model(input_ids)
last_hidden_states = outputs[0] # The last hidden-state is the first element of the output tuple
"""
config_class = RobertaConfig
pretrained_model_archive_map = ROBERTA_PRETRAINED_MODEL_ARCHIVE_MAP
base_model_prefix = "roberta"
def __init__(self, config):
super(RobertaModel, self).__init__(config)
self.embeddings = RobertaEmbeddings(config)
self.apply(self.init_weights)
def forward(self, input_ids, token_type_ids=None, attention_mask=None, position_ids=None, head_mask=None):
if input_ids[:, 0].sum().item() != 0:
logger.warning("A sequence with no special tokens has been passed to the RoBERTa model. "
"This model requires special tokens in order to work. "
"Please specify add_special_tokens=True in your encoding.")
return super(RobertaModel, self).forward(input_ids, token_type_ids, attention_mask, position_ids, head_mask)
@add_start_docstrings("""RoBERTa Model with a `language modeling` head on top. """,
ROBERTA_START_DOCSTRING, ROBERTA_INPUTS_DOCSTRING)
class RobertaForMaskedLM(BertPreTrainedModel):
r"""
**masked_lm_labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:
Labels for computing the masked language modeling loss.
Indices should be in ``[-1, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring)
Tokens with indices set to ``-1`` are ignored (masked), the loss is only computed for the tokens with labels
in ``[0, ..., config.vocab_size]``
Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
**loss**: (`optional`, returned when ``masked_lm_labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``:
Masked language modeling loss.
**prediction_scores**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, config.vocab_size)``
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
**hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)
of shape ``(batch_size, sequence_length, hidden_size)``:
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
**attentions**: (`optional`, returned when ``config.output_attentions=True``)
list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.
Examples::
tokenizer = RobertaTokenizer.from_pretrained('roberta-base')
model = RobertaForMaskedLM.from_pretrained('roberta-base')
input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0) # Batch size 1
outputs = model(input_ids, masked_lm_labels=input_ids)
loss, prediction_scores = outputs[:2]
"""
config_class = RobertaConfig
pretrained_model_archive_map = ROBERTA_PRETRAINED_MODEL_ARCHIVE_MAP
base_model_prefix = "roberta"
def __init__(self, config):
super(RobertaForMaskedLM, self).__init__(config)
self.roberta = RobertaModel(config)
self.lm_head = RobertaLMHead(config)
self.apply(self.init_weights)
self.tie_weights()
def tie_weights(self):
""" Make sure we are sharing the input and output embeddings.
Export to TorchScript can't handle parameter sharing so we are cloning them instead.
"""
self._tie_or_clone_weights(self.lm_head.decoder, self.roberta.embeddings.word_embeddings)
def forward(self, input_ids, token_type_ids=None, attention_mask=None, masked_lm_labels=None, position_ids=None,
head_mask=None):
outputs = self.roberta(input_ids, position_ids=position_ids, token_type_ids=token_type_ids,
attention_mask=attention_mask, head_mask=head_mask)
sequence_output = outputs[0]
prediction_scores = self.lm_head(sequence_output)
outputs = (prediction_scores,) + outputs[2:] # Add hidden states and attention if they are here
if masked_lm_labels is not None:
loss_fct = CrossEntropyLoss(ignore_index=-1)
masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), masked_lm_labels.view(-1))
outputs = (masked_lm_loss,) + outputs
return outputs # (masked_lm_loss), prediction_scores, (hidden_states), (attentions)
class RobertaLMHead(nn.Module):
"""Roberta Head for masked language modeling."""
def __init__(self, config):
super(RobertaLMHead, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.layer_norm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
self.bias = nn.Parameter(torch.zeros(config.vocab_size))
def forward(self, features, **kwargs):
x = self.dense(features)
x = gelu(x)
x = self.layer_norm(x)
# project back to size of vocabulary with bias
x = self.decoder(x) + self.bias
return x
@add_start_docstrings("""RoBERTa Model transformer with a sequence classification/regression head on top (a linear layer
on top of the pooled output) e.g. for GLUE tasks. """,
ROBERTA_START_DOCSTRING, ROBERTA_INPUTS_DOCSTRING)
class RobertaForSequenceClassification(BertPreTrainedModel):
r"""
**labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``:
Labels for computing the sequence classification/regression loss.
Indices should be in ``[0, ..., config.num_labels]``.
If ``config.num_labels == 1`` a regression loss is computed (Mean-Square loss),
If ``config.num_labels > 1`` a classification loss is computed (Cross-Entropy).
Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
**loss**: (`optional`, returned when ``labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``:
Classification (or regression if config.num_labels==1) loss.
**logits**: ``torch.FloatTensor`` of shape ``(batch_size, config.num_labels)``
Classification (or regression if config.num_labels==1) scores (before SoftMax).
**hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)
of shape ``(batch_size, sequence_length, hidden_size)``:
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
**attentions**: (`optional`, returned when ``config.output_attentions=True``)
list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.
Examples::
        tokenizer = RobertaTokenizer.from_pretrained('roberta-base')
model = RobertaForSequenceClassification.from_pretrained('roberta-base')
input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0) # Batch size 1
labels = torch.tensor([1]).unsqueeze(0) # Batch size 1
outputs = model(input_ids, labels=labels)
loss, logits = outputs[:2]
"""
config_class = RobertaConfig
pretrained_model_archive_map = ROBERTA_PRETRAINED_MODEL_ARCHIVE_MAP
base_model_prefix = "roberta"
def __init__(self, config):
super(RobertaForSequenceClassification, self).__init__(config)
self.num_labels = config.num_labels
self.roberta = RobertaModel(config)
self.classifier = RobertaClassificationHead(config)
def forward(self, input_ids, token_type_ids=None, attention_mask=None, labels=None,
position_ids=None, head_mask=None):
outputs = self.roberta(input_ids, position_ids=position_ids, token_type_ids=token_type_ids,
attention_mask=attention_mask, head_mask=head_mask)
sequence_output = outputs[0]
logits = self.classifier(sequence_output)
outputs = (logits,) + outputs[2:]
if labels is not None:
if self.num_labels == 1:
# We are doing regression
loss_fct = MSELoss()
loss = loss_fct(logits.view(-1), labels.view(-1))
else:
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
outputs = (loss,) + outputs
return outputs # (loss), logits, (hidden_states), (attentions)
class RobertaClassificationHead(nn.Module):
"""Head for sentence-level classification tasks."""
def __init__(self, config):
super(RobertaClassificationHead, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.out_proj = nn.Linear(config.hidden_size, config.num_labels)
def forward(self, features, **kwargs):
x = features[:, 0, :] # take <s> token (equiv. to [CLS])
x = self.dropout(x)
x = self.dense(x)
x = torch.tanh(x)
x = self.dropout(x)
x = self.out_proj(x)
return x
|
[
"17801238715@163.com"
] |
17801238715@163.com
|
73d2cffae018f4ee3e6b3605bbc5eb17ef895fc2
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02886/s682827583.py
|
180261c0dad26a28d3b99ade929a4b036e335d73
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 144
|
py
|
n = int(input())
d = list(map(lambda x: int(x), input().split(" ")))
ans = 0
for i in range(n - 1):
ans += d[i] * sum(d[i + 1:])
print(ans)
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
d090b2a3de4de477fd8d316b88a68204f8466762
|
17a8ed643eb39759409187a7e6b5483a616da212
|
/example/svm_demo.py
|
c556f00c7a32c77e313b8eba1a9a79b117d552a3
|
[
"MIT"
] |
permissive
|
Lving/outlier_detection
|
a59086b8528b718f12838167ba722b7a80533891
|
13ffe460c35af7dcc49314b36791b2c4dabc7348
|
refs/heads/master
| 2021-05-08T06:33:26.182552
| 2016-09-19T09:18:22
| 2016-09-19T09:18:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,194
|
py
|
import numpy as np
import matplotlib.pyplot as plt
from outlier_detector.base_detector import OutlierDetector
# Generate train data
X_train = 0.3 * np.random.randn(100, 2)
# Generate some regular novel observations
X_test = 0.3 * np.random.randn(20, 2)
# Generate some abnormal novel observations
X_outliers = np.random.uniform(low=-4, high=4, size=(20, 2))
param_dict = {'gamma': 0.1, 'kernel': 'rbf', 'nu': 0.1}
# fit the model
outlier_worker = OutlierDetector(algo_name='svm', param_dict=param_dict)
outlier_worker.fit(X_train)
test = outlier_worker.predict(X_test)
out = outlier_worker.predict(X_outliers)
train = outlier_worker.predict(X_train)
print outlier_worker.score(X_outliers)
print outlier_worker.score(X_train[:20])
plt.figure()
plt.subplot(311)
x = [r[0] for r in X_train]
y = [r[1] for r in X_train]
c = ['g' if r == 1 else 'r' for r in train]
plt.scatter(x, y, color=c)
plt.subplot(312)
x = [r[0] for r in X_test]
y = [r[1] for r in X_test]
c = ['g' if r == 1 else 'r' for r in test]
plt.scatter(x, y, color=c)
plt.subplot(313)
x = [r[0] for r in X_outliers]
y = [r[1] for r in X_outliers]
c = ['g' if r == 1 else 'r' for r in out]
plt.scatter(x, y, color=c)
plt.show()
|
[
"wg@gizwits.com"
] |
wg@gizwits.com
|
ec9abecd68f805a633d5bb84759c798bc7535ad6
|
9fe84516b363ec5be12b37b12063d2e94f9255cf
|
/lib/networks/VGGnet_train.py
|
40a885c1d4caca742453194a2f025b6866762005
|
[] |
no_license
|
justrypython/text-detection-ctpn
|
f14543dccf278744886111f1e1f50e3d02edc6d0
|
6783c2212d56b7133aebc827fe75c26f2a8c1fd8
|
refs/heads/master
| 2021-07-04T07:10:12.503395
| 2017-09-26T08:21:56
| 2017-09-26T08:21:56
| 104,291,038
| 0
| 0
| null | 2017-09-21T02:21:59
| 2017-09-21T02:21:59
| null |
UTF-8
|
Python
| false
| false
| 5,210
|
py
|
# -*- coding:utf-8 -*-
import tensorflow as tf
import numpy as np
from network import Network
from ..fast_rcnn.config import cfg
class VGGnet_train(Network):
def __init__(self, trainable=True):
self.inputs = []
self.data = tf.placeholder(tf.float32, shape=[None, None, None, 3], name='data')
self.im_info = tf.placeholder(tf.float32, shape=[None, 3], name='im_info')
self.gt_boxes = tf.placeholder(tf.float32, shape=[None, 5], name='gt_boxes')
self.gt_ishard = tf.placeholder(tf.int32, shape=[None], name='gt_ishard')
self.dontcare_areas = tf.placeholder(tf.float32, shape=[None, 4], name='dontcare_areas')
self.keep_prob = tf.placeholder(tf.float32)
self.layers = dict({'data':self.data, 'im_info':self.im_info, 'gt_boxes':self.gt_boxes,\
'gt_ishard': self.gt_ishard, 'dontcare_areas': self.dontcare_areas})
self.trainable = trainable
self.setup()
def setup(self):
# n_classes = 21
n_classes = cfg.NCLASSES
# anchor_scales = [8, 16, 32]
anchor_scales = cfg.ANCHOR_SCALES
_feat_stride = [16, ]
(self.feed('data')
.conv(3, 3, 64, 1, 1, name='conv1_1', trainable=False)
.conv(3, 3, 64, 1, 1, name='conv1_2', trainable=False)
.max_pool(2, 2, 2, 2, padding='VALID', name='pool1')
.conv(3, 3, 128, 1, 1, name='conv2_1', trainable=False)
.conv(3, 3, 128, 1, 1, name='conv2_2', trainable=False)
.max_pool(2, 2, 2, 2, padding='VALID', name='pool2')
.conv(3, 3, 256, 1, 1, name='conv3_1')
.conv(3, 3, 256, 1, 1, name='conv3_2')
.conv(3, 3, 256, 1, 1, name='conv3_3')
.max_pool(2, 2, 2, 2, padding='VALID', name='pool3')
.conv(3, 3, 512, 1, 1, name='conv4_1')
.conv(3, 3, 512, 1, 1, name='conv4_2')
.conv(3, 3, 512, 1, 1, name='conv4_3')
.max_pool(2, 2, 2, 2, padding='VALID', name='pool4')
.conv(3, 3, 512, 1, 1, name='conv5_1')
.conv(3, 3, 512, 1, 1, name='conv5_2')
.conv(3, 3, 512, 1, 1, name='conv5_3'))
#========= RPN ============
    # Apply a 3x3 sliding window on the conv5_3 feature layer
(self.feed('conv5_3')
.conv(3,3,512,1,1,name='rpn_conv/3x3'))
(self.feed('rpn_conv/3x3').lstm(512,128,name='lstm_o'))
(self.feed('lstm_o').lstm_bbox(128,len(anchor_scales) * 10 * 4, name='rpn_bbox_pred'))
(self.feed('lstm_o').lstm_bbox(128,len(anchor_scales) * 10 * 2,name='rpn_cls_score'))
#(self.feed('lstm_o').fc_bbox(256, name='fc_box'))
#(self.feed('fc_box').fc_bbox(len(anchor_scales) * 10 * 4, name='rpn_bbox_pred'))
#(self.feed('fc_box').fc_bbox(len(anchor_scales) * 10 * 2, name='rpn_cls_score'))
# Loss of rpn_cls & rpn_boxes
# shape is (1, H, W, A x 4) and (1, H, W, A x 2)
        # Add a fully convolutional layer to predict the anchors' relative positions, i.e. the deltas
'''
(self.feed('rpn_conv/3x3')
.conv_rpn(1,1,len(anchor_scales) * 10 * 4, 1, 1, padding='VALID', relu = False, name='rpn_bbox_pred'))
        # Add a fully convolutional layer to predict a score for each delta: object and non-object
(self.feed('rpn_conv/3x3')
.conv(1, 1, len(anchor_scales) * 10 * 2, 1, 1, padding='VALID', relu=False, name='rpn_cls_score'))
'''
# generating training labels on the fly
# output: rpn_labels(HxWxA, 2) rpn_bbox_targets(HxWxA, 4) rpn_bbox_inside_weights rpn_bbox_outside_weights
        # Label each anchor and compute the ground-truth targets (also in delta form), plus the inside and outside weights
(self.feed('rpn_cls_score', 'gt_boxes', 'gt_ishard', 'dontcare_areas', 'im_info')
.anchor_target_layer(_feat_stride, anchor_scales, name = 'rpn-data' ))
# shape is (1, H, W, Ax2) -> (1, H, WxA, 2)
        # Apply softmax to the scores obtained above, producing scores between 0 and 1
(self.feed('rpn_cls_score')
.spatial_reshape_layer(2, name = 'rpn_cls_score_reshape')
.spatial_softmax(name='rpn_cls_prob'))
# shape is (1, H, WxA, 2) -> (1, H, W, Ax2)
        # Reshape the scores back to the normal shape
(self.feed('rpn_cls_prob')
.spatial_reshape_layer(len(anchor_scales)*10*2, name = 'rpn_cls_prob_reshape'))
        # Generate the fixed anchors and add the previously predicted rpn_bbox_pred (the deltas) to all of them,
        # then apply NMS-style post-processing, finally yielding 2000 rpn rois
(self.feed('rpn_cls_prob_reshape','rpn_bbox_pred','im_info')
.proposal_layer(_feat_stride, anchor_scales, 'TRAIN', name = 'rpn_rois_data'))
# matching boxes and groundtruth,
# and randomly sample some rois and labels for RCNN
        # Select a subset of the 2000 proposals generated above, assign labels, and prepare them for the RCNN stage
(self.feed('rpn_rois','rpn_targets','gt_boxes', 'gt_ishard', 'dontcare_areas')
.proposal_target_layer(n_classes,name = 'roi-data'))
|
[
"kerzhao@163.com"
] |
kerzhao@163.com
|
e68d4c563be6e6974f20f1df9d5d4e92f618c805
|
ea6c1dbf5a56bb8524db3ab0c8d36f86531535de
|
/rp-portfolio/blog/views.py
|
6908f85df6edaaf743f6c840ecf0ae02d3c397e6
|
[] |
no_license
|
brahada/portfolio-blog-django
|
eedbd4a18436bf6753f190e607c3bbcb77871e23
|
a7cf3a4643cc02c0317fa2eba709536722615be1
|
refs/heads/master
| 2022-12-31T15:59:31.096722
| 2020-10-14T16:56:07
| 2020-10-14T16:56:07
| 304,079,647
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,136
|
py
|
from django.shortcuts import render
from blog.forms import CommentForm
from blog.models import Post, Comment
#from blog.forms import ContactForm
def blog_index(request):
posts = Post.objects.all().order_by("-created_on")
context = {"posts": posts}
return render(request, "blog_index.html", context)
def blog_category(request, category):
posts = Post.objects.filter(categories__name__contains=category).order_by(
"-created_on"
)
context = {"category": category, "posts": posts}
return render(request, "blog_category.html", context)
def blog_detail(request, pk):
post = Post.objects.get(pk=pk)
comments = Comment.objects.filter(post=post)
form = CommentForm()
if request.method == "POST":
form = CommentForm(request.POST)
if form.is_valid():
comment = Comment(
author=form.cleaned_data["author"],
body=form.cleaned_data["body"],
post=post,
)
comment.save()
context = {"post": post, "comments": comments, "form": form}
return render(request, "blog_detail.html", context)
|
[
"brahada25@gmail.com"
] |
brahada25@gmail.com
|
05df7dc2db7b637991a6d8462dc57fecd50efcce
|
de40d3fa8d8af0030556d27d6833f6a1a0e7700c
|
/baekjoon/2592py/a.py
|
4390728c02561068188fc42c3f63a8de97110232
|
[] |
no_license
|
NeoMindStd/CodingLife
|
cd6a627209c0353f4855f09fd5dfef8da4bbfef6
|
bcb6c3752f472e6a4f3b8f158d02bc3599dfcda3
|
refs/heads/master
| 2022-12-24T10:42:45.390085
| 2022-12-11T16:27:16
| 2022-12-11T16:27:16
| 191,797,634
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 156
|
py
|
s=0
d={}
for _ in range(10):
n=int(input())
try:d[n]+=1
except:d[n]=1
s+=n
print(s//10)
print(max(d.items(),key=lambda x:x[1])[0])
|
[
"dwj1996@naver.com"
] |
dwj1996@naver.com
|
5dd00d6d5517f4c7a804a51d9acf545ea04f39f4
|
e26149b575f166d052ce09ceaeb6ef2a31735860
|
/models/mlp_classifier.py
|
7cbac5c4b4cf8e56614e2d49fa9dcbdcec69d06f
|
[] |
no_license
|
mireshghallah/A4NT
|
0ccd755d9710c17010d8bb3d01eb9d82fb4b869f
|
d5aa57e8266a472b84216da254025e21308639a5
|
refs/heads/master
| 2023-04-25T01:16:25.951623
| 2021-05-04T18:52:39
| 2021-05-04T18:52:39
| 364,344,421
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,128
|
py
|
import torch
import torch.utils.data
import torch.nn as nn
from torch.autograd import Variable
from torch import tensor
from tqdm import trange, tqdm
import numpy as np
class MLP_classifier(nn.Module):
def __init__(self, params):
super(MLP_classifier, self).__init__()
#+1 is to allow padding index
self.output_size = params.get('num_output_layers',205)
self.hid_dims = params.get('hidden_widths',[])
self.inp_size = params.get('inp_size',-1)
prev_size = self.inp_size
self.hid_dims.append(self.output_size)
self.lin_layers = nn.ModuleList()
self.non_linearities = nn.ModuleList()
self.dropouts = nn.ModuleList()
for i in xrange(len(self.hid_dims)):
self.lin_layers.append(nn.Linear(prev_size, self.hid_dims[i]))
self.non_linearities.append(nn.SELU())
self.dropouts.append(nn.Dropout(p=params.get('drop_prob',0.0)))
prev_size = self.hid_dims[i]
self.softmax = nn.Softmax()
self.init_weights()
        # We should move this out so that the user decides whether to run on CUDA or not.
self.cuda()
def init_weights(self):
# Weight initializations for various parts.
a = 0.01
# LSTM forget gate could be initialized to high value (1.)
for i in xrange(len(self.hid_dims)):
self.lin_layers[i].weight.data.uniform_(-a, a)
self.lin_layers[i].bias.data.fill_(0)
def forward(self, x, compute_softmax = False):
x = Variable(x).cuda()
prev_out = x
for i in xrange(len(self.hid_dims)-1):
prev_out = self.dropouts[i](prev_out)
prev_out = self.non_linearities[i](self.lin_layers[i](prev_out))
prev_out = self.dropouts[-1](prev_out)
prev_out = self.lin_layers[-1](prev_out)
if compute_softmax:
prob_out = self.softmax(prev_out)
else:
prob_out = prev_out
return prob_out
def fit(self, features, targs, feat_val, targ_test, epochs, lr=1e-3, l2=0.01):
n_samples = features.shape[0]
features = features.astype(np.float32)
b_sz = 10
iter_per_epoch = n_samples / b_sz
total_iters = epochs * iter_per_epoch
self.train()
criterion = nn.CrossEntropyLoss()
optim = torch.optim.RMSprop(self.parameters(), lr=lr, alpha=0.90,
eps=1e-8, weight_decay=l2)
idxes = np.arange(n_samples)
total_loss = 0.
#t = trange(total_iters, desc='ML')
best_loss = 10000.
for i in tqdm(xrange(total_iters)):
optim.zero_grad()
b_ids = np.random.choice(idxes, size=b_sz)
targets = Variable(torch.from_numpy(targs[b_ids])).cuda()
output = self.forward(torch.from_numpy(features[b_ids,:]))
loss = criterion(output, targets)
loss.backward()
# Take an optimization step
optim.step()
total_loss += loss.data.cpu().numpy()[0]
if i % 2000 == 0 and i > 0:
cur_loss = total_loss / 2000.
print('| epoch {:3d} | {:5d}/{:5d} batches | lr {:02.2e} |'
'loss {:5.2f} | ppl {:8.2f}'.format(
i//iter_per_epoch, i, total_iters, lr,
cur_loss, np.exp(cur_loss)))
total_loss = 0.
#if cur_loss <= best_loss:
# best_loss = cur_loss
# best_model = model.state_dict()
def decision_function(self, features):
n_samples = features.shape[0]
features = features.astype(np.float32)
b_sz = 100
total_iters = n_samples // b_sz + 1
self.eval()
scores = np.zeros((n_samples, self.output_size))
for i in tqdm(xrange(total_iters)):
b_ids = np.arange(b_sz*i, min(n_samples,b_sz*(i+1)))
output = self.forward(torch.from_numpy(features[b_ids,:]), compute_softmax = True)
scores[b_ids,:] = output.data.cpu().numpy()
return scores
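# Editor's sketch (not part of the original file): minimal usage of MLP_classifier, assuming numpy
# arrays X_train/y_train/X_val/y_val/X_test and a CUDA-capable device; the parameter names mirror
# the params.get(...) keys used in __init__ above.
#   params = {'inp_size': 300, 'hidden_widths': [128], 'num_output_layers': 205, 'drop_prob': 0.1}
#   clf = MLP_classifier(params)
#   clf.fit(X_train, y_train, X_val, y_val, epochs=5)
#   scores = clf.decision_function(X_test)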
|
[
"f.mireshghallah@gmail.com"
] |
f.mireshghallah@gmail.com
|
ffb2e6251978a942c62d69f3a5f6b154501ad76c
|
1e7440822556f589cee3efe7023ee341a13ba5d7
|
/env/bin/jupyter-migrate
|
b12841132d82b056172d1072e62304c11de4a172
|
[] |
no_license
|
nhatminh2947/deep-learning
|
2b4f2e288fdb954801c10e79347b3116433be55e
|
afa3a5a4a77d033c91a7ed6c40e51791e06cea64
|
refs/heads/master
| 2020-04-03T20:30:02.522061
| 2018-11-05T19:30:11
| 2018-11-05T19:30:11
| 155,547,216
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 261
|
#!/home/cgilab/working/deeplearning/DLhw1/env/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from jupyter_core.migrate import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
|
[
"nhatminh2947@gmail.com"
] |
nhatminh2947@gmail.com
|
|
44bab2d0245a7c589ff32e3972a72df4dd038efb
|
1aa1e6083c41bdb1337618f6403be0b8d02fb222
|
/catkin_ws/src/vicon_bridge/src/tester.py~
|
ed5d68ba9ac4a52c079b9e0a33f489821a4af9b8
|
[
"BSD-3-Clause"
] |
permissive
|
fantasYu-Chao/Stupid-Baxter
|
7cc05b9d292c42a9c74c3223c3be7dd78659ff61
|
d7dad7aba023f29bbca389175994e72ae3486315
|
refs/heads/master
| 2021-06-27T10:44:53.574794
| 2017-09-10T21:27:18
| 2017-09-10T21:27:18
| 103,059,643
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,603
|
#!/usr/bin/env python
import roslib
import sys
import rospy
import numpy as np
import pdb
from matplotlib import pyplot as plt
from std_msgs.msg import String
from sensor_msgs.msg import Image
from geometry_msgs.msg import TransformStamped
import os
#define subscriber node
binCor = []
class getBin():
def __init__(self):
subNode = rospy.Subscriber("/vicon/Bin/Bin", TransformStamped, self.callback_bin)
rospy.rostime.set_rostime_initialized( True )
def callback_bin(self, data):
binCor.append(data.transform.translation.x)
binCor.append(data.transform.translation.y)
binCor.append(data.transform.translation.z)
binCor.append(data.transform.rotation.x)
binCor.append(data.transform.rotation.y)
binCor.append(data.transform.rotation.z)
binCor.append(data.transform.rotation.w)
print binCor
class retrieveSponge():
def __init__(self):
subNode = rospy.Subscriber("/vicon/Sponge/Sponge", TransformStamped, self.callback_sponge)
rospy.rostime.set_rostime_initialized( True )
def callback_sponge(self, data):
swx = data.transform.translation.x
swy = data.transform.translation.y
swz = data.transform.translation.z
sqx = data.transform.rotation.x
sqy = data.transform.rotation.y
sqz = data.transform.rotation.z
sqw = data.transform.rotation.w
print "sponge data: %.4f %.4f %.4f %.4f %.4f %.4f %.4f" % (swx, swy, swz, sqx, sqy, sqz, sqw)
if __name__=='__main__':
sponge = retrieveSponge()
binLoc = getBin()
rospy.init_node('Tracker', anonymous=True)
try:
rospy.spin()
except KeyboardInterrupt:
print "shutting down"
|
[
"noreply@github.com"
] |
fantasYu-Chao.noreply@github.com
|
|
63685baeb08e46ae65f832ecc2847c370da279e7
|
ca3863b7e1fc320cdf92b48924458f7109b3098f
|
/migrations/0020_auto_20170924_0549.py
|
82ae95006e7dcee2f698d91e5c8b56710f6a78b1
|
[] |
no_license
|
rcrowther/django-category-collection
|
eda27123954cdfd5a4062f9da830e659a99ed7ff
|
548591a27ebdae537cf663d7cd4a600fa2e570b2
|
refs/heads/master
| 2021-06-24T19:28:41.577466
| 2020-11-03T19:45:54
| 2020-11-03T19:45:54
| 102,010,428
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,232
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-09-24 05:49
from __future__ import unicode_literals
from django.db import migrations, models
import taxonomy.models
class Migration(migrations.Migration):
dependencies = [
('taxonomy', '0019_auto_20170908_1804'),
]
operations = [
migrations.CreateModel(
name='TreeTerm',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('tree', models.IntegerField(db_index=True, help_text='A Tree associated with this Term.')),
('term', models.IntegerField(db_index=True, help_text='Term associated with a Tree.')),
],
),
migrations.AddField(
model_name='termnode',
name='tree',
field=models.IntegerField(db_index=True, default=29, help_text='A Tree associated with an element.'),
preserve_default=False,
),
migrations.AlterField(
model_name='termnode',
name='term',
field=models.IntegerField(help_text='A Term associated with an element.', verbose_name=taxonomy.models.Term),
),
]
|
[
"rw.crowther@gmail.com"
] |
rw.crowther@gmail.com
|
0cd0f206c60724b2e71d6f6c33a6c782ea179603
|
3b9b4049a8e7d38b49e07bb752780b2f1d792851
|
/src/testing/scripts/common.py
|
5cba136cb51561bbf0a1833997927573eba604c7
|
[
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
webosce/chromium53
|
f8e745e91363586aee9620c609aacf15b3261540
|
9171447efcf0bb393d41d1dc877c7c13c46d8e38
|
refs/heads/webosce
| 2020-03-26T23:08:14.416858
| 2018-08-23T08:35:17
| 2018-09-20T14:25:18
| 145,513,343
| 0
| 2
|
Apache-2.0
| 2019-08-21T22:44:55
| 2018-08-21T05:52:31
| null |
UTF-8
|
Python
| false
| false
| 5,124
|
py
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import argparse
import contextlib
import json
import os
import subprocess
import sys
import tempfile
SCRIPT_DIR = os.path.abspath(os.path.dirname(__file__))
SRC_DIR = os.path.abspath(
os.path.join(SCRIPT_DIR, os.path.pardir, os.path.pardir))
# run-webkit-tests returns the number of failures as the return
# code, but caps the return code at 101 to avoid overflow or colliding
# with reserved values from the shell.
MAX_FAILURES_EXIT_STATUS = 101
def run_script(argv, funcs):
def parse_json(path):
with open(path) as f:
return json.load(f)
parser = argparse.ArgumentParser()
# TODO(phajdan.jr): Make build-config-fs required after passing it in recipe.
parser.add_argument('--build-config-fs')
parser.add_argument('--paths', type=parse_json, default={})
# Properties describe the environment of the build, and are the same per
# script invocation.
parser.add_argument('--properties', type=parse_json, default={})
# Args contains per-invocation arguments that potentially change the
# behavior of the script.
parser.add_argument('--args', type=parse_json, default=[])
parser.add_argument(
'--use-src-side-runtest-py', action='store_true',
help='Use the src-side copy of runtest.py, as opposed to the build-side '
'one')
subparsers = parser.add_subparsers()
run_parser = subparsers.add_parser('run')
run_parser.add_argument(
'--output', type=argparse.FileType('w'), required=True)
run_parser.add_argument('--filter-file', type=argparse.FileType('r'))
run_parser.set_defaults(func=funcs['run'])
run_parser = subparsers.add_parser('compile_targets')
run_parser.add_argument(
'--output', type=argparse.FileType('w'), required=True)
run_parser.set_defaults(func=funcs['compile_targets'])
args = parser.parse_args(argv)
return args.func(args)
def run_command(argv, env=None):
print 'Running %r' % argv
rc = subprocess.call(argv, env=env)
print 'Command %r returned exit code %d' % (argv, rc)
return rc
def run_runtest(cmd_args, runtest_args):
if cmd_args.use_src_side_runtest_py:
cmd = [
sys.executable,
os.path.join(
cmd_args.paths['checkout'], 'infra', 'scripts', 'runtest_wrapper.py'),
'--',
]
else:
cmd = [
sys.executable,
cmd_args.paths['runit.py'],
'--show-path',
sys.executable,
cmd_args.paths['runtest.py'],
]
return run_command(cmd + [
'--target', cmd_args.build_config_fs,
'--xvfb',
'--builder-name', cmd_args.properties['buildername'],
'--slave-name', cmd_args.properties['slavename'],
'--build-number', str(cmd_args.properties['buildnumber']),
'--build-properties', json.dumps(cmd_args.properties),
] + runtest_args)
@contextlib.contextmanager
def temporary_file():
fd, path = tempfile.mkstemp()
os.close(fd)
try:
yield path
finally:
os.remove(path)
def parse_common_test_results(json_results, test_separator='/'):
def convert_trie_to_flat_paths(trie, prefix=None):
# Also see webkitpy.layout_tests.layout_package.json_results_generator
result = {}
for name, data in trie.iteritems():
if prefix:
name = prefix + test_separator + name
if len(data) and not 'actual' in data and not 'expected' in data:
result.update(convert_trie_to_flat_paths(data, name))
else:
result[name] = data
return result
results = {
'passes': {},
'unexpected_passes': {},
'failures': {},
'unexpected_failures': {},
'flakes': {},
'unexpected_flakes': {},
}
# TODO(dpranke): crbug.com/357866 - we should simplify the handling of
# both the return code and parsing the actual results, below.
passing_statuses = ('PASS', 'SLOW', 'NEEDSREBASELINE',
'NEEDSMANUALREBASELINE')
for test, result in convert_trie_to_flat_paths(
json_results['tests']).iteritems():
key = 'unexpected_' if result.get('is_unexpected') else ''
data = result['actual']
actual_results = data.split()
last_result = actual_results[-1]
expected_results = result['expected'].split()
if (len(actual_results) > 1 and
(last_result in expected_results or last_result in passing_statuses)):
key += 'flakes'
elif last_result in passing_statuses:
key += 'passes'
# TODO(dpranke): crbug.com/357867 ... Why are we assigning result
# instead of actual_result here. Do we even need these things to be
# hashes, or just lists?
data = result
else:
key += 'failures'
results[key][test] = data
return results
def run_integration_test(script_to_run, extra_args, log_file, output):
integration_test_res = subprocess.call(
[sys.executable, script_to_run] + extra_args)
with open(log_file) as f:
failures = json.load(f)
json.dump({
'valid': integration_test_res == 0,
'failures': failures,
}, output)
return integration_test_res
|
[
"changhyeok.bae@lge.com"
] |
changhyeok.bae@lge.com
|
dfe42d3ff33a07cb33c6ff63594af3c18284b2e4
|
7e27687d12192b9ac44059801e1d4f5b4cd7575c
|
/dev2/Buscar palabra.py
|
2e61b6b9a090b23d02b4ec50de7faf9939c9b6f6
|
[] |
no_license
|
Latinaheadshot/DevF
|
6c220db2f508ddc0fe781bb98524e7345077bdac
|
d04e7953b2df661bf239e945990609a83447baf6
|
refs/heads/master
| 2021-07-10T10:55:49.634102
| 2017-10-11T01:39:50
| 2017-10-11T01:39:50
| 106,357,589
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 195
|
py
|
# Read a phrase and count how many times each word is repeated
contador = {}
numero_palabras = input("Introduce una frase con palabras repetidas")
for palabra in numero_palabras.split():
    contador[palabra] = contador.get(palabra, 0) + 1
print(contador)
|
[
"latinaheadshot@gmail.com"
] |
latinaheadshot@gmail.com
|
e71b52e341f37e5239e11b063025b35f18eb7355
|
d1e217093ee70dd9f2911781ea7ac60eb923bbbd
|
/python/prod_HCalMIPCali_test4_Data_PromptReco2017_v3_DoubleMuon_Run2017B-PromptReco-v2_v1.py
|
4c0a293106c2d70333b8b873c84b0c119d33df81
|
[] |
no_license
|
nanlu06/MuonCalibrationHCalPhaseI
|
555375b45fbeabe849d5f2e458e723cec1a5e1b9
|
5c2df37fc8760cf38da0894b65a69559eb29e639
|
refs/heads/master
| 2021-01-20T21:12:49.593639
| 2017-08-29T12:19:33
| 2017-08-29T12:19:33
| 101,739,272
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 897
|
py
|
from WMCore.Configuration import Configuration
config = Configuration()
config.section_("General")
config.General.requestName = "prod_HCalMIPCali_test4_Data_PromptReco2017_v3_5f20d31dd66741a69a27f27941a283cf_v1"
config.General.workArea = "crab_prod"
config.section_("JobType")
config.JobType.pluginName = "Analysis"
config.JobType.psetName = "prod.py"
config.JobType.allowUndistributedCMSSW = True
config.section_("Data")
config.Data.ignoreLocality = True
config.Data.inputDataset = "/DoubleMuon/Run2017B-PromptReco-v2/RECO"
config.Data.lumiMask = 'dataJSON/Cert_294927-299649_13TeV_PromptReco_Collisions17_JSON.txt'
config.Data.splitting = "LumiBased"
config.Data.unitsPerJob = 100
config.Data.outputDatasetTag = "HCalMIPCali_test4_Data_PromptReco2017_Run2017B-PromptReco-v2_v3_v1"
config.section_("Site")
config.Site.storageSite = "T2_CH_CERN"
config.Data.outLFNDirBase = '/store/user/nlu/'
|
[
"nan.lu@cern.ch"
] |
nan.lu@cern.ch
|
7633fd8074fa4323502df4f92dd8f1339ac6bfb3
|
60411095686f8046d5b5c53bad123c35df6a379a
|
/texttutils/texttutils/settings.py
|
4c2d31cf4466811dc8f5c73b54a971389ba1b08e
|
[] |
no_license
|
suraj-001/text_operations
|
463d49654d3b30ab440d451b5d1aefa3d22a773c
|
9a64c91434dd1dad91fa5639347c18c7572d709f
|
refs/heads/master
| 2023-07-18T06:03:12.105472
| 2021-09-07T10:09:41
| 2021-09-07T10:09:41
| 401,244,548
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,262
|
py
|
"""
Django settings for texttutils project.
Generated by 'django-admin startproject' using Django 3.2.6.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'django-insecure-3p30fa9l5t@-2p6#9$fh01%x6f&9eo2tnka+@@c@3e0vtx$e-n'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'texttutils.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': ['templates'],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'texttutils.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
|
[
"surajkumaragarwal36@gmail.com"
] |
surajkumaragarwal36@gmail.com
|
e05f4a9adc90c0dc69f6eff1b2ba02e28e11f695
|
18e3873e4c8b66ae5fc53dbc4ba595f8e7be6a85
|
/EntrepriseDevScraper/pagesjaune/pagesjaune/middlewares.py
|
c1126d7a9778a5f20f2ba75fbc582bf5e49a6ce2
|
[] |
no_license
|
zestelle2/page_scrape
|
b3557755c5c0cb38907f46d03dbc0cea13467172
|
191ffc7bc9d5437c4f5af844a42b17a716615ea8
|
refs/heads/master
| 2020-03-31T20:56:49.175581
| 2018-10-25T07:18:11
| 2018-10-25T07:18:11
| 152,561,470
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,605
|
py
|
# -*- coding: utf-8 -*-
# Define here the models for your spider middleware
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/spider-middleware.html
from scrapy import signals
class PagesjauneSpiderMiddleware(object):
# Not all methods need to be defined. If a method is not defined,
# scrapy acts as if the spider middleware does not modify the
# passed objects.
@classmethod
def from_crawler(cls, crawler):
# This method is used by Scrapy to create your spiders.
s = cls()
crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
return s
def process_spider_input(self, response, spider):
# Called for each response that goes through the spider
# middleware and into the spider.
# Should return None or raise an exception.
return None
def process_spider_output(self, response, result, spider):
# Called with the results returned from the Spider, after
# it has processed the response.
# Must return an iterable of Request, dict or Item objects.
for i in result:
yield i
def process_spider_exception(self, response, exception, spider):
# Called when a spider or process_spider_input() method
# (from other spider middleware) raises an exception.
# Should return either None or an iterable of Response, dict
# or Item objects.
pass
def process_start_requests(self, start_requests, spider):
# Called with the start requests of the spider, and works
# similarly to the process_spider_output() method, except
# that it doesn’t have a response associated.
# Must return only requests (not items).
for r in start_requests:
yield r
def spider_opened(self, spider):
spider.logger.info('Spider opened: %s' % spider.name)
class PagesjauneDownloaderMiddleware(object):
# Not all methods need to be defined. If a method is not defined,
# scrapy acts as if the downloader middleware does not modify the
# passed objects.
@classmethod
def from_crawler(cls, crawler):
# This method is used by Scrapy to create your spiders.
s = cls()
crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
return s
def process_request(self, request, spider):
# Called for each request that goes through the downloader
# middleware.
# Must either:
# - return None: continue processing this request
# - or return a Response object
# - or return a Request object
# - or raise IgnoreRequest: process_exception() methods of
# installed downloader middleware will be called
return None
def process_response(self, request, response, spider):
# Called with the response returned from the downloader.
# Must either;
# - return a Response object
# - return a Request object
# - or raise IgnoreRequest
return response
def process_exception(self, request, exception, spider):
# Called when a download handler or a process_request()
# (from other downloader middleware) raises an exception.
# Must either:
# - return None: continue processing this exception
# - return a Response object: stops process_exception() chain
# - return a Request object: stops process_exception() chain
pass
def spider_opened(self, spider):
spider.logger.info('Spider opened: %s' % spider.name)
|
[
"noreply@github.com"
] |
zestelle2.noreply@github.com
|
8a8a91c3279bc0806a46df31b528f0258eb4f877
|
331368bdb5a9965a7c2b229094f749c733950196
|
/backend/test_27281/wsgi.py
|
67fa9715ca926805d323adc68be6449985f451bb
|
[] |
no_license
|
crowdbotics-apps/test-27281
|
86705fe86fc63745abf9b34129ca54db0f4d0d48
|
ce835ef081ae7006acf911dc9b6cd65f2b1fa0cd
|
refs/heads/master
| 2023-05-05T10:31:53.099862
| 2021-05-23T17:13:44
| 2021-05-23T17:13:44
| 370,111,657
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 397
|
py
|
"""
WSGI config for test_27281 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "test_27281.settings")
application = get_wsgi_application()
|
[
"team@crowdbotics.com"
] |
team@crowdbotics.com
|
f5c72769a2d84c877c0181b35d3f1f9c2a4c7277
|
89b33345a9c949c72231c0e09e31c2c2f3033a3e
|
/anagram.py
|
1a3fe93f440840315d00a387b781e865fef082f6
|
[] |
no_license
|
rakhiPurwar/hackerrank-solutions
|
2ab3c88930d7c0aa94122e64c020b63d354683dc
|
cbfd2dba11d410715d80a039bff511b43ad9e8e4
|
refs/heads/master
| 2022-11-14T23:28:37.472249
| 2020-07-07T05:46:56
| 2020-07-07T05:46:56
| 267,019,712
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 352
|
py
|
from collections import Counter
# Complete the anagram function below.
def anagram(s):
    # An odd-length string cannot be split into two equal halves.
    l = len(s)
    if l % 2 != 0:
        return -1
    r = l // 2
    # Characters present in the first half but missing from the second are
    # exactly the characters that must be changed.
    p = Counter(s[0:r])
    q = Counter(s[r:l])
    diff = p - q
    return sum(val for key, val in diff.items())
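# Rough usage sketch (HackerRank-style input assumed): for s = "abccde" the
# halves are "abc" and "cde"; Counter("abc") - Counter("cde") leaves
# {'a': 1, 'b': 1}, so anagram("abccde") returns 2. An odd-length string such
# as "abc" returns -1.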
|
[
"noreply@github.com"
] |
rakhiPurwar.noreply@github.com
|
b0830505869673479f63ede4681823d98ca19bff
|
d22b6c2b4923f43f217bd0efc6584f6183afabd9
|
/tools/gh_api.py
|
6b042ed4c6801169763ef1ca2f150d35075e5aee
|
[
"BSD-3-Clause"
] |
permissive
|
fccoelho/ipython
|
9719e4d50f10b7996448ac5f8d9629b6c8b2f76b
|
128c40ea8cacd2f7c9bf6228765fdc0b31f8b816
|
refs/heads/master
| 2021-01-18T06:28:25.593133
| 2012-06-18T01:25:01
| 2012-06-18T01:25:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,465
|
py
|
"""Functions for Github authorisation."""
from __future__ import print_function
try:
input = raw_input
except NameError:
pass
import requests
import getpass
import json
# Keyring stores passwords by a 'username', but we're not storing a username and
# password
fake_username = 'ipython_tools'
token = None
def get_auth_token():
global token
if token is not None:
return token
import keyring
token = keyring.get_password('github', fake_username)
if token is not None:
return token
print("Please enter your github username and password. These are not "
"stored, only used to get an oAuth token. You can revoke this at "
"any time on Github.")
user = input("Username: ")
pw = getpass.getpass("Password: ")
auth_request = {
"scopes": [
"public_repo",
"gist"
],
"note": "IPython tools",
"note_url": "https://github.com/ipython/ipython/tree/master/tools",
}
response = requests.post('https://api.github.com/authorizations',
auth=(user, pw), data=json.dumps(auth_request))
response.raise_for_status()
token = json.loads(response.text)['token']
keyring.set_password('github', fake_username, token)
return token
def make_auth_header():
return {'Authorization': 'token ' + get_auth_token()}
def post_issue_comment(project, num, body):
url = 'https://api.github.com/repos/{project}/issues/{num}/comments'.format(project=project, num=num)
payload = json.dumps({'body': body})
r = requests.post(url, data=payload, headers=make_auth_header())
def post_gist(content, description='', filename='file', auth=False):
"""Post some text to a Gist, and return the URL."""
post_data = json.dumps({
"description": description,
"public": True,
"files": {
filename: {
"content": content
}
}
}).encode('utf-8')
headers = make_auth_header() if auth else {}
response = requests.post("https://api.github.com/gists", data=post_data, headers=headers)
response.raise_for_status()
response_data = json.loads(response.text)
return response_data['html_url']
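# Rough usage sketch (illustrative values only; requires network access and,
# for auth=True, a stored GitHub token):
#   url = post_gist("print('hi')", description="demo", filename="demo.py")
#   print(url)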
def get_pull_request(project, num):
url = "https://api.github.com/repos/{project}/pulls/{num}".format(project=project, num=num)
response = requests.get(url)
response.raise_for_status()
return json.loads(response.text)
|
[
"takowl@gmail.com"
] |
takowl@gmail.com
|
c0cebb568d34cd714bb6bd5ad5eef84e6f688068
|
c5c56d8c00e9c30ed58893b0ad776c3f874fc493
|
/backend/vocabulary_wizard_21967/wsgi.py
|
2530f6001ac3b68ea7f972da5548202362a9fd6b
|
[] |
no_license
|
crowdbotics-apps/vocabulary-wizard-21967
|
4e512a10372746800dfc99f5a79cfba3d339cfc1
|
d63c837e95a0e06bd4bd740ef61eb9449fa82401
|
refs/heads/master
| 2023-01-06T17:02:34.971325
| 2020-10-26T14:22:41
| 2020-10-26T14:22:41
| 307,394,930
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 423
|
py
|
"""
WSGI config for vocabulary_wizard_21967 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "vocabulary_wizard_21967.settings")
application = get_wsgi_application()
|
[
"team@crowdbotics.com"
] |
team@crowdbotics.com
|
dbea2e0eecf115e2d3e3c8613f4c8003d1dc1fd2
|
cb8a064f74a195f43d61b4e020e8d01e3e743c2c
|
/store/migrations/0002_variation.py
|
0689a1383cfc691814eb34223e29fe9132894b16
|
[] |
no_license
|
yifan1003/E-Commerce-application
|
c784441f6794bce59dd18287215a19d23a662e55
|
3a1388478fae3679068b3e44e3b736e9d6cbd313
|
refs/heads/main
| 2023-06-21T23:51:56.221425
| 2021-07-26T04:08:44
| 2021-07-26T04:08:44
| 381,929,993
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 945
|
py
|
# Generated by Django 3.1.7 on 2021-07-15 22:24
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('store', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Variation',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('variation_category', models.CharField(choices=[('color', 'color'), ('size', 'size')], max_length=100)),
('variation_value', models.CharField(max_length=100)),
('is_active', models.BooleanField(default=True)),
('created_date', models.DateTimeField(auto_now=True)),
('product', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='store.product')),
],
),
]
|
[
"hyifan9988@gmail.com"
] |
hyifan9988@gmail.com
|
5ffe0e1a00a723873eb6f90c2251bd8659623cef
|
05105d09c6f3298dffc86cd2c33478e054a34017
|
/firstProject/urls.py
|
fdf4a1a9444974f1efcad99797895c82220b330e
|
[] |
no_license
|
LeeHyogeum12/tone1
|
0c660b7c02e5569b06c7a0dc24a8b38525aaaf75
|
bf0a4de8cd54da3ab6568ffd2069918487edc265
|
refs/heads/master
| 2022-11-13T02:02:13.100184
| 2020-07-05T14:47:16
| 2020-07-05T14:47:16
| 277,316,810
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,298
|
py
|
"""firstProject URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
import blogapp.views
from django.conf.urls import include
from django.conf import settings
from django.conf.urls.static import static
urlpatterns = [
path('admin/', admin.site.urls),
path('', blogapp.views.index, name='index'),
path('blogMain/', blogapp.views.blogMain, name='blogMain'),
path('blogMain/createBlog/', blogapp.views.createBlog, name='createBlog'),
path('ckeditor/', include('ckeditor_uploader.urls')),
path('blogMain/detail/<int:blog_id>/', blogapp.views.detail, name='detail'),
]
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
[
"lhgeum0728@hs.ac.kr"
] |
lhgeum0728@hs.ac.kr
|
ed10f54681be411ff65af9f60f5ed3a72b206f90
|
4366d13ff81a552c933f9d57d04a541c9d76ce3a
|
/eHealth/migrations/0002_auto_20160313_0039.py
|
a17bdda414cab4a37a36dd364672c3812fa4c501
|
[] |
no_license
|
mariosfx540/eHealthProjectGoU
|
d3cde3922fdd59fdfe0c2a9f95d425601cb56cc5
|
c23aea48d0011a8fbd5fb8762094d5af84c2ca19
|
refs/heads/master
| 2021-01-10T07:14:09.805479
| 2016-03-25T23:58:44
| 2016-03-25T23:58:44
| 53,608,620
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 423
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('eHealth', '0001_initial'),
]
operations = [
migrations.RemoveField(
model_name='category_list',
name='user',
),
migrations.DeleteModel(
name='Category_List',
),
]
|
[
"marios_540@hotmail.com"
] |
marios_540@hotmail.com
|
d41d9b60b8385ac21e16adcbafb795302d3934f7
|
a8062308fb3bf6c8952257504a50c3e97d801294
|
/problems/N1610_Maximum_Number_Of_Visible_Points.py
|
ecf9ff8e7f827ff53db8bd41ec7802b16da2af95
|
[] |
no_license
|
wan-catherine/Leetcode
|
650d697a873ad23c0b64d08ad525bf9fcdb62b1b
|
238995bd23c8a6c40c6035890e94baa2473d4bbc
|
refs/heads/master
| 2023-09-01T00:56:27.677230
| 2023-08-31T00:49:31
| 2023-08-31T00:49:31
| 143,770,000
| 5
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 852
|
py
|
import math
class Solution(object):
def visiblePoints(self, points, angle, location):
"""
:type points: List[List[int]]
:type angle: int
:type location: List[int]
:rtype: int
"""
angles, same = [], 0
for x, y in points:
if x == location[0] and y == location[1]:
same += 1
continue
angles.append(math.atan2(y - location[1], x - location[0]))
angles.sort()
angles += [x + 2.0 * math.pi for x in angles]
start, end = 0, 0
length = len(angles)
base = angle * math.pi / 180
res = 0
while end < length:
while angles[end] - angles[start] > base:
start += 1
res = max(res, end - start + 1)
end += 1
return res + same
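# Minimal usage sketch (made-up coordinates): standing at location [1, 1]
# with a 90-degree field of view, the points [[2, 1], [2, 2], [3, 3]] all fall
# within a single window over the sorted angles, so
# Solution().visiblePoints([[2, 1], [2, 2], [3, 3]], 90, [1, 1]) should be 3.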
|
[
"rarry2012@gmail.com"
] |
rarry2012@gmail.com
|
15956d66ac056dbba08a63443b13fbbd58cebae0
|
6fcee2268c4fad9c4e37069d9c18a1452303b38e
|
/apps/user/views.py
|
91c6eaf0087402d13f2ec3e15aa8be2d353d1c21
|
[] |
no_license
|
JorgitoR/omnilatan-backend
|
ec6586492883f14c2a0bd6a99b9fb7dadfd63a4f
|
77f7d55278e595ebd47f9a96af165a08ca9a4354
|
refs/heads/main
| 2023-07-18T09:25:28.481306
| 2021-08-31T17:29:18
| 2021-08-31T17:29:18
| 400,928,335
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 421
|
py
|
from django.shortcuts import render
# Models
from omnilatam.apps.order.models import Order
def signup(request):
return render(request, 'user/signup.html')
def login(request):
return render(request, 'user/login.html')
def profile(request):
orders = Order.objects.filter(product__order_product__user=request.user).distinct()
context = {
'orders':orders
}
return render(request, 'user/profile.html', context)
|
[
"jorgitouribe@gmail.com"
] |
jorgitouribe@gmail.com
|
89ec2782bfac4b4bb21af8aa6c412cdb5b0c0648
|
f552ca018542184f34246405afb9b30999a57f2e
|
/criacionais/abstractFactory/carro/modelos/fiestaSedan.py
|
ce31e8fb64f1ba2656813f84c9d18ad256923a2f
|
[] |
no_license
|
PlumpMath/DesignPatterns-440
|
feea6847160e3c7393a2da80e6b22b9b2273ee92
|
bef2ff66dddc90b7e6b529828b094bfc48754a01
|
refs/heads/master
| 2021-01-20T09:52:12.704627
| 2017-04-29T22:58:07
| 2017-04-29T22:58:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 219
|
py
|
from carro.categorias.carroSedan import CarroSedan
class FiestaSedan(CarroSedan):
def mostra_informacao(self):
print("Modelo: Fiesta")
print("Fabricante: Ford")
print("Categoria: Sedan\n")
|
[
"victorhad@gmail.com"
] |
victorhad@gmail.com
|
023a701fcbd187c2d9e32af920c7d0a29a98b36c
|
6600952c5a14bf306819feac737d3617024c1320
|
/core/guest.py
|
8861088cfc62646be2e8258211f8bd33b90aa6ee
|
[] |
no_license
|
SNDBOXLTD/cuckoo
|
aae51173a59d8085a53a175a4d825af40fec8b58
|
d2f342f128ea1d8c69fa481775362b3fee5757f8
|
refs/heads/master
| 2021-06-08T09:26:45.598247
| 2020-04-26T15:34:43
| 2020-04-26T15:34:43
| 115,909,717
| 3
| 2
| null | 2020-10-01T17:50:57
| 2018-01-01T09:17:05
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 20,797
|
py
|
# Copyright (C) 2012-2013 Claudio Guarnieri.
# Copyright (C) 2014-2017 Cuckoo Foundation.
# This file is part of Cuckoo Sandbox - http://www.cuckoosandbox.org
# See the file 'docs/LICENSE' for copying permission.
import datetime
import io
import json
import logging
import os
import requests
import socket
import time
import xmlrpclib
import zipfile
from cuckoo.common.config import config, parse_options
from cuckoo.common.constants import (
CUCKOO_GUEST_PORT, CUCKOO_GUEST_INIT, CUCKOO_GUEST_COMPLETED,
CUCKOO_GUEST_FAILED
)
from cuckoo.common.exceptions import (
CuckooGuestError, CuckooGuestCriticalTimeout
)
from cuckoo.common.utils import TimeoutServer
from cuckoo.core.database import Database
from cuckoo.misc import cwd
log = logging.getLogger(__name__)
db = Database()
def analyzer_zipfile(platform, monitor):
"""Creates the Zip file that is sent to the Guest."""
t = time.time()
zip_data = io.BytesIO()
zip_file = zipfile.ZipFile(zip_data, "w", zipfile.ZIP_STORED)
# Select the proper analyzer's folder according to the operating
# system associated with the current machine.
root = cwd("analyzer", platform)
root_len = len(os.path.abspath(root))
if not os.path.exists(root):
log.error("No valid analyzer found at path: %s", root)
raise CuckooGuestError(
"No valid analyzer found for %s platform!" % platform
)
# Walk through everything inside the analyzer's folder and write
# them to the zip archive.
for root, dirs, files in os.walk(root):
archive_root = os.path.abspath(root)[root_len:]
for name in files:
path = os.path.join(root, name)
archive_name = os.path.join(archive_root, name)
zip_file.write(path, archive_name)
# Include the chosen monitoring component and any additional files.
if platform == "windows":
dirpath = cwd("monitor", monitor)
        # Generally speaking we should no longer be getting symbolic links
        # for "latest", so in the case of a regular file, follow it.
if os.path.isfile(dirpath):
monitor = os.path.basename(open(dirpath, "rb").read().strip())
dirpath = cwd("monitor", monitor)
for name in os.listdir(dirpath):
zip_file.write(
os.path.join(dirpath, name), os.path.join("bin", name)
)
# Dump compiled "dumpmem" Yara rules for zer0m0n usage.
zip_file.write(cwd("stuff", "dumpmem.yarac"), "bin/rules.yarac")
zip_file.close()
data = zip_data.getvalue()
if time.time() - t > 10:
log.warning(
"It took more than 10 seconds to build the Analyzer Zip for the "
"Guest. This might be a serious performance penalty. Is your "
"analyzer/windows/ directory bloated with unnecessary files?"
)
return data
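# Rough shape of the archive produced above for a Windows guest (paths are
# illustrative): the analyzer/windows/ tree sits at the archive root, the
# selected monitor binaries are placed under bin/, and the compiled
# "dumpmem" Yara rules end up at bin/rules.yarac.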
class OldGuestManager(object):
"""Old and deprecated Guest Manager.
This class handles the communications with the old agent running in the
virtual machine.
"""
def __init__(self, vm_id, ip, platform, task_id):
"""@param ip: guest's IP address.
@param platform: guest's operating system type.
"""
self.id = vm_id
self.ip = ip
self.platform = platform
self.task_id = task_id
# initialized in start_analysis so we can update the critical timeout
# TODO, pull options parameter into __init__ so we can do this here
self.timeout = None
self.server = None
def wait(self, status):
"""Waiting for status.
@param status: status.
@return: always True.
"""
log.debug("%s: waiting for status 0x%.04x", self.id, status)
end = time.time() + self.timeout
self.server._set_timeout(self.timeout)
while db.guest_get_status(self.task_id) == "starting":
# Check if we've passed the timeout.
if time.time() > end:
raise CuckooGuestCriticalTimeout(
"Machine %s: the guest initialization hit the "
"critical timeout, analysis aborted." % self.id
)
try:
# If the server returns the given status, break the loop
# and return.
if self.server.get_status() == status:
log.debug("%s: status ready", self.id)
break
except:
pass
log.debug("%s: not ready yet", self.id)
time.sleep(1)
self.server._set_timeout(None)
return True
def upload_analyzer(self, monitor):
"""Upload analyzer to guest.
@return: operation status.
"""
zip_data = analyzer_zipfile(self.platform, monitor)
log.debug(
"Uploading analyzer to guest (id=%s, ip=%s, monitor=%s, size=%d)",
self.id, self.ip, monitor, len(zip_data)
)
# Send the zip containing the analyzer to the agent running inside
# the guest.
try:
self.server.add_analyzer(xmlrpclib.Binary(zip_data))
except socket.timeout:
raise CuckooGuestError("{0}: guest communication timeout: unable "
"to upload agent, check networking or try "
"to increase timeout".format(self.id))
def start_analysis(self, options, monitor):
"""Start analysis.
@param options: options.
@return: operation status.
"""
# TODO Deal with unicode URLs, should probably try URL encoding.
# Unicode files are being taken care of.
self.timeout = options["timeout"] + config("cuckoo:timeouts:critical")
url = "http://{0}:{1}".format(self.ip, CUCKOO_GUEST_PORT)
self.server = TimeoutServer(url, allow_none=True,
timeout=self.timeout)
try:
# Wait for the agent to respond. This is done to check the
# availability of the agent and verify that it's ready to receive
# data.
self.wait(CUCKOO_GUEST_INIT)
# Invoke the upload of the analyzer to the guest.
self.upload_analyzer(monitor)
# Give the analysis options to the guest, so it can generate the
# analysis.conf inside the guest.
try:
self.server.add_config(options)
except:
raise CuckooGuestError(
"%s: unable to upload config to analysis machine" %
self.id
)
# If the target of the analysis is a file, upload it to the guest.
if options["category"] in ("file", "archive"):
try:
file_data = open(options["target"], "rb").read()
except (IOError, OSError) as e:
raise CuckooGuestError(
"Unable to read %s, error: %s" %
(options["target"], e)
)
data = xmlrpclib.Binary(file_data)
try:
self.server.add_malware(data, options["file_name"])
except Exception as e:
raise CuckooGuestError(
"#%s: unable to upload malware to analysis "
"machine: %s" % (self.id, e)
)
# Launch the analyzer.
pid = self.server.execute()
log.debug("%s: analyzer started with PID %d", self.id, pid)
# If something goes wrong when establishing the connection, raise an
# exception and abort the analysis.
except (socket.timeout, socket.error):
raise CuckooGuestError(
"%s: guest communication timeout, check networking or try "
"to increase timeout" % self.id
)
def wait_for_completion(self):
"""Wait for analysis completion.
@return: operation status.
"""
log.debug("%s: waiting for completion", self.id)
end = time.time() + self.timeout
self.server._set_timeout(self.timeout)
while db.guest_get_status(self.task_id) == "running":
time.sleep(1)
# If the analysis hits the critical timeout, just return straight
# away and try to recover the analysis results from the guest.
if time.time() > end:
log.info("%s: end of analysis reached!", self.id)
return
try:
status = self.server.get_status()
except Exception as e:
log.debug("%s: error retrieving status: %s", self.id, e)
continue
# React according to the returned status.
if status == CUCKOO_GUEST_COMPLETED:
log.info("%s: analysis completed successfully", self.id)
break
elif status == CUCKOO_GUEST_FAILED:
error = self.server.get_error()
raise CuckooGuestError(
"Analysis failed: %s" % (error or "unknown error")
)
else:
log.debug("%s: analysis not completed yet (status=%s)",
self.id, status)
self.server._set_timeout(None)
class GuestManager(object):
"""This class represents the new Guest Manager. It operates on the new
Cuckoo Agent which features a more abstract but more feature-rich API."""
def __init__(self, vmid, ipaddr, platform, task_id, analysis_manager):
self.vmid = vmid
self.ipaddr = ipaddr
self.port = CUCKOO_GUEST_PORT
self.platform = platform
self.task_id = task_id
self.analysis_manager = analysis_manager
self.timeout = None
self.is_vnc = False
# Just in case we have an old agent inside the Virtual Machine. This
# allows us to remain backwards compatible (for now).
self.old = OldGuestManager(vmid, ipaddr, platform, task_id)
self.is_old = False
# We maintain the path of the Cuckoo Analyzer on the host.
self.analyzer_path = None
self.environ = {}
self.options = {}
@property
def aux(self):
return self.analysis_manager.aux
def get(self, method, *args, **kwargs):
"""Simple wrapper around requests.get()."""
do_raise = kwargs.pop("do_raise", True)
url = "http://%s:%s%s" % (self.ipaddr, self.port, method)
session = requests.Session()
session.trust_env = False
session.proxies = None
r = session.get(url, *args, **kwargs)
do_raise and r.raise_for_status()
return r
def post(self, method, *args, **kwargs):
"""Simple wrapper around requests.post()."""
url = "http://%s:%s%s" % (self.ipaddr, self.port, method)
session = requests.Session()
session.trust_env = False
session.proxies = None
r = session.post(url, *args, **kwargs)
r.raise_for_status()
return r
def wait_available(self):
"""Wait until the Virtual Machine is available for usage."""
end = time.time() + self.timeout
while db.guest_get_status(self.task_id) == "starting":
try:
socket.create_connection((self.ipaddr, self.port), 1).close()
break
except socket.timeout:
log.debug("%s: not ready yet", self.vmid)
except socket.error:
log.debug("%s: not ready yet", self.vmid)
time.sleep(1)
if time.time() > end:
raise CuckooGuestCriticalTimeout(
"Machine %s: the guest initialization hit the critical "
"timeout, analysis aborted." % self.vmid
)
def query_environ(self):
"""Query the environment of the Agent in the Virtual Machine."""
self.environ = self.get("/environ").json()["environ"]
def determine_analyzer_path(self):
"""Determine the path of the analyzer. Basically creating a temporary
directory in the systemdrive, i.e., C:\\."""
systemdrive = self.determine_system_drive()
options = parse_options(self.options["options"])
if options.get("analpath"):
dirpath = systemdrive + options["analpath"]
r = self.post("/mkdir", data={"dirpath": dirpath})
self.analyzer_path = dirpath
else:
r = self.post("/mkdtemp", data={"dirpath": systemdrive})
self.analyzer_path = r.json()["dirpath"]
def determine_system_drive(self):
if self.platform == "windows":
return "%s/" % self.environ["SYSTEMDRIVE"]
return "/"
def determine_temp_path(self):
if self.platform == "windows":
return self.environ["TEMP"]
return "/tmp"
def upload_analyzer(self, monitor):
"""Upload the analyzer to the Virtual Machine."""
zip_data = analyzer_zipfile(self.platform, monitor)
log.debug(
"Uploading analyzer to guest (id=%s, ip=%s, monitor=%s, size=%d)",
self.vmid, self.ipaddr, monitor, len(zip_data)
)
self.determine_analyzer_path()
data = {
"dirpath": self.analyzer_path,
}
self.post("/extract", files={"zipfile": zip_data}, data=data)
def add_config(self, options):
"""Upload the analysis.conf for this task to the Virtual Machine."""
config = [
"[analysis]",
]
for key, value in options.items():
# Encode datetime objects the way xmlrpc encodes them.
if isinstance(value, datetime.datetime):
config.append("%s = %s" % (key, value.strftime("%Y%m%dT%H:%M:%S")))
else:
config.append("%s = %s" % (key, value))
data = {
"filepath": os.path.join(self.analyzer_path, "analysis.conf"),
}
self.post("/store", files={"file": "\n".join(config)}, data=data)
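    # Illustrative analysis.conf fragment written by add_config (values are
    # invented; datetimes follow the xmlrpc-style %Y%m%dT%H:%M:%S format):
    #   [analysis]
    #   category = file
    #   timeout = 120
    #   clock = 20170101T12:00:00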
def start_vnc(self, options, monitor):
"""
Starting VNC on remote machine for remote connections
@param options: the task options
        @param monitor: identifier of the monitor to be used
"""
# Start VNC server on machine.
data = {
#"command": "C:\\Python27\\pythonw.exe %s\\analyzer.py" % self.analyzer_path,
"command": r"C:\windows\system32\calc.exe",
"async": "yes",
"cwd": self.analyzer_path,
}
self.post("/execute", data=data)
data = {
# "command": "C:\\Python27\\pythonw.exe %s\\analyzer.py" % self.analyzer_path,
"command": r"C:\Program Files\TigerVNC\winvnc4.exe",
"async": "no",
"cwd": self.analyzer_path,
}
self.post("/execute", data=data)
self.get("/kill")
def start_analysis(self, options, monitor):
"""Start the analysis by uploading all required files.
@param options: the task options
@param monitor: identifier of the monitor to be used.
"""
log.info("Starting analysis on guest (id=%s, ip=%s)",
self.vmid, self.ipaddr)
self.options = options
self.timeout = options["timeout"] + config("cuckoo:timeouts:critical")
# Wait for the agent to come alive.
self.wait_available()
# Could be beautified a bit, but basically we have to perform the
# same check here as we did in wait_available().
if db.guest_get_status(self.task_id) != "starting":
return
# Check whether this is the new Agent or the old one (by looking at
# the status code of the index page).
r = self.get("/", do_raise=False)
if r.status_code == 501:
# log.info("Cuckoo 2.0 features a new Agent which is more "
# "feature-rich. It is recommended to make new Virtual "
# "Machines with the new Agent, but for now falling back "
# "to backwards compatibility with the old agent.")
self.is_old = True
self.aux.callback("legacy_agent")
self.old.start_analysis(options, monitor)
return
if r.status_code != 200:
log.critical(
"While trying to determine the Agent version that your VM is "
"running we retrieved an unexpected HTTP status code: %s. If "
"this is a false positive, please report this issue to the "
"Cuckoo Developers. HTTP response headers: %s",
r.status_code, json.dumps(dict(r.headers)),
)
db.guest_set_status(self.task_id, "failed")
return
try:
status = r.json()
version = status.get("version")
features = status.get("features", [])
except:
log.critical(
"We were unable to detect either the Old or New Agent in the "
"Guest VM, are you sure you have set it up correctly? Please "
"go through the documentation once more and otherwise inform "
"the Cuckoo Developers of your issue."
)
db.guest_set_status(self.task_id, "failed")
return
log.info("Guest is running Cuckoo Agent %s (id=%s, ip=%s)",
version, self.vmid, self.ipaddr)
# Pin the Agent to our IP address so that it is not accessible by
# other Virtual Machines etc.
if "pinning" in features:
self.get("/pinning")
# Obtain the environment variables.
self.query_environ()
# Upload the analyzer.
self.upload_analyzer(monitor)
# Pass along the analysis.conf file.
self.add_config(options)
# Allow Auxiliary modules to prepare the Guest.
self.aux.callback("prepare_guest")
# If the target is a file, upload it to the guest.
if options["category"] == "file" or options["category"] == "archive":
data = {
"filepath": os.path.join(
self.determine_temp_path(), options["file_name"]
),
}
files = {
"file": ("sample.bin", open(options["target"], "rb")),
}
self.post("/store", files=files, data=data)
if "execpy" in features:
data = {
"filepath": "%s/analyzer.py" % self.analyzer_path,
"async": "yes",
"cwd": self.analyzer_path,
}
self.post("/execpy", data=data)
else:
# Execute the analyzer that we just uploaded.
data = {
"command": "C:\\Python27\\pythonw.exe %s\\analyzer.py" % self.analyzer_path,
"async": "yes",
"cwd": self.analyzer_path,
}
self.post("/execute", data=data)
def wait_for_completion(self):
if self.is_old:
self.old.wait_for_completion()
return
end = time.time() + self.timeout
while db.guest_get_status(self.task_id) == "running":
log.debug("%s: analysis still processing", self.vmid)
time.sleep(1)
# If the analysis hits the critical timeout, just return straight
# away and try to recover the analysis results from the guest.
if time.time() > end:
log.info("%s: end of analysis reached!", self.vmid)
return
try:
status = self.get("/status", timeout=5).json()
except Exception as e:
log.info("Virtual Machine /status failed (%r)", e)
# this might fail due to timeouts or just temporary network issues
# thus we don't want to abort the analysis just yet and wait for things to
# recover
continue
if status["status"] == "complete":
log.info("%s: analysis completed successfully", self.vmid)
return
elif status["status"] == "exception":
log.warning(
"%s: analysis caught an exception\n%s",
self.vmid, status["description"]
)
return
@property
def server(self):
"""Currently the Physical machine manager is using GuestManager in
an incorrect way. This should be fixed up later but for now this
workaround will do."""
return self.old.server
|
[
"ariel.koren@gmail.com"
] |
ariel.koren@gmail.com
|
c0540f6c40def264c7e6b7c6505f57004630d2f7
|
b3c47795e8b6d95ae5521dcbbb920ab71851a92f
|
/Nowcoder/剑指Offer/矩形覆盖.py
|
472cec999f79e099d22c2612bfd92794514a660a
|
[
"LicenseRef-scancode-warranty-disclaimer"
] |
no_license
|
Wizmann/ACM-ICPC
|
6afecd0fd09918c53a2a84c4d22c244de0065710
|
7c30454c49485a794dcc4d1c09daf2f755f9ecc1
|
refs/heads/master
| 2023-07-15T02:46:21.372860
| 2023-07-09T15:30:27
| 2023-07-09T15:30:27
| 3,009,276
| 51
| 23
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 517
|
py
|
# -*- coding:utf-8 -*-
class Solution:
def __init__(self):
self.dp = {}
def rectCover(self, number):
if number == 0:
return 0
return self.solve(number)
def solve(self, number):
assert number >= 0
if number == 0:
return 1
if number == 1:
return 1
if number in self.dp:
return self.dp[number]
self.dp[number] = self.solve(number - 1) + self.solve(number - 2)
return self.dp[number]
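# Quick check of the recurrence (a Fibonacci-style relation): covering a 2x3
# board with 2x1 tiles gives solve(3) = solve(2) + solve(1) = 2 + 1 = 3, so
# Solution().rectCover(3) is expected to return 3.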
|
[
"noreply@github.com"
] |
Wizmann.noreply@github.com
|
c055c9f040fe6d19593fdb454ed7a5f6213662bf
|
69a306c83b8638be36b013cb0bcfa89a14ffe6b0
|
/genice2/lattices/preparing/12_2_32449.py
|
3dea0104b3a9af9f18f23602a8b1e23da21d161d
|
[
"MIT"
] |
permissive
|
vitroid/GenIce
|
10bda02a05e1042138f2481f301794eb3e870224
|
4162db1ff57af526abf3ab473a5a56300977dd73
|
refs/heads/main
| 2023-08-17T23:14:32.760909
| 2023-08-04T10:24:56
| 2023-08-04T10:24:56
| 37,752,975
| 58
| 23
| null | 2022-03-13T04:05:44
| 2015-06-20T01:03:52
|
Python
|
UTF-8
|
Python
| false
| false
| 915
|
py
|
from genice2.cell import cellvectors
import genice2.lattices
import numpy as np
desc = {
"ref": {
"12_2_32449": "Engel 2018",
"engel09": "Engel 2018"
},
"usage": "No options available.",
"brief": "Hypothetical zeolitic ice",
"test": ({"args": "",
"options": "-r 2 2 2"},)
}
class Lattice(genice2.lattices.Lattice):
def __init__(self):
self.cell = np.array([
[4.906093, -2.166984, -1.768499],
[-3.225085, 3.887306, -2.634777],
[3.832468, 1.696101, 2.81583],
])
self.waters = np.array([
[0.156218, 0.39529, -0.383508],
[-0.178719, -0.439869, 0.267773],
[-0.445277, 0.020649, -0.324495],
[0.431107, -0.051465, 0.202261],
[0.226932, 0.248658, 0.010101],
[-0.242547, -0.282667, -0.134704],
])
self.coord = 'relative'
|
[
"vitroid@gmail.com"
] |
vitroid@gmail.com
|
fcc1d9295eab3144a1ea29ba91e9c212543422d0
|
3614e22372f9f0a7f4c7c9baffc38b3468876432
|
/day2/school.py
|
50765fb4272dbdcaabb61a1798b0897b6d4907df
|
[
"MIT"
] |
permissive
|
jeonghkim/python-washu-2014
|
c94ac6d628e1d0278e792d3abdb05c4e74089e86
|
ef903ebaed96aad5752fbc7d348a17e6c1963d39
|
refs/heads/master
| 2021-01-21T00:38:47.644541
| 2014-08-26T00:03:38
| 2014-08-26T00:03:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,575
|
py
|
class School:
def __init__(self, schoolname): # initialize: it takes the argument "schoolname"
self.schoolname = schoolname # the element schoolname will be the input for the argument schoolname
self.db = {} # the element "db" will be an empty dictionary.
def add(self, student, grade): # add method takes the argument "student" and "grade"
if grade in self.db: # if the input of grade already exists as a key in dict self.db:
self.db[grade].add(student) # it will add the student name to the value for that key
else:
            self.db[grade] = {student} # Otherwise, it will add a new key with the matching value (a set containing the student)
def grade(self, key): # grade method is to call values corresponding to key
return self.db.get(key, None) # It will return the default value None, if key is not available. Otherwise, it returns a value for a given key
def sort(self): # sort is a method to return a dictionary where values are sorted (and in the form of tuple). It doesn't take any argument
sorted_students = {} # create an empty dictionary
        for key in self.db.keys():
            sorted_students[key] = tuple(sorted(self.db.get(key))) # Add the key to the dict sorted_students with its values from self.db,
                                                                   # sorted and converted to a tuple.
return sorted_students
# school = School("Haleakala Hippy School")
# # school.add("James", 2)
# # school.add("Blair", 2)
# # school.add("Paul", 2)
#
# school.add("Jennifer", 4)
# school.add("Kareem", 6)
# school.add("Christopher", 4)
# school.add("Kyle", 3)
# print school.db
# print school.grade(4)
# print school.sort()
|
[
"jhkim0508@gmail.com"
] |
jhkim0508@gmail.com
|
46f94efe29b346ecda0d7dc713057133d2069298
|
0687db020813c1de043ac0af6e463e41b981d300
|
/samtranslator/intrinsics/resolver.py
|
99967a1cc59fcd403de0da743271708d1b2ee502
|
[
"Apache-2.0"
] |
permissive
|
danishbacker/serverless-application-model
|
a1664e26553130e0f96ffc4fb3c3abd3730a8ab4
|
0ead3441b047fa2010a0fd4d05d70326dfe47f9e
|
refs/heads/develop
| 2020-03-15T19:53:49.824509
| 2018-05-03T21:24:35
| 2018-05-03T21:24:35
| 132,319,725
| 0
| 0
|
Apache-2.0
| 2019-03-24T18:54:25
| 2018-05-06T08:20:57
|
Python
|
UTF-8
|
Python
| false
| false
| 9,455
|
py
|
# Help resolve intrinsic functions
from samtranslator.intrinsics.actions import Action, SubAction, RefAction, GetAttAction
# All intrinsics are supported by default
DEFAULT_SUPPORTED_INTRINSICS = {action.intrinsic_name:action() for action in [RefAction, SubAction, GetAttAction]}
class IntrinsicsResolver(object):
def __init__(self, parameters, supported_intrinsics=DEFAULT_SUPPORTED_INTRINSICS):
"""
Instantiate the resolver
:param dict parameters: Map of parameter names to their values
:param dict supported_intrinsics: Dictionary of intrinsic functions this class supports along with the Action class that
can process this intrinsic
:raises TypeError: If parameters or the supported_intrinsics arguments are invalid
"""
if parameters is None or not isinstance(parameters, dict):
raise TypeError("parameters must be a valid dictionary")
if not isinstance(supported_intrinsics, dict) \
or not all([isinstance(value, Action) for value in supported_intrinsics.values()]):
raise TypeError("supported_intrinsics argument must be intrinsic names to corresponding Action classes")
self.supported_intrinsics = supported_intrinsics
self.parameters = parameters
def resolve_parameter_refs(self, input):
"""
Resolves references to parameters within the given dictionary recursively. Other intrinsic functions such as
!GetAtt, !Sub or !Ref to non-parameters will be left untouched.
Result is a dictionary where parameter values are inlined. Don't pass this dictionary directly into
transform's output because it changes the template structure by inlining parameter values.
:param input: Any primitive type (dict, array, string etc) whose values might contain intrinsic functions
:return: A copy of a dictionary with parameter references replaced by actual value.
"""
return self._traverse(input, self.parameters, self._try_resolve_parameter_refs)
def resolve_sam_resource_refs(self, input, supported_resource_refs):
"""
Customers can provide a reference to a "derived" SAM resource such as Alias of a Function or Stage of an API
resource. This method recursively walks the tree, converting all derived references to the real resource name,
if it is present.
Example:
{"Ref": "MyFunction.Alias"} -> {"Ref": "MyFunctionAliasLive"}
This method does not attempt to validate a reference. If it is invalid or non-resolvable, it skips the
occurrence and continues with the rest. It is recommended that you have an external process that detects and
surfaces invalid references.
For first call, it is recommended that `template` is the entire CFN template in order to handle
references in Mapping or Output sections.
:param dict input: CFN template that needs resolution. This method will modify the input
directly resolving references. In subsequent recursions, this will be a fragment of the CFN template.
:param SupportedResourceReferences supported_resource_refs: Object that contains information about the resource
references supported in this SAM template, along with the value they should resolve to.
        :return: Modified input with the supported SAM resource references resolved to their real values
"""
return self._traverse(input, supported_resource_refs, self._try_resolve_sam_resource_refs)
def _traverse(self, input, resolution_data, resolver_method):
"""
        Driver method that performs the actual traversal of input and calls the appropriate `resolver_method`
        to perform the resolution.
:param input: Any primitive type (dict, array, string etc) whose value might contain an intrinsic function
:param resolution_data: Data that will help with resolution. For example, when resolving parameter references,
this object will contain a dictionary of parameter names and their values.
:param resolver_method: Method that will be called to actually resolve an intrinsic function. This method
is called with the parameters `(input, resolution_data)`.
:return: Modified `input` with intrinsics resolved
"""
        # If there is no data to help with resolution, skip the traversal altogether
if len(resolution_data) == 0:
return input
#
# Traversal Algorithm:
#
# Imagine the input dictionary/list as a tree. We are doing a Pre-Order tree traversal here where we first
# process the root node before going to its children. Dict and Lists are the only two iterable nodes.
# Everything else is a leaf node.
#
        # We do a Pre-Order traversal to handle the case where `input` contains an intrinsic function as its only child
        # ie. input = {"Ref": "foo"}.
#
# We will try to resolve the intrinsics if we can, otherwise return the original input. In some cases, resolving
# an intrinsic will result in a terminal state ie. {"Ref": "foo"} could resolve to a string "bar". In other
# cases, resolving intrinsics is only partial and we might need to continue traversing the tree (ex: Fn::Sub)
# to handle nested intrinsics. All of these cases lend well towards a Pre-Order traversal where we try and
# process the intrinsic, which results in a modified sub-tree to traverse.
#
input = resolver_method(input, resolution_data)
if isinstance(input, dict):
return self._traverse_dict(input, resolution_data, resolver_method)
elif isinstance(input, list):
return self._traverse_list(input, resolution_data, resolver_method)
else:
# We can iterate only over dict or list types. Primitive types are terminals
return input
def _traverse_dict(self, input_dict, resolution_data, resolver_method):
"""
Traverse a dictionary to resolve intrinsic functions on every value
:param input_dict: Input dictionary to traverse
:param resolution_data: Data that the `resolver_method` needs to operate
:param resolver_method: Method that can actually resolve an intrinsic function, if it detects one
:return: Modified dictionary with values resolved
"""
for key, value in input_dict.iteritems():
input_dict[key] = self._traverse(value, resolution_data, resolver_method)
return input_dict
def _traverse_list(self, input_list, resolution_data, resolver_method):
"""
Traverse a list to resolve intrinsic functions on every element
:param input_list: List of input
:param resolution_data: Data that the `resolver_method` needs to operate
:param resolver_method: Method that can actually resolve an intrinsic function, if it detects one
:return: Modified list with intrinsic functions resolved
"""
for index, value in enumerate(input_list):
input_list[index] = self._traverse(value, resolution_data, resolver_method)
return input_list
def _try_resolve_parameter_refs(self, input, parameters):
"""
Try to resolve parameter references on the given input object. The object could be of any type.
If the input is not in the format used by intrinsics (ie. dictionary with one key), input is returned
unmodified. If the single key in dictionary is one of the supported intrinsic function types,
go ahead and try to resolve it.
:param input: Input object to resolve
:param parameters: Parameter values used to for ref substitution
:return:
"""
if not self._is_intrinsic_dict(input):
return input
function_type = input.keys()[0]
return self.supported_intrinsics[function_type].resolve_parameter_refs(input, parameters)
def _try_resolve_sam_resource_refs(self, input, supported_resource_refs):
"""
Try to resolve SAM resource references on the given template. If the given object looks like one of the
supported intrinsics, it calls the appropriate resolution on it. If not, this method returns the original input
unmodified.
:param dict input: Dictionary that may represent an intrinsic function
:param SupportedResourceReferences supported_resource_refs: Object containing information about available
resource references and the values they resolve to.
:return: Modified input dictionary with references resolved
"""
if not self._is_intrinsic_dict(input):
return input
function_type = input.keys()[0]
return self.supported_intrinsics[function_type].resolve_resource_refs(input, supported_resource_refs)
def _is_intrinsic_dict(self, input):
"""
Can the input represent an intrinsic function in it?
:param input: Object to be checked
:return: True, if the input contains a supported intrinsic function. False otherwise
"""
# All intrinsic functions are dictionaries with just one key
return isinstance(input, dict) \
and len(input) == 1 \
and input.keys()[0] in self.supported_intrinsics
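# Hypothetical usage sketch -- the actual substitution is delegated to the
# Action classes (RefAction, SubAction, GetAttAction) imported above, so the
# exact output depends on their implementations, which are not shown here:
#   resolver = IntrinsicsResolver({"Stage": "prod"})
#   resolved = resolver.resolve_parameter_refs({"Ref": "Stage"})
#   # expected to inline the parameter value, e.g. "prod"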
|
[
"noreply@github.com"
] |
danishbacker.noreply@github.com
|
32d16c4be1951f647216c0e08722a51c8040eb27
|
5319df622cf03ec1a23b41c31a0d5d98969b886a
|
/radish.py
|
4a6f849ceb621733c098e61e24ce1ecf7708754a
|
[] |
no_license
|
erinleeny/the-radish
|
3769b182087cc13700e2b0e42367467126447936
|
8c6c222d65cbb86bdeb91d81b3df6f5ff017fbf4
|
refs/heads/master
| 2022-01-29T12:04:21.166471
| 2019-07-18T18:57:39
| 2019-07-18T18:57:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,649
|
py
|
#!/usr/bin/python3
print('Content-type: text/html\n')
import cgitb
import cgi
import random
cgitb.enable()
page = cgi.FieldStorage()['page'].value
print('''
<!DOCTYPE HTML>
<html>
<head>
<title>The Radish</title>
<link rel="icon" href="radish.png">
<link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/bootstrap/4.0.0-beta.2/css/bootstrap.min.css" integrity="sha384-PsH8R72JQ3SOdhVi3uxftmaW6Vc51MKb0q5P2rRUpPvrszuE4W1povHYgTpBfshb" crossorigin="anonymous">
<link rel="stylesheet" type="text/css"
href="https://fonts.googleapis.com/css?family=Abril+Fatface">
<link rel="stylesheet" type="text/css"
href="https://fonts.googleapis.com/css?family=Forum">
<link rel="stylesheet" type="text/css"
href="https://fonts.googleapis.com/css?family=Merriweather">
<link rel="stylesheet" type="text/css"
href="https://fonts.googleapis.com/css?family=Fjalla+One">
<link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/bootstrap/4.0.0-beta.2/css/bootstrap.min.css" integrity="sha384-PsH8R72JQ3SOdhVi3uxftmaW6Vc51MKb0q5P2rRUpPvrszuE4W1povHYgTpBfshb" crossorigin="anonymous">
<link rel="stylesheet" type="text/css" href="radish.css">
</head>
<body>
<div class = 'flex-container header'><br>
<form action = 'radish.py' method = "GET">
<div class = 'flex-container title' style = 'width: 100%'><center>
<a href = 'home.py'><img src = 'title.png' style = 'height: 75px'></a>
<hr></hr></center>
<div class = 'row flex-container top'>
<div class = 'col-sm-1'>
<h5> <input class = 'link' name = 'page' type = 'submit' value = 'World'></h5>
</div>
<div class = 'col-sm-1'>
<h5><input class = 'link' name = 'page' type = 'submit' value = 'US'></h5>
</div>
<div class = 'col-sm-1'>
<h5><input class = 'link' name = 'page' type = 'submit' value = 'Politics'></h5>
</div>
<div class = 'col-sm-1'>
<h5><input class = 'link' name = 'page' type = 'submit' value = 'N.Y.'></h5>
</div>
<div class = 'col-sm-1'>
<h5><input class = 'link' name = 'page' type = 'submit' value = 'Business'></h5>
</div>
<div class = 'col-sm-1'>
<h5><input class = 'link' name = 'page' type = 'submit' value = 'Opinion'></h5>
</div>
<div class = 'col-sm-1'>
<h5><input class = 'link' name = 'page' type = 'submit' value = 'Tech'></h5>
</div>
<div class = 'col-sm-1'>
<h5><input class = 'link' name = 'page' type = 'submit' value = 'Science'></h5>
</div>
<div class = 'col-sm-1'>
<h5><input class = 'link' name = 'page' type = 'submit' value = 'Health'></h5>
</div>
<div class = 'col-sm-1'>
<h5><input class = 'link' name = 'page' type = 'submit' value = 'Sports'></h5>
</div>
<div class = 'col-sm-1'>
<h5><input class = 'link' name = 'page' type = 'submit' value = 'Arts'></h5>
</div>
<div class = 'col-sm-1'>
<h5><input class = 'link' name = 'page' type = 'submit' value = 'Books'></h5>
</div>
</div>
</form>
<hr>
</div>
<div class = 'flex-container'><h2>''' +
page + '''</h2></div><hr>
''')
def web_print(name):
master_ary = []
pic_ary = []
nopic_ary = []
name = name.lower()
if name == "n.y.":
file = 'nyregion'
elif name == 'tech':
file = 'technology'
else:
file = name
s = open('text/' + file + '.txt', encoding='utf-8').read()
s = s.encode('ascii', 'xmlcharrefreplace').decode()
articles = s.split('\n')
for article in articles:
sections = article.split("\t")
if len(sections) > 3:
dict = {}
dict["title"] = sections[0]
dict["description"] = sections[1]
dict["link"] = sections[2]
dict["photo"] = sections[3]
dict["author"] = sections[4]
master_ary.append(dict)
if dict['photo'] == 'NO-IMG':
nopic_ary.append(dict)
else:
pic_ary.append(dict)
return [master_ary, pic_ary, nopic_ary]
arys = web_print(page)
print('''
<div class = 'row' style = 'border-bottom: grey solid thin'>
<div class = 'col-md-8 rightB'>
''')
i = 0
while i < 2:
pic_a = arys[1][i]
photo = pic_a["photo"]
title = pic_a["title"]
description = pic_a["description"]
description = description.split("'")
des = ''
if description[1] != 'NO-DESCRIPTION':
for d in description:
if d != '' and d != "":
des+= d + "<br>"
author = pic_a["author"]
link = pic_a["link"]
# arys[1].pop(i)
print('''<div class = 'right row flex-container'>
<div class = 'col-md-8'>
<img src = ''' + photo +
'''>
</div>
<div class = 'col-md-4'>
<h4><a href = ' ''' + link +
''' '> ''' + title +
'''</a></h4><h5>''' +
des + "<br>"
+ ''' </h5><p>''' + author +
'''</p></div></div>''')
i+=1
print("</div><div class = 'col-md-4'><div class = 'row flex-container'>")
i = 2
arys[0] = arys[1] + arys[2]
while i < 5 and i < len(arys[0]):
ary = arys[0][i]
author = ary["author"]
title = ary["title"]
description = ary["description"]
description = description.split("'")
des = ''
    if description[1] != 'NO-DESCRIPTION':
        for d in description:
            if d != '' and d != "":
                des += d + "<br>"
link = ary["link"]
print('''<div class = 'article' style="width: 95%; padding-left: 10px; border-bottom: grey solid thin"><h4 class = 'article'><a href = ' ''' + link +
''' '><br> ''' + title +
'''</a></h4><h5>''' +
des + "</h5><p>" + author +
"</p><br> </div><br><br>")
i +=1
if 5 < len(arys[0]):
ary = arys[0][5]
author = ary["author"]
title = ary["title"]
description = ary["description"]
description = description.split("'")
des = ''
    if description[1] != 'NO-DESCRIPTION':
        for d in description:
            if d != '' and d != "":
                des += d + "<br>"
link = ary["link"]
print('''<div class = 'article' style="width: 95%; padding-left: 10px;"><h4 class = 'article'><a href = ' ''' + link +
''' '><br> ''' + title +
'''</a></h4><h5>''' +
des + "</h5><p>" + author +
"</p><br> </div><br><br>")
print('''</div></div></div><div class = 'row flex-container bottom' style="border-bottom: grey solid thin">''')
i = 0
while i < 2 and i < len(arys[1]):
ary = arys[1][random.randrange(len(arys[1]))]
title = ary["title"]
author = ary["author"]
description = ary["description"]
photo = ary['photo']
description = description.split("'")
des = ''
if description[1] == 'NO-DESCRIPTION':
des = ''
for d in description:
if d != '' and d != "":
des+= d + "<br>"
link = ary["link"]
print('''
<div class = 'col-md-6'><div class = 'row flex-container'>
<div class = 'col-md-5'>''' + '''
<h4 class = 'article'><a href = ' ''' + link +
''' '> ''' + title +
'''</a></h4><h5>''' +
des + "</h5><p>" + author +
"</p><br>")
print('''</div><div class = 'col-md-7'><img src =' ''' + photo +
''' '></div></div></div>''')
i+=1
print('''</div></div></div>
''')
ary = arys[1][random.randrange(len(arys[1]))]
title = ary["title"]
author = ary["author"]
description = ary["description"]
photo = ary['photo']
description = description.split("'")
des = ''
if description[1] == 'NO-DESCRIPTION':
des = ''
for d in description:
if d != '' and d != "":
des+= d + "<br>"
link = ary["link"]
print('''<br>
<div class="flex-container" style="border-bottom: grey solid thin"><img src =' '''
+ photo + ''' '><br><br><h4 class = 'article'><a href = ' ''' + link +
''' '> ''' + title +
'''</a></h4><h5>''' +
des + "</h5><p>" + author +
"</p><br>" +
'''</div>
''')
if len(arys[0]) > 0:
ary = arys[0][random.randrange(len(arys[0]))]
title = ary["title"]
author = ary["author"]
description = ary["description"]
description = description.split("'")
des = ''
if description[1] == 'NO-DESCRIPTION':
des = ''
for d in description:
if d != '' and d != "":
des+= d + "<br>"
link = ary["link"]
print('''<br><div class = 'row flex-container'>
<br><div class="col-md-6 right"><h4 class = 'article'><a href = ' ''' + link +
''' '> ''' + title +
'''</a></h4><h5>''' +
des + "</h5><p>" + author +
"</p><br></div>")
if len(arys[1]) > 0:
ary = arys[1][random.randrange(len(arys[1]))]
title = ary["title"]
author = ary["author"]
description = ary["description"]
photo = ary['photo']
description = description.split("'")
des = ''
if description[1] == 'NO-DESCRIPTION':
des = ''
for d in description:
if d != '' and d != "":
des+= d + "<br>"
link = ary["link"]
print('''<div class="col-md-6"><br><img src =' '''
+ photo + ''' '><br><br><h4 class = 'article'><a href = ' ''' + link +
''' '> ''' + title +
'''</a></h4><h5>''' +
des + "</h5><p>" + author +
"</p><br></div></div>" +
'''
''')
print('''
</body>
</html>
''')
|
[
"erin.lee.ny@gmail.com"
] |
erin.lee.ny@gmail.com
|
83e8030e31ad2c23f459c9b048dbafbf0f63fee1
|
bc441bb06b8948288f110af63feda4e798f30225
|
/monitor_sdk/model/easy_command/task_spec_pb2.py
|
dcb02fe5c9c9e655bdfa977987c035f067083564
|
[
"Apache-2.0"
] |
permissive
|
easyopsapis/easyops-api-python
|
23204f8846a332c30f5f3ff627bf220940137b6b
|
adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0
|
refs/heads/master
| 2020-06-26T23:38:27.308803
| 2020-06-16T07:25:41
| 2020-06-16T07:25:41
| 199,773,131
| 5
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| true
| 9,514
|
py
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: task_spec.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from monitor_sdk.model.easy_command import action_pb2 as monitor__sdk_dot_model_dot_easy__command_dot_action__pb2
from monitor_sdk.model.easy_command import target_pb2 as monitor__sdk_dot_model_dot_easy__command_dot_target__pb2
from monitor_sdk.model.easy_command import task_callback_pb2 as monitor__sdk_dot_model_dot_easy__command_dot_task__callback__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='task_spec.proto',
package='easy_command',
syntax='proto3',
serialized_options=_b('ZFgo.easyops.local/contracts/protorepo-models/easyops/model/easy_command'),
serialized_pb=_b('\n\x0ftask_spec.proto\x12\x0c\x65\x61sy_command\x1a+monitor_sdk/model/easy_command/action.proto\x1a+monitor_sdk/model/easy_command/target.proto\x1a\x32monitor_sdk/model/easy_command/task_callback.proto\"\xef\x02\n\x08TaskSpec\x12\x0e\n\x06taskId\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12\x0c\n\x04type\x18\x03 \x01(\t\x12\x11\n\toperation\x18\x04 \x01(\t\x12\x0f\n\x07groupId\x18\x05 \x01(\t\x12%\n\x07\x61\x63tions\x18\x06 \x03(\x0b\x32\x14.easy_command.Action\x12%\n\x07targets\x18\x07 \x03(\x0b\x32\x14.easy_command.Target\x12\r\n\x05\x61ppId\x18\x08 \x01(\t\x12\x11\n\tclusterId\x18\t \x01(\t\x12\x11\n\tpackageId\x18\n \x01(\t\x12\x11\n\tversionId\x18\x0b \x01(\t\x12\x12\n\nneedNotify\x18\x0c \x01(\x08\x12,\n\x08\x63\x61llback\x18\r \x01(\x0b\x32\x1a.easy_command.TaskCallback\x12\x10\n\x08\x62\x61tchNum\x18\x0e \x01(\x05\x12\x15\n\rbatchInterval\x18\x0f \x01(\x05\x12\x12\n\nfailedStop\x18\x10 \x01(\x08\x42HZFgo.easyops.local/contracts/protorepo-models/easyops/model/easy_commandb\x06proto3')
,
dependencies=[monitor__sdk_dot_model_dot_easy__command_dot_action__pb2.DESCRIPTOR,monitor__sdk_dot_model_dot_easy__command_dot_target__pb2.DESCRIPTOR,monitor__sdk_dot_model_dot_easy__command_dot_task__callback__pb2.DESCRIPTOR,])
_TASKSPEC = _descriptor.Descriptor(
name='TaskSpec',
full_name='easy_command.TaskSpec',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='taskId', full_name='easy_command.TaskSpec.taskId', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='name', full_name='easy_command.TaskSpec.name', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='type', full_name='easy_command.TaskSpec.type', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='operation', full_name='easy_command.TaskSpec.operation', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='groupId', full_name='easy_command.TaskSpec.groupId', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='actions', full_name='easy_command.TaskSpec.actions', index=5,
number=6, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='targets', full_name='easy_command.TaskSpec.targets', index=6,
number=7, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='appId', full_name='easy_command.TaskSpec.appId', index=7,
number=8, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='clusterId', full_name='easy_command.TaskSpec.clusterId', index=8,
number=9, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='packageId', full_name='easy_command.TaskSpec.packageId', index=9,
number=10, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='versionId', full_name='easy_command.TaskSpec.versionId', index=10,
number=11, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='needNotify', full_name='easy_command.TaskSpec.needNotify', index=11,
number=12, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='callback', full_name='easy_command.TaskSpec.callback', index=12,
number=13, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='batchNum', full_name='easy_command.TaskSpec.batchNum', index=13,
number=14, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='batchInterval', full_name='easy_command.TaskSpec.batchInterval', index=14,
number=15, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='failedStop', full_name='easy_command.TaskSpec.failedStop', index=15,
number=16, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=176,
serialized_end=543,
)
_TASKSPEC.fields_by_name['actions'].message_type = monitor__sdk_dot_model_dot_easy__command_dot_action__pb2._ACTION
_TASKSPEC.fields_by_name['targets'].message_type = monitor__sdk_dot_model_dot_easy__command_dot_target__pb2._TARGET
_TASKSPEC.fields_by_name['callback'].message_type = monitor__sdk_dot_model_dot_easy__command_dot_task__callback__pb2._TASKCALLBACK
DESCRIPTOR.message_types_by_name['TaskSpec'] = _TASKSPEC
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
TaskSpec = _reflection.GeneratedProtocolMessageType('TaskSpec', (_message.Message,), {
'DESCRIPTOR' : _TASKSPEC,
'__module__' : 'task_spec_pb2'
# @@protoc_insertion_point(class_scope:easy_command.TaskSpec)
})
_sym_db.RegisterMessage(TaskSpec)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
|
[
"service@easyops.cn"
] |
service@easyops.cn
|
f4e4f4503c9f478bd1926f4eb1748e96798b0abc
|
ddb9f17a4d943123f5e9c48cf02659ad54e968a9
|
/electionSoup_05.py
|
2c5a134d338b247b430b846cbd7f2131323850cb
|
[] |
no_license
|
jotasprout/scrapingElectionResults
|
665bcc76dc5c25bd0302914eeabdf9120bc0fb99
|
3c2e80ee67eb6c1ec4509d4bbfbeeafbef258b61
|
refs/heads/master
| 2021-09-17T06:40:27.550765
| 2018-06-28T18:40:53
| 2018-06-28T18:40:53
| 74,492,899
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,535
|
py
|
from bs4 import BeautifulSoup
import csv
# Grab local file I downloaded
htmlDoc = open("2016ElectionResultsPresidentPolitico2.htm")
soup = BeautifulSoup(htmlDoc)
# create a text file in which to put leftover soup
f = csv.writer(open("myElectionResults3.csv", "w"))
# Grab just the results table
articles = soup.find_all('article', {'class': 'timeline-group'})
for article in articles:
# Remove crap before state name
stateCrap1 = article.header.h3.a.b
stateCrap1.decompose()
state = article.header.h3.a.contents
f.writerow(state)
# write header row
f.writerow(["Candidate", "Percentage", "Popular", "Electoral College"])
trs = article.find_all('tr')
for tr in trs:
# Get candidate name
candidatex = tr.find('span', {'class': 'name-combo'})
# Remove crap before candidate name
canCrap = candidatex.find_all('span')
for crap in canCrap:
crap.decompose()
candidate = candidatex.contents
# Get popular vote
popularx = tr.find('td', {'class': 'results-popular'})
popular = popularx.contents
# Get percentage of vote
percentagex = tr.find('span', {'class': 'number'})
percentage = percentagex.contents
# Get electoral college vote
electoralCollegex = tr.find('td', {'class': 'delegates-cell'})
try:
electoralCollege = electoralCollegex.contents
except:
continue
f.writerow([candidate,popular,percentage,electoralCollege])
|
[
"jotasprout@gmail.com"
] |
jotasprout@gmail.com
|
c11ce1adc65498d3491e4e59b546206ff74eb43b
|
ab40571d5051ad53c0f205fa797ba36eac516d06
|
/language/conpono/cpc/run_cpc.py
|
48d27c2350227bd9c3f6ac1f7daf95683099c8eb
|
[
"Apache-2.0",
"LicenseRef-scancode-generic-cla"
] |
permissive
|
google-research/language
|
e941b1a92ab46d40d8d03bb0c314905cb6902ce2
|
ac9447064195e06de48cc91ff642f7fffa28ffe8
|
refs/heads/master
| 2023-08-24T23:10:13.207294
| 2023-05-25T20:47:18
| 2023-05-25T22:29:27
| 153,201,352
| 1,567
| 371
|
Apache-2.0
| 2023-07-06T23:03:15
| 2018-10-16T00:58:14
|
Python
|
UTF-8
|
Python
| false
| false
| 18,336
|
py
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BERT next sentence prediction / binary coherence finetuning runner."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import app
from absl import flags
from bert import modeling
from bert import optimization
from bert import tokenization
from language.conpono.cpc import model_builder
from six.moves import range
import tensorflow.compat.v1 as tf
from tensorflow.compat.v1 import estimator as tf_estimator
from tensorflow.contrib import cluster_resolver as contrib_cluster_resolver
from tensorflow.contrib import tpu as contrib_tpu
from tensorflow.contrib import training as contrib_training
FLAGS = flags.FLAGS
## Required parameters
flags.DEFINE_string(
"eval_file", None,
"The input data. Should be in tfrecord format ready to input to BERT.")
flags.DEFINE_string(
"train_file", None,
"The input data. Should be in tfrecord format ready to input to BERT.")
flags.DEFINE_string(
"bert_config_file", None,
"The config json file corresponding to the pre-trained BERT model. "
"This specifies the model architecture.")
flags.DEFINE_string("vocab_file", None,
"The vocabulary file that the BERT model was trained on.")
flags.DEFINE_string(
"output_dir", None,
"The output directory where the model checkpoints will be written.")
## Other parameters
flags.DEFINE_integer("num_choices", 32, "Number of negative samples + 1")
flags.DEFINE_bool("add_lv2loss", False, "Whether to use the level 2 loss.")
flags.DEFINE_string(
"init_checkpoint", None,
"Initial checkpoint (usually from a pre-trained BERT model).")
flags.DEFINE_bool(
"do_lower_case", True,
"Whether to lower case the input text. Should be True for uncased "
"models and False for cased models.")
flags.DEFINE_integer(
"max_seq_length", 128,
"The maximum total input sequence length after WordPiece tokenization. "
"Sequences longer than this will be truncated, and sequences shorter "
"than this will be padded.")
flags.DEFINE_bool("do_train", False, "Whether to run training.")
flags.DEFINE_bool("do_eval", False, "Whether to run eval on the dev set.")
flags.DEFINE_integer("train_batch_size", 32, "Total batch size for training.")
flags.DEFINE_integer("eval_batch_size", 32, "Total batch size for eval.")
flags.DEFINE_integer("train_data_size", 10000, "The number of examples in the"
"training data")
flags.DEFINE_integer("eval_data_size", -1, "The number of examples in the"
"validation data")
flags.DEFINE_integer("predict_batch_size", 8, "Total batch size for predict.")
flags.DEFINE_float("learning_rate", 5e-5, "The initial learning rate for Adam.")
flags.DEFINE_float(
"warmup_proportion", 0.1,
"Proportion of training to perform linear learning rate warmup for. "
"E.g., 0.1 = 10% of training.")
flags.DEFINE_integer("save_checkpoints_steps", 10000,
"How often to save the model checkpoint.")
flags.DEFINE_integer("iterations_per_loop", 1000,
"How many steps to make in each estimator call.")
flags.DEFINE_bool("use_tpu", False, "Whether to use TPU or GPU/CPU.")
flags.DEFINE_string(
"tpu_name", None,
"The Cloud TPU to use for training. This should be either the name "
"used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 "
"url.")
flags.DEFINE_string(
"tpu_zone", None,
"[Optional] GCE zone where the Cloud TPU is located in. If not "
"specified, we will attempt to automatically detect the GCE project from "
"metadata.")
flags.DEFINE_string(
"gcp_project", None,
"[Optional] Project name for the Cloud TPU-enabled project. If not "
"specified, we will attempt to automatically detect the GCE project from "
"metadata.")
flags.DEFINE_string("master", None, "[Optional] TensorFlow master URL.")
flags.DEFINE_integer(
"num_tpu_cores", 8,
"Only used if `use_tpu` is True. Total number of TPU cores to use.")
_SEP_TOKEN = "[SEP]"
class InputFeatures(object):
"""A single set of features of data."""
def __init__(self,
input_ids,
input_mask,
segment_ids,
label_id,
is_real_example=True):
self.input_ids = input_ids
self.input_mask = input_mask
self.segment_ids = segment_ids
self.label_id = label_id
self.is_real_example = is_real_example
def file_based_input_fn_builder(input_file, seq_length, is_training,
drop_remainder, num_choices):
"""Creates an `input_fn` closure to be passed to TPUEstimator."""
input_file = input_file.split(",")
expanded_files = []
for infile in input_file:
try:
sharded_files = tf.io.gfile.glob(infile)
expanded_files.append(sharded_files)
except tf.errors.OpError:
expanded_files.append(infile)
name_to_features = {}
for i in range(50):
name_to_features["input_ids" + str(i)] = tf.FixedLenFeature([seq_length],
tf.int64)
name_to_features["input_mask" + str(i)] = tf.FixedLenFeature([seq_length],
tf.int64)
name_to_features["segment_ids" + str(i)] = tf.FixedLenFeature([seq_length],
tf.int64)
name_to_features["label_types"] = tf.FixedLenFeature([4], tf.int64)
def _decode_record(record, name_to_features):
"""Decodes a record to a TensorFlow example."""
example = tf.parse_single_example(record, name_to_features)
# A total of 32 candidate examples per record:
# the 4 labelled targets (shuffled in), plus distractors sampled from the
# 23 same-document and 23 random-document sentences (14 from each).
num_sampled = 14
same_doc_idxs = tf.random.shuffle(tf.range(4, 27))[:num_sampled]
rand_doc_idxs = tf.random.shuffle(tf.range(27, 50))[:num_sampled]
batch_indexes = tf.concat([tf.range(4), same_doc_idxs, rand_doc_idxs],
axis=0)
batch_indexes = tf.random.shuffle(batch_indexes)
# At this point, we have shuffled the indexes and sampled them such that
# we still have the index of 4 targets, 14 sampled from the same doc
# and 14 sampled from different docs. But these are just indexes.
# Here we need to grab the inputs according to the indexes above
# We stack all the inputs so we can gather on the matrix
input_id_stack, input_mask_stack, segment_id_stack = [], [], []
for i in range(50):
input_id_stack.append(example["input_ids" + str(i)])
input_mask_stack.append(example["input_mask" + str(i)])
segment_id_stack.append(example["segment_ids" + str(i)])
input_id_stack = tf.stack(input_id_stack)
input_mask_stack = tf.stack(input_mask_stack)
segment_id_stack = tf.stack(segment_id_stack)
input_ids = tf.gather(input_id_stack, batch_indexes)
input_masks = tf.gather(input_mask_stack, batch_indexes)
segment_ids = tf.gather(segment_id_stack, batch_indexes)
# Note that we override the name of the input (eg. input_ids5)
# So we replace the input with the shuffled and sampled input
# We only set num_choices of them since those will be used.
for i in range(num_choices):
example["input_ids" + str(i)] = input_ids[i]
example["input_mask" + str(i)] = input_masks[i]
example["segment_ids" + str(i)] = segment_ids[i]
# Note that inputs num_choices through 49 will not be used, so purge them
for i in range(num_choices, 50):
del example["input_ids" + str(i)]
del example["input_mask" + str(i)]
del example["segment_ids" + str(i)]
label_idx = []
for i in range(4):
label_idx.append(tf.where(tf.equal(batch_indexes, tf.constant(i)))[0])
label_idx = tf.reshape(tf.concat(label_idx, axis=0), [-1])
label_idx = tf.scatter_nd(
tf.reshape(example["label_types"], [4, 1]), label_idx, [8])
example["labels"] = label_idx
# tf.Example only supports tf.int64, but the TPU only supports tf.int32.
# So cast all int64 to int32.
for name in list(example.keys()):
t = example[name]
if t.dtype == tf.int64:
t = tf.to_int32(t)
example[name] = t
return example
def input_fn(params):
"""The actual input function."""
batch_size = params["batch_size"]
if len(expanded_files) == 1:
d = tf.data.TFRecordDataset(expanded_files[0])
if is_training:
d = d.repeat()
d = d.shuffle(buffer_size=256)
else:
dataset_list = [
tf.data.TFRecordDataset(expanded_files[i])
for i in range(len(expanded_files))
]
if is_training:
dataset_list = [d.repeat() for d in dataset_list]
wiki_pct = 0.02222
dset_weights = [wiki_pct, 1 - wiki_pct]
d = tf.data.experimental.sample_from_datasets(dataset_list, dset_weights)
# choice_dataset = tf.data.Dataset.range(len(dataset_list)).repeat()
# d = tf.data.experimental.choose_from_datasets(dataset_list,
# choice_dataset)
if is_training:
d = d.shuffle(buffer_size=256)
d = d.apply(
tf.data.experimental.map_and_batch(
lambda record: _decode_record(record, name_to_features),
batch_size=batch_size,
drop_remainder=drop_remainder))
return d
return input_fn
def model_fn_builder(bert_config, init_checkpoint, learning_rate,
num_train_steps, num_warmup_steps, use_tpu,
use_one_hot_embeddings, num_choices):
"""Returns `model_fn` closure for TPUEstimator."""
def model_fn(features, labels, mode, params): # pylint: disable=unused-argument
"""The `model_fn` for TPUEstimator."""
tf.logging.info("*** Features ***")
for name in sorted(features.keys()):
tf.logging.info(" name = %s, shape = %s" % (name, features[name].shape))
input_ids = [features["input_ids" + str(i)] for i in range(num_choices)]
input_mask = [features["input_mask" + str(i)] for i in range(num_choices)]
segment_ids = [features["segment_ids" + str(i)] for i in range(num_choices)]
label_ids = features["labels"]
label_types = features["label_types"]
seq_length = input_ids[0].shape[-1]
input_ids = tf.reshape(tf.stack(input_ids, axis=1), [-1, seq_length])
input_mask = tf.reshape(tf.stack(input_mask, axis=1), [-1, seq_length])
segment_ids = tf.reshape(tf.stack(segment_ids, axis=1), [-1, seq_length])
is_training = (mode == tf_estimator.ModeKeys.TRAIN)
is_real_example = tf.reduce_sum(tf.one_hot(label_types, 8), axis=1)
model = modeling.BertModel(
config=bert_config,
is_training=is_training,
input_ids=input_ids,
input_mask=input_mask,
token_type_ids=segment_ids,
use_one_hot_embeddings=use_one_hot_embeddings)
(total_loss, per_example_loss, logits,
probabilities) = model_builder.create_model(
model, label_ids, label_types,
FLAGS.train_batch_size if is_training else FLAGS.eval_batch_size,
num_choices, use_tpu, FLAGS.add_lv2loss)
tvars = tf.trainable_variables()
initialized_variable_names = {}
scaffold_fn = None
if init_checkpoint:
(assignment_map, initialized_variable_names
) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)
if use_tpu:
def tpu_scaffold():
tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
return tf.train.Scaffold()
scaffold_fn = tpu_scaffold
else:
tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
tf.logging.info("**** Trainable Variables ****")
for var in tvars:
init_string = ""
if var.name in initialized_variable_names:
init_string = ", *INIT_FROM_CKPT*"
tf.logging.info(" name = %s, shape = %s%s", var.name, var.shape,
init_string)
output_spec = None
if mode == tf_estimator.ModeKeys.TRAIN:
train_op = optimization.create_optimizer(total_loss, learning_rate,
num_train_steps,
num_warmup_steps, use_tpu)
output_spec = contrib_tpu.TPUEstimatorSpec(
mode=mode,
loss=total_loss,
train_op=train_op,
scaffold_fn=scaffold_fn)
elif mode == tf_estimator.ModeKeys.EVAL:
def metric_fn(per_example_loss, label_ids, logits, is_real_example):
"""Collect metrics for function."""
predictions = tf.argmax(logits, axis=-1, output_type=tf.int32)
accuracy = tf.metrics.accuracy(
labels=label_ids, predictions=predictions, weights=is_real_example)
loss = tf.metrics.mean(values=per_example_loss, weights=is_real_example)
metric_dict = {
"eval_accuracy": accuracy,
"eval_loss": loss,
}
for i in range(8):
metric_dict["acc" + str(i)] = tf.metrics.accuracy(
labels=label_ids[:, i],
predictions=predictions[:, i],
weights=is_real_example[:, i])
return metric_dict
eval_metrics = (metric_fn,
[per_example_loss, label_ids, logits, is_real_example])
output_spec = contrib_tpu.TPUEstimatorSpec(
mode=mode,
loss=total_loss,
eval_metrics=eval_metrics,
scaffold_fn=scaffold_fn)
else:
output_spec = contrib_tpu.TPUEstimatorSpec(
mode=mode,
predictions={"probabilities": probabilities},
scaffold_fn=scaffold_fn)
return output_spec
return model_fn
def main(_):
tf.logging.set_verbosity(tf.logging.INFO)
tokenization.validate_case_matches_checkpoint(FLAGS.do_lower_case,
FLAGS.init_checkpoint)
if not FLAGS.do_train and not FLAGS.do_eval:
raise ValueError("At least one of `do_train`, `do_eval` must be True.")
bert_config = modeling.BertConfig.from_json_file(FLAGS.bert_config_file)
if FLAGS.max_seq_length > bert_config.max_position_embeddings:
raise ValueError(
"Cannot use sequence length %d because the BERT model "
"was only trained up to sequence length %d" %
(FLAGS.max_seq_length, bert_config.max_position_embeddings))
tf.gfile.MakeDirs(FLAGS.output_dir)
tpu_cluster_resolver = None
if FLAGS.use_tpu and FLAGS.tpu_name:
tpu_cluster_resolver = contrib_cluster_resolver.TPUClusterResolver(
FLAGS.tpu_name, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project)
is_per_host = contrib_tpu.InputPipelineConfig.PER_HOST_V2
run_config = contrib_tpu.RunConfig(
cluster=tpu_cluster_resolver,
master=FLAGS.master,
model_dir=FLAGS.output_dir,
save_checkpoints_steps=FLAGS.save_checkpoints_steps,
tpu_config=contrib_tpu.TPUConfig(
iterations_per_loop=FLAGS.iterations_per_loop,
num_shards=FLAGS.num_tpu_cores,
per_host_input_for_training=is_per_host))
num_train_steps = None
num_warmup_steps = None
if FLAGS.do_train:
num_train_steps = int(FLAGS.train_data_size / FLAGS.train_batch_size)
num_warmup_steps = int(num_train_steps * FLAGS.warmup_proportion)
model_fn = model_fn_builder(
bert_config=bert_config,
init_checkpoint=FLAGS.init_checkpoint,
learning_rate=FLAGS.learning_rate,
num_train_steps=num_train_steps,
num_warmup_steps=num_warmup_steps,
use_tpu=FLAGS.use_tpu,
use_one_hot_embeddings=FLAGS.use_tpu,
num_choices=FLAGS.num_choices)
# If TPU is not available, this will fall back to normal Estimator on CPU
# or GPU.
estimator = contrib_tpu.TPUEstimator(
use_tpu=FLAGS.use_tpu,
model_fn=model_fn,
config=run_config,
train_batch_size=FLAGS.train_batch_size,
eval_batch_size=FLAGS.eval_batch_size,
predict_batch_size=FLAGS.predict_batch_size)
if FLAGS.do_train:
tf.logging.info("***** Running training *****")
tf.logging.info(" Batch size = %d", FLAGS.train_batch_size)
tf.logging.info(" Num steps = %d", num_train_steps)
train_input_fn = file_based_input_fn_builder(
input_file=FLAGS.train_file,
seq_length=FLAGS.max_seq_length,
is_training=True,
drop_remainder=True,
num_choices=FLAGS.num_choices)
estimator.train(input_fn=train_input_fn, steps=num_train_steps)
if FLAGS.do_eval:
# This tells the estimator to run through the entire set.
if FLAGS.eval_data_size < 0:
eval_steps = None
else:
eval_steps = int(FLAGS.eval_data_size / FLAGS.eval_batch_size)
eval_drop_remainder = True if FLAGS.use_tpu else False
eval_input_fn = file_based_input_fn_builder(
input_file=FLAGS.eval_file,
seq_length=FLAGS.max_seq_length,
is_training=False,
drop_remainder=eval_drop_remainder,
num_choices=FLAGS.num_choices)
# checkpoints_iterator blocks until a new checkpoint appears.
for ckpt in contrib_training.checkpoints_iterator(estimator.model_dir):
try:
result = estimator.evaluate(input_fn=eval_input_fn, steps=eval_steps)
tf.logging.info("********** Eval results:*******\n")
for key in sorted(result.keys()):
tf.logging.info("%s = %s" % (key, str(result[key])))
except tf.errors.NotFoundError:
tf.logging.error("Checkpoint path '%s' no longer exists.", ckpt)
if __name__ == "__main__":
flags.mark_flag_as_required("eval_file")
flags.mark_flag_as_required("vocab_file")
flags.mark_flag_as_required("bert_config_file")
flags.mark_flag_as_required("output_dir")
app.run(main)
|
[
"kentonl@google.com"
] |
kentonl@google.com
|
3de801a5cf89ce31e26495438899885b07a98824
|
9170f6cecd25b963254ea456da5cd9c5d2decd23
|
/mode.py
|
95c22b6b2686814884e0625d0925999ee35ac6de
|
[] |
no_license
|
sandeeppunmia/PROJECT-104
|
a1ad8f90846417e95edf8c272081ec37cd6aa122
|
cf44848a6549173eea6a9d65aa082a9071f50c7f
|
refs/heads/main
| 2023-05-07T18:03:03.061990
| 2021-06-01T12:53:46
| 2021-06-01T12:53:46
| 372,829,241
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 332
|
py
|
import csv
import statistics
with open('SOCR-HeightWeight.csv',newline='') as f:
reader= csv.reader(f)
file_data = list(reader)
file_data.pop(0)
new_data=[]
for i in range(len(file_data)):
n_num = file_data[i][2]
new_data.append(n_num)
mode = statistics.mode(new_data)
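# Note: csv.reader yields strings, so the mode here is computed over the
# string representations of the column values.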
print('Mode is '+str(mode))
|
[
"noreply@github.com"
] |
sandeeppunmia.noreply@github.com
|
00fae6a272f80eac4199b2bbe400a042ce45afa0
|
99aa48b929961a3e8ac238a1f51ce0b2499af2b7
|
/exercises/dot-dsl/example.py
|
125bf278d07febe3ae509bed675a10ec219d3c07
|
[
"MIT",
"Python-2.0"
] |
permissive
|
JodieHaywood/python-6
|
b4dfbadcddd97d81e8bcb16b315395610f7fb8be
|
b71bfb6f82fb43d49752635d89eab843cb627e4c
|
refs/heads/master
| 2020-03-30T13:24:16.929668
| 2018-10-02T12:45:25
| 2018-10-02T12:45:25
| 151,271,387
| 1
| 0
|
MIT
| 2018-10-02T14:46:03
| 2018-10-02T14:46:02
| null |
UTF-8
|
Python
| false
| false
| 1,521
|
py
|
NODE, EDGE, ATTR = range(3)
class Node(object):
def __init__(self, name, attrs={}):
self.name = name
self.attrs = attrs
def __eq__(self, other):
return self.name == other.name and self.attrs == other.attrs
class Edge(object):
def __init__(self, src, dst, attrs={}):
self.src = src
self.dst = dst
self.attrs = attrs
def __eq__(self, other):
return (self.src == other.src and
self.dst == other.dst and
self.attrs == other.attrs)
class Graph(object):
def __init__(self, data=[]):
self.nodes = []
self.edges = []
self.attrs = {}
if not isinstance(data, list):
raise TypeError("Graph data malformed")
for item in data:
if len(item) < 3:
raise TypeError("Graph item incomplete")
type_ = item[0]
if type_ == ATTR:
if len(item) != 3:
raise ValueError("ATTR malformed")
self.attrs[item[1]] = item[2]
elif type_ == NODE:
if len(item) != 3:
raise ValueError("NODE malformed")
self.nodes.append(Node(item[1], item[2]))
elif type_ == EDGE:
if len(item) != 4:
raise ValueError("EDGE malformed")
self.edges.append(Edge(item[1], item[2], item[3]))
else:
raise ValueError("Unknown item {}".format(item[0]))
|
[
"nathan.parsons@warwick.ac.uk"
] |
nathan.parsons@warwick.ac.uk
|
6132066f7af58efd64ae52142b80a77a2a711ff6
|
f925f8778355445bea1950703e21d804b069eda5
|
/main.py
|
a6fcfca6d383f3021cf7d8c98c067d41cb2f8fac
|
[] |
no_license
|
tanerahmed/play_with_git
|
adc5bf89056542395df6e587fbca1710abdecf6f
|
154000c1599979cfba17bd22d10087743988f50c
|
refs/heads/main
| 2023-07-27T13:03:26.902738
| 2021-09-09T14:41:45
| 2021-09-09T14:41:45
| 383,704,642
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 607
|
py
|
# This is a sample Python script.
# Press Shift+F10 to execute it or replace it with your code.
# Press Double Shift to search everywhere for classes, files, tool windows, actions, and settings.
def print_hi(name):
# Use a breakpoint in the code line below to debug your script.
print(f'Hi, {name}') # Press Ctrl+F8 to toggle the breakpoint.
print('after reset')
print('09.09')
print('09.09 -------- 2222222')
# Press the green button in the gutter to run the script.
if __name__ == '__main__':
print_hi('PyCharm')
# See PyCharm help at https://www.jetbrains.com/help/pycharm/
|
[
"tanerahmed87@gmail.com"
] |
tanerahmed87@gmail.com
|
8b1df3ff3fc5477fb786efbc0cf04d80a24235f4
|
b45dc624c7bf5eb02da6853a3ae3cfb4fb61e4c4
|
/splash/__init__.py
|
1be2693f75edf80fddc0092ae29605f85e097927
|
[
"BSD-2-Clause"
] |
permissive
|
TrinityComputers/splash
|
c8fbf82c06b77016c32b3fbed3f880a21b5add39
|
f8421cab0594d9417f068f74d45d8fa64c97ae79
|
refs/heads/master
| 2020-12-25T02:20:39.219817
| 2016-02-20T21:33:17
| 2016-02-20T21:33:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 162
|
py
|
__version__ = '2.0'
from distutils.version import LooseVersion
version_info = tuple(LooseVersion(__version__).version)
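# For example, with __version__ = '2.0' this yields version_info == (2, 0).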
__all__ = ['__version__', 'version_info']
|
[
"kmike84@gmail.com"
] |
kmike84@gmail.com
|
8cea7d6e9eb15d5318ed4c600ab7f89ddc82ac29
|
838642b4f7dae35e37198f0932ec713ef755f5b7
|
/solarproject/solarproject/settings.py
|
73607dcfd44b47e01c22a3a23d03d00390839030
|
[] |
no_license
|
guaka/kloud
|
a4d350d7f50898d4fd13689741626623aac38201
|
1d97dd21a90953ffd956e12d9dcb74fc9548c772
|
refs/heads/master
| 2020-05-29T20:52:47.657235
| 2014-03-11T22:12:47
| 2014-03-11T22:12:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 488
|
py
|
# Scrapy settings for solarproject project
#
# For simplicity, this file contains only the most important settings by
# default. All the other settings are documented here:
#
# http://doc.scrapy.org/en/latest/topics/settings.html
#
BOT_NAME = 'solarproject'
SPIDER_MODULES = ['solarproject.spiders']
NEWSPIDER_MODULE = 'solarproject.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'solarproject (+http://www.yourdomain.com)'
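# Illustrative examples of other commonly tuned Scrapy settings (left disabled here):
#DOWNLOAD_DELAY = 1
#CONCURRENT_REQUESTS = 16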
|
[
"kasper@guaka.org"
] |
kasper@guaka.org
|
1f941c786360efa674c2f027ec77232546ede5bf
|
5bd9ffad4ce5e0f0f2a7cd0dac51e6abe3470b9c
|
/reassessor/normalizer/gt.py
|
194f4269d38af11bad689102232c0643c1b61511
|
[
"MIT"
] |
permissive
|
SoftSec-KAIST/Reassessor
|
cd3e0cb698289b7c0bcf65efc07617d7aaf5d85f
|
55d40ec4ebb9c13a0e850967bc2ecc79fe16df25
|
refs/heads/main
| 2023-03-07T10:18:39.188463
| 2022-12-12T23:56:15
| 2022-12-12T23:56:15
| 537,323,886
| 23
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 33,957
|
py
|
import re
import struct
import capstone
import sys
import os
import pickle
import glob, json
from elftools.elf.elffile import ELFFile
from elftools.elf.descriptions import describe_reloc_type
from elftools.elf.relocation import RelocationSection
from collections import defaultdict
from reassessor.lib.types import Program, InstType, LblTy, Label
from reassessor.lib.parser import CompGen
from reassessor.lib.asmfile import AsmFileInfo, LocInfo, AsmInst
class JumpTable:
def __init__(self, entries):
self.entries = entries
self.length = len(entries)
self.base = 0
def set_base(self, base):
self.base = base
def get_entries(self):
pass
class CompData:
def __init__(self, entries):
self.entries = entries
self.length = len(entries)
self.base = 0
def set_base(self, base):
self.base = base
def get_entries(self):
pass
class FuncInst:
def __init__(self, inst_list, func_info, asm_path):
self.inst_list = inst_list
self.name, self.addr, self.size = func_info
self.asm_path = asm_path
def get_dwarf_loc(filename):
dwarf_loc_map = {}
def process_file(filename):
with open(filename, 'rb') as f:
elffile = ELFFile(f)
if not elffile.has_dwarf_info():
print(' file has no DWARF info')
return
dwarfinfo = elffile.get_dwarf_info()
for CU in dwarfinfo.iter_CUs():
line_program = dwarfinfo.line_program_for_CU(CU)
if line_program is None:
continue
line_entry_mapping(line_program)
def line_entry_mapping(line_program):
lp_entries = line_program.get_entries()
for lpe in lp_entries:
if not lpe.state or lpe.state.file == 0:
continue
filename = lpe_filename(line_program, lpe.state.file)
if lpe.state.address not in dwarf_loc_map.keys():
dwarf_loc_map[lpe.state.address] = set()
dwarf_loc_map[lpe.state.address].add('%s:%d'%(filename, lpe.state.line))
def lpe_filename(line_program, file_index):
lp_header = line_program.header
file_entries = lp_header["file_entry"]
file_entry = file_entries[file_index - 1]
dir_index = file_entry["dir_index"]
if dir_index == 0:
return file_entry.name.decode()
directory = lp_header["include_directory"][dir_index - 1]
return os.path.join(directory, file_entry.name).decode()
process_file(filename)
return dwarf_loc_map
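# The returned mapping has the form {instruction_address: {"source_path:line", ...}},
# e.g. {0x4005d0: {"src/main.c:42"}} (addresses and paths here are hypothetical).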
def disasm(prog, cs, addr, length):
offset = addr - prog.text_base
insts = []
for inst in prog.disasm_range(cs, addr, length):
#if not is_semantically_nop(inst):
insts.append(inst)
return insts
def get_reloc_bytesize(rinfo_type):
if 'X86_64_' in rinfo_type and '32' not in rinfo_type:
return 8
else:
return 4
def get_reloc_gotoff(rinfo_type):
if 'GOTOFF' in rinfo_type:
return True
else:
return False
def get_reloc(elf):
relocs = {}
for section in elf.iter_sections():
if not isinstance(section, RelocationSection):
continue
if ( section.name.startswith(".rel") and \
( ("data" in section.name) or \
section.name.endswith(".dyn") or \
section.name.endswith('.init_array') or \
section.name.endswith('.fini_array') ) ) or \
section.name in ['.rela.plt'] or \
section.name in ['.rel.plt']:
for relocation in section.iter_relocations():
addr = relocation['r_offset']
t = describe_reloc_type(relocation['r_info_type'], elf)
sz = get_reloc_bytesize(t)
is_got = get_reloc_gotoff(t)
relocs[addr] = (sz, is_got, t)
return relocs
def get_reloc_symbs(elf, sec_name = '.symtab'):
names = {}
dynsym = elf.get_section_by_name(sec_name)#('.dynsym')
for symb in dynsym.iter_symbols():
if symb['st_shndx'] != 'SHN_UNDEF':
addr = symb['st_value']
name = symb.name
size = symb['st_size']
if addr != 0 and len(name) > 0:
if name in names:
names[name].append((addr, size))
else:
names[name] = [(addr, size)]
return names
class NormalizeGT:
def __init__(self, bin_path, asm_dir, reloc_file='', build_path=''):
self.bin_path = bin_path
self.asm_dir = asm_dir
self.build_path = build_path
self.reloc_file = reloc_file
#self.ex_parser = ATTExParser()
self.collect_loc_candidates()
f = open(self.bin_path, 'rb')
self.elf = ELFFile(f)
if self.elf.get_section_by_name('.got.plt'):
self.got_addr = self.elf.get_section_by_name('.got.plt')['sh_addr']
else:
self.got_addr = self.elf.get_section_by_name('.got')['sh_addr']
if reloc_file:
with open(reloc_file, 'rb') as fp:
reloc_elf = ELFFile(fp)
self.relocs = get_reloc(reloc_elf)
else:
self.relocs = get_reloc(self.elf)
self.symbs = get_reloc_symbs(self.elf)
self.text = self.elf.get_section_by_name(".text")
self.text_base = self.text.header["sh_addr"]
if self.elf['e_machine'] in ('EM_X86_64'):
self.cs = capstone.Cs(capstone.CS_ARCH_X86, capstone.CS_MODE_64)
else:
self.cs = capstone.Cs(capstone.CS_ARCH_X86, capstone.CS_MODE_32)
self.cs.detail = True
self.cs.syntax = capstone.CS_OPT_SYNTAX_ATT
disassembly = self.cs.disasm(self.text.data(), self.text_base)
self.comp_gen = CompGen(got_addr = self.got_addr)
self.instructions = {} # address : instruction
for instruction in disassembly:
self.instructions[instruction.address] = instruction
self.instruction_addrs = list(self.instructions.keys())
self.instruction_addrs.sort()
self.prog = Program(self.elf, self.cs, asm_path=asm_dir)
self.match_src_to_bin()
def is_semantically_nop(self, inst):
if isinstance(inst, capstone.CsInsn):
mnemonic = inst.mnemonic
operand_list = inst.op_str.split(', ')
elif isinstance(inst, AsmInst):
mnemonic = inst.opcode
operand_list = inst.operand_list
try:
if mnemonic.startswith("nop"):
return True
if mnemonic[:3] == "lea" and mnemonic != 'leave':
return operand_list[0] == "(" + operand_list[1] + ")"
elif mnemonic[:3] == "mov" and not mnemonic.startswith("movs"):
return operand_list[0] == operand_list[1]
except:
assert False, 'unexpected instruction %s' % ' '.join(operand_list)
return False
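# For illustration: besides explicit nops, this treats padding such as
# 'movl %eax, %eax' or 'leal (%rsi), %rsi' (source equal to destination)
# as semantic no-ops.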
def get_section(self, addr):
for section in self.elf.iter_sections():
sec_addr = section['sh_addr']
sec_size = section['sh_size']
if sec_addr <= addr and addr < sec_addr + sec_size:
return section
return None
def get_int(self, addr, sz = 4):
section = self.get_section(addr)
if not section:
return 0
base = section['sh_addr']
offset = addr - base
data = section.data()
data = data[offset:offset + sz]
if sz == 4:
data = data.ljust(4, b'\x00')
return struct.unpack("<I", data)[0]
elif sz == 8:
data = data.ljust(8, b'\x00')
return struct.unpack("<Q", data)[0]
def update_table(self, addr, comp_data, asm_path):
for line, idx in comp_data.members:
directive = line.split()[0]
if directive in ['.long']:
sz = 4
elif directive in ['.quad']:
sz = 8
else:
assert False, 'Unsupported jump table entries'
value = self.get_int(addr, sz)
label_dict = {comp_data.label:comp_data.addr}
data = self.comp_gen.get_data(addr, asm_path, line, idx, value, label_dict)
self.prog.Data[addr] = data
#component = self.comp_gen.get_data_components(line.split()[1], value, label_dict)
#self.prog.Data[addr] = Data(addr, component, asm_path, idx+1, line)
addr += sz
def update_data(self, addr, comp_data, asm_path):
for line, idx in comp_data.members:
directive = line.split()[0]
if directive in ['.long']:
sz = 4
elif directive in ['.quad']:
sz = 8
elif directive in ['.word']:
sz = 2
elif directive in ['.byte']:
sz = 1
elif directive in ['.zero']:
sz = int(line.split()[1])
else:
print(line)
assert False, "unknown data type"
expr = ' '.join(line.split()[1:])
if sz in [4,8] and re.search('.[+|-]', expr):
value = self.get_int(addr, sz)
#if '@GOTOFF' in line:
# value += self.got_addr
data = self.comp_gen.get_data(addr, asm_path, line, idx , value)
self.prog.Data[addr] = data
#component = self.comp_gen.get_data_components(expr, value)
#self.prog.Data[addr] = Data(addr, component, asm_path, idx+1, directive+' '+ expr)
addr += sz
def update_labels(self, func_info, factors, asm_file): #label_dict, jmptbls, factors):
target_addr = factors.value - factors.num
for label in factors.labels:
if label == '_GLOBAL_OFFSET_TABLE_':
continue
if '@GOT' in label and '@GOTPCREL' not in label:
label = label.split('@')[0]
if label in asm_file.composite_data and not asm_file.composite_data[label].addr:
asm_file.composite_data[label].set_addr(target_addr)
if label in asm_file.jmp_dict:
asm_file.jmp_dict[label].set_addr(target_addr)
def get_objdump(self):
temp_file = "/tmp/xx" + self.bin_path.replace('/','_')
os.system("objdump -t -f %s | grep \"F .text\" | sort > %s" % (self.bin_path, temp_file))
funcs = []
with open(temp_file) as fp:
lines = fp.readlines()
for line in lines:
l = line.split()
fname = l[-1]
faddress = int(l[0], 16)
fsize = int(l[4], 16)
try:
#if len(loc_candidates) and fsize > 0:
if self.has_func_assem_file(fname) and fsize > 0:
funcs.append([fname, faddress, fsize])
except:
pass
os.unlink(temp_file)
return funcs
def update_instr(self, func_info):
fname, faddress, fsize = func_info
f_offset = faddress - self.text_base
f_end_offset = f_offset + fsize
dump = self.cs.disasm(self.text.data()[f_offset:f_end_offset], faddress)
for inst in dump:
if inst.address in self.instructions:
break
self.instructions[inst.address] = inst
self.instruction_addrs.append(inst.address)
self.instruction_addrs.sort()
def match_src_to_bin(self):
self.bin2src_dict = {}
self.composite_data = dict()
self.jmp_table_dict = dict()
debug_loc_paths = {}
src_files = {}
#result = {}
self.dwarf_loc = get_dwarf_loc(self.bin_path)
funcs = self.get_objdump() # [funcname, address, size] list
for func_info in funcs:
fname, faddress, fsize = func_info
if '__x86.get_pc_thunk' in fname:
continue
'''
Handle weird padding bytes
'''
if faddress not in self.instructions:
self.update_instr(func_info) #faddress, fsize)
func_code = self.get_func_code(faddress, fsize)
asm_file, addressed_asm_list = self.find_match_func(func_code, func_info)
func_summary = FuncInst(addressed_asm_list, func_info, asm_file.file_path)
self.bin2src_dict[faddress] = func_summary
prev_opcode = ''
for idx, (addr, capstone_insn, asm_token) in enumerate(addressed_asm_list):
if not asm_token:
# nop code might has no relevant assembly code
if prev_opcode in ['jmp', 'jmpq', 'jmpl', 'call', 'callq', 'calll', 'ret', 'retq', 'retl', 'halt', 'ud2']:
next_addr, _, _ = addressed_asm_list[idx+1]
self.prog.aligned_region.update([item for item in range(addr, next_addr)])
self.prog.Instrs[addr] = InstType(addr, asm_file.file_path)
continue
prev_opcode = capstone_insn.mnemonic
instr = self.comp_gen.get_instr(addr, asm_file.file_path, asm_token, capstone_insn)
self.prog.Instrs[addr] = instr
# update labels
if instr.imm and instr.imm.has_label():
self.update_labels(func_summary, instr.imm, asm_file)
if instr.disp and instr.disp.has_label():
self.update_labels(func_summary, instr.disp, asm_file)
text_end = self.text.data_size + self.text_base
prev_end = self.text_base
unknown_region = set()
for faddress in sorted(self.bin2src_dict.keys()):
unknown_region.update(range(prev_end, faddress))
prev_end = faddress + self.bin2src_dict[faddress].size
unknown_region.update(range(prev_end, text_end))
self.prog.unknown_region = unknown_region
def is_semantically_same(self, insn, asm):
if insn.mnemonic[:-1] == asm.opcode:
return True
if insn.mnemonic == asm.opcode[:-1]:
return True
if insn.mnemonic.startswith('rep') and asm.opcode.startswith('rep'):
if insn.mnemonic.split()[1] == asm.opcode.split()[1]:
return True
if insn.group(capstone.CS_GRP_JUMP):
jumps = [
["jo"],
["jno"],
["js"],
["jns"],
["je", "jz"],
["jne", "jnz"],
["jb", "jna", "jc"],
["jnb", "jae", "jnc"],
["jbe", "jna"],
["ja", "jnb"],
["jl", "jng"],
["jge", "jnl"],
["jle", "jng"],
["jg", "jnl"],
["jp", "jpe"],
["jnp", "jpo"],
["jcx", "jec"]
]
for jump in jumps:
if insn.mnemonic in jump and asm.opcode in jump:
return True
else:
opcodes = [
# Mnemonic Alias
["call", "callw"],
["call", "calll"],
["call", "callq"],
["cbw", "cbtw"],
["cwde", "cwtl"],
["cwd", "cwtd"],
["cdq", "cltd"],
["cdqe", "cltq"],
["cqo", "cqto"],
["lret", "lretw"],
["lret", "lretl"],
["leavel", "leave"],
["leaveq", "leave"],
["loopz", "loope"],
["loopnz", "loopne"],
["popf", "popfw"],
["popf", "popfl"],
["popf", "popfq"],
["popfd", "popfl"],
["pushf", "pushfw"],
["pushf", "pushfl"],
["pushf", "pushfq"],
["pushfd", "pushfl"],
["pusha", "pushaw"],
["pusha", "pushal"],
["repe", "rep"],
["repz", "rep"],
["repnz", "repne"],
["ret", "retw"],
["ret", "retl"],
["ret", "retq"],
["salb", "shlb"],
["salw", "shlw"],
["sall", "shll"],
["salq", "shlq"],
["smovb", "movsb"],
["smovw", "movsw"],
["smovl", "movsl"],
["smovq", "movsq"],
["ud2a", "ud2"],
["verrw", "verr"],
["sysret", "sysretl"],
["sysexit", "sysexitl"],
["lgdt", "lgdtw"],
["lgdt", "lgdtl"],
["lgdt", "lgdtq"],
["lidt", "lidtw"],
["lidt", "lidtl"],
["lidt", "lidtq"],
["sgdt", "sgdtw"],
["sgdt", "sgdtl"],
["sgdt", "sgdtq"],
["sidt", "sidtw"],
["sidt", "sidtl"],
["sidt", "sidtq"],
["fcmovz", "fcmove"],
["fcmova", "fcmovnbe"],
["fcmovnae", "fcmovb"],
["fcmovna", "fcmovbe"],
["fcmovae", "fcmovnb"],
["fcomip", "fcompi"],
["fildq", "fildll"],
["fistpq", "fistpll"],
["fisttpq", "fisttpll"],
["fldcww", "fldcw"],
["fnstcww", "fnstcw"],
["fnstsww", "fnstsw"],
["fucomip", "fucompi"],
["fwait", "wait"],
["fxsaveq", "fxsave64"],
["fxrstorq", "fxrstor64"],
["xsaveq", "xsave64"],
["xrstorq", "xrstor64"],
["xsaveoptq", "xsaveopt64"],
["xrstorsq", "xrstors64"],
["xsavecq", "xsavec64"],
["xsavesq", "xsaves64"],
# findings
['shl', 'sal'],
['cmovael', 'cmovnb'],
['cmovbq', 'cmovc'],
['retq', 'rep ret'],
['retl', 'rep ret'],
# assembler optimization
['leaq', 'movq'],
['leal', 'movl'],
]
for opcode in opcodes:
if insn.mnemonic in opcode and asm.opcode in opcode:
return True
if self.check_suffix(insn.mnemonic, asm.opcode):
return True
if insn.mnemonic in ['addq'] and asm.opcode in ['subq']:
if asm.operand_list[0].startswith('$-'):
return True
capstone_bugs = [
['movd', 'movq'],
['cmovaeq', 'cmovnb'],
['cmovaew', 'cmovnb'],
['cmovbl', 'cmovc'],
['cmovael', 'cmovnc'],
['cmovaeq', 'cmovnc'],
]
for opcode in capstone_bugs:
if insn.mnemonic in opcode and asm.opcode in opcode:
return True
return False
def check_suffix(self, opcode1, opcode2):
suffix_list = [('(.*)c$','(.*)b$'), #setc -> setb
('(.*)z$','(.*)e$'), #setz -> sete
('(.*)na$','(.*)be$'), #setna -> setbe
('(.*)nb$','(.*)ae$'), #setnb -> setae
('(.*)nc$','(.*)ae$'), #setnc -> setae
('(.*)ng$','(.*)le$'), #setng -> setle
('(.*)nl$','(.*)ge$'), #setnl -> setge
('(.*)nz$','(.*)ne$'), #setnl -> setge
('(.*)pe$','(.*)p$'), #setpe -> setp
('(.*)po$','(.*)np$'), #setpo -> setnp
('(.*)nae$','(.*)b$'), #setnae -> setb
('(.*)nbe$','(.*)a$'), #setnbe -> seta
('(.*)nge$','(.*)l$'), #setnbe -> seta
('(.*)nle$','(.*)g$')] #setnle -> setg
for (suff1, suff2) in suffix_list:
rex = suff1+'|'+suff2
if re.search(rex, opcode1) and re.search(rex,opcode2):
if re.search(suff1, opcode1): tmp1 = re.findall(suff1, opcode1)[0]
else: tmp1 = re.findall(suff2, opcode1)[0]
if re.search(suff1, opcode2): tmp2 = re.findall(suff1, opcode2)[0]
else: tmp2 = re.findall(suff2, opcode2)[0]
if tmp1 == tmp2:
return True
return False
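# Example: check_suffix('setc', 'setb') returns True, since the 'c'/'b'
# condition-code suffixes are aliases and both opcodes share the 'set' stem;
# the same holds for pairs such as 'jz'/'je'.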
def assem_addr_map(self, func_code, asm_token_list, candidate_len, debug=False):
addressed_asm_list = []
idx = 0
for bin_asm in func_code:
if idx >= len(asm_token_list):
if self.is_semantically_nop(bin_asm):
addressed_asm_list.append((bin_asm.address, bin_asm, ''))
continue
return []
asm_token = asm_token_list[idx]
if bin_asm.address in self.dwarf_loc:
dwarf_set1 = self.dwarf_loc[bin_asm.address]
dwarf_set2 = set()
while isinstance(asm_token, LocInfo):
dwarf_set2.add( '%s:%d'%(asm_token.path, asm_token.idx))
idx += 1
asm_token = asm_token_list[idx]
#give exception for a first debug info since some debug info is related to prev func
#in case of weak symbols, multiple debug info could be merged.
#ex) {'xercesc/dom/DOMNodeImpl.hpp:271', './xercesc/dom/impl/DOMNodeImpl.hpp:271'}
if dwarf_set2 - dwarf_set1:
#clang might eliminate file path..
new_dwarf_set1 = set()
for debug_str in dwarf_set1:
file_path, no = debug_str.split(':')
file_name = os.path.basename(file_path)
new_dwarf_set1.add('%s:%s'%(file_name, no))
new_dwarf_set2 = set()
for debug_str in dwarf_set2:
file_path, no = debug_str.split(':')
file_name = os.path.basename(file_path)
new_dwarf_set2.add('%s:%s'%(file_name, no))
if new_dwarf_set2 - new_dwarf_set1:
if debug:
pass
return []
if isinstance(asm_token, LocInfo):
# nop code might not have debug info
if self.is_semantically_nop(bin_asm):
addressed_asm_list.append((bin_asm.address, bin_asm, ''))
continue
elif debug:
# some debug info might be omitted
while isinstance(asm_token, LocInfo):
idx += 1
asm_token = asm_token_list[idx]
pass
else:
return []
if self.is_semantically_nop(bin_asm):
#.align might cause nop code
if self.is_semantically_nop(asm_token):
addressed_asm_list.append((bin_asm.address, bin_asm, asm_token))
else:
addressed_asm_list.append((bin_asm.address, bin_asm, ''))
continue
elif asm_token.opcode == bin_asm.mnemonic:
addressed_asm_list.append((bin_asm.address, bin_asm, asm_token))
elif self.is_semantically_same(bin_asm, asm_token):
addressed_asm_list.append((bin_asm.address, bin_asm, asm_token))
else:
if candidate_len > 1:
if debug:
pass
return []
print(bin_asm)
print('%s %s'%(asm_token.opcode, ' '.join(asm_token.operand_list)))
addressed_asm_list.append((bin_asm.address, bin_asm, asm_token))
#return []
#assert False, 'Unexpected instruction sequence'
idx += 1
if idx < len(asm_token_list):
for idx2 in range(idx, len(asm_token_list)):
if not isinstance(asm_token_list[idx2], LocInfo):
#assert False, 'Unexpected instruction sequence'
return []
return addressed_asm_list
def find_match_func(self, func_code, func_info):
fname, faddress, fsize = func_info
if not self.has_func_assem_file(fname):
return None
ret = []
candidate_list = self.get_assem_file(fname)
candidate_len = len(candidate_list)
for asm_file in candidate_list:
if os.path.basename(asm_file.file_path) in ['src_sha224sum-md5sum.s']:
if os.path.basename(self.bin_path) in ['sha512sum', 'sha256sum', 'sha384sum']:
continue
if os.path.basename(asm_file.file_path) in ['src_sha256sum-md5sum.s']:
if os.path.basename(self.bin_path) in ['sha512sum', 'sha224sum', 'sha384sum']:
continue
if os.path.basename(asm_file.file_path) in ['src_sha384sum-md5sum.s']:
if os.path.basename(self.bin_path) in ['sha512sum', 'sha224sum', 'sha256sum']:
continue
if os.path.basename(asm_file.file_path) in ['src_sha512sum-md5sum.s']:
if os.path.basename(self.bin_path) in ['sha224sum', 'sha256sum', 'sha384sum']:
continue
if 'usable_st_size' in fname:
'''
grep '^usable_st_size:' coreutils-8.30/x64/clang/nopie/o1-bfd/src/* -A 10 | grep orl
coreutils-8.30/x64/clang/nopie/o1-bfd/src/dd.s- orl 24(%rdi), %eax
coreutils-8.30/x64/clang/nopie/o1-bfd/src/head.s- orl 24(%rdi), %eax
coreutils-8.30/x64/clang/nopie/o1-bfd/src/od.s- orl 24(%rdi), %eax
coreutils-8.30/x64/clang/nopie/o1-bfd/src/shuf.s- orl 24(%rdi), %eax
coreutils-8.30/x64/clang/nopie/o1-bfd/src/split.s- orl in_stat_buf+24(%rip), %eax
coreutils-8.30/x64/clang/nopie/o1-bfd/src/tail.s- orl 24(%rdi), %eax
coreutils-8.30/x64/clang/nopie/o1-bfd/src/truncate.s- orl 24(%rdi), %eax
coreutils-8.30/x64/clang/nopie/o1-bfd/src/wc.s- orl 24(%rdi), %eax
'''
if os.path.basename(asm_file.file_path) in ['dd.s', 'head.s', 'od.s', 'shuf.s', 'tail.s', 'truncate.s', 'wc.s']:
if os.path.basename(self.bin_path) in ['split']:
continue
if os.path.basename(asm_file.file_path) in ['split.s']:
if os.path.basename(self.bin_path) in ['dd', 'head', 'od', 'shuf', 'tail', 'truncate', 'wc']:
continue
#asm_inst_list = [line for line in asm_file.func_dict[fname] if isinstance(line, AsmInst)]
#addressed_asm_list = self.assem_addr_map(func_code, asm_inst_list, candidate_len)
addressed_asm_list = self.assem_addr_map(func_code, asm_file.func_dict[fname], candidate_len)
if not addressed_asm_list:
continue
ret.append((asm_file, addressed_asm_list))
if not ret:
# debug info might be omitted.
# we give some exception to assembly matching.
for asm_file in candidate_list:
addressed_asm_list = self.assem_addr_map(func_code, asm_file.func_dict[fname], candidate_len, True)
if addressed_asm_list:
ret.append((asm_file, addressed_asm_list))
assert len(ret) == 1, 'No matched assembly code'
asm_file, addressed_asm_list = ret[0]
asm_file.visited_func.add(fname)
return asm_file, addressed_asm_list
def get_func_code(self, address, size):
try:
result = []
idx = self.instruction_addrs.index(address)
curr = address
while True:
if curr >= address + size:
break
inst = self.instructions[curr]
result.append(inst)
curr += inst.size
return result
except:
print("Disassembly failed.")
exit()
def get_src_files(self, src_files, loc_candidates):
for loc_path, _ in loc_candidates:
if loc_path not in src_files.keys():
if self.build_path:
loc_path_full = os.path.join(self.build_path, loc_path[1:])
f = open(loc_path_full, errors='ignore')
src_files[loc_path] = f.read()
else:
loc_path_full = os.path.join(self.asm_dir, loc_path[1:])
f = open(loc_path_full, errors='ignore')
src_files[loc_path] = f.read()
return src_files
def get_src_paths(self):
srcs = []
for i in range(20):
t = "*/" * i
srcs += glob.glob(self.asm_dir + t + "*.s")
# give a first priority to a main source code
main_src = '%s/src/%s.s'%(self.asm_dir, os.path.basename(self.bin_path))
if main_src in srcs:
srcs.remove(main_src)
srcs.insert(0, main_src)
return srcs
def has_func_assem_file(self, func_name):
return func_name in self._func_map
def get_assem_file(self, func_name):
ret = []
for asm_path in self._func_map[func_name]:
            # skip assembly files whose copy of this function was already matched;
            # a local function can be defined twice
            # (e.g. _Z41__static_initialization in 483.xalancbmk)
if func_name in self.asm_file_dict[asm_path].visited_func:
pass
else:
ret.append(self.asm_file_dict[asm_path])
return ret
def collect_loc_candidates(self):
srcs = self.get_src_paths()
#result = {}
self._func_map = defaultdict(list)
self.asm_file_dict = dict()
for src in srcs:
asm_file = AsmFileInfo(src)
asm_file.scan()
self.asm_file_dict[src] = asm_file
for func_name in asm_file.func_dict.keys():
self._func_map[func_name].append(src)
def normalize_data(self):
visited_label = []
for asm_path, asm_file in self.asm_file_dict.items():
for label, comp_data in asm_file.composite_data.items():
if comp_data.addr:
self.update_data(comp_data.addr, comp_data, asm_path)
visited_label.append(label)
for asm_path, asm_file in self.asm_file_dict.items():
for label, comp_data in asm_file.composite_data.items():
if not comp_data.addr:
if label in self.symbs and len(self.symbs[label]) == 1 and label not in visited_label:
#if symbol size is zero we ignore it
if self.symbs[label][0][1] == 0:
continue
self.update_data(self.symbs[label][0][0], comp_data, asm_path)
visited_label.append(label)
#else:
# print('unknown comp data %s:%s'%(asm_path, label))
comp_set = set(self.prog.Data.keys())
reloc_set = set(self.relocs)
if comp_set - reloc_set:
print(comp_set - reloc_set)
for asm_path, asm_file in self.asm_file_dict.items():
for label, comp_data in asm_file.jmp_dict.items():
if comp_data.addr:
self.update_table(comp_data.addr, comp_data, asm_path)
visited_label.append(label)
for addr in self.relocs:
if addr in self.prog.Data:
# composite ms || already processed
continue
sz, is_got, r_type = self.relocs[addr]
value = self.get_int(addr, sz)
#This reloc data is added by linker
#if value == 0 and r_type in ['R_X86_64_64']:
# asm_line = '.quad %s'%(r_type)
# pass
#elif value == 0:
# continue
if r_type in ['R_X86_64_COPY', 'R_X86_64_REX_GOTPCRELX', 'R_386_COPY']:
continue
elif r_type in ['R_X86_64_GLOB_DAT', 'R_X86_64_JUMP_SLOT', 'R_386_GLOB_DAT', 'R_386_JUMP_SLOT']:
label = 'L%x'%(value)
asm_line = '.long ' + label
else:
directive = '.long'
if value == 0:
label = r_type
else:
if is_got:
value += self.got_addr
label = 'L%x@GOTOFF'%(value)
else:
label = 'L%x'%(value)
if sz == 8: directive = '.quad'
asm_line = directive + ' ' + label
data = self.comp_gen.get_data(addr, '', asm_line, 0, value, r_type = r_type)
self.prog.Data[addr] = data
def save(self, save_file):
with open(save_file, 'wb') as f:
pickle.dump(self.prog, f)
import argparse
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='normalize_retro')
parser.add_argument('bin_path', type=str)
parser.add_argument('asm_dir', type=str)
parser.add_argument('save_file', type=str)
parser.add_argument('--reloc', type=str)
parser.add_argument('--build_path', type=str)
args = parser.parse_args()
gt = NormalizeGT(args.bin_path, args.asm_dir, args.reloc, args.build_path)
gt.normalize_data()
gt.save(args.save_file)
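    # Illustrative invocation; the script file name and the paths below are
    # assumptions for the example only, not taken from the original project:
    #   python normalize_retro.py ./bin/sha256sum ./asm-dir gt.pickle \
    #       --reloc <reloc-info> --build_path <source-build-dir>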
|
[
"witbring@kaist.ac.kr"
] |
witbring@kaist.ac.kr
|
d5434e7f51707038dc79b37284a8b24146724f72
|
beeee5695deeb3b21eefcf44b31558746e9bc575
|
/build/ddynamic_reconfigure/catkin_generated/pkg.installspace.context.pc.py
|
aac9221fbfafd02e0785370f727f84f86861035d
|
[] |
no_license
|
mannylazalde/EECS106A
|
47db0728a02db498e77184010f1b59983c5a98a2
|
0ac0de951bdba56eb8634711448677b7c8a73114
|
refs/heads/master
| 2020-09-13T11:48:36.878212
| 2019-12-21T07:03:57
| 2019-12-21T07:03:57
| 222,768,305
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 644
|
py
|
# generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/home/cc/ee106a/fa19/class/ee106a-abe/ros_workspaces/project/install/include".split(';') if "/home/cc/ee106a/fa19/class/ee106a-abe/ros_workspaces/project/install/include" != "" else []
PROJECT_CATKIN_DEPENDS = "roscpp;dynamic_reconfigure".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "-lddynamic_reconfigure".split(';') if "-lddynamic_reconfigure" != "" else []
PROJECT_NAME = "ddynamic_reconfigure"
PROJECT_SPACE_DIR = "/home/cc/ee106a/fa19/class/ee106a-abe/ros_workspaces/project/install"
PROJECT_VERSION = "0.2.1"
|
[
"mannylazalde@berkeley.edu"
] |
mannylazalde@berkeley.edu
|
5c0adca41bca66a94c48640ccd512fc21f71e304
|
d8ce069f4eee6c9a58fb7980b92d7b36f5698733
|
/last_digit_fibo.py
|
5c2524cbb36e5bbf6738da7c2c35578c4a37b743
|
[] |
no_license
|
vaibhavik/LeetCode
|
daf37ba88c3c47d05f7387da980a448d90850681
|
0d7bd671a2a5e00446f3a16d5750c74c6a4e08dd
|
refs/heads/master
| 2020-04-27T20:44:42.599566
| 2019-09-11T07:12:08
| 2019-09-11T07:12:08
| 174,668,790
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 119
|
py
|
# Last digit of the n-th Fibonacci number: keep every term modulo 10 so the
# values never grow large (e.g. n = 10 gives 5, since F(10) = 55).
n = int(input())
f = [0, 1]
for i in range(2, n + 1):
    f.append((f[i - 1] + f[i - 2]) % 10)
print(f)
print(f[n])
|
[
"vaibhavikardale@gmail.com"
] |
vaibhavikardale@gmail.com
|
ddb5ec09564108fbfb8bbff984793835df0f096f
|
fea59bd9b97ca20a35315cda3927a261f48b64b4
|
/aws/serverlessjams/backend/verify_token.py
|
85d8c76036bd22436e2c1269fae5a35d8841cf28
|
[
"MIT"
] |
permissive
|
mengwangk/serverless-apps
|
f2eeb5f43c077e44f578a9e208d22d583615d584
|
cd6599c4f932bfceb2021af21ac15ca0d76b56a6
|
refs/heads/master
| 2023-01-23T09:17:23.580205
| 2021-01-18T15:08:46
| 2021-01-18T15:08:46
| 249,721,001
| 1
| 1
|
MIT
| 2023-01-09T22:30:23
| 2020-03-24T13:51:58
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 1,558
|
py
|
import os
import json
from six.moves.urllib.request import urlopen
from jose import jwt
AUTH0_DOMAIN = os.environ.get("AUTH0_DOMAIN")
AUTH0_API_ID = os.environ.get("AUTH0_API_ID")
def verify_token(token):
# Validate the token to make sure it's authentic
jsonurl = urlopen("https://"+AUTH0_DOMAIN+"/.well-known/jwks.json")
jwks = json.loads(jsonurl.read())
# This currently expects the token to have three distinct sections
# each separated by a period.
unverified_header = jwt.get_unverified_header(token)
rsa_key = {}
for key in jwks["keys"]:
if key["kid"] == unverified_header["kid"]:
rsa_key = {
"kty": key["kty"],
"kid": key["kid"],
"use": key["use"],
"n": key["n"],
"e": key["e"]
}
if rsa_key:
try: # to validate the jwt
payload = jwt.decode(
token,
rsa_key,
algorithms=["RS256"],
audience=AUTH0_API_ID,
issuer="https://"+AUTH0_DOMAIN+"/"
)
print("token validated successfully")
return payload
except jwt.ExpiredSignatureError:
print("Token is expired")
raise Exception('Unauthorized')
except jwt.JWTClaimsError:
print("Token has invalid claims")
raise Exception('Unauthorized')
except Exception:
print("Unable to parse token")
raise Exception('Unauthorized')
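
# A minimal usage sketch, not part of the original backend: how verify_token
# might be called from an API Gateway-style Lambda handler. The handler name
# and the assumed event shape (an "Authorization: Bearer <token>" header) are
# illustrative assumptions only.
def example_handler(event, context):
    auth_header = (event.get("headers") or {}).get("Authorization", "")
    if not auth_header.startswith("Bearer "):
        raise Exception('Unauthorized')
    # verify_token raises on an invalid or expired token and returns the
    # decoded claims on success (or None if no matching signing key is found)
    payload = verify_token(auth_header[len("Bearer "):])
    if payload is None:
        raise Exception('Unauthorized')
    return {
        "statusCode": 200,
        "body": json.dumps({"sub": payload.get("sub")})
    }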
|
[
"mengwangk@gmail.com"
] |
mengwangk@gmail.com
|
d7dcf1a33e440f7aba2c1ecc218ad4394276baf1
|
9caaf8613ca4f351275bebc0aa2cbe0f3e65beb0
|
/classes.py
|
0eda94d15cf5fa3538eb1040b93f9516eea4ce48
|
[] |
no_license
|
fatimarenderos/Polyglot-Sidequest
|
95e05dd66843d66bb307ec64aa460dd93736a4c8
|
7456adbb3aa12c9b6f675373a759d4eded9349e8
|
refs/heads/main
| 2023-05-12T06:39:35.958044
| 2021-05-29T00:11:41
| 2021-05-29T00:11:41
| 371,844,132
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,288
|
py
|
from enum import Enum
from abc import ABCMeta, abstractmethod
class indicators(Enum):
NOTHING = 0
class lines(Enum):
NOLINE = 0
SINGLELINE = 1
DOUBLELINE = 2
class modes(Enum):
NOMODE = 0
INT_FLOAT = 1
INT_FLOAT_FLOAT = 2
INT_INT_INT_INT = 3
class parameters(Enum):
THERMAL_CONDUCTIVITY = 0
HEAT_SOURCE = 1
class sizes(Enum):
NODES = 0
ELEMENTS = 1
DIRICHLET = 2
NEUMANN = 3
class Item(metaclass=ABCMeta):
def setId(self, id):
self._id = id
def setX(self, x):
self._x = x
def setY(self, y):
self._y = y
def setNode1(self, nodo1):
self._nodo1 = nodo1
def setNode2(self, nodo2):
self._nodo2 = nodo2
def setNode3(self, nodo3):
self._nodo3 = nodo3
def setValue(self, valores):
self._valores = valores
def getId(self):
return self._id
def getX(self):
return self._x
def getY(self):
return self._y
def getNode1(self):
return self._nodo1
def getNode2(self):
return self._nodo2
def getNode3(self):
return self._nodo3
def getValue(self):
return self._valores
@abstractmethod
def setValues(self, a, b, c, d, e, f, g):
pass
class node(Item):
def setValues(self, a, b, c, d, e, f, g):
self._id = a
self._x = b
self._y = c
class element(Item):
def setValues(self, a, b, c, d, e, f, g):
self._id = a
self._nodo1 = d
self._nodo2 = e
self._nodo3 = f
class condition(Item):
def setValues(self, a, b, c, d, e, f, g):
self._nodo1 = d
self._valores = g
class mesh:
parameters = [2]
sizes = [4]
def setParameters(self, k, Q):
self.parameters.insert(parameters.THERMAL_CONDUCTIVITY.value,k)
self.parameters.insert(parameters.HEAT_SOURCE.value,Q)
def setSizes(self, nnodes, neltos, ndirich, nneu):
self.sizes.insert(sizes.NODES.value, nnodes)
self.sizes.insert(sizes.ELEMENTS.value, neltos)
self.sizes.insert(sizes.DIRICHLET.value, ndirich)
self.sizes.insert(sizes.NEUMANN.value, nneu)
def getSize(self, s):
return self.sizes[s]
def getParameter(self, p):
return self.parameters[p]
def createData(self):
self.node_list = []
self.element_list = []
self.indices_dirich = []
self.dirichlet_list = []
self.neuman_list = []
def getNodes(self):
return self.node_list
def getElements(self):
return self.element_list
def getDirichletIndices(self):
return self.indices_dirich
def getDirichlet(self):
return self.dirichlet_list
def getNeumann(self):
return self.neuman_list
def getNode(self, i):
return self.node_list[i]
def getElement(self, i):
return self.element_list[i]
def getCondition(self, i, type):
if type == sizes.DIRICHLET.value : return self.dirichlet_list[i]
else : return self.neuman_list[i]
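
# Minimal illustrative usage, not part of the original module; the numeric
# values and the three-node triangle below are made up for the example.
if __name__ == '__main__':
    n1, n2, n3 = node(), node(), node()
    n1.setValues(1, 0.0, 0.0, None, None, None, None)
    n2.setValues(2, 1.0, 0.0, None, None, None, None)
    n3.setValues(3, 0.0, 1.0, None, None, None, None)
    e1 = element()
    e1.setValues(1, None, None, n1, n2, n3, None)
    m = mesh()
    m.setParameters(1.0, 0.0)          # thermal conductivity k, heat source Q
    m.setSizes(3, 1, 0, 0)             # nodes, elements, Dirichlet, Neumann
    m.createData()
    m.getNodes().extend([n1, n2, n3])
    m.getElements().append(e1)
    print(m.getSize(sizes.NODES.value), m.getParameter(parameters.HEAT_SOURCE.value))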
|
[
"noreply@github.com"
] |
fatimarenderos.noreply@github.com
|
c2e0c1cfd66bc5b8b7071ef717dd8dbf1b53d813
|
54f438fd29c63aa18bee3319c90b3a4beb98502b
|
/tests/test-minioconfiguration.py
|
8e75e0cbedc2dc8da101b1429c66c1c27864e1f5
|
[
"MIT"
] |
permissive
|
ronnyfriedland/minioclient
|
e949987c8e55f7e67ade30683177ad0b42311064
|
b4a55ed62f1d61f39254a4003ee65b3a778f3526
|
refs/heads/master
| 2020-08-26T19:45:02.041555
| 2020-01-17T19:16:08
| 2020-01-17T19:16:08
| 217,125,583
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 867
|
py
|
import unittest as ut
import os
from config.MinioConfiguration import MinioConfiguration
class TestAuthenticationConfiguration(ut.TestCase):
def test_init(self):
result = MinioConfiguration("config-test.ini")
self.assertIsNotNone(result.config)
def test_has_section(self):
self.assertTrue(MinioConfiguration("config-test.ini").check_config())
def test_read(self):
self.assertEqual(MinioConfiguration("config-test.ini").read_config("url"), "1")
@classmethod
def setUpClass(cls):
test_data = """
[minio]
url=1
accesskey=2
secretkey=3
"""
with open("config-test.ini", "w") as config_file:
config_file.write(test_data)
@classmethod
def tearDownClass(cls):
os.remove("config-test.ini")
if __name__ == '__main__':
ut.main()
|
[
"mail@ronnyfriedland.de"
] |
mail@ronnyfriedland.de
|
53a9111f5f398929fad8cf7a9a0bf02ae6d4ea28
|
189dcd9950284a096e6ced953f5c075bd235308f
|
/py_scripts/COOLH2+H+Hp.py
|
f53cf5f81647b788f5118aa607f502e809ab0116
|
[] |
no_license
|
ANDREMIV/stageM2
|
1ef7144d7ed7463c07b8d74062b4100a78c88f1d
|
6886baefb162ac374b182ca5a9618c02d9abfdf2
|
refs/heads/master
| 2021-04-02T12:28:14.899612
| 2020-06-26T11:48:03
| 2020-06-26T11:48:03
| 248,274,672
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,436
|
py
|
import matplotlib.pyplot as plt
import matplotlib
import numpy as np
from numpy.random import randn
import os
#In each pop file, you need to manually add number of levels, collisionners in y description, level file name, nbofplots
os.chdir('../')
file = "PD2.txt"
f = open(file, "r")
S=f.read()
A=S.splitlines()
L=len(A)
for i in range(0,L,1):
B=A[i].split('\t')
if B[1] == "1.#INF00e+000":
B[1] = "3.017931e+255"
break
L=i
X=randn(L)
Y=randn(L)
for i in range(0,L,1):
B=A[i].split('\t')
X[i]=float(B[0])
Y[i]=float(B[1])
ax = plt.subplot(111)
plt.ylim(1e-46,1e-31)
plt.xlim(10,200)
plt.xscale("log")
ax.invert_xaxis()
plt.yscale("log")
plt.ylabel('Collisional heating-cooling [ $erg.s^{-1}.cm^{-3}$ ]')
plt.xlabel('cosmological doppler shift Z')
plt.grid(b=True, which='major', color='#666666', linestyle='-')
plt.minorticks_on()
plt.grid(b=True, which='minor', color='#999999', linestyle='-', alpha=0.40)
plt.plot(X,Y,"r",label="Flower and Pineau des Forêt (2000)")
f.close()
FFile= "levelsh2-h-H+-rot"
file = FFile+".txt"
f = open(file, "r")
S=f.read()
A=S.splitlines()
L=len(A)
for i in range(0,L,1):
B=A[i].split('\t')
S=int((L-1)/2)
X=randn(S)
V=randn(S)
YP=randn(S)
YN=randn(S)
Z=randn(S)
WP=randn(S)
WN=randn(S)
i=0
for i in range(2,L,1):
B=A[i].split('\t')
I=int((i-2)/2)
if i%2==0:
X[I]=float(B[0])
YP[I]=float(B[3])*1e7 #convert from J/s/cm^3 to erg/s/cm^3
if YP[I]<0:
YN[I]=-1*YP[I]
YP[I]=0
else:
YN[I]=0
Z[I]=float(B[2])
WP[I]=float(B[4])
V[I]=float(B[5])
if WP[I]<0:
WN[I]=-1*WP[I]
WP[I]=0
else:
WN[I]=0
plt.plot(X,YP,label='RADEX')
#plt.plot(X,YN,label='<0')
file = "expansion2.txt"
f = open(file, "r")
S=f.read()
A=S.splitlines()
L=len(A)
CST=3
S=int((L-CST))
XA=randn(S)
YA=randn(S)
ZA=randn(S)
CST2=10
for i in range(CST,L,1):
B=A[i].split('\t')
I=int(i-CST)
XA[I]=float(B[4])
YA[I]=float(B[CST2-1])*1e7 #convert from J/s/cm^3 to erg/s/cm^3
    #ZA[I]=
plt.plot(XA,YA,label="lsoda")
f.close()
leg = plt.legend(loc='best', shadow=True, fancybox=True)
leg.get_frame().set_alpha(0.5)
plt.savefig("coolingZ"+FFile+".png")
plt.show()
ax = plt.subplot(111)
plt.ylim(1e-46,1e-31)
plt.xlim(2,500)
plt.xscale("log")
plt.yscale("log")
plt.ylabel('Collisional heating-cooling [ $erg.s^{-1}.cm^{-3}$ ]')
plt.xlabel('Kinetic Temperature (z) in K')
plt.plot(Z,YP,label='our model')
#plt.plot(Z,YN,label='<0')
leg = plt.legend(loc='best', shadow=True, fancybox=True)
leg.get_frame().set_alpha(0.5)
plt.grid(b=True, which='major', color='#666666', linestyle='-')
plt.minorticks_on()
plt.grid(b=True, which='minor', color='#999999', linestyle='-', alpha=0.40)
plt.savefig("coolingT"+FFile+".png")
plt.show()
ax = plt.subplot(111)
plt.ylim(1e-26,1e-15)
plt.xlim(10,200)
plt.xscale("log")
plt.yscale("log")
plt.ylabel('Temperature derivative [K/s]')
plt.xlabel('cosmological doppler shift Z')
plt.plot(X,WP)#,label='>0')
#plt.plot(X,WN,label='<0')
#leg = plt.legend(loc='best', shadow=True, fancybox=True)
#leg.get_frame().set_alpha(0.5)
plt.grid(b=True, which='major', color='#666666', linestyle='-')
ax.minorticks_on()
ax.grid(b=True, which='minor', color='#999999',axis='both', linestyle='-', alpha=0.40)
plt.savefig("coolingdT"+FFile+".png")
plt.show()
f.close()
|
[
"andremiv@live.fr"
] |
andremiv@live.fr
|