blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
281
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
57
| license_type
stringclasses 2
values | repo_name
stringlengths 6
116
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 313
values | visit_date
timestamp[us] | revision_date
timestamp[us] | committer_date
timestamp[us] | github_id
int64 18.2k
668M
⌀ | star_events_count
int64 0
102k
| fork_events_count
int64 0
38.2k
| gha_license_id
stringclasses 17
values | gha_event_created_at
timestamp[us] | gha_created_at
timestamp[us] | gha_language
stringclasses 107
values | src_encoding
stringclasses 20
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 4
6.02M
| extension
stringclasses 78
values | content
stringlengths 2
6.02M
| authors
listlengths 1
1
| author
stringlengths 0
175
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
c6bf4ec8ef32a6af3ec0f6395c735cbf041f4f92
|
0536c34638b838b2998064b4b5f13ffa00a14e43
|
/ass_to_all_two/ass_module_keyword.py
|
d001ee5b3fc2e93bfd304cc30e5eb8b86451a131
|
[] |
no_license
|
ichoukou/git_repository
|
53e319d7df38e052d5509fc0f94efde4f5155f3b
|
5c4200832076960fd5ef047373e903adba0c5616
|
refs/heads/master
| 2020-07-22T02:13:44.806813
| 2017-01-13T04:57:53
| 2017-01-13T04:57:53
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,133
|
py
|
# -*- coding:utf-8 -*-
import os
import ass_base
import ass_report
import codecs
from ass_module import AssModule
from whoosh.highlight import Formatter,get_text
from whoosh.index import create_in
from whoosh.fields import *
from whoosh.qparser import QueryParser
from whoosh.index import open_dir
from whoosh.analysis import RegexAnalyzer
from whoosh.writing import AsyncWriter
from whoosh.writing import IndexingError
import json
#检测关键词清单
def_keyword = [{'id':'1_5', 'key':'password OR passwd OR passw OR pwd OR pass','name':'可能存在密码直接写在代码中,或者未使用专用密码输入法','cat':'源代码安全'},
{'id':'1_6', 'key':'key or def_keyword','name':'密钥直接写在代码里','cat':'源代码安全'},
{'id':'1_9', 'key':'const-string ','name':'硬编码字符串','cat':'源代码安全'},
{'id':'1_10', 'key':'RSAPublicKey or javax.crypto.Cipher or ENCRYPT( or "MD5" or "RSA" or "SHA" or "MD2" or base64 or DES','name':'没有使用加密算法','not':'1','cat':'源代码安全'},
{'id':'1_9', 'key':'[13000000000 TO 18999999999]','name':'可能存在手机号直接写在代码里','cat':'源代码安全'},
{'id':'2_2', 'key':'http:// OR File','name':'使用了不安全的网络协议','cat':'数据传输安全'},
{'id':'1_11', 'key':'forName','name':'应用了全反射机制','cat':'源代码安全'},
{'id':'0_6', 'key':"'Log.'",'name':'含有调试信息','cat':'数据存储安全'},
{'id':'1_12', 'key':"'insert into ' or 'update ' or delete from",'name':'可能存在直接SQL语句','cat':'源代码安全'},
{'id':'1_8', 'key':"Runtime.getRuntime().exec(\"su\")",'name':'调用root权限','cat':'权限检查'},
{'id':'3_0', 'key':'com.android.phone.PhoneGlobals$NotificationBroadcastReceiver OR engineNextBytes','name':'电话拨打权限绕过漏洞(CVE-2013-6272)','cat':'源代码安全'},
{'id':'1_0', 'key':'PackageManager.GET_SIGNATURES or getCrc()','not':'1','name':'未对自身和系统签名信息进行必要的安全性检查','cat':'自身验证安全'},
#{'key':'System.loadLibrary','name':'直接调用so文件','cat':'源代码安全','condition':'has_so'}
]
class AssFormatter(Formatter):
    """whoosh highlight formatter that appends "(*)" after each matched term.

    NOTE(review): the original docstring claimed square brackets were added;
    the code actually emits ``token(*)``.
    """
    def format_token(self, text, token, replace=False):
        # Use the get_text function to get the text corresponding to the
        # token
        tokentext = get_text(text, token, replace)
        # Return the text as you want it to appear in the highlighted
        # string
        return "%s(*)" % tokentext
class AssKeyword(AssModule):
    """Builds a whoosh full-text index over an APK's decompiled sources and
    reports a finding for each entry of the keyword checklist (Python 2)."""
    # Load the detection keyword list from a JSON file; fall back to the
    # built-in def_keyword when no file is given or loading fails.
    # [json_file  path to a JSON keyword list]
    def set_keyword(self, json_file=''):
        if json_file != '':
            try:
                # self.write_file("new.json", json.dumps(def_keyword))
                with open(json_file) as fp:
                    self.keyword = json.load(fp)
                print(type(self.keyword), self.keyword)
            except:
                print("error of load json")
                self.keyword = def_keyword
        else:
            self.keyword = def_keyword
    def init(self, argv):
        # argv[3] (optional): JSON file overriding the keyword checklist.
        super(AssKeyword, self).init(argv)
        self.apk_index = self.apk_file+".index"
        if len(argv)>3:
            self.set_keyword(argv[3])
        else:
            self.set_keyword()
        self.abc = []        # single-letter source names (obfuscation hint)
        self.decompile = []  # display paths of all decompiled source files
        self.condition = {}  # condition name -> first matching path
        return True
    # Walk <apk_file>.<ext>/<ext> (skipping the bundled android/ tree) and
    # add every *.<ext> file to the whoosh index.
    # [writer  whoosh index writer]
    def build_src_index(self, writer, ext):
        topdir = os.path.join(self.apk_file+"."+ext, ext)
        ext_name = "."+ext
        ext_len = len(ext_name)
        for root, dirs, files in os.walk(topdir, topdown=False):
            # handle file
            if root.find(os.path.join(topdir,"android"))==0:
                continue
            for name in files:
                if name[-ext_len:] == ext_name:
                    # one-letter base names (except R) suggest obfuscated code
                    if len(name[:-ext_len])==1 and name[:-ext_len]!='R':
                        self.abc.append(name)
                    path = os.path.join(root,name)
                    disp_path = path.replace(topdir, '')
                    self.decompile.append(disp_path)
                    try:
                        writer.add_document(path=ass_base.b2u(disp_path), content=ass_base.b2u(ass_base.read_file(path)))
                    except ValueError, Argument:
                        print "add_document error : ", Argument
                    # print(path, self.read_file(path))
    # Create an async writer over the index and feed it the .java sources.
    # [ix  whoosh index]
    def build_index_writer(self, ix):
        try:
            writer = AsyncWriter(ix)
            self.build_src_index(writer, "java")
            writer.commit()
        except IndexingError as ie:
            print ie.message + "index Error!!!"
    # Build (or reopen) the whoosh index for this APK's decompiled sources.
    def build_index(self, force=False):
        self.dex2jar(force)
        # self.smali(force)
        ass_base.rmdir(self.apk_index, force)
        # Rebuild only when the index directory is absent.
        if not os.path.exists(self.apk_index):
            os.mkdir(self.apk_index)
            # Tokenize CJK characters one-by-one and latin words as wholes.
            analyzer = RegexAnalyzer(ur"([\u4e00-\u9fa5])|(\w+(\.?\w+)*)")
            schema = Schema(path=ID(stored=True), content=TEXT(stored=True, analyzer=analyzer))
            ix = create_in(self.apk_index, schema)
            self.build_index_writer(ix)
        else:
            ix = open_dir(self.apk_index)
        return ix
    # Return up to the first ten decompiled source paths as one ','-joined
    # string, always suffixed with ',...'.
    def get_code_files(self):
        out = ''
        if len(self.decompile)>10:
            out = ','.join(self.decompile[:10])
        else:
            out = ','.join(self.decompile)
        return out+',...'
    def run(self):
        """Run the scan: index the sources, then search each keyword and
        record a report item per finding."""
        super(AssKeyword, self).run()
        # scan keyword checkpoints
        self.report.progress("扫描关键词点")
        ix = self.build_index(True)
        # decompilation succeeded / obfuscation checks
        if len(self.decompile)>0:
            #self.report.addItem(', '.join(self.decompile[:10])+',...', "能被反编译", '源代码安全')
            self.report.setItem('1_3', ', '.join(self.decompile) + ',...')
            if len(self.abc) < 3:
                #self.report.addItem(', '.join(self.decompile[:10])+',...', "没有采用代码混淆技术", '源代码安全')
                self.report.setItem('1_2', ', '.join(self.decompile) + ',...')
        with ix.searcher() as searcher:
            for k in self.keyword:
                query = QueryParser("content", ix.schema).parse(k['key'])
                results = searcher.search(query, terms=True)
                if len(results)>1 :
                    results.formatter = AssFormatter()
                if k.get('condition') != None:
                    # NOTE(review): results[0] raises IndexError when the
                    # query matched nothing; 'str' also shadows the builtin.
                    str = results[0]['path']
                    self.condition[k.get('condition')]=ass_base.u2b(str)
                else:
                    if k.get('not')=='1':
                        # inverted rule: report only when nothing matched
                        if len(results)==0:
                            #self.report.addItem("", k['name'])
                            self.report.setItem(k['id'], u'无')
                    else:
                        if len(results)>0:
                            #self.report.addItem(ass_base.u2b(results[0]['path']+"\n"+results[0].highlights("content")), k['name'], k['cat'])
                            self.report.setItem(k['id'], ass_base.u2b(results[0]['path']+"\n"+results[0].highlights("content")))
        #print(self.condition.items())
        ix.close()
        self.clean()
# Script entry point; main() is inherited from AssModule (presumably it
# drives init()/run() — confirm in ass_module).
if __name__=="__main__":
    AssKeyword().main()
|
[
"415787837@qq.com"
] |
415787837@qq.com
|
9e08ed3d8b5fc2a5093ed686b9c5014984327021
|
9652657ce62bdbca2b0578d451ba5cf853a42400
|
/form/redisDB.py
|
0decff328a39fc332904a9ee2de4d16fef7c626e
|
[] |
no_license
|
astange/ExpoProject
|
775ca1785032cfbc858c90dfddde5a1af22daf06
|
fc233802a078728b69e70ced112a94a4163053f4
|
refs/heads/master
| 2016-09-05T11:04:12.349586
| 2014-07-29T05:28:06
| 2014-07-29T05:28:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 13
|
py
|
../redisDB.py
|
[
"anthony@Anthonys-Laptop.local"
] |
anthony@Anthonys-Laptop.local
|
08ffa38d648d852c717fc3bd315fe33ff3f51547
|
2a8b5252ab21612e692999a4ca2d7d2e3a1d5143
|
/table.py
|
ae467dd8c4cbaf6b7693b5d680cdf9434c5cc66b
|
[] |
no_license
|
faunic/ideal-enigma
|
777835d77d0148d2fda9a2070127d14a1a870b3e
|
bf55c84f7879fc1c98a68cb37170536788ecdb56
|
refs/heads/main
| 2023-05-07T15:28:48.476205
| 2021-05-25T12:54:34
| 2021-05-25T12:54:34
| 347,642,502
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 733
|
py
|
# DNA codon table: maps each 3-base codon to the one-letter amino-acid
# code it encodes.  The three '_' entries (TAA, TAG, TGA) appear to mark
# stop codons, matching the standard genetic code.
table = {
    'ATA':'I', 'ATC':'I', 'ATT':'I', 'ATG':'M',
    'ACA':'T', 'ACC':'T', 'ACG':'T', 'ACT':'T',
    'AAC':'N', 'AAT':'N', 'AAA':'K', 'AAG':'K',
    'AGC':'S', 'AGT':'S', 'AGA':'R', 'AGG':'R',
    'CTA':'L', 'CTC':'L', 'CTG':'L', 'CTT':'L',
    'CCA':'P', 'CCC':'P', 'CCG':'P', 'CCT':'P',
    'CAC':'H', 'CAT':'H', 'CAA':'Q', 'CAG':'Q',
    'CGA':'R', 'CGC':'R', 'CGG':'R', 'CGT':'R',
    'GTA':'V', 'GTC':'V', 'GTG':'V', 'GTT':'V',
    'GCA':'A', 'GCC':'A', 'GCG':'A', 'GCT':'A',
    'GAC':'D', 'GAT':'D', 'GAA':'E', 'GAG':'E',
    'GGA':'G', 'GGC':'G', 'GGG':'G', 'GGT':'G',
    'TCA':'S', 'TCC':'S', 'TCG':'S', 'TCT':'S',
    'TTC':'F', 'TTT':'F', 'TTA':'L', 'TTG':'L',
    'TAC':'Y', 'TAT':'Y', 'TAA':'_', 'TAG':'_',
    'TGC':'C', 'TGT':'C', 'TGA':'_', 'TGG':'W', }
|
[
"noreply@github.com"
] |
noreply@github.com
|
89dd04e8b979a718563ec8de70bc2e2eff652161
|
6380adb492ee95afbbad559e5017af32830512e7
|
/lab-2/PCY.py
|
96abcdc03451b0a23e397d4ad754ecb7a20ec094
|
[] |
no_license
|
square-brackets/avsp-2018
|
743cf258037cb9ab8f2cfa13edd6ae37c36f4540
|
7076f086a5198d04d4367ff4d7396e1c3eb4ac05
|
refs/heads/master
| 2020-04-03T06:38:42.252574
| 2018-10-28T14:38:20
| 2018-10-28T14:38:20
| 155,080,142
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,059
|
py
|
import sys
import time
from itertools import combinations
def count_items(bucket_count):
    """Read `bucket_count` lines of space-separated integers from stdin.

    Returns (item_count, bucket_items): item_count maps every item to its
    total number of occurrences across all buckets, and bucket_items keeps
    one list of items per input line, in input order.
    """
    item_count = {}
    bucket_items = []
    for _ in range(bucket_count):
        items = [int(token) for token in sys.stdin.readline().split(' ')]
        for item in items:
            item_count[item] = item_count.get(item, 0) + 1
        bucket_items.append(items)
    return item_count, bucket_items
def create_itemsets(buckets_items, itemset_count, item_count, threshold):
    """Hash-count candidate pairs (PCY pass 1).

    For every ordered pair of items within a bucket whose individual
    counts both reach `threshold`, increment hash bucket
    ``(itemA * len(item_count) + itemB) % itemset_count``.

    Returns the list of `itemset_count` hash-bucket counters.
    """
    itemsets = [0] * itemset_count
    num_items = len(item_count)
    # Hoist the support test: one set lookup per item instead of two dict
    # lookups per pair in the inner loop.  (Also drops the stray C-style
    # semicolons of the original.)
    frequent = {item for item, count in item_count.items() if count >= threshold}
    for bucket_items in buckets_items:
        for item_a, item_b in combinations(bucket_items, 2):
            if item_a in frequent and item_b in frequent:
                itemsets[(item_a * num_items + item_b) % itemset_count] += 1
    return itemsets
if __name__ == '__main__':
    # Input format: line 1 = number of buckets, line 2 = support fraction
    # s, line 3 = number of hash buckets for pair counting.
    bucket_count = int(sys.stdin.readline())
    s = float(sys.stdin.readline())
    itemset_count = int(sys.stdin.readline())
    # Absolute support threshold.
    threshold = int(s * bucket_count)
    item_count, buckets_items = count_items(bucket_count)
    itemsets = create_itemsets(buckets_items, itemset_count, item_count, threshold)
    # start_time = time.time()
    # PCY pass 2: count only pairs of frequent items whose hash bucket is
    # itself frequent.
    pairs = {}
    for bucket_items in buckets_items:
        for itemA, itemB in combinations(bucket_items, 2):
            if item_count[itemA] >= threshold and item_count[itemB] >= threshold:
                k = (itemA * len(item_count) + itemB) % itemset_count
                if (itemsets[k] >= threshold):
                    pairs[(itemA, itemB)] = pairs.get((itemA, itemB), 0) + 1
    # print('[COUNT_PAIRS]: ', time.time() - start_time)
    # Output: number of non-empty hash buckets, number of candidate pairs,
    # then each pair's count in descending order.
    print(len(list(filter(lambda x: x > 0, itemsets))))
    print(len(pairs))
    for s in sorted(pairs, key=pairs.get, reverse=True):
        print(pairs[s])
|
[
"stjepan.petrusa@gmail.com"
] |
stjepan.petrusa@gmail.com
|
2f0cb96aaa337f7309712bd930d65de11673c433
|
55c250525bd7198ac905b1f2f86d16a44f73e03a
|
/Python/Pytest/pytest-django/pytest_django/plugin.py
|
cbfe15f79cb04f0e152ebe02bc8b4d3886108f5f
|
[
"BSD-3-Clause"
] |
permissive
|
NateWeiler/Resources
|
213d18ba86f7cc9d845741b8571b9e2c2c6be916
|
bd4a8a82a3e83a381c97d19e5df42cbababfc66c
|
refs/heads/master
| 2023-09-03T17:50:31.937137
| 2023-08-28T23:50:57
| 2023-08-28T23:50:57
| 267,368,545
| 2
| 1
| null | 2022-09-08T15:20:18
| 2020-05-27T16:18:17
| null |
UTF-8
|
Python
| false
| false
| 130
|
py
|
version https://git-lfs.github.com/spec/v1
oid sha256:4b9c174912c01ae59fb496601d8c4ecf26765ee33134d079295304c25873875a
size 26008
|
[
"nateweiler84@gmail.com"
] |
nateweiler84@gmail.com
|
a81b4c8c52d69ca1c7a214f2eef794aea07a5f65
|
8610b91f0f36e0df7f343c55929e5861bf0eb144
|
/Smart Reply_02_Apr_2018.py
|
9afb0d7cd1992c0664fe01816ecabf33d4ff609d
|
[] |
no_license
|
abhijitdalavi/SmartReply
|
1236aa3e85cee2aeefa1362d7c54f5a9009109fc
|
a0f2c384550b579fc56b86a28412720793210f8d
|
refs/heads/master
| 2020-11-30T01:06:13.369949
| 2018-04-09T17:28:05
| 2018-04-09T17:28:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 18,750
|
py
|
# coding: utf-8
# In[1]:
from keras.models import Model
from keras.layers.recurrent import LSTM
from keras.layers import Dense, Input, Embedding
from keras.preprocessing.sequence import pad_sequences
from keras.callbacks import ModelCheckpoint
from keras.utils.vis_utils import plot_model
from keras.preprocessing.text import Tokenizer
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
from nltk.tokenize import TweetTokenizer
from nltk.stem.wordnet import WordNetLemmatizer
#from wordcloud import WordCloud
import gensim
from gensim.models import word2vec
import logging
import tensorflow as tf
from collections import Counter
import nltk
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
import urllib.request
import os
import sys
import zipfile
import logging
import pydot
import graphviz
import re
#os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
#import logging
#logger = logging.getLogger("Testing_Model")
#logger.setLevel(logging.INFO)
#https://github.com/chen0040/keras-chatbot-web-api/blob/master/chatbot_train/cornell_word_seq2seq_glove_train.py
# In[11]:
# **********************************************************************
# Reading a pre-trained word embedding and addapting to our vocabulary:
# **********************************************************************
def load_glove():
    """Load GloVe vectors from 'glove.6B.100d.txt' in the working directory.

    Returns a dict mapping each word to its float32 numpy vector.
    """
    embeddings_index = {}
    #f = open(os.path.join(GLOVE_DIR, 'glove.6B.100d.txt'))
    # 'with' guarantees the file is closed even if a line fails to parse
    # (the original leaked the handle on error).
    with open('glove.6B.100d.txt', encoding='utf8') as f:
        for line in f:
            values = line.split()
            word = values[0]
            coefs = np.asarray(values[1:], dtype='float32')
            embeddings_index[word] = coefs
    return embeddings_index
def init_stopwords():
    """Build the text-normalisation helpers.

    Returns (lem, APPO, eng_stopwords):
      lem           - a WordNetLemmatizer instance
      APPO          - contraction -> expansion lookup table
      eng_stopwords - NLTK English stopword set plus user additions
    """
    lem = WordNetLemmatizer()
    #https://drive.google.com/file/d/0B1yuv8YaUVlZZ1RzMFJmc1ZsQmM/view
    # Apostrophe lookup dict.  Fixes vs. the original literal: removed the
    # duplicate "i'd" and "didn't" keys (Python keeps only the last value
    # anyway) and corrected "we'll" -> "we will" (was " will").
    APPO = {
        "aren't" : "are not",
        "can't" : "cannot",
        "couldn't" : "could not",
        "didn't" : "did not",
        "doesn't" : "does not",
        "don't" : "do not",
        "hadn't" : "had not",
        "hasn't" : "has not",
        "haven't" : "have not",
        "he'd" : "he would",
        "he'll" : "he will",
        "he's" : "he is",
        "i'd" : "I had",
        "i'll" : "I will",
        "i'm" : "I am",
        "isn't" : "is not",
        "it's" : "it is",
        "it'll":"it will",
        "i've" : "I have",
        "let's" : "let us",
        "mightn't" : "might not",
        "mustn't" : "must not",
        "shan't" : "shall not",
        "she'd" : "she would",
        "she'll" : "she will",
        "she's" : "she is",
        "shouldn't" : "should not",
        "that's" : "that is",
        "there's" : "there is",
        "they'd" : "they would",
        "they'll" : "they will",
        "they're" : "they are",
        "they've" : "they have",
        "we'd" : "we would",
        "we're" : "we are",
        "weren't" : "were not",
        "we've" : "we have",
        "what'll" : "what will",
        "what're" : "what are",
        "what's" : "what is",
        "what've" : "what have",
        "where's" : "where is",
        "who'd" : "who would",
        "who'll" : "who will",
        "who're" : "who are",
        "who's" : "who is",
        "who've" : "who have",
        "won't" : "will not",
        "wouldn't" : "would not",
        "you'd" : "you would",
        "you'll" : "you will",
        "you're" : "you are",
        "you've" : "you have",
        "'re": " are",
        "wasn't": "was not",
        "we'll": "we will",
        "tryin'":"trying"
    }
    eng_stopwords = set(stopwords.words("english"))
    # Project-specific additions; currently empty, so the loop is a no-op.
    user_stop_words = []
    print("Eng stopwords before user stopwords:: ", len(eng_stopwords))
    for w in user_stop_words:
        if w not in eng_stopwords:
            eng_stopwords.add(w)
    print("Eng stopwords after user stopwords:: ",len(eng_stopwords))
    return lem, APPO, eng_stopwords
def pre_process(text, lem, APPO, eng_stopwords):
    """Normalise one input sentence.

    Lowercases, strips the boilerplate phrases "was good."/"was bad.",
    expands apostrophe contractions via APPO, lemmatizes verbs, then drops
    punctuation and stopwords.  Returns the cleaned space-joined string.
    """
    text = text.lower()
    # Escape the dot: the original patterns "was good." / "was bad." let
    # '.' match ANY character, so e.g. "was goods" was also stripped.
    text = re.sub(r"was good\.", " ", text)
    text = re.sub(r"was bad\.", " ", text)
    words = word_tokenize(text)
    # (')apostrophe replacement (ie) you're --> you are
    words = [APPO[word] if word in APPO else word for word in words]
    words = [lem.lemmatize(word, "v") for word in words]
    # keep alphabetic tokens only (removes punctuation), then stopwords
    words = [w.lower() for w in words if w.isalpha()]
    words = [w for w in words if not w in eng_stopwords]
    clean_sent = " ".join(words)
    return clean_sent
# **********************************************************************
# Developing our vocabulary from the dataset:
# **********************************************************************
def load_vocab(self):
    """Train word2vec on self.input_texts and return a word -> 100-dim
    vector dict, with GloVe vectors added for the 'start'/'stop' markers.

    NOTE(review): the decoder targets use 'start'/'end' (see read_input),
    yet this loads GloVe vectors for 'start'/'stop' — the 'stop' entry
    looks unused; confirm which marker was intended.
    """
    #print(type(inp_clean_corpus))
    #sentences = inp_clean_corpus.tolist()
    sentences = self.input_texts
    print(sentences[1:5])
    print(type(sentences))
    model = word2vec.Word2Vec(sentences, iter=5, min_count=5, size=100, workers=4)
    vocab_size = len(model.wv.vocab)
    print(vocab_size)
    # get the most common words
    print(model.wv.index2word[0:10])
    # get the least common words
    # NOTE(review): this slice (start > stop) is always empty; probably
    # meant to be [vocab_size-10:vocab_size].
    print(model.wv.index2word[vocab_size - 1:vocab_size-10])
    # convert the wv word vectors into a numpy matrix
    # embedding_matrix = np.zeros((len(model.wv.vocab), 10))
    # for i in range(len(model.wv.vocab)):
    #     embedding_vector = model.wv[model.wv.index2word[i]]
    #     if embedding_vector is not None:
    #         embedding_matrix[i] = embedding_vector
    # print(embedding_matrix[0:5])
    # convert the wv word vectors into a numpy matrix
    # NOTE(review): `word` iterates the vocab dict while the vector comes
    # from index2word[i]; the pairing assumes both orders agree — verify.
    embedding_matrix = {}
    for i, word in enumerate(model.wv.vocab):
        embedding_vector = model.wv[model.wv.index2word[i]]
        if embedding_vector is not None:
            embedding_matrix[word] = embedding_vector
    embeddings_index = {}
    #f = open(os.path.join(GLOVE_DIR, 'glove.6B.100d.txt'))
    f = open('glove.6B.100d.txt', encoding = 'utf8')
    for line in f:
        values = line.split()
        word = values[0]
        # only the sequence markers are taken from GloVe
        if word in ('start','stop'):
            coefs = np.asarray(values[1:], dtype='float32')
            embeddings_index[word] = coefs
    f.close()
    embedding_matrix['start']=embeddings_index['start']
    embedding_matrix['stop']=embeddings_index['stop']
    return embedding_matrix
# **********************************************************************
# Reading input text and the replies
# **********************************************************************
def read_input():
    """Read the SentimentText/ResponseText CSV (module global DATA), clean
    the input side, and tokenize both sides.

    Returns (input_texts, target_texts, target_counter): encoder token
    lists, 'start'/'end'-wrapped decoder token lists, and a Counter of
    target-word frequencies.
    """
    df = pd.read_csv(DATA, encoding = 'latin-1')
    input1 = df['SentimentText'].fillna("")
    output1 = df['ResponseText'].fillna("")
    print(len(df['SentimentText']))
    # pre-processing only the input data
    lem, APPO, eng_stopwords = init_stopwords()
    inp_clean_corpus = input1.apply(lambda x: pre_process(str(x), lem, APPO, eng_stopwords))
    print(type(inp_clean_corpus))
    # vocab = load_vocab(inp_clean_corpus)
    # some stats about input data (diagnostics only; mod_tok_inp is not
    # used to truncate anything below)
    num_words = 5000
    tokenize = Tokenizer(num_words=num_words)
    tokenize.fit_on_texts(inp_clean_corpus)
    tok_inp = tokenize.texts_to_sequences(inp_clean_corpus)
    inp_len = [len(words) for words in tok_inp]
    # cut-off = mean + 2 standard deviations of the token counts
    mod_tok_inp = int(np.mean(inp_len) + 2 * np.std(inp_len))
    print("\nmean:: ", np.mean(inp_len))
    print("max:: ", np.max(inp_len))
    print("inp token size - average + 2*sd --> :: ", (mod_tok_inp))
    print("inp - total inp tokens:: ", len(inp_len))
    final_len_inp = [(tok) for tok in inp_len if tok < mod_tok_inp]
    final_len_inp = list(final_len_inp)
    print("inp - no of tokens < mod_tok :: ", len(final_len_inp))
    input1 = inp_clean_corpus.tolist()
    output1 = output1.tolist()
    target_counter = Counter()
    input_texts = []
    target_texts = []
    print(type(input1))
    print("input:: \n", input1[1:5])
    print(type(output1))
    print("output:: \n", output1[1:5])
    # truncate both sides to MAX_TARGET_SEQ_LENGTH tokens
    for line in input1:
        inp_words = [w.lower() for w in nltk.word_tokenize(line)]
        if len(inp_words) > MAX_TARGET_SEQ_LENGTH:
            inp_words = inp_words[0:MAX_TARGET_SEQ_LENGTH]
        input_texts.append(inp_words)
    for line1 in output1:
        out_words = [w.lower() for w in nltk.word_tokenize(line1)]
        if len(out_words) > MAX_TARGET_SEQ_LENGTH:
            out_words = out_words[0:MAX_TARGET_SEQ_LENGTH]
        tar_words = out_words[:]
        # wrap each reply with sequence markers for the decoder
        tar_words.insert(0, 'start')
        tar_words.append('end')
        for w in tar_words:
            target_counter[w] += 1
        target_texts.append(tar_words)
    print("\n Input texts :: \n\n ", input_texts[1:5])
    print("\n Target texts :: \n\n ",target_texts[1:5])
    return input_texts, target_texts, target_counter
def get_target(self):
    """Build the decoder vocabulary and embed the encoder inputs.

    Module-level function that takes the chatbot instance explicitly.
    Returns (target_word2idx, target_idx2word, context, input_texts_word2em)
    where context records num_decoder_tokens and the maximum encoder and
    decoder sequence lengths.
    """
    target_word2idx = dict()
    # index 0 is reserved for the out-of-vocabulary token 'UNK'
    for idx, word in enumerate(self.target_counter.most_common(MAX_VOCAB_SIZE)):
        target_word2idx[word[0]] = idx + 1
    if 'UNK' not in target_word2idx:
        target_word2idx['UNK'] = 0
    target_idx2word = dict([(idx, word) for word, idx in target_word2idx.items()])
    num_decoder_tokens = len(target_idx2word)+1
    input_texts_word2em = []
    encoder_max_seq_length = 0
    decoder_max_seq_length = 0
    for input_words, target_words in zip(self.input_texts, self.target_texts):
        encoder_input_wids = []
        for w in input_words:
            # words without an embedding get the zero vector
            emb = np.zeros(shape=GLOVE_EMBEDDING_SIZE)
            if w in self.word2em:
                emb = self.word2em[w]
            encoder_input_wids.append(emb)
        input_texts_word2em.append(encoder_input_wids)
        encoder_max_seq_length = max(len(encoder_input_wids), encoder_max_seq_length)
        decoder_max_seq_length = max(len(target_words), decoder_max_seq_length)
    #print("input_texts_word2em for first 2 sentenses:: \n", input_texts_word2em[1:3])
    context = dict()
    context['num_decoder_tokens'] = num_decoder_tokens
    context['encoder_max_seq_length'] = encoder_max_seq_length
    context['decoder_max_seq_length'] = decoder_max_seq_length
    return target_word2idx, target_idx2word, context, input_texts_word2em
def generate_batch(input_word2em_data, output_text_data, self):
    """Infinite batch generator for fit_generator.

    Yields ([encoder_inputs, decoder_inputs], decoder_one_hot_targets);
    the targets are the decoder inputs shifted one step left (teacher
    forcing).  Note the unconventional trailing `self` parameter — callers
    pass the chatbot instance explicitly.
    """
    num_batches = len(input_word2em_data) // BATCH_SIZE
    print("context:: \n", self.context)
    print("len of input data :: ", len(input_word2em_data))
    print("num of batches :: ", num_batches)
    while True:
        for batchIdx in range(0, num_batches):
            start = batchIdx * BATCH_SIZE
            end = (batchIdx + 1) * BATCH_SIZE
            encoder_input_data_batch = pad_sequences(input_word2em_data[start:end], self.context['encoder_max_seq_length'])
            decoder_target_data_batch = np.zeros(shape=(BATCH_SIZE, self.context['decoder_max_seq_length'], self.num_decoder_tokens))
            decoder_input_data_batch = np.zeros(shape=(BATCH_SIZE, self.context['decoder_max_seq_length'], GLOVE_EMBEDDING_SIZE))
            for lineIdx, target_words in enumerate(output_text_data[start:end]):
                for idx, w in enumerate(target_words):
                    w2idx = self.target_word2idx['UNK'] # default UNK
                    if w in self.target_word2idx:
                        w2idx = self.target_word2idx[w]
                    if w in self.word2em:
                        decoder_input_data_batch[lineIdx, idx, :] = self.word2em[w]
                    # target is the next-step one-hot of the same sequence
                    if idx > 0:
                        decoder_target_data_batch[lineIdx, idx - 1, w2idx] = 1
            yield [encoder_input_data_batch, decoder_input_data_batch], decoder_target_data_batch
class CornellWordGloveChatBot(object):
    """Seq2seq (encoder/decoder LSTM) smart-reply chatbot.

    Constructing an instance reads the data, builds and trains the model,
    saves the weights, and assembles separate encoder/decoder inference
    models used by reply().
    """
    model = None
    encoder_model = None
    decoder_model = None
    target_counter = None
    target_word2idx = None
    target_idx2word = None
    max_decoder_seq_length = None
    max_encoder_seq_length = None
    num_decoder_tokens = None
    word2em = None
    context = None
    input_texts = None
    target_texts = None
    def __init__(self):
        """Load data, build the seq2seq model, train it, and derive the
        inference-time encoder and decoder models."""
        #self.word2em = load_glove()
        self.input_texts, self.target_texts, self.target_counter = read_input()
        print("input texts:: \n", self.input_texts[0:5])
        self.word2em = load_vocab(self)
        print("Length of word2em :: ", len(self.word2em))
        for idx, (input_words, target_words) in enumerate(zip(self.input_texts, self.target_texts)):
            if idx > 10:
                break
            print([input_words, target_words])
        self.target_word2idx, self.target_idx2word , self.context, input_texts_word2em = get_target(self)
        self.max_encoder_seq_length = self.context['encoder_max_seq_length']
        self.max_decoder_seq_length = self.context['decoder_max_seq_length']
        self.num_decoder_tokens = self.context['num_decoder_tokens']
        print("context: ",self.context)
        # --- training model: encoder LSTM feeding its state to a decoder
        # LSTM with a softmax over the target vocabulary ---
        encoder_inputs = Input(shape=(None, GLOVE_EMBEDDING_SIZE), name='encoder_inputs')
        encoder_lstm1 = LSTM(units=HIDDEN_UNITS, return_state=True, name="encoder_lstm1" , dropout=0.2)
        #logger.info("Added LSTM Layer")
        #encoder_lstm2 = LSTM(units=HIDDEN_UNITS, return_state=True, name="encoder_lstm2", dropout=0.2)
        #encoder_lstm3 = LSTM(units=HIDDEN_UNITS, return_state=True, name="encoder_lstm3")
        #x = encoder_lstm1(encoder_inputs)
        encoder_outputs, encoder_state_h, encoder_state_c = encoder_lstm1(encoder_inputs)
        encoder_states = [encoder_state_h, encoder_state_c]
        decoder_inputs = Input(shape=(None, GLOVE_EMBEDDING_SIZE), name='decoder_inputs')
        decoder_lstm = LSTM(units=HIDDEN_UNITS, return_sequences=True, return_state=True, name='decoder_lstm', dropout=0.2)
        decoder_outputs, _, _ = decoder_lstm(decoder_inputs, initial_state=encoder_states)
        decoder_dense = Dense(self.num_decoder_tokens, activation='softmax', name='decoder_dense')
        decoder_outputs = decoder_dense(decoder_outputs)
        self.model = Model([encoder_inputs, decoder_inputs], decoder_outputs)
        print(self.model.summary())
        #plot_model(self.model, to_file='RNN_model.png', show_shapes=True)
        #self.model.load_weights('../chatbot_train/models/' + DATA_SET_NAME + '/word-glove-weights.h5')
        self.model.compile(optimizer='rmsprop', loss='categorical_crossentropy')
        # --- training ---
        Xtrain, Xtest, Ytrain, Ytest = train_test_split(input_texts_word2em, self.target_texts, test_size=0.2, random_state=42)
        print("Length of train data:: ", len(Xtrain))
        print("Length of test data:: ", len(Xtest))
        train_gen = generate_batch(Xtrain, Ytrain, self)
        test_gen = generate_batch(Xtest, Ytest, self)
        train_num_batches = len(Xtrain) // BATCH_SIZE
        test_num_batches = len(Xtest) // BATCH_SIZE
        #checkpoint = ModelCheckpoint(filepath=WEIGHT_FILE_PATH, save_best_only=True)
        self.model.fit_generator(generator=train_gen, steps_per_epoch=train_num_batches,
                                 epochs=NUM_EPOCHS,
                                 verbose=1, validation_data=test_gen, validation_steps=test_num_batches ) #, callbacks=[checkpoint])
        self.model.save_weights(WEIGHT_FILE_PATH)
        # --- inference models: the encoder returns its states; the decoder
        # is re-wired to accept externally-fed states for step-wise decoding
        self.encoder_model = Model(encoder_inputs, encoder_states)
        #plot_model(self.encoder_model, to_file='RNN_encoder_model.png', show_shapes=True)
        decoder_state_inputs = [Input(shape=(HIDDEN_UNITS,)), Input(shape=(HIDDEN_UNITS,))]
        decoder_outputs, state_h, state_c = decoder_lstm(decoder_inputs, initial_state=decoder_state_inputs)
        decoder_states = [state_h, state_c]
        decoder_outputs = decoder_dense(decoder_outputs)
        self.decoder_model = Model([decoder_inputs] + decoder_state_inputs, [decoder_outputs] + decoder_states)
        #plot_model(self.decoder_model, to_file='RNN_decoder_model.png', show_shapes=True)
    def reply(self, input_text):
        """Greedy-decode a reply for `input_text`; returns the reply string
        (without the 'start'/'end' markers)."""
        input_seq = []
        input_emb = []
        print("input text:: \n\n ", input_text)
        # pre-processing only the input data
        lem, APPO, eng_stopwords = init_stopwords()
        clean_input_text = pre_process(str(input_text), lem, APPO, eng_stopwords)
        for word in nltk.word_tokenize(clean_input_text.lower()):
            emb = np.zeros(shape=GLOVE_EMBEDDING_SIZE)
            if word in self.word2em:
                emb = self.word2em[word]
            input_emb.append(emb)
        input_seq.append(input_emb)
        input_seq = pad_sequences(input_seq, self.max_encoder_seq_length)
        states_value = self.encoder_model.predict(input_seq)
        # seed decoding with the 'start' marker embedding
        target_seq = np.zeros((1, 1, GLOVE_EMBEDDING_SIZE))
        target_seq[0, 0, :] = self.word2em['start']
        target_text = ''
        target_text_len = 0
        terminated = False
        while not terminated:
            output_tokens, h, c = self.decoder_model.predict([target_seq] + states_value)
            #print("output tokens shape :: \n\n ", output_tokens.shape)
            # greedy choice: most probable next word
            sample_token_idx = np.argmax(output_tokens[0, -1, :])
            sample_word = self.target_idx2word[sample_token_idx]
            target_text_len += 1
            if sample_word != 'start' and sample_word != 'end':
                #print("sample word :: ", sample_word)
                target_text += ' ' + sample_word
            if sample_word == 'end' or target_text_len >= self.max_decoder_seq_length:
                terminated = True
            # feed the sampled word (and updated states) back in
            target_seq = np.zeros((1, 1, GLOVE_EMBEDDING_SIZE))
            if sample_word in self.word2em:
                target_seq[0, 0, :] = self.word2em[sample_word]
            states_value = [h, c]
        return target_text.strip()
    def test_run(self):
        """Smoke test: print replies for three sample hotel reviews."""
        print(self.reply('Not so good experience. Washroom was not cleaned properly and room service was not quick to resond.'))
        print(self.reply('Hotel was ok. Food was good and staff was very cooperative in providing services.'))
        print(self.reply('I loved the environment of the hotel !!!. It was great living there '))
def main():
    """Seed numpy's RNG for reproducibility, then build/train the chatbot
    and run its smoke test."""
    np.random.seed(42)
    bot = CornellWordGloveChatBot()
    bot.test_run()
if __name__ == '__main__':
    # Hyper-parameters and data locations.  These are read as module
    # globals by the functions above, so the module only works when run as
    # a script.  Edit DATA / WEIGHT_FILE_PATH for your machine.
    MAX_VOCAB_SIZE = 10000
    BATCH_SIZE = 32
    NUM_EPOCHS = 100
    GLOVE_EMBEDDING_SIZE = 100
    HIDDEN_UNITS = 32
    MAX_INPUT_SEQ_LENGTH = 150
    MAX_TARGET_SEQ_LENGTH = 150
    DATA_SET_NAME = 'cornell'
    DATA = 'D:/CBA/PositiveOnly.csv'
    DATA_PATH = 'movie_lines_cleaned_10k.txt'
    WHITELIST = 'abcdefghijklmnopqrstuvwxyz1234567890?.,'
    WEIGHT_FILE_PATH = 'D:/CBA/word-glove-weights.h5'
    main()
|
[
"asksonu.sunil@gmail.com"
] |
asksonu.sunil@gmail.com
|
bb5d93cb6f7562c1613483c8b5d2df9e1d874fac
|
46192947e8a87bdb5ddf4de92f832f8c2d53bccb
|
/REST API/api_project/testpython.py
|
ca9eb6794f25f1aa41ba61c45ca544892381354a
|
[] |
no_license
|
mkansari31/Rest_API
|
216a060330923c75e003a2b9d75071bc717a60be
|
b0325c907e3d8dd93f5711c274f57a0592efb912
|
refs/heads/master
| 2023-01-31T00:10:11.129491
| 2020-12-14T09:47:39
| 2020-12-14T09:47:39
| 321,298,669
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 790
|
py
|
from flask import Flask, render_template, request
from flask_mysqldb import MySQL
# Flask app wired to a local MySQL database "MyDB".
app = Flask(__name__)
# NOTE(review): empty root password and DEBUG=True are development-only
# settings; move credentials to configuration before deploying.
app.config['MYSQL_HOST'] = 'localhost'
app.config['MYSQL_USER'] = 'root'
app.config['MYSQL_PASSWORD'] = ''
app.config['MYSQL_DB'] = 'MyDB'
app.config['DEBUG'] = True
mysql = MySQL(app)
@app.route('/', methods=['GET', 'POST'])
def index():
    """Show the form on GET; on POST insert the submitted first/last name
    into the MyUsers table and report success."""
    if request.method == "POST":
        form = request.form
        first = form['fname']
        last = form['lname']
        cursor = mysql.connection.cursor()
        # Parameterized query: the driver escapes the user-supplied values.
        cursor.execute(
            "INSERT INTO MyUsers(firstName, lastName) VALUES (%s, %s)",
            (first, last),
        )
        mysql.connection.commit()
        cursor.close()
        return 'success'
    return render_template('index.html')
# Start the Flask development server when executed directly.
if __name__ == '__main__':
    app.run()
|
[
"mkansari515@gmail.com"
] |
mkansari515@gmail.com
|
3982714fe0bab687a1e4f4ee2cab7d5183777153
|
d846460c9fcb294fb88a68c14f081013466f47d5
|
/matplotlib.py
|
b2af1a9ac17bdec4142f724cb00879ebde715844
|
[] |
no_license
|
divyajaincs/Python-Practice-
|
52d5dc0a7d998d7476fe332e723dee7b89b182cb
|
679df72102555a7f1afe055c1257aca2cfada0f3
|
refs/heads/master
| 2021-04-12T10:07:28.868872
| 2018-03-25T19:32:14
| 2018-03-25T19:32:14
| 126,732,330
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 120
|
py
|
"""Plot y = tan(x) for x in [0, 3*pi), sampled every 0.1 radians."""
import numpy as np
from matplotlib import pyplot as plt

xs = np.arange(0, 3 * np.pi, 0.1)
ys = np.tan(xs)
plt.plot(xs, ys)
plt.show()
|
[
"divyajnvh@gmail.com"
] |
divyajnvh@gmail.com
|
2fae047ea5b7af3cba687716d80fa7aab18a4d0a
|
4d259f441632f5c45b94e8d816fc31a4f022af3c
|
/date/tt.py
|
9f4bc74af2f7f817b5cc2a96f52b570bd76401f0
|
[] |
no_license
|
xiaoruiguo/lab
|
c37224fd4eb604aa2b39fe18ba64e93b7159a1eb
|
ec99f51b498244c414b025d7dae91fdad2f8ef46
|
refs/heads/master
| 2020-05-25T01:37:42.070770
| 2016-05-16T23:24:26
| 2016-05-16T23:24:26
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 40
|
py
|
# Python 2 snippet: 'sss'*3 builds the 9-character string 'sssssssss';
# wrapping it in [...] makes a one-element list, so list concatenation
# prepends that single string to a.
a=['1','2','3']
s = ['sss'*3]+a
print s
|
[
"junmein@junmeinde-macbook-pro-3.local"
] |
junmein@junmeinde-macbook-pro-3.local
|
74c6a926ed7a08e4d04f9fed809bdbf9a1a4c9b6
|
f0f9e59b3b62a89b1e9cf3f02627d80be3359711
|
/webpersonal/core/views.py
|
8cc05b98cc068c425cdf03633c4553a6812fef81
|
[] |
no_license
|
gemamoreira/web-personal-django3
|
372d500bc9dd16768ef0223dcfb68d4eb1e6498c
|
3177db3bdaa7478f979e1f1d071904faef693035
|
refs/heads/main
| 2023-01-01T03:52:34.047986
| 2020-10-13T05:27:27
| 2020-10-13T05:27:27
| 291,835,070
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 554
|
py
|
from django.shortcuts import render, HttpResponse
html_base = """
<h1>Mi web Personal</h1>
<ul>
<li><a href="/">Portada</a></li>
<li><a href="/about/">Acerca de...</a></li>
<li><a href="/portfolio/">Portafolio</a></li>
<li><a href="/contact/">Contacto</a></li>
</ul>
"""
# Create your views here.
def home(request):
    """Render the site's landing ("Portada") page."""
    template = "core/home.html"
    return render(request, template)
def about(request):
    """Render the "Acerca de..." page."""
    template = "core/about.html"
    return render(request, template)
def contact(request):
    """Render the "Contacto" page."""
    template = "core/contact.html"
    return render(request, template)
|
[
"glmoremu20@gmail.com"
] |
glmoremu20@gmail.com
|
62b6273166486acf1ece5437a98e41a0350b1124
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/verbs/_celebrating.py
|
305a78d8f0d008577d0f029e5a82a8910f663133
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 261
|
py
|
from xai.brain.wordbase.verbs._celebrate import _CELEBRATE
# class header
class _CELEBRATING(_CELEBRATE, ):
    """Present-participle form of the verb 'celebrate'.

    Inherits all behaviour from ``_CELEBRATE`` and only overrides the
    word-form metadata set in the constructor.
    """

    def __init__(self):
        super().__init__()
        self.name = "CELEBRATING"
        self.specie = 'verbs'
        self.basic = "celebrate"
        self.jsondata = {}
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
ec67fcd528e10f1d86e9a1a620b455fc3c212ba6
|
10f57edc51f50742625b405f2f7c37cdd4734700
|
/app/recipe/urls.py
|
034085c433bce22da70cdc48e8a58fb0632aba75
|
[
"MIT"
] |
permissive
|
smkempin/recipe-app-api
|
5eea18b812bf822c292db8b66499ac1020dd886c
|
453223e68616ff092964c8414ede5cccc2e38351
|
refs/heads/master
| 2021-01-26T05:17:09.742683
| 2020-03-30T13:13:26
| 2020-03-30T13:13:26
| 243,324,015
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 359
|
py
|
from django.urls import path, include
from rest_framework.routers import DefaultRouter
from recipe import views
# DRF router: auto-generates list/detail URL patterns for each registered ViewSet.
router = DefaultRouter()
router.register('tags', views.TagViewSet)
router.register('ingredients', views.IngredientViewSet)
router.register('recipes', views.RecipeViewSet)
# Application namespace used when reversing these URLs.
app_name = 'recipe'
urlpatterns=[
    path('', include(router.urls))
]
|
[
"scott@precisionwre.com"
] |
scott@precisionwre.com
|
f4506a41f21652bd250f6896810cd6fbdec72bfb
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03042/s013075072.py
|
044f87c3be49952ef7be8bf867e28108c9b4cd05
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 186
|
py
|
# Read a 4-digit number and decide whether its halves can be a month (MM)
# and/or a two-digit year (YY).
s = int(input())
first_two = s // 100
last_two = s % 100
first_is_month = 1 <= first_two <= 12
second_is_month = 1 <= last_two <= 12
if first_is_month and second_is_month:
    print("AMBIGUOUS")
elif first_is_month:
    print("MMYY")
elif second_is_month:
    print("YYMM")
else:
    print("NA")
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
babd1c65ab124645809ec009676a41dc323194be
|
935a4ebc2cb54553c9d51e1fcd885d5d5a993218
|
/fulfillment/fulfillment.py
|
1359858cc480454ef0e9f5eea67947f74b7cce36
|
[
"MIT"
] |
permissive
|
ahmetyazar/adj-demo
|
c03a6a481470c0548294e448a158b370e79cd6a0
|
9f7c48faa65d951c040dcfed9904e2186415221a
|
refs/heads/master
| 2021-01-06T20:34:25.652447
| 2017-08-11T02:45:21
| 2017-08-11T02:45:21
| 99,522,948
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,462
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Jul 30 15:28:00 2017
@author: yazar
"""
import sys
import logging
import rds_config
import pymysql
import datetime
import json
import requests
# rds settings
rds_host = "creditadjudication.cz3lpdttkjpo.us-east-1.rds.amazonaws.com"
name = rds_config.db_username
password = rds_config.db_password
db_name = rds_config.db_name
logger = logging.getLogger()
logger.setLevel(logging.INFO)
try:
conn = pymysql.connect(rds_host, user=name, passwd=password, db=db_name,
connect_timeout=5, cursorclass=pymysql.cursors.DictCursor)
except:
logger.error("ERROR: Unexpected error: Could not connect to MySql instance.")
sys.exit()
logger.info("SUCCESS: Connection to RDS mysql instance succeeded")
def datetime_handler(x):
    """JSON fallback serializer: ISO-format datetimes, reject everything else."""
    if not isinstance(x, datetime.datetime):
        raise TypeError("Unknown type")
    return x.isoformat()
json.JSONEncoder.default = datetime_handler
def addBlockedCustomers(event, context):
    """Lambda handler for an SNS notification.

    Inserts the customer referenced by the SNS message's ``partyID`` into
    ``FulfillmentDb.BlockedCustomers``, then logs every row of that table.

    Parameters
    ----------
    event : dict
        Lambda event; the SNS payload is at event['Records'][0]['Sns']['Message']
        and is a JSON string containing at least a 'partyID' key.
    context : object
        Lambda context (unused).

    Returns
    -------
    dict
        A 200 API-Gateway-style response echoing the incoming event.
    """
    logger.info('#################')
    logger.info(event)
    logger.info('#################')
    message = event['Records'][0]['Sns']['Message']
    logger.info('From SNS: ' + message)
    message = json.loads(message)
    # Security fix: partyID comes from an external message, so interpolating
    # it into the SQL string with .format() was injectable. Use a
    # parameterized query and let the driver escape the value.
    sql = ("insert into FulfillmentDb.BlockedCustomers (partyID, effectiveDate) "
           "values (%s, NOW())")
    logger.info(sql)
    with conn.cursor() as cur:
        cur.execute(sql, (message['partyID'],))
        conn.commit()
        cur.execute("select * from FulfillmentDb.BlockedCustomers")
        item_count = 0
        for row in cur:
            item_count += 1
            logger.info(row)
        # Bug fix: the original called .format() on a %-style string
        # ("Added %d items ...".format(n)), so the count was never substituted.
        # Use logging's lazy %-args instead.
        logger.info("Added %d items from RDS MySQL table", item_count)
    conn.commit()
    return {
        'statusCode': 200,
        'headers': {'Content-Type': 'application/json'},
        'body': json.dumps(event)
    }
def checkPotentialFraud(event, context):
logger.info('#################')
logger.info(event)
logger.info('#################')
# check if there is a fraud
sql = "select * from FulfillmentDb.PotentialFraud where partyID = {}".format(event['partyID'])
def processCreditLimit(event, context):
logger.info('#################')
logger.info(event)
logger.info('#################')
|
[
"ahmetyazar@yahoo.com"
] |
ahmetyazar@yahoo.com
|
fa56238a5d5fa13f842dcbd60a00c830bce51d0a
|
dcdb7a05d52cd1f9d558a70570b3ecbd85cefbe6
|
/dj_blog/settings.py
|
afa71345b7ec029e8bcda1221ebdb1f056053ba3
|
[] |
no_license
|
GDCenter/blog_django
|
ea7a9a556292b212a6d5a2de3d02f7b1e9002871
|
5cb12f630618bb49bd955bcc9072339ff3a01387
|
refs/heads/master
| 2020-09-09T15:51:49.918515
| 2018-05-11T09:21:05
| 2018-05-11T09:21:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,064
|
py
|
"""
Django settings for dj_blog project.
Generated by 'django-admin startproject' using Django 1.8.2.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '%7&gfs!u_s4ci@5o2*j6tv^$*ju_2ds7-83o#$s#423(4+y4ds'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
# 添加apps
'apps.blog_article',
'apps.blog_console',
'apps.blog_index',
'apps.blog_sign',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'dj_blog.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'template')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
# 使用media文件夹
'django.template.context_processors.media',
],
},
},
]
WSGI_APPLICATION = 'dj_blog.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'blog_django',
'HOST': '106.14.1.9',
'PORT': 3306,
'USER': 'liduo',
'PASSWORD': '624695549'
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'zh-hans' # 使用中文
TIME_ZONE = 'Asia/Shanghai' # 设定时区
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
# 添加静态文件路径
STATICFILES_DIRS = [os.path.join(BASE_DIR, 'static')]
STATIC_ROOT = os.path.join(BASE_DIR, 'statics')
# 设置media文件夹url
MEDIA_URL = '/media/'
# 添加media文件夹
MEDIA_DIR = os.path.join(BASE_DIR, 'media')
MEDIA_ROOT = MEDIA_DIR
####################################################
# 自定义User模型类设置
# 继承重写User模型类时,指定自定义的模型类,'app名字.类名'
AUTH_USER_MODEL = 'blog_sign.User'
####################################################
# login_required装饰器设置
# 配置登录界面,供登录验证装饰器login_required使用
LOGIN_URL = '/sign/login'
####################################################
# 邮件设置
# 发送邮件配置
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
# 使用ssl加密方式
EMAIL_USE_SSL = True
# smpt服务地址
EMAIL_HOST = 'smtp.163.com'
# 使用ssl的smtp端口465,非ssl端口25
EMAIL_PORT = 465
# 发送邮件的邮箱
EMAIL_HOST_USER = 'liduo945@163.com'
# 在邮箱中设置的客户端授权密码
EMAIL_HOST_PASSWORD = 'liduo945163'
# 收件人看到的发件人
EMAIL_FROM = 'LeeBlog<liduo945@163.com>'
####################################################
# Redis缓存设置
# Django的缓存配置
CACHES = {
"default": {
"BACKEND": "django_redis.cache.RedisCache",
"LOCATION": "redis://:624695549@106.14.1.9:6379/9",
"OPTIONS": {
"CLIENT_CLASS": "django_redis.client.DefaultClient",
}
}
}
# 配置session存储
SESSION_ENGINE = "django.contrib.sessions.backends.cache"
SESSION_CACHE_ALIAS = "default"
####################################################
# FastDFS分布式文件存储系统设置
# 设置Django的文件存储类
DEFAULT_FILE_STORAGE = 'utils.fdfs.storage.FDFSStorage'
# 设置fdfs使用的client.conf文件路径
FDFS_CLIENT_CONF = os.path.join(BASE_DIR, 'dj_blog/client.conf')
# 设置fdfs存储服务器上nginx的IP和端口号
FDFS_URL = 'http://106.14.1.9/'
####################################################
|
[
"liduo945@163.com"
] |
liduo945@163.com
|
2339baadffae6ec1d43540f2b5a2f88e5c5dddd0
|
4d74a14506b95289084379d85d09e0da020ba951
|
/condet/apps.py
|
de69d6066b4c6e98a8927d9908d1d593112724ce
|
[] |
no_license
|
eduardozamudio/unam-tourism-research
|
6213e2763010cee57a85c2aa755f6e978cc56efe
|
3a307877b610cb250ba142d34933261d657d837a
|
refs/heads/master
| 2022-12-11T16:31:30.005504
| 2017-10-26T11:39:02
| 2017-10-26T11:39:02
| 102,898,354
| 0
| 0
| null | 2022-12-08T00:36:55
| 2017-09-08T19:42:37
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 87
|
py
|
from django.apps import AppConfig
class CondetConfig(AppConfig):
name = 'condet'
|
[
"eduardozamudio@gmail.com"
] |
eduardozamudio@gmail.com
|
4c6318615664d63994ac62eccd6557cda896e2a9
|
2476d1dbac904aed784cc51550f74febe3b783c4
|
/farmer/config/__init__.py
|
f5de67a172870543bf9b9c7b151f084a878d6972
|
[] |
no_license
|
9231058/farmer
|
ccdbae0cc168adfa84bda016d87809ee5797c554
|
83904d86e30ab31622eaa0d4534041a4972bf842
|
refs/heads/master
| 2022-11-08T04:57:54.166269
| 2020-06-18T21:21:39
| 2020-06-18T21:21:39
| 266,701,420
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 58
|
py
|
from .config import Config, RequiredSeed, NonrequiredSeed
|
[
"parham.alvani@gmail.com"
] |
parham.alvani@gmail.com
|
1731a6bc44fffbafb6437d4bb39a9bb76acfeb29
|
45c170fb0673deece06f3055979ece25c3210380
|
/toontown/coghq/BossbotCountryClubMazeRoom_Battle00.py
|
218b80966c9553066709cc1c2f781554cc97b785
|
[] |
no_license
|
MTTPAM/PublicRelease
|
5a479f5f696cfe9f2d9dcd96f378b5ce160ec93f
|
825f562d5021c65d40115d64523bb850feff6a98
|
refs/heads/master
| 2021-07-24T09:48:32.607518
| 2018-11-13T03:17:53
| 2018-11-13T03:17:53
| 119,129,731
| 2
| 6
| null | 2018-11-07T22:10:10
| 2018-01-27T03:43:39
|
Python
|
UTF-8
|
Python
| false
| false
| 2,389
|
py
|
#Embedded file name: toontown.coghq.BossbotCountryClubMazeRoom_Battle00
from toontown.coghq.SpecImports import *
GlobalEntities = {1000: {'type': 'levelMgr',
'name': 'LevelMgr',
'comment': '',
'parentEntId': 0,
'cogLevel': 0,
'farPlaneDistance': 1500,
'modelFilename': 'phase_12/models/bossbotHQ/BossbotMazex1_C',
'wantDoors': 1},
1001: {'type': 'editMgr',
'name': 'EditMgr',
'parentEntId': 0,
'insertEntity': None,
'removeEntity': None,
'requestNewEntity': None,
'requestSave': None},
0: {'type': 'zone',
'name': 'UberZone',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
110000: {'type': 'battleBlocker',
'name': '<unnamed>',
'comment': '',
'parentEntId': 0,
'pos': Point3(-131.21, 84.92, 0),
'hpr': Point3(270, 0, 0),
'scale': Vec3(1, 1, 1),
'cellId': 0,
'radius': 10},
110202: {'type': 'door',
'name': '<unnamed>',
'comment': '',
'parentEntId': 110001,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'color': Vec4(1, 1, 1, 1),
'isLock0Unlocked': 1,
'isLock1Unlocked': 0,
'isLock2Unlocked': 1,
'isLock3Unlocked': 1,
'isOpen': 0,
'isOpenEvent': 0,
'isVisBlocker': 0,
'secondsOpen': 1,
'unlock0Event': 0,
'unlock1Event': 110000,
'unlock2Event': 0,
'unlock3Event': 0},
110002: {'type': 'maze',
'name': '<unnamed>',
'comment': '',
'parentEntId': 0,
'pos': Point3(-141.563, -78.8353, 0),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(1, 1, 1),
'numSections': 1},
10002: {'type': 'nodepath',
'name': 'props',
'comment': '',
'parentEntId': 0,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1},
110001: {'type': 'nodepath',
'name': '<unnamed>',
'comment': '',
'parentEntId': 0,
'pos': Point3(-106.91, 82.6953, 0),
'hpr': Point3(270, 0, 0),
'scale': Vec3(1, 1, 1)}}
Scenario0 = {}
levelSpec = {'globalEntities': GlobalEntities,
'scenarios': [Scenario0]}
|
[
"linktlh@gmail.com"
] |
linktlh@gmail.com
|
c5020aa411c33ba9eb808cd247fe814f9c0ece17
|
8f5f92beeaefcd9effc93da87b26acb5ea159274
|
/xtorch/modules/seq2seq_encoders/seq2seq_encoder.py
|
edcdada140696dba36c224bbb20440c20a1c8b5f
|
[
"MIT"
] |
permissive
|
altescy/xtorch
|
15f984bf08654dc00fc1be603cca696676428cc1
|
bcbbbe645f4d62c211af5b3555c526cc60792c32
|
refs/heads/main
| 2023-04-12T15:45:52.192602
| 2021-04-25T11:35:45
| 2021-04-25T11:35:45
| 361,373,990
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 805
|
py
|
from typing import Optional
import torch
class Seq2seqEncoder(torch.nn.Module):
    """
    Abstract base class for sequence-to-sequence encoders.

    A concrete subclass maps a batch of embedded token sequences to a
    same-length batch of encoded sequences. Every method here raises
    ``NotImplementedError`` and must be overridden.
    """

    def get_input_dim(self) -> int:
        """Dimensionality of the embeddings this encoder accepts."""
        raise NotImplementedError

    def get_output_dim(self) -> int:
        """Dimensionality of the encodings this encoder produces."""
        raise NotImplementedError

    def forward(
        self,
        inputs: torch.Tensor,
        mask: Optional[torch.BoolTensor] = None,
    ) -> torch.Tensor:
        """
        Encode a batch of sequences.

        Parameters
        ----------
        inputs : torch.Tensor
            Shape (batch_size, sequence_length, embedding_size).
        mask : torch.BoolTensor, optional (default = None)
            Shape (batch_size, sequence_length).

        Returns
        -------
        torch.Tensor
            Shape (batch_size, sequence_length, encoding_size).
        """
        raise NotImplementedError
|
[
"altescy@fastmail.com"
] |
altescy@fastmail.com
|
226b1a82100d5a5ec91accefe4709d3526693183
|
2fe868ab7e641629013445af85f412cfd0fc323d
|
/04_문자열자료형/문자열생성.py
|
11a405fd099d52523f193b556ee27d3ae638f5d2
|
[] |
no_license
|
lgy94/pythonOjt
|
9e0988a8e9ee005688f1f7e841077d34f403b13c
|
8d1eb5ed52527153153d3a2d47f346d0810e2d1a
|
refs/heads/master
| 2021-03-28T21:36:39.095570
| 2020-03-20T07:40:08
| 2020-03-20T07:40:08
| 247,898,759
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 450
|
py
|
s = 'Python is great!'
print (s)
s = "Python is great!"
print (s)
s = '''Python is great!'''
print (s)
s = """Python is great!"""
print (s)
sentence = 'Python is the\
most popular programming\
language in these days.'
print (sentence)
a = 'say "hello" to mom'
b = "say 'hello' to mom"
c = '''say 'hello' to "mom"'''
print (a)
print (b)
print (c)
#letter to Alice
print ('''Dear Alice,
How are you?
Say hello to your parents.
Sincerely,
Bob''')
|
[
"dudrk94@naver.com"
] |
dudrk94@naver.com
|
07737492b88c075fe7d11b3f01a276520d1b854b
|
f7e459e0a9bc5bfa7c635abe6ed9c922bae27339
|
/dfvfs/analyzer/luksde_analyzer_helper.py
|
465bd7ffdc366edbcf727ae526ca4f4605627966
|
[
"Apache-2.0"
] |
permissive
|
sanjaymsh/dfvfs
|
bcf5153a1743cb4bdc1d9e5bd45e383a2e6e675d
|
049c71df15f46ac0ef552f0c6f71f7c61797af87
|
refs/heads/master
| 2022-12-23T23:25:14.035173
| 2020-09-27T06:06:33
| 2020-09-27T06:06:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,012
|
py
|
# -*- coding: utf-8 -*-
"""The LUKSDE format analyzer helper implementation."""
from __future__ import unicode_literals
from dfvfs.analyzer import analyzer
from dfvfs.analyzer import analyzer_helper
from dfvfs.analyzer import specification
from dfvfs.lib import definitions
class LUKSDEAnalyzerHelper(analyzer_helper.AnalyzerHelper):
  """Analyzer helper for the LUKSDE volume-system format."""

  FORMAT_CATEGORIES = frozenset([
      definitions.FORMAT_CATEGORY_VOLUME_SYSTEM])

  TYPE_INDICATOR = definitions.TYPE_INDICATOR_LUKSDE

  def GetFormatSpecification(self):
    """Retrieves the format specification.

    Returns:
      FormatSpecification: format specification or None if the format cannot
          be defined by a specification object.
    """
    spec = specification.FormatSpecification(self.type_indicator)
    # A LUKS container starts with the magic bytes 'LUKS' 0xba 0xbe.
    spec.AddNewSignature(b'LUKS\xba\xbe', offset=0)
    return spec


analyzer.Analyzer.RegisterHelper(LUKSDEAnalyzerHelper())
|
[
"noreply@github.com"
] |
noreply@github.com
|
be2ea70bbdeaae35443f15931b34646e0979c465
|
489ffb5efea81b6f374037d1e4b5856041f6a5a1
|
/main.py
|
c555233cd622eca6a3b73e8d8fa869339f3abc8f
|
[] |
no_license
|
petar-tomov/RentACar
|
c183223e5b8b28dca6fe384904d00d99dbec0de0
|
cc9d3d36b72f99f8f4641f9f3d9bba027d1ea394
|
refs/heads/main
| 2023-03-18T21:06:09.081059
| 2021-03-12T08:31:04
| 2021-03-12T08:31:04
| 345,719,621
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 638
|
py
|
from Customer import Customer
pesho = Customer("Pesho")
pesho.check_catalogue()
pesho.rentacar_hours("PB5079TT", 5)
pesho.rentacar_day("CB4078BM")
pesho.rentacar_week("PB9189PC")
pesho.rentacar_hours("PB9189PC", 10) # You can't rent a car which is already rented
pesho.rentacar_day("PB9999AH") # You also can't rent a car which isn't in the catalogue, of course
pesho.rentacar_hours("PB0168MX", 3) # Pesho's fourth car
pesho.checkout()
drago = Customer("Drago")
drago.check_catalogue() # The cars rented by Pesho are no longer in the catalogue
drago.rentacar_hours("PA5460AB", 4)
drago.rentacar_hours("EB6633AH", 4)
drago.checkout()
|
[
"pepyy.tommyy@gmail.com"
] |
pepyy.tommyy@gmail.com
|
3f0b424620cadbd7007d11df02a06feeb6089c28
|
24c2132b45590c3e1af9b8383fc3d3b4d85afb1f
|
/20_DNN/scratch/nn/basic/nn_mnist_batch.py
|
9a93d20ec7bc9520e769dd3c536b337fc4d8ca68
|
[] |
no_license
|
harperfu6/ML_Tips
|
cbee8029ec8b5ef1a03d0b0e3bb9818e73e56442
|
e5776d17102113fc5e3187b1cdfb4d4bafe891f4
|
refs/heads/master
| 2022-02-27T09:24:42.946713
| 2019-11-28T13:15:48
| 2019-11-28T13:15:48
| 183,880,194
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,374
|
py
|
# coding: utf-8
# データセットに対し,まとめて予測するだけ
import sys, os
sys.path.append(os.pardir) # 親ディレクトリのファイルをインポート
import numpy as np
import pickle
from dataset.mnist import load_mnist
from common.functions import sigmoid, softmax
def get_data():
(x_train, t_train), (x_test, t_test) = load_mnist(normalize=True, flatten=True, one_hot_label=False)
return x_test, t_test
def init_network():
with open('sample_weight.pkl', 'rb') as f:
network = pickle.load(f)
return network
def predict(network, x):
    """Forward pass of the 3-layer MLP whose weights live in `network`.

    `network` maps 'W1'..'W3' / 'b1'..'b3' to numpy arrays; `x` is a batch
    of flattened input vectors. Returns the final-layer activations.
    """
    weights = (network['W1'], network['W2'], network['W3'])
    biases = (network['b1'], network['b2'], network['b3'])
    activation = x
    # Each layer is an affine transform followed by a sigmoid.
    for W, b in zip(weights, biases):
        activation = sigmoid(np.dot(activation, W) + b)
    return activation
def main():
x, t = get_data()
network = init_network()
batch_size = 100 # バッチ数
accuracy_cnt = 0
for i in range(0, len(x), batch_size):
x_batch = x[i:i+batch_size]
y_batch = predict(network, x_batch)
p = np.argmax(y_batch, axis=1) # 最も確率の高い要素のインデックスを取得
accuracy_cnt += np.sum(p == t[i:i+batch_size])
print('Accuracy:' + str(float(accuracy_cnt) / len(x)))
if __name__ == '__main__':
main()
|
[
"beharp8a8@gmail.com"
] |
beharp8a8@gmail.com
|
44b5d7f99598cb32ba93499d8f2d394a484573cf
|
49edf974d6502d339095601b101e705911426c07
|
/project_files/noah_analysis/ordered_analysis/process_mle_slope_int_soft.py
|
9a78dc727874ee47ab3e7cc339e6e3007ae25bdc
|
[] |
no_license
|
minghao2016/protein_design_and_site_variability
|
1ca7b7486464b109cb1b054e8d20fe15af41976a
|
605ad641c0061234f841e3ceed9d885dabc0d2ce
|
refs/heads/master
| 2021-05-30T05:32:37.344639
| 2015-06-27T04:50:25
| 2015-06-27T04:50:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,174
|
py
|
import os, re
import subprocess
from subprocess import Popen
from numpy import *
from scipy.stats import pearsonr as pearson
#list of files that contain L vs RSA data
data = []
#search string to use
searchStr = "^align_data_array_ordered" + "[a-zA-Z0-9_\.\-]*" + "_soft.dat"
x = [0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]
fpO = open("raw_mle_lines_ordered_soft_noah.csv","w")
#find all csv files that match the search string
for path, names, filename in os.walk('.',False):
for file in filename:
#print "Testing file: %s" %file
if(re.search(searchStr, file)!=None):
print "Found file: %s" % file
output = subprocess.Popen(["/usr/bin/Rscript /Users/Eleisha/Documents/Wilke_Lab/Project_1/project/r_scripts/MLE_calc.R " + file], shell=True, stdout=subprocess.PIPE).communicate()
print output
result = re.findall("\-?[0-9]+\.?[0-9]*", re.split("\n",output[0])[-2])
print result
slop = result[0]
int = result[1]
print file + " y(x) = " + str(int) + " + x" + str(slop)
fpO.write(file+","+str(int)+","+str(slop)+"\n")
data.append([int, slop])
fpO.close()
|
[
"eleishaj@utexas.edu"
] |
eleishaj@utexas.edu
|
dd55eae4011f0cb80d47c940385e7a3ff85cd7a3
|
602fa0e4ce194d3073d78230c61f7053281f9f9b
|
/code/python/src/categories/catutil.py
|
df03a0027b66f8d76d4265de7c7074d56b487bab
|
[] |
no_license
|
ziqizhang/wop
|
111cfdda1686a874ff1fc11a453a23fb52d43af1
|
ea0c37f444de9f2d5303f74b989f6d1a09feb61d
|
refs/heads/master
| 2022-09-14T20:14:11.575021
| 2021-12-10T21:23:24
| 2021-12-10T21:23:24
| 166,239,995
| 2
| 1
| null | 2022-09-01T23:11:13
| 2019-01-17T14:33:51
|
Python
|
UTF-8
|
Python
| false
| false
| 2,128
|
py
|
import pandas as pd
from nltk import PorterStemmer, WordNetLemmatizer
import numpy
from categories import cleanCategories as cc
stemmer = PorterStemmer()
lemmatizer = WordNetLemmatizer()
#0=stem; 1=lem; else=nothing
#0=stem; 1=lem; else=nothing
def normalise_categories(in_file_name, col, stem_or_lem):
    """Read a ';'-separated CSV and print its normalised category vocabulary.

    Prints the vocabulary size, the maximum token count of any raw category,
    then each normalised category (sorted). Categories shorter than 3 chars
    are dropped when stemming/lemmatising.

    Parameters
    ----------
    in_file_name : str
        Path to the ';'-delimited CSV file (with header row).
    col : int
        Zero-based column index holding the category string.
    stem_or_lem : int
        0 = Porter-stem each category, 1 = WordNet-lemmatise, anything
        else = keep the raw string.
    """
    # Fix: DataFrame.as_matrix() was removed in pandas 1.0; to_numpy() is
    # the supported replacement with identical output.
    df = pd.read_csv(in_file_name, header=0, delimiter=";", quoting=0,
                     encoding="utf-8").to_numpy()
    norm_cats = set()
    max_toks = 0
    for r in df:
        c = r[col]
        # Missing cells are read as NaN floats; map them to a sentinel.
        if type(c) is not str and numpy.isnan(c):
            c = "NONE"
        max_toks = max(max_toks, len(c.split(" ")))
        if stem_or_lem == 0:
            c = stemmer.stem(c).strip()
            if len(c) > 2:
                norm_cats.add(c)
        elif stem_or_lem == 1:
            c = lemmatizer.lemmatize(c).strip()
            if len(c) > 2:
                norm_cats.add(c)
        else:
            norm_cats.add(c)
    norm_cats_list = sorted(norm_cats)
    print(len(norm_cats_list))
    print(max_toks)
    for nc in norm_cats_list:
        print(nc)
def get_parent_category_level(in_file_name, col):
    """Print the top-level (text before the first '>') category of each row.

    Rows with no '>' separator or a NaN cell are skipped. The sorted unique
    parent categories are printed, then a separator banner, then the same
    sorted unique list again (preserving the original output exactly).

    Parameters
    ----------
    in_file_name : str
        Path to the ';'-delimited CSV file (with header row).
    col : int
        Zero-based column index holding the category path string.
    """
    # Fix: DataFrame.as_matrix() was removed in pandas 1.0; to_numpy() is
    # the supported replacement with identical output.
    df = pd.read_csv(in_file_name, header=0, delimiter=";", quoting=0,
                     encoding="utf-8").to_numpy()
    norm_cats = set()
    # NOTE(review): the original also accumulated a duplicate-preserving list
    # (norm_cats_list) but never printed it — possibly the second print loop
    # was meant to use it. Output is kept identical to the original here.
    for r in df:
        c = r[col]
        if type(c) is not str and numpy.isnan(c):
            continue
        c = cc.normaliseCategories(c)
        try:
            trim = c.index(">")
        except ValueError:
            # No hierarchy separator: the row has no parent level.
            continue
        norm_cats.add(c[0:trim].strip())
    norm_cats_sorted = sorted(norm_cats)
    for nc in norm_cats_sorted:
        print(nc)
    print("\n\n>>>>>>>>>\n\n")
    for nc in norm_cats_sorted:
        print(nc)
if __name__ == "__main__":
# normalise_categories("/home/zz/Work/data/wop_data/goldstandard_eng_v1_cleanedCategories.csv",
# 13,0)
get_parent_category_level("/home/zz/Work/data/wop_data/goldstandard_eng_v1_utf8.csv",
8)
|
[
"ziqizhang.email@gmail.com"
] |
ziqizhang.email@gmail.com
|
e5051b8cb2577762cfa4eefebf5dafbcea28c428
|
56b69a58a8844d09e213dc38aab9aa62422dd58e
|
/128.py
|
9f90303f0eefb77e7f24e3e9b2c106715dc321dc
|
[] |
no_license
|
dilkas/project-euler
|
d697daf4087a0b436e2dc7b2840d5e53c2ff07b8
|
e637fc34d406de7b05755d9c85b370aef1beb2a7
|
refs/heads/master
| 2021-05-31T15:46:45.342257
| 2016-05-15T10:29:45
| 2016-05-15T10:29:45
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 517
|
py
|
import math
target = 2000
def prime(n):
    """Return True if n is prime.

    Fix: the original trial-division loop returned True for n < 2 (0 and 1
    are not prime). Also uses math.isqrt, which is exact for large ints,
    instead of math.floor(n ** 0.5).
    """
    if n < 2:
        return False
    for k in range(2, math.isqrt(n) + 1):
        if n % k == 0:
            return False
    return True
# Search ring by ring until the `target`-th qualifying tile is found.
# NOTE(review): this appears to implement Project Euler problem 128 — only
# the first tile of ring n (value 3n^2 - 3n + 2) and the last tile
# (value 3n^2 + 3n + 1) can qualify, each gated by the three primality
# tests below; confirm against the problem statement.
n = 1
counter = 0
while True:
    # First tile of ring n qualifies when 6n-1, 6n+1 and 12n+5 are prime.
    if prime(6 * n - 1) and prime(6 * n + 1) and prime(12 * n + 5):
        counter += 1
        if counter == target:
            print(3 * n * n - 3 * n + 2)
            break
    # Last tile of ring n qualifies when 6n-1, 6n+5 and 12n-7 are prime.
    if prime(6 * n - 1) and prime(6 * n + 5) and prime(12 * n - 7):
        counter += 1
        if counter == target:
            print(3 * n * n + 3 * n + 1)
            break
    n += 1
|
[
"paulius.dilkas@gmail.com"
] |
paulius.dilkas@gmail.com
|
01cc3441b94a034279d96bc8ad271736ee058bcd
|
8a2a76d66a92c91ee07c44e533f8bbc81778ff28
|
/rund.py
|
76776386f4c4858a96a99032c7460c3953455839
|
[] |
no_license
|
Shashant-R/GSCEventMOD
|
9941935caf966ca601b1e3c59b05eefd7dc18c67
|
25e85af84b12f725a188f6b872d44010f2f385bc
|
refs/heads/main
| 2023-08-14T00:01:00.469037
| 2021-09-23T12:55:32
| 2021-09-23T12:55:32
| 409,591,561
| 0
| 1
| null | 2021-10-01T18:17:30
| 2021-09-23T12:54:45
|
Python
|
UTF-8
|
Python
| false
| false
| 5,774
|
py
|
import os
import matplotlib.pyplot as plt
import random
import h5py
import numpy as np
import warnings
from sklearn.neighbors import kneighbors_graph
from sklearn.cluster import SpectralClustering
from sklearn.cluster import DBSCAN
from sklearn.cluster import MeanShift
from sklearn.metrics import silhouette_score
from sklearn.metrics import calinski_harabasz_score
from sklearn.ensemble import IsolationForest
warnings.filterwarnings('ignore', '.*Graph is not fully connected*')
print('reading Cars_sequence...')
file_name = "Object Motion Data (mat files)/Cars_sequence.mat"
f = h5py.File(file_name, "r")
davis = f['davis']
dvs = davis['dvs']
pol = dvs['p'][0]
#ts = dvs['t'][0]
ts = np.load("street_ts.npy")
ts = ts*0.000001
#x = dvs['x'][0]
#y = dvs['y'][0]
aps_ts = np.load("street_img_ts.npy")
#dvs_ts = np.load("cars_all_ts.npy")
print(len(ts), len(aps_ts))
'''
# events frequency distribution
y_eve = []
i = 0
ctr = 0
j = 0
while i<len(ts):
if ts[i] < aps_ts[j]:
ctr += 1
else:
y_eve.append(ctr)
ctr = 1
j += 1
if j==len(aps_ts):
break
i += 1
np.save("event_dist_street.npy", np.asarray(y_eve))
'''
# plot frequency distribution
y_eve = np.load("event_dist_street.npy")
print(y_eve)
print(len(y_eve))
fig = plt.figure()
plt.bar(range(200), y_eve, color='r')
plt.xlabel("Segments")
plt.ylabel("No. of events")
plt.title("Frequency of events in different segments")
plt.show()
print(sum(y_eve))
'''
#without cleaning
n = len(dvs_ts)
last = 0
ALL = len(pol)
NEIGHBORS = 100
ctr = -1
for idx in dvs_ts:
ctr+=1
xx = '0000000000'
yy = str(ctr)
file_name = xx[:len(xx) - len(yy)] + yy
print(last)
selected_events = []
for i in range(0, ALL)[last:idx]:
selected_events.append([y[i], x[i], ts[i] * 0.0001, pol[i] * 0])
if len(selected_events)==6000:
break
last = idx
selected_events = np.asarray(selected_events)
cleaned_events = IsolationForest(random_state=0, n_jobs=-1, contamination=0.05).fit(selected_events)
unwanted_events = cleaned_events.predict(selected_events)
selected_events = selected_events[np.where(unwanted_events == 1, True, False)]
adMat = kneighbors_graph(selected_events, n_neighbors=NEIGHBORS)
max_score = -20
opt_clusters = 2
scores = []
print('predicting number of clusters...')
for CLUSTERS in range(2, 10):
clustering = SpectralClustering(n_clusters=CLUSTERS, random_state=0,
affinity='precomputed_nearest_neighbors',
n_neighbors=NEIGHBORS, assign_labels='kmeans',
n_jobs=-1).fit_predict(adMat)
curr_score = silhouette_score(selected_events, clustering)
scores.append(curr_score)
if curr_score > max_score:
max_score = curr_score
opt_clusters = CLUSTERS
np.save(os.path.join('results/656/predict_k',
file_name + '.npy'),
np.asarray(scores))
clustering = SpectralClustering(n_clusters=opt_clusters, random_state=0, affinity='precomputed_nearest_neighbors',
n_neighbors=NEIGHBORS, assign_labels='kmeans',
n_jobs=-1).fit_predict(adMat)
np.save(os.path.join('results/656/selected_events',
file_name + '.npy'),
selected_events)
np.save(os.path.join('results/656/clusters',
file_name + '.npy'),
clustering)
print('done')
'''
'''
# indices of nearest timestamps
event_idx = []
for t in aps_ts:
idx_t = (np.abs(ts - t)).argmin()
print(t)
event_idx.append(idx_t)
event_idx = np.asarray(event_idx)
np.save("cars_all_ts.npy", event_idx)
print(len(event_idx))
'''
'''
#with cleaning and cluster prediction
ALL = len(pol)
NEIGHBORS = 30
print(str(ALL)+' events in dataset...')
seg = 64
while seg >= 64:
print('dividing the sequence into '+str(seg)+' segments...')
X = ALL//seg
print('each segment has '+str(X)+' events, out of which '+str(X//4)+' events will be selected...')
for sl_no in range(seg):
print('segment no: '+str(sl_no+1))
selected_events = []
for i in range(0,ALL)[sl_no*X:sl_no*X+X:4]:
selected_events.append([y[i], x[i], ts[i]*0.0001, pol[i]*0])
selected_events = np.asarray(selected_events)
cleaned_events = IsolationForest(random_state=0, n_jobs=-1, contamination=0.1).fit(selected_events)
unwanted_events = cleaned_events.predict(selected_events)
selected_events_cleaned = selected_events[np.where(unwanted_events == 1, True, False)]
adMat_cleaned = kneighbors_graph(selected_events_cleaned, n_neighbors=NEIGHBORS)
print('clustering...')
clustering_cleaned = SpectralClustering(n_clusters=2, random_state=0, affinity='precomputed_nearest_neighbors',
n_neighbors=NEIGHBORS, assign_labels='kmeans',
n_jobs=-1).fit_predict(adMat_cleaned)
xx = '0000000000'
yy = str(sl_no)
file_name = xx[:len(xx) - len(yy)] + yy
np.save(os.path.join('results/clean/64/selected_events',
file_name+'.npy'),
selected_events_cleaned)
np.save(os.path.join('results/clean/64/clusters',
file_name + '.npy'),
clustering_cleaned)
seg = seg // 2
break
print('done')
'''
|
[
"noreply@github.com"
] |
noreply@github.com
|
16a9c914cb9d2272c13f251203a93fa646574f5b
|
3e2f7ff88aabbf17ee93a30176a40396adfbc7ec
|
/core/migrations/0003_auto_20171118_0536.py
|
fdb20ee9327f420442d4608bc28d9fa64520b0bd
|
[] |
no_license
|
mahima-c/uhvpe
|
04cbc519904261b45193dea4450f646b83a4184d
|
9dc47d4fae17b6e405d5d9500c75585c45afbf20
|
refs/heads/master
| 2021-05-25T19:08:21.414764
| 2019-10-15T07:19:13
| 2019-10-15T07:19:13
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 896
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2017-11-18 05:36
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Create the Presentation model and drop Workshop.start_date."""
    dependencies = [
        ('core', '0002_auto_20171117_1726'),
    ]
    operations = [
        migrations.CreateModel(
            name='Presentation',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # Display name; unique across presentations.
                ('name', models.CharField(max_length=50, unique=True)),
                ('language', models.CharField(choices=[('HI', 'Hindi'), ('EN', 'English')], max_length=10)),
                # Uploaded file stored under the 'presentation/' media subdirectory.
                ('file', models.FileField(unique=True, upload_to='presentation/')),
            ],
        ),
        migrations.RemoveField(
            model_name='workshop',
            name='start_date',
        ),
    ]
|
[
"apoorvapandey365@gmail.com"
] |
apoorvapandey365@gmail.com
|
5a21f18a39ff0c0dda0b64a7e34fc7fc468fd600
|
330146ad205bb1c21b63c2eeaf11c8a2996e2a4f
|
/mylab/coursera/timeofday.py
|
21979edc23726092bf550c7561ed29d32721bbd8
|
[] |
no_license
|
bkrishna2006/system
|
f3c63c03cbdf817f102a7b84075018a21e9e3c54
|
c478c7a001722af2b300fdaca9e10fdbec4d6a04
|
refs/heads/master
| 2020-04-10T20:01:49.806318
| 2018-01-22T16:34:36
| 2018-01-22T16:34:36
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 541
|
py
|
# NOTE(review): Python 2 syntax throughout (raw_input, print statement,
# dict.items() returning a sortable list).
# Counts how many mailbox "From <addr> <weekday> <month> <day> <hh:mm:ss> ..."
# lines fall in each hour and prints the per-hour tallies sorted by hour.
filename = raw_input("Enter your file name: ")
try:
    fd = open(filename)
except:
    print "File not found: %s" % filename
    quit()
theDict = dict()
for line in fd:
    words = line.split()
    # Skip blank lines, non-"From" lines, "From:" header lines, and lines
    # too short to carry a timestamp in field 6.
    if len(words) < 1 or words[0] != "From" or words[0][-1] == ":" or len(words) < 6: continue
    time = words[5]
    time_split = time.split(":")
    if len(time_split) < 1: continue
    hour = time_split[0]
    # Tally one message against its hour bucket.
    theDict[hour] = theDict.get(hour, 0) + 1
result = theDict.items()
result.sort()
for hour, count in result:
    print hour, count
|
[
"luke.nothingness@gmail.com"
] |
luke.nothingness@gmail.com
|
4c7a6ca0278e20ba7b9ba747006fe1ff9e5d0326
|
e6c58d75f3cea45639b6dd0f8fe1d1ec6a00bae5
|
/weather/views.py
|
1a56ce2cef21e16d096be18a5f9308784b69156d
|
[
"MIT"
] |
permissive
|
cindyjialiu/WeatherApp
|
4d2d0ae092b410ad6f35008a00ce938426d81a6c
|
c91f5928708d4cd79286bcd51a3934cb9d2e3a92
|
refs/heads/master
| 2020-04-11T19:00:55.616764
| 2018-12-21T15:54:44
| 2018-12-21T15:54:44
| 162,019,211
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,968
|
py
|
import os
import datetime
import requests
from django.shortcuts import render
def index(request):
api_key=os.environ['WEATHER_API_KEY']
today = datetime.datetime.today()
response = requests.get(f'http://api.openweathermap.org/data/2.5/weather?q=London,uk&units=metric&appid={api_key}')
# TODO: handle extra errors
weather_data = response.json()
response2 = requests.get(f'http://api.openweathermap.org/data/2.5/forecast?q=London,uk&units=metric&appid={api_key}')
# TODO: handle extra errors
weather_forecast_data = response2.json()
weather_summary = get_weather_summary(weather_data)
weather_forecast_summary = get_temps_for_tomorrow(get_weather_forecast_temp_and_dt(weather_forecast_data['list']), today)
weather_forecast_tomorrow = get_temps_for_tomorrow_without_date(weather_forecast_summary)
return render(request, 'index.html', { 'weather_forecast_tomorrow': weather_forecast_tomorrow,
'weather_summary': weather_summary
})
def get_weather_summary(weather_data):
return {
'temp': weather_data['main']['temp'],
'min': weather_data['main']['temp_min'],
'max': weather_data['main']['temp_max'],
'humidity':weather_data['main']['humidity']
}
def get_weather_forecast_temp_and_dt(weather_forecast_data):
return list(map(lambda x: {
'y': x['main']['temp'],
'x': x['dt_txt']
}, weather_forecast_data))
def get_temps_for_tomorrow(filtered_forecast_data, today):
tomorrow = str(today + datetime.timedelta(days = 1)).split(' ')[0]
return list(filter(lambda x: x['x'].split(' ')[0] == tomorrow, filtered_forecast_data ))
def get_temps_for_tomorrow_without_date(tomorrow_temps_data):
return list(map(lambda x: {
'x': dt_txt_formatter(x['x']), 'y': x['y']}, tomorrow_temps_data))
def dt_txt_formatter(dateTime):
return dateTime.split(' ')[1][:-3]
|
[
"jl7e12@gmail.com"
] |
jl7e12@gmail.com
|
cbefe3934df661d760f6292ce580d43fddcb9dac
|
00d809abff2460c051cf3aeaf0f98005bd5f0397
|
/API/api_open_subtitles.py
|
748d2f77e2667ea0a234b8b7701134410a2497c0
|
[
"MIT"
] |
permissive
|
andreibastos/movie-info
|
e57d821becb8aea820e7858459d71ac34a537a1c
|
57da167b4d0ed602b5a9dce8c3f2544f7dfdacd9
|
refs/heads/master
| 2016-09-01T18:36:47.182014
| 2015-08-15T01:09:20
| 2015-08-15T01:09:20
| 40,301,196
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,679
|
py
|
# coding: utf-8
import urllib
from lxml import html
import lxml.html as html
from lxml import etree
from lxml.html import fromstring, tostring
import json
url_base = 'http://www.opensubtitles.org'
url_search = 'http://www.opensubtitles.org/pb/search/sublanguageid-pob/imdbid-'
def get_legend(imdbID):
legend = {}
imdbID = imdbID.strip('tt')
print (imdbID)
link = url_search + imdbID
print (link)
# link = '/home/andrei/scripts/torrent/imdb/legend.html'
# link = 'http://www.opensubtitles.org/pb/search/sublanguageid-all/subtrusted-on/hearingimpaired-on/hd-on/autotranslation-on'
page = None
tree = None
response = False
try:
page = urllib.urlopen(link)
tree = etree.HTML(page.read())
except Exception as inst:
return json.dumps({'error':{'code':1,'mensage':str(inst)},'response':response})
try:
for div in tree.xpath("//div[@style='text-align:center']"):
for a in div.xpath('h1/a'):
legend['link_download'] = a.get('href')
legend['name_movie'] = a.xpath('span')[0].text
legend['downloads'] = 0
for span in div.xpath("//div[@itemtype='http://schema.org/Movie']/h2"):
legend['name_movie_file'] = span.text
legends = []
legends.append(legend)
response = True
return json.dumps({'response':response,'legends':legends})
except Exception as msg:
legends = []
try:
for tr in tree.xpath("//table[@id='search_results']//tr"):
# for columns of tr
index_column = 0
# to legend
name_movie = None
quality_legend = []
name_movie_file = None
language = None
cd = None
date = None
fps = 0.0
downloads = None
link_download = None
legend_type = None
punctuation = None
comments = 0
imdbVote = 0
autor_name = None
autor_rank = None
len_columns = len(tr)
legend = None
tmp_class = tr.get('class')
if len_columns == 9 and not 'head' in tmp_class:
legend = {}
for td in tr.xpath('td'):
if index_column == 0:
for a in td.xpath('strong/a'):
name_movie = ((a.text.replace('\n','').replace('\t','')))
for img in td.xpath('img'):
src = img.get('src')
if src:
if '/' in src:
tmp_src = src.split('/')
src = tmp_src[len(tmp_src)-1]
if '.' in src:
src = src.split('.')[0]
quality_legend.append(src)
for br in td.xpath("//br"):
if br.tail:
name_movie_file = br.tail
for span in td.xpath('span'):
name_movie_file = span.get('title')
if index_column == 1:
a = td.xpath('a')
if a:
language = a[0].get('title')
if index_column == 2:
cd = td.text.replace('\n','').replace('\t','')
if index_column == 3:
for time in td.xpath('time'):
date = time.text
for span in td.xpath('span'):
fps = float(span.text)
if index_column == 4:
for a in td.xpath('a'):
link_download = url_base + a.get('href')
downloads = int (a.text.replace('x','').replace('\n',''))
for span in td.xpath('span'):
legend_type = span.text
if index_column == 5:
punctuation = td.text
for img in td.xpath('img'):
punctuation = (img.get('src').split('/')[len(img)-1])
if index_column == 6:
comments = td.text
if index_column == 7:
imdbVote = td.xpath('a')[0].text
if index_column == 8:
if len(td.xpath('a'))>0:
autor_name = td.xpath('a')[0].text
if len(td.xpath('a/img'))>0:
autor_rank = td.xpath('a/img')[0].get('alt')
index_column +=1;
legend['name_movie'] = name_movie
legend['name_movie_file'] = name_movie_file
legend['quality_legend'] = quality_legend
legend['language'] = language
legend['cd'] = cd
legend['date'] = date
legend['fps'] = fps
legend['downloads'] = downloads
legend['link_download'] = link_download
legend['punctuation'] = punctuation
legend['comments'] = comments
legend['imdbVote'] = imdbVote
legend['autor_name'] = autor_name
legend['autor_rank'] = autor_rank
legends.append(legend)
response = True
return json.dumps({'response':response,'legends':legends}, indent=4, sort_keys=True)
except Exception as inst:
print 'error 2 '
return json.dumps({'error':{'code':2,'mensage':str(inst)},'response':response})
# imdbID = "tt3235888"
# f = open(imdbID +".json",'w')
# legendas = get_legend(imdbID)
# print (legendas)
# f.write(legendas)
# f.close
|
[
"andreibastos@outlook.com"
] |
andreibastos@outlook.com
|
e32d9ecd5addc70ef1833cfb869c834a230a4f2c
|
7f97814acd76ca96aee877fd70d401380f848fae
|
/7_training/re_start_end.py
|
e5842c00b391813441ccd2346854697e29805bbb
|
[] |
no_license
|
tberhanu/all_trainings
|
80cc4948868928af3da16cc3c5b8a9ab18377d08
|
e4e83d7c71a72e64c6e55096a609cec9091b78fa
|
refs/heads/master
| 2020-04-13T12:12:21.272316
| 2019-03-16T04:22:20
| 2019-03-16T04:22:20
| 163,195,802
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 485
|
py
|
"""
https://www.hackerrank.com/challenges/re-start-re-end/problem?h_r=next-challenge&h_v=zen
"""
# Enter your code here. Read input from STDIN. Print output to STDOUT
import re
s, k = input(), input()
i = 0
found = False
while i < len(s):
string = s[i:]
match = re.match(r'{}'.format(k), string)
if match == None:
i = i + 1
else:
found = True
print((match.start() + i, match.end() + i - 1))
i = i + 1
if not found:
print('(-1, -1')
|
[
"tberhanu@berkeley.edu"
] |
tberhanu@berkeley.edu
|
16f02a9531c8dbb7c2e6d252e5094a83efbd7217
|
a89bcfe5a2fff6727a39a64e36e92a5f5a72644f
|
/929_unique_email_addresses/solution.py
|
fa0a932ff6febfdfcffa431123abb50352a9fc0e
|
[] |
no_license
|
vanshaw2017/leetcode_vanshaw
|
6fdde2c012f53470efa9f4b13b0d123f3fef0e89
|
12393cfaf4b1b758e8a0407787a2a8150285678d
|
refs/heads/master
| 2020-04-06T13:59:18.119910
| 2019-02-25T02:10:28
| 2019-02-25T02:10:28
| 157,522,844
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 473
|
py
|
class Solution:
def numUniqueEmails(self, emails: 'List[str]') -> 'int':
result = []
for i in emails:
local = i.split("@")[0]
far = i.split("@")[1]
if '+' in local:
local = local.split("+")[0]
if '.' in local:
local = local.replace('.', '')
email = local + far
if email not in result:
result.append(email)
return len(result)
|
[
"614664248@qq.com"
] |
614664248@qq.com
|
c463bc62085e0e254656df795762807e09e2e229
|
949d9ed95d94c2cbce94e76120009c9d6b370fb1
|
/app/core/routers/__init__.py
|
eb3c2c8a4e245e044d5d6ff836de1452a79d69ab
|
[] |
no_license
|
dexer13/guane-intern-fastapi
|
d8bc5fb808570c5c584f8fb0f685ed9f47b6a497
|
40f4ee5facf523ec93d974ba6613a959ebadae7c
|
refs/heads/main
| 2023-08-24T18:45:28.938807
| 2021-10-30T15:58:22
| 2021-10-30T15:58:22
| 420,717,715
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 85
|
py
|
from . import animals
from . import users
from . import security
from . import files
|
[
"disidro@campus.udes.edu.co"
] |
disidro@campus.udes.edu.co
|
aa122ff357ac2276d0569e9bae610a18e81b9c11
|
45be54f14406418be8bf9c1c9a695e77f2c79d1e
|
/workflow/rules/quality_control.smk
|
bcb80c300cab633d395dd1523e96c80286362b8c
|
[] |
no_license
|
G-Molano-LA/circrna-workflow
|
41d48c097a7909c8e843beab5e59685950daddf0
|
2be5e61e0eea5395758819c41f39ff9cac279fa0
|
refs/heads/main
| 2023-06-11T00:49:36.596498
| 2021-07-02T12:32:38
| 2021-07-02T12:32:38
| 348,354,579
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,490
|
smk
|
#!/usr/bin/python3
__author__ = "G. Molano, LA (gonmola@hotmail.es)"
__state__ = "ALMOST FINISHED" # requires execution to finish it
################################################################################
# Snakefile to realize a quality control of RNA-seq reads.
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Author: G. Molano, LA (gonmola@hotmail.es)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Date :
# Last modification : 01-06-2021
################################################################################
RAW_READS = expand("{path}/{sample}{ext}", path = config["quality_control"]["reads"], sample = SAMPLES,
ext = [config["quality_control"]["suffix"][1],config["quality_control"]["suffix"][2]] )
# TARGET RULE
rule quality_control_results:
input:
html = f'{OUTDIR}/quality_control/raw_data/summary/multiqc_report.html'
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~FASTQC~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
rule fastqc1:
input:
RAW_READS
output:
html = expand("{outdir}/quality_control/raw_data/{sample}_{replicate}_fastqc.html",
outdir = OUTDIR, sample = SAMPLES, replicate = [1,2]),
zip = expand("{outdir}/quality_control/raw_data/{sample}_{replicate}_fastqc.zip",
outdir = OUTDIR, sample = SAMPLES, replicate = [1,2])
params:
outdir = f'{OUTDIR}/quality_control/raw_data/'
threads: config["trimming"]["threads"]
conda: config["envs"]["quality_control"]
# message:
# "Starting quality analysis control with FASTQC programm on the "
# "following files {input.reads}. Number of threads used are {threads}."
priority: 1
shell:
"fastqc -t {threads} {input} --outdir={params.outdir}"
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~MULTIQC~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
rule multiqc1:
input:
zip = expand("{outdir}/quality_control/raw_data/{sample}_{replicate}_fastqc.zip",
outdir = OUTDIR, sample = SAMPLES, replicate = [1,2])
output:
html = f'{OUTDIR}/quality_control/raw_data/summary/multiqc_report.html',
params:
replace_old = "--force", # revisar que no remplaze al anterior
outdir = f'{OUTDIR}/quality_control/raw_data/summary/'
conda: config["envs"]["quality_control"]
priority: 2
shell:
"multiqc --interactive {params.replace_old} {input.zip} --outdir {params.outdir}"
|
[
"gonmola@hotmail.es"
] |
gonmola@hotmail.es
|
80ade65f1adcd28e82e85178c589f646d0eb1e6b
|
d5f518c8f23705396fd8da3317520cff0ab543f7
|
/algorithms/team 3/spikefinder/__init__.py
|
4413e5d15c8306e460e19e9e225a26b4cc57d8eb
|
[] |
no_license
|
j-friedrich/spikefinder_analysis
|
78322f8ead7579b1b8bfb50769d6467cee66930e
|
def1f8c2c5268eb71e83f57c265d4b3c102fb5f8
|
refs/heads/master
| 2021-10-27T17:14:49.134302
| 2018-02-19T11:42:18
| 2018-02-19T11:42:18
| 93,436,281
| 1
| 0
| null | 2017-06-05T18:51:23
| 2017-06-05T18:51:23
| null |
UTF-8
|
Python
| false
| false
| 50
|
py
|
from .main import load, score
__version__='1.0.0'
|
[
"noreply@github.com"
] |
noreply@github.com
|
edcbbc430b0d1a558d19be8a4a2625b7c762eb20
|
5add80be09ee754fced03e512a9acc214971cddf
|
/python-code/openvx-learning/helloworld.py
|
61352b55542a81f5e56cc66c6767ea1beb6c1d65
|
[
"Apache-2.0"
] |
permissive
|
juxiangwu/image-processing
|
f774a9164de9c57e88742e6185ac3b28320eae69
|
c644ef3386973b2b983c6b6b08f15dc8d52cd39f
|
refs/heads/master
| 2021-06-24T15:13:08.900960
| 2019-04-03T10:28:44
| 2019-04-03T10:28:44
| 134,564,878
| 15
| 5
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 935
|
py
|
from pyvx import vx
context = vx.CreateContext()
images = [
vx.CreateImage(context, 640, 480, vx.DF_IMAGE_UYVY),
vx.CreateImage(context, 640, 480, vx.DF_IMAGE_S16),
vx.CreateImage(context, 640, 480, vx.DF_IMAGE_U8),
]
graph = vx.CreateGraph(context)
virts = [
vx.CreateVirtualImage(graph, 0, 0, vx.DF_IMAGE_VIRT),
vx.CreateVirtualImage(graph, 0, 0, vx.DF_IMAGE_VIRT),
vx.CreateVirtualImage(graph, 0, 0, vx.DF_IMAGE_VIRT),
vx.CreateVirtualImage(graph, 0, 0, vx.DF_IMAGE_VIRT),
]
vx.ChannelExtractNode(graph, images[0], vx.CHANNEL_Y, virts[0])
vx.Gaussian3x3Node(graph, virts[0], virts[1])
vx.Sobel3x3Node(graph, virts[1], virts[2], virts[3])
vx.MagnitudeNode(graph, virts[2], virts[3], images[1])
vx.PhaseNode(graph, virts[2], virts[3], images[2])
status = vx.VerifyGraph(graph)
if status == vx.SUCCESS:
status = vx.ProcessGraph(graph)
else:
print("Verification failed.")
vx.ReleaseContext(context)
|
[
"kkoolerter@gmail.com"
] |
kkoolerter@gmail.com
|
30fd9ea784dffc56bf761d1938ac9c00617ccab4
|
8467f6026afd620aa7efc6bf8d5db7970a25460e
|
/calc.py
|
ad775e41cdf051c726c1954980c2cd0e20f73396
|
[] |
no_license
|
alisebruevich/Graphing-Calculator
|
848563a89d1e28c7dcf760a35034bcd2c5a24bad
|
06c3ae99f46408d475bd31f715cf4bc448922af5
|
refs/heads/master
| 2020-08-19T14:08:55.178307
| 2019-10-18T02:40:19
| 2019-10-18T02:40:19
| 215,926,899
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 13,595
|
py
|
#newest, latest, freshest
#fix derivatives with masking and unmasking arrays for x and y: Bonnie
#presentation (what we did, learned; 3-5 slides): Alise
#code block diagram: Bonnie
#test to see if exponential functions will work: Alise -> IT DOESN'T CRY
#make labels for what different colors/symbols mean on the graph: Alise -> DONE
#double check that log stuff works: Alise -> IT DOESN'T CRY
##
#f is original, g is first derivative, h is second derivative
import math as m
from tkinter import *
from sympy import *
def graph(input,a,b):
import numpy.ma as M
import numpy as np
from numpy import linspace
from sympy.parsing.sympy_parser import parse_expr
import matplotlib.pyplot as mpl
from sympy.parsing.sympy_parser import standard_transformations,\
implicit_multiplication_application
transformations = (standard_transformations +(implicit_multiplication_application,))
mpl.axhline(color="black")
mpl.axvline(color="black")
x = symbols('x')
listofx=[]
x_vals = np.arange(-50,50,0.01)
for i in range(0, len(x_vals)):
x_vals[i] = round(x_vals[i],4)
specialx_vals=x_vals
f=parse_expr(input, transformations=transformations)#parsing function
fy_vals=[]
zoo = parse_expr("1/0", transformations = transformations)
nan = parse_expr("0/0", transformations = transformations)
for i in x_vals:
fy_vals.append(f.subs(x,i))
for i in specialx_vals:
if f.subs(x,i).is_real==False:
x_vals=x_vals[1:len(x_vals)]
fy_vals=fy_vals[1:len(fy_vals)]
specialx_vals=x_vals
maskedy_vals=fy_vals
vals=[]
for i in range(0, len(fy_vals)):
#Case 1: 1/0 , or zoo, which is an Asymptote
if (fy_vals[i] == zoo):
vals.append(x_vals[i])
#Case 2: 0/0, or nan, which is a Hole
elif (fy_vals[i] == nan):
vals.append(x_vals[i])
#graph derivative
gy_vals=differentiate(x_vals,fy_vals)
maskedgy_vals=gy_vals
x_vals=x_vals[0:len(x_vals)-1]
for i in range(0, len(vals)):
maskedgy_vals = M.masked_where(x_vals == vals[i], maskedgy_vals)
maskedgy_vals = M.masked_where(x_vals == x_vals[0], maskedgy_vals)
mpl.plot(x_vals, maskedgy_vals,color="orange")#orange
if isnumeric(a) and isnumeric(b):
a=float(a)
b=float(b)
integrate(f,a,b)
#graph 2nd derivative
hy_vals=differentiate(x_vals,gy_vals)
maskedhy_vals=hy_vals
x_vals=x_vals[0:len(x_vals)-1]
for i in range(0, len(vals)):
maskedhy_vals = M.masked_where(x_vals == vals[i], maskedhy_vals)
maskedhy_vals= M.masked_where(x_vals==x_vals[0],maskedhy_vals)
maskedhy_vals= M.masked_where(x_vals==x_vals[1],maskedhy_vals)
mpl.plot(x_vals, maskedhy_vals, color="green")#green
#graph discontinuities
lastFunction = simplify(f)
print(lastFunction)
for i in range(0, len(fy_vals)):
#Case 1: 1/0 , or zoo, which is an Asymptote
if (fy_vals[i] == zoo):
print("it's asymptote!")
maskedy_vals = M.masked_where(specialx_vals == specialx_vals[i], maskedy_vals)
mpl.axvline(x=specialx_vals[i], color='r')
#Case 2: 0/0, or nan, which is a Hole
elif (fy_vals[i] == nan):
print("it's a hole!")
maskedy_vals = M.masked_where(specialx_vals == specialx_vals[i], maskedy_vals)
mpl.plot(specialx_vals[i],lastFunction.subs(x, specialx_vals[i]),color="black",marker="p")
mpl.plot(specialx_vals, maskedy_vals,color="blue")#plots#blue
#find extrema
#extrema are graphed as pink pentagons
basically_zero = 1*10**-4
#print(x_vals)
for i in range(0,len(gy_vals)-1):
if (not(gy_vals[i]== nan or gy_vals[i]==zoo)) and abs(gy_vals[i])<basically_zero:
if gy_vals[i-1]>basically_zero and gy_vals[i+1]<(basically_zero*-1):
mpl.plot(x_vals[i],maskedy_vals[i],color="#FF00FC",marker="p")
print("extrema:")
print(x_vals[i])
print(maskedy_vals[i])
if gy_vals[i-1]<(-1*basically_zero) and gy_vals[i+1]>basically_zero:
mpl.plot(x_vals[i],maskedy_vals[i],color="#FF00FC",marker="p")
print("extrema:")
print(x_vals[i])
print(maskedy_vals[i])
elif (not(gy_vals[i]== nan or gy_vals[i]==zoo)) and (not(gy_vals[i+1]== nan or gy_vals[i+1]==zoo)) and gy_vals[i]>0 and gy_vals[i+1]<0:
xbetween=np.arange(x_vals[i],x_vals[i+1],0.00001)
betweenvals=[]
fbetween=[]
for i in xbetween:
fbetween.append(f.subs(x,i))
maskedfbetween=fbetween
for i in range(0,len(maskedfbetween)):
if maskedfbetween[i] == zoo or maskedfbetween == nan:
betweenvals.append(xbetween[i])
for i in range(0, len(betweenvals)):
maskedfbetween = M.masked_where(xbetween == betweenvals[i], maskedfbetween)
gbetween=differentiate(xbetween,fbetween)
for i in range(0,len(gbetween)-1):
if abs(gbetween[i])<basically_zero:
if gbetween[i-1]>0 and gbetween[i+1]<0:
mpl.plot(xbetween[i],maskedfbetween[i],color="#FF00FC",marker="p")
print("extrema:")
print(xbetween[i])
print(maskedfbetween[i])
if gbetween[i-1]<0 and gbetween[i+1]>0:
mpl.plot(xbetween[i],maskedfbetween[i],color="#FF00FC",marker="p")
print("extrema:")
print(xbetween[i])
print(maskedfbetween[i])
elif (not(gy_vals[i]== nan or gy_vals[i]==zoo)) and (not(gy_vals[i+1]== nan or gy_vals[i+1]==zoo)) and gy_vals[i]<0 and gy_vals[i+1]>0:
xbetween=np.arange(x_vals[i],x_vals[i+1],0.00001)
betweenvals=[]
fbetween=[]
for i in xbetween:
fbetween.append(f.subs(x,i))
maskedfbetween=fbetween
for i in range(0,len(maskedfbetween)):
if maskedfbetween[i] == zoo or maskedfbetween == nan:
betweenvals.append(xbetween[i])
for i in range(0, len(betweenvals)):
maskedfbetween = M.masked_where(xbetween == betweenvals[i], maskedfbetween)
gbetween=differentiate(xbetween,fbetween)
for i in range(0,len(gbetween)-1):
if abs(gbetween[i])<basically_zero:
if gbetween[i-1]>0 and gbetween[i+1]<0:
mpl.plot(xbetween[i],maskedfbetween[i],color="#FF00FC",marker="p")
print("extrema:")
print(xbetween[i])
print(maskedfbetween[i])
if gbetween[i-1]<0 and gbetween[i+1]>0:
mpl.plot(xbetween[i],maskedfbetween[i],color="#FF00FC",marker="p")
print("extrema:")
print(xbetween[i])
print(maskedfbetween[i])
#find inflection points
#inflection points are graphed as green stars
for i in range(0,len(hy_vals)-1):
if (not(hy_vals[i]== nan or hy_vals[i]==zoo)) and abs(hy_vals[i])<basically_zero:
if not(hy_vals[i-1]== nan or hy_vals[i+1]==zoo):
if hy_vals[i-1]>basically_zero and hy_vals[i+1]<(basically_zero*-1):
mpl.plot(x_vals[i],maskedy_vals[i],color="#FF00FC",marker="*")
print("inflection:")
print(x_vals[i])
print(maskedy_vals[i])
if hy_vals[i-1]<(-1*basically_zero) and hy_vals[i+1]>basically_zero:
mpl.plot(x_vals[i],maskedy_vals[i],color="#FF00FC",marker="*")
print("inflection:")
print(x_vals[i])
print(maskedy_vals[i])
elif (not(hy_vals[i]== nan or hy_vals[i]==zoo)) and (not(hy_vals[i+1]== nan or hy_vals[i+1]==zoo)) and hy_vals[i]>0 and hy_vals[i+1]<0:
xbetween=np.arange(x_vals[i],x_vals[i+1],0.00001)
betweenvals=[]
fbetween=[]
for i in xbetween:
fbetween.append(f.subs(x,i))
maskedfbetween=fbetween
for i in range(0,len(maskedfbetween)):
if maskedfbetween[i] == zoo or maskedfbetween == nan:
betweenvals.append(xbetween[i])
gbetween=differentiate(xbetween,fbetween)
xbetween=xbetween[0:len(xbetween)-1]
maskedgbetween=gbetween
for i in range(0, len(betweenvals)):
maskedgbetween = M.masked_where(xbetween == betweenvals[i], maskedgbetween)
hbetween=differentiate(xbetween,gbetween)
for i in range(0,len(hbetween)-1):
if abs(hbetween[i])<basically_zero:
if hbetween[i-1]>0 and hbetween[i+1]<0:
mpl.plot(xbetween[i],maskedfbetween[i],color="#FF00FC",marker="*")
print("inflection:")
print(xbetween[i])
print(maskedfbetween[i])
if hbetween[i-1]<0 and hbetween[i+1]>0:
mpl.plot(xbetween[i],maskedfbetween[i],color="#FF00FC",marker="*")
print("inflection:")
print(xbetween[i])
print(maskedfbetween[i])
elif (not(hy_vals[i]== nan or hy_vals[i]==zoo)) and (not(hy_vals[i+1]== nan or hy_vals[i+1]==zoo)) and hy_vals[i]<0 and hy_vals[i+1]>0:
xbetween=np.arange(x_vals[i],x_vals[i+1],0.00001)
betweenvals=[]
fbetween=[]
for i in xbetween:
fbetween.append(f.subs(x,i))
maskedfbetween=fbetween
for i in range(0,len(maskedfbetween)):
if maskedfbetween[i] == zoo or maskedfbetween == nan:
betweenvals.append(xbetween[i])
gbetween=differentiate(xbetween,fbetween)
xbetween=xbetween[0:len(xbetween)-1]
maskedgbetween=gbetween
for i in range(0, len(betweenvals)):
maskedgbetween = M.masked_where(xbetween == betweenvals[i], maskedgbetween)
hbetween=differentiate(xbetween,gbetween)
for i in range(0,len(hbetween)-1):
if abs(hbetween[i])<basically_zero:
if hbetween[i-1]>0 and hbetween[i+1]<0:
mpl.plot(xbetween[i],maskedfbetween[i],color="#FF00FC",marker="*")
print("inflection:")
print(xbetween[i])
print(maskedfbetween[i])
if hbetween[i-1]<0 and hbetween[i+1]>0:
mpl.plot(xbetween[i],maskedfbetween[i],color="#FF00FC",marker="*")
print("inflection:")
print(xbetween[i])
print(maskedfbetween[i])
mpl.axhline(color="black")
mpl.axvline(color="black")
mpl.xlim(-5,5)
mpl.ylim(-10, 10)
mpl.grid(b=True)#sets grid
mpl.show()
def isnumeric(s):
try:
float(s)
return True
except ValueError:
return False
def differentiate(x,y): #takes in arguments: list of x values and list of y values
gy_vals=[] #makes list of derivative values
for n in range(1,len(x)-1): #loops through x values
hi=(y[n+1]-y[n-1])/(x[n+1]-x[n-1]) #finds the slope using the values around it
gy_vals.append(hi) #adds it to the list
gy_vals.insert(0,gy_vals[0])
return gy_vals #returns derivative values
##############integrate function
def integrate(f,a,b): #takes in arguments: function f, lower bound a, and upper bound b
import numpy.ma as M
import numpy as np
from numpy import linspace
from sympy.parsing.sympy_parser import parse_expr
import matplotlib.pyplot as mpl
x = symbols('x')
#generates list of x values in small increments in between a and b
if a<b:
xvals=np.arange(a,b,0.001)
if a>b:
xvals=np.arange(b,a,0.001)
if a==b:
return
fyvals=[]
for i in xvals:
fyvals.append(f.subs(x,i)) #creates list of y vals that correspont to x vals
gyvals=differentiate(xvals,fyvals) #gets list of derivative values
xvals=xvals[0:len(xvals)-1]
sum=0
for n in range(0,len(xvals)-1): #finds areas of trapezoids and then adds them to sum
trapezoid=0.5*(xvals[n+1]-xvals[n])*(gyvals[n]+gyvals[n+1])
sum+=trapezoid
ftc=f.subs(x,b)-f.subs(x,a) #calculates f(b)-f(b) which should be equal to the sum
thing="%s = %s" %(sum,ftc) #puts the equality on the graph
mpl.text(0,0,thing)
def evaluate(event):
input= entry1.get()
a=entrya.get()
b=entryb.get()
graph(input,a,b)
w = Tk()
w.title("Graphing Calculator")
Label(w, text="Your Expression:").pack()
entry1 = Entry(w)
entry1.bind("<Return>", evaluate)
entry1.pack()
Label(w, text="a:").pack()
entrya = Entry(w)
entrya.bind("<Return>", evaluate)
entrya.pack()
Label(w, text="b:").pack()
entryb = Entry(w)
entryb.bind("<Return>", evaluate)
entryb.pack()
Label(w, text="MARKERS:").pack()
Label(w, text="Extrema are graphed as pink pentagons").pack()
Label(w, text="Inflection points are graphed as pink stars").pack()
Label(w, text="Asymptotes graphed as a red line").pack()
Label(w, text="Holes graphed as black pentagons").pack()
res = Label(w)
res.pack()
w.mainloop()
|
[
"noreply@github.com"
] |
noreply@github.com
|
eb78517e4f6aebbbb42fb241163e359f5183bcfc
|
9feb3d3c59113506cbd5e86aef84f42fb257f165
|
/Session04/cross_check.py
|
79654247efdbdc93feafa2e1ae9dc9a5d95a1dc6
|
[] |
no_license
|
reanimation47/ledoananhquan-fundametal-c4e13
|
112a67bbb3093e62f9c53d9f0e7fc57870384fdd
|
6e1e576702c4d68e0aa33a3b21a193b112fa70b6
|
refs/heads/master
| 2021-09-03T11:03:52.598663
| 2018-01-08T14:33:34
| 2018-01-08T14:33:34
| 108,993,661
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 112
|
py
|
# l = [0, 1, 2, 3, 4]
n = 4 #0,1,2,3
for i in range(n-1):
for j in range(i + 1, n):
print(i,"vs",j)
|
[
"reanimation47@gmail.com"
] |
reanimation47@gmail.com
|
f3bf9cff8bd771da7387a3f9128836a954960113
|
661437f8881d9eb5b1d1fd0e28591c27e074326e
|
/Python/interesting/NOT_SUPPORT/parallel/Process_condition.py
|
97a9623051b5c7c0966f9736904a7dccaf913910
|
[] |
no_license
|
gnosiop/fromPhone
|
c16c03bac3a3920a4daaa1f8b41dacfede257de0
|
ac427cf125573319c2adcc437b6286ea1da372f8
|
refs/heads/master
| 2023-08-14T23:46:14.919026
| 2021-09-25T08:11:38
| 2021-09-25T08:11:38
| 404,894,533
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 575
|
py
|
import multiprocessing
cv = multiprocessing.Condition()
def produce():
with cv:
for i in range(1, 6):
print(i)
cv.notify()
# как только один поток выполнил операцию
# он уведомил другой и "разбудил" его
def consume():
with cv:
cv.wait(timeout=2)
# время на "пробуждение"
print("второй поток")
t2 = multiprocessing.Process(target=consume)
t1 = multiprocessing.Process(target=produce)
t1.start()
t2.start()
|
[
"idoodi@ya.ru"
] |
idoodi@ya.ru
|
2033b58ac5c7b829f095eafefba8e88a252ab286
|
5cb6907c93b4d8d4efdc318ccb44bc7dd2f7789f
|
/factors.py
|
3eddb30138797bc77b43b0c51a24523dd64128b6
|
[] |
no_license
|
malempati0/malempati00
|
d4ab09cad24ee9cfe5b4b8c20c7ba8fc414bd31c
|
b951bd215cb79a04305bdc5039d6b7597725d3ee
|
refs/heads/master
| 2020-04-07T07:11:13.201614
| 2018-11-26T04:45:58
| 2018-11-26T04:45:58
| 158,167,178
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 120
|
py
|
def print_factors(x):
for i in range(1, x + 1):
if x % i == 0:
print(i)
num = 6
print_factors(num)
|
[
"noreply@github.com"
] |
noreply@github.com
|
c38344d14b0ab7fc67752b5c676ed3fb625393cf
|
414c0bd290c264a55dc03d0657c5f812914bf050
|
/ApnaBazaar/urls.py
|
7ae85e3480a521c384db656e40d4b499b0ea12c2
|
[] |
no_license
|
sakshamsin09/ApnaBazaar
|
b8af9aea2538d69f27ef80caeb05df3d10d08993
|
02c2a69e6950d8e81ec0bc51e7247f96c2f3f77a
|
refs/heads/main
| 2023-07-09T14:20:40.971602
| 2021-08-24T10:57:16
| 2021-08-24T10:57:16
| 383,352,840
| 0
| 0
| null | 2021-08-22T13:30:28
| 2021-07-06T05:42:14
|
HTML
|
UTF-8
|
Python
| false
| false
| 824
|
py
|
"""ApnaBazaar URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from django.urls.conf import include
urlpatterns = [
path('admin/', admin.site.urls),
path('', include('app.urls')),
]
|
[
"sakshamsinghal09@gmail.com"
] |
sakshamsinghal09@gmail.com
|
6b21ff7967cabb367eae29730fcc4d5cd9aee141
|
78adcbb441d703c64553c09f6e08ae01a6d95ad0
|
/main.py
|
c59c9f7a49d137b8ca303ed85c45e90d9e858529
|
[] |
no_license
|
victor369basu/MongoDBFlask
|
8dfa6fb576a23e6d47f7211fbb2949bbdbfcddbf
|
c7de5027e655b8768d11f0c4184aeb5bfa9ccd17
|
refs/heads/master
| 2023-03-04T04:18:31.649237
| 2023-02-21T12:08:39
| 2023-02-21T12:08:39
| 303,291,017
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,647
|
py
|
from typing import Optional
import uvicorn
from fastapi import FastAPI, Request
from MongoAPI import MongoAPI
import json
app = FastAPI()
@app.get("/")
async def base():
return {'response':{"Status": "Health Check!"},
'status':200,
'mimetype':'application/json'
}
@app.get('/mongodb')
async def mongo_read(info : Request):
'''
Reading the data when a request is sent using the GET HTTP Method
'''
data = await info.json()
if data is None or data == {}:
return {'response': {"Error": "Please provide connection information"},
'status':400,
'mimetype':'application/json'}
obj = MongoAPI(data)
response = obj.read()
return {'response':response,
'status':200,
'mimetype':'application/json'}
@app.post('/mongodb')
async def mongo_write(info : Request):
'''
Writing the data when a request is sent using the POST HTTP Method.
'''
data = await info.json()
print(data['Document'])
if data is None or data == {} or 'Document' not in data:
return {'response': {"Error": "Please provide connection information"},
'status':400,
'mimetype':'application/json'}
obj = MongoAPI(data)
response = obj.write(data)
return {'response':response,
'status':200,
'mimetype':'application/json'}
@app.put('/mongodb')
async def mongo_update(info : Request):
'''
Updating the data when a request is sent using the PUT HTTP Method.
'''
data = await info.json()
if data is None or data == {} or 'DataToBeUpdated' not in data:
return {'response': {"Error": "Please provide connection information"},
'status':400,
'mimetype':'application/json'}
obj = MongoAPI(data)
response = obj.update()
return {'response':response,
'status':200,
'mimetype':'application/json'}
@app.delete('/mongodb')
async def mongo_delete(info : Request):
'''
Deleting the data when a request is sent using the DELETE HTTP Method.
'''
data = await info.json()
if data is None or data == {} or 'Filter' not in data:
return {'response': {"Error": "Please provide connection information"},
'status':400,
'mimetype':'application/json'}
obj = MongoAPI(data)
response = obj.delete(data)
return {'response':response,
'status':200,
'mimetype':'application/json'}
# if __name__ == '__main__':
# uvicorn.run("main:app", host="0.0.0.0", port=8000, reload=True, access_log=False)
|
[
"victor.basu@lumiq.ai"
] |
victor.basu@lumiq.ai
|
f7e150fe2c2755c2ee7c29b21367a36323defc16
|
e8977e740aa31a501e0be51ab9df159e1e9834d7
|
/Main.py
|
da7f5e22c36be88b80fa54d361a9836d5f4eb343
|
[] |
no_license
|
bharatkumar7/stuff
|
67a06e552aa6839f596e508cf26973df36574ad2
|
99cb02e6926d6465e4773df4c178f278d9bc7fa6
|
refs/heads/master
| 2021-01-10T14:09:25.814402
| 2016-01-13T11:03:36
| 2016-01-13T11:03:36
| 49,219,047
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 16,119
|
py
|
'''# -*- coding: utf-8 -*- If you are just trying to use UTF-8 characters or don't care if they are in your code, add this line'''
# NOTE(review): Python 2 script (bare `print` statements) that also imports
# matplotlib.finance, which was removed in matplotlib >= 2.2 — it only runs
# on an old Python 2 / matplotlib stack.
# Purpose: backtests a simple candle-direction long/short strategy over a
# tick or OHLC data file, tracks P&L per trade, and optionally plots the
# candles with entry/exit markers.
import numpy as np
import time,os,talib
import matplotlib.pyplot as plt
from matplotlib.finance import candlestick_ohlc, candlestick2_ohlc
from matplotlib import gridspec,colors
from math import pi
import funct
import mibian
import pandas as pd
from pandas import *
stock_data=1 # 1 - Streamline data, 2 - OHLC data
tickskip=1 #1 denotes every tick
colm=0
# This must be dump 123456789
for k in range(0,1):
    if k==0:fname="code07.01.16.txt"
    #if k==0:fname="1sec_close.txt"
    # --- Backtest configuration for this data file -------------------------
    status = 0
    pruy=0
    AccountSize=1000000
    maxtick=25
    target_profit=1.02 #Risk to Reward ratio 3:1
    stop_loss=0.97 #stop loss when entering a position
    tpf=target_profit
    tsl=stop_loss
    graph=1 #1 - Yes, 0 - None
    tick_analysis_display=1 #1 - Yes , 0 - No
    exchange_charges=8 # 16.69 for equities. 90 for option. 8 for futures
    file_format=2 # 1- OHLC format, 2 - Live ticker format(Time OHLC), 3 - Live ticker format(Range OHLC)
    tf_sec='30s'
    initcap=AccountSize
    #----------------------- File reader ------------------------------------------------------------------------------------------------------
    # Loads the chosen file format and produces numb bars plus y-axis limits
    # (xoi/yoi) padded slightly beyond the observed low/high for plotting.
    if file_format==1:
        tedhi,price_open,price_high,price_low,price_close, vol=(np.loadtxt(fname, dtype=float, delimiter=',', usecols=(1,3,4,5,6,7), skiprows=1,unpack=True))
        numb=len(price_open)
        xoi=np.min(price_low)
        yoi=np.max(price_high)
        xoi=xoi-(xoi*0.05)
        yoi=yoi+(yoi*0.05)
    elif file_format==2:
        # Live ticker: resample raw ticks into tf_sec OHLC bars with pandas.
        # NOTE(review): resample(how=..., fill_method=...) is the old pandas
        # API, removed in modern pandas — confirm the pinned pandas version.
        tedhi,dummy=(np.loadtxt(fname, dtype=str, delimiter=',', usecols=(0,1),skiprows=1,unpack=True))
        cl_price, vol=(np.loadtxt(fname, dtype=float, delimiter=',', usecols=(15,2), skiprows=1,unpack=True))
        d={'Datetime': Series(to_datetime(tedhi.astype(int)*1e9)),
           'price': Series((cl_price)),
           'volume':Series((vol))}
        df=DataFrame(d)
        df.set_index('Datetime', inplace=True)
        vol_sum = (df['volume'].resample(tf_sec, how='sum',fill_method='backfill',limit=0)).dropna(how='any', axis=0)
        price_ohlc = (df['price'].resample(tf_sec, how='ohlc',fill_method='backfill',limit=0)).dropna(how='any', axis=0)
        numb=len(price_ohlc)
        xoi=np.min(price_ohlc.low)
        yoi=np.max(price_ohlc.high)
        xoi=xoi-(xoi*0.001)
        yoi=yoi+(yoi*0.001)
    elif file_format==3:
        # Range bars: bar size (rangeper) scales with the instrument's price.
        cl_price, vol=(np.loadtxt(fname, dtype=float, delimiter=',', usecols=(11,16), skiprows=1,unpack=True))
        if cl_price[0]<=25:rangeper=3.5/100 #constant range percent "0.01" represents 1%
        elif cl_price[0]<=50:rangeper=2.50/100
        elif cl_price[0]<=100:rangeper=2.0/100
        elif cl_price[0]<=250:rangeper=2.5/100
        else: rangeper=0.001/100
        price_open,price_high,price_low,price_close=funct.range_bar(cl_price,vol,rangeper)
        numb=len(price_close)
        xoi=np.min(price_low)
        yoi=np.max(price_high)
        xoi=xoi-(xoi*0.001)
        yoi=yoi+(yoi*0.001)
    #----------------------- END OF FILE READER ------------------------------------------------------------------------------------------------------
    # Rolling window buffers (maxtick bars) fed to the indicators each step.
    tdata_ltp=np.zeros(maxtick,'f')
    tdata_vol=np.zeros(maxtick,'f')
    tdata_op=np.zeros(maxtick,'f')
    tdata_hi=np.zeros(maxtick,'f')
    tdata_lo=np.zeros(maxtick,'f')
    buy=np.full(numb-maxtick,-10) #full fills all the array with the defned number "-10" this case
    bstime=np.arange(0,numb-maxtick) #arange puts all real numbers in order 1,2,3
    sell=np.full(numb-maxtick,-10)
    short=np.full(numb-maxtick,-10)
    cover=np.full(numb-maxtick,-10)
    alltrades=np.zeros(numb-maxtick)
    PLP=np.zeros(numb-maxtick)
    PL=np.zeros(numb-maxtick)
    AS=np.zeros(numb-maxtick)
    minute=np.arange(0,numb)
    trade=0
    fp=0.0 #final percentage
    pot=AccountSize
    broker=0.0
    AS[0]=AccountSize
    kfp=0.0
    ktrades=0
    kpot=0.0
    tech_one=np.zeros(numb-maxtick,'f')
    tech_two=np.zeros(numb-maxtick,'f')
    tech_three=np.zeros(numb-maxtick,'f')
    flatornot=np.zeros(numb-maxtick,'f')
    #flatornot_ema=np.zeros(numb-maxtick,'f')
    # ---------------------- Analysis -----------------------------------
    # Walk the data one bar at a time, refilling the maxtick-wide window and
    # recomputing indicators on it (O(numb * maxtick) by design).
    for i in range(0,numb-maxtick):
        bstime[i]=i-1+maxtick
        if file_format==1 or file_format==3:
            for j in range(i,maxtick+i):
                tdata_ltp[j-i]=price_close[j]
                tdata_vol[j-i]=vol[j]
                tdata_op[j-i]=price_open[j]
                tdata_hi[j-i]=price_high[j]
                tdata_lo[j-i]=price_low[j]
        #time.sleep(2)
        if file_format==2:
            for j in range(i,maxtick+i):
                tdata_ltp[j-i]=price_ohlc.close[j]
                tdata_vol[j-i]=vol_sum[j]
                tdata_op[j-i]=price_ohlc.open[j]
                tdata_hi[j-i]=price_ohlc.high[j]
                tdata_lo[j-i]=price_ohlc.low[j]
        # talib requires float64 input; rebuild the windows as plain arrays.
        float_data = [float(x) for x in tdata_ltp]
        tdata_ltp = np.array(float_data)
        #float_data = [float(x) for x in tdata_ltp_nifty]
        #tdata_ltp_nifty = np.array(float_data)
        float_data = [float(x) for x in tdata_vol]
        tdata_vol = np.array(float_data)
        float_data = [float(x) for x in tdata_op]
        tdata_op = np.array(float_data)
        float_data = [float(x) for x in tdata_hi]
        tdata_hi = np.array(float_data)
        float_data = [float(x) for x in tdata_lo]
        tdata_lo = np.array(float_data)
        # Indicator set actually used below: KAMA trend filter and ATR-based
        # volatility bands; the EMAs are computed but only some are consumed.
        upordown_ltp = talib.EMA(tdata_ltp,5)
        upordown_ltpl = talib.EMA(tdata_ltp,15)
        upordown_ltplong = talib.EMA(upordown_ltpl,10)
        kmav = talib.KAMA(tdata_ltp,10)
        upordown_kmav = talib.EMA(kmav,10)
        atrv = talib.ATR(tdata_hi, tdata_lo, tdata_ltp,timeperiod=5)
        #upordown_ltplong = talib.EMA(tdata_ltp,40)
        '''macd, macdsignal, macdhist = talib.MACD(tdata_ltp, fastperiod=6, slowperiod=13, signalperiod=4)
        upordown_vol = talib.EMA(tdata_vol,5)
        rocv = talib.ROC(tdata_ltp,5)
        tanv = talib.TAN(tdata_ltp)
        rsiv=talib.RSI(tdata_ltp, 14)
        tanv = talib.TAN(tdata_ltp)'''
        #bbuv, bbmv, bblv = talib.BBANDS(tdata_ltp, timeperiod=5, nbdevup=1, nbdevdn=1, matype=0)
        #adxv=talib.ADX(tdata_hi, tdata_lo, tdata_ltp, timeperiod=14)
        '''cciv=talib.CCI(tdata_hi, tdata_lo, tdata_ltp, timeperiod=5)
        ultoscv=talib.ULTOSC(tdata_hi, tdata_lo, tdata_ltp, timeperiod1=5, timeperiod2=10, timeperiod3=15)
        willrv=talib.WILLR(tdata_hi, tdata_lo, tdata_ltp, timeperiod=5)
        midpointv = talib.MIDPOINT(tdata_ltp, timeperiod=5)
        momv=talib.MOM(tdata_ltp, timeperiod=10)
        stfastkv, stfastdv = talib.STOCHF(tdata_hi, tdata_lo, tdata_ltp, fastk_period=5, fastd_period=3, fastd_matype=0)
        strsifastkv, strsifastdv = talib.STOCHRSI(tdata_ltp, timeperiod=14, fastk_period=5, fastd_period=3, fastd_matype=0)
        '''
        # 14-bar highest high / lowest low of the window (excluding last bar).
        hhv=np.max(tdata_hi[maxtick-14:maxtick-1])
        llv=np.min(tdata_lo[maxtick-14:maxtick-1])
        latest=maxtick-1
        # flatornot: non-zero when KAMA has trended >0.1%/bar for 3 bars.
        if (kmav[latest]>1.001*kmav[latest-1]>1.001*kmav[latest-2]>1.001*kmav[latest-3]) or \
        (kmav[latest]<0.999*kmav[latest-1]<0.999*kmav[latest-2]<0.999*kmav[latest-3]):flatornot[latest]=kmav[latest]
        else: flatornot[latest]=0
        # Volatility bands: 2*ATR below recent high / above recent low.
        Uvolt=(hhv-(2.0*atrv[latest]))
        Dvolt=(llv+(2.0*atrv[latest]))
        #---- FOR PLOT -----------
        if graph==1:
            tech_one[i]= Uvolt #hhv-(3*atrv[latest])#upordown_ltplong[latest]
            tech_two[i]=Dvolt #atrv[latest-1]
            tech_three[i]=flatornot[latest]#adxv[latest] #atrv[latest-1]
        #print tech_three[i]
        #d = mibian.GK([8395, 8700, 6, 0, 11], volatility=15.7)
        #print d.callPrice
        # ------------------------ ENTRY --------------------------------------
        #-------------------Uptrend ------------------------------
        '''if (status == 0 and tdata_hi[latest]<tdata_hi[latest-1]) and \
        (status == 0 and tdata_hi[latest]<tdata_hi[latest-2]) and \
        (status == 0 and tdata_hi[latest]<tdata_hi[latest-3]) and \
        (status == 0 and upordown_ltplong[latest]>upordown_ltplong[latest-1]) :'''
        #if (status == 0 and upordown_ltpl[latest]>upordown_ltpl[latest-1]):
        # Long entry: two consecutive up candles while price is above the
        # lower volatility band and we are flat (status 0).
        if (status == 0 and tdata_ltp[latest]>tdata_op[latest] and tdata_ltp[latest-1]>tdata_op[latest-1]) and \
        (status == 0 and tdata_ltp[latest]>Dvolt):
            trend=1
            if flatornot[latest]>0: trend=1
            if(trend>=1):
                status=1
                lot_size,StopPrice=funct.PositionSizing_Method(status,AccountSize,stop_loss,tdata_ltp[latest],atrv[latest],Uvolt,Dvolt,alltrades,PL,i)
                leftamt=AccountSize-(tdata_ltp[latest]*lot_size)
                if (tick_analysis_display==1):print " BUY - %.2f, Investment - %.2f, Lots - %.2f, Accountsize - %.2f, StopPrice - %.2f"%(tdata_ltp[latest],(lot_size*tdata_ltp[latest]),lot_size,AccountSize,StopPrice)
                bprice=tdata_ltp[latest]
                if graph==1:buy[i]=bprice
                tprice=bprice
        #-------------------Down trend (sell buy strategy)------------------------------
        '''if (status == 0 and tdata_hi[latest]>tdata_hi[latest-1]) and \
        (status == 0 and tdata_hi[latest]>tdata_hi[latest-2]) and \
        (status == 0 and tdata_hi[latest]>tdata_hi[latest-3]) and \
        (status == 0 and upordown_ltplong[latest]<upordown_ltplong[latest-1]) :'''
        #if (status == 0 and upordown_ltpl[latest]<upordown_ltpl[latest-1]):
        # Short entry: mirror of the long entry (two down candles below the
        # upper volatility band); status 2 marks an open short.
        if (status == 0 and tdata_ltp[latest]<tdata_op[latest] and tdata_ltp[latest-1]<tdata_op[latest-1]) and \
        (status == 0 and tdata_ltp[latest]<Uvolt):
            trend=1
            if flatornot[latest]>0: trend=1
            if(trend>=1):
                status=2
                lot_size,StopPrice=funct.PositionSizing_Method(status,AccountSize,stop_loss,tdata_ltp[latest],atrv[latest],Uvolt,Dvolt,alltrades,PL,i)
                leftamt=AccountSize-(tdata_ltp[latest]*lot_size)
                if (tick_analysis_display==1):print " Short - %.2f, Investment - %.2f, Lots - %.2f, Accountsize - %.2f, StopPrice - %.2f"%(tdata_ltp[latest],(lot_size*tdata_ltp[latest]),lot_size,AccountSize,StopPrice)
                stprice=tdata_ltp[latest]
                if graph==1:short[i]=stprice
                tprice=stprice
        # ------------------------ EXIT STRATEGY -----------------------------------------
        #------------------------ Trailing Stop Loss ---------------------------
        # SELL trailing for uptrend buy strategy
        '''if (status == 1 and ((tdata_ltp[latest] >= bprice*target_profit))): # if only bought
            bprice=tdata_ltp[latest]
            target_profit=1.0025
            stop_loss=0.9975'''
        #if (status == 1 and tick_analysis_display==1): print "%d, LTP - %.2f, StopPrice - %.2f, Uvolt- %.2f"%(i+maxtick,tdata_ltp[latest],StopPrice,Uvolt)
        # --------------------------SELL STRATEGY --------------------------------------------------------
        '''if (status == 1 and tdata_ltp[latest]<Uvolt) or \
        (status == 1 and i==(numb-maxtick-1)) or \
        (status == 1 and StopPrice > tdata_ltp[latest]) or \
        (status == 1 and stop_loss*tprice > tdata_ltp[latest]) '''
        # Long exit on two consecutive down candles; books P&L net of
        # brokerage and resets status/targets for the next trade.
        if (status == 1 and tdata_ltp[latest]<tdata_op[latest] and tdata_ltp[latest-1]<tdata_op[latest-1]):
        #(status == 1 and tdata_ltp[latest] >= tprice*target_profit) :
            bk_amt=((tprice+tdata_ltp[latest])*lot_size)
            broker=(bk_amt*exchange_charges/100000.0) # 16.69 for equities. 90 for option. 8 for futures
            PL[i]=(lot_size*(tdata_ltp[latest]-tprice))-broker
            PLP[i]=(PL[i]*100/(tprice*lot_size))
            fp=fp+PLP[i]
            pot=pot+PL[i]
            trade=trade+1
            if PLP[i]>0.0: alltrades[i]=1
            elif PLP[i]<0.0:alltrades[i]=-1
            else:pass
            if graph==1:sell[i]=tdata_ltp[latest]
            if (tick_analysis_display==1):print " SELL - %.2f, Percentage %.2f"%(tdata_ltp[latest],PLP[i])
            if (tick_analysis_display==1):print "-------------------------------"
            stop_loss=tsl
            target_profit=tpf
            AccountSize=leftamt+PL[i]+(tprice*lot_size)
            AS[i]=AccountSize
            status=0
        #----------------------------------------------
        # Cover trailing for downtrend
        '''if (status == 2 and ((tdata_ltp[latest]*target_profit <= stprice))): # if only bought
            stprice=tdata_ltp[latest]
            target_profit=1.0025
            stop_loss=0.9975'''
        #if (status == 2 and tick_analysis_display==1): print "%d, LTP - %.2f, StopPrice - %.2f,Dvolt- %.2f"%(i+maxtick,tdata_ltp[latest],StopPrice,Dvolt)
        # -----------------COVER strategy ---------------------------------------
        '''if (status == 2 and tdata_ltp[latest]>Dvolt) or \
        (status == 2 and i==(numb-maxtick-1)) or \
        (status == 2 and StopPrice < tdata_ltp[latest]) or \
        (status == 2 and tprice < stop_loss*tdata_ltp[latest]) or \ '''
        # Short exit (cover) on two consecutive up candles; P&L is negated
        # relative to the long case since profit comes from price falling.
        if (status == 2 and tdata_ltp[latest]>tdata_op[latest] and tdata_ltp[latest-1]>tdata_op[latest-1]):
        #(status == 2 and tdata_ltp[latest]*target_profit <= tprice):
            bk_amt=((tprice+tdata_ltp[latest])*lot_size)
            broker=(bk_amt*exchange_charges/100000.0) # 16.69 for equities. 90 for option. 8 for futures
            PL[i]=-((lot_size*(tdata_ltp[latest]-tprice))+broker)
            PLP[i]=(PL[i]*100/(tprice*lot_size))
            fp=fp+PLP[i]
            pot=pot+PL[i]
            trade=trade+1
            if PLP[i]>0.0: alltrades[i]=1
            elif PLP[i]<0.0:alltrades[i]=-1
            else:pass
            if graph==1:cover[i]=tdata_ltp[latest]
            if (tick_analysis_display==1):print " Cover - %.2f, Percentage %.2f"%(tdata_ltp[latest],PLP[i])
            if (tick_analysis_display==1):print "-------------------------------"
            stop_loss=tsl
            target_profit=tpf
            AccountSize=leftamt+PL[i]+(tprice*lot_size)
            AS[i]=AccountSize
            status=0
        if AS[i]==0:AS[i]=AccountSize
    #-------------------------------------------------------------------
    #if trade>=20 and (pot-broker>=3 or pot-broker<=-15):break
    #if fp<=0 and trade>5: break
    #print "-----------------------------------------------------------------"
    #print " %d. Final Per = %.2f,Final Amt = %.2f, Trades = %d"%(k+1,fp, pot,trade)
    #print "-----------------------------------------------------------------"
    # Accumulate per-file totals (useful when the k loop covers many files).
    kfp=kfp+fp
    kpot=kpot+pot
    ktrades=ktrades+trade
#print "-------------------------------"
#print " %d. File= %s, Final Per = %.2f,Final Amt = %.2f, Trades = %d"%(k+1, fname,kfp,kpot,ktrades)
print " %d. Final Amt = %.2f, Trades = %d"%(k+1, kpot,ktrades)
#---------------Expectancy ---------------------------
funct.Expectancy(alltrades,PLP,PL,numb-maxtick)
#------------- PLOTS ----------------------------------------
if graph==1:
    #---------------- CANDLE STICK PLOTS ----------------------------------------------------------------------------
    quotes=np.zeros((numb-1,5))
    for i in range(0,numb-1):
        if file_format==1 or file_format==3:quotes[i]=(minute[i],price_open[i],price_high[i],price_low[i],price_close[i])
        if file_format==2:quotes[i]=(minute[i],price_ohlc.open[i],price_ohlc.high[i],price_ohlc.low[i],price_ohlc.close[i])
    #axes = plt.gca()
    #axes.set_xlim([0,numb])
    #axes.set_ylim([xoi,yoi])
    fig, ax1 = plt.subplots()
    ax2 = ax1.twinx()
    ax1.set_xlim([0,numb])
    ax2.set_xlim([0,numb])
    ax1.set_ylim([xoi,yoi])
    ax2.set_ylim([xoi,yoi])
    candlestick_ohlc(ax1,quotes,width=0.6, colorup=u'g', colordown=u'r', alpha=1.0)
    #ax2.plot(minute,price_ohlc.close,'gray',bstime,buy,'ko', marker=r'$\downarrow$', markersize=20,bstime,sell,'ro',bstime,short,'r*',bstime,cover,'g*')
    # Entry/exit markers are nudged +/-1 price unit so they sit beside the
    # candle instead of on top of it.
    ax2.plot(bstime,buy-1,'go', marker=r'$\Uparrow$', markersize=8)
    ax2.plot(bstime,sell+1,'ro', marker=r'$\Downarrow$', markersize=8)
    ax2.plot(bstime,short+1,'ro', marker=r'$\blacktriangledown$', markersize=8)
    ax2.plot(bstime,cover-1,'go', marker=r'$\blacktriangle$', markersize=8)
    ax2.plot(bstime,tech_one,'blue',bstime,tech_two,'orange')
    plt.fill_between(bstime,tech_three,facecolor='seagreen', alpha=0.5, interpolate=True)
    plt.grid(b=True, which='major', color='grey', linestyle='--')
    plt.show()
    #---------------------------------------- NORMAL LINE PLOTS ----------------------------------------------------
    '''
    plt.figure(1)
    gs = gridspec.GridSpec(2, 1, height_ratios=[3, 1])
    ax1=plt.subplot(gs[0])
    axes = plt.gca()
    axes.set_xlim([0,numb])
    axes.set_ylim([xoi,yoi])
    #figure,ax1 = plt.subplots()
    ax2 = ax1.twinx()
    if file_format==1 or file_format==3:ax1.plot(minute,price_close,'black',bstime,buy,'go',bstime,sell,'ro',bstime,short,'bo',bstime,cover,'ro',bstime,tech_one,'seagreen',bstime,tech_two,'lightcoral')#,bstime,tech_three,'bo')
    if file_format==2:ax1.plot(minute,price_ohlc.close,'black',bstime,buy,'go',bstime,sell,'ro',bstime,short,'bo',bstime,cover,'ro',bstime,tech_one,'seagreen',bstime,tech_two,'lightcoral')#,bstime,tech_three,'bo')
    #ax2.plot(bstime,tech_three,'b-')
    plt.fill_between(bstime,tech_three,facecolor='seagreen', alpha=0.5, interpolate=True)
    plt.grid(b=True, which='major', color='grey', linestyle='-')
    plt.subplot(gs[1])
    axes = plt.gca()
    axes.set_xlim([0,numb])
    plt.plot(bstime,AS,'r-')
    plt.grid(b=True, which='major', color='grey', linestyle='-')
    plt.show() '''
|
[
"bharatkumar7@gmail.com"
] |
bharatkumar7@gmail.com
|
eab0841d48237a5fda2b01c56721707bb39a40fb
|
156cac8bf5192a3d0b93a105539f4e6d5108fa1c
|
/ciyunapi/ciyunapi/asgi.py
|
b9e43c8bfd8ab576339ef2ba6040afa333af0c34
|
[] |
no_license
|
yutu-75/ciyun
|
220de2c4e4c6365145c16d3bf634838ecb9921ca
|
c25b2555ac527097d4d27380ae75a47670f42687
|
refs/heads/main
| 2023-05-07T08:46:18.285524
| 2021-05-31T13:22:49
| 2021-05-31T13:22:49
| 365,187,271
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 413
|
py
|
"""
ASGI config for ciyunapi project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'ciyunapi.settings.dev')
application = get_asgi_application()
|
[
"xiao3952@foxmail.com"
] |
xiao3952@foxmail.com
|
d92df5cd630581d42b06e50bdc1070c5d414a17c
|
9647524c0f4d93fb1c8a992c20fe9f9d2710cde3
|
/2-content/Python/intro_programming-master/scripts/remove_input_references.py
|
2ab8878b1a362f079adf49a971ef71aa7677a4ea
|
[
"MIT"
] |
permissive
|
bgoonz/web-dev-notes-resource-site
|
16161aa68e8eecafeaba4dc7abeb957aaee864c5
|
e7dc9c30393597cb39830c49c3f51c1486b97584
|
refs/heads/master
| 2023-09-01T14:04:20.867818
| 2021-06-17T07:56:20
| 2021-06-17T07:56:20
| 329,194,347
| 7
| 5
|
MIT
| 2021-07-05T06:36:49
| 2021-01-13T04:34:20
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 1,306
|
py
|
# This script removes the input reference numbers from html pages.
# They play a useful role in scientific notebooks, but they are really
# just visual clutter in this project.
# Could be an nbconvert setting, but it's an easy enough scripting job.
import os
import sys
print("\nStripping input reference numbers from code cells...")
# Find all files to work with.
path_to_notebooks = '/srv/projects/intro_programming/intro_programming/notebooks/'
filenames = []
for filename in os.listdir(path_to_notebooks):
if '.html' in filename and filename != 'index.html':
filenames.append(filename)
# one file for testing:
#filenames = ['hello_world.html']
for filename in filenames:
f = open(path_to_notebooks + filename, 'r')
lines = f.readlines()
f.close()
f = open(path_to_notebooks + filename, 'wb')
for line in lines:
# Unwanted lines have opening and closing div on same line,
# with input reference number between them.
if ('<div class="prompt input_prompt">' in line
and '</div>' in line):
# Don't write this line.
continue
else:
# Regular line, write it.
f.write(line.encode('utf-8'))
f.close()
print(" Stripped input reference numbers.\n")
|
[
"bryan.guner@gmail.com"
] |
bryan.guner@gmail.com
|
6753822442fee034044704f8fce55be9c1448475
|
4128e5c41fabbe2289ea7d7faae3d970d0244514
|
/jeffy/sdk/kinesis.py
|
c8e3b18282e2c439b629b5950a6a35cb64a9d3c2
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
sinofseven/jeffy-my-extended
|
d772e6a95e9551de637d5ec70dca1d65f27e60d5
|
036ef14e8be5c93f19af4fd0012cc482a77717bb
|
refs/heads/master
| 2021-01-15T06:07:33.383973
| 2020-02-25T03:37:42
| 2020-02-25T03:37:42
| 242,897,880
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,065
|
py
|
import boto3
import json
from typing import Any
class Kinesis():
    """
    Kinesis Client.

    Lazily creates and caches a single boto3 Kinesis client shared by all
    callers for the lifetime of the process.
    """
    # Cached boto3 client; populated on first call to get_resource().
    _resource = None
    @classmethod
    def get_resource(cls) -> boto3.client:
        """
        Get boto3 client for Kinesis.
        Usage::
            >>> from jeffy.sdk.kinesis import Kinesis
            >>> Kinesis.get_resource().put_record(...)
        """
        if Kinesis._resource is None:
            Kinesis._resource = boto3.client('kinesis')
        return Kinesis._resource
    @classmethod
    def put_record(cls, stream_name: str, data: Any, partition_key: str, correlation_id: str = ''):
        """
        Put record to Kinesis Stream with correlation_id.

        The payload is JSON-encoded as {"correlation_id": ..., "item": data},
        so ``data`` must be JSON-serializable.
        Usage::
            >>> from jeffy.sdk.kinesis import Kinesis
            >>> Kinesis.put_record(...)
        """
        return cls.get_resource().put_record(
            StreamName=stream_name,
            Data=json.dumps({
                'correlation_id': correlation_id,
                'item': data
            }),
            PartitionKey=partition_key,
        )
|
[
"info@serverless-operations.com"
] |
info@serverless-operations.com
|
d0a3f8fea955cd6b7239c30eb4bde72572683e27
|
f2f88a578165a764d2ebb4a022d19e2ea4cc9946
|
/pyvisdk/do/guest_authentication.py
|
f16ac39d82372db0665b605fca27476d5d281d82
|
[
"MIT"
] |
permissive
|
pombredanne/pyvisdk
|
1ecc68a1bf264095f72f274c776e5868fb302673
|
de24eb4426eb76233dc2e57640d3274ffd304eb3
|
refs/heads/master
| 2021-01-21T16:18:39.233611
| 2014-07-28T19:50:38
| 2014-07-28T19:50:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,039
|
py
|
import logging
from pyvisdk.exceptions import InvalidArgumentError
########################################
# Automatically generated, do not edit.
########################################
# Module-level logger named after this generated module.
log = logging.getLogger(__name__)
def GuestAuthentication(vim, *args, **kwargs):
    '''GuestAuthentication is an abstract base class for authentication in the guest.

    Creates the ns0:GuestAuthentication factory object and assigns its
    properties from positional arguments (required first, then optional)
    and keyword arguments.

    :param vim: connected service instance whose client factory builds the object
    :raises IndexError: when no property values are supplied at all
    :raises InvalidArgumentError: when a keyword is not a recognized property
    '''
    obj = vim.client.factory.create('ns0:GuestAuthentication')

    # do some validation checking...
    if (len(args) + len(kwargs)) < 1:
        # Message now matches the check: the original claimed "at least 2"
        # while testing for < 1, and under-reported by ignoring kwargs.
        raise IndexError('Expected at least 1 argument got: %d' % (len(args) + len(kwargs)))

    required = [ 'interactiveSession' ]
    optional = [ 'dynamicProperty', 'dynamicType' ]

    # Positional args fill required properties first, then optional ones.
    for name, arg in zip(required+optional, args):
        setattr(obj, name, arg)

    # Keyword args may target any known property; anything else is an error.
    for name, value in kwargs.items():
        if name in required + optional:
            setattr(obj, name, value)
        else:
            raise InvalidArgumentError("Invalid argument: %s. Expected one of %s" % (name, ", ".join(required + optional)))

    return obj
|
[
"guy@rzn.co.il"
] |
guy@rzn.co.il
|
d384f24b5c0b0b257f66b1db1a63854c59b95395
|
3e4c69317323bca865b025503b60bf83d3ae65f8
|
/tests/server/blueprints/variants/test_variant_views_variant.py
|
c1fd7fe078f8967099df90b24cb215c5a79a60ac
|
[
"BSD-3-Clause"
] |
permissive
|
tapaswenipathak/scout
|
f59beaa997a45487ac96c3b3e560b5e5aa9b30ae
|
c9b3ec14f5105abe6066337110145a263320b4c5
|
refs/heads/master
| 2020-05-30T11:13:25.662300
| 2019-05-28T09:26:25
| 2019-05-28T09:26:25
| 189,694,812
| 1
| 0
|
BSD-3-Clause
| 2019-06-01T05:36:35
| 2019-06-01T05:36:34
| null |
UTF-8
|
Python
| false
| false
| 1,207
|
py
|
# -*- coding: utf-8 -*-
import logging
from flask import url_for
# Module-level logger for this test module.
log = logging.getLogger(__name__)
def test_server_variant(app, real_adapter):
    """The variant view should render (HTTP 200) for a logged-in user."""
    # GIVEN an initialized app
    # GIVEN a valid user, institute, case and variant
    adapter = real_adapter
    variant_obj = adapter.variant_collection.find_one()
    assert variant_obj
    with app.test_client() as client:
        # GIVEN that the user could be logged in
        resp = client.get(url_for('auto_login'))
        assert resp.status_code == 200
        internal_case_id = variant_obj['case_id']
        case = adapter.case(internal_case_id)
        case_name = case['display_name']
        owner = case['owner']
        # NOTE needs the actual document_id, not the variant_id
        variant_id = variant_obj['_id']
        log.debug('Inst {} case {} variant {}'.format(owner,case_name,
                                                      variant_id))
        # WHEN accessing the variant page
        resp = client.get(url_for('variants.variant',
                                  institute_id=owner,
                                  case_name=case_name,
                                  variant_id=variant_id))
        # Lazy %-style formatting: logging never interpolates "{}", so the
        # previous log.debug("{}", resp.data) printed the braces literally.
        log.debug("%s", resp.data)
        # THEN it should return a page
        assert resp.status_code == 200
|
[
"rasi.chiara@gmail.com"
] |
rasi.chiara@gmail.com
|
068ea44d67a7cb5384e48561163e2762d5fce31c
|
8565e0f12bb11e14096964eeef3e34535c513d7f
|
/LunarisBot.py
|
029724af17fd46a914baa8e334a908ee3611a044
|
[] |
no_license
|
No17Namsan/NightSkyK
|
2be02740cd3520bf8216abe6069721a39ee0ad4b
|
73aa3890780814726f5dc23bfe4416f75df200cb
|
refs/heads/main
| 2023-07-18T04:41:12.381368
| 2021-09-06T11:21:00
| 2021-09-06T11:21:00
| 403,484,701
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,855
|
py
|
import asyncio
import discord
import random
import re
import urllib.request
import urllib.parse
from urllib.parse import quote
import json
from datetime import datetime, timedelta
# No intents argument: written against the pre-1.5 discord.py API.
client = discord.Client()
# Runs once when the bot has connected and is ready.
@client.event
async def on_ready():
    print("Logged in as ") # print the bot's id and nickname to the console
    print(client.user.name)
    print(client.user.id)
    print(datetime.now())
    print("===========")
# Runs whenever the bot receives a new message.
@client.event
async def on_message(message):
    """Dispatch chat commands and keyword reactions for an incoming message."""
    if message.author.bot: # if the sender is a bot,
        return None # ignore the message and do nothing.
    # Emoji/response tables rebuilt on every message (cheap, but they could
    # live at module scope).
    dice = ['<:d1:683941256711241744>', '<:d2:683941257000910869>', '<:d3:683941256606253068>',
            '<:d4:683941256937734203>', '<:d5:683941256862105620>', '<:d6:683941256891858964>']
    food = ['워..... 워.... 네 체중을 생각해!', '너구리 순한맛! (그후 봇을 볼 수 없었다고 한다)', '굶어ㅋㅋ', '피자사주세요!', '오늘은 치느님을 영접할 시간입니다!',
            '갓스터치가 있는데 버거킹이 없을리가 없잖아! 버거킹?',
            '밥버거', '집밥이 최고!', '빵야! 빵야! 빵!', '루나랑 라면먹고 가실래요?',
            '수르스트뢰밍 https://namu.wiki/w/%EC%88%98%EB%A5%B4%EC%8A%A4%ED%8A%B8%EB%A2%B0%EB%B0%8D',
            '진리 콩 까네~ 진리 콩 까네~ 칠리 콘 카르네~', '김밥말아서 소풍가요!', '스시....? 라고 불리는 초밥!',
            '이봐, 친구! 그거 알아? 레몬 한 개엔 자그마치 레몬 한 개 분량의 비타민 C가 있다는 놀라운 사실을!', '일단 3D안경을 쓰고..:popcorn:',
            '부어라! 마셔라! 죽어라! :beer:', '도넛!',
            '커피는 좋은 도핑제입니다.', '넌... 아직... 준비가... 안되었다..!:baby_bottle: ', '후헤헤헷 숙제(일)을 머거랑 헤헷', '까까먹어', '빵과 계란후라이!',
            ':pig2: 족발! 돼지고기! 보쌈!', ':fish: 회?', '술에 부대찌개먹고싶... 아니 부대찌개에 술마시고싶다.', '어제도 오늘도 내일도 마라탕. 당연한거잖아요?',
            '떡뽀끼? 떡뽁이? 알게뭐람, 떡볶이 주세요!', '버거킹이 있는데 갓스터치가 없을리가 없잖아! 갓스터치!', '워워... 진정해..! 빡친 당신을 위한 엽떡을 가져왔다구!',
            '발은 역시 닭발이지!:chicken:', '말해 뭐해 곱창이 최고 아니야?', '삶은 감자.... Life is Egg... Egg...?', '아야어여오요**우유** :milk:',
            '쌀국수 뚝배기!', '아... 시리얼에 우유부어먹고싶다... ... ...?', '풀리와 웰시가 맛나게 먹는 밀웜 한번 먹어보실?', '민트초코가 치약맛일까 치약이 민트초코맛일까?']
    do = ['잠만보처럼 잠만 자던가! :zzz:', '톰 아저씨의 무지개 여섯 공성할래?', '데스티니 가디언즈는 죽었지만 우리 케이드는 마음 속에 살아있어!',
          '생존마 낚으러 희생제갈까나~ 살인마 괴롭히러 희생제갈까나~',
          'WINNER WINNER CHICKEN DINNER!', '느껴지지않아..? 우리의 심장에 뛰고있는 이 뜨거운 :cy:가!', '역시 힐링은 마인크래프트', '나만 없어 모동숲...ㅠ',
          '오늘도 싱글벙글 롤 협곡생활!',
          '우리집에서 넷플릭스보고갈래?(으브븝)', '밥머겅 많이머겅', '저 오늘떠나요~ 공항으로~ :airplane:', 'TFT 모바일 ㄷㄷㄷㅈ, ㅇㅍㄷㄷ', '타르코프에서 도망쳐! 도망치라구!']
    fates = [':spades:', ':clubs:', ':diamonds:', ':hearts:']
    fatecall = ['!합기', '!gkqrl', '!GKQRL']
    num = [':regional_indicator_a:', ':two:', ':three:', ':four:', ':five:', ':six:', ':seven:', ':eight:', ':nine:',
           ':keycap_ten:', ':regional_indicator_j:', ':regional_indicator_q:', ':regional_indicator_k:']
    result = []
    eat = ['!뭐먹지', '!뭐먹지?', '!머먹지?', '!머먹지', '!멀먹징?', '!멀먹징', '!뭐먹징?', '!뭐먹징', '!뭐먹제?', '!뭐먹지?', '!뭐먹']
    doing = ['!뭐하지?', '!뭐하지', '!뭐할까?', '!뭐할까']
    up = ['!업', '!djq', '!DJQ', '!up', '!UP']
    meow = ['애옹', '야옹', '먀옹', 'meow', 'moew', '냐오', '냐옹', '냥', '미야옹', '마오', '앩옹', '이얏호응', '애-옹', '야-옹']
    meowe = ['<:meow1:682071155943014415>', '<:meow2:682071408540647563>', '<:meow3:684983336178810888>',
             '<:meow4:684983336824733697>', '<:meow5:684984172963692545>']
    blackcow = ['음머', '살고시퍼여ㅠㅠㅠ', '음머어어어어어엉']
    guild = message.author.guild # the guild (server) the author belongs to
    textchannel = message.channel # the channel the message arrived on
    member = message.author
    now = datetime.now()
    # Each command below logs (member, guild, time, command) to stdout and
    # replies in the originating channel.
    if message.content == ('!패치노트'):
        print(member, guild, now, '!패치노트')
        print('==========')
        await textchannel.send(embed=discord.Embed(title='Ver.1.0.3a',
                                                   description='0.재구동 시작했습니다.\n',
                                                   colour=0xe3da13))
    if message.content == ("!도움말"):
        print(member, now, guild, '!도움말')
        print('==========')
        await textchannel.send(embed=discord.Embed(title='도움말',
                                                   description='1. !루나리스: 봇이 인사를 합니다.\n2. !뭐먹지?, !머먹지?, !멀먹징?, !뭐먹징?, !뭐먹제?: 봇이 음식을 추천합니다.\n3. 야옹, 애옹, 냥 등등: 고양이 이모지를 가져옵니다!\n'
                                                               '4. !n(dDㅇ)N: N면체 주사위를 n개 던집니다. (N=1~6,n=1~9)',
                                                   colour=0xe3da13))
    if message.content.startswith('!루나리스'): # greeting
        print(member, guild, now, '!루나리스')
        print('==========')
        await textchannel.send('안녕하세요. 여러분!')
        return None
    if message.content in doing:
        print(member, guild, now, '!뭐하지?')
        print('==========')
        await textchannel.send(do[random.randint(0, len(do) - 1)])
    if message.content in eat:
        print(member, guild, now, '!뭐먹지?')
        print('==========')
        await textchannel.send(food[random.randint(0, len(food) - 1)])
    if message.content in meow:
        print(member, guild, now, '야옹')
        print('==========')
        await textchannel.send(meowe[random.randint(0, len(meowe) - 1)])
    if message.content.startswith("!갈고리"):
        print(member, guild, now, '!갈고리')
        print('==========')
        await textchannel.send(
            '<:QuestionSpam:767992761491259443><:QuestionSpam:767992761491259443><:QuestionSpam:767992761491259443><:QuestionSpam:767992761491259443><:QuestionSpam:767992761491259443>')
    if message.content == ("!나스"):
        print(member, guild, now, '!나스')
        print('==========')
        await textchannel.send("<:NBSB:766596746762649621> sp? 잠깐만요. 아니, 잠깐만 sp?")
    if message.content == ("!나스바보"):
        print(member, guild, now, '!나스바보')
        print('==========')
        await textchannel.send(
            "<:NBSB:766596746762649621> ㄴ <:UnIm:684328036065476613> ㄱ <:NBSB:766596746762649621>")
    if message.content == ("!나바스보"):
        print(member, guild, now, '!나바스보')
        print('==========')
        await textchannel.send(
            "<:NBSB:766596746762649621><:NBSB:766596746762649621>\n<:NBSB:766596746762649621><:NBSB:766596746762649621>")
    if message.content.startswith("!멜라"):
        print(member, guild, now, '!멜라')
        print('==========')
        await textchannel.send('<:D2Ghost:685817640366768174> **현실**을 사세요, 수호자!!')
    if message.content.startswith("!흑우"):
        print(member, guild, now, '!흑우')
        print('==========')
        await textchannel.send(blackcow[random.randint(0, len(blackcow) - 1)])
    if message.content.startswith("!힐카"):
        print(member, guild, now, '!힐카')
        print('==========')
        await textchannel.send('힐카는 힝해')
    # Dice roll: "!<count><d/D/ㅇ><sides>" rolls <count> dice of <sides> faces.
    dice_set = re.findall('^!([0-9]+)[dDㅇ]([1-6])$', message.content)
    if len(dice_set) != 0:
        dice_set = dice_set[0]
        for _ in range(int(dice_set[0])):
            result.append(dice[random.randint(0, int(dice_set[1]) - 1)])
        print(member, now, guild, '!주사위', result)
        print('==========')
        await textchannel.send(result)
    # if message.content in fatecall:
    #     result = (fates[random.randint(0, 3)]), (num[random.randint(0, 12)])
    #     print(member, guild, now, '!합기', result)
    #     print('==========')
    #     await textchannel.send(result)
    # if message.content in up:
    #     result = (dice[random.randint(0, 5)])
    #     print(member, guild, now, '!업', result)
    #     print('==========')
    #     await textchannel.send(result)
    if message.content.startswith("!이스포츠"):
        print(member, guild, now, '!이스포츠')
        print('==========')
        await textchannel.send(embed=discord.Embed(title='이스포츠 일정 정리 21년 07월 4주차'
                                                   , colour=0xe3da13))
client.run("Discord_API")
|
[
"noreply@github.com"
] |
noreply@github.com
|
dfc0cc855a774de8fa89bf5d0af2e7761c1399da
|
cf0ab8503d4d704045070deea1e2125375711e86
|
/apps/apikeys/v1/urls.py
|
1a8b15c264dc105260d2432da2775b98a3fb3a99
|
[] |
no_license
|
faierbol/syncano-platform
|
c3c6468600115752fd9fa5e46a0ad59f75f6bc9c
|
879111874d1ef70418b4890cf970720b0a2be4d8
|
refs/heads/master
| 2023-07-20T10:13:40.066127
| 2021-02-08T15:01:13
| 2021-02-08T15:01:13
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 198
|
py
|
# coding=UTF8
from rest_framework.routers import SimpleRouter
from apps.apikeys.v1 import views
# DRF router exposing ApiKeyViewSet under the 'api_keys' URL prefix.
router = SimpleRouter()
router.register('api_keys', views.ApiKeyViewSet)
# Module-level URL patterns picked up by Django's include().
urlpatterns = router.urls
|
[
"rk@23doors.com"
] |
rk@23doors.com
|
93013a6c44645ef61cb45e633030c20663c3fde6
|
8ef8e6818c977c26d937d09b46be0d748022ea09
|
/cv/classification/torchvision/pytorch/train.py
|
1c16c81bc51ace035a2653350c088a3888b0904f
|
[
"Apache-2.0"
] |
permissive
|
Deep-Spark/DeepSparkHub
|
eb5996607e63ccd2c706789f64b3cc0070e7f8ef
|
9d643e88946fc4a24f2d4d073c08b05ea693f4c5
|
refs/heads/master
| 2023-09-01T11:26:49.648759
| 2023-08-25T01:50:18
| 2023-08-25T01:50:18
| 534,133,249
| 7
| 6
|
Apache-2.0
| 2023-03-28T02:54:59
| 2022-09-08T09:07:01
|
Python
|
UTF-8
|
Python
| false
| false
| 15,577
|
py
|
# Copyright (c) 2022 Iluvatar CoreX. All rights reserved.
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import warnings
# Silence every warning globally (including deprecation noise from torch).
warnings.filterwarnings('ignore')
import datetime
import os
import logging
import time
import torch
import torch.utils.data
try:
    # Optional NVIDIA Apex mixed precision; None when Apex is not installed.
    from apex import amp as apex_amp
except:
    apex_amp = None
try:
    # Native torch AMP; one module-level GradScaler is shared by the trainer.
    from torch.cuda.amp import autocast, GradScaler
    scaler = GradScaler()
except:
    autocast = None
    scaler = None
from torch import nn
import torch.distributed as dist
import torchvision
import utils
from utils import (MetricLogger, SmoothedValue, accuracy, mkdir,\
init_distributed_mode, manual_seed,\
is_main_process, save_on_master, write_on_master)
from dataloader.classification import get_datasets, create_dataloader
def compute_loss(model, image, target, criterion):
    """Run a forward pass and sum the criterion over every model output.

    Models with auxiliary heads may return a tuple/list of outputs; the
    loss is the sum of the criterion applied to each, and the primary
    (first) output is returned alongside it.
    """
    preds = model(image)
    if not isinstance(preds, (tuple, list)):
        preds = [preds]
    total_loss = sum(criterion(pred, target) for pred in preds)
    return total_loss, preds[0]
def train_one_epoch(model, criterion, optimizer, data_loader, device, epoch, print_freq, use_amp=False, use_dali=False):
    """Train *model* for one epoch; returns the epoch-average throughput (img/s).

    use_amp enables apex loss scaling; use_dali switches the batch layout to
    DALI's list-of-dicts format.
    """
    model.train()
    metric_logger = MetricLogger(delimiter="  ")
    metric_logger.add_meter('lr', SmoothedValue(window_size=1, fmt='{value}'))
    metric_logger.add_meter('img/s', SmoothedValue(window_size=10, fmt='{value}'))
    header = 'Epoch: [{}]'.format(epoch)
    all_fps = []
    for data in metric_logger.log_every(data_loader, print_freq, header):
        # DALI yields [{"data": ..., "label": ...}]; plain loaders yield (image, target).
        if use_dali:
            image, target = data[0]["data"], data[0]["label"][:, 0].long()
        else:
            image, target = data
        start_time = time.time()
        image, target = image.to(device, non_blocking=True), target.to(device, non_blocking=True)
        loss, output = compute_loss(model, image, target, criterion)
        if use_amp:
            # apex dynamic loss scaling wraps the backward pass.
            with apex_amp.scale_loss(loss, optimizer) as scaled_loss:
                scaled_loss.backward()
        else:
            loss.backward()
        optimizer.step()
        optimizer.zero_grad()
        end_time = time.time()
        acc1, acc5 = accuracy(output, target, topk=(1, 5))
        batch_size = image.shape[0]
        metric_logger.update(loss=loss.item(), lr=optimizer.param_groups[0]["lr"])
        metric_logger.meters['acc1'].update(acc1.item(), n=batch_size)
        metric_logger.meters['acc5'].update(acc5.item(), n=batch_size)
        # Per-iteration throughput scaled to the whole (possibly distributed) job.
        fps = batch_size / (end_time - start_time) * utils.get_world_size()
        metric_logger.meters['img/s'].update(fps)
        all_fps.append(fps)
    fps = round(sum(all_fps) / len(all_fps), 2)
    print(header, 'Avg img/s:', fps)
    return fps
def evaluate(model, criterion, data_loader, device, print_freq=100, use_dali=False):
    """Evaluate *model* on *data_loader*; returns top-1 accuracy rounded to 2 dp."""
    model.eval()
    metric_logger = MetricLogger(delimiter="  ")
    header = 'Test:'
    with torch.no_grad():
        for data in metric_logger.log_every(data_loader, print_freq, header):
            # Same dual batch layout as train_one_epoch (DALI vs plain loader).
            if use_dali:
                image, target = data[0]["data"], data[0]["label"][:, 0].long()
            else:
                image, target = data
            image = image.to(device, non_blocking=True)
            target = target.to(device, non_blocking=True)
            output = model(image)
            loss = criterion(output, target)
            acc1, acc5 = accuracy(output, target, topk=(1, 5))
            # FIXME need to take into account that the datasets
            # could have been padded in distributed setup
            batch_size = image.shape[0]
            metric_logger.update(loss=loss.item())
            metric_logger.meters['acc1'].update(acc1.item(), n=batch_size)
            metric_logger.meters['acc5'].update(acc5.item(), n=batch_size)
    # gather the stats from all processes
    metric_logger.synchronize_between_processes()
    print(' * Acc@1 {top1.global_avg:.3f} Acc@5 {top5.global_avg:.3f}'
          .format(top1=metric_logger.acc1, top5=metric_logger.acc5))
    return round(metric_logger.acc1.global_avg, 2)
def _get_cache_path(filepath):
import hashlib
h = hashlib.sha1(filepath.encode()).hexdigest()
cache_path = os.path.join("~", ".torch", "vision", "datasets", "imagefolder", h[:10] + ".pt")
cache_path = os.path.expanduser(cache_path)
return cache_path
def create_optimzier(params, args):
    """Build the optimizer selected by ``args.opt`` (case-insensitive).

    Supported values: ``sgd``, ``rmsprop``, and ``fused_sgd`` (the latter
    requires apex and is imported lazily). Raises RuntimeError otherwise.
    """
    opt_name = args.opt.lower()
    if opt_name == 'sgd':
        optimizer = torch.optim.SGD(params, lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay)
    elif opt_name == 'rmsprop':
        # eps/alpha follow common ImageNet RMSprop training settings.
        optimizer = torch.optim.RMSprop(params, lr=args.lr, momentum=args.momentum,
                                        weight_decay=args.weight_decay, eps=0.0316, alpha=0.9)
    elif opt_name == "fused_sgd":
        # Lazy import so apex is only needed when fused_sgd is requested.
        from apex.optimizers import FusedSGD
        optimizer = FusedSGD(params, lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay)
    else:
        # BUG FIX: the original message omitted FusedSGD even though it is handled above.
        raise RuntimeError("Invalid optimizer {}. Only SGD, RMSprop and FusedSGD are supported.".format(args.opt))
    return optimizer
def main(args):
    """Full training driver: data, model, optimizer, epoch loop, checkpoints.

    Expects args from get_args_parser() after check_agrs(); writes best.pth /
    latest.pth and result.json under args.output_dir when set.
    """
    init_distributed_mode(args)
    print(args)
    device = torch.device(args.device)
    manual_seed(args.seed, deterministic=args.deterministic)
    # WARN:
    if dist.is_initialized():
        num_gpu = dist.get_world_size()
    else:
        num_gpu = 1
    global_batch_size = num_gpu * args.batch_size
    train_dir = os.path.join(args.data_path, 'train')
    val_dir = os.path.join(args.data_path, 'val')
    # One class per subdirectory of train/.
    num_classes = len(os.listdir(train_dir))
    # Cap the global batch size on small datasets to avoid non-convergence.
    if 0 < num_classes < 13:
        if global_batch_size > 512:
            if is_main_process():
                print("WARN: Updating global batch size to 512, avoid non-convergence when training small dataset.")
            args.batch_size = 512 // num_gpu
    if args.pretrained:
        # Torchvision pretrained checkpoints carry an ImageNet-1k head.
        num_classes = 1000
    args.num_classes = num_classes
    print("Creating model")
    # An injected model factory (via train_model) takes precedence over torchvision.
    if hasattr(args, "model_cls"):
        model = args.model_cls(args)
    else:
        model = torchvision.models.__dict__[args.model](pretrained=args.pretrained, num_classes=num_classes)
    if args.padding_channel:
        print("WARN: Cannot convert first conv to N4HW.")
    data_loader, data_loader_test = create_dataloader(train_dir, val_dir, args)
    # Channel padding is only applied for plain PyTorch DataLoaders (not DALI).
    if args.padding_channel and isinstance(data_loader, torch.utils.data.DataLoader):
        data_loader.collate_fn = utils.nhwc_collect_fn(data_loader.collate_fn, fp16=args.amp, padding=args.padding_channel)
        data_loader_test.collate_fn = utils.nhwc_collect_fn(data_loader_test.collate_fn, fp16=args.amp, padding=args.padding_channel)
    model.to(device)
    if args.distributed and args.sync_bn:
        model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)
    criterion = nn.CrossEntropyLoss()
    if args.nhwc:
        model = model.cuda().to(memory_format=torch.channels_last)
    optimizer = create_optimzier(model.parameters(), args)
    if args.amp:
        # apex O2: FP16 compute with FP32 master weights and dynamic loss scaling.
        model, optimizer = apex_amp.initialize(model, optimizer, opt_level="O2",
                                               loss_scale="dynamic",
                                               master_weights=True)
    lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=args.lr_step_size, gamma=args.lr_gamma)
    model_without_ddp = model
    if args.distributed:
        model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])
        model_without_ddp = model.module
    if args.resume:
        checkpoint = torch.load(args.resume, map_location='cpu')
        model_without_ddp.load_state_dict(checkpoint['model'])
        optimizer.load_state_dict(checkpoint['optimizer'])
        lr_scheduler.load_state_dict(checkpoint['lr_scheduler'])
        args.start_epoch = checkpoint['epoch'] + 1
    if args.test_only:
        evaluate(model, criterion, data_loader_test, device=device)
        return
    print("Start training")
    start_time = time.time()
    best_acc1 = 0
    best_epoch = 0
    for epoch in range(args.start_epoch, args.epochs):
        epoch_start_time = time.time()
        if args.distributed and not args.dali:
            data_loader.sampler.set_epoch(epoch)
        fps = train_one_epoch(model, criterion, optimizer, data_loader, device, epoch, args.print_freq, args.amp, use_dali=args.dali)
        lr_scheduler.step()
        acc1 = evaluate(model, criterion, data_loader_test, device=device, use_dali=args.dali)
        if acc1 > best_acc1:
            best_acc1 = acc1
            best_epoch = epoch
        # NOTE(review): checkpoints are written every epoch, so best.pth does not
        # actually track the best epoch; 'best.pth'.format(epoch) is also a no-op
        # format call — confirm intent.
        if args.output_dir is not None:
            checkpoint = {
                'model': model_without_ddp.state_dict(),
                'optimizer': optimizer.state_dict(),
                'lr_scheduler': lr_scheduler.state_dict(),
                'epoch': epoch,
                'args': args}
            save_on_master(
                checkpoint,
                os.path.join(args.output_dir, 'best.pth'.format(epoch)))
            save_on_master(
                checkpoint,
                os.path.join(args.output_dir, 'latest.pth'))
        epoch_total_time = time.time() - epoch_start_time
        epoch_total_time_str = str(datetime.timedelta(seconds=int(epoch_total_time)))
        print('epoch time {}'.format(epoch_total_time_str))
        if args.dali:
            # DALI pipelines must be reset between epochs.
            data_loader.reset()
            data_loader_test.reset()
    total_time = time.time() - start_time
    total_time_str = str(datetime.timedelta(seconds=int(total_time)))
    print('* Acc@1: {} at epoch {}'.format(round(best_acc1, 2), best_epoch))
    print('Training time {}'.format(total_time_str))
    if args.output_dir:
        # Summarize the run for downstream result collection.
        write_on_master({"Name":os.path.basename(args.output_dir),
                         "Model": args.model, "Dataset": os.path.basename(args.data_path), "AMP":args.amp,
                         "Acc@1":best_acc1, "FPS":fps, "Time": total_time_str}, os.path.join(args.output_dir, 'result.json'))
def get_args_parser(add_help=True):
    """Build the CLI argument parser for classification training.

    Declaration order is kept as-is because it defines the --help output.
    """
    import argparse
    parser = argparse.ArgumentParser(description='PyTorch Classification Training', add_help=add_help)
    # Data / model / schedule
    parser.add_argument('--data-path', default='/datasets01/imagenet_full_size/061417/', help='dataset')
    parser.add_argument('--model', default='resnet18', help='model')
    parser.add_argument('--device', default='cuda', help='device')
    parser.add_argument('-b', '--batch-size', default=32, type=int)
    parser.add_argument('--epochs', default=90, type=int, metavar='N',
                        help='number of total epochs to run')
    parser.add_argument('-j', '--workers', default=4, type=int, metavar='N',
                        help='number of data loading workers (default: 4)')
    parser.add_argument('--opt', default='sgd', type=str, help='optimizer')
    parser.add_argument('--lr', default=0.128, type=float, help='initial learning rate')
    parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
                        help='momentum')
    parser.add_argument('--wd', '--weight-decay', default=1e-4, type=float,
                        metavar='W', help='weight decay (default: 1e-4)',
                        dest='weight_decay')
    parser.add_argument('--lr-step-size', default=30, type=int, help='decrease lr every step-size epochs')
    parser.add_argument('--lr-gamma', default=0.1, type=float, help='decrease lr by a factor of lr-gamma')
    parser.add_argument('--print-freq', default=10, type=int, help='print frequency')
    parser.add_argument('--output-dir', default=None, help='path where to save')
    parser.add_argument('--resume', default='', help='resume from checkpoint')
    parser.add_argument('--start-epoch', default=0, type=int, metavar='N',
                        help='start epoch')
    # Boolean feature switches
    parser.add_argument(
        "--cache-dataset",
        dest="cache_dataset",
        help="Cache the datasets for quicker initialization. It also serializes the transforms",
        action="store_true",
    )
    parser.add_argument(
        "--sync-bn",
        dest="sync_bn",
        help="Use sync batch norm",
        action="store_true",
    )
    parser.add_argument(
        "--deterministic",
        help="Do not benchmark conv algo",
        action="store_true",
    )
    parser.add_argument(
        "--test-only",
        dest="test_only",
        help="Only test the model",
        action="store_true",
    )
    parser.add_argument(
        "--pretrained",
        dest="pretrained",
        help="Use pre-trained models from the modelzoo",
        action="store_true",
    )
    parser.add_argument('--auto-augment', default=None, help='auto augment policy (default: None)')
    parser.add_argument('--random-erase', default=0.0, type=float, help='random erasing probability (default: 0.0)')
    parser.add_argument(
        "--dali",
        help="Use dali as dataloader",
        default=False,
        action="store_true",
    )
    # distributed training parameters
    parser.add_argument('--local_rank', default=-1, type=int,
                        help='Local rank')
    parser.add_argument('--world-size', default=1, type=int,
                        help='number of distributed processes')
    parser.add_argument('--dist-url', default='env://', help='url used to set up distributed training')
    # other
    parser.add_argument('--amp', action='store_true', help='Automatic Mixed Precision training')
    parser.add_argument('--nhwc', action='store_true', help='Use NHWC')
    parser.add_argument('--padding-channel', action='store_true', help='Padding the channels of image to 4')
    parser.add_argument('--dali-cpu', action='store_true')
    parser.add_argument('--seed', default=42, type=int, help='Random seed')
    parser.add_argument('--crop-size', default=224, type=int)
    parser.add_argument('--base-size', default=256, type=int)
    return parser
def check_agrs(args):
    """Post-parse argument fixups (sic: the function-name typo is kept — callers use it).

    - NHWC layout implies AMP.
    - Expands args.output_dir to <prefix>/<model>_<dataset>[_amp][_dist_WxG]
      and creates that directory.
    - Verifies apex is importable when AMP is requested.
    """
    if args.nhwc:
        args.amp = True
    if args.output_dir:
        prefix=args.output_dir
        names = [args.model, os.path.basename(args.data_path)]
        if args.amp:
            names.append("amp")
        if torch.cuda.device_count():
            # e.g. dist_<world_size>x<gpus_per_node>
            names.append(f"dist_{utils.get_world_size()}x{torch.cuda.device_count()}")
        exp_dir = "_".join(map(str, names))
        args.output_dir = os.path.join(prefix, exp_dir)
        if not os.path.exists(args.output_dir):
            os.makedirs(args.output_dir, exist_ok=True)
    if args.amp:
        if apex_amp is None:
            raise RuntimeError("Not found apex in installed packages, cannot enable amp.")
def train_model(model_cls=None):
    """Reusable CLI entry point; *model_cls*, if given, is a factory(args) -> model."""
    args = get_args_parser().parse_args()
    check_agrs(args)
    if utils.is_main_process():
        setup_logging(args.output_dir)
    # On Iluvatar CoreX builds (torch.corex present) DALI must run on CPU.
    if hasattr(torch, "corex") and args.dali:
        args.dali_cpu = True
    if model_cls is not None:
        args.model_cls = model_cls
    main(args)
def setup_logging(prefix):
    """Configure root logging; mirror records to <prefix>/train.log when prefix is set."""
    if prefix:
        log_handlers = [
            logging.FileHandler(os.path.join(prefix, "train.log"), mode='w'),
            logging.StreamHandler(),
        ]
    else:
        # None lets basicConfig fall back to its default stderr handler.
        log_handlers = None
    logging.basicConfig(level=logging.DEBUG,
                        format="%(asctime)s [%(levelname)s] %(message)s",
                        handlers=log_handlers)
# Script entry point: parse args, configure logging on the main process, and
# log any uncaught exception before exiting.
if __name__ == "__main__":
    args = get_args_parser().parse_args()
    check_agrs(args)
    if utils.is_main_process():
        setup_logging(args.output_dir)
    try:
        main(args)
    except Exception as e:
        logging.exception(e)
|
[
"jia.guo@iluvatar.ai"
] |
jia.guo@iluvatar.ai
|
bd8a2b6e104c59c4b57a5cac5db23d29db1db3ec
|
908e60d308ca9458b89980be1095f58a07fce0bb
|
/playNFS.py
|
7b26a28595bb3a090c047db2100d5e011b07db5b
|
[] |
no_license
|
KartikPatelOfficial/Ai-Nfs
|
f6220d2f1cf479ce613aa206a9150c4dd88602a5
|
00e39a9d3ab4bec6d26987c6849799587f3d02cc
|
refs/heads/master
| 2021-07-19T14:57:30.108037
| 2017-10-28T16:07:32
| 2017-10-28T16:07:32
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,365
|
py
|
import numpy as np
from PIL import ImageGrab
import cv2
import pyautogui
import time
from directKeys import ReleaseKey, PressKey, W, A, S, D
def drawLines(img, lines):
    """Draw each detected Hough line onto *img* in white (3 px thick).

    cv2.HoughLinesP returns None when no lines are found; the original code
    hid that case (and every other error) behind a bare ``except: pass`` —
    handle the None case explicitly instead.
    """
    if lines is None:
        return
    for line in lines:
        coords = line[0]
        cv2.line(img, (coords[0], coords[1]), (coords[2], coords[3]), [255, 255, 255], 3)
def roi(img, vertices):
    """Return *img* with everything outside the polygon *vertices* zeroed out."""
    polygon_mask = np.zeros_like(img)
    cv2.fillPoly(polygon_mask, vertices, 255)
    return cv2.bitwise_and(img, polygon_mask)
def processImg(originalImage):
    """Edge-detect a captured frame and keep only the road region of interest.

    Pipeline: grayscale -> Canny edges -> Gaussian blur -> polygon ROI mask
    -> probabilistic Hough lines drawn onto the processed image.
    """
    processedImg = cv2.cvtColor(originalImage,cv2.COLOR_BGR2GRAY)
    processedImg = cv2.Canny(processedImg,threshold1=300,threshold2=400)
    processedImg = cv2.GaussianBlur(processedImg, (5,5),0)
    # Trapezoid roughly covering the road ahead in the 800x600 capture region.
    verticies = np.array([[10,500],[10,300],[300,200],[500,200],[800,300],[800,400]])
    processedImg = roi(processedImg,[verticies])
    lines = cv2.HoughLinesP(processedImg, 1, np.pi/180, 180, 20, 15)
    drawLines(processedImg,lines)
    return processedImg
def main():
    """Capture the game window in a loop, process each frame, and display it.

    Press 'q' in the preview window to quit.
    """
    lastTime = time.time()
    while(True):
        # Grab the 800x600 game region (y offset 40 px for the title bar).
        screen = np.array(ImageGrab.grab(bbox=(0,40,800,640)))
        newScreen = processImg(screen)
        print('Loop took {} seconds'.format(time.time()-lastTime))
        lastTime = time.time()
        cv2.imshow('window',newScreen)
        # cv2.imshow('window',cv2.cvtColor(screen,cv2.COLOR_BGR2RGB))
        if cv2.waitKey(25) & 0xFF == ord('q'):
            cv2.destroyAllWindows()
            break
main()
|
[
"patelkartik1910@gmail.com"
] |
patelkartik1910@gmail.com
|
cab5f90d462f2fc8397fd3e49936dc077c525358
|
b37601e91fc9e6d4ad7dab19880747bca82f2e50
|
/General_Practice/listdictionary_practice.py
|
3d78becc649383215c0f2fd6cabe3711c437197e
|
[] |
no_license
|
sarahannali/pythoncourse
|
d26389cd3fda830af3778051d7205021d5e150b3
|
b9ab543c19511e00a7b3e174b817c5eb0706a49a
|
refs/heads/master
| 2020-08-29T05:53:21.388641
| 2019-12-05T22:47:56
| 2019-12-05T22:47:56
| 217,947,564
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,226
|
py
|
# #List Comprehension Practice
a = input("List 5 numbers less than 10, separated by a space: ")
b = input("List 5 more numbers less than 10: ")
a = a.replace(" ", "")
# Characters of the second input that also occur in the first. This is the
# explicit form of the original `in a and b` truthiness trick and produces
# exactly the same result (including [] when either input is empty).
answer = [item for item in b if item in a]  # Shared in both
inputOne = input("What is your name? ")
inputTwo = input("What is your pet's name? ")
inputThree = input("What is your favorite color? ")
listNames = [inputOne, inputTwo, inputThree]
# Reverse each entered string and lowercase it.
answer2 = [name[::-1].lower() for name in listNames]  # Reverse a string
print(f"\nYour number inputs shared the following numbers: {answer}")
print(f"\nThose words backward are {answer2}")
#Dictionary Practice
import random
options = ["pizza", "bread", "cookies", "cakes", "coffee"]
print(f"\nWELCOME! We sell {options}")
choice = input("What would you like to buy? ")
# Randomized stock level per product.
stock = {
    "pizza" : random.randint(1,10),
    "bread": random.randint(1,20),
    "cookies": random.randint(1,100),
    "cakes": random.randint(1,5),
    "coffee": random.randint(1,100)
}
amount = stock.get(choice)
if choice in stock:
    # Naive pluralization for the confirmation message.
    if amount > 1:
        choiceFix = choice + "s"
    else:
        choiceFix = choice
    print(f"\nWe have {amount} {choiceFix} left")
else:
    print("\nSorry, we don't make that!")
|
[
"asarahali00@gmail.com"
] |
asarahali00@gmail.com
|
8a438d371dcd47d1c7a958b870491293517d1a86
|
cf39aeabaae2fc0a16ddf4b458308d5ebde10a33
|
/modules/grindhold_plainhtml/__init__.py
|
a969ecaf93e1343f741158687ed33350effc5806
|
[] |
no_license
|
joker234/skarphed
|
03dbb774a7605d6523926d082009a24f29455fd7
|
a7dc2bd758bf24cf8819d36e67633e68e87cf008
|
refs/heads/master
| 2021-01-18T06:36:20.996757
| 2013-09-06T02:20:25
| 2013-09-06T02:20:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,682
|
py
|
#!/usr/bin/python
#-*- coding: utf-8 -*-
###########################################################
# © 2011 Daniel 'grindhold' Brendle and Team
#
# This file is part of Skarphed.
#
# Skarphed is free software: you can redistribute it and/or
# modify it under the terms of the GNU Affero General Public License
# as published by the Free Software Foundation, either
# version 3 of the License, or (at your option) any later
# version.
#
# Skarphed is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied
# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
# PURPOSE. See the GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public
# License along with Skarphed.
# If not, see http://www.gnu.org/licenses/.
###########################################################
import os
from StringIO import StringIO
from module import AbstractModule
class ModuleException(Exception):
    """Module-level error with numbered, predefined messages."""

    # Known error numbers mapped to their human-readable descriptions.
    ERRORS = {
        0: """This instance does not have a WidgetId. Therefore, Widget-bound methods cannot be used""",
    }

    @classmethod
    def get_msg(cls, nr, info=""):
        """Format error *nr* as ``DB_<nr>: <description> <info>``."""
        return "DB_{}: {} {}".format(nr, cls.ERRORS[nr], info)
class Module(AbstractModule):
    """plainhtml widget module: stores and renders a title + HTML body per widget."""

    def __init__(self, core):
        AbstractModule.__init__(self, core)
        self._path = os.path.dirname(__file__)
        self._load_manifest()

    """
    BEGIN IMPLEMENTING YOUR MODULE HERE
    """

    def render_pure_html(self, widget_id, args={}):
        """Render the stored content of *widget_id* as an <h2> title plus HTML body."""
        content = self.get_content(widget_id)
        return "<h2>%s</h2>%s" % (content['title'], content['html'])

    def render_html(self, widget_id, args={}):
        # BUG FIX: the original called self.render_pure_html() without the
        # required widget_id argument, which raised TypeError at runtime.
        return self.render_pure_html(widget_id, args)

    def render_javascript(self, widget_id, args={}):
        """This module ships no JavaScript."""
        return ""

    def set_content(self, widget_id, content="", title=""):
        """Insert or update the title/HTML stored for *widget_id*."""
        title = str(title)
        content = StringIO(str(content))
        db = self._core.get_db()
        stmnt = "UPDATE OR INSERT INTO ${html} (MOD_INSTANCE_ID, HTM_TITLE, HTM_HTML) \
              VALUES (?,?,?) MATCHING (MOD_INSTANCE_ID) ;"
        db.query(self, stmnt, (widget_id, title, content), commit=True)

    def get_content(self, widget_id):
        """Return {'title', 'html'} for *widget_id*, or placeholder values if missing."""
        db = self._core.get_db()
        stmnt = "SELECT HTM_TITLE, HTM_HTML FROM ${html} WHERE MOD_INSTANCE_ID = ? ;"
        cur = db.query(self, stmnt, (widget_id,))
        row = cur.fetchonemap()
        if row is not None:
            return {'title': row["HTM_TITLE"],
                    'html': row["HTM_HTML"]}
        else:
            return {"title": "Widget not found",
                    "html": "<p>This widget does apparently not exist</p>"}
|
[
"grindhold@gmx.net"
] |
grindhold@gmx.net
|
120ee9bc839ff0a3903105aacf109d63e4c539be
|
7774f3549007ea06046ff06abe85efb6433062b9
|
/textapp/forms.py
|
fe60215b1bf50eafe9943fed60819425e3743837
|
[] |
no_license
|
rikuriku1999/textbook
|
92be520f064c74986c62437fe585db0d39216a61
|
49814eac2ed318c13882966a35bec60760084a2b
|
refs/heads/master
| 2021-01-01T12:39:22.769329
| 2020-03-15T13:18:42
| 2020-03-15T13:18:42
| 239,282,516
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,025
|
py
|
from django import forms
from . import models
from django.contrib.auth import get_user_model
from django.contrib.auth.forms import (
AuthenticationForm, UserCreationForm
)
User=get_user_model()
# Fixed choice lists used by the forms below (values are stored verbatim).
COLLEGE_CHOICES = (
    ('慶應義塾大学','慶應義塾大学'),
    ('早稲田大学','早稲田大学'),
    ('青山学院大学','青山学院大学'))
GENDER_CHOICES = (
    ('男性','男性'),
    ('女性','女性'))
# Sell-status filter: 'e' = show all listings, False = on-sale only.
STATUS_CHOICES = (
    ('e','全て'),
    (False,'販売中のみ')
)
# Book condition options; the empty key is the "please select" placeholder.
QUALITY_CHOICES = (
    ('','状態を選択'),
    ('きれい','きれい'),
    ('少し書き込みあり','少し書き込みあり'),
    ('かなり書き込みあり','かなり書き込みあり'),
    ('あまりきれいでない','あまりきれいでない')
)
class CommentForm(forms.ModelForm):
    """Comment entry form bound to Commentmodel (text field only, max 30 chars)."""
    class Meta:
        model = models.Commentmodel
        fields = ('text',)
    text = forms.CharField(
        widget=forms.Textarea(),
        required=False,
        max_length=30,
        initial=''
    )
class UserForm(forms.Form):
    """Profile creation form (username is fixed after creation).

    NOTE(review): this is a plain forms.Form, so the Meta/model declaration
    below has no effect — confirm whether ModelForm was intended.
    """
    class Meta:
        model = models.Usermodel
        fields = ('username','college','gender','intro')
    username = forms.CharField(
        widget=forms.TextInput(
            attrs={'placeholder':'ユーザー名を入力(変更不可)'}
        ),
        required=True,
        max_length=20,
    )
    college = forms.CharField(
        widget=forms.TextInput(
            attrs={'placeholder':'大学名、学部学科を入力'}
        ),
        #choices=COLLEGE_CHOICES,
        required=True,
        max_length=20,
    )
    intro = forms.CharField(
        required=False,
        widget=forms.Textarea(
            attrs={'placeholder':'自己紹介文を入力'}
        ),
        max_length=1000
    )
    gender = forms.ChoiceField(
        widget=forms.Select,
        choices=GENDER_CHOICES
    )
class UserForm2(forms.Form):
    """Profile edit form (college and intro only; username not editable here).

    NOTE(review): Meta is ignored on a plain forms.Form — confirm intent.
    """
    class Meta:
        model = models.Usermodel
        fields = ('college','intro')
    college = forms.CharField(
        widget=forms.TextInput(),
        #choices=COLLEGE_CHOICES,
        required=True,
        max_length=20,
    )
    intro = forms.CharField(
        required=False,
        widget=forms.Textarea(
            attrs={'placeholder':'自己紹介文を入力'}
        ),
        max_length=1000
    )
class DetailForm(forms.Form):
    """Textbook listing form: image, title, description, category, condition,
    price and campus.

    NOTE(review): Meta is ignored on a plain forms.Form — confirm intent.
    """
    class Meta:
        model = models.Textbookmodel
        fields = ('title','content','images','collegecategory','status','price','campus',)
    images = forms.ImageField(
        required=True,
    )
    title = forms.CharField(
        required=True,
        widget=forms.TextInput(
            attrs={'placeholder':'例 経済学入門'}
        ),
        max_length=30,
    )
    content = forms.CharField(
        max_length=200,
        required=True,
        widget=forms.Textarea(
            attrs={'placeholder':'例 経済学部の必修科目の教科書です。定価5000円です。テストに出そうな部分も書き込んであります。よろしくお願いします。'}
        )
    )
    collegecategory = forms.CharField(
        max_length=20,
        required=True,
        widget=forms.TextInput(),
    )
    # Condition of the book, drawn from QUALITY_CHOICES.
    status = forms.ChoiceField(
        widget=forms.Select,
        choices=QUALITY_CHOICES,
    )
    price = forms.IntegerField(
        required=True,
        widget=forms.NumberInput(
            attrs={'placeholder':'例 2000'}
        )
    )
    campus = forms.CharField(
        max_length=30,
        required=False,
        widget=forms.TextInput(
            attrs={'placeholder':'例 渋谷キャンパス'}
        )
    )
class ChatForm(forms.ModelForm):
    """Chat message form bound to Chatmodel (single text field, max 32 chars)."""
    class Meta:
        model = models.Chatmodel
        fields = ('text',)
    text = forms.CharField(
        initial='',
        max_length=32,
        required = True,
        widget = forms.Textarea(
            attrs={'placeholder':'メッセージを入力'}
        )
    )
class SearchForm(forms.Form):
    """Free-text keyword search form."""
    search = forms.CharField(
        initial='',
        label='search',
        required = False, # optional
        widget=forms.TextInput(
            attrs={'placeholder':'キーワードを入力'}
        ))
class SqueezeForm(forms.Form):
    """Listing filter form: title, college, maximum price and sell status."""
    title = forms.CharField(
        initial='',
        label='title',
        required = False,
        widget=forms.TextInput(
            attrs={'placeholder':'タイトルを入力'}
        )
    )
    college = forms.CharField(
        initial='',
        label='college',
        required = False,
        widget = forms.TextInput(
            attrs={'placeholder':'大学名等を入力'}
        )
    )
    # Upper bound on price ("~yen or less").
    price = forms.IntegerField(
        initial='',
        label='price',
        required = False,
        widget=forms.NumberInput(
            attrs={'placeholder':'数字のみ ~円以下'}
        )
    )
    # All listings vs on-sale only (see STATUS_CHOICES).
    sellstatus = forms.ChoiceField(
        label='sellstatus',
        widget = forms.Select,
        choices = STATUS_CHOICES
    )
class LoginForm(AuthenticationForm):
    """Login form; adds Bootstrap classes and label placeholders to every field."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        for field in self.fields.values():
            field.widget.attrs['class'] = 'form-control'
            field.widget.attrs['placeholder'] = field.label  # use the field label as placeholder
class UserCreateForm(UserCreationForm):
    """Sign-up form keyed on email; purges stale inactive accounts on validation."""
    class Meta:
        model = User
        fields = ('email',)
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        for field in self.fields.values():
            field.widget.attrs['placeholder'] = field.label  # use the field label as placeholder
            field.widget.attrs['class'] = 'form-control'
    def clean_email(self):
        # Delete never-activated accounts with this address so it can be reused.
        email = self.cleaned_data['email']
        User.objects.filter(email=email, is_active=False).delete()
        return email
|
[
"kalashnikova1120@gmail.com"
] |
kalashnikova1120@gmail.com
|
90eb5fd3f16495104732f25945188ffbca0336ac
|
95d7484f512f2ef0b62a0da1feb900e436214c8b
|
/models/contact_template.py
|
1bcc94bd095d43236881fd97e45402ddc616740d
|
[] |
no_license
|
Sundaya-Indo/product_test
|
d3b090384284b3efe449952b63f7face735a0ccf
|
e1d7a6dd40b224024df9955e67e50c0a09cf7b8b
|
refs/heads/master
| 2021-07-31T14:00:13.001861
| 2021-07-22T08:42:57
| 2021-07-22T08:42:57
| 248,193,227
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 663
|
py
|
from odoo import models, fields, api, _
from odoo.exceptions import UserError
class Partners(models.Model):
    """Extend res.partner with datasheet/weblink attachments and an employee flag."""
    _inherit = 'res.partner'
    # Datasheet attachments (m2m to ir.attachment via a dedicated relation table).
    file_datasheet_partner = fields.Many2many(
        comodel_name="ir.attachment",
        relation="m2m_ir_file_datasheet_partner",
        column1="m2m_id",
        column2="attachment_id",
        string="Datasheet File")
    # Web-link attachments for the partner.
    weblinks_partner = fields.Many2many(comodel_name="ir.attachment",
                                   relation="m2m_ir_file_weblinks_partner",
                                   column1="m2m_id",
                                   column2="attachment_id",
                                   string="Weblinks")
    # Marks the contact as an employee (defaults to False).
    is_employee = fields.Boolean('Is an Employee', default=False)
|
[
"agis@sundaya.com"
] |
agis@sundaya.com
|
40ccefb509a8910430c7b9e7396fe596022f0fdf
|
3de6b4bbaf86afe1ff87b0d829bcba014a8a9696
|
/.history/home/models_20200512135208.py
|
77a219ab1076ce6b855a9fb57b4f9eeb54707ffb
|
[] |
no_license
|
codermythbuster/bs
|
f0b758ab396e50f744aa29c7ecd58354b7df06df
|
3687302da8f9fe5a8f75d52ba429f14e2f09c67e
|
refs/heads/master
| 2022-08-04T22:27:59.987215
| 2020-05-26T11:24:01
| 2020-05-26T11:24:01
| 263,302,705
| 0
| 0
| null | 2020-05-26T09:58:03
| 2020-05-12T10:16:30
|
HTML
|
UTF-8
|
Python
| false
| false
| 1,225
|
py
|
from django.db import models
from django.contrib.auth.models import User
# book keeping record model
class Book(models.Model):
    """Listing record for a second-hand book offered by a user."""
    book_id = models.IntegerField(primary_key=True,verbose_name="BOOK ID")
    book_name = models.TextField(verbose_name="BOOK NAME")
    book_category = models.CharField(max_length=100,verbose_name="BOOK CATEGORY")
    book_description = models.TextField(verbose_name="BOOK DESCRIPTION")
    book_author_name = models.CharField(max_length=256, verbose_name="AUTHOR Name")
    book_price_to_sell = models.FloatField(verbose_name="Selling Price")
    # NOTE(review): ForeignKey without on_delete raises TypeError on Django >= 2.0
    # — confirm the targeted Django version.
    username = models.ForeignKey(User,verbose_name="USERNAME")
    book_status = models.BooleanField(default=False, verbose_name="AVAILABILITY")
    book_image = models.ImageField(verbose_name="IMAGE URL")
    def __str__(self):
        return " {} {} {} {} {} {} ".format(self.book_id,self.book_name,self.book_author_name,self.book_category,self.book_status,self.book_image)
class Books_purchased (models.Model):
    """Transaction linking two users (buyer/seller) to a purchased Book.

    NOTE(review): as written this model cannot load — AutoField requires
    primary_key=True, and ForeignKey(to_field=...) is missing its target
    model ('to'); both raise at class-definition time. Confirm intent.
    """
    trans_id = models.AutoField(verbose_name="Transaction ID")
    user1 = models.ForeignKey(to_field=User.USERNAME_FIELD)
    user2 = models.ForeignKey(to_field=User.USERNAME_FIELD)
    book_id = models.ForeignKey(Book)
|
[
"namdev373@gmail.com"
] |
namdev373@gmail.com
|
c207cd4f3194bfde257b57a18093edeb474a8c31
|
9a5fa4f1fcd2d335347dda8b6672ff0392003823
|
/backend/exprgram/migrations/0006_auto_20180724_0331.py
|
6f9ef516992a157f06519ae48738df51e0240ff7
|
[] |
no_license
|
kyungjejo/exprgram-evaluation
|
ea9db3defc215f0f7ff4fde878267dcb7618e6c7
|
d0a549fbe3072bfc5aa20d645e3e4cfb0ba60f06
|
refs/heads/master
| 2023-01-13T03:09:26.063495
| 2018-09-10T11:16:08
| 2018-09-10T11:16:08
| 146,848,149
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 874
|
py
|
# Generated by Django 2.0.6 on 2018-07-24 03:31
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: set default=1 on the `count` field of the four label models."""

    dependencies = [
        ('exprgram', '0005_auto_20180719_1659'),
    ]

    operations = [
        migrations.AlterField(
            model_name='emotionlables',
            name='count',
            field=models.IntegerField(default=1),
        ),
        migrations.AlterField(
            model_name='intentionlables',
            name='count',
            field=models.IntegerField(default=1),
        ),
        migrations.AlterField(
            model_name='locationlables',
            name='count',
            field=models.IntegerField(default=1),
        ),
        migrations.AlterField(
            model_name='relationshiplables',
            name='count',
            field=models.IntegerField(default=1),
        ),
    ]
|
[
"kyungjejo@gmail.com"
] |
kyungjejo@gmail.com
|
42bdb6a885ac58d51bad36beea8877307f7902a5
|
eda9187adfd53c03f55207ad05d09d2d118baa4f
|
/algo/Transfer_Learning/Transfer_learning.py
|
725a6e82bceb8aa1d09e9cb263fc2fdf9da6aea1
|
[] |
no_license
|
HuiZhaozh/python_tutorials
|
168761c9d21ad127a604512d7c6c6b38b4faa3c7
|
bde4245741081656875bcba2e4e4fcb6b711a3d9
|
refs/heads/master
| 2023-07-07T20:36:20.137647
| 2020-04-24T07:18:25
| 2020-04-24T07:18:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,586
|
py
|
# -*- coding:utf-8 -*-
# /usr/bin/python
'''
-------------------------------------------------
File Name : Transfer_learning
Description : 迁移学习
Envs : pytorch
Author : yanerrol
Date : 2020/2/17 09:58
-------------------------------------------------
Change Activity:
2020/2/17 : new
-------------------------------------------------
'''
__author__ = 'yanerrol'
import torch
import time
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torchvision import datasets
from torchvision import transforms
from torch.utils.data import DataLoader
#######################################
### PRE-TRAINED MODELS AVAILABLE HERE
## https://pytorch.org/docs/stable/torchvision/models.html
from torchvision import models
#######################################
if torch.cuda.is_available():
    # Make cuDNN deterministic for reproducibility.
    torch.backends.cudnn.deterministic = True
##########################
### SETTINGS
##########################
# Device
DEVICE = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print('Device:', DEVICE)
NUM_CLASSES = 10
# Hyperparameters
random_seed = 1
learning_rate = 0.0001
num_epochs = 10
batch_size = 128
##########################
### CIFAR-10 DATASET (banner previously said MNIST in error)
##########################
# Resize to 224x224 because the pretrained VGG-16 expects ImageNet-sized input.
custom_transform = transforms.Compose([
    transforms.Resize((224, 224)),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406],
                         std=[0.229, 0.224, 0.225])
])
## Note that this particular normalization scheme is
## necessary since it was used for pre-training
## the network on ImageNet.
## These are the channel-means and standard deviations
## for z-score normalization.
train_dataset = datasets.CIFAR10(root='data',
                                 train=True,
                                 transform=custom_transform,
                                 download=True)
test_dataset = datasets.CIFAR10(root='data',
                                train=False,
                                transform=custom_transform)
train_loader = DataLoader(dataset=train_dataset,
                          batch_size=batch_size,
                          num_workers=8,
                          shuffle=True)
test_loader = DataLoader(dataset=test_dataset,
                         batch_size=batch_size,
                         num_workers=8,
                         shuffle=False)
# Checking the dataset
for images, labels in train_loader:
    print('Image batch dimensions:', images.shape)
    print('Image label dimensions:', labels.shape)
    break
##########################
### Loading Pre-Trained Model
##########################
model = models.vgg16(pretrained=True)
##########################
### Freezing Model
##########################
# Freeze the whole network, then selectively unfreeze what we fine-tune.
for param in model.parameters():
    param.requires_grad = False
# BUG FIX: the original wrote `model.classifier[3].requires_grad = True`,
# which only sets a plain Python attribute on the nn.Module — requires_grad
# lives on the layer's Parameters, so the layer silently stayed frozen.
for param in model.classifier[3].parameters():
    param.requires_grad = True
# Replace the final classifier head with a fresh one for NUM_CLASSES outputs;
# newly constructed modules have requires_grad=True by default.
model.classifier[6] = nn.Sequential(
    nn.Linear(4096, 512),
    nn.ReLU(),
    nn.Dropout(0.5),
    nn.Linear(512, NUM_CLASSES))
def compute_accuracy(model, data_loader):
    """Return the top-1 accuracy (in percent) of *model* over *data_loader*."""
    model.eval()
    num_correct, num_seen = 0, 0
    for features, targets in data_loader:
        features = features.to(DEVICE)
        targets = targets.to(DEVICE)
        logits = model(features)
        # argmax over the class dimension gives the predicted labels.
        predictions = torch.argmax(logits, dim=1)
        num_seen += targets.size(0)
        num_correct += (predictions == targets).sum()
    return num_correct.float() / num_seen * 100
def compute_epoch_loss(model, data_loader):
    """Return the mean cross-entropy of *model* over every example in *data_loader*."""
    model.eval()
    total_loss, num_examples = 0., 0
    with torch.no_grad():
        for features, targets in data_loader:
            features = features.to(DEVICE)
            targets = targets.to(DEVICE)
            logits = model(features)
            # reduction='sum' so dividing by the example count yields a true mean.
            total_loss += F.cross_entropy(logits, targets, reduction='sum')
            num_examples += targets.size(0)
    return total_loss / num_examples
start_time = time.time()
for epoch in range(num_epochs):
    model.train()
    for batch_idx, (features, targets) in enumerate(train_loader):
        features = features.to(DEVICE)
        targets = targets.to(DEVICE)
        ### FORWARD AND BACK PROP
        logits = model(features)
        cost = F.cross_entropy(logits, targets)
        optimizer.zero_grad()
        cost.backward()
        ### UPDATE MODEL PARAMETERS
        optimizer.step()
        ### LOGGING
        if not batch_idx % 50:
            print('Epoch: %03d/%03d | Batch %04d/%04d | Cost: %.4f'
                  % (epoch + 1, num_epochs, batch_idx,
                     len(train_loader), cost))
    model.eval()
    with torch.set_grad_enabled(False):  # save memory during inference
        print('Epoch: %03d/%03d | Train: %.3f%% | Loss: %.3f' % (
              epoch + 1, num_epochs,
              compute_accuracy(model, train_loader),
              compute_epoch_loss(model, train_loader)))
    print('Time elapsed: %.2f min' % ((time.time() - start_time) / 60))
print('Total Training Time: %.2f min' % ((time.time() - start_time) / 60))
with torch.set_grad_enabled(False):  # save memory during inference
    # BUG FIX: the original line was missing its closing parenthesis,
    # which made the whole file a SyntaxError.
    print('Test accuracy: %.2f%%' % (compute_accuracy(model, test_loader)))
##########################
### Visualizing sample predictions (banner previously said "Training as usual")
##########################
import matplotlib.pyplot as plt
# CIFAR-10 class names, indexed by label id.
classes = ('plane', 'car', 'bird', 'cat',
           'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
# Grab only the first test batch for visualization.
for batch_idx, (features, targets) in enumerate(test_loader):
    features = features
    targets = targets
    break
logits = model(features.to(DEVICE))
_, predicted_labels = torch.max(logits, 1)
def unnormalize(tensor, mean, std):
    """Invert per-channel normalization IN PLACE: channel = channel*std + mean.

    Mutates `tensor` (CHW layout, one mean/std entry per channel) and
    returns it for convenience.
    """
    for channel, mu, sigma in zip(tensor, mean, std):
        channel.mul_(sigma).add_(mu)
    return tensor
# Plot the first n_images test images with their predicted class names.
n_images = 10
fig, axes = plt.subplots(nrows=1, ncols=n_images,
                         sharex=True, sharey=True, figsize=(20, 2.5))
orig_images = features[:n_images]
for i in range(n_images):
    curr_img = orig_images[i].detach().to(torch.device('cpu'))
    # Undo the ImageNet-style normalization so colors display correctly.
    # NOTE(review): these mean/std values assume the loader normalized with
    # the same constants — confirm against the transform pipeline.
    curr_img = unnormalize(curr_img,
                           torch.tensor([0.485, 0.456, 0.406]),
                           torch.tensor([0.229, 0.224, 0.225]))
    # CHW -> HWC, the layout imshow expects.
    curr_img = curr_img.permute((1, 2, 0))
    axes[i].imshow(curr_img)
    axes[i].set_title(classes[predicted_labels[i]])
|
[
"2681506@gmail.com"
] |
2681506@gmail.com
|
dd42b52d712e69767f647a33a975f897d68b913f
|
5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d
|
/alipay/aop/api/domain/OssDirectoryDetail.py
|
7b7aed746981c86b4885e7159246c6f7d6a7017c
|
[
"Apache-2.0"
] |
permissive
|
alipay/alipay-sdk-python-all
|
8bd20882852ffeb70a6e929038bf88ff1d1eff1c
|
1fad300587c9e7e099747305ba9077d4cd7afde9
|
refs/heads/master
| 2023-08-27T21:35:01.778771
| 2023-08-23T07:12:26
| 2023-08-23T07:12:26
| 133,338,689
| 247
| 70
|
Apache-2.0
| 2023-04-25T04:54:02
| 2018-05-14T09:40:54
|
Python
|
UTF-8
|
Python
| false
| false
| 2,270
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class OssDirectoryDetail(object):
    """Value object for one entry of an OSS directory listing.

    Exposes acl, file_id, file_name and last_modified as plain properties
    and (de)serializes them through to_alipay_dict / from_alipay_dict,
    matching the Alipay OpenAPI wire format.
    """

    def __init__(self):
        self._acl = None
        self._file_id = None
        self._file_name = None
        self._last_modified = None

    @property
    def acl(self):
        return self._acl

    @acl.setter
    def acl(self, value):
        self._acl = value

    @property
    def file_id(self):
        return self._file_id

    @file_id.setter
    def file_id(self, value):
        self._file_id = value

    @property
    def file_name(self):
        return self._file_name

    @file_name.setter
    def file_name(self, value):
        self._file_name = value

    @property
    def last_modified(self):
        return self._last_modified

    @last_modified.setter
    def last_modified(self, value):
        self._last_modified = value

    def to_alipay_dict(self):
        """Serialize all truthy fields into a plain dict.

        Nested API objects (anything exposing to_alipay_dict) are
        serialized recursively, mirroring the generated-SDK convention.
        """
        params = dict()
        for name in ('acl', 'file_id', 'file_name', 'last_modified'):
            value = getattr(self, name)
            if value:
                if hasattr(value, 'to_alipay_dict'):
                    value = value.to_alipay_dict()
                params[name] = value
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build an OssDirectoryDetail from a response dict.

        Returns None for a falsy input; only keys present in `d` are set.
        """
        if not d:
            return None
        o = OssDirectoryDetail()
        for name in ('acl', 'file_id', 'file_name', 'last_modified'):
            if name in d:
                setattr(o, name, d[name])
        return o
|
[
"jishupei.jsp@alibaba-inc.com"
] |
jishupei.jsp@alibaba-inc.com
|
13a0070714639e0e12858309ad4019cba7a00079
|
b0d91025a0c188b8ecc4c328a38e3d0c158309a3
|
/mysite/settings.py
|
96b107a7f1405ad0f5f26aafebb1e9461262af79
|
[] |
no_license
|
sdumi/dj_tut
|
26e9c742e2d72f5ccb0ae08e4a65bbacabf25192
|
fb7c0c979372da9de0ed30068fc12fe891ab6d12
|
refs/heads/master
| 2020-12-24T16:06:15.022099
| 2010-11-23T12:04:14
| 2010-11-23T12:04:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,241
|
py
|
# Django settings for mysite project.
import os.path
DEBUG = True
#DEBUG = False
TEMPLATE_DEBUG = DEBUG
# Absolute directory of this settings module; used to derive all other paths.
PROJECT_DIR = os.path.dirname(__file__)
ADMINS = (
    ('Dumi', 'dumitru.sipos@gmail.com'),
    ('Dumitru Sipos', 'dumitru.sipos@alcatel-lucent.com')
)
# NOTE(review): SMTP host/port are hard-coded to an internal server;
# consider reading them from the environment for portability.
EMAIL_HOST='smtp.tm.alcatel.ro'
EMAIL_PORT=25
MANAGERS = ADMINS
# shortcut for os.path.join
# called: pj("templates") ==> os.path.join(PROJECT_DIR, "templates")
pj = lambda filename: os.path.join(PROJECT_DIR, filename)
# just an alias for os.path.join
j = os.path.join
# cannot use pj here: it takes only the new path to be added to PROJECT_DIR
# and I do not want to pass "../databases"... not sure how well that works on Windows...
dbname = j(j(j(PROJECT_DIR, ".."), "databases"), "mysite.db")
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
        # 'NAME': '/home/dsipos/prg/web/dj_tut/databases/mysite.db', # Or path to database file if using sqlite3.
        'NAME': dbname,
        'USER': '', # Not used with sqlite3.
        'PASSWORD': '', # Not used with sqlite3.
        'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
        'PORT': '', # Set to empty string for default. Not used with sqlite3.
    }
}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'Europe/Bucharest'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = True
# Absolute path to the directory that holds media.
# Example: "/home/media/media.lawrence.com/"
MEDIA_ROOT = j(j(PROJECT_DIR, ".."), "media")
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash if there is a path component (optional in other cases).
# Examples: "http://media.lawrence.com", "http://example.com/media/"
MEDIA_URL = 'http://127.0.0.1:8080/media/'
# URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a
# trailing slash.
# Examples: "http://foo.com/media/", "/media/".
ADMIN_MEDIA_PREFIX = '/media/'
# Make this unique, and don't share it with anybody.
# NOTE(review): this key is committed to version control; it should be
# rotated and loaded from the environment in any non-toy deployment.
SECRET_KEY = '%3g5=u^-vb-+xb&k$)zm(6z&n^^79k8m8f&5489z37hm9x3#w%'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
    'django.template.loaders.filesystem.Loader',
    'django.template.loaders.app_directories.Loader',
#     'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
    'django.middleware.common.CommonMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
)
ROOT_URLCONF = 'mysite.urls'
#TEMPLATE_DIRS = (
    # Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
#    "/home/dsipos/prg/web/dj_tut/templates",
#
TEMPLATE_DIRS = (
    j(j(PROJECT_DIR, ".."), "templates"),
)
INSTALLED_APPS = (
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
    'django.contrib.messages',
    # Uncomment the next line to enable the admin:
    'django.contrib.admin',
    'mysite.polls',
    'mysite.books',
    'mysite.aprozar',
    'mysite.notes',
    'mysite.xo'
)
|
[
"dumitru.sipos@gmail.com"
] |
dumitru.sipos@gmail.com
|
3e993a5460ef71cfd268ce842d87012816743b71
|
f2656962b7bb2b0bb120718feb4b9f124b7ecc6c
|
/AssociationRuleMining/utils.py
|
27d5abd683c4cf625a574825a63f41d1380be05a
|
[] |
no_license
|
Fasgort/AIA-Recomendacion
|
24c42634953efaef2b650320f83a7a17c8a5bc06
|
2340b6196552083c8b9ef75c2be245881e1bd9d1
|
refs/heads/master
| 2021-01-17T23:21:09.207808
| 2017-03-15T11:27:51
| 2017-03-15T11:27:51
| 84,218,017
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,138
|
py
|
# -*- coding: utf-8 -*-
# !/usr/bin/env python3
import logging
from tabulate import tabulate
from association_rule_tools import get_conviction, get_lift, get_confidence, get_support
def load_transaction_db(path, limit=0):
    """Load a transaction database from a text file.

    Each line holds one transaction: whitespace-separated integer item ids,
    returned as a frozenset.  A positive `limit` caps how many transactions
    are read (0 means read the whole file).
    """
    logging.debug("Start loading transactions database")
    transaction_db = []
    with open(path) as fd:
        for count, line in enumerate(fd, start=1):
            transaction_db.append(frozenset(int(tok) for tok in line.split()))
            if 0 < limit <= count:
                break
    return transaction_db
def association_rules_report(rules, transaction_db):
    """Render a text table of association-rule metrics.

    For each rule r (r[0] = antecedent X, r[1] = consequent Y) computes
    support, confidence, lift and conviction against `transaction_db`,
    then sorts by antecedent size (dominant) and its smallest item.
    """
    table = []
    for rule in rules:
        antecedent, consequent = rule[0], rule[1]
        table.append(((sorted(antecedent), consequent),
                      get_support((antecedent + consequent), transaction_db),
                      get_confidence(rule, transaction_db),
                      get_lift(rule, transaction_db),
                      get_conviction(rule, transaction_db)))
    # Weight of 100000000 makes antecedent length dominate the first item.
    table.sort(key=lambda row: (len(row[0][0]) * 100000000) + (row[0][0][0]), reverse=False)
    return tabulate(table, headers=['Rule X=>Y', 'Support', 'Confidence', 'Lift', 'Conviction'])
|
[
"valentin.sallop@gmail.com"
] |
valentin.sallop@gmail.com
|
1466dcdda5a60e77319ce48fd9043140a50877ca
|
1c6d58e5b2bbce4a457350302fa9845f43f076a2
|
/Python Programming/EXC 030 - 7-0-0.py
|
28a0e6b50ec8b2ba0c20dba9c68e67ff8b2174f4
|
[] |
no_license
|
Daviswww/Toys
|
fa8b481bf5106a0f984c6bfd5260f3ec55ccee1d
|
680c260ebb8d385a3dbcdd985a447fd5d2b74f3b
|
refs/heads/master
| 2022-07-21T03:35:41.590248
| 2020-01-11T11:04:02
| 2020-01-11T11:04:02
| 144,127,014
| 0
| 0
| null | 2022-06-22T00:05:19
| 2018-08-09T08:57:15
|
Python
|
UTF-8
|
Python
| false
| false
| 1,693
|
py
|
# Gradebook demo for five students.
# A = midterm scores, B = final-exam scores; the string '缺考' marks a
# missed exam.  C collects simple averages, D weighted averages (final x1.5).
A = [89, 56, 92 ,79, 51]
B = [70, 86, 77, 83, '缺考']
C = [0, 0, 0, 0, 0]
D = [0, 0, 0, 0, 0]
# List students absent from the midterm.
print('期中考缺考: ')
for i in range(5):
    if(A[i] =='缺考'):
        print(i,'號缺考')
# List students absent from the final.
print('\n期末考缺考: ')
for i in range(5):
    if(B[i] =='缺考'):
        print(i,'號缺考')
# Simple average: a missed exam contributes 0, so the other score is halved.
# NOTE(review): the inner `else: C[i] = 0` branch is unreachable (the outer
# condition requires at least one absence); and if BOTH exams were missed,
# B[i] / 2 would raise a TypeError on the '缺考' string.
print('\n平均分數: ')
for i in range(5):
    if(A[i] == '缺考' or B[i] == '缺考'):
        if(A[i] == '缺考'):
            C[i] = B[i] / 2
        elif(B[i] == '缺考'):
            C[i] = A[i] / 2
        else:
            C[i] = 0
        print('第',i,'號\t期中考:', A[i], '\t期末考:',B[i], '\t平均:', C[i])
    else:
        C[i] = (A[i] + B[i]) / 2
        if(C[i] > 100):
            C[i] = 100
        print('第',i,'號\t期中考:', A[i], '\t期末考:',B[i], '\t平均:', C[i])
print(C.index(max(C[0:5])),'號學期成績最高分',max(C[0:5]))
print(C.index(min(C[0:5])),'號學期成績最低分',min(C[0:5]))
# Weighted average: the final exam counts 1.5x; absences handled as above.
print('\n條分後平均分數: ')
for i in range(5):
    if(A[i] == '缺考' or B[i] == '缺考'):
        if(A[i] == '缺考'):
            D[i] = ((B[i]*1.5)) / (1 + 1.5)
        elif(B[i] == '缺考'):
            D[i] = ((A[i])) / (1 + 1.5)
        else:
            D[i] = 0
        print('第',i,'號\t期中考:', A[i], '\t期末考:',B[i], '\t平均:', D[i])
    else:
        D[i] = (A[i] + (B[i]*1.5)) / (1 + 1.5)
        if(D[i] > 100):
            D[i] = 100
        print('第',i,'號\t期中考:', A[i], '\t期末考:',B[i], '\t平均:', D[i])
print(D.index(max(D[0:5])),'號學期成績最高分',max(D[0:5]))
print(D.index(min(D[0:5])),'號學期成績最低分',min(D[0:5]))
|
[
"noreply@github.com"
] |
noreply@github.com
|
01e5eebcd277d1c73463b5afec695c16d394fa57
|
b5aea34ce585e4462775a838c27135f92c5a852d
|
/portfolio/migrations/0001_initial.py
|
152480a5681f5374338900b09a932854832e0688
|
[] |
no_license
|
theethaj/portfolio-project
|
80851eaf4ed61eec8861a98398d0bbcad5223eb7
|
a954d3b1d7f1ec17eefb8c8e67e7ce801dc5f201
|
refs/heads/main
| 2023-02-25T12:16:45.619964
| 2021-01-31T10:59:04
| 2021-01-31T10:59:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 600
|
py
|
# Generated by Django 3.1.5 on 2021-01-30 13:15
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial migration: creates the Contact table (email, subject, message)."""

    # First migration of the app, so it has no dependencies.
    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Contact',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('email', models.CharField(max_length=200)),
                ('subject', models.CharField(max_length=200)),
                ('message', models.TextField()),
            ],
        ),
    ]
|
[
"ding533@hotmail.com"
] |
ding533@hotmail.com
|
e173a1acb6f004419a36a21a69349963e12720f5
|
1f1de940fd030db12ece5a4037fc1b9291f884cf
|
/src/main/python/config.py
|
d14218a38f34ce6ddfe81273e18ef2c5b271faf0
|
[] |
no_license
|
psy2013GitHub/sklearn-utils
|
1ea747827f7a36332a049388b4abc03c43501d94
|
acea4bf3423883dd8e6782741234c6493648c820
|
refs/heads/master
| 2020-06-12T10:52:47.160022
| 2016-12-05T06:42:12
| 2016-12-05T06:42:12
| 75,586,943
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 24
|
py
|
# Module author tag (the file defines nothing else).
__author__ = 'flappy'
|
[
"deng.zhou@immomo.com"
] |
deng.zhou@immomo.com
|
8d70adcda10da24ef6cac3a8733ad6a6cd20df78
|
d53e9e90cb085046a3419be05966d3f0eef5b8e9
|
/v2/ovn_ovs.py
|
1e5a00c5a57e5b064d6629554e7a3c337619e2ff
|
[] |
no_license
|
e2e-win/k8s-ovn-ovs
|
99ba544485e726919b2aa1cfe83865c5152cd0a8
|
ed21beeb2ddf30fc94555ba99745a2f8c9963de2
|
refs/heads/master
| 2020-03-28T23:56:20.237212
| 2020-01-03T10:19:59
| 2020-01-03T10:19:59
| 149,316,665
| 0
| 10
| null | 2020-01-03T10:20:01
| 2018-09-18T16:08:41
|
Python
|
UTF-8
|
Python
| false
| false
| 16,023
|
py
|
import ci
import configargparse
import openstack_wrap as openstack
import log
import utils
import os
import time
import shutil
import constants
import yaml
# CLI options describing the OpenStack VMs, SSH access, networking and the
# ovn-kubernetes Ansible repository.  Consumed by OVN_OVS_CI via
# p.parse_known_args() below.
p = configargparse.get_argument_parser()
p.add("--linuxVMs", action="append", help="Name for linux VMS. List.")
p.add("--linuxUserData", help="Linux VMS user-data.")
p.add("--linuxFlavor", help="Linux VM flavor.")
p.add("--linuxImageID", help="ImageID for linux VMs.")
p.add("--windowsVMs", action="append", help="Name for Windows VMs. List.")
p.add("--windowsUserData", help="Windows VMS user-data.")
p.add("--windowsFlavor", help="Windows VM flavor.")
p.add("--windowsImageID", help="ImageID for windows VMs.")
p.add("--keyName", help="Openstack SSH key name")
p.add("--keyFile", help="Openstack SSH private key")
p.add("--internalNet", help="Internal Network for VMs")
p.add("--externalNet", help="External Network for floating ips")
p.add("--ansibleRepo", default="http://github.com/openvswitch/ovn-kubernetes", help="Ansible Repository for ovn-ovs playbooks.")
p.add("--ansibleBranch", default="master", help="Ansible Repository branch for ovn-ovs playbooks.")
class OVN_OVS_CI(ci.CI):
    """CI driver that deploys an OVN/OVS Kubernetes cluster on OpenStack.

    Provisions Linux and Windows VMs through the openstack wrapper,
    generates an Ansible inventory for the ovn-kubernetes playbooks,
    runs the deployment and prepares the environment for the test suite.
    Configuration comes from the module-level configargparse parser ``p``.
    """

    # Paths and templates driving the ovn-kubernetes Ansible playbooks.
    DEFAULT_ANSIBLE_PATH="/tmp/ovn-kubernetes"
    ANSIBLE_PLAYBOOK="ovn-kubernetes-cluster.yml"
    ANSIBLE_PLAYBOOK_ROOT="%s/contrib" % DEFAULT_ANSIBLE_PATH
    ANSIBLE_HOSTS_TEMPLATE=("[kube-master]\nKUBE_MASTER_PLACEHOLDER\n\n[kube-minions-linux]\nKUBE_MINIONS_LINUX_PLACEHOLDER\n\n"
                            "[kube-minions-windows]\nKUBE_MINIONS_WINDOWS_PLACEHOLDER\n")
    ANSIBLE_HOSTS_PATH="%s/contrib/inventory/hosts" % DEFAULT_ANSIBLE_PATH
    DEFAULT_ANSIBLE_WINDOWS_ADMIN="Admin"
    DEFAULT_ANSIBLE_HOST_VAR_WINDOWS_TEMPLATE="ansible_user: USERNAME_PLACEHOLDER\nansible_password: PASS_PLACEHOLDER\n"
    DEFAULT_ANSIBLE_HOST_VAR_DIR="%s/contrib/inventory/host_vars" % DEFAULT_ANSIBLE_PATH
    HOSTS_FILE="/etc/hosts"
    ANSIBLE_CONFIG_FILE="%s/contrib/ansible.cfg" % DEFAULT_ANSIBLE_PATH
    KUBE_CONFIG_PATH="/root/.kube/config"
    KUBE_TLS_SRC_PATH="/etc/kubernetes/tls/"

    def __init__(self):
        # Snapshot the parsed CLI options and the class-level defaults.
        self.opts = p.parse_known_args()[0]
        self.cluster = {}
        self.default_ansible_path = OVN_OVS_CI.DEFAULT_ANSIBLE_PATH
        self.ansible_playbook = OVN_OVS_CI.ANSIBLE_PLAYBOOK
        self.ansible_playbook_root = OVN_OVS_CI.ANSIBLE_PLAYBOOK_ROOT
        self.ansible_hosts_template = OVN_OVS_CI.ANSIBLE_HOSTS_TEMPLATE
        self.ansible_hosts_path = OVN_OVS_CI.ANSIBLE_HOSTS_PATH
        self.ansible_windows_admin = OVN_OVS_CI.DEFAULT_ANSIBLE_WINDOWS_ADMIN
        self.ansible_host_var_windows_template = OVN_OVS_CI.DEFAULT_ANSIBLE_HOST_VAR_WINDOWS_TEMPLATE
        self.ansible_host_var_dir = OVN_OVS_CI.DEFAULT_ANSIBLE_HOST_VAR_DIR
        self.ansible_config_file = OVN_OVS_CI.ANSIBLE_CONFIG_FILE
        self.logging = log.getLogger(__name__)
        self.post_deploy_reboot_required = True

    def _add_linux_vm(self, vm_obj):
        """Record a Linux VM dict in the cluster state."""
        if self.cluster.get("linuxVMs") == None:
            self.cluster["linuxVMs"] = []
        self.cluster["linuxVMs"].append(vm_obj)

    def _add_windows_vm(self, vm_obj):
        """Record a Windows VM dict in the cluster state."""
        if self.cluster.get("windowsVMs") == None:
            self.cluster["windowsVMs"] = []
        self.cluster["windowsVMs"].append(vm_obj)

    def _get_windows_vms(self):
        return self.cluster.get("windowsVMs")

    def _get_linux_vms(self):
        return self.cluster.get("linuxVMs")

    def _get_all_vms(self):
        # NOTE(review): raises TypeError if either list was never populated.
        return self._get_linux_vms() + self._get_windows_vms()

    def _get_vm_fip(self, vm_obj):
        """Return the floating IP recorded for a VM dict."""
        return vm_obj.get("FloatingIP")

    def _set_vm_fip(self, vm_obj, ip):
        """Attach a floating IP to a VM dict."""
        vm_obj["FloatingIP"] = ip

    def _create_vms(self):
        """Create all Linux and Windows VMs and attach floating IPs to them."""
        self.logging.info("Creating Openstack VMs")
        vmPrefix = self.opts.cluster_name
        for vm in self.opts.linuxVMs:
            openstack_vm = openstack.server_create("%s-%s" % (vmPrefix, vm), self.opts.linuxFlavor, self.opts.linuxImageID,
                                                   self.opts.internalNet, self.opts.keyName, self.opts.linuxUserData)
            # Takes the first free floating IP from the pool.
            fip = openstack.get_floating_ip(openstack.floating_ip_list()[0])
            openstack.server_add_floating_ip(openstack_vm['name'], fip)
            self._set_vm_fip(openstack_vm, fip)
            self._add_linux_vm(openstack_vm)
        for vm in self.opts.windowsVMs:
            openstack_vm = openstack.server_create("%s-%s" % (vmPrefix, vm), self.opts.windowsFlavor, self.opts.windowsImageID,
                                                   self.opts.internalNet, self.opts.keyName, self.opts.windowsUserData)
            fip = openstack.get_floating_ip(openstack.floating_ip_list()[0])
            openstack.server_add_floating_ip(openstack_vm['name'], fip)
            self._set_vm_fip(openstack_vm, fip)
            self._add_windows_vm(openstack_vm)
        self.logging.info("Succesfuly created VMs %s" % [ vm.get("name") for vm in self._get_all_vms()])

    def _wait_for_windows_machines(self):
        """Block until every Windows VM exposes its generated Admin password."""
        self.logging.info("Waiting for Windows VMs to obtain Admin password.")
        for vm in self._get_windows_vms():
            openstack.server_get_password(vm['name'], self.opts.keyFile)
            self.logging.info("Windows VM: %s succesfully obtained password." % vm.get("name"))

    def _prepare_env(self):
        """Provision the VMs and wait for Windows credentials."""
        self._create_vms()
        self._wait_for_windows_machines()

    def _destroy_cluster(self):
        """Delete every VM created for this cluster name."""
        vmPrefix = self.opts.cluster_name
        for vm in self.opts.linuxVMs:
            openstack.server_delete("%s-%s" % (vmPrefix, vm))
        for vm in self.opts.windowsVMs:
            openstack.server_delete("%s-%s" % (vmPrefix, vm))

    def _prepare_ansible(self):
        """Clone the playbooks and generate inventory, host vars, /etc/hosts
        entries, ansible.cfg additions, and stage the k8s binaries."""
        utils.clone_repo(self.opts.ansibleRepo, self.opts.ansibleBranch, self.default_ansible_path)
        # Creating ansible hosts file
        linux_master = self._get_linux_vms()[0].get("name")
        linux_minions = [vm.get("name") for vm in self._get_linux_vms()[1:]]
        windows_minions = [vm.get("name") for vm in self._get_windows_vms()]
        hosts_file_content = self.ansible_hosts_template.replace("KUBE_MASTER_PLACEHOLDER", linux_master)
        hosts_file_content = hosts_file_content.replace("KUBE_MINIONS_LINUX_PLACEHOLDER", "\n".join(linux_minions))
        hosts_file_content = hosts_file_content.replace("KUBE_MINIONS_WINDOWS_PLACEHOLDER","\n".join(windows_minions))
        self.logging.info("Writing hosts file for ansible inventory.")
        with open(self.ansible_hosts_path, "w") as f:
            f.write(hosts_file_content)
        # Creating hosts_vars for hosts
        for vm in self._get_windows_vms():
            vm_name = vm.get("name")
            vm_username = self.ansible_windows_admin # TO DO: Have this configurable trough opts
            vm_pass = openstack.server_get_password(vm_name, self.opts.keyFile)
            hosts_var_content = self.ansible_host_var_windows_template.replace("USERNAME_PLACEHOLDER", vm_username).replace("PASS_PLACEHOLDER", vm_pass)
            filepath = os.path.join(self.ansible_host_var_dir, vm_name)
            with open(filepath, "w") as f:
                f.write(hosts_var_content)
        # Populate hosts file
        with open(OVN_OVS_CI.HOSTS_FILE,"a") as f:
            for vm in self._get_all_vms():
                vm_name = vm.get("name")
                # The master also gets the "kubernetes" alias used by kubeconfig.
                if vm_name.find("master") > 0:
                    vm_name = vm_name + " kubernetes"
                hosts_entry=("%s %s\n" % (self._get_vm_fip(vm), vm_name))
                self.logging.info("Adding entry %s to hosts file." % hosts_entry)
                f.write(hosts_entry)
        # Enable ansible log and set ssh options
        with open(self.ansible_config_file, "a") as f:
            log_file = os.path.join(self.opts.log_path, "ansible-deploy.log")
            log_config = "log_path=%s\n" % log_file
            # This probably goes better in /etc/ansible.cfg (set in dockerfile )
            ansible_config="\n\n[ssh_connection]\nssh_args=-o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null\n"
            f.write(log_config)
            f.write(ansible_config)
        full_ansible_tmp_path = os.path.join(self.ansible_playbook_root, "tmp")
        utils.mkdir_p(full_ansible_tmp_path)
        # Copy kubernetes prebuilt binaries
        for file in ["kubelet","kubectl","kube-apiserver","kube-controller-manager","kube-scheduler","kube-proxy"]:
            full_file_path = os.path.join(utils.get_k8s_folder(), constants.KUBERNETES_LINUX_BINS_LOCATION, file)
            self.logging.info("Copying %s to %s." % (full_file_path, full_ansible_tmp_path))
            shutil.copy(full_file_path, full_ansible_tmp_path)
        for file in ["kubelet.exe", "kubectl.exe", "kube-proxy.exe"]:
            full_file_path = os.path.join(utils.get_k8s_folder(), constants.KUBERNETES_WINDOWS_BINS_LOCATION, file)
            self.logging.info("Copying %s to %s." % (full_file_path, full_ansible_tmp_path))
            shutil.copy(full_file_path, full_ansible_tmp_path)

    def _deploy_ansible(self):
        """Run the ovn-kubernetes cluster playbook; raise on non-zero exit."""
        self.logging.info("Starting Ansible deployment.")
        cmd = "ansible-playbook %s -v" % self.ansible_playbook
        cmd = cmd.split()
        cmd.append("--key-file=%s" % self.opts.keyFile)
        out, _ ,ret = utils.run_cmd(cmd, stdout=True, cwd=self.ansible_playbook_root)
        if ret != 0:
            self.logging.error("Failed to deploy ansible-playbook with error: %s" % out)
            raise Exception("Failed to deploy ansible-playbook with error: %s" % out)
        self.logging.info("Succesfully deployed ansible-playbook.")

    def _waitForConnection(self, machine, windows):
        """Poll the host via the ansible wait_for_connection module.

        Returns (exit_code, output); exit_code 0 means reachable.
        """
        self.logging.info("Waiting for connection to machine %s." % machine)
        cmd = ["ansible"]
        cmd.append(machine)
        if not windows:
            cmd.append("--key-file=%s" % self.opts.keyFile)
        cmd.append("-m")
        cmd.append("wait_for_connection")
        cmd.append("-a")
        cmd.append("'connect_timeout=5 sleep=5 timeout=600'")
        out, _, ret = utils.run_cmd(cmd, stdout=True, cwd=self.ansible_playbook_root, shell=True)
        return ret, out

    def _copyTo(self, src, dest, machine, windows=False, root=False):
        """Push a local file to `machine` via ansible copy/win_copy."""
        self.logging.info("Copying file %s to %s:%s." % (src, machine, dest))
        cmd = ["ansible"]
        if root:
            cmd.append("--become")
        if not windows:
            cmd.append("--key-file=%s" % self.opts.keyFile)
        cmd.append(machine)
        cmd.append("-m")
        module = "win_copy" if windows else "copy"
        cmd.append(module)
        cmd.append("-a")
        cmd.append("'src=%(src)s dest=%(dest)s flat=yes'" % {"src": src, "dest": dest})
        ret, _ = self._waitForConnection(machine, windows=windows)
        if ret != 0:
            # NOTE(review): passing `machine` as a second positional arg to
            # Exception does not format the %s placeholder.
            self.logging.error("No connection to machine: %s", machine)
            raise Exception("No connection to machine: %s", machine)
        # Ansible logs everything to stdout
        out, _, ret = utils.run_cmd(cmd, stdout=True, cwd=self.ansible_playbook_root, shell=True)
        if ret != 0:
            self.logging.error("Ansible failed to copy file to %s with error: %s" % (machine, out))
            raise Exception("Ansible failed to copy file to %s with error: %s" % (machine, out))

    def _copyFrom(self, src, dest, machine, windows=False, root=False):
        """Fetch a remote file from `machine` via the ansible fetch module."""
        self.logging.info("Copying file %s:%s to %s." % (machine, src, dest))
        cmd = ["ansible"]
        if root:
            cmd.append("--become")
        if not windows:
            cmd.append("--key-file=%s" % self.opts.keyFile)
        cmd.append(machine)
        cmd.append("-m")
        cmd.append("fetch")
        cmd.append("-a")
        cmd.append("'src=%(src)s dest=%(dest)s flat=yes'" % {"src": src, "dest": dest})
        # TO DO: (atuvenie) This could really be a decorator
        ret, _ = self._waitForConnection(machine, windows=windows)
        if ret != 0:
            self.logging.error("No connection to machine: %s", machine)
            raise Exception("No connection to machine: %s", machine)
        out, _, ret = utils.run_cmd(cmd, stdout=True, cwd=self.ansible_playbook_root, shell=True)
        if ret != 0:
            self.logging.error("Ansible failed to fetch file from %s with error: %s" % (machine, out))
            raise Exception("Ansible failed to fetch file from %s with error: %s" % (machine, out))

    def _runRemoteCmd(self, command, machine, windows=False, root=False):
        """Run a shell (or win_shell) command on `machine` via ansible."""
        self.logging.info("Running cmd on remote machine %s." % (machine))
        cmd=["ansible"]
        if root:
            cmd.append("--become")
        if windows:
            task = "win_shell"
        else:
            task = "shell"
            cmd.append("--key-file=%s" % self.opts.keyFile)
        cmd.append(machine)
        cmd.append("-m")
        cmd.append(task)
        cmd.append("-a")
        cmd.append("'%s'" % command)
        ret, _ = self._waitForConnection(machine, windows=windows)
        if ret != 0:
            self.logging.error("No connection to machine: %s", machine)
            raise Exception("No connection to machine: %s", machine)
        out, _, ret = utils.run_cmd(cmd, stdout=True, cwd=self.ansible_playbook_root, shell=True)
        if ret != 0:
            self.logging.error("Ansible failed to run command %s on machine %s with error: %s" % (cmd, machine, out))
            raise Exception("Ansible failed to run command %s on machine %s with error: %s" % (cmd, machine, out))

    def _prepullImages(self):
        """Run the prepull script on every Windows node to cache images."""
        # TO DO: This path should be passed as param
        prepull_script="/tmp/k8s-ovn-ovs/v2/prepull.ps1"
        for vm in self._get_windows_vms():
            self.logging.info("Copying prepull script to node %s" % vm["name"])
            self._copyTo(prepull_script, "c:\\", vm["name"], windows=True)
            self._runRemoteCmd("c:\\prepull.ps1", vm["name"], windows=True)

    def _prepareTestEnv(self):
        """Fetch kubeconfig/TLS material from the master and export the env
        vars the test runner expects; optionally reboot Windows nodes and
        prepull images (best-effort)."""
        # For OVN-OVS CI: copy config file from .kube folder of the master node
        # Replace Server in config with dns-name for the machine
        # Export appropriate env vars
        linux_master = self._get_linux_vms()[0].get("name")
        self.logging.info("Copying kubeconfig from master")
        self._copyFrom("/root/.kube/config","/tmp/kubeconfig", linux_master, root=True)
        self._copyFrom("/etc/kubernetes/tls/ca.pem","/etc/kubernetes/tls/ca.pem", linux_master, root=True)
        self._copyFrom("/etc/kubernetes/tls/admin.pem","/etc/kubernetes/tls/admin.pem", linux_master, root=True)
        self._copyFrom("/etc/kubernetes/tls/admin-key.pem","/etc/kubernetes/tls/admin-key.pem", linux_master, root=True)
        with open("/tmp/kubeconfig") as f:
            # NOTE(review): yaml.load without an explicit Loader is deprecated
            # and unsafe on untrusted input; yaml.safe_load would be preferable.
            content = yaml.load(f)
        # Point every cluster entry at the "kubernetes" /etc/hosts alias.
        for cluster in content["clusters"]:
            cluster["cluster"]["server"] = "https://kubernetes"
        with open("/tmp/kubeconfig", "w") as f:
            yaml.dump(content, f)
        os.environ["KUBE_MASTER"] = "local"
        os.environ["KUBE_MASTER_IP"] = "kubernetes"
        os.environ["KUBE_MASTER_URL"] = "https://kubernetes"
        os.environ["KUBECONFIG"] = "/tmp/kubeconfig"
        try:
            if self.post_deploy_reboot_required:
                for vm in self._get_windows_vms():
                    openstack.reboot_server(vm["name"])
            self._prepullImages()
        # NOTE(review): bare except silently swallows all errors (including
        # KeyboardInterrupt); deliberate best-effort, but worth logging.
        except:
            pass

    def up(self):
        """Bring the whole cluster up: provision VMs, then run Ansible."""
        self.logging.info("Bringing cluster up.")
        try:
            self._prepare_env()
            self._prepare_ansible()
            self._deploy_ansible()
        except Exception as e:
            raise e

    def build(self):
        """Clone and build the Kubernetes binaries used by the deployment."""
        self.logging.info("Building k8s binaries.")
        utils.get_k8s(repo=self.opts.k8s_repo, branch=self.opts.k8s_branch)
        utils.build_k8s_binaries()

    def down(self):
        """Tear the cluster down by deleting all of its VMs."""
        self.logging.info("Destroying cluster.")
        try:
            self._destroy_cluster()
        except Exception as e:
            raise e
|
[
"atuvenie@cloudbasesolutions.com"
] |
atuvenie@cloudbasesolutions.com
|
f281fed287dbd357fea0ab3bb3bd35efc0794cf4
|
51d65cbed3df1e9e3a0d51f79590ee12f88291d1
|
/object_detection/inference_over_image.py
|
0bbbdb9954ca69ffd0cf92de7a7cbb7577cf8043
|
[
"MIT"
] |
permissive
|
apacha/Mensural-Detector
|
f9332c23854263c6a3f89e8b92f3f666f8377ed8
|
05c91204cf268feaae84cd079dbe7a1852fba216
|
refs/heads/master
| 2022-09-23T21:20:53.376367
| 2022-08-31T08:36:35
| 2022-08-31T08:36:35
| 137,372,669
| 12
| 6
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,444
|
py
|
import numpy as np
import tensorflow as tf
import argparse
from PIL import Image
from object_detection.utils import ops as utils_ops, label_map_util, visualization_utils as vis_util
# Guard against too-old TensorFlow.
# FIX: the original used a lexicographic string comparison
# (tf.__version__ < '1.4.0'), which wrongly rejects versions such as
# '1.10.0'.  Compare the numeric major/minor components instead.
_tf_version = tuple(int(part) for part in tf.__version__.split('.')[:2] if part.isdigit())
if _tf_version < (1, 4):
    raise ImportError('Please upgrade your tensorflow installation to v1.4.* or later!')
def load_image_into_numpy_array(image):
    """Convert a PIL-style RGB image into an (H, W, 3) uint8 numpy array.

    `image` must expose `.size` as (width, height) and `.getdata()`
    returning the flat pixel sequence, as PIL Images do.
    """
    width, height = image.size
    pixels = np.array(image.getdata())
    return pixels.reshape((height, width, 3)).astype(np.uint8)
def run_inference_for_single_image(image, graph):
    """Run one forward pass of a frozen TF1 detection graph on a single image.

    image: HxWx3 uint8 numpy array.  graph: tf.Graph holding the frozen
    detection model.  Returns a dict with num_detections (int),
    detection_boxes, detection_scores, detection_classes (uint8) and,
    if the model produces them, detection_masks — all with the batch
    dimension stripped.
    """
    with graph.as_default():
        with tf.Session() as sess:
            # Get handles to input and output tensors
            ops = tf.get_default_graph().get_operations()
            all_tensor_names = {output.name for op in ops for output in op.outputs}
            tensor_dict = {}
            # Only request outputs the graph actually exposes.
            for key in [
                'num_detections', 'detection_boxes', 'detection_scores',
                'detection_classes', 'detection_masks'
            ]:
                tensor_name = key + ':0'
                if tensor_name in all_tensor_names:
                    tensor_dict[key] = tf.get_default_graph().get_tensor_by_name(tensor_name)
            if 'detection_masks' in tensor_dict:
                # The following processing is only for single image
                detection_boxes = tf.squeeze(tensor_dict['detection_boxes'], [0])
                detection_masks = tf.squeeze(tensor_dict['detection_masks'], [0])
                # Reframe is required to translate mask from box coordinates to image coordinates and fit the image size.
                real_num_detection = tf.cast(tensor_dict['num_detections'][0], tf.int32)
                detection_boxes = tf.slice(detection_boxes, [0, 0], [real_num_detection, -1])
                detection_masks = tf.slice(detection_masks, [0, 0, 0], [real_num_detection, -1, -1])
                detection_masks_reframed = utils_ops.reframe_box_masks_to_image_masks(
                    detection_masks, detection_boxes, image.shape[0], image.shape[1])
                # Binarize masks at a 0.5 threshold.
                detection_masks_reframed = tf.cast(tf.greater(detection_masks_reframed, 0.5), tf.uint8)
                # Follow the convention by adding back the batch dimension
                tensor_dict['detection_masks'] = tf.expand_dims(detection_masks_reframed, 0)
            image_tensor = tf.get_default_graph().get_tensor_by_name('image_tensor:0')
            # Run inference
            output_dict = sess.run(tensor_dict, feed_dict={image_tensor: np.expand_dims(image, 0)})
            # all outputs are float32 numpy arrays, so convert types as appropriate
            output_dict['num_detections'] = int(output_dict['num_detections'][0])
            output_dict['detection_classes'] = output_dict['detection_classes'][0].astype(np.uint8)
            output_dict['detection_boxes'] = output_dict['detection_boxes'][0]
            output_dict['detection_scores'] = output_dict['detection_scores'][0]
            if 'detection_masks' in output_dict:
                output_dict['detection_masks'] = output_dict['detection_masks'][0]
    return output_dict
def load_detection_graph(path_to_checkpoint):
    """Load a frozen inference graph (.pb file) into a fresh tf.Graph."""
    detection_graph = tf.Graph()
    with detection_graph.as_default():
        od_graph_def = tf.GraphDef()
        with tf.gfile.GFile(path_to_checkpoint, 'rb') as fid:
            serialized_graph = fid.read()
            od_graph_def.ParseFromString(serialized_graph)
            # name='' avoids prefixing every imported tensor name.
            tf.import_graph_def(od_graph_def, name='')
    return detection_graph
def load_category_index(path_to_labels, number_of_classes):
    """Build a category index (id -> category dict) from a label-map file.

    Wraps the three label_map_util steps: load, convert to a category
    list capped at *number_of_classes*, and index by category id.
    """
    raw_label_map = label_map_util.load_labelmap(path_to_labels)
    category_list = label_map_util.convert_label_map_to_categories(
        raw_label_map,
        max_num_classes=number_of_classes,
        use_display_name=True)
    return label_map_util.create_category_index(category_list)
if __name__ == "__main__":
    # Command-line entry point: run object detection on one image and save a
    # copy of the image with detection boxes/labels (and masks, if any) drawn.
    parser = argparse.ArgumentParser(description='Performs detection over input image given a trained detector.')
    parser.add_argument('--inference_graph', dest='inference_graph', type=str, required=True,
                        help='Path to the frozen inference graph.')
    # NOTE(review): this argument is required=True, so the default below is
    # dead -- it can never take effect. Confirm whether it should be optional.
    parser.add_argument('--label_map', dest='label_map', type=str, required=True,
                        help='Path to the label map, which is json-file that maps each category name to a unique number.',
                        default="mapping.txt")
    parser.add_argument('--number_of_classes', dest='number_of_classes', type=int, default=32,
                        help='Number of classes.')
    parser.add_argument('--input_image', dest='input_image', type=str, required=True, help='Path to the input image.')
    parser.add_argument('--output_image', dest='output_image', type=str, default='detection.jpg',
                        help='Path to the output image.')
    args = parser.parse_args()
    # Path to frozen detection graph. This is the actual model that is used for the object detection.
    # PATH_TO_CKPT = '/home/jcalvo/Escritorio/Current/Mensural Detector/mensural-detector/output_inference_graph.pb/frozen_inference_graph.pb'
    path_to_frozen_inference_graph = args.inference_graph
    path_to_labels = args.label_map
    number_of_classes = args.number_of_classes
    input_image = args.input_image
    output_image = args.output_image
    # Read frozen graph and the id -> category-name mapping.
    detection_graph = load_detection_graph(path_to_frozen_inference_graph)
    category_index = load_category_index(path_to_labels, number_of_classes)
    image = Image.open(input_image)
    # the array based representation of the image will be used later in order to prepare the
    # result image with boxes and labels on it.
    image_np = load_image_into_numpy_array(image)
    # Actual detection.
    output_dict = run_inference_for_single_image(image_np, detection_graph)
    # Visualization of the results of a detection (draws in-place on image_np).
    vis_util.visualize_boxes_and_labels_on_image_array(
        image_np,
        output_dict['detection_boxes'],
        output_dict['detection_classes'],
        output_dict['detection_scores'],
        category_index,
        instance_masks=output_dict.get('detection_masks'),
        use_normalized_coordinates=True,
        line_thickness=2)
    Image.fromarray(image_np).save(output_image)
|
[
"alexander.pacha@gmail.com"
] |
alexander.pacha@gmail.com
|
480449f9654885b91bebc78cabcb012d20f26abb
|
6d0d27377707b4ac36d5d7858a590869e4eeff7c
|
/src/backend/utils/upload.py
|
ba670386065fdb7720aee57d3337ffee6efe620b
|
[] |
no_license
|
Hiraishi-Ryota/mediado-hack-2021-a
|
6ce03f63a3b254c80e615248e85834df2af8718a
|
1c570a2d807c4531949639846220cbde15beada9
|
refs/heads/main
| 2023-08-06T21:10:57.354095
| 2021-09-15T05:50:54
| 2021-09-15T05:50:54
| 404,998,912
| 0
| 0
| null | 2021-09-15T05:50:55
| 2021-09-10T07:45:52
|
Python
|
UTF-8
|
Python
| false
| false
| 559
|
py
|
import os
from pathlib import Path
import shutil
import sys
from tempfile import NamedTemporaryFile, SpooledTemporaryFile
BASE_DIR = os.getcwd()


def upload(filename: str, file: SpooledTemporaryFile, dir: str):
    """Persist *file* under *dir* and return the saved path relative to BASE_DIR.

    A unique file name is generated via NamedTemporaryFile(delete=False); the
    extension of *filename* is preserved. The uploaded *file* handle is always
    closed, even when copying fails.
    """
    suffix = Path(filename).suffix
    try:
        tmp = NamedTemporaryFile(delete=False, suffix=suffix, dir=dir)
        with tmp:
            shutil.copyfileobj(file, tmp)
        saved_path = Path(tmp.name)
    finally:
        file.close()
    return str(saved_path.relative_to(BASE_DIR))
|
[
"leonard.t1028@gmail.com"
] |
leonard.t1028@gmail.com
|
5f064814d535825806233dee2469180a93dec6bb
|
7df7eb32424b40fa98378af298716759ae39d198
|
/Laboratorios/Lab8/Lab8Ejercicio1.py
|
3a0679cfc04af8dd79cbb5820216ec63516cbcbc
|
[] |
no_license
|
tratohecho3/Algoritmos-1
|
289b2ee990da943d7a1622af04d815839882937a
|
b009a780de7e56dccf4d7832010b06d0254a35c1
|
refs/heads/master
| 2021-01-12T00:48:36.848983
| 2017-01-07T20:19:24
| 2017-01-07T20:19:24
| 78,298,851
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,170
|
py
|
"""Autores: Cesar Colina 13-10299
Francisco Marquez 12-11163
Lab8Ejercicio.py
Description: reads an integer matrix from a .txt file and writes a set of
statistics about it (diagonal averages, min/max positions, row/column means,
3x3 quadrant sums) to another .txt file.
"""
# CALCULATIONS
import pygame,sys, os.path
from pygame.locals import *
pygame.init()
# NOTE(review): this loop busy-waits and, once 360 s have elapsed, sys.exit()
# terminates the whole program, so pygame.quit() below it is unreachable and
# the code after the loop appears never to run -- confirm intended behaviour.
while True:
    tiempo = pygame.time.get_ticks() / 1000
    if tiempo > 360:
        sys.exit()
        pygame.quit()
# Ask the user for the (row, column) of the element echoed at the very end.
x = int(input("Introduzca el valor de la fila donde se encuentra su numero: "))
y = int(input("introduzca el valor de la columna donde se encuentra su numero: "))
# Read the matrix: one row of whitespace-separated integers per line.
with open("matrix-entry.txt") as f:
    lineas = f.readlines()
f.closed  # NOTE(review): no-op attribute access; the `with` block already closed f
matriz = []
for i in range(len(lineas)):
    matriz.append(lineas[i].split())
for i in range(len(matriz)):
    for j in range(len(matriz[i])):
        matriz[i][j] = int(matriz[i][j])
# Positive even values on the main diagonal.
Diag_ppal = []
for i in range(len(lineas)):
    for j in range(len(matriz[i])):
        if i == j:
            if matriz[i][j] > 0 and matriz[i][j]% 2 == 0:
                Diag_ppal.append(matriz[i][j])
# Negative odd values on the secondary diagonal (i + j == 5 assumes 6 columns).
Diag_sec = []
for i in range(len(lineas)):
    for j in range(len(matriz[i])):
        if i + j == 5:
            if matriz[i][j] < 0 and matriz[i][j]% 2 != 0:
                Diag_sec.append(matriz[i][j])
# Smallest odd value (sentinel 1000 assumes all matrix values are below 1000).
minimos = []
for i in range(len(lineas)):
    for j in matriz[i]:
        if j % 2 != 0:
            minimos.append(j)
valor = 1000
for i in minimos:
    if i % 2 != 0:
        if i < valor:
            valor = i
for i in range(len(lineas)):
    for j in range(len(matriz[i])):
        if matriz[i][j] == valor:
            # RESULT 3: position of the smallest odd value
            posicion_impar_menor = [i, j]
# Largest even value and its position.
maximos = []
for i in range(len(lineas)):
    for j in matriz[i]:
        if j % 2 == 0:
            maximos.append(j)
valor2 = max(maximos)
for i in range(len(lineas)):
    for j in range(len(matriz[i])):
        if matriz[i][j] == valor2:
            # RESULT 3: position of the largest even value
            posicion_par_mayor = [i, j]
# Column-wise copies of the matrix (one list per column, 6 columns assumed).
columna0 = []
columna1 = []
columna2 = []
columna3 = []
columna4 = []
columna5 = []
for i in range(len(matriz)):
    columna0.append(matriz[i][0])
for i in range(len(matriz)):
    columna1.append(matriz[i][1])
for i in range(len(matriz)):
    columna2.append(matriz[i][2])
for i in range(len(matriz)):
    columna3.append(matriz[i][3])
for i in range(len(matriz)):
    columna4.append(matriz[i][4])
for i in range(len(matriz)):
    columna5.append(matriz[i][5])
# 3x3 quadrants: A = top-left, B = top-right, C = bottom-left, D = bottom-right.
area1 = []
for i in range(3):
    for j in range(3):
        area1.append(matriz[i][j])
area2 = []
for i in range(3):
    for j in range(3, 6):
        area2.append(matriz[i][j])
area3 = []
for i in range(3, 6):
    for j in range(3):
        area3.append(matriz[i][j])
area4 = []
for i in range(3, 6):
    for j in range(3, 6):
        area4.append(matriz[i][j])
# Mean of the quadrant sums, main-diagonal mean, secondary-diagonal sum.
total = (sum(area1) + sum(area2) + sum(area3) + sum(area4))/ 4
promedio = sum(Diag_ppal) / len(Diag_ppal)
sumatoria = sum(Diag_sec)
# Write the report (messages are in Spanish, matching the assignment spec).
with open("report-output.txt", "w") as f:
    f.write("El promedio de los valores pares positivos de la diagonal principal es\n")
    f.write(str(promedio) + "\n")
    f.write("La suma de los valores impares negativos de la diagonal secundaria es \n")
    f.write(str(sumatoria) + "\n")
    f.write("El valor del impar menor de la matriz es" + "\n")
    f.write(str(valor) + "\n")
    f.write("La posicion, o posiciones, del impar menor es, o son" + "\n")
    for i in range(len(lineas)):
        for j in range(len(matriz[i])):
            if matriz[i][j] == valor:
                # RESULT 3
                f.write("La fila" + "\n")
                f.write(str(i) + "\n")
                f.write("La columna" + "\n")
                f.write(str(j) + "\n")
    f.write("El valor del par mayor de la matriz es" + "\n")
    f.write(str(valor2) + "\n")
    f.write("La posicion, o posiciones, del par mayor es, o son" + "\n")
    for i in range(len(lineas)):
        for j in range(len(matriz[i])):
            if matriz[i][j] == valor2:
                # RESULT 3
                f.write("La fila" + "\n")
                f.write(str(i) + "\n")
                f.write("La columna" + "\n")
                f.write(str(j) + "\n")
    f.write("El promedio de los valores de cada fila es" + "\n")
    for i in range(len(matriz)):
        # RESULT 4
        f.write("El promedio de la fila " + str(i) + " es" + "\n")
        f.write(str(sum(matriz[i]) / len(matriz[i])) + "\n")
    f.write("El promedio de los valores de cada columna es" + "\n")
    f.write("Para la columna 0" + "\n")
    f.write(str(sum(columna0) / len(columna0)) + "\n")
    f.write("Para la columna 1" + "\n")
    f.write(str(sum(columna1) / len(columna1)) + "\n")
    f.write("Para la columna 2" + "\n")
    f.write(str(sum(columna2) / len(columna2)) + "\n")
    f.write("Para la columna 3" + "\n")
    f.write(str(sum(columna3) / len(columna3)) + "\n")
    f.write("Para la columna 4" + "\n")
    f.write(str(sum(columna4) / len(columna4)) + "\n")
    f.write("Para la columna 5" + "\n")
    f.write(str(sum(columna5) / len(columna5)) + "\n")
    f.write("La suma de los valores de cada una de las subregiones de la matriz es" + "\n")
    f.write("Para la subregion A" + "\n")
    f.write(str(sum(area1)) + "\n")
    f.write("Para la subregion B" + "\n")
    f.write(str(sum(area2)) + "\n")
    f.write("Para la subregion C" + "\n")
    f.write(str(sum(area3)) + "\n")
    f.write("Para la subregion D" + "\n")
    f.write(str(sum(area4)) + "\n")
    f.write("El promedio de la suma total de las subregiones es" + "\n")
    f.write(str(total))
f.closed  # NOTE(review): no-op; kept from original
print(matriz[x][y])
|
[
"noreply@github.com"
] |
noreply@github.com
|
57bfefceefd25252047dcd608dff497f0c347b82
|
988dd821269be12c2f56f62b0c35546fd3050537
|
/python/quaternions/rotations.py
|
852c8839c1435519fcbc0675bd055c4d8af732b7
|
[] |
no_license
|
gdiazh/adcs_models
|
fb19f541eeb9b01ae49ec98719c508d084e4fd7a
|
51d0829cc777d2e345e4fabe406ec7f54e661117
|
refs/heads/master
| 2020-03-28T13:04:56.174852
| 2018-09-28T22:08:25
| 2018-09-28T22:08:25
| 148,364,081
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,050
|
py
|
#!/usr/bin/python
__author__ = 'gdiaz'
import matplotlib as mpl
from plotVectors import PlotVectors
import numpy as np
class Rotation(object):
    """Rotation utilities about the z-axis (Python 2 code).

    Mixes vector rotation, frame rotation, and quaternion helpers used for
    attitude-error computation. Plotting is delegated to PlotVectors.
    """
    def __init__(self):
        # Plot helper; self.a is initialized but never read in this class.
        self.vectors = PlotVectors()
        self.a = [0, 0, 0]
    def rotate_z(self, a, yaw):
        """Rotate 3-vector *a* by *yaw* radians about z; returns a list [x, y, z]."""
        Az = np.matrix([[np.cos(yaw), -np.sin(yaw), 0],
                        [np.sin(yaw), np.cos(yaw), 0],
                        [0, 0, 1]])
        a_ = np.matrix([[a[0]],
                        [a[1]],
                        [a[2]]])
        u = Az*a_
        return [u.item(0), u.item(1), u.item(2)]
    def rotate_frame_z(self, I, J, K, yaw):
        """Rotate the frame basis vectors I, J, K by *yaw* about z.

        NOTE(review): the matrix here is the transpose of the one in
        rotate_z and is applied on the right of row vectors -- presumably
        the passive (frame) rotation convention; confirm against usage.
        Returns [i, j, k] as plain lists.
        """
        Az = np.matrix([[np.cos(yaw), np.sin(yaw), 0],
                        [-np.sin(yaw), np.cos(yaw), 0],
                        [0, 0, 1]])
        I_ = np.matrix([I[0], I[1], I[2]])
        J_ = np.matrix([J[0], J[1], J[2]])
        K_ = np.matrix([K[0], K[1], K[2]])
        i_ = I_*Az
        j_ = J_*Az
        k_ = K_*Az
        i = [i_.item(0), i_.item(1), i_.item(2)]
        j = [j_.item(0), j_.item(1), j_.item(2)]
        k = [k_.item(0), k_.item(1), k_.item(2)]
        return [i, j, k]
    def vectorRotationTest(self):
        """Demo: rotate [2, 0, 0] by 90 degrees and plot original vs rotated."""
        # Calcs
        p1 = [2, 0, 0]
        yaw = 90*np.pi/180
        p1_rot = self.rotate_z(p1, yaw)
        print p1_rot
        # Plot
        self.vectors.plotAxes()
        self.vectors.config()
        self.vectors.plot(p1)
        self.vectors.plot(p1_rot)
        self.vectors.show()
    def frameRotationTest(self):
        """Demo: rotate the identity frame by 45 degrees and plot the result."""
        # Calcs
        I = [1, 0, 0]
        J = [0, 1, 0]
        K = [0, 0, 1]
        yaw = 45*np.pi/180
        ijk = self.rotate_frame_z(I, J, K, yaw)
        print ijk
        # Plot
        self.vectors.plotAxes()
        self.vectors.config()
        self.vectors.plot(ijk[0])
        self.vectors.plot(ijk[1])
        self.vectors.plot(ijk[2])
        self.vectors.show()
    def get_qT(self, yawT): #Return quaternion target given yaw target
        # Build the target DCM for a z rotation, then extract the quaternion
        # [q4 (scalar), q1, q2, q3].
        # NOTE(review): every component divides by q4 = 0.5*sqrt(1 + trace);
        # for yawT = pi the trace makes q4 = 0 and this blows up -- confirm
        # yawT is restricted away from 180 degrees.
        AT = np.matrix([[np.cos(yawT), np.sin(yawT), 0],
                        [-np.sin(yawT), np.cos(yawT), 0],
                        [0, 0, 1]])
        q4 = 0.5*np.sqrt(1+AT[0,0]+AT[1,1]+AT[2,2])
        q1 = 0.25*(AT[1,2]-AT[2,1])/q4
        q2 = 0.25*(AT[2,0]-AT[0,2])/q4
        q3 = 0.25*(AT[0,1]-AT[1,0])/q4
        return [q4, q1, q2, q3]
    def get_qE_(self, qT, qS):
        """Error quaternion between target qT and state qS (both [q4, q1, q2, q3])."""
        qT_ = np.matrix([[qT[0], qT[3], -qT[2], qT[1]],
                        [-qT[3], qT[0], qT[1], qT[2]],
                        [qT[2], -qT[1], qT[0], qT[3]],
                        [-qT[1], -qT[2], -qT[3], qT[0]]])
        qS_ = np.matrix([[-qS[1]],
                        [-qS[2]],
                        [-qS[3]],
                        [qS[0]]])
        qE = qT_*qS_
        return [qE.item(0), qE.item(1), qE.item(2), qE.item(3)]
    def get_qE(self, yawT, qS):
        """Convenience wrapper: error quaternion from a yaw target and state qS."""
        qT = self.get_qT(yawT)
        qE = self.get_qE_(qT, qS)
        return qE
if __name__ == '__main__':
    # Demo: rotate the identity frame 45 degrees about z and plot it.
    rotation_demo = Rotation()
    # rotation_demo.vectorRotationTest()  # alternative single-vector demo
    rotation_demo.frameRotationTest()
|
[
"g.hernan.diaz@gmail.com"
] |
g.hernan.diaz@gmail.com
|
a717e726db9b4dae2b92ba552163a7eb6f742c6f
|
711ebd8c54be73934154d3f3b325bbc2a13a5fde
|
/weather app.py
|
6dc4cd8ffc05b6af1499c0d481df5b6bdd07ec51
|
[] |
no_license
|
Balajiigor/python
|
3b5ed72c57002eec7d2eebc8a1f1f84277eb5da8
|
3e6817b2447c5b0265147870758d00e30dbbc238
|
refs/heads/using-json-in-python
| 2023-07-08T19:08:40.490641
| 2021-08-16T02:32:03
| 2021-08-16T02:32:03
| 382,550,766
| 0
| 0
| null | 2021-08-16T02:34:18
| 2021-07-03T07:11:36
|
Python
|
UTF-8
|
Python
| false
| false
| 3,192
|
py
|
import requests
from bs4 import BeautifulSoup
from tkinter import Tk
from tkinter import Label
from PIL import ImageTk, Image
# Pre-resolved weather.com "today" page URLs, one per location. Only
# Tamil_nadu is actually fetched by getWeather(); the rest are kept for
# quick manual switching.
Tamil_nadu = "https://weather.com/en-IN/weather/today/l/4a5f6abb61cf684f3b18578ada1c5647346a0c273b8d5cd86c1eb48842d572e5"
Seattle = "https://weather.com/en-IN/weather/today/l/ced0de18c1d771856e6012f3abf0a952cfe22952e72e516e6e098d54ca737114"
Dubai = "https://weather.com/en-IN/weather/today/l/af60f113ba123ce93774fed531be2e1e51a1666be5d6012f129cfa27bae1ee6c"
Paris = "https://weather.com/en-IN/weather/today/l/501361e097b79e8221d5c0b1447e80a0bf1c48b8fee1e4d98d4dad397ba2f204"
Norway = "https://weather.com/en-IN/weather/today/l/cb003c6f366a3ae14b6a78ac5f2cfd18285fa02d15f892112dd00961afcb043b"
Los_Angeles = "https://weather.com/en-IN/weather/today/l/0f4e045fdd139c3280846cf4eaae5b3f1c6ca58d13169016e6209f7b86872fc1"
Arab = "https://weather.com/en-IN/weather/today/l/9eb72583100b2852c7d0da1a9f6d6d523dc38cfeb848d2ba82517e7f8bb44626"
Moscow = "https://weather.com/en-IN/weather/today/l/34f2aafc84cff75ae0b014754856ea5e7f8ddf618cf9735549dfb5e016c28e10"
# Root window: black background, app icon loaded and scaled to 150x150.
master = Tk()
master.title("Weather app")
master.config(background = "black")
# NOTE(review): absolute, machine-specific icon path -- breaks on other hosts.
img = Image.open("/home/balaji/Pictures/weather.png")
img = img.resize((150, 150))
img = ImageTk.PhotoImage(img)
def getWeather():
    """Fetch current conditions from weather.com and refresh the UI labels.

    Scrapes the Tamil Nadu "today" page (see module-level URLs), writes the
    scraped values into the five Tk labels, and re-schedules itself to run
    again in 60 seconds.
    """
    page = requests.get(Tamil_nadu)
    soup = BeautifulSoup(page.content, "html.parser")
    # NOTE(review): the hashed CSS class suffixes (e.g. --2_osB) come from
    # weather.com's build pipeline and can change at any time, silently
    # breaking these selectors (find() would return None -> AttributeError).
    location = soup.find("h1", class_="CurrentConditions--location--2_osB").text
    time = soup.find("div", class_="CurrentConditions--timestamp--3_-CV").text
    temperature = soup.find("span", class_="CurrentConditions--tempValue--1RYJJ").text
    weatherPrediction = soup.find("div", class_="CurrentConditions--phraseValue--17s79").text
    alert = soup.find("div", class_="CurrentConditions--precipValue--1RgXi").text
    print(location)
    print(time)
    print(temperature)
    print(weatherPrediction)
    print(alert)
    locationLabel.config(text=location)
    timeLabel.config(text=time)
    temperaturLabel.config(text=temperature)
    weatherPredictionLabel.config(text=weatherPrediction)
    alertLabel.config(text=alert)
    # Bug fix: the original scheduled getWeather via .after() on FOUR widgets,
    # so every run queued four more runs and the number of pending timers grew
    # exponentially (1 -> 4 -> 16 -> ...). Schedule exactly one refresh.
    timeLabel.after(60000, getWeather)
# Label widgets stacked top-to-bottom in the grid; getWeather() fills them in.
# NOTE(review): "temperaturLabel" is misspelled but used consistently
# throughout the file, so renaming it would require coordinated changes.
locationLabel = Label(master, font = ("calibri bold", 30), background = "black", foreground = "white")
locationLabel.grid(row = 0, sticky="W", padx=40)
timeLabel = Label(master, font =("calibri bold", 20), background = "black", foreground = "white")
timeLabel.grid(row = 1, sticky = "W", padx = 40)
temperaturLabel = Label(master, font=("calibri bold", 70), background="black", foreground = "white")
temperaturLabel.grid(row =2, sticky = "W", padx = 40)
# Weather icon sits to the right of the temperature in the same grid row.
Label(master, image = img, background = "black").grid(row = 2, sticky = "E")
weatherPredictionLabel = Label(master, font= ("calibri bold", 40), background = "black", foreground ="white")
weatherPredictionLabel.grid(row = 3, sticky = "W", padx=40)
alertLabel = Label(master, font = ("calibri bold", 15), background = "black", foreground = "white")
alertLabel.grid(row= 4, sticky ="W", padx = 40)
# Initial fetch (also schedules the periodic refresh), then enter the Tk loop.
getWeather()
master.mainloop()
|
[
"noreply@github.com"
] |
noreply@github.com
|
de57cedbc86dec255b93ebc77daf153a873f5256
|
1422a57e98aba02321b772d72f8f0ada6d8b8cba
|
/friday/friday-vendor/vendor-scripts/test-resources/scripts/pylib/hue_turn_on_light.py
|
152b15f1a6ee7c7306946bab089ea4f1578d9421
|
[
"MIT"
] |
permissive
|
JonasRSV/Friday
|
e1908a411aa133bc5bd2f383b0a995f7e028092d
|
f959eff95ba7b11525f97099c8f5ea0e325face7
|
refs/heads/main
| 2023-05-15T03:33:21.542621
| 2021-06-12T10:34:50
| 2021-06-12T10:34:50
| 315,309,991
| 7
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 196
|
py
|
import phue
import sys
if __name__ == "__main__":
    # Usage: hue_turn_on_light.py <light-id>
    # Turn the given Hue light on at brightness 200 with a transition of
    # 5 deciseconds (phue transition times are in tenths of a second).
    hue_bridge = phue.Bridge(config_file_path="credentials.json")
    light_id = int(sys.argv[1])
    hue_bridge.set_light(light_id, parameter={"on": True, "bri": 200}, transitiontime=5)
|
[
"jonas@valfridsson.net"
] |
jonas@valfridsson.net
|
35b57a408d049fe970e3d7bb1fcf28c9e89d7f4c
|
faa3c49ce63590c298ffcd5ecc4c4b1808efb5db
|
/docker-images/docker-madminer-all/code/configurate.py
|
d540ebcfa111bf9b88f45e96d77090405b44974c
|
[
"MIT"
] |
permissive
|
johannbrehmer/workflow-madminer
|
bf4063e3793db2f0340ed4c0fe4e13052d7f6d06
|
bb648503bc5b6df301dea7708cc05fb567a4be57
|
refs/heads/master
| 2020-04-28T12:11:37.970977
| 2020-04-09T19:13:04
| 2020-04-09T19:13:04
| 175,268,087
| 0
| 0
|
MIT
| 2019-03-12T17:52:33
| 2019-03-12T17:52:33
| null |
UTF-8
|
Python
| false
| false
| 2,821
|
py
|
from __future__ import absolute_import, division, print_function, unicode_literals
import numpy as np
#import matplotlib
#from matplotlib import pyplot as plt
#%matplotlib inline
import sys
import yaml
import inspect
from madminer.core import MadMiner
from madminer.plotting import plot_2d_morphing_basis
from madminer.sampling import combine_and_shuffle
from madminer.sampling import SampleAugmenter
from madminer.sampling import benchmark, benchmarks
from madminer.sampling import morphing_point, morphing_points, random_morphing_points
# Configure a MadMiner run from a YAML file given as argv[1]: register
# parameters and benchmarks, set up morphing, and save the setup to HDF5.
mg_dir = '/home/software/MG5_aMC_v2_6_2'
miner = MadMiner()#(debug=False)
input_file = str(sys.argv[1])
print('inputfile: ',input_file)
########### ADD parameters and benchmarks from input file
with open(input_file) as f:
    # use safe_load instead load (avoids executing arbitrary YAML tags)
    dict_all = yaml.safe_load(f)
# Get default values of miner.add_parameter() so optional YAML keys fall
# back to the library defaults.
# NOTE(review): inspect.getargspec() was removed in Python 3.11; switch to
# inspect.getfullargspec() (same .args/.defaults attributes) when upgrading.
default_arr = inspect.getargspec(miner.add_parameter)
default = dict(zip(reversed(default_arr.args), reversed(default_arr.defaults)))
# ADD PARAMETERS
for parameter in dict_all['parameters']:
    # Parse "(lo, hi)" string into numbers.
    # NOTE(review): on Python 3 map() returns an iterator, not a tuple, despite
    # the name range_tuple -- confirm add_parameter accepts that.
    range_input = parameter['parameter_range']
    range_tuple = map(float, range_input.replace('(','').replace(')','').split(','))
    miner.add_parameter(
        lha_block=parameter['lha_block'], #required
        lha_id=parameter['lha_id'], #required
        parameter_name=parameter.get('parameter_name', default['parameter_name']), #optional
        morphing_max_power=int( parameter.get('morphing_max_power', default['morphing_max_power']) ), #optional
        param_card_transform=parameter.get('param_card_transform',default['param_card_transform']), #optional
        parameter_range=range_tuple #optional
    )
n_parameters = len(dict_all['parameters'])
# ADD BENCHMARKS
# NOTE(review): the loop variable shadows `benchmark` imported from
# madminer.sampling at the top of the file.
for benchmark in dict_all['benchmarks']:
    dict_of_parameters_this_benchmark = dict()
    for i in range(1, n_parameters+1):
        try:
            # add to the dictionary: key is parameter name, value is value
            dict_of_parameters_this_benchmark[ benchmark['parameter_name_'+str(i)] ] = float(benchmark['value_'+str(i)])
        except KeyError as e:
            print('Number of benchmark parameters does not match number of global parameters in input file')
            raise e
    # Register this benchmark point with MadMiner.
    miner.add_benchmark(
        dict_of_parameters_this_benchmark,
        benchmark['name']
    )
###########
# SET morphing over the registered benchmarks.
settings = dict_all['set_morphing']
miner.set_morphing(
    include_existing_benchmarks=True,
    max_overall_power=int(settings['max_overall_power'])
)
#fig = plot_2d_morphing_basis(
#    miner.morpher,
#    xlabel=r'$c_{W} v^2 / \Lambda^2$',
#    ylabel=r'$c_{\tilde{W}} v^2 / \Lambda^2$',
#    xrange=(-10.,10.),
#    yrange=(-10.,10.)
#)
miner.save('/home/data/madminer_example.h5')
|
[
"iem244@nyu.edu"
] |
iem244@nyu.edu
|
f0f4aaf831a274f5022dbc1d1fa68fe08e28c63e
|
a2fd9491d11d9982d1ce82765b6dbed7653a954d
|
/Adpy/Lesson 1.5/Lesson 1.5.py
|
cefec4862090dd0924ccf7bd7ccd0636ff58ea19
|
[] |
no_license
|
nikolaydmukha/Netology
|
bc3dc541c1de9329671daeacca385de3b55d9c48
|
b3577a4d414e76876412de01c2120e81ba82c697
|
refs/heads/master
| 2022-12-11T04:34:27.270476
| 2019-06-30T09:04:02
| 2019-06-30T09:04:02
| 167,036,184
| 1
| 0
| null | 2022-11-22T03:15:02
| 2019-01-22T17:23:40
|
Python
|
UTF-8
|
Python
| false
| false
| 1,943
|
py
|
import email
import smtplib
import imaplib
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
class Email:
    """Minimal Gmail helper: send mail over SMTP and fetch mail over IMAP.

    The same account (sender/password) is used for both SMTP sending and
    IMAP retrieval.
    """
    def __init__(self, sender, password, subject, recipients, message, header=None):
        self.sender = sender          # Gmail address used for SMTP and IMAP login
        self.password = password      # account password (or app password)
        self.subject = subject        # subject line for outgoing mail
        self.recipients = recipients  # list of recipient addresses
        self.message = message        # plain-text body for outgoing mail
        self.header = header          # optional Subject filter for receive()
        self.gmail_smtp = "smtp.gmail.com"
        self.gmail_imap = "imap.gmail.com"
    # send message
    def send_message(self):
        """Send self.message to self.recipients via Gmail SMTP with STARTTLS."""
        msg = MIMEMultipart()
        msg['From'] = self.sender
        msg['To'] = ', '.join(self.recipients)
        msg['Subject'] = self.subject
        msg.attach(MIMEText(self.message))
        ms = smtplib.SMTP(self.gmail_smtp, 587)
        # identify ourselves to smtp gmail client
        ms.ehlo()
        # secure our email with tls encryption
        ms.starttls()
        # re-identify ourselves as an encrypted connection
        ms.ehlo()
        ms.login(self.sender, self.password)
        # Bug fix: the original passed the SMTP connection object (`ms`) as the
        # recipient argument; sendmail() expects the address list.
        ms.sendmail(self.sender, self.recipients, msg.as_string())
        ms.quit()
    # receive messages
    def receive(self):
        """Return the newest inbox message, optionally filtered by self.header.

        Raises AssertionError when no message matches the Subject filter.
        """
        mail = imaplib.IMAP4_SSL(self.gmail_imap)
        mail.login(self.sender, self.password)
        mail.list()
        mail.select("inbox")
        criterion = '(HEADER Subject "%s")' % self.header if self.header else 'ALL'
        result, data = mail.uid('search', None, criterion)
        assert data[0], 'There are no letters with current header'
        latest_email_uid = data[0].split()[-1]
        result, data = mail.uid('fetch', latest_email_uid, '(RFC822)')
        raw_email = data[0][1]
        # Bug fix: imaplib returns bytes; message_from_string() raises
        # TypeError on Python 3, so parse with message_from_bytes() instead.
        email_message = email.message_from_bytes(raw_email)
        mail.logout()
        # Bug fix: the parsed message was computed and then discarded;
        # return it to the caller.
        return email_message
if __name__ == '__main__':
    # Build an Email configured with demo credentials and a Subject filter.
    # NOTE: neither send_message() nor receive() is invoked here.
    mailer = Email('login@gmail.com', 'qwerty', 'Subject',
                   ['vasya@email.com', 'petya@email.com'], 'Message',
                   header='Refactoring')
|
[
"45710335+nikolaydmukha@users.noreply.github.com"
] |
45710335+nikolaydmukha@users.noreply.github.com
|
6d346848a2eed9d5be67fdb017a17285227f874a
|
bd5a3b59a5ca9f0c0394c8bf90e818c3967778d9
|
/vre/apps/xauth/urls.py
|
2ba5dfc62bf27aafa163e3cf36365c4b0ea01be0
|
[] |
no_license
|
BlickLabs/vre
|
85f377c04406c163464f7ddade7eafb579f1dfb1
|
6f3644fb9295f6355057cfa64a1156a329b4b4b8
|
refs/heads/develop
| 2020-05-22T04:28:31.913667
| 2018-07-06T21:12:14
| 2018-07-06T21:12:14
| 62,763,239
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 297
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from django.conf.urls import url
from . import views
# Session login/logout endpoints for the xauth app.
urlpatterns = [
    url(r'^login/$', views.LoginView.as_view(), name='login'),
    url(r'^logout/$', views.logout_view, name='logout'),
]
|
[
"mauriciodinki@gmail.com"
] |
mauriciodinki@gmail.com
|
4679f028fe213a090bbd604db9707043887751db
|
8459dc3a3edebdd27b5589e9aa7215a55a42055b
|
/report/Lib/site-packages/wx/lib/mixins/listctrl.py
|
95ac8fbe9a65036e7334cd84fbfa901f88e58e2e
|
[] |
no_license
|
lyj21803/ReportsTest
|
adb5ef9c057d0bd0669ed9807eccc7edb77655d7
|
37df534d61f7d1f781dd91299ffa99fba4ba0e49
|
refs/heads/master
| 2022-12-02T08:59:27.587331
| 2020-07-24T07:29:50
| 2020-07-24T07:29:50
| 278,009,685
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 31,250
|
py
|
#----------------------------------------------------------------------------
# Name: wx.lib.mixins.listctrl
# Purpose: Helpful mix-in classes for wxListCtrl
#
# Author: Robin Dunn
#
# Created: 15-May-2001
# Copyright: (c) 2001-2020 by Total Control Software
# Licence: wxWindows license
# Tags: phoenix-port, py3-port
#----------------------------------------------------------------------------
# 12/14/2003 - Jeff Grimmett (grimmtooth@softhome.net)
#
# o 2.5 compatibility update.
# o ListCtrlSelectionManagerMix untested.
#
# 12/21/2003 - Jeff Grimmett (grimmtooth@softhome.net)
#
# o wxColumnSorterMixin -> ColumnSorterMixin
# o wxListCtrlAutoWidthMixin -> ListCtrlAutoWidthMixin
# ...
# 13/10/2004 - Pim Van Heuven (pim@think-wize.com)
# o wxTextEditMixin: Support Horizontal scrolling when TAB is pressed on long
# ListCtrls, support for WXK_DOWN, WXK_UP, performance improvements on
# very long ListCtrls, Support for virtual ListCtrls
#
# 15-Oct-2004 - Robin Dunn
# o wxTextEditMixin: Added Shift-TAB support
#
# 2008-11-19 - raf <raf@raf.org>
# o ColumnSorterMixin: Added GetSortState()
#
import locale
import wx
import six
if six.PY3:
    # Python 3 removed the builtin cmp(); provide an equivalent shim used
    # by the column sorter below.
    def cmp(a, b):
        """Return -1, 0, or 1 as *a* is less than, equal to, or greater than *b*."""
        return (a > b) - (a < b)
#----------------------------------------------------------------------------
class ColumnSorterMixin:
    """
    A mixin class that handles sorting of a wx.ListCtrl in REPORT mode when
    the column header is clicked on.

    There are a few requirements needed in order for this to work generically:

    1. The combined class must have a GetListCtrl method that
       returns the wx.ListCtrl to be sorted, and the list control
       must exist at the time the wx.ColumnSorterMixin.__init__
       method is called because it uses GetListCtrl.

    2. Items in the list control must have a unique data value set
       with list.SetItemData.

    3. The combined class must have an attribute named itemDataMap
       that is a dictionary mapping the data values to a sequence of
       objects representing the values in each column.  These values
       are compared in the column sorter to determine sort order.

    Interesting methods to override are GetColumnSorter,
    GetSecondarySortValues, and GetSortImages.  See below for details.
    """
    def __init__(self, numColumns):
        self.SetColumnCount(numColumns)
        # NOTE(review): `list` shadows the builtin; kept for compatibility.
        list = self.GetListCtrl()
        if not list:
            raise ValueError("No wx.ListCtrl available")
        list.Bind(wx.EVT_LIST_COL_CLICK, self.__OnColClick, list)
    def SetColumnCount(self, newNumColumns):
        # _colSortFlag[i] is column i's sort direction (0 = descending,
        # 1 = ascending); _col is the active sort column, -1 when none.
        self._colSortFlag = [0] * newNumColumns
        self._col = -1
    def SortListItems(self, col=-1, ascending=1):
        """Sort the list on demand.  Can also be used to set the sort column and order."""
        oldCol = self._col
        if col != -1:
            self._col = col
            self._colSortFlag[col] = ascending
        self.GetListCtrl().SortItems(self.GetColumnSorter())
        self.__updateImages(oldCol)
    def GetColumnWidths(self):
        """
        Returns a list of column widths.  Can be used to help restore the current
        view later.
        """
        list = self.GetListCtrl()
        rv = []
        for x in range(len(self._colSortFlag)):
            rv.append(list.GetColumnWidth(x))
        return rv
    def GetSortImages(self):
        """
        Returns a tuple of the indexes in the image list for the images to be
        put on the column header when sorting in descending or ascending
        order, respectively.  The default (-1, -1) means no images are used.
        """
        return (-1, -1)  # (descending, ascending) image IDs
    def GetColumnSorter(self):
        """Returns a callable object to be used for comparing column values when sorting."""
        return self.__ColumnSorter
    def GetSecondarySortValues(self, col, key1, key2):
        """Returns a tuple of 2 values to use for secondary sort values when the
           items in the selected column compare equal.  The default just returns the
           item data values."""
        return (key1, key2)
    def __OnColClick(self, evt):
        # Clicking a header toggles that column's direction and re-sorts.
        oldCol = self._col
        self._col = col = evt.GetColumn()
        self._colSortFlag[col] = int(not self._colSortFlag[col])
        self.GetListCtrl().SortItems(self.GetColumnSorter())
        # NOTE(review): header images are skipped on native Mac list controls,
        # presumably because the native control draws its own sort indicator.
        if wx.Platform != "__WXMAC__" or wx.SystemOptions.GetOptionInt("mac.listctrl.always_use_generic") == 1:
            self.__updateImages(oldCol)
        evt.Skip()
        self.OnSortOrderChanged()
    def OnSortOrderChanged(self):
        """
        Callback called after sort order has changed (whenever user
        clicked column header).
        """
        pass
    def GetSortState(self):
        """
        Return a tuple containing the index of the column that was last sorted
        and the sort direction of that column.
        Usage:
        col, ascending = self.GetSortState()
        # Make changes to list items... then resort
        self.SortListItems(col, ascending)
        """
        return (self._col, self._colSortFlag[self._col])
    def __ColumnSorter(self, key1, key2):
        # Comparator handed to SortItems: compares the active column's values
        # for the two item-data keys from itemDataMap.
        col = self._col
        ascending = self._colSortFlag[col]
        item1 = self.itemDataMap[key1][col]
        item2 = self.itemDataMap[key2][col]
        #--- Internationalization of string sorting with locale module
        if isinstance(item1, six.text_type) and isinstance(item2, six.text_type):
            # both are unicode (py2) or str (py3)
            cmpVal = locale.strcoll(item1, item2)
        elif isinstance(item1, six.binary_type) or isinstance(item2, six.binary_type):
            # at least one is a str (py2) or byte (py3)
            cmpVal = locale.strcoll(str(item1), str(item2))
        else:
            cmpVal = cmp(item1, item2)
        #---
        # If the items are equal then pick something else to make the sort value unique
        if cmpVal == 0:
            cmpVal = cmp(*self.GetSecondarySortValues(col, key1, key2))
        if ascending:
            return cmpVal
        else:
            return -cmpVal
    def __updateImages(self, oldCol):
        # Move the sort-direction image from the previously sorted column to
        # the current one; no-op when no images were supplied.
        sortImages = self.GetSortImages()
        if self._col != -1 and sortImages[0] != -1:
            img = sortImages[self._colSortFlag[self._col]]
            list = self.GetListCtrl()
            if oldCol != -1:
                list.ClearColumnImage(oldCol)
            list.SetColumnImage(self._col, img)
#----------------------------------------------------------------------------
#----------------------------------------------------------------------------
class ListCtrlAutoWidthMixin:
    """ A mix-in class that automatically resizes the last column to take up
        the remaining width of the wx.ListCtrl.

        This causes the wx.ListCtrl to automatically take up the full width of
        the list, without either a horizontal scroll bar (unless absolutely
        necessary) or empty space to the right of the last column.

        NOTE:    This only works for report-style lists.

        WARNING: If you override the EVT_SIZE event in your wx.ListCtrl, make
                 sure you call event.Skip() to ensure that the mixin's
                 _OnResize method is called.

        This mix-in class was written by Erik Westra <ewestra@wave.co.nz>
    """
    def __init__(self):
        """ Standard initialiser.
        """
        self._resizeColMinWidth = None
        self._resizeColStyle = "LAST"  # "LAST" or "COL"; see setResizeColumn
        self._resizeCol = 0            # 1-based column index when style is "COL"
        self.Bind(wx.EVT_SIZE, self._onResize)
        self.Bind(wx.EVT_LIST_COL_END_DRAG, self._onResize, self)
    def setResizeColumn(self, col):
        """
        Specify which column that should be autosized.  Pass either
        'LAST' or the column number.  Default is 'LAST'.
        """
        if col == "LAST":
            self._resizeColStyle = "LAST"
        else:
            self._resizeColStyle = "COL"
            self._resizeCol = col
    def resizeLastColumn(self, minWidth):
        """ Resize the last column appropriately.
            If the list's columns are too wide to fit within the window, we use
            a horizontal scrollbar.  Otherwise, we expand the right-most column
            to take up the remaining free space in the list.
            This method is called automatically when the wx.ListCtrl is resized;
            you can also call it yourself whenever you want the last column to
            be resized appropriately (eg, when adding, removing or resizing
            columns).
            'minWidth' is the preferred minimum width for the last column.
        """
        self.resizeColumn(minWidth)
    def resizeColumn(self, minWidth):
        # Record the preferred minimum width, then apply the resize now.
        self._resizeColMinWidth = minWidth
        self._doResize()
    # =====================
    # == Private Methods ==
    # =====================
    def _onResize(self, event):
        """ Respond to the wx.ListCtrl being resized.
            We automatically resize the last column in the list.
        """
        # On GTK the resize can be done synchronously; elsewhere it is
        # deferred with CallAfter.
        if 'gtk2' in wx.PlatformInfo or 'gtk3' in wx.PlatformInfo:
            self._doResize()
        else:
            wx.CallAfter(self._doResize)
        event.Skip()
    def _doResize(self):
        """ Resize the last column as appropriate.
            If the list's columns are too wide to fit within the window, we use
            a horizontal scrollbar.  Otherwise, we expand the right-most column
            to take up the remaining free space in the list.
            We remember the current size of the last column, before resizing,
            as the preferred minimum width if we haven't previously been given
            or calculated a minimum width.  This ensure that repeated calls to
            _doResize() don't cause the last column to size itself too large.
        """
        if not self:  # avoid a PyDeadObject error
            return
        if self.GetSize().height < 32:
            return  # avoid an endless update bug when the height is small.
        numCols = self.GetColumnCount()
        if numCols == 0: return # Nothing to resize.
        if(self._resizeColStyle == "LAST"):
            resizeCol = self.GetColumnCount()
        else:
            resizeCol = self._resizeCol
        # Clamp to at least column 1 (resizeCol is 1-based below).
        resizeCol = max(1, resizeCol)
        if self._resizeColMinWidth is None:
            self._resizeColMinWidth = self.GetColumnWidth(resizeCol - 1)
        # Get total width
        listWidth = self.GetClientSize().width
        totColWidth = 0 # Width of all columns except last one.
        for col in range(numCols):
            if col != (resizeCol-1):
                totColWidth = totColWidth + self.GetColumnWidth(col)
        resizeColWidth = self.GetColumnWidth(resizeCol - 1)
        if totColWidth + self._resizeColMinWidth > listWidth:
            # We haven't got the width to show the last column at its minimum
            # width -> set it to its minimum width and allow the horizontal
            # scrollbar to show.
            self.SetColumnWidth(resizeCol-1, self._resizeColMinWidth)
            return
        # Resize the last column to take up the remaining available space.
        self.SetColumnWidth(resizeCol-1, listWidth - totColWidth)
#----------------------------------------------------------------------------
#----------------------------------------------------------------------------
# Combined item-state mask: selected AND focused.
SEL_FOC = wx.LIST_STATE_SELECTED | wx.LIST_STATE_FOCUSED
def selectBeforePopup(event):
"""Ensures the item the mouse is pointing at is selected before a popup.
Works with both single-select and multi-select lists."""
ctrl = event.GetEventObject()
if isinstance(ctrl, wx.ListCtrl):
n, flags = ctrl.HitTest(event.GetPosition())
if n >= 0:
if not ctrl.GetItemState(n, wx.LIST_STATE_SELECTED):
for i in range(ctrl.GetItemCount()):
ctrl.SetItemState(i, 0, SEL_FOC)
#for i in getListCtrlSelection(ctrl, SEL_FOC):
# ctrl.SetItemState(i, 0, SEL_FOC)
ctrl.SetItemState(n, SEL_FOC, SEL_FOC)
def getListCtrlSelection(listctrl, state=wx.LIST_STATE_SELECTED):
    """ Returns list of item indexes of given state (selected by defaults) """
    selected = []
    index = listctrl.GetNextItem(-1, wx.LIST_NEXT_ALL, state)
    while index != -1:
        selected.append(index)
        index = listctrl.GetNextItem(index, wx.LIST_NEXT_ALL, state)
    return selected
# Custom event type + binder used to defer showing a popup menu until the
# originating right-click event has finished processing.
wxEVT_DOPOPUPMENU = wx.NewEventType()
EVT_DOPOPUPMENU = wx.PyEventBinder(wxEVT_DOPOPUPMENU, 0)
class ListCtrlSelectionManagerMix:
    """Mixin that defines a platform independent selection policy
    As selection single and multi-select list return the item index or a
    list of item indexes respectively.
    """
    _menu = None

    def __init__(self):
        # Popup display is deferred through a custom event so the
        # right-click is fully processed before the menu appears.
        self.Bind(wx.EVT_RIGHT_DOWN, self.OnLCSMRightDown)
        self.Bind(EVT_DOPOPUPMENU, self.OnLCSMDoPopup)

    def getPopupMenu(self):
        """ Override to implement dynamic menus (create) """
        return self._menu

    def setPopupMenu(self, menu):
        """ Must be set for default behaviour """
        self._menu = menu

    def afterPopupMenu(self, menu):
        """ Override to implement dynamic menus (destroy) """
        pass

    def getSelection(self):
        """Return a single index (-1 if none) for single-select lists,
        otherwise the full list of selected indexes."""
        indexes = getListCtrlSelection(self)
        if not (self.GetWindowStyleFlag() & wx.LC_SINGLE_SEL):
            return indexes
        return indexes[0] if indexes else -1

    def OnLCSMRightDown(self, event):
        selectBeforePopup(event)
        event.Skip()
        menu = self.getPopupMenu()
        if not menu:
            return
        popup_evt = wx.PyEvent()
        popup_evt.SetEventType(wxEVT_DOPOPUPMENU)
        popup_evt.menu = menu
        popup_evt.pos = event.GetPosition()
        wx.PostEvent(self, popup_evt)

    def OnLCSMDoPopup(self, event):
        self.PopupMenu(event.menu, event.pos)
        self.afterPopupMenu(event.menu)
#----------------------------------------------------------------------------
#----------------------------------------------------------------------------
from bisect import bisect
class TextEditMixin:
    """
    A mixin class that enables any text in any column of a
    multi-column listctrl to be edited by clicking on the given row
    and column. You close the text editor by hitting the ENTER key or
    clicking somewhere else on the listctrl. You switch to the next
    column by hitting TAB.
    To use the mixin you have to include it in the class definition
    and call the __init__ function::
        class TestListCtrl(wx.ListCtrl, TextEditMixin):
            def __init__(self, parent, ID, pos=wx.DefaultPosition,
                         size=wx.DefaultSize, style=0):
                wx.ListCtrl.__init__(self, parent, ID, pos, size, style)
                TextEditMixin.__init__(self)
    Authors: Steve Zatz, Pim Van Heuven (pim@think-wize.com)
    """

    # Colours applied to the in-place text editor.
    editorBgColour = wx.Colour(255,255,175) # Yellow
    editorFgColour = wx.Colour(0,0,0)       # black

    def __init__(self):
        # Build the hidden editor and hook list events.
        self.make_editor()
        self.Bind(wx.EVT_TEXT_ENTER, self.CloseEditor)
        self.Bind(wx.EVT_LEFT_DOWN, self.OnLeftDown)
        self.Bind(wx.EVT_LEFT_DCLICK, self.OnLeftDown)
        self.Bind(wx.EVT_LIST_ITEM_SELECTED, self.OnItemSelected, self)

    def make_editor(self, col_style=wx.LIST_FORMAT_LEFT):
        """Create (or re-create) the hidden wx.TextCtrl used for in-place
        editing, matching its text alignment to *col_style*."""
        style =wx.TE_PROCESS_ENTER|wx.TE_PROCESS_TAB|wx.TE_RICH2
        # Map the list-column alignment onto the matching text-ctrl style.
        style |= {wx.LIST_FORMAT_LEFT: wx.TE_LEFT,
                  wx.LIST_FORMAT_RIGHT: wx.TE_RIGHT,
                  wx.LIST_FORMAT_CENTRE : wx.TE_CENTRE
                  }[col_style]
        editor = wx.TextCtrl(self, -1, style=style)
        editor.SetBackgroundColour(self.editorBgColour)
        editor.SetForegroundColour(self.editorFgColour)
        font = self.GetFont()
        editor.SetFont(font)
        self.curRow = 0
        self.curCol = 0
        editor.Hide()
        # Replace any previously created editor instance.
        if hasattr(self, 'editor'):
            self.editor.Destroy()
        self.editor = editor
        self.col_style = col_style
        self.editor.Bind(wx.EVT_CHAR, self.OnChar)
        self.editor.Bind(wx.EVT_KILL_FOCUS, self.CloseEditor)

    def OnItemSelected(self, evt):
        # Track the current row so OnLeftDown can detect a second click.
        self.curRow = evt.GetIndex()
        evt.Skip()

    def OnChar(self, event):
        ''' Catch the TAB, Shift-TAB, cursor DOWN/UP key code
        so we can open the editor at the next column (if any).'''
        keycode = event.GetKeyCode()
        if keycode == wx.WXK_TAB and event.ShiftDown():
            # Shift-TAB: move the editor one column left.
            self.CloseEditor()
            if self.curCol-1 >= 0:
                self.OpenEditor(self.curCol-1, self.curRow)
        elif keycode == wx.WXK_TAB:
            # TAB: move the editor one column right.
            self.CloseEditor()
            if self.curCol+1 < self.GetColumnCount():
                self.OpenEditor(self.curCol+1, self.curRow)
        elif keycode == wx.WXK_ESCAPE:
            self.CloseEditor()
        elif keycode == wx.WXK_DOWN:
            # DOWN: re-open the editor one row below, same column.
            self.CloseEditor()
            if self.curRow+1 < self.GetItemCount():
                self._SelectIndex(self.curRow+1)
                self.OpenEditor(self.curCol, self.curRow)
        elif keycode == wx.WXK_UP:
            # UP: re-open the editor one row above, same column.
            self.CloseEditor()
            if self.curRow > 0:
                self._SelectIndex(self.curRow-1)
                self.OpenEditor(self.curCol, self.curRow)
        else:
            event.Skip()

    def OnLeftDown(self, evt=None):
        ''' Examine the click and double
        click events to see if a row has been click on twice. If so,
        determine the current row and columnn and open the editor.'''
        # A click outside the open editor commits the current edit.
        if self.editor.IsShown():
            self.CloseEditor()
        x,y = evt.GetPosition()
        row,flags = self.HitTest((x,y))
        if row != self.curRow: # self.curRow keeps track of the current row
            evt.Skip()
            return
        # the following should really be done in the mixin's init but
        # the wx.ListCtrl demo creates the columns after creating the
        # ListCtrl (generally not a good idea) on the other hand,
        # doing this here handles adjustable column widths
        self.col_locs = [0]
        loc = 0
        for n in range(self.GetColumnCount()):
            loc = loc + self.GetColumnWidth(n)
            self.col_locs.append(loc)
        # Translate the click's x position into a column index.
        col = bisect(self.col_locs, x+self.GetScrollPos(wx.HORIZONTAL)) - 1
        self.OpenEditor(col, row)

    def OpenEditor(self, col, row):
        ''' Opens an editor at the current position. '''
        # give the derived class a chance to Allow/Veto this edit.
        evt = wx.ListEvent(wx.wxEVT_COMMAND_LIST_BEGIN_LABEL_EDIT, self.GetId())
        evt.Index = row
        evt.Column = col
        item = self.GetItem(row, col)
        evt.Item.SetId(item.GetId())
        evt.Item.SetColumn(item.GetColumn())
        evt.Item.SetData(item.GetData())
        evt.Item.SetText(item.GetText())
        ret = self.GetEventHandler().ProcessEvent(evt)
        if ret and not evt.IsAllowed():
            return   # user code doesn't allow the edit.
        # Recreate the editor if the target column's alignment differs.
        if self.GetColumn(col).Align != self.col_style:
            self.make_editor(self.GetColumn(col).Align)
        x0 = self.col_locs[col]
        x1 = self.col_locs[col+1] - x0
        scrolloffset = self.GetScrollPos(wx.HORIZONTAL)
        # scroll forward
        if x0+x1-scrolloffset > self.GetSize()[0]:
            if wx.Platform == "__WXMSW__":
                # don't start scrolling unless we really need to
                offset = x0+x1-self.GetSize()[0]-scrolloffset
                # scroll a bit more than what is minimum required
                # so we don't have to scroll everytime the user presses TAB
                # which is very tireing to the eye
                addoffset = self.GetSize()[0]/4
                # but be careful at the end of the list
                if addoffset + scrolloffset < self.GetSize()[0]:
                    offset += addoffset
                self.ScrollList(offset, 0)
                scrolloffset = self.GetScrollPos(wx.HORIZONTAL)
            else:
                # Since we can not programmatically scroll the ListCtrl
                # close the editor so the user can scroll and open the editor
                # again
                self.editor.SetValue(self.GetItem(row, col).GetText())
                self.curRow = row
                self.curCol = col
                self.CloseEditor()
                return
        y0 = self.GetItemRect(row)[1]
        def _activate_editor(editor):
            # Deferred so size/position are applied after pending layout.
            editor.SetSize(x0-scrolloffset,y0, x1,-1, wx.SIZE_USE_EXISTING)
            editor.SetValue(self.GetItem(row, col).GetText())
            editor.Show()
            editor.Raise()
            editor.SetSelection(-1,-1)
            editor.SetFocus()
        wx.CallAfter(_activate_editor, self.editor)
        self.curRow = row
        self.curCol = col

    # FIXME: this function is usually called twice - second time because
    # it is binded to wx.EVT_KILL_FOCUS. Can it be avoided? (MW)
    def CloseEditor(self, evt=None):
        ''' Close the editor and save the new value to the ListCtrl. '''
        if not self.editor.IsShown():
            return
        text = self.editor.GetValue()
        self.editor.Hide()
        self.SetFocus()
        # post wxEVT_COMMAND_LIST_END_LABEL_EDIT
        # Event can be vetoed. It doesn't has SetEditCanceled(), what would
        # require passing extra argument to CloseEditor()
        evt = wx.ListEvent(wx.wxEVT_COMMAND_LIST_END_LABEL_EDIT, self.GetId())
        evt.Index = self.curRow
        evt.Column = self.curCol
        item = wx.ListItem(self.GetItem(self.curRow, self.curCol))
        item.SetText(text)
        evt.SetItem(item)
        ret = self.GetEventHandler().ProcessEvent(evt)
        if not ret or evt.IsAllowed():
            if self.IsVirtual():
                # replace by whather you use to populate the virtual ListCtrl
                # data source
                self.SetVirtualData(self.curRow, self.curCol, text)
            else:
                self.SetItem(self.curRow, self.curCol, text)
        self.RefreshItem(self.curRow)

    def _SelectIndex(self, row):
        """Move the selection to *row*, clamping to the list bounds."""
        listlen = self.GetItemCount()
        if row < 0 and not listlen:
            return
        if row > (listlen-1):
            row = listlen -1
        # Deselect the previously-current row, then select/scroll to *row*.
        self.SetItemState(self.curRow, ~wx.LIST_STATE_SELECTED,
                          wx.LIST_STATE_SELECTED)
        self.EnsureVisible(row)
        self.SetItemState(row, wx.LIST_STATE_SELECTED,
                          wx.LIST_STATE_SELECTED)
#----------------------------------------------------------------------------
#----------------------------------------------------------------------------
"""
FILENAME: CheckListCtrlMixin.py
AUTHOR: Bruce Who (bruce.who.hk at gmail.com)
DATE: 2006-02-09
DESCRIPTION:
This script provide a mixin for ListCtrl which add a checkbox in the first
column of each row. It is inspired by limodou's CheckList.py(which can be
got from his NewEdit) and improved:
- You can just use InsertStringItem() to insert new items;
- Once a checkbox is checked/unchecked, the corresponding item is not
selected;
- You can use SetItemData() and GetItemData();
- Interfaces are changed to OnCheckItem(), IsChecked(), CheckItem().
You should not set an image list for the ListCtrl once this mixin is used.
HISTORY:
1.3 - You can check/uncheck a group of sequential items by <Shift-click>:
First click(or <Shift-Click>) item1 to check/uncheck it, then
Shift-click item2 to check/uncheck it, and you'll find that all
items between item1 and item2 are check/unchecked!
1.2 - Add ToggleItem()
1.1 - Initial version
"""
class CheckListCtrlMixin(object):
    """
    This is a mixin for ListCtrl which adds a checkbox in the first
    column of each row. It is inspired by limodou's CheckList.py (which
    can be obtained from his NewEdit) and improved:
    - You can just use InsertStringItem() to insert new items;
    - Once a checkbox is checked/unchecked, the corresponding item
      is not selected;
    - You can use SetItemData() and GetItemData();
    - Interfaces are changed to OnCheckItem(), IsChecked(),
      CheckItem().
    You should not set an image list for the ListCtrl once this mixin is
    used: it installs its own image list holding the checkbox bitmaps
    (index 0 = unchecked, index 1 = checked).
    """
    def __init__(self, check_image=None, uncheck_image=None, imgsz=(16,16)):
        # Derive the checkbox bitmap size from whichever custom image was
        # supplied; otherwise the imgsz argument is used as-is.
        if check_image is not None:
            imgsz = check_image.GetSize()
        elif uncheck_image is not None:
            # BUG FIX: this branch previously called check_image.GetSize(),
            # but check_image is None on this branch, so supplying only
            # uncheck_image raised AttributeError.
            imgsz = uncheck_image.GetSize()
        self.__imagelist_ = wx.ImageList(*imgsz)
        # Create default checkbox images if none were specified
        if check_image is None:
            check_image = self.__CreateBitmap(wx.CONTROL_CHECKED, imgsz)
        if uncheck_image is None:
            uncheck_image = self.__CreateBitmap(0, imgsz)
        # Image index 0 == unchecked, 1 == checked; IsChecked()/CheckItem()
        # below rely on this ordering.
        self.uncheck_image = self.__imagelist_.Add(uncheck_image)
        self.check_image = self.__imagelist_.Add(check_image)
        self.AssignImageList(self.__imagelist_, wx.IMAGE_LIST_SMALL)
        self.__last_check_ = None
        self.Bind(wx.EVT_LEFT_DOWN, self.__OnLeftDown_)
        # Monkey-patch in a new InsertItem so we can also set the image ID
        # for the item (new rows start unchecked).
        self._origInsertItem = self.InsertItem
        self.InsertItem = self.__InsertItem_
    def __InsertItem_(self, *args, **kw):
        # Delegate to the original InsertItem, then mark the row unchecked.
        index = self._origInsertItem(*args, **kw)
        self.SetItemImage(index, self.uncheck_image)
        return index
    def __CreateBitmap(self, flag=0, size=(16, 16)):
        """Create a bitmap of the platform's native checkbox. The flag
        is used to determine the checkbox's state (see wx.CONTROL_*).
        """
        bmp = wx.Bitmap(*size)
        dc = wx.MemoryDC(bmp)
        dc.SetBackground(wx.WHITE_BRUSH)
        dc.Clear()
        wx.RendererNative.Get().DrawCheckBox(self, dc,
                                             (0, 0, size[0], size[1]), flag)
        dc.SelectObject(wx.NullBitmap)
        return bmp
    def __OnLeftDown_(self, evt):
        """Toggle the clicked checkbox; with Shift held, apply the same
        state to the whole run of items back to the previous click."""
        (index, flags) = self.HitTest(evt.GetPosition())
        if flags == wx.LIST_HITTEST_ONITEMICON:
            img_idx = self.GetItem(index).GetImage()
            flag_check = img_idx == 0   # clicking an unchecked box checks it
            begin_index = index
            end_index = index
            if self.__last_check_ is not None \
                and wx.GetKeyState(wx.WXK_SHIFT):
                last_index, last_flag_check = self.__last_check_
                if last_flag_check == flag_check:
                    # XXX what if the previous item is deleted or new items
                    # are inserted?
                    item_count = self.GetItemCount()
                    if last_index < item_count:
                        if last_index < index:
                            begin_index = last_index
                            end_index = index
                        elif last_index > index:
                            begin_index = index
                            end_index = last_index
                        else:
                            assert False
            while begin_index <= end_index:
                self.CheckItem(begin_index, flag_check)
                begin_index += 1
            self.__last_check_ = (index, flag_check)
        else:
            evt.Skip()
    def OnCheckItem(self, index, flag):
        """Override in the derived class to react to check-state changes."""
        pass
    def IsChecked(self, index):
        """Return True if the item at *index* is checked (image index 1)."""
        return self.GetItem(index).GetImage() == 1
    def CheckItem(self, index, check=True):
        """Set the item's check state; OnCheckItem() fires only when the
        state actually changes."""
        img_idx = self.GetItem(index).GetImage()
        if img_idx == 0 and check:
            self.SetItemImage(index, 1)
            self.OnCheckItem(index, True)
        elif img_idx == 1 and not check:
            self.SetItemImage(index, 0)
            self.OnCheckItem(index, False)
    def ToggleItem(self, index):
        """Flip the check state of the item at *index*."""
        self.CheckItem(index, not self.IsChecked(index))
#----------------------------------------------------------------------------
#----------------------------------------------------------------------------
# Mode Flags for ListRowHighlighter: which alternate rows get the colour.
HIGHLIGHT_ODD = 1   # Highlight the Odd rows
HIGHLIGHT_EVEN = 2  # Highlight the Even rows
class ListRowHighlighter:
    """Editra Control Library: ListRowHighlighter
    Mixin class that handles automatic background highlighting of alternate
    rows in a ListCtrl. The background of the rows are highlighted
    automatically as items are added or inserted in the control based on the
    mixins Mode and set Color. By default the Even rows will be highlighted with
    the systems highlight color.
    """
    def __init__(self, color=None, mode=HIGHLIGHT_EVEN):
        """Initialize the highlighter mixin
        @keyword color: Set a custom highlight color (default uses system color)
        @keyword mode: HIGHLIGHT_EVEN (default) or HIGHLIGHT_ODD
        """
        # Attributes
        self._color = color
        self._defaultb = wx.SystemSettings.GetColour(wx.SYS_COLOUR_LISTBOX)
        self._mode = mode
        # Event Handlers: re-apply striping whenever rows come or go.
        self.Bind(wx.EVT_LIST_INSERT_ITEM, lambda evt: self.RefreshRows())
        self.Bind(wx.EVT_LIST_DELETE_ITEM, lambda evt: self.RefreshRows())
    def RefreshRows(self):
        """Re-color all the rows"""
        if self._color is None:
            # No custom colour: pick a platform-appropriate default.
            if wx.Platform in ('__WXGTK__', '__WXMSW__'):
                color = wx.SystemSettings.GetColour(wx.SYS_COLOUR_3DLIGHT)
            else:
                color = wx.Colour(237, 243, 254)
        else:
            color = self._color
        local_defaultb = self._defaultb
        local_mode = self._mode
        for row in range(self.GetItemCount()):
            # Even mode highlights rows 0, 2, 4, ...; odd mode 1, 3, 5, ...
            if local_mode & HIGHLIGHT_EVEN:
                dohlight = not row % 2
            else:
                dohlight = row % 2
            if dohlight:
                self.SetItemBackgroundColour(row, color)
            elif local_defaultb:
                self.SetItemBackgroundColour(row, local_defaultb)
            else: # This part of the loop should only happen once if self._defaultb is None.
                # NOTE(review): _defaultb is initialised in __init__, so this
                # branch only runs if it was later reset to a falsy value.
                local_defaultb = self._defaultb = self.GetItemBackgroundColour(row)
                self.SetItemBackgroundColour(row, local_defaultb)
    def SetHighlightColor(self, color):
        """Set the color used to highlight the rows. Call :meth:`RefreshRows` after
        this if you wish to update all the rows highlight colors.
        @param color: wx.Color or None to set default
        """
        self._color = color
    def SetHighlightMode(self, mode):
        """Set the highlighting mode to either HIGHLIGHT_EVEN or to
        HIGHLIGHT_ODD. Call :meth:`RefreshRows` afterwards to update the list
        state.
        @param mode: HIGHLIGHT_* mode value
        """
        self._mode = mode
#----------------------------------------------------------------------------
|
[
"lyj218@qq.com"
] |
lyj218@qq.com
|
753b5b2ec561ad28d7410e49144c4be4fac47627
|
28f52b0e9c8f7fe15a008127f2a76c8854efaa7e
|
/pkgs/clean-pkg/src/genie/libs/clean/stages/nxos/aci/image_handler.py
|
2c5fe6e13db35982ecd8d592e92175249ab32a43
|
[
"Apache-2.0"
] |
permissive
|
dthangap/genielibs
|
cb5098e675c51f2c2c46a929faf630cbad6aa7b3
|
778edb3b310bac960f507dae55e82ac027d8c6c8
|
refs/heads/master
| 2023-02-01T05:18:04.575913
| 2020-12-16T12:55:54
| 2020-12-16T12:55:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,399
|
py
|
""" NXOS ACI: Image Handler Class """
import yaml
from genie.libs.clean.stages.image_handler import BaseImageHandler
from pyats.utils.schemaengine import Schema, ListOf, Optional
class ImageLoader(object):
    """Validates the 'images' section of a clean yaml and normalises it
    into 'controller' / 'switch' attribute lists on the instance."""

    # Shown verbatim to the user when 'images' matches none of the
    # supported layouts.
    EXPECTED_IMAGE_STRUCTURE_MSG = """\
Expected one of the following structures for 'images' in the clean yaml

Structure #1
------------
images:
- /path/to/controller_image.bin
- /path/to/switch_image.bin

Structure #2
------------
images:
  controller:
  - /path/to/controller_image.bin
  switch:
  - /path/to/switch_image.bin

Structure #3
------------
images:
  controller:
    file:
    - /path/to/controller_image.bin
  switch:
    file:
    - /path/to/switch_image.bin

But got the following structure
-------------------------------
{}"""

    def load(self, images):
        """Try each supported structure in turn; raise with a help message
        when none match. Matching validators set self.controller/self.switch
        as a side effect."""
        if (not self.valid_structure_1(images) and
                not self.valid_structure_2(images) and
                not self.valid_structure_3(images)):
            raise Exception(self.EXPECTED_IMAGE_STRUCTURE_MSG.format(
                yaml.dump({'images': images})))

    def valid_structure_1(self, images):
        """Structure #1: a flat list of one or two image paths."""
        schema = ListOf(str)
        try:
            Schema(schema).validate(images)
        except Exception:
            return False
        if len(images) == 1:
            # This is not a bug. It is optional to clean only switches or only
            # controllers but we do not know what type of image the user
            # provided if they only provide 1.
            setattr(self, 'controller', images)
            setattr(self, 'switch', images)
            return True
        if len(images) == 2:
            # Two entries: controller image first, switch image second.
            setattr(self, 'controller', images[:1])
            setattr(self, 'switch', images[1:])
            return True
        else:
            return False

    def valid_structure_2(self, images):
        """Structure #2: dict with optional 'controller'/'switch' lists."""
        schema = {
            Optional('controller'): ListOf(str),
            Optional('switch'): ListOf(str)
        }
        try:
            Schema(schema).validate(images)
        except Exception:
            return False
        # Each provided key must hold exactly one image.
        if ('controller' in images and
                'switch' in images and
                len(images['controller']) == 1 and
                len(images['switch']) == 1):
            setattr(self, 'controller', images['controller'])
            setattr(self, 'switch', images['switch'])
            return True
        elif ('controller' in images and
                len(images['controller']) == 1):
            setattr(self, 'controller', images['controller'])
            return True
        elif ('switch' in images and
                len(images['switch']) == 1):
            setattr(self, 'switch', images['switch'])
            return True
        else:
            return False

    def valid_structure_3(self, images):
        """Structure #3: dict with nested 'file' lists under each key."""
        schema = {
            Optional('controller'): {
                'file': ListOf(str)
            },
            Optional('switch'): {
                'file': ListOf(str)
            }
        }
        try:
            Schema(schema).validate(images)
        except Exception:
            return False
        # Each provided key must hold exactly one image under 'file'.
        if ('controller' in images and
                'switch' in images and
                len(images['controller']['file']) == 1 and
                len(images['switch']['file']) == 1):
            setattr(self, 'controller', images['controller']['file'])
            setattr(self, 'switch', images['switch']['file'])
            return True
        elif ('controller' in images and
                len(images['controller']['file']) == 1):
            setattr(self, 'controller', images['controller']['file'])
            return True
        elif ('switch' in images and
                len(images['switch']['file']) == 1):
            setattr(self, 'switch', images['switch']['file'])
            return True
        else:
            return False
class ImageHandler(BaseImageHandler, ImageLoader):
    """NXOS ACI image handler: normalises the controller/switch image
    lists and propagates them into the relevant clean stages."""

    def __init__(self, device, images, *args, **kwargs):
        # Defaults: cleaning only controllers or only switches is allowed.
        self.controller = []
        self.switch = []
        # Validate 'images' and populate self.controller / self.switch.
        ImageLoader.load(self, images)
        # Temp workaround for XPRESSO: strip any 'file://' scheme prefix.
        if self.controller:
            self.controller = [self.controller[0].replace('file://', '')]
        if self.switch:
            self.switch = [self.switch[0].replace('file://', '')]
        super().__init__(device, images, *args, **kwargs)

    def update_image_references(self, section):
        """Swap stored image names for their mapped replacements when the
        section provides an 'image_mapping'."""
        if 'image_mapping' not in section.parameters:
            return
        mapping = section.parameters['image_mapping']
        for group in (self.controller, self.switch):
            for pos, name in enumerate(group):
                # change the saved image to the new image name/path
                group[pos] = mapping.get(name, group[pos])

    def update_copy_to_linux(self):
        '''Update clean section 'copy_to_linux' with image information'''
        stage = self.device.clean.setdefault('copy_to_linux', {})
        origin = stage.setdefault('origin', {})
        origin.update({'files': self.controller + self.switch})

    def update_copy_to_device(self):
        '''Update clean stage 'copy_to_device' with image information'''
        stage = self.device.clean.setdefault('copy_to_device', {})
        origin = stage.setdefault('origin', {})
        origin.update({'files': self.controller + self.switch})

    def update_fabric_upgrade(self):
        '''Update clean stage 'fabric_upgrade' with image information'''
        stage = self.device.clean.setdefault('fabric_upgrade', {})
        stage.update({'controller_image': self.controller})
        stage.update({'switch_image': self.switch})

    def update_fabric_clean(self):
        '''Update clean stage 'fabric_clean' with image information '''
        stage = self.device.clean.setdefault('fabric_clean', {})
        if stage.get('copy_boot_image', {}).get('origin', {}):
            stage['copy_boot_image']['origin'].update({'files': self.switch})
|
[
"tahigash@cisco.com"
] |
tahigash@cisco.com
|
1a94d4955bc1347ae86d5992a523abcfbfb17267
|
5da2c116d3d0dc4f3811cec144c9f8b5a74afede
|
/lncrawl/assets/user_agents.py
|
fbec17aabe02c7b79f52106cf5ee397fca225e17
|
[
"Apache-2.0"
] |
permissive
|
NNTin/lightnovel-crawler
|
a08bd252f2e72f41f931f0b2165f906b64d33692
|
451e816ab03c8466be90f6f0b3eaa52d799140ce
|
refs/heads/master
| 2021-06-23T12:07:43.668329
| 2021-04-25T01:51:26
| 2021-04-25T01:51:26
| 361,695,538
| 2
| 0
|
Apache-2.0
| 2021-04-26T16:48:21
| 2021-04-26T09:40:46
| null |
UTF-8
|
Python
| false
| false
| 6,302
|
py
|
# -*- coding: utf-8 -*-
# Pool of desktop-browser User-Agent strings for randomising crawler
# requests. Mobile and dated entries are kept below, commented out.
user_agents = [
    # "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:66.0) Gecko/20100101 Firefox/66.0",
    # "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:15.0) Gecko/20100101 Firefox/15.0.1",
    # "Mozilla/5.0 (X11; CrOS x86_64 8172.45.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.64 Safari/537.36",
    # "Mozilla/5.0 (Linux; Android 8.0.0; SM-G960F Build/R16NW) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.84 Mobile Safari/537.36",
    # "Mozilla/5.0 (Linux; Android 6.0.1; Nexus 6P Build/MMB29P) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/47.0.2526.83 Mobile Safari/537.36",
    # "Mozilla/5.0 (Linux; Android 6.0; HTC One M9 Build/MRA58K) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/52.0.2743.98 Mobile Safari/537.3",
    # "Mozilla/5.0 (Linux; Android 7.0; Pixel C Build/NRD90M; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/52.0.2743.98 Safari/537.36",
    # "Mozilla/5.0 (Linux; Android 6.0.1; SHIELD Tablet K1 Build/MRA58K; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/55.0.2883.91 Safari/537.36",
    # "Mozilla/5.0 (iPhone; CPU iPhone OS 12_0 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/12.0 Mobile/15E148 Safari/604.1",
    # "Mozilla/5.0 (iPhone; CPU iPhone OS 12_0 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) CriOS/69.0.3497.105 Mobile/15E148 Safari/605.1",
    # "Mozilla/5.0 (iPhone; CPU iPhone OS 12_0 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) FxiOS/13.2b11866 Mobile/16A366 Safari/605.1.15",
    # "Mozilla/5.0 (iPhone; CPU iPhone OS 11_0 like Mac OS X) AppleWebKit/604.1.38 (KHTML, like Gecko) Version/11.0 Mobile/15A372 Safari/604.1",
    # "Mozilla/5.0 (iPhone; CPU iPhone OS 11_0 like Mac OS X) AppleWebKit/604.1.34 (KHTML, like Gecko) Version/11.0 Mobile/15A5341f Safari/604.1",
    # "Mozilla/5.0 (iPhone; CPU iPhone OS 11_0 like Mac OS X) AppleWebKit/604.1.38 (KHTML, like Gecko) Version/11.0 Mobile/15A5370a Safari/604.1",
    # "Mozilla/5.0 (iPhone9,3; U; CPU iPhone OS 10_0_1 like Mac OS X) AppleWebKit/602.1.50 (KHTML, like Gecko) Version/10.0 Mobile/14A403 Safari/602.1",
    # "Mozilla/5.0 (Windows Phone 10.0; Android 6.0.1; Microsoft; RM-1152) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/52.0.2743.116 Mobile Safari/537.36 Edge/15.15254",
    "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.117 Safari/537.36",
    "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.103 Safari/537.36",
    "Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.86 Safari/537.36",
    "Mozilla/5.0 (Windows NT 6.2; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.86 Safari/537.36",
    "Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.86 Safari/537.36",
    "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.86 Safari/537.36",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.86 Safari/537.36",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.86 Safari/537.36",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.86 Safari/537.36",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.86 Safari/537.36",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.86 Safari/537.36",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.86 Safari/537.36",
    "Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.78 Safari/537.36",
    "Mozilla/5.0 (Windows NT 6.2; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.78 Safari/537.36",
    "Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.78 Safari/537.36",
    "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.78 Safari/537.36",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.78 Safari/537.36",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.78 Safari/537.36",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.78 Safari/537.36",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.78 Safari/537.36",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.78 Safari/537.36",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.78 Safari/537.36",
    "Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.79 Safari/537.36",
    "Mozilla/5.0 (Windows NT 6.2; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.79 Safari/537.36",
    "Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.79 Safari/537.36",
    "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.79 Safari/537.36",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.79 Safari/537.36",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.79 Safari/537.36",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.79 Safari/537.36",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.79 Safari/537.36",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.79 Safari/537.36",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.79 Safari/537.36"
]
|
[
"dipu.sudipta@gmail.com"
] |
dipu.sudipta@gmail.com
|
f3e768e706777c6ba9fb873bf9632cbe6fddb951
|
ebb4fcf4b95e9143136f78aa3cba426829b1b2ff
|
/urls.py
|
be6411b662095da3d336edc4302660a233c3f043
|
[] |
no_license
|
brasky/scheduler
|
26881e1056753d26f2deecbb5920d40f3642b628
|
4adbd99a629d4b9eab0d56fc8b8e5ef14b366354
|
refs/heads/master
| 2021-01-20T17:33:49.134819
| 2013-11-13T18:35:05
| 2013-11-13T18:35:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,029
|
py
|
from django.conf.urls import patterns, include, url
# URL routes for the scheduler app.
# NOTE(review): `patterns()` with string view paths was deprecated in
# Django 1.8 and removed in 1.10 — this file targets an older Django;
# verify the installed version before upgrading.
urlpatterns = patterns('',
    url(r'^$','scheduler.views.home_view'),
    # Built-in auth login view with a custom template.
    url(r'^accounts/login/$', 'django.contrib.auth.views.login', {'template_name': 'scheduler/login.html'}),
    url(r'^accounts/logout/$','scheduler.views.logout_view'),
    url(r'^accounts/is_loggedin/$','scheduler.views.is_loggedin_view'),
    url(r'^accounts/register/$','scheduler.views.register_view'),
    url(r'^accounts/profile/$','scheduler.views.account_view'),
    url(r'^createevent/(?P<scheduleid>\d+)$','scheduler.views.create_event_view'),
    url(r'^schedule/(?P<scheduleid>\d+)$','scheduler.views.schedule_view'),
    url(r'^accounts/createschedule/$','scheduler.views.create_schedule_view'),
    url(r'^friends/$','scheduler.views.friends_view'),
    url(r'^friends/accept/(?P<friendid>\d+)$','scheduler.views.friends_accept_view'),
    url(r'^friends/add/$','scheduler.views.friends_add_view'),
)
|
[
"redx47@gmail.com"
] |
redx47@gmail.com
|
da41b6d51ae8d2de2dd7a45e0120555d35750c8d
|
f0187406babf1be73626fa2a4fbeb790e177dd7a
|
/assignment2/assignment2_2015004120.py
|
708e78808230d9427a30fe1383f4dfb056f5b6ec
|
[] |
no_license
|
CameliaOvO/CSE4007
|
bc150084f00f3ab484a8194002022cc96d169414
|
89929cb6f5c61b9f89de06f14a2a03f2a43e5378
|
refs/heads/master
| 2020-03-19T03:32:53.572535
| 2018-05-29T04:38:28
| 2018-05-29T04:38:28
| 135,737,859
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,084
|
py
|
from bisect import bisect_left
from collections import Counter
from math import log2
def complete_link_clustering(sim_name):
    """Agglomerative complete-link clustering over the module-level
    `vectors`, starting from one singleton cluster per word.

    sim_name: 'c' selects cosine similarity; anything else selects
    Euclidean distance. Returns the list of merge levels, each a
    [least-similar pairwise value, merged member index list] pair.

    NOTE(review): relies on module globals `vectors` and `num_of_words`.
    """
    sim = cosine_similarity if sim_name == 'c' else euclidean_distance
    # For distances the most-similar pair minimises; for similarities it maximises.
    most = min if sim == euclidean_distance else max
    levels, clusters = [], [[x] for x in range(num_of_words)]
    # Lower-triangular proximity matrix: row i-1 holds sim(i, j) for j < i.
    prox_mat = [[sim(vectors[i], vectors[j]) for j in range(num_of_words) if i > j] for i in range(num_of_words)][1:]
    while len(clusters) > 1:
        # Locate the most similar pair: (row, (column, value)).
        most_sim = most(enumerate([(i.index(most(i)), most(i)) for i in prox_mat]), key=lambda x: x[1][1])
        r, s = clusters[most_sim[0] + 1], clusters[most_sim[1][0]]
        levels.append([find_least_sim(r, s, sim), r + s])
        clusters.remove(r), clusters.remove(s), clusters.append(r + s)
        # Delete both merged clusters' rows from the matrix...
        del prox_mat[most_sim[0]]
        if most_sim[1][0] > 0:
            del prox_mat[most_sim[1][0] - 1]
        # ...and their columns from every remaining row, dropping rows
        # that become empty.
        it = 0
        while it < len(prox_mat):
            if len(prox_mat[it]) > most_sim[0] + 1:
                del prox_mat[it][most_sim[0] + 1]
            if len(prox_mat[it]) > most_sim[1][0]:
                del prox_mat[it][most_sim[1][0]]
            if len(prox_mat[it]) == 0:
                prox_mat.remove(prox_mat[it])
            else:
                it += 1
        # Append a fresh row for the merged cluster (complete link).
        prox_mat.append([find_least_sim(r + s, clusters[t], sim) for t in range(len(clusters) - 1)])
    return levels
def cosine_similarity(a, b):
    """Return the cosine similarity between vectors a and b
    (dot product divided by the product of the L2 norms)."""
    dot = sum(x * y for x, y in zip(a, b))
    norm_a = sum(x * x for x in a) ** 0.5
    norm_b = sum(y * y for y in b) ** 0.5
    return dot / (norm_a * norm_b)
def euclidean_distance(x, y):
    """Return the Euclidean (L2) distance between vectors x and y."""
    total = 0.0
    for xk, yk in zip(x, y):
        diff = xk - yk
        total += diff * diff
    return total ** 0.5
def find_least_sim(c1, c2, sim):
    """Complete-link linkage: return the *least* similar pairwise value
    between clusters c1 and c2 — the largest distance for Euclidean, the
    smallest value for similarity measures. Uses module global `vectors`."""
    pick = max if sim == euclidean_distance else min
    return pick(sim(vectors[i], vectors[j]) for i in c1 for j in c2)
def normalize(level):
    """Rescale the merge values in *level* to [0, 1] and invert them, in
    place: the largest original value maps to 0.0, the smallest to 1.0.
    Each entry is a [value, ...] list; returns the same list object."""
    values = [entry[0] for entry in level]
    hi, lo = max(values), min(values)
    span = hi - lo
    for entry in level:
        entry[0] = 1 - ((entry[0] - lo) / span)
    return level
def get_words_vectors():
    """Read alternating word / comma-separated-vector lines from
    WordEmbedding.txt. Returns (words, float vectors, count)."""
    word_list = []
    vector_list = []
    with open("WordEmbedding.txt", 'r') as f:
        # zip(*[f] * 2) yields the file two lines at a time.
        for word_line, vector_line in zip(*[f] * 2):
            word_list.append(word_line.strip())
            vector_list.append([float(v) for v in vector_line.split(",")])
    return word_list, vector_list, len(vector_list)
def divide_cluster(levels, threshold):
    """Cut the dendrogram at `threshold` and number the resulting clusters.

    levels: merge levels sorted ascending by their (normalized) link value.
    Returns (cluster_count, per-word 1-based cluster id list). Relies on
    the module global `num_of_words`.
    """
    cluster_idx, cluster_num = 0, [0 for _ in range(num_of_words)]
    # Discard every merge below the threshold; only the kept merges
    # define cluster membership.
    limit = bisect_left([x[0] for x in levels], threshold)
    levels = levels[limit:]
    for level in levels:
        cluster_idx += 1
        flag = False
        for c in level[1]:
            if cluster_num[c] == 0:
                flag = True
                cluster_num[c] = cluster_idx
        # If every member was already assigned, this level added no cluster.
        if not flag:
            cluster_idx -= 1
    # Any word untouched by a kept merge becomes its own singleton cluster.
    for i in range(len(cluster_num)):
        if cluster_num[i] == 0:
            cluster_idx += 1
            cluster_num[i] = cluster_idx
    return cluster_idx, cluster_num
def write_on_file(result):
    """Write WordClustering.txt: word line, raw vector line, then the
    cluster number from *result*, for every (word, vector) pair in
    WordEmbedding.txt."""
    with open("WordEmbedding.txt", 'r') as rf:
        stripped = [line.strip() for line in rf]
    in_words = stripped[0::2]
    in_vectors = stripped[1::2]
    with open("WordClustering.txt", 'w') as wf:
        for word, vector, cluster in zip(in_words, in_vectors, result):
            wf.write("%s\n%s\n%s\n" % (word, vector, cluster))
def get_word_class():
    """Read WordTopic.txt and map each global word to its gold topic index.

    Topics are blocks of alphanumeric lines separated by non-alphanumeric
    separator lines. Relies on the module global `words`.
    """
    with open("WordTopic.txt", 'r') as f:
        whole = [x.strip().lower() for x in f.readlines()]
    word_topic, topic, word_cls = [], [], []
    for word in whole:
        # A separator line closes the topic collected so far.
        if (not word.isalnum()) and topic != []:
            word_topic.append(topic)
            topic = []
        if word.isalnum():
            topic.append(word)
    word_topic.append(topic)
    # NOTE(review): a word that appears in no topic is silently skipped,
    # which shifts word_cls out of alignment with `words` — confirm the
    # topic file always covers every embedding word.
    for word in words:
        for cls in word_topic:
            if word in cls:
                word_cls.append(word_topic.index(cls))
                break
    return word_cls
def entropy_measure(n_clusters, c_list, word_cls):
    """Weighted average entropy of the gold topic labels inside each cluster.

    c_list: 1-based cluster id per word; word_cls: gold topic index per
    word. Lower is better (purer clusters). Uses the module global
    `num_of_words`.
    """
    clustered = [[] for _ in range(n_clusters)]
    for i in range(len(c_list)):
        clustered[c_list[i] - 1].append(word_cls[i])
    # Per-cluster frequency of each gold label.
    counter_clustered = [[x[1] for x in Counter(clusters).items()] for clusters in clustered]
    cluster_entropy = [sum([-(x / sum(lis)) * log2(x / sum(lis)) for x in lis]) for lis in counter_clustered]
    # Weight each cluster's entropy by its share of all words.
    cluster_size = [len(cluster) / num_of_words for cluster in clustered]
    weighted_sum = sum([x * y for x, y in zip(cluster_size, cluster_entropy)])
    return weighted_sum
def silhouette_measure(n_clusters, c_list):
    """Mean silhouette coefficient of the clustering (euclidean distance).

    c_list: 1-based cluster id per word. Singleton clusters contribute a
    coefficient of 0. Uses the module globals `vectors` and `num_of_words`.
    """
    silhouette_list = []
    dist_mat = [[euclidean_distance(x, y) for x in vectors] for y in vectors]
    clustered_idx = [[] for _ in range(n_clusters)]
    for i in range(len(c_list)):
        clustered_idx[c_list[i] - 1].append(i)
    for i in range(num_of_words):
        inner_cluster_idx = c_list[i] - 1
        if len(clustered_idx[inner_cluster_idx]) == 1:
            silhouette_list.append(0)
        else:
            c_i = clustered_idx[inner_cluster_idx]
            # a(i): mean distance to the other members of i's own cluster.
            a_i = sum([dist_mat[i][cx] for cx in c_i if cx != i]) / (len(c_i) - 1)
            # b(i): smallest mean distance to any other cluster.
            b_i = min([sum([dist_mat[i][cx] for cx in c]) / len(c) for c in clustered_idx if c != c_i])
            silhouette_coef = (b_i - a_i) / max([a_i, b_i])
            silhouette_list.append(silhouette_coef)
    sil_measure = sum(silhouette_list) / len(silhouette_list)
    return sil_measure
# Driver: cluster the embeddings and report quality metrics.
# argument[0]: similarity choice ('c' = cosine, anything else = euclidean);
# argument[1]: dendrogram cut threshold in [0, 1].
argument = ['e', 0.6]
words, vectors, num_of_words = get_words_vectors()
word_class = get_word_class()
# Reverse so merge levels are ascending, as divide_cluster's bisect expects.
level_cluster = complete_link_clustering(argument[0])[::-1]
if argument[0] != 'c':
    # Euclidean distances must be rescaled into [0, 1] similarities.
    level_cluster = normalize(level_cluster)
num_of_clusters, clustered_list = divide_cluster(level_cluster, argument[1])
write_on_file(clustered_list)
print('cosine similarity' if argument[0] == 'c' else 'euclidean distance')
print("divided into", num_of_clusters, "clusters with threshold", argument[1])
print("entropy : \t", entropy_measure(num_of_clusters, clustered_list, word_class))
print("silhouette : \t", silhouette_measure(num_of_clusters, clustered_list))
|
[
"camelia0858@gmail.com"
] |
camelia0858@gmail.com
|
198442838c9414d3f62f9b0af071a325589a66ae
|
8840b69e4341f4ed030c8b33151db205b8db3640
|
/flask_minijax.py
|
a5036e1c916ae910ed2af7e28ecdc01b86534110
|
[
"MIT"
] |
permissive
|
FidgetYou/proj3-anagrams
|
b5fe7ccc333bca0895c12590142b9f0e30f10b83
|
86923a696794b7098940023d57aaef679a52b3ac
|
refs/heads/master
| 2021-01-11T01:03:32.507679
| 2016-10-18T01:58:25
| 2016-10-18T01:58:25
| 70,846,302
| 0
| 0
| null | 2016-10-13T20:39:51
| 2016-10-13T20:39:50
| null |
UTF-8
|
Python
| false
| false
| 1,317
|
py
|
"""
Tiny demo of Ajax interaction
"""
import flask
from flask import request # Data from a submitted form
from flask import url_for
from flask import jsonify # For AJAX transactions
import json
import logging
import argparse # For the vocabulary list
import sys
###
# Globals
###
app = flask.Flask(__name__)
# Local configuration module (secret key, port), imported after app creation.
import CONFIG
app.secret_key = CONFIG.secret_key  # Should allow using session variables
###
# Pages
###
@app.route("/")
def index():
    """Serve the demo page whose JavaScript calls the /_countem endpoint."""
    return flask.render_template('minijax.html')
###############
# AJAX request handlers
# These return JSON to the JavaScript function on
# an existing page, rather than rendering a new page.
###############
@app.route("/_countem")
def countem():
    """AJAX handler: report whether the submitted text is long enough.

    Reads the ``text`` query parameter and returns JSON of the form
    ``{"result": {"long_enough": <bool>}}`` for the page's JavaScript.
    """
    # default="" guards against a request with no "text" parameter, which
    # previously made len(None) raise TypeError (an HTTP 500).
    text = request.args.get("text", default="", type=str)
    length = len(text)
    rslt = { "long_enough": length >= 5 }
    return jsonify(result=rslt)
#############
# Run locally
if __name__ == "__main__":
    # Standalone.
    app.debug = True
    app.logger.setLevel(logging.DEBUG)
    print("Opening for global access on port {}".format(CONFIG.PORT))
    # host 0.0.0.0 listens on every interface; debug mode is for dev only.
    app.run(port=CONFIG.PORT, host="0.0.0.0")
# If we run 'python3 flask_minijax.py, we get the above 'main'.
# If we run 'gunicorn flask_minijax:app', we instead get a
# 'main' inside gunicorn, which loads this file as a module
# and accesses the Flask 'app' object.
#
|
[
"michal.young@gmail.com"
] |
michal.young@gmail.com
|
524db47926d6c1b18a65735cec61aad5f9e91b97
|
d2c163f246d28b8519f8c89de23556e43be91684
|
/www/ad_board/urls.py
|
9309b9dfb201f43c13a2ec3d393148de00aea612
|
[] |
no_license
|
boogiiieee/Iskcon
|
d7a2b8bdc3002ef3306fc5e7ddc577504d8533c9
|
b672dbafee06af3ee6d646c75f442d97133f5ec9
|
refs/heads/master
| 2021-09-04T03:11:06.770094
| 2018-01-15T04:21:36
| 2018-01-15T04:21:36
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 388
|
py
|
# -*- coding: utf-8 -*-
from django.conf.urls.defaults import patterns, include, url
# Legacy-style URLconf: patterns() resolves string view names relative to
# the 'ad_board.views' module prefix (pre-Django-1.10 API).
urlpatterns = patterns('ad_board.views',
	url(r'^$', 'full', name='ad_board_url'),
	# Listing of a single category's ads.
	url(r'^category/(?P<id>[0-9]+)/$', 'category', name='category_ad_board_url'),
	# Detail page for one ad.
	url(r'^(?P<id>[0-9]+)/$', 'item', name='ad_board_item_url'),
	# Post a new ad into a category.
	url(r'^category/(?P<id>[0-9]+)/add/$', 'add', name='add_ad_board_url'),
)
|
[
"shalyapinalexander@gmail.com"
] |
shalyapinalexander@gmail.com
|
ec02d70df62b2b0336e9a3155848509a1793fc6c
|
d31432d77d775bde32fe51e7584d68cfc465808a
|
/Tema_6_3_Patrones_organizacion_datos/jerarquico.py
|
e8eec174864fe48b3867697cc2ba65fad1b241e7
|
[] |
no_license
|
surtich/TFG_Manuel_R
|
ac8d4685afeb7180d8beb3169372a1e27e484eea
|
ec1fda88695dd95d1add70d4869ea1e6e623313e
|
refs/heads/main
| 2023-07-02T06:45:19.506802
| 2021-08-03T16:55:32
| 2021-08-03T16:55:32
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,676
|
py
|
#!/usr/bin/env python
#Usamos el archivo foros.csv
from mrjob.job import MRJob
from mrjob.protocol import RawValueProtocol
import xmlify
class jerarquico(MRJob):
    """MapReduce job: group forum answers under their question and emit XML.

    Input (foros.csv) lines are ';'-separated records: field 4 is the
    message text, field 5 its type ('question' or answer), field 0 a
    question's id and field 7 an answer's parent question id.
    """
    OUTPUT_PROTOCOL = RawValueProtocol
    def mapper(self,_, line):
        linea=line.split(";")
        mensaje=linea[4] # message text is at position 4 of the record
        tipoMensaje=linea[5] # position 5 says whether it is a question or an answer
        if tipoMensaje=="question":
            idMensaje=linea[0] # questions key on their own unique id
            yield idMensaje,(tipoMensaje,mensaje)
        else:
            idMensaje=linea[7] # answers key on their parent question's id
            yield idMensaje,(tipoMensaje,mensaje)
    def reducer(self, key, values):
        listaValores=[]
        listaPrincipal=[]
        listaAuxiliar=[]
        for v in values: # materialize the incoming (type, message) pairs
            listaValores.append(v) # list holding each message with its type
        for valor in listaValores:
            if valor[0]=="question":# the question goes first in the main list
                listaPrincipal.append(valor[1])
            else:
                listaAuxiliar.append(valor[1]) # answers accumulate in a side list
        listaPrincipal.append(listaAuxiliar) # nest the answer list under the question
        # Convert to XML, using the message id as the root element.
        yield "Creada linea XML: " ,xmlify.dumps(listaPrincipal,root = key)
if __name__ == '__main__':
    jerarquico.run()
|
[
"mrodrigue212@alumno.uned.es"
] |
mrodrigue212@alumno.uned.es
|
a79f0d2f37b1ef05ad04d86eb3a3f170aa616237
|
482ee9f8972bb01b0de68c921cddb27aa9470a8c
|
/raspi_server/servomot.py
|
f31c565c0db7dbe0c901a91e0bb113b35791e157
|
[] |
no_license
|
bhavika022/Multi-Terrain-Robot
|
1a793ad5ac7f6f0ddd20cbca4b9a035efe2f8ed9
|
d2aa39f4b471d126e928d4bc51bb88096f4c2cc3
|
refs/heads/master
| 2023-08-18T22:07:18.188112
| 2021-10-15T16:20:32
| 2021-10-15T16:20:32
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,138
|
py
|
import RPi.GPIO as GPIO
import time
def setup():
    """Configure board pin 11 as a 50 Hz PWM output for the servo.

    Stores the PWM controller in the module global `p`, started at 0 %
    duty (servo idle).
    """
    GPIO.setmode(GPIO.BOARD)
    GPIO.setup(11, GPIO.OUT)
    global p
    # 50 Hz is the control frequency used throughout this module.
    p=GPIO.PWM(11, 50)
    p.start(0)
    p.ChangeDutyCycle(0)
def setangle_up():
    """Rotate the servo to 90 degrees, then idle the PWM line.

    Requires setup() to have been called first (uses global `p`).
    """
    global duty
    # Duty cycle for angle a is a/18 + 2 (maps 0-180 degrees onto 2-12 %).
    duty=(90/18)+2
    GPIO.output(11, True)
    p.ChangeDutyCycle(duty)
    time.sleep(0.5)  # give the servo time to reach the position
    GPIO.output(11, False)
    p.ChangeDutyCycle(0)  # idle the line so the servo stops being driven
    print('Degrees the Servo was rotated by: ')
    print('90')
def setangle_down():
    """Rotate the servo to 180 degrees, then idle the PWM line.

    Requires setup() to have been called first (uses global `p`).
    """
    global duty
    # Duty cycle for angle a is a/18 + 2 (maps 0-180 degrees onto 2-12 %).
    duty=(180/18)+2
    GPIO.output(11, True)
    p.ChangeDutyCycle(duty)
    time.sleep(0.5)  # give the servo time to reach the position
    GPIO.output(11, False)
    p.ChangeDutyCycle(0)  # idle the line so the servo stops being driven
    print('Degrees the Servo was rotated by: ')
    print('180')
def rotmul():
    """Sweep the servo between 90 and 180 degrees five times.

    Requires setup() to have been called first (uses global `p`).
    """
    global i
    i=0
    for i in range (5):
        global duty1, duty2
        # Duty cycles for 90 and 180 degrees (a/18 + 2 mapping).
        duty1=(90/18)+2
        duty2=(180/18)+2
        # Move to 90 degrees, pause, idle the line...
        GPIO.output(11, True)
        p.ChangeDutyCycle(duty1)
        time.sleep(0.5)
        GPIO.output(11, False)
        p.ChangeDutyCycle(0)
        # ...then to 180 degrees, pause, idle again.
        GPIO.output(11, True)
        p.ChangeDutyCycle(duty2)
        time.sleep(0.5)
        GPIO.output(11, False)
        p.ChangeDutyCycle(0)
def close():
    """Stop the PWM output; GPIO pin cleanup is intentionally left out."""
    p.stop()
    #GPIO.cleanup()
|
[
"ameya.k.kale@gmail.com"
] |
ameya.k.kale@gmail.com
|
c43501f1134f44d9e0c3c38a8ce719ea17e5bbcb
|
3253da5603971958d69df0ed442e3341a8d3bff4
|
/1-Iniciante/1914.py
|
67fa34c039b20ad33bd528808a4ce2d4016000af
|
[] |
no_license
|
CleitonSilvaT/URI_Python
|
1c73ec0852ae87c6138baa148ad8c2cb56bb723e
|
a8510bab2fa8f680b54058fafebff3a2727617d9
|
refs/heads/master
| 2021-06-20T08:18:50.104839
| 2021-05-20T08:59:19
| 2021-05-20T08:59:19
| 213,665,657
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 959
|
py
|
# -*- coding: utf-8 -*-
# Odd/even betting game (URI 1914): two players pick PAR (even) or IMPAR
# (odd); the winner is whoever matched the parity of the two numbers' sum.
if __name__ == '__main__':
    # Input: number of test cases
    casos_teste = int(input())
    while(casos_teste > 0):
        # Input: names and choices
        dados = input()
        escolha = dados.split(' ')
        # player 1 name   - escolha[0]
        # player 1 choice - escolha[1]
        # player 2 name   - escolha[2]
        # player 2 choice - escolha[3]
        # Input: the two numbers played
        valores = input()
        numeros = valores.split(' ')
        # Sum of the two values
        total = int(numeros[0]) + int(numeros[1])
        # Determine whether the sum is even (PAR) or odd (IMPAR)
        if((total % 2) == 0):
            # Print the winner
            if(escolha[1] == 'PAR'):
                print(escolha[0])
            else:
                print(escolha[2])
        else:
            # Print the winner
            if(escolha[1] == 'IMPAR'):
                print(escolha[0])
            else:
                print(escolha[2])
        casos_teste -= 1
|
[
"cleitonsilvatavares@gmail.com"
] |
cleitonsilvatavares@gmail.com
|
c0a4368dee98e726b28341c966b671e8d8ecab94
|
6295d1d4b48cafe702a08efd270aea47f6122722
|
/setup.py
|
dfc7e472398df49c5e01ff63b45c36ce99e0a948
|
[
"MIT"
] |
permissive
|
bipbopbot/radforest
|
84736a16dd6df45723c9da373004b801e153c7a8
|
fbccf2e13c58a320a7bf81bb72ad86963e0785bc
|
refs/heads/master
| 2020-08-19T01:29:30.739011
| 2019-10-17T20:27:26
| 2019-10-17T20:27:26
| 215,858,926
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 320
|
py
|
import setuptools
# Minimal packaging metadata for the radforest library.
setuptools.setup(
    name='radforest', # package name on PyPI
    version='0.1.0',
    description='A library of radiance forests.',
    url='https://github.com/bipbopbot/radforest',
    author='Loren Adams',
    author_email='bipbopbot@gmail.com',
    license='MIT',
    packages=['radforest',] # ship only the top-level package
)
|
[
"bipbopbot@users.noreply.github.com"
] |
bipbopbot@users.noreply.github.com
|
c9800b7561104d8aa6fcc841bb12aac744f3d879
|
4ee74237ad3230147674546223a0ff9644adf944
|
/quickstart/migrations/0024_poresizedistribution.py
|
8f479c54adbb664e1cb109894b3673f45dc70ec9
|
[] |
no_license
|
PMEAL/porespy-backend
|
d5641c8b1ae1930b5dd9185c43f74036d5e95f94
|
bb76cae9e752a95e428ec417bf1524f90b110790
|
refs/heads/master
| 2023-03-31T05:35:48.043698
| 2021-04-11T01:21:34
| 2021-04-11T01:21:34
| 323,444,231
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 529
|
py
|
# Generated by Django 3.1.3 on 2021-03-16 20:26
from django.db import migrations, models
class Migration(migrations.Migration):
    """Create the PoreSizeDistribution model: auto id plus a psd_im text field."""

    dependencies = [
        ('quickstart', '0023_auto_20210314_2350'),
    ]
    operations = [
        migrations.CreateModel(
            name='PoreSizeDistribution',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('psd_im', models.TextField(default='')),
            ],
        ),
    ]
|
[
"espitiaandres123@gmail.com"
] |
espitiaandres123@gmail.com
|
b672c87e3458490ceb0e8b3852355a8c15a2c399
|
d1fadc514274711a7986a6b3caaaee7e8d48b4a6
|
/plot_scripts/scratch29.py
|
9b454212d7485e7e1237f495490e6b1a3e2c0169
|
[
"MIT"
] |
permissive
|
lbaiao/sys-simulator-2
|
24d940db6423070818c23b6ffefbc5da4a1030a0
|
94f00d43309fe7b56dac5099bd4024695ba317b6
|
refs/heads/master
| 2021-08-20T08:30:06.864473
| 2021-06-30T10:37:26
| 2021-06-30T10:37:26
| 230,333,523
| 1
| 0
| null | 2021-06-30T10:37:27
| 2019-12-26T22:02:59
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 1,688
|
py
|
import pickle
import matplotlib.pyplot as plt
import numpy as np
# Load the pickled results of the scratch29 simulation run.
# NOTE(review): the file handle is never closed — harmless for a one-shot
# plotting script, but worth a `with` block if this grows.
filepath = 'D:/Dev/sys-simulator-2/data/scratch29.pickle'
file = open(filepath, 'rb')
data = pickle.load(file)
# Numbers of D2D pairs whose per-action statistics get their own figure.
aux_range = [10,15,20]
action_counts_total = data['action_counts_total']
d2d_spectral_effs = data['d2d_speffs_avg_total']
mue_success_rate = data['mue_success_rate']
equals_counts_total = data['equals_counts_total']
d2d_speffs_avg = list()
for i, d in enumerate(d2d_spectral_effs):
    d2d_speffs_avg.append(np.average(d))
# Dual-axis figure: spectral efficiency (left) vs MUE success rate (right).
fig2, ax1 = plt.subplots()
ax1.set_xlabel('Number of D2D pairs in the RB')
ax1.set_ylabel('D2D Average Spectral Efficiency [bps/Hz]', color='tab:blue')
ax1.plot(d2d_speffs_avg, '.', color='tab:blue')
ax2 = ax1.twinx()
ax2.set_ylabel('MUE Success Rate', color='tab:red')
ax2.plot(mue_success_rate, '.', color='tab:red')
fig2.tight_layout()
xi = list(range(len(aux_range)))
ax = [0,1,2,3,4]
axi = list(range(len(ax)))
# One figure per selected N: mean/std action occurrence as a percentage.
for i, c in enumerate(action_counts_total):
    if i in aux_range:
        plt.figure()
        plt.plot(np.mean(c, axis=0)/i*100, '*',label='mean')
        plt.plot(np.std(c, axis=0)/i*100, 'x', label='std')
        plt.legend()
        plt.title(f'N={i}')
        plt.xlabel('Action Index')
        plt.ylabel('Average Action Ocurrency [%]')
        plt.xticks(axi, ax)
# Occurrence of devices picking equal actions: mean/std over the runs.
mean_equals = np.array([np.mean(c) for c in equals_counts_total])
std_equals = np.array([np.std(c) for c in equals_counts_total])
plt.figure()
plt.plot(mean_equals[aux_range]*100, '*',label='mean')
plt.plot(std_equals[aux_range]*100, 'x', label='std')
plt.legend()
plt.xlabel('Amount of D2D Devices')
plt.ylabel('Average Equal Actions Ocurrency [%]')
plt.xticks(xi, aux_range)
plt.show()
|
[
"lucasbaiao@gmail.com"
] |
lucasbaiao@gmail.com
|
80c0bbc4b4f1a69547dbd865963783e862ce8f3d
|
09941ea4600314ed0381e123ffb9f851e34bade8
|
/HackerEarth/Basic Programming/split_houses.py
|
bd54aa3276fa411e001cffc2729abbb4624dca4e
|
[] |
no_license
|
piyushkumar102/Competitive-Programming
|
208d2f59ab097e68e627d8cdb74189c4efa618a8
|
31025da6a055036d66c289d4a6f64ab756fcf1c6
|
refs/heads/master
| 2023-03-24T23:43:28.198318
| 2021-03-25T15:39:26
| 2021-03-25T15:39:26
| 351,489,431
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 133
|
py
|
# Read the grid length and the grid; fill every empty plot ('.') with a
# building ('B'), which is always allowed.
n = int(input())
grid = input()
grid = grid.replace('.', 'B')
# After filling, two houses touch iff 'HH' appears -> no valid split.
if'HH' in grid:
    print('NO')
else:
    print('YES')
    print(grid)
|
[
"piyush.kumarmaloo@gmail.com"
] |
piyush.kumarmaloo@gmail.com
|
36cfad3cd196894fc61a19c79f88a05598a87dd2
|
207d7f6d16a19bc78e27881841d7088b3eabc3c2
|
/day5/homework-ATM/modules/repay.py
|
ab498edd2effcf53edd276c143455289193ea672
|
[] |
no_license
|
pangguoping/python-study
|
b0c00f73177ec86148d06f780556a4340c45e1a8
|
769e828d41403b89d101c2ff915699bba91390cd
|
refs/heads/master
| 2021-01-20T19:33:52.601637
| 2017-03-07T23:34:04
| 2017-03-07T23:34:04
| 63,377,955
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 890
|
py
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# Auther: pangguoping
import json
import os
from conf import setting
from modules.write_log import write_record
#还款函数
def repay(card_num,**userinfo):
    """Credit-card repayment flow.

    card_num: account directory name under USER_DIR_FOLDER.
    userinfo: account record; reads 'credit' and 'balance', and persists
    the updated record to basic_info.json when the repayment succeeds.
    """
    # Outstanding debt = credit limit minus the current available balance.
    qiankuan = userinfo['credit'] - userinfo['balance']
    print('现在还欠:',qiankuan)
    # Amount to repay, entered by the user.
    fee = int(input("请输入还款金额:"))
    if fee <= qiankuan:
        userinfo['balance'] += fee
        balance = userinfo['balance']
        #log(card_num,'信用卡还款',+fee,balance,**userinfo)
        # Persist the updated account record to disk.
        json.dump(userinfo,
                  open(os.path.join(setting.USER_DIR_FOLDER, card_num, "basic_info.json"), 'w', encoding='utf-8'))
        write_record('%s - 信用卡账户还款:%f;本月额度:%f' % ("还款", fee, balance), card_num)
        print('你成功还款%d,当前可用额度%s' %(fee,balance))
    else:
        # The amount exceeds the outstanding debt: reject the repayment.
        print('输入还款金额错误')
|
[
"work2312@163.com"
] |
work2312@163.com
|
3ea0abb3bd098265da02a905ab08ccb1c2b9a663
|
aa8ca649dfe718398bc57ec00133e67df71ea407
|
/cbz_notes/wsgi.py
|
438f30169f7ec9e8bbed857bf5de8f73f7d5f0c9
|
[] |
no_license
|
habelash/cbz-notes
|
516a148f030b99a33445a06b1933b518a81d3667
|
d1f6ab75dde56f28fd421e4dcf77d69e7d491e54
|
refs/heads/master
| 2022-11-09T08:49:32.274230
| 2020-06-17T18:55:16
| 2020-06-17T18:55:16
| 271,054,937
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 395
|
py
|
"""
WSGI config for cbz_notes project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at the project settings before building the WSGI callable.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'cbz_notes.settings')
application = get_wsgi_application()
|
[
"habelash@gmail.com"
] |
habelash@gmail.com
|
fbda68e6b6f7e3ff700522a92abb1b10d67623cd
|
3c7f640afd9dde53ea0616c7d8a03186a80401a9
|
/online_shop/main/migrations/0003_auto_20201129_2211.py
|
c30164ed8cf8d56a9c97a02fe815711a8bf4a521
|
[] |
no_license
|
meg97/django_projects
|
12caeb81dab0b7bd9c5ce5f6180fd89d7b83b6d8
|
f0a67588a3685ece28d6d1cdde63c590d08468aa
|
refs/heads/master
| 2023-01-21T04:43:15.188741
| 2020-12-06T21:00:36
| 2020-12-06T21:00:36
| 310,807,694
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 421
|
py
|
# Generated by Django 3.1.3 on 2020-11-29 18:11
from django.db import migrations, models
class Migration(migrations.Migration):
    """Give Item.item_image a default placeholder and the 'media' upload dir."""

    dependencies = [
        ('main', '0002_auto_20201129_2158'),
    ]
    operations = [
        migrations.AlterField(
            model_name='item',
            name='item_image',
            field=models.ImageField(default='models/no_image.jpg', upload_to='media'),
        ),
    ]
|
[
"maneyeganyan@yahoo.com"
] |
maneyeganyan@yahoo.com
|
ebb1c1e58b95234c46d4cd1a13b51cec064b0756
|
620a79597511cecd55cef6cdc9ac09062c1fe12b
|
/valid/valid/urls.py
|
85db8c10cd4cafc533bdf30a4ba59925b253b97f
|
[] |
no_license
|
ashfan6339/myfristproject
|
6e4a0d76a3232022e94c8706342f97e9ae6c65ff
|
535864d1179521144c2b518feddbbc9a43836f50
|
refs/heads/master
| 2021-04-20T22:29:03.952907
| 2020-05-21T10:51:57
| 2020-05-21T10:51:57
| 249,721,885
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 839
|
py
|
"""valid URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path,include
from registor import views as v
urlpatterns = [
    path('admin/', admin.site.urls),
    # NOTE(review): no trailing slash and the name is spelled "registor"
    # (sic) — confirm both are intended before changing; templates may
    # reverse() this exact name.
    path("registor", v.registor, name="registor"),
]
|
[
"shaikashfan3@gmail.com"
] |
shaikashfan3@gmail.com
|
24f57a7e04375267f91a3c4ad24656b5ee311e60
|
ae0feb6bf0c3851ea1fcb57476a4012cdd09f0c9
|
/listMap.py
|
2fa46e48fd3ee50e5766208659d71aa385432771
|
[] |
no_license
|
beyondzhou/algorithms
|
f79e4832c53c528d972475aba5ab48bd33feefbb
|
a0f30d9a8efb51af7f58ad75b8a9f1ebb084d99f
|
refs/heads/master
| 2021-01-10T14:12:12.322067
| 2015-11-08T22:40:44
| 2015-11-08T22:40:44
| 44,276,856
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,372
|
py
|
class MyMap:
    """Map (dictionary) backed by an unsorted list of _MapEntry records.

    All lookups are linear scans; keys are compared with ==.
    """
    # init
    def __init__(self):
        self._entryList = list()
    # length
    def __len__(self):
        return len(self._entryList)
    # Contain
    def __contains__(self, key):
        ndx = self._findPosition(key)
        return ndx is not None
    # Add: overwrite the value if the key exists (returns False),
    # otherwise append a new entry (returns True).
    def add(self, key, value):
        ndx = self._findPosition(key)
        if ndx is not None:
            self._entryList[ndx].value = value
            return False
        else:
            entry = _MapEntry(key, value)
            self._entryList.append(entry)
            return True
    # Remove: asserts the key is present.
    def remove(self, key):
        ndx = self._findPosition(key)
        assert ndx is not None, "key is not in the map."
        self._entryList.pop(ndx)
    # valueOf: asserts the key is present.
    def valueOf(self, key):
        ndx = self._findPosition(key)
        assert ndx is not None, "key is not in the map."
        return self._entryList[ndx].value
    # find position: linear scan returning the index, or None if absent.
    def _findPosition(self, key):
        for i in range(len(self)):
            if self._entryList[i].key == key:
                return i
        return None
    # iter: iterates over the map's KEYS.
    def __iter__(self):
        return _MapIterator(self._entryList)
# Map storage
class _MapEntry:
# init
def __init__(self, key, value):
self.key = key
self.value = value
# Map Iterator
class _MapIterator:
# init
def __init__(self, entryList):
self._entryList = entryList
self._ndx = 0
def __iter__(self):
return self
def next(self):
if self._ndx < len(self._entryList):
entry = self._entryList[self._ndx].key
self._ndx += 1
return entry
else:
raise StopIteration
# Test Map Function
# NOTE(review): this function uses Python 2 print statements — the whole
# file targets Python 2.
def testMyMap():
    """Exercise MyMap: add, len, iterate, membership, remove, valueOf."""
    # init
    mapEntryList = MyMap()
    # Add some key,value pair
    mapEntryList.add('tim', 100)
    mapEntryList.add('dog', 10)
    mapEntryList.add('cat', 1)
    # Print the length
    print len(mapEntryList)
    # Print all the item
    for i in mapEntryList:
        print i,
    print ''
    # In check
    print 'dog' in mapEntryList
    print 'cat1' in mapEntryList
    # Remove
    mapEntryList.remove('dog')
    # Print all the item
    for i in mapEntryList:
        print i,
    print ''
    # Print the value
    print mapEntryList.valueOf('cat')
if __name__ == "__main__":
    testMyMap()
|
[
"guaguastd@gmail.com"
] |
guaguastd@gmail.com
|
e422554deab94dc0b4cbd8259b19d2efddecfe2b
|
bfc5eb03084f329755f40d54ebe2ce415c43ac88
|
/Tk多窗口/myWindow.py
|
d7c4bb7fa545cfa61432cffa1de259cfddb9d7c6
|
[] |
no_license
|
KronosOceanus/python
|
301d75ff161abd060faa552e48a08960eba22b51
|
ff6c378a2327dc0b3e6cba7ec0bca25cd52010cd
|
refs/heads/master
| 2023-03-04T02:48:46.286712
| 2021-02-15T11:10:43
| 2021-02-15T11:10:43
| 255,289,369
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 766
|
py
|
# 窗口类
import tkinter as tk
from tkinter import messagebox
class myWindow:
    """Toplevel demo window: flag 1 shows a label, flag 2 a message-box button."""
    def __init__(self,root,myTitle,flag):
        # Create the window
        self.top=tk.Toplevel(root,width=300,height=200)
        # Set the window title
        self.top.title(myTitle)
        # Keep the window on top of all others
        self.top.attributes('-topmost',1)
        # Place a different widget depending on the flag
        if flag==1:
            label=tk.Label(self.top,text=myTitle)
            label.place(x=50,y=50)
        elif flag==2:
            def buttonOK():
                # NOTE(review): dialog title reads 'Pthon' (sic) — confirm intended.
                tk.messagebox.showinfo(title='Pthon',
                                       message='shit')
            button=tk.Button(self.top,text=myTitle,command=buttonOK)
            button.place(x=50,y=50)
|
[
"704690152@qq.com"
] |
704690152@qq.com
|
430a6533bd2d8961d48c25a18e940c170945ce5d
|
5284385c49a2601655f08e7110f122f93c00c99b
|
/article/migrations/0009_alter_post_image.py
|
268745fc6dc035b842c7a6336a2ab3d31b8774c4
|
[] |
no_license
|
riadelimemmedov/Sade-Sosial-media
|
50964d3efc7e82b061882af1428a2a69930826a0
|
a379c0a753c95d480e00ee0f6124f9f14547706b
|
refs/heads/master
| 2023-08-14T02:26:50.828195
| 2021-09-21T04:17:01
| 2021-09-21T04:17:01
| 408,682,275
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 409
|
py
|
# Generated by Django 3.2.4 on 2021-09-02 14:08
from django.db import migrations, models
class Migration(migrations.Migration):
    """Make Post.image optional (blank/null allowed), uploading to 'users'."""

    dependencies = [
        ('article', '0008_alter_post_image'),
    ]
    operations = [
        migrations.AlterField(
            model_name='post',
            name='image',
            field=models.ImageField(blank=True, null=True, upload_to='users'),
        ),
    ]
|
[
"riad.elimemmedov@mail.ru"
] |
riad.elimemmedov@mail.ru
|
30297a7a22cfaf814fcfd2898ec4e7c856b4fb52
|
90ae2d02ff9a4dc01fd50f5c5db64acab27529ea
|
/research.py
|
a2ada317de2e86aeec5aa8f66a7e4af06e829736
|
[] |
no_license
|
hearable-labs/v2prototype
|
0fd28dafc92de036a7747c38ffbcca26a5ee1fb2
|
3b91b27397222a2b7695b3adaf591199de5d5391
|
refs/heads/master
| 2020-04-21T01:38:42.736345
| 2019-05-27T07:51:52
| 2019-05-27T07:51:52
| 169,229,798
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,895
|
py
|
import fileinput
counter = 0       # how many times replace_gain has run since the last reset
number_from = 0   # gain value currently written in gain_live.cfg

def setVariableNull():
    """Reset the module-level substitution state to its initial values."""
    global counter
    global number_from
    counter = 0
    number_from = 0

#replace the filepath by the corresponding path to the gain_live.cfg
def replace_gain(number_to):
    """Rewrite the mha.gain.gains line in gain_live.cfg to *number_to*.

    Edits the file in place (fileinput keeps a ``.bak`` backup) and
    remembers *number_to* so the next call knows what value to replace.
    The original had a separate first-call branch substituting from 0,
    but ``number_from`` starts at 0, so both branches did the exact same
    substitution; they are merged here.
    """
    global counter
    global number_from
    variable_from = "mha.gain.gains = [ " + str(number_from) + " " + str(number_from) + " ]"
    variable_to = "mha.gain.gains = [ " + str(number_to) + " " + str(number_to) + " ]"
    # inplace=True redirects stdout into the file, so the print below
    # writes each (possibly substituted) line back out.
    with fileinput.FileInput('gain_live.cfg',
                             inplace=True, backup='.bak') as file:
        for line in file:
            print(line.replace(variable_from,
                               variable_to), end='')
    number_from = number_to
    counter = counter + 1
    print("counter = ", counter)

# (A near-identical commented-out variant targeting openMHA_test.cfg was
# removed; see version control history if it is ever needed again.)
#replace_gain(2056)
|
[
"noreply@github.com"
] |
noreply@github.com
|
61105c7f5cc1fb8567c89d5cb17133852f50a7de
|
cbd33720c80ee2f7f0c846f966d00890a511728d
|
/User/migrations/0003_auto_20180518_1144.py
|
a9001f9602354a498d05276295e713df62cc1062
|
[] |
no_license
|
KyalSmith/Django_Auth_Project
|
a21f09965a47298b09e7f34c33b2c98f1a32114f
|
cdc209698c1260fba412c17d063dd9413352701b
|
refs/heads/master
| 2020-03-12T17:34:19.926894
| 2018-05-22T18:17:52
| 2018-05-22T18:17:52
| 130,739,302
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 666
|
py
|
# Generated by Django 2.0.5 on 2018-05-18 11:44
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Make UserProfileInfo.username a OneToOne primary key to the auth
    user model, dropping the old auto-generated id column."""

    dependencies = [
        ('User', '0002_auto_20180517_1659'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='userprofileinfo',
            name='id',
        ),
        migrations.AlterField(
            model_name='userprofileinfo',
            name='username',
            field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, primary_key=True, serialize=False, to=settings.AUTH_USER_MODEL),
        ),
    ]
|
[
"kyal.smith@gmail.com"
] |
kyal.smith@gmail.com
|
3a7cd410882b4da4bea9fbdc498e9b1fb105d1e0
|
6548ed03a7c8f3110aabda75f0c725cecb0a03da
|
/Ex087-Matriz2.py
|
3825344333684fe3f9daf2f47a35dda5874aa12d
|
[] |
no_license
|
MurilloFagundesAS/Exercicios-Resolvidos-Curso-em-Video
|
ac328a7cdca4ff262ddc6c29342816afd93478a2
|
d8ab260a8de62a975f9113877437cab785fa23db
|
refs/heads/master
| 2023-01-21T11:14:21.597135
| 2020-12-04T19:51:25
| 2020-12-04T19:51:25
| 318,572,956
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 683
|
py
|
# Read a 3x3 matrix; report the sum of even values, the sum of the third
# column, and the largest value of the second row.
lista = []
parte = []
par = 0            # running sum of the even values
coluna3 = 0        # running sum of the third column (j == 2)
maior = 0          # largest value seen in the second row (i == 1)
for i in range(0,3):
    for j in range(0,3):
        x = int(input(f'Digite um número da posição [{i},{j}]: '))
        if x % 2 == 0:
            par += x
        if j == 2:
            coluna3 += x
        parte.append(x)
        # Seed `maior` with the FIRST value of row 1, then keep the max.
        # (The original seeded it with the matrix's very first element,
        # so it reported a wrong "largest of the second row" whenever
        # every second-row value was smaller than lista[0][0].)
        if i == 1 and (j == 0 or x > maior):
            maior = x
    lista.append(parte[:])
    parte.clear()
for i in range(0,3):
    for j in range(0,3):
        print(f'[{lista[i][j]}] ', end='')
    print()
print(f'A soma dos valores pares é {par}')
print(f'A soma da 3ª coluna é {coluna3}')
print(f'E o maior número da segunda linha é {maior}')
|
[
"mll-fag@hotmail.com"
] |
mll-fag@hotmail.com
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.