blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2 values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313 values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107 values | src_encoding stringclasses 20 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.02M | extension stringclasses 78 values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
54711c7dd2324bbcab304447d698604a428107b0 | 9bbc2f3f4f69296ee9aef2970a0d7d7c4a2ab2ee | /Video Capture/video_stream_opencv-master/cfg/VideoStream.cfg | 272c16323b99f5d4e09e299c2b1bcd773ad9af6f | [] | no_license | rubis-lab/SML | 6b9702f6f13dbc779bfab34640052fdfc63127f0 | b3bbe6f9ff6967890dfce47ce9496f267ed909bc | refs/heads/master | 2020-04-26T22:59:44.709888 | 2019-09-02T17:14:19 | 2019-09-02T17:14:19 | 173,889,450 | 3 | 2 | null | 2019-03-07T12:09:12 | 2019-03-05T06:37:05 | C++ | UTF-8 | Python | false | false | 1,519 | cfg | #!/usr/bin/env python
from dynamic_reconfigure.parameter_generator_catkin import *
# Dynamic-reconfigure parameter definitions for the video_stream_opencv node.
PKG = "video_stream_opencv"
gen = ParameterGenerator()
#       name    type    level    description    default    min    max
gen.add("camera_name", str_t, 0, "Camera name", "camera")
# Rate at which frames are read from the capture device. The original
# description ("Image Publish Rate") was a copy-paste of the "fps"
# parameter below; this one actually sets the camera capture FPS.
gen.add("set_camera_fps", double_t, 0, "Camera FPS", 30.0, 0.0, 1000.0)
gen.add("buffer_queue_size", int_t, 0, "Buffer size for capturing frames", 100, 1, 1000)
gen.add("fps", double_t, 0, "Image Publish Rate", 240.0, 0.0, 1000.0)
gen.add("frame_id", str_t, 0, "Camera FrameID", "camera")
gen.add("camera_info_url", str_t, 0, "Camera info URL", "")
gen.add("flip_horizontal", bool_t, 0, "Flip image horizontally", False)
gen.add("flip_vertical", bool_t, 0, "Flip image vertically", False)
# width/height of 0 means "keep the native capture resolution".
gen.add("width", int_t, 0, "Target width", 0, 0, 10000)
gen.add("height", int_t, 0, "Target height", 0, 0, 10000)
# Image tuning values are normalized to [0, 1].
gen.add("brightness", double_t, 0, "Target brightness", 0.5019607843137255, 0.0, 1.0)
gen.add("contrast", double_t, 0, "Target contrast", 0.12549019607843137, 0.0, 1.0)
gen.add("hue", double_t, 0, "Target hue", 0.5, 0.0, 1.0)
gen.add("saturation", double_t, 0, "Target saturation", 0.64, 0.0, 1.0)
gen.add("auto_exposure", bool_t, 0, "Target auto exposure", True)
gen.add("exposure", double_t, 0, "Target exposure", 0.5, 0.0, 1.0)
gen.add("loop_videofile", bool_t, 0, "Loop videofile", False)
gen.add("reopen_on_read_failure", bool_t, 0, "Re-open camera device on read failure", False)
# Emit VideoStreamConfig; exit() hands the generator's status back to catkin.
exit(gen.generate(PKG, PKG, "VideoStream"))
| [
"dohkim119@github.com"
] | dohkim119@github.com |
1c6e1b9c12df767cb7a6b9f2532ec92383cd2c87 | 739324fe968beecf2814792c0a85f6690e56a26a | /Codes/Dalily_Flash/Week_2/23Jan/Python/Program1.py | 3babf00ed107d5e5dfc944e100facdbb79e59e38 | [] | no_license | Kunal17sarpatil/kunal_personal | 76e5cd1002c23dc30c1d7d86d495b72992a4c24d | b1c4f8de8b73e34253743deb3c26e00b2b02ef76 | refs/heads/master | 2023-03-22T03:47:46.132195 | 2020-06-17T10:53:19 | 2020-06-17T10:53:19 | 151,773,565 | 0 | 0 | null | 2021-03-20T04:23:24 | 2018-10-05T20:18:31 | Java | UTF-8 | Python | false | false | 91 | py | for row in range(1,5):
    # Print the row number `row` times on one line (pattern: 1 / 2 2 / 3 3 3 / ...).
    for no in range(0,row):
        print(row,end=" ")
    # Terminate the current line (the trailing semicolon is redundant in Python).
    print();
| [
"hawkeye@pop-os.localdomain"
] | hawkeye@pop-os.localdomain |
e781d00d0e62eba41dfcc5c1dbf741b1d1c2e5d3 | 3b40c1bc2a0616ae2ad1ab94f4b8c68ac43c5cb3 | /AmazonSpider/AmazonSpider/spiders/Amazon_spider.py | bc88f030efa0c3cacf552051d866105abf926021 | [] | no_license | clamli/Amazon-Dataset-API | f0341b54bb0bcbd1ee4d42b94485ff18e7d13a55 | ae68be8a5f311d0f6a11a3330b68b97875a46df1 | refs/heads/master | 2020-03-17T01:39:22.903599 | 2018-05-16T13:33:58 | 2018-05-16T13:33:58 | 133,161,573 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 596 | py | import scrapy
class AmazonSpider(scrapy.Spider):
    """Crawls the SNAP Amazon category-files index and yields dataset links."""
    name = "Amazon"

    def start_requests(self):
        """Seed the crawl with the single category-files index page."""
        index_page = "http://snap.stanford.edu/data/amazon/productGraph/categoryFiles/"
        yield scrapy.Request(url=index_page, callback=self.parse)

    def parse(self, response):
        """Yield one {dataset, url} record per anchor in the listing table."""
        names = response.css("table td a::text").extract()
        links = response.css("table td a::attr(href)").extract()
        for name, link in zip(names, links):
            yield {
                'dataset': name,
                'url': link,
            }
| [
"boyizjuer2017@gmail.com"
] | boyizjuer2017@gmail.com |
fbfb7242f59b6de918bafe71b564c981ed3c02db | decc60052f0d9e8c84bfae731a3a1e4f86da7659 | /classes/Instrument.py | 48c5ea07e8f97388fcef35fbd833b6e95caa0582 | [] | no_license | dxcv/research | 8370c2baac66b098f61424d6e323233f358239e2 | 153d7633f526a5eefd02c650b1c6264e1f93e73b | refs/heads/master | 2020-09-08T16:02:15.130239 | 2019-11-08T15:50:05 | 2019-11-08T15:50:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 721 | py | import pandas as pd
class Instrument(object):
""" Individual Instrument """
def __init__(self, symbol, df_data, target_freq):
self.symbol = symbol
self.raw_data = df_data
self.date_index = df_data.index
self.frequency = target_freq
for key in df_data:
# todo: only set attribute for certain data (e.g. Close, Open, High,...)
setattr(self, key.replace(' ', ''), self.raw_data[key])
def get_df_view(self, attributes_list):
df = pd.DataFrame()
for attribute in attributes_list:
series = getattr(self, attribute)
series.name = attribute
df = pd.concat([df, series], axis=1)
return df
| [
"28ideas@gmail.com"
] | 28ideas@gmail.com |
9107cc0a1e028e20c237a6840a6fc50e903497c8 | ca18cd3b72d75e3877bcb8ba1fdccd95b188dc46 | /python_set.py | a03e891b245b87e37a4529f809fedac57138f47a | [] | no_license | ramshree123/ramya | e64490ea6828db1ee952bdbbd35034d0ed487885 | 88df88c198d29dcf50ac699962a21d20fc213b30 | refs/heads/master | 2021-05-16T14:09:42.601497 | 2018-03-19T10:01:17 | 2018-03-19T10:01:17 | 118,075,277 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 49 | py | set1={1,2,3,4}
set2 = {3, 4, 6, 8}
# Print the intersection of set1 and set2 ({3, 4}); the original passed the
# literal string "set1&set2" to print() instead of evaluating the expression.
print(set1 & set2)
| [
"noreply@github.com"
] | noreply@github.com |
2e73afe47864bd9ebaf4f8f37c8ee656dbe4e152 | 17713d586f680821759b0bba8e25046c2fa6a90b | /build/turtlebot3/turtlebot3_description/catkin_generated/pkg.develspace.context.pc.py | 949b141090dd163690ebe5bdd2ab60d94da91ef9 | [] | no_license | Saipriyavk/Delivery-Robot | fb899a8330815b02de118275dc6d7a1fad70bc8e | 9255791e683b32573227a1877b5fb92f000dfca2 | refs/heads/master | 2023-06-14T16:32:25.444170 | 2021-07-19T21:43:13 | 2021-07-19T21:43:13 | 387,573,293 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 402 | py | # generated from catkin/cmake/template/pkg.context.pc.in
# Prefix prepended to package-relative names (empty for devel space).
CATKIN_PACKAGE_PREFIX = ""
# Exported include directories; the generated "" != "" guard always yields [].
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
# Run dependencies, converted from semicolon- to space-separated form.
PROJECT_CATKIN_DEPENDS = "urdf;xacro".replace(';', ' ')
# Libraries exported with their link prefix; empty for this package.
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "turtlebot3_description"
# Absolute path of the devel space this context was generated for.
PROJECT_SPACE_DIR = "/home/saipriya/projects_ai_cse/devel"
PROJECT_VERSION = "1.2.1"
| [
"saipriya.vk31@gmail.com"
] | saipriya.vk31@gmail.com |
ef10e6dd4322d781b7c31c1a350b7397e6a752ac | 4ee1d690aee51b13091cb2397bcad8254da446f1 | /word_select.py | a23ad1608880d4e6f343eecaa86f7e0cae210973 | [] | no_license | xyl576807077/BackgroundRemove | 4a80752e09d6e3791f22e726cd4eef0154ec1216 | c6df04e764b3fd172caf89a90e53e1da62c077a7 | refs/heads/master | 2020-03-09T23:06:35.023744 | 2018-05-16T02:52:52 | 2018-05-16T02:52:52 | 129,051,132 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,116 | py | import copy
import json
import os
import sys
import numpy as np
from char_process import *
from langconv import Converter
from select_rule import *
from util import *
class WordSelect:
    """Samples text sequences from per-language word-frequency tables.

    Language codes used as indices into ``self.freq`` throughout:
    0 = simplified Chinese, 1 = traditional Chinese, 2 = other characters,
    3 = symbols (assumption inferred from usage — TODO confirm against
    char_process.hard_encode). Relies on helpers imported elsewhere in this
    file: hard_encode, classify, random_interval_select, cal_dict_sum.
    """
    def __init__(self, label_path, frequency_path):
        # label file: JSON dict whose keys are the vocabulary; '$$' is a
        # bookkeeping entry and is removed.
        with open(label_path, 'r') as f:
            _ = json.load(f)
        labels = list(_.keys())
        labels.remove('$$')
        self.language_ratio = {}
        for label in labels:
            index = hard_encode(label)
            self.language_ratio[index] = self.language_ratio.get(index, 0) + 1
        # freq[i]: remaining sampling budget for every word of language i.
        self.freq = [{}, {}, {}, {}]
        with open(frequency_path, 'r') as f:
            tmp_freq = json.load(f)
        tmp = classify(labels)
        for i in range(len(tmp)):
            words = tmp[i]
            for w in words:
                self.freq[i][w] = tmp_freq[w]
        # NOTE(review): this hard-coded table overwrites the ratios computed
        # in the loop above, making that computation dead code — presumably
        # deliberate tuning, but worth confirming.
        self.language_ratio = {0:1028916, 1:0, 2:0, 3:0}
        # Empirical distribution of target sequence lengths (length -> weight).
        self.len_ratio = {1: 22963, 2: 21997, 3: 73336, 4: 25366,
                          5: 12318, 6: 9778, 7: 7699, 8: 6191,
                          9: 4090, 10: 3356, 11: 2763, 12: 1841,
                          13: 1429, 14: 1052, 15: 900, 16: 700,
                          17: 593, 18: 454, 19: 405, 20: 295,
                          21: 2492}
    def get_language(self):
        """Draw a language code weighted by self.language_ratio."""
        language = random_interval_select(self.language_ratio)
        # self.language_ratio[language] -= 1
        return language
    def update_freq(self, select_words):
        """Decrement the budget of each selected word (floored at 0)."""
        for word in select_words:
            code = hard_encode(word)
            # When a word's budget hits zero its language loses one slot.
            if self.freq[code][word] == 1:
                self.language_ratio[code] -= 1
            self.freq[code][word] = max(self.freq[code][word] - 1, 0)
    def get_canditate(self):
        """Return, per language, the words that still have budget left."""
        res = [[], [], [], []]
        for i in range(4):
            words = self.freq[i]
            for key, value in words.items():
                if value != 0:
                    res[i].append(key)
        return res
    def get_seq_len(self):
        """Draw a target sequence length weighted by self.len_ratio."""
        length = random_interval_select(self.len_ratio)
        return length
    def get_word_language(self, flag=None):
        """Decide how many words of each language the next sequence needs.

        Returns a dict {language_code: count}. Only the language == 0 branch
        is currently reachable (language is hard-coded to 0 below).
        """
        # check whether a mixed-language sequence can still be formed
        chinese = cal_dict_sum(self.freq[0]) + cal_dict_sum(self.freq[1])
        char = cal_dict_sum(self.freq[2])
        symbol = cal_dict_sum(self.freq[3])
        # if flag == None:
        #     language = self.get_language()
        # else:
        #     if flag <= 1028916:
        #         language = 0
        #     elif flag <= 1028916 + 409527:
        #         language = 2
        #     else:
        #         language = 3
        #     assert language != 1
        language = 0
        length = self.get_seq_len()
        # print(language, length)
        while language == 3 and length == 1:
            # print('possible infinite loop')
            length = self.get_seq_len()
        res = {}
        if language == 0:
            simple = cal_dict_sum(self.freq[0])
            tradition = cal_dict_sum(self.freq[1])
            flag = -1
            if simple > length:
                flag = 0
            elif tradition > length:
                flag = 1
            else:
                # Neither script has enough budget: fall back to the larger
                # one and shrink the sequence to whatever is left.
                if simple > tradition:
                    length = simple
                    flag = 0
                else:
                    length = tradition
                    flag = 1
            assert length != 0
            for i in range(length):
                res[flag] = res.get(flag, 0) + 1
            print('***\n')
        elif language == 2:
            symbol_num = cal_dict_sum(self.freq[3])
            length = length if length < symbol_num else symbol_num
            for i in range(length):
                res[3] = res.get(3, 0) + 1
        elif language == 3:
            # Mixed sequences: pick a combination of language groups
            # ('0' = Chinese, '1' = chars, '2' = symbols in this encoding).
            combination = ['01', '02', '12', '012']
            if chinese == 0:
                combination = ['12']
            if char == 0:
                combination = ['02']
            if symbol == 0:
                combination = ['01']
            print(chinese, char, symbol,combination)
            assert chinese + char + symbol != 0
            index = int(np.random.uniform(0, len(combination)))
            select = combination[index]
            for num in select:
                if int(num) != 0:
                    res[int(num) + 1] = 1
                else:
                    # '0' means Chinese: choose simplified vs traditional.
                    if cal_dict_sum(self.freq[0]) > 0 and cal_dict_sum(self.freq[1]) > 0:
                        prob = np.random.uniform(0, 1)
                        if prob > 0.5:
                            res[0] = 1
                        else:
                            res[1] = 1
                    elif cal_dict_sum(self.freq[0]) > 0:
                        res[0] = 1
                    else:
                        res[1] = 1
            if length < len(select):
                length = len(select)
            else:
                # Top up to the target length with random leftover words.
                difference = length - len(select)
                canditate = self.get_canditate()
                tmp = []
                for l in canditate:
                    tmp.extend(l)
                canditate = tmp
                cp = CharProcess()
                for i in range(difference):
                    index = int(np.random.uniform(0, len(canditate)))
                    code = hard_encode(canditate[index])
                    res[code] = res.get(code, 0) + 1
                    canditate.pop(index)
        return res
    def get_words(self, func=[], flag=None):
        """Sample one sequence; returns None when candidates are exhausted.

        NOTE(review): `func=[]` is a mutable default argument — harmless here
        because it is only iterated, but a None sentinel would be safer.
        Side effect: rewrites ./tmp-1.json with the remaining budgets.
        """
        lang_word_dic = self.get_word_language(flag)
        canditate = self.get_canditate()
        for f in func:
            canditate = f(lang_word_dic, canditate)
        flag = 0
        for i in range(4):
            flag += len(canditate[i])
        if flag == 0:
            return None
        # if len(canditate[3]) == 0:
        #     raise NameError("used all symbols")
        words = ''
        # print(lang_word_dic)
        for key, value in lang_word_dic.items():
            for j in range(value):
                w = random_interval_select(self.freq[key])
                self.update_freq([w])
                words += w
        tmp_dict = {}
        tmp_dict.update(self.freq[0])
        tmp_dict.update(self.freq[1])
        tmp_dict.update(self.freq[2])
        tmp_dict.update(self.freq[3])
        with open('./tmp-1.json', 'w') as f:
            json.dump(tmp_dict, f, ensure_ascii=False)
        return words
# wordselect = WordSelect('./data/all_chars_dict.json', './data/5_16_big.json')
# i = 1
# while(1):
# words = wordselect.get_words(func=[only_full, only_half])
# i += 1
# if words == None:
# break
# print(i)
# with open('./5_16_big.txt', 'a') as f:
# f.write(words + '\n')
# with open('./log.txt', 'a') as f:
# tmp = ''
# for key, value in wordselect.language_ratio.items():
# tmp = tmp + str(key) + ':' + str(value) + '\t'
# tmp += '\n'
# f.write(tmp)
| [
"xyl576807077@gmail.com"
] | xyl576807077@gmail.com |
dd4c5f0cf3d049124539bf2e96145945474a60c3 | 389569a591284a2adcdc38046114e7b1038afd94 | /python-script/trax/main.py | 9c8e8741e2a3e128b672f4596ae761a0f61aea50 | [] | no_license | xytysingle/AnnotationTool | b797daf2fd472f602341b16f24fb1ed9b702aef1 | a217d4376ceee739e0d8c43515c403133982e86e | refs/heads/master | 2020-04-11T18:16:10.438919 | 2019-07-31T10:21:18 | 2019-07-31T10:21:18 | 161,992,153 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,827 | py | from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.remote.command import Command
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import TimeoutException, NoSuchElementException
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.keys import Keys
from selenium.webdriver import Firefox
from selenium.webdriver.support.events import EventFiringWebDriver, AbstractEventListener
from selenium.webdriver.common.action_chains import ActionChains
import time
import requests
import json
import os
import math
from urllib import request
def login():
    """Log into trax-cloud with selenium, set the date filter, then resume
    scraping from lastPageScenceId via getNextScencesList().

    Uses the module-global `browser`/`wait` created in __main__.
    NOTE(review): credentials are hard-coded below — move to env/config.
    """
    global lastPageScenceId
    getLogin_url = 'https://services.trax-cloud.cn'
    # Two-step login form: username first, then password.
    username = wait.until(EC.presence_of_element_located((By.NAME, "username")))
    # username = browser.find_element_by_name("username")
    # submit_next = browser.find_element_by_name("login")
    submit_next = wait.until(EC.presence_of_element_located((By.NAME, "login")))
    username.clear()
    username.send_keys("chenqinghai@swirebev.com")
    time.sleep(1)
    submit_next.click()
    # password_input = browser.find_element_by_name("password")
    # submit_login = browser.find_element_by_name("login")
    password_input = wait.until(EC.presence_of_element_located((By.NAME, "password")))
    submit_login = wait.until(EC.presence_of_element_located((By.NAME, "login")))
    password_input.clear()
    password_input.send_keys("Trax12345")
    time.sleep(1)
    submit_login.click()
    # Navigate: Explorer -> Scenes -> date-range picker.
    Explorer = wait.until(EC.presence_of_element_located((By.XPATH, "/html/body/ui-view/div/ui-view/div/div/div[1]/div[2]/a")))
    Explorer.click()
    # Explorer = browser.find_element_by_xpath("/html/body/ui-view/div/ui-view/div/div/div[1]/div[2]/a").click()
    # /html/body/ui-view/div/ui-view/div/div/div[1]/div[2]/a
    Scenes = browser.find_element_by_xpath("/html/body/ui-view/div/ui-view/ui-view/div/div[2]/div[2]").click()
    DateRange = wait.until(EC.presence_of_element_located((By.XPATH, "/html/body/ui-view/div/ui-view/ui-view/ui-view/div/div[1]/div/ui-view/div/div/trax-date-picker/div/div"))).click()
    # https://services.trax-cloud.cn/trax-one/api/projects/swirecn/explore/scenes/all/?limit=200&from=2019-02-01&to=2019-02-02&direction=first
    FromDate = wait.until(EC.presence_of_element_located((By.XPATH, "/html/body/ui-view/div/ui-view/ui-view/ui-view/div/div[1]/div/ui-view/div/div/trax-date-picker/div/div[2]/div[1]/input[1]")))
    ToDate = wait.until(EC.presence_of_element_located((By.XPATH, "/html/body/ui-view/div/ui-view/ui-view/ui-view/div/div[1]/div/ui-view/div/div/trax-date-picker/div/div[2]/div[1]/input[2]")))
    # Date strings must match the picker's display format, e.g. '12 Mar, 2019'.
    # '12 Mar, 2019' '14 Mar, 2019' Mar Feb Jan
    FromDate.clear()
    FromDate.send_keys("13 Mar, 2019")
    ToDate.clear()
    ToDate.send_keys("13 Mar, 2019")
    time.sleep(1)
    Apply_btn = wait.until(EC.presence_of_element_located((By.XPATH, "/html/body/ui-view/div/ui-view/ui-view/ui-view/div/div[1]/div/ui-view/div/div/trax-date-picker/div/div[2]/div[6]/button[2]")))
    Apply_btn.click()
    #
    # page = browser.page_source
    # enter the scene list; fixed sleep gives the page time to load
    time.sleep(5)
    # getFirstScencesList()
    getNextScencesList(lastPageScenceId)
def saveCookies():
    """Dump the live selenium session's cookies to cookies.json."""
    cookies = browser.get_cookies()
    with open('cookies.json', 'w') as cookie_file:
        cookie_file.write(json.dumps(cookies))
    print(cookies)
# Load cookies.json and rebuild the HTTP Cookie header string
def getCookies():
    """Return a Cookie header value ("k=v; k2=v2") built from cookies.json."""
    with open('cookies.json', 'r', encoding='utf-8') as cookie_file:
        stored = json.loads(cookie_file.read())
    pairs = ["{}={}".format(entry["name"], entry["value"]) for entry in stored]
    return '; '.join(pairs)
# Read a value from the browser's localStorage
def getLocalStorage(key):
    """Return localStorage[key] from the live browser session.

    `key` must arrive pre-quoted (e.g. "'authentication_token'") because it
    is interpolated verbatim into the JavaScript snippet below.
    """
    # getItem = 'localStorage.getItem("temp2")'
    print(key)
    res = browser.execute_script("return localStorage.getItem({})".format(key))
    return res
def getLabelResults(index):
    """Fetch the label results for scene `index` from the Trax API and save
    the JSON plus every probe image under `date_path/<index>/`.

    Relies on module globals (`date_path`) and the logged-in browser session
    (auth headers via getLocalStorage()/getCookies()). Sleeps 2s afterwards
    to throttle the crawl.
    """
    print('发起请求...')
    base_url = 'https://services.trax-cloud.cn/trax-one/api/projects/swirecn/scene/' + str(index)
    headers = {
        # localStorage keys are passed pre-quoted — getLocalStorage()
        # interpolates them verbatim into a JS snippet.
        "authentication_token": getLocalStorage("'authentication_token'"),
        "authorization_token": getLocalStorage("'authorization_token'"),
        "user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.86 Safari/537.36",
        "refresh_token": getLocalStorage("'refresh_token'"),
        "cookie": getCookies()
    }
    try:
        rec_response = json.loads(requests.get(base_url, headers=headers).text)
        scence_path = date_path + "/{}".format(str(index))
        mkdir(scence_path)
        saveResultsByJson(scence_path + "/{}".format(str(index)), rec_response)
        for img in rec_response["probeImages"]:
            # Rewrite the stored S3 URL to the CDN's "original" endpoint.
            img_url = 'https://services.traxretail.com/images/traxus' + img["probe_image_path"].partition('http://traxus.s3.amazonaws.com')[2] + '/original'
            img_name = img["probe_image_path"].split('/')[-1]
            try:
                saveimage(img_url, scence_path + "/{}.jpeg".format(img_name))
            except Exception as e:
                print("图片保存失败:", e)
        print('爬取成功...')
    except Exception as e:
        # Was a bare `except:`, which also swallowed KeyboardInterrupt and
        # SystemExit; narrowed and the error is now reported.
        print("爬取失败", e)
    time.sleep(2)
    # print(rec_response)
def goToNextPage():
    """Click the pagination 'next' control, wait, then open the first scene."""
    # span.xp-navigate-description.trax-tst-pagination-paging-summary
    page_location = wait.until(EC.presence_of_element_located((By.CSS_SELECTOR, 'span.xp-navigate-description.trax-tst-pagination-paging-summary')))
    print('page_location:', page_location.text)
    wait.until(EC.presence_of_element_located((By.CSS_SELECTOR, 'span[title="next"]'))).click()
    # enter the scene viewer; fixed sleep gives the new page time to load
    time.sleep(5)
    wait.until(EC.presence_of_element_located((By.CSS_SELECTOR, 'a[href^="trax-one/swirecn/explore/scene/"]'))).click()
def getNextSence():
    """In the scene viewer, advance to the next sibling scene and log where
    the viewer now is (location text and scene index)."""
    scence_location = wait.until(EC.presence_of_element_located((By.CSS_SELECTOR, 'body > ui-view > div > ui-view > ui-view > div > div.is-subheader.is-viewer-subheader.sp-flex-shrink > span.is-subheader-center > ui-view > div > siblings-navigator > span > span > span.items-list.trax-tst-viewer-serializationText')))
    # The "next" arrow of the siblings navigator.
    wait.until(EC.presence_of_element_located((By.CSS_SELECTOR, 'body > ui-view > div > ui-view > ui-view > div > div.is-subheader.is-viewer-subheader.sp-flex-shrink > span.is-subheader-center > ui-view > div > siblings-navigator > span > span > span.trax-icons.trax-icons-page-back.rotated-to-down-arrow.trax-tst-viewer-next'))).click()
    scence_index = wait.until(EC.presence_of_element_located((By.CSS_SELECTOR, 'body > ui-view > div > ui-view > ui-view > div > div.is-subheader.is-viewer-subheader.sp-flex-shrink > span.is-subheader-left > ui-view > div > span > span:nth-child(4)')))
    print('scence_location:', scence_location.text, 'scence_index:', scence_index.text)
def getFirstScencesList():
    """Fetch the FIRST page (direction=first) of the scene list for the
    configured date window, save it, then scrape every scene on the page.

    NOTE(review): near-duplicate of getNextScencesList(); the two differ only
    in the `direction`/`last_known_primary_key` request fields and should be
    merged. Recursion into getNextScencesList() at the page boundary grows
    the call stack by one frame per page.
    """
    global pageNumber
    global totalPages
    print('发起场景列表请求...')
    base_url = 'https://services.trax-cloud.cn/trax-one/api/projects/swirecn/explore/scenes/all/'
    headers = {
        "authentication_token": getLocalStorage("'authentication_token'"),
        "authorization_token": getLocalStorage("'authorization_token'"),
        "user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.86 Safari/537.36",
        "refresh_token": getLocalStorage("'refresh_token'"),
        "cookie": getCookies()
    }
    request_data = {
        "limit": 200,
        "from": from_date,
        "to": to_date,
        "direction": 'first',
        # "last_known_primary_key": last_known_primary_key
    }
    scencesList_res = requests.get(url=base_url, headers=headers, params=request_data).text
    scencesList_res = json.loads(scencesList_res)
    # Persist the raw page listing as <date>_<page>.json for auditing.
    saveResultsByJson(date_path +'/' + date + '_' + str(pageNumber + 1), scencesList_res)
    print(scencesList_res)
    totalItemsCount = scencesList_res["totalItems"]["total_items"]
    items = scencesList_res["items"]
    print("totalItemsCount:",totalItemsCount, "items:", items)
    pageNumber += 1
    totalPages = math.ceil(int(totalItemsCount) / 200)
    for i in range(0, 200):
        index = items[i]["scene_id"]
        print("正在爬取第{}页的第{}条,共{}页,共{}条".format(pageNumber, i+1, totalPages, totalItemsCount))
        try:
            getLabelResults(index)
            # Last item of a full page: chain to the next page.
            if i == 199 and pageNumber <= totalPages:
                getNextScencesList(index)
        except Exception as e:
            print('获取下个场景失败', e)
def getNextScencesList(last_known_primary_key):
    """Fetch the scene-list page AFTER `last_known_primary_key`
    (direction=next), save it, then scrape every scene on the page.

    NOTE(review): near-duplicate of getFirstScencesList() — see the note
    there; the recursion at the page boundary also applies here.
    """
    global pageNumber
    global totalPages
    print('发起场景列表请求...')
    base_url = 'https://services.trax-cloud.cn/trax-one/api/projects/swirecn/explore/scenes/all/'
    headers = {
        "authentication_token": getLocalStorage("'authentication_token'"),
        "authorization_token": getLocalStorage("'authorization_token'"),
        "user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.86 Safari/537.36",
        "refresh_token": getLocalStorage("'refresh_token'"),
        "cookie": getCookies()
    }
    request_data = {
        "limit": 200,
        "from": from_date,
        "to": to_date,
        "direction": 'next',
        "last_known_primary_key": last_known_primary_key
    }
    scencesList_res = requests.get(url=base_url, headers=headers, params=request_data).text
    scencesList_res = json.loads(scencesList_res)
    # print(scencesList_res)
    # saveResultsByJson(str(2019), scencesList_res)
    # Persist the raw page listing as <date>_<page>.json for auditing.
    saveResultsByJson(date_path + '/' + date + '_' + str(pageNumber + 1), scencesList_res)
    print(scencesList_res)
    totalItemsCount = scencesList_res["totalItems"]["total_items"]
    items = scencesList_res["items"]
    print("totalItemsCount:", totalItemsCount, "items:", items)
    pageNumber += 1
    totalPages = math.ceil(int(totalItemsCount) / 200)
    for i in range(0, 200):
        index = items[i]["scene_id"]
        print("正在爬取第{}页的第{}条,共{}页,共{}条".format(pageNumber, i + 1, totalPages, totalItemsCount))
        try:
            getLabelResults(index)
            # Last item of a full page: chain to the next page.
            if i == 199 and pageNumber <= totalPages:
                getNextScencesList(index)
        except Exception as e:
            print('获取下个场景失败', e)
def saveimage(imgUrl, imgPath):
    """Download imgUrl to imgPath via urllib.request.urlretrieve (network I/O)."""
    request.urlretrieve(imgUrl, imgPath)
def saveResults(filename, data):
    """Write `data` verbatim to `<filename>.json` (UTF-8 text)."""
    target = "{}.json".format(filename)
    with open(target, "w", encoding='utf-8') as out:
        out.write(data)
def saveResultsByJson(filename, data):
    """Serialize `data` as JSON to `<filename>.json`, keeping non-ASCII text readable."""
    with open("{}.json".format(filename), 'w', encoding='utf-8') as out:
        out.write(json.dumps(data, ensure_ascii=False))
def mkdir(path):
    """Create directory `path` (including parents) if it does not exist.

    Returns True when the directory was created, False when something
    already existed at `path`. Prints a status message either way, matching
    the original behavior.
    """
    path = path.strip()
    path = path.rstrip("\\")
    try:
        # makedirs + exception handling replaces the original
        # os.path.exists()-then-makedirs() check, which was racy.
        os.makedirs(path)
    except FileExistsError:
        print("{}已存在".format(path))
        return False
    print("{}创建成功".format(path))
    return True
if __name__ == "__main__":
    # Scrape window: a single day; `date` (YYYYMMDD) names the output folder.
    from_date = '2019-03-13'
    to_date = '2019-03-13'
    date = from_date.replace('-', '')
    date_path = "./scence/{}".format(date)
    # Resume state: the last scene id of the previously finished page and the
    # page counter it corresponds to; totalPages is filled in after the
    # first list request.
    lastPageScenceId = 9237427
    pageNumber = 5
    totalPages = 0
    mkdir(date_path)
    # chromeOptions = webdriver.ChromeOptions()
    # chromeOptions.add_argument('--proxy-server=https://210.16.189.230:16816')
    # browser = webdriver.Chrome(chrome_options=chromeOptions)
    browser = webdriver.Chrome()
    wait = WebDriverWait(browser, 10)
    login()
| [
"2463072824@qq.com"
] | 2463072824@qq.com |
3835b22d8900c6b757de48417b42a1a6aa1eda45 | ceabe6221dd70ef6b8e25c14dce2943e8732c453 | /keras06_RMSE.py | c2b71a160e31dcdd92def3c84c4dcc7728728c1d | [] | no_license | seheonpark/Keras | 6a68753ba869becb43f3d46146c73701bcccb676 | cbad985b0e80068e0868228998f888c45c84c81a | refs/heads/master | 2020-09-23T01:16:32.819268 | 2019-12-04T11:54:35 | 2019-12-04T11:54:35 | 225,343,049 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,222 | py | from keras.models import Sequential
from keras.layers import Dense
import numpy as np
# Toy regression: learn y = x from 1..10, evaluate on 11..20.
x_train = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
y_train = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
x_test = np.array([11, 12, 13, 14, 15, 16, 17, 18, 19, 20])
y_test = np.array([11, 12, 13, 14, 15, 16, 17, 18, 19, 20])
# x_predict = np.array([21, 22, 23, 24, 25])
# Deep fully-connected net on a single scalar input.
model = Sequential()
# model.add(Dense(501, input_dim=1, activation='relu'))
model.add(Dense(500, input_shape=(1, ), activation='relu'))
model.add(Dense(497))
model.add(Dense(495))
model.add(Dense(493))
model.add(Dense(491))
model.add(Dense(1))
model.summary()
model.compile(loss='mse', optimizer='adam', metrics=['mse']) # metrics=['accuracy']
# acc : 1.0 loss : 1.6951153725131007e-07
# acc : 1.0916937576155306e-08 loss : 1.0916937576155306e-08
model.fit(x_train, y_train, epochs=700)
loss, mse = model.evaluate(x_test, y_test)
print("mse : ", mse)
print("loss : ", loss)
y_predict = model.predict(x_test)
print(y_predict)
# Compute RMSE on the test predictions
from sklearn.metrics import mean_squared_error
def RMSE(y_test, y_predict):
    """Root-mean-squared error between true and predicted values.

    Equivalent to sqrt(sklearn.metrics.mean_squared_error(...)) for
    single-output regression; inputs are reshaped to column vectors the
    same way sklearn does before the elementwise difference.
    """
    y_true = np.asarray(y_test).reshape(-1, 1)
    y_pred = np.asarray(y_predict).reshape(-1, 1)
    return np.sqrt(np.mean((y_true - y_pred) ** 2))
print("RMSE : ", RMSE(y_test, y_predict)) | [
"noreply@github.com"
] | noreply@github.com |
13c272c3444b1b202eff2b15a9ed3f9dac8dbc7b | 6f293744f62c2e1f33250ade7cebfabc8bc24eda | /sentry_a/views.py | ec3227d201756bc62aa9a4d53a895d705ff13d47 | [] | no_license | 40huo/sentry_test | 1026552f8240404a0b1d7d0178c86a35a7e63b4b | cd57a666a41ad67a4b0f39eddd794ca5acdb4e06 | refs/heads/master | 2020-12-07T10:01:27.203541 | 2020-01-09T01:56:06 | 2020-01-09T01:56:06 | 232,698,840 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 123 | py | from django.shortcuts import HttpResponse
# Create your views here.
def index(request):
    """Respond to any request with a plain 'hello' body."""
    greeting = HttpResponse("hello")
    return greeting
| [
"git@40huo.cn"
] | git@40huo.cn |
8e6782093b61600cbe22a17ae9e9302e33c03002 | 22e88f41deec7cbfda7d6094d514e5941adc218e | /Bag3D_package.V5/delete_dtseq.py | f8471fcac22e82060bad0320ed026852162b42c5 | [] | no_license | SPURc-Lab/NGS-D9 | 6a2df9c96b4a029dd3672bd3189163e7baa3d1dc | 3e0da35912edb5ecf3b8c6b1b7acf1bc7a5853b4 | refs/heads/master | 2021-01-01T15:55:31.348720 | 2015-06-03T08:15:14 | 2015-06-03T08:15:14 | 34,987,274 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,309 | py | #!/usr/bin/python
# Usage: delete_dtseq.py <Original FASTA format file> <redundant contig name file>
# <Original FASTA format file> e.g. Dt_all_seq.fa
# <redundant contig name file> is the redundant contig name file generated from Program 3: get_gene_model.py, e.g. deleted_dtnames.txt
# Python program to filter the multiple FASTA format sequence file with the redundant contigs
import sys
def delete_dtseq():
    """Filter a multi-FASTA file, dropping the contigs named in the
    redundant-names file (argv[2]); surviving records go to
    Dt_non-redundant_seq.fa. Python 2 code.

    Both inputs are assumed sorted by the Locus/Transcript-derived key.
    NOTE(review): the key is the float Locus_N + transcript_i/transcript_n,
    so distinct contigs can collide in floating point — confirm the name
    space guarantees uniqueness.
    """
    inFile = open(sys.argv[1], 'r')
    dtFile = open(sys.argv[2], 'r')
    outFile = open("Dt_non-redundant_seq.fa", 'w')
    dt_names = []
    # Build (sort_key, confidence) pairs from names like
    # Locus_<n>_Transcript_<i>/<m>_Confidence_<c>.
    for line in dtFile.readlines():
        dt_names.append([float(line.strip().split('_Transcript')[0].split('Locus_')[1]) + \
            float(line.strip().split('_')[3].split('/')[0]) / float(line.strip().split('_')[3].split('/')[1]), line.strip().split('Confidence_')[1]])
    dt_names.sort(key=lambda x: x[0])
    #for item in dt_names:
    #    print item
    #sys.exit(0)
    cur_dt_index = 0
    deleting = False
    breaking = False
    # Merge-walk: advance through dt_names as FASTA headers ('>') stream by;
    # while `deleting`, sequence lines are skipped until the next header.
    for line in inFile:
        if '>' in line and breaking == False:
            while cur_dt_index < len(dt_names) and dt_names[cur_dt_index][0] < float(line.strip().split('_Transcript')[0].split('Locus_')[1]) + \
                float(line.strip().split('_')[3].split('/')[0]) / float(line.strip().split('_')[3].split('/')[1]):
                cur_dt_index = cur_dt_index + 1
            if cur_dt_index >= len(dt_names):
                # No redundant names left: copy everything from here on.
                outFile.write(line)
                breaking = True
                continue
            elif dt_names[cur_dt_index][0] == float(line.strip().split('_Transcript')[0].split('Locus_')[1]) + \
                float(line.strip().split('_')[3].split('/')[0]) / float(line.strip().split('_')[3].split('/')[1]):# and dt_names[cur_dt_index][1] in line:
                deleting = True
                #print dt_names[cur_dt_index][0]
                #cur_dt_index = cur_dt_index + 1
            else:
                deleting = False
                outFile.write(line)
        elif breaking == True or deleting == False:
            outFile.write(line)
    inFile.close()
    outFile.close()
if __name__=='__main__':
    # Require both the FASTA file and the redundant-names file on argv.
    if len(sys.argv) < 2:
        print 'Invalid arguments!\n'
        sys.exit(0)
    delete_dtseq()
    print 'I am DONE. Please check the output. :)'
| [
"yaolina@215-244.priv27.nus.edu.sg"
] | yaolina@215-244.priv27.nus.edu.sg |
60a9319cb5e51a72ea6172acb56753d27d908782 | 9aa52f7e5902ea8f4a2810809218d9631446345d | /backend/course/api/v1/serializers.py | 94b376e43c63bba2216fc46a5939adf50d3f51d9 | [] | no_license | crowdbotics-apps/merchandising-plays-21542 | e662e42b8766a2fc24d6e0ab926580de0b580461 | c0298b28a45a617b88984d074af4a69f4ea00700 | refs/heads/master | 2022-12-29T10:31:41.304017 | 2020-10-15T18:39:00 | 2020-10-15T18:39:00 | 304,412,965 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,622 | py | from rest_framework import serializers
from course.models import (
Recording,
Event,
Subscription,
Course,
Group,
Module,
PaymentMethod,
SubscriptionType,
Enrollment,
Lesson,
Category,
)
class GroupSerializer(serializers.ModelSerializer):
    """Exposes every field of Group."""
    class Meta:
        model = Group
        fields = "__all__"
class SubscriptionTypeSerializer(serializers.ModelSerializer):
    """Exposes every field of SubscriptionType."""
    class Meta:
        model = SubscriptionType
        fields = "__all__"
class RecordingSerializer(serializers.ModelSerializer):
    """Exposes every field of Recording."""
    class Meta:
        model = Recording
        fields = "__all__"
class CategorySerializer(serializers.ModelSerializer):
    """Exposes every field of Category."""
    class Meta:
        model = Category
        fields = "__all__"
class EventSerializer(serializers.ModelSerializer):
    """Exposes every field of Event."""
    class Meta:
        model = Event
        fields = "__all__"
class CourseSerializer(serializers.ModelSerializer):
    """Exposes every field of Course."""
    class Meta:
        model = Course
        fields = "__all__"
class ModuleSerializer(serializers.ModelSerializer):
    """Exposes every field of Module."""
    class Meta:
        model = Module
        fields = "__all__"
class LessonSerializer(serializers.ModelSerializer):
    """Exposes every field of Lesson."""
    class Meta:
        model = Lesson
        fields = "__all__"
class PaymentMethodSerializer(serializers.ModelSerializer):
    """Exposes every field of PaymentMethod."""
    class Meta:
        model = PaymentMethod
        fields = "__all__"
class EnrollmentSerializer(serializers.ModelSerializer):
    """Exposes every field of Enrollment."""
    class Meta:
        model = Enrollment
        fields = "__all__"
class SubscriptionSerializer(serializers.ModelSerializer):
    """Exposes every field of Subscription."""
    class Meta:
        model = Subscription
        fields = "__all__"
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
964ebf4b9298853c334657ff5e86b55de08c732c | 33cc5bd288a4f98be57df6ac880d49ab9937f9e7 | /examples.py | 0dcb1b9bbc3670a7a148b9f28110a7b9bfbd95ee | [] | no_license | stefantalpalaru/morelia-pcre | d848d69aab718a27c72ddca6ee5c2a803fd270bc | 814b0b664d3a449889698775e5f1e40217ba77c1 | refs/heads/master | 2021-01-01T17:47:21.614705 | 2015-07-05T17:58:02 | 2015-07-05T17:58:02 | 4,858,206 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 710 | py | #!/usr/bin/env python
import pcre
from pprint import pprint
# Demo of the morelia-pcre bindings (Python 2: uses xrange / print stmt).
# Pattern has one named group "sna"; CASELESS makes both "foob" and "FoOb" match.
pattern = r'(?<sna>fo{2})b'
subject = 'foobar FoObaz'
options = pcre.PCRE_CASELESS
compiled = pcre.pcre_compile(pattern, options)
# Studying the pattern can speed up repeated executions.
extra = pcre.pcre_study(compiled)
result = pcre.pcre_exec(compiled, subject, extra=extra)
# find the first match
print('%d matches:' % result.num_matches)
for i in xrange(result.num_matches):
    print(' "%s"' % repr(result.matches[i]))
print('named substrings:')
pprint(result.named_matches)
# find all the matches
results = pcre.pcre_find_all(compiled, subject, extra=extra)
print '*** find all ***'
for result in results:
    for i in xrange(result.num_matches):
        print(' "%s"' % repr(result.matches[i]))
| [
"stefantalpalaru@yahoo.com"
] | stefantalpalaru@yahoo.com |
0925941fe477c086703afaa2c02bab0a1f36fd82 | 50c1f7e4a3084ecd0ef72c9b20f8ea218cebe14c | /movie/urls.py | 85d544f939f483dd2237188e0c01883acb0a8856 | [] | no_license | rahulshivan05/Coder | 922240a494207d0fcf1a553d1749eb7c09c6425b | 79340971c4c1ac3123e5a65fc9fb423f87eac972 | refs/heads/main | 2023-02-17T13:50:51.491016 | 2021-01-13T15:55:22 | 2021-01-13T15:55:22 | 329,347,947 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 427 | py | from django.urls import path, include
from django.conf import settings
from django.conf.urls.static import static
from django.conf.urls.i18n import i18n_patterns
from django.utils.translation import gettext_lazy as _
from .views import *
from .import views
# URL routes for the movie app; only the list view is currently wired up,
# the detail/slug routes are kept as disabled placeholders.
urlpatterns = [
    path('', views.movie, name='movie'),
    # path('detail', views.detail, name='detail'),
    # path('<str:slug>', views.blogPost, name='blogPost'),
] | [
"rahulshivan05@gmail.com"
] | rahulshivan05@gmail.com |
5222fa56f1244047dbb221e092b2f2d0ca6f8e32 | e88436ade391f11aa69138056d926a15bd335ab8 | /home/views.py | c80486e77d5f1712c6c643efac5051f938625f28 | [
"Apache-2.0"
] | permissive | Ethan-Jeong/Django | a492a6c4f5af373e18015db560c03707adfb420b | 01268256f782bc7ca36b8f7116380309def97ea5 | refs/heads/master | 2023-06-24T13:39:27.662976 | 2021-07-29T07:58:35 | 2021-07-29T07:58:35 | 384,298,529 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 636 | py | from django.shortcuts import render
# Create your views here.
from django.http import HttpResponse
def index(request):
    """Return a plain HTML heading that says whether /home or the
    master route was requested."""
    if request.path == '/home':
        body = '<h1>여기는 Home 입니다.</h1>'
    else:
        body = '<h1>여기는 master 입니다.</h1>'
    return HttpResponse(body)
def index01(request):
    """Render index.html with a hard-coded first/second name context."""
    result = {'first':'Ethan', 'second':'Jeong'}
    return render(request,'index.html', context=result )
def index02(request):
    """Render index.html using 'first'/'second' query-string parameters.

    NOTE(review): request.GET['...'] raises MultiValueDictKeyError when a
    parameter is missing — presumably the caller always supplies both;
    verify against the template/URL that links here.
    """
    result = {'first': request.GET['first'], 'second': request.GET['second']}
    return render(request,'index.html', context=result )
"jds88guy@gmail.com"
] | jds88guy@gmail.com |
35c3037b8282d6a32e0538752d0f07e27be1b439 | f6dd42c6e7cef402c08bccd92c9a0fd3423eded9 | /ex015.py | 53540053af3f85482fdc757dfc8d7f59c5d0e765 | [] | no_license | ronicson/ronicson | 3e122e6fa8a534abc1799ef765ec85323b71f451 | 74b5738bc7467b7f25451d33e9c16f7a733731ea | refs/heads/master | 2023-07-14T23:24:09.419400 | 2021-08-25T18:17:59 | 2021-08-25T18:17:59 | 390,477,429 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 262 | py | km = float(input('Quantos kilometros você percorreu:'))
# Car-rental bill: R$60 per day plus R$0.15 per kilometre driven
# (`km` was read just above).
d = int(input('Quantos dias você permaneceu com o veiculo:'))
daily_charge = d * 60
distance_charge = km * 0.15
print('O valor em diárias é de R${} de kms é de R${:.2f} e total de R${:.2f} a pagar.'.format(daily_charge, distance_charge, daily_charge + distance_charge))
| [
"ronikism@gmail.com"
] | ronikism@gmail.com |
2543a1e62c58b091daee385222a1fcbed751cfba | 060c40375aee04f1f68352339ffa24eb74da56ef | /read-n-characters-given-read4-2.py | b25c2b8cc6d2362834bf2664b9b04ed6336dff7d | [] | no_license | cannium/leetcode | be7d9bfb3e2a999eabe6c466a5390005656cec2b | 70f16a872cb203f77eeddb812e734ad1d46df79d | refs/heads/master | 2021-01-23T20:14:47.821782 | 2020-04-12T09:34:20 | 2020-04-12T09:34:20 | 18,369,249 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,709 | py | """
The read4 API is already defined for you.
@param buf, a list of characters
@return an integer
def read4(buf):
# Below is an example of how the read4 API can be called.
file = File("abcdefghijk") # File is "abcdefghijk", initially file pointer (fp) points to 'a'
buf = [' '] * 4 # Create buffer with enough space to store characters
read4(buf) # read4 returns 4. Now buf = ['a','b','c','d'], fp points to 'e'
read4(buf) # read4 returns 4. Now buf = ['e','f','g','h'], fp points to 'i'
read4(buf) # read4 returns 3. Now buf = ['i','j','k',...], fp points to end of file
"""
class Solution(object):
    """Stateful reader built on the external read4 API.

    Characters fetched by read4 but not yet handed to a caller are kept
    in `self.bf`, so consecutive read() calls behave like a normal file.
    """
    def __init__(self):
        # Leftover characters from a previous read4 call.
        self.bf = []
        # Count of characters currently buffered in self.bf.
        self.old_size = 0
    def read(self, buf, n):
        """
        :type buf: Destination buffer (List[str])
        :type n: Number of characters to read (int)
        :rtype: The number of actual characters read (int)
        """
        nn = 0  # characters written into buf so far
        while nn < n:
            # Drain leftovers first.  NOTE(review): this branch appears
            # reachable only on the first iteration (nn == 0), since the
            # buffer is emptied before any read4 call below — the slice
            # `self.bf[n:]` relies on that; verify.
            if self.old_size > 0:
                if self.old_size >= n:
                    # Leftover alone satisfies the whole request.
                    buf[nn:n] = self.bf[:n-nn]
                    self.bf = self.bf[n:]
                    self.old_size = self.old_size - n
                    return n
                else:
                    # Consume the entire leftover, then keep reading.
                    buf[nn:nn+self.old_size] = self.bf
                    nn += self.old_size
                    self.bf = []
                    self.old_size = 0
            bf = [' '] * 4
            n4 = read4(bf)
            if nn + n4 > n:
                # read4 returned more than requested: stash the excess.
                buf[nn:n] = bf[:n-nn]
                self.bf = bf[n-nn:]
                self.old_size = nn+n4-n
                return n
            buf[nn:nn+n4] = bf
            nn += n4
            if n4 < 4:
                # Short block from read4 means end of file.
                return nn
        return nn
| [
"can@canx.me"
] | can@canx.me |
9f4bf716ff6b31f433999da81b499aaa0e0761c6 | 724f23eaa94c64b9a72abcb6df90b6ed72114a3c | /day1/var1.py | af7bcd99732d01d19b0b9bd467d099b831f631b3 | [] | no_license | wxwssg/leraning_python | e30f65fa65dd95707a7db9c3966b9514b3b1f880 | 282eae1b10ba6e1e3171e86b427a751918f85eae | refs/heads/master | 2020-04-30T08:05:53.941876 | 2019-08-06T08:47:26 | 2019-08-06T08:47:26 | 176,701,873 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 420 | py | # -*- coding:utf-8 -*-
# Author:wizard_wang
# Assignment copies the reference: both names point at the string 'wizard'.
name1 = 'wizard'
name2 = name1
print('My name is',name1,name2)
# Rebinding name1 points it at a new object; name2 still refers to 'wizard'.
name1 = 'old wang'
print(name1,name2)
# Unusual usage: Chinese characters as identifiers (Python 3 only).
名字 = '奇葩的中文变量'
print(名字)
# Python has no real constants; ALL-CAPS names signal constant intent.
NAME = '声明是常量'
print(NAME) | [
"wangxingwu17@qq.com"
] | wangxingwu17@qq.com |
701836e019bed7dc29f4b99b1ca2d9c0b7724046 | c929fe7a8983f162345ce8a9a5f2bd038c94db65 | /Track2/videowalk/code/utils_videowalk/visualize.py | e01642585e73132e56b1612367e229f950006990 | [] | no_license | YJingyu/UVO_Challenge | 7078cfb3482174badeaa0f708e4b5cb2fde0e299 | 78d1cb6d9299218fae08682e5e081eacbac29e4b | refs/heads/main | 2023-08-29T03:56:34.878106 | 2021-10-29T14:53:49 | 2021-10-29T14:53:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,772 | py | # Video's features
import numpy as np
from sklearn.decomposition import PCA
import cv2
import imageio as io
import visdom
import time
import PIL
import torchvision
import torch
import matplotlib.pyplot as plt
from matplotlib import cm
def pca_feats(ff, K=1, solver='auto', whiten=True, img_normalize=True):
    """Project a feature map down to 3*K channels with PCA so it can be
    viewed as K RGB images.

    Expects ff of shape N x C x H x W; returns one N x 3 x H x W tensor
    when K == 1, otherwise a list of K such tensors.
    """
    ## expect ff to be   N x C x H x W
    N, C, H, W = ff.shape
    pca = PCA(
        n_components=3*K,
        svd_solver=solver,
        whiten=whiten
    )
    # Flatten so every spatial location is one PCA sample: (N*H*W, C).
    ff = ff.transpose(1, 2).transpose(2, 3)
    ff = ff.reshape(N*H*W, C).numpy()
    pca_ff = torch.Tensor(pca.fit_transform(ff))
    # Restore spatial layout, channels-first: N x 3K x H x W.
    pca_ff = pca_ff.view(N, H, W, 3*K)
    pca_ff = pca_ff.transpose(3, 2).transpose(2, 1)
    # Split the 3K channels into K separate RGB triples.
    pca_ff = [pca_ff[:, kk:kk+3] for kk in range(0, pca_ff.shape[1], 3)]
    if img_normalize:
        # Rescale each image into [0, 1] for display.
        pca_ff = [(x - x.min()) / (x.max() - x.min()) for x in pca_ff]
    return pca_ff[0] if K == 1 else pca_ff
def make_gif(video, outname='/tmp/test.gif', sz=256):
    """Resize each frame of `video` to sz x sz and write a GIF, or return
    the resized frames as an array when outname is None."""
    # Tensor input (has .shape and lives on a device) is converted to a
    # channels-last numpy array first.
    if hasattr(video, 'shape'):
        video = video.cpu()
        if video.shape[0] == 3:
            # channels-first video: C x T x H x W -> T x C x H x W
            video = video.transpose(0, 1)
        video = video.numpy().transpose(0, 2, 3, 1)
    frames = []
    for frame in (video * 255).astype(np.uint8):
        frames.append(cv2.resize(frame, (sz, sz)))
    if outname is None:
        return np.stack(frames)
    io.mimsave(outname, frames, duration=0.2)
def draw_matches(x1, x2, i1, i2):
    """Brute-force match the per-location features x1/x2 between the two
    images i1/i2 (C x H x W tensors) and draw the correspondences
    side-by-side; returns a channels-first uint8 array.
    """
    # x1, x2 = f1, f2/
    detach = lambda x: x.detach().cpu().numpy().transpose(1,2,0) * 255
    i1, i2 = detach(i1), detach(i2)
    i1, i2 = cv2.resize(i1, (400, 400)), cv2.resize(i2, (400, 400))
    # NOTE(review): single-iteration loop, apparently left over from
    # experimenting with crossCheck on/off.
    for check in [True]:
        bf = cv2.BFMatcher(cv2.NORM_L2, crossCheck=check)
        # matches = bf.match(x1.permute(0,2,1).view(-1, 128).cpu().detach().numpy(), x2.permute(0,2,1).view(-1, 128).cpu().detach().numpy())
        h = int(x1.shape[-1]**0.5)  # feature map assumed square (h*h locations)
        matches = bf.match(x1.t().cpu().detach().numpy(), x2.t().cpu().detach().numpy())
        scale = i1.shape[-2] / h
        # One keypoint at the centre of each feature cell, in pixel coords.
        grid = torch.stack([torch.arange(0, h)[None].repeat(h, 1), torch.arange(0, h)[:, None].repeat(1, h)])
        grid = grid.view(2, -1)
        grid = grid * scale + scale//2
        kps = [cv2.KeyPoint(grid[0][i], grid[1][i], 1) for i in range(grid.shape[-1])]
        matches = sorted(matches, key = lambda x:x.distance)
        # img1 = img2 = np.zeros((40, 40, 3))
        out = cv2.drawMatches(i1.astype(np.uint8), kps, i2.astype(np.uint8), kps,matches[:], None, flags=2).transpose(2,0,1)
    return out
import wandb
class Visualize(object):
    """Thin wrapper around a visdom session plus optional wandb logging."""
    def __init__(self, args):
        # `args` must carry name, port and server for the visdom session.
        self._env_name = args.name
        self.vis = visdom.Visdom(
            port=args.port,
            server='http://%s' % args.server,
            env=self._env_name,
        )
        self.args = args
        self._init = False
    def wandb_init(self, model):
        """Initialize wandb exactly once and start watching the model."""
        if not self._init:
            self._init = True
            wandb.init(project="videowalk", group="release", config=self.args)
            wandb.watch(model)
    def log(self, key_vals):
        """Forward a dict of metrics to wandb."""
        return wandb.log(key_vals)
    def nn_patches(self, P, A_k, prefix='', N=10, K=20):
        # Delegates to the module-level nn_patches with this visdom handle.
        nn_patches(self.vis, P, A_k, prefix, N, K)
    def save(self):
        """Persist the current visdom environment."""
        self.vis.save([self._env_name])
def get_stride(im_sz, p_sz, res):
    """Integer stride that spaces `res` patches of size `p_sz` evenly
    across an image of size `im_sz` (first patch starting at 0)."""
    return (im_sz - p_sz) // (res - 1)
def nn_patches(vis, P, A_k, prefix='', N=10, K=20):
    """Render nearest-neighbour patch visualizations in visdom.

    For N randomly chosen patches of P, shows the top-K neighbours and
    their affinity values for each channel of the affinity matrix A_k.
    """
    # produces nearest neighbor visualization of N patches given an affinity matrix with K channels
    P = P.cpu().detach().numpy()
    # Normalize patch pixel values into [0, 1] for display.
    P -= P.min()
    P /= P.max()
    A_k = A_k.cpu().detach().numpy() #.transpose(-1,-2).numpy()
    # assert np.allclose(A_k.sum(-1), 1)
    A = np.sort(A_k, axis=-1)        # affinities sorted ascending
    I = np.argsort(-A_k, axis=-1)    # neighbour indices, best match first
    vis.text('', opts=dict(width=10000, height=1), win='%s_patch_header' %(prefix))
    for n,i in enumerate(np.random.permutation(P.shape[0])[:N]):
        p = P[i]
        vis.text('', opts=dict(width=10000, height=1), win='%s_patch_header_%s' % (prefix, n))
        # vis.image(p, win='%s_patch_query_%s' % (prefix, n))
        for k in range(I.shape[0]):
            vis.images(P[I[k, i, :K]], nrow=min(I.shape[-1], 20), win='%s_patch_values_%s_%s' % (prefix, n, k))
            vis.bar(A[k, i][::-1][:K], opts=dict(height=150, width=500), win='%s_patch_affinity_%s_%s' % (prefix, n, k))
def compute_flow(corr):
    """Convert a batched affinity matrix into integer (u, v) displacement
    maps by taking, per target location, the argmax source location and
    subtracting each location's own coordinate."""
    # assume batched affinity, shape N x H * W x W x H
    h = w = int(corr.shape[-1] ** 0.5)
    # x1 -> x2
    corr = corr.transpose(-1, -2).view(*corr.shape[:-1], h, w)
    nnf = corr.argmax(dim=1)
    u = nnf % w # nnf.shape[-1]
    v = nnf / h # nnf.shape[-2] # nnf is an IntTensor so rounds automatically
    # NOTE(review): the comment above relies on old torch semantics where
    # `/` truncated on integer tensors; recent torch returns floats here —
    # verify against the pinned torch version.
    rr = torch.arange(u.shape[-1])[None].long().cuda()
    # Absolute target coordinates -> relative displacements.
    for i in range(u.shape[-1]):
        u[:, i] -= rr
    for i in range(v.shape[-1]):
        v[:, :, i] -= rr
    return u, v
def vis_flow_plt(u, v, x1, x2, A):
    """Quiver-plot the (u, v) flow field over image x1, colouring arrows by
    sum(A * log A) per location (a negative-entropy confidence proxy)."""
    flows = torch.stack([u, v], dim=-1).cpu().numpy()
    I, flows = x1.cpu().numpy(), flows[0]
    H, W = flows.shape[:2]
    Ih, Iw, = I.shape[-2:]
    # Arrow anchors: cell centres expressed in image pixel coordinates.
    mx, my = np.mgrid[0:Ih:Ih/(H+1), 0:Iw:Iw/(W+1)][:, 1:, 1:]
    skip = (slice(None, None, 1), slice(None, None, 1))
    ii = 0  # NOTE(review): unused leftover variable
    fig, ax = plt.subplots()
    im = ax.imshow((I.transpose(1,2,0)),)
    C = cm.jet(torch.nn.functional.softmax((A * A.log()).sum(-1).cpu(), dim=-1))
    ax.quiver(my[skip], mx[skip], flows[...,0][skip], flows[...,1][skip]*-1, C)#, scale=1, scale_units='dots')
    # ax.quiver(mx[skip], my[skip], flows[...,0][skip], flows[...,1][skip])
    return plt
def frame_pair(x, ff, mm, t1, t2, A, AA, xent_loss, viz):
    """Push a suite of debug visualizations for one frame pair (t1, t2)
    to visdom: input frames, flow quiver, keypoint matches, PCA of the
    embeddings, and a per-location loss heat grid.

    Handles both whole-image input (x is B x C x T x H x W) and patch
    input (x is B x N x C x T x H x W).
    """
    normalize = lambda xx: (xx-xx.min()) / (xx-xx.min()).max()
    spatialize = lambda xx: xx.view(*xx.shape[:-1], int(xx.shape[-1]**0.5), int(xx.shape[-1]**0.5))
    N = AA.shape[-1]
    H = W = int(N**0.5)  # affinity assumed over a square grid of locations
    AA = AA.view(-1, H * W, H, W)
    ##############################################
    ## Visualize PCA of Embeddings, Correspondences
    ##############################################
    # import pdb; pdb.set_trace()
    if (len(x.shape) == 6 and x.shape[1] == 1):
        x = x.squeeze(1)
    if len(x.shape) < 6:   # Single image input, no patches
        # X here is B x C x T x H x W
        x1, x2 = normalize(x[0, :, t1]), normalize(x[0, :, t2])
        f1, f2 = ff[0, :, t1], ff[0, :, t2]
        ff1 , ff2 = spatialize(f1), spatialize(f2)
        xx = torch.stack([x1, x2]).detach().cpu()
        viz.images(xx, win='imgs')
        # Flow
        u, v = compute_flow(A[0:1])
        flow_plt = vis_flow_plt(u, v, x1, x2, A[0])
        viz.matplot(flow_plt, win='flow_quiver')
        # Keypoint Correspondences
        kp_corr = draw_matches(f1, f2, x1, x2)
        viz.image(kp_corr, win='kpcorr')
        # # PCA VIZ
        pca_ff = pca_feats(torch.stack([ff1,ff2]).detach().cpu())
        pca_ff = make_gif(pca_ff, outname=None)
        viz.images(pca_ff.transpose(0, -1, 1, 2), win='pcafeats', opts=dict(title=f"{t1} {t2}"))
    else:   # Patches as input
        # X here is B x N x C x T x H x W
        x1, x2 =  x[0, :, :, t1],  x[0, :, :, t2]
        m1, m2 = mm[0, :, :, t1], mm[0, :, :, t2]
        pca_ff = pca_feats(torch.cat([m1, m2]).detach().cpu())
        pca_ff = make_gif(pca_ff, outname=None, sz=64).transpose(0, -1, 1, 2)
        # First N grid entries come from t1, the rest from t2.
        pca1 = torchvision.utils.make_grid(torch.Tensor(pca_ff[:N]), nrow=int(N**0.5), padding=1, pad_value=1)
        pca2 = torchvision.utils.make_grid(torch.Tensor(pca_ff[N:]), nrow=int(N**0.5), padding=1, pad_value=1)
        img1 = torchvision.utils.make_grid(normalize(x1)*255, nrow=int(N**0.5), padding=1, pad_value=1)
        img2 = torchvision.utils.make_grid(normalize(x2)*255, nrow=int(N**0.5), padding=1, pad_value=1)
        viz.images(torch.stack([pca1,pca2]), nrow=4, win='pca_viz_combined1')
        viz.images(torch.stack([img1.cpu(),img2.cpu()]), opts=dict(title=f"{t1} {t2}"), nrow=4, win='pca_viz_combined2')
    ##############################################
    # LOSS VIS
    ##############################################
    # Tint each location's affinity map by its (normalized) loss value.
    color = cm.get_cmap('winter')
    xx = normalize(xent_loss[:H*W])
    img_grid = [cv2.resize(aa, (50,50), interpolation=cv2.INTER_NEAREST)[None]
                for aa in AA[0, :, :, :, None].cpu().detach().numpy()]
    img_grid = [img_grid[_].repeat(3, 0) * np.array(color(xx[_].item()))[:3, None, None] for _ in range(H*W)]
    img_grid = [img_grid[_] / img_grid[_].max() for _ in range(H*W)]
    img_grid = torch.from_numpy(np.array(img_grid))
    img_grid = torchvision.utils.make_grid(img_grid, nrow=H, padding=1, pad_value=1)
    # img_grid = cv2.resize(img_grid.permute(1, 2, 0).cpu().detach().numpy(), (1000, 1000), interpolation=cv2.INTER_NEAREST).transpose(2, 0, 1)
    viz.images(img_grid, win='lossvis')
| [
"dulucas24@gmail.com"
] | dulucas24@gmail.com |
462ff12ed72a87b6f46032cc0eeb6fd1d11f6baf | af669dbef653dd69474f4c0836582bf14262c80f | /price-test/frame/lib/commonlib/configure/configunit.py | d59369edd113378ff64e2167f6f76406ff180d06 | [] | no_license | siki320/fishtest | 7a3f91639d8d4cee624adc1d4d05563611b435e9 | 7c3f024192e1c48214b53bc45105bdf9e746a013 | refs/heads/master | 2021-01-19T21:58:36.807126 | 2017-04-19T09:56:37 | 2017-04-19T09:56:37 | 88,729,049 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 745 | py | #!/usr/bin/env python
# -*- coding: GB18030 -*-
'''
Created on 2012-3-10
@author: tongdangdang
'''
class ConfigUnit(object):
    """A single key/value entry of a ub conf configuration tree.

    Stores the key, its raw value, a reference to the parent container
    and an optional trailing comment (`note`).
    """
    def __init__(self, key, value, father, note=""):
        self.key = key        # configuration key name
        self.value = value    # raw value text
        self.level = -1       # nesting depth; -1 until placed in a tree
        self.father = father  # parent node in the configuration tree
        self.note = note      # comment attached to this entry

    def __str__(self):
        # A unit prints as its raw value.
        return self.value

    def __getitem__(self, key):
        # Subscripting a leaf yields its value regardless of the key.
        return self.value
| [
"lisiqi_i@didichuxing.com"
] | lisiqi_i@didichuxing.com |
9f7caf4fed95bc81250d025326f45ff1780ff678 | 0a995834cfd7cce1defc2e0e5e0bee8103dca2b5 | /luffy_permission/rbac/views/role.py | 7654a58cd7c5a0575b3ef191abdf7e8c98d3e419 | [] | no_license | mling17/rbac | 9bcbc6e3c0cfd66d7399a7c9599801ff9271b20e | 969b4e0c52e1eb8bd29cf67b02b26ae5b1203fe1 | refs/heads/master | 2020-11-29T19:30:56.315369 | 2019-12-26T05:05:57 | 2019-12-26T05:05:57 | 230,197,508 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,451 | py | from django.shortcuts import render, redirect, HttpResponse
from django.urls import reverse
from rbac import models
from rbac.forms.role import RoleModelForm
def role_list(request):
    """Show every Role.

    NOTE(review): non-GET requests fall through and return None —
    presumably unreachable via the URLconf; verify.
    """
    if request.method == 'GET':
        roles = models.Role.objects.all()
        return render(request, 'rbac/role_list.html', {'roles': roles})
def role_add(request):
    """Create a Role: GET shows an empty form, POST validates and saves,
    re-rendering the form with errors when invalid."""
    if request.method == 'GET':
        form = RoleModelForm()
        return render(request, 'rbac/change.html', {'form': form})
    form = RoleModelForm(data=request.POST)
    if form.is_valid():
        form.save()
        return redirect(reverse('rbac:role_list'))
    # Invalid form: re-display it with the validation errors.
    return render(request, 'rbac/change.html', {'form': form})
def role_edit(request, pk):
    """Edit the Role identified by `pk`.

    GET shows the populated form; POST validates and saves, re-rendering
    the form with errors when invalid.
    """
    obj = models.Role.objects.filter(id=pk).first()
    if not obj:
        return HttpResponse('没有此角色')
    if request.method == 'GET':
        form = RoleModelForm(instance=obj)
        return render(request, 'rbac/change.html', {'form': form})
    form = RoleModelForm(instance=obj, data=request.POST)
    if form.is_valid():
        form.save()
        return redirect(reverse('rbac:role_list'))
    # Bug fix: an invalid POST previously rendered 'rbac/role_list.html'
    # (which expects a `roles` queryset); re-render the edit form with its
    # errors instead, matching role_add.
    return render(request, 'rbac/change.html', {'form': form})
def role_del(request, pk):
    """Delete the Role identified by `pk`; GET shows a confirmation page
    whose cancel link returns to the role list."""
    origin_url = reverse('rbac:role_list')
    if request.method == 'GET':
        return render(request, 'rbac/delete.html', {'cancel': origin_url})
    # Confirmed (non-GET): delete and go back to the list.
    models.Role.objects.filter(id=pk).delete()
    return redirect(origin_url)
| [
"mling17@163.com"
] | mling17@163.com |
033635d8b06972430571c7945b9aca52940092cf | 073affd8807eb668f9e7013414a9cc39a6c34f26 | /endgame.py | 68cdc9087984a18a713aa9218c2371b002ef0439 | [] | no_license | dencynluv/Hangman | fca2a972f07cf23b96a75f15661df168b6fc92ee | 36ba194ed0f602a996b36fe45f3a17a94af3555e | refs/heads/master | 2021-01-19T11:53:15.145780 | 2017-02-21T08:39:04 | 2017-02-21T08:39:04 | 82,270,855 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 804 | py | # end game module
# imports global variables needed into this module
from setup import good_letters, bad_letters, unique_letters, secret_word, guess_limit
# function to end the game
def is_game_over():
# both lists have to match in length in order to win the game
# or else
# bad letters and the guess limit have to match to lose the game
# both statements return True which ends the game loop
if len(unique_letters) == len(good_letters):
print "\n" * 15
print "You win! The word was {}".format(secret_word)
print "\n" * 15
return True
elif len(bad_letters) == guess_limit:
print "\n" * 15
print "You didn't guess it! My word was {}".format(secret_word)
print "\n" * 15
return True
else:
return False
| [
"sotocyn@gmail.com"
] | sotocyn@gmail.com |
da255a406ffda289b1c78e5f386fd921f2efbcdd | abe02872257b18a9ad6179a057be71fe04825587 | /task_1/svm/SVM.py | 33340e6abb0e1a75b50ef17f52c0822adb0b7b56 | [] | no_license | georgeahill/tdads | 98924b9b28b826a8bbf1e374a2e1cb12db878c99 | dd7b1742452c205aca2f2abb603bd65be3807721 | refs/heads/master | 2023-03-13T09:28:29.215662 | 2021-03-03T22:20:29 | 2021-03-03T22:20:29 | 274,945,760 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,139 | py | # Preprocesses the dataset using PCA, and increasing image contrast
import numpy as np
from sklearn import svm
from sklearn.decomposition import PCA
# The threshold at which a colour value is increased instead of reduced
# These values were calculated using manual testing
# Contrast parameters, chosen by manual testing: values below the
# threshold are darkened, values at/above it are brightened.
CONTRAST_THRESHOLD = 70 # 70
CONTRAST_REDUCTION = 80 # 80
CONTRAST_INCREASE = 70 # 70

def increaseContrast(value):
    """Push a pixel value away from the threshold, clamped to [0, 255].

    Dark pixels (below CONTRAST_THRESHOLD) are darkened by
    CONTRAST_REDUCTION but never below 0; bright pixels are brightened
    by CONTRAST_INCREASE but never above 255.

    Bug fix: min/max were swapped, so dark pixels came out negative and
    bright pixels were always clipped *up* to at least 255 — the opposite
    of clamping into the valid range.
    """
    if value < CONTRAST_THRESHOLD:
        return max(0, value - CONTRAST_REDUCTION)
    else:
        return min(255, value + CONTRAST_INCREASE)

# Element-wise application of the contrast boost to whole image arrays.
VECTORISED_CONTRAST = np.vectorize(increaseContrast)
def fit(x_train, y_train):
    """Fit an RBF-kernel SVM on contrast-boosted, PCA-reduced images.

    x_train is reshaped to (n_samples, 784), so inputs are assumed to be
    28x28 greyscale images (MNIST-style) — TODO confirm.  Returns
    [classifier, fitted PCA transform] for use by predict().
    """
    clf = svm.SVC(kernel="rbf", C=10, gamma=2e-7)
    x_train = VECTORISED_CONTRAST(np.asarray(x_train).reshape((len(x_train), 784)))
    # Keep enough components to explain 90% of the variance.
    scaler = PCA(0.9).fit(x_train)
    x_train = scaler.transform(x_train)
    clf.fit(x_train, y_train)
    return [clf, scaler]
def predict(x_test, fit_return_list):
    """Predict labels for x_test using the [classifier, pca] pair returned
    by fit(), applying the same contrast + PCA preprocessing."""
    clf = fit_return_list[0]
    x_test = VECTORISED_CONTRAST(np.asarray(x_test).reshape((len(x_test), 784)))
    x_test = fit_return_list[1].transform(x_test)
    return clf.predict(x_test)
| [
"oliver.little12@gmail.com"
] | oliver.little12@gmail.com |
0c9ea3acd8dfbd941580cf0d8e50eff68889ee0b | 6c56f13050a3a8c1208030bb96e5a9dddeabd584 | /experiment/cpu_predictions_likelyhood_final_adaptation/nupic_output.py | aaaa36eb45b4a56edfe8fc530767497676dfbd8b | [] | no_license | baselm/self-healing-latex | 089dd97dbc818f65e176d977a65a6fea469684b8 | b43d5fa117aa40e9d4cf256f62f7945391d8681e | refs/heads/master | 2020-03-29T12:51:57.235243 | 2018-12-30T11:47:01 | 2018-12-30T11:47:01 | 149,924,573 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,438 | py | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Provides two classes with the same signature for writing data out of NuPIC
models.
(This is a component of the One Hot Gym Anomaly Tutorial.)
"""
import csv
from collections import deque
from abc import ABCMeta, abstractmethod
from nupic.algorithms import anomaly_likelihood
# Try to import matplotlib, but we don't have to.
try:
import matplotlib
matplotlib.use('TKAgg')
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from matplotlib.dates import date2num, DateFormatter
except ImportError:
pass
# Number of recent data points kept and drawn by the live plot.
WINDOW = 200
# Alpha used when shading highlighted (weekend/anomaly) regions.
HIGHLIGHT_ALPHA = 0.3
ANOMALY_HIGHLIGHT_COLOR = 'red'
WEEKEND_HIGHLIGHT_COLOR = 'yellow'
# Anomaly likelihood at or above this value is shaded as anomalous.
ANOMALY_THRESHOLD = 0.75
class NuPICOutput(object):
  """Abstract base for writing model output (value, prediction, anomaly
  score) to some sink — a CSV file or a live plot."""

  __metaclass__ = ABCMeta

  def __init__(self, name):
    # Label for this output (file prefix / plot title).
    self.name = name
    # Converts raw anomaly scores into anomaly likelihoods over time.
    self.anomalyLikelihoodHelper = anomaly_likelihood.AnomalyLikelihood()

  @abstractmethod
  def write(self, timestamp, value, predicted, anomalyScore):
    """Record one (timestamp, value, prediction, anomaly score) row."""
    pass

  @abstractmethod
  def close(self):
    """Finalize/flush the output."""
    pass
class NuPICFileOutput(NuPICOutput):
  """NuPICOutput that appends rows to a '<name>_out.csv' file."""

  def __init__(self, *args, **kwargs):
    super(NuPICFileOutput, self).__init__(*args, **kwargs)
    # NOTE(review): outputFiles/outputWriters look unused — all writes go
    # through the single outputFile/outputWriter below; verify before
    # removing.
    self.outputFiles = []
    self.outputWriters = []
    self.lineCount = 0
    headerRow = [
      'timestamp', 'CPU', 'prediction',
      'anomaly_score', 'anomaly_likelihood'
    ]
    outputFileName = "%s_out.csv" % self.name
    print "Preparing to output %s data to %s" % (self.name, outputFileName)
    self.outputFile = open(outputFileName, "w")
    self.outputWriter = csv.writer(self.outputFile)
    self.outputWriter.writerow(headerRow)

  def write(self, timestamp, value, predicted, anomalyScore):
    """Append one CSV row; rows with a None timestamp are skipped."""
    if timestamp is not None:
      anomalyLikelihood = self.anomalyLikelihoodHelper.anomalyProbability(
        value, anomalyScore, timestamp
      )
      outputRow = [timestamp, value, predicted, anomalyScore, anomalyLikelihood]
      self.outputWriter.writerow(outputRow)
      self.lineCount += 1

  def close(self):
    """Close the CSV file and report how many rows were written."""
    self.outputFile.close()
    print "Done. Wrote %i data lines to %s." % (self.lineCount, self.name)
def extractWeekendHighlights(dates):
  """Find runs of weekend dates and return them as highlight tuples
  (start_index, end_index, color, alpha) for chart shading."""
  highlights = []
  start = None
  for idx, date in enumerate(dates):
    is_weekend = date.weekday() in (5, 6)  # Saturday or Sunday
    if is_weekend and start is None:
      # A weekend run begins here.
      start = idx
    elif not is_weekend and start is not None:
      # The run ended just before this index; record it.
      highlights.append(
        (start, idx, WEEKEND_HIGHLIGHT_COLOR, HIGHLIGHT_ALPHA))
      start = None
  if start is not None:
    # Sequence ended mid-weekend; close the open run.
    highlights.append(
      (start, len(dates) - 1, WEEKEND_HIGHLIGHT_COLOR, HIGHLIGHT_ALPHA))
  return highlights
def extractAnomalyIndices(anomalyLikelihood):
  """Find runs of likelihood values at or above ANOMALY_THRESHOLD and
  return them as (start_index, end_index, color, alpha) tuples."""
  highlights = []
  start = None
  for idx, likelihood in enumerate(anomalyLikelihood):
    anomalous = likelihood >= ANOMALY_THRESHOLD
    if anomalous and start is None:
      # An anomalous run begins here.
      start = idx
    elif not anomalous and start is not None:
      # The run ended just before this index; record it.
      highlights.append(
        (start, idx, ANOMALY_HIGHLIGHT_COLOR, HIGHLIGHT_ALPHA))
      start = None
  if start is not None:
    # Still anomalous at the end of the sequence; close the open run.
    highlights.append(
      (start, len(anomalyLikelihood) - 1,
       ANOMALY_HIGHLIGHT_COLOR, HIGHLIGHT_ALPHA))
  return highlights
class NuPICPlotOutput(NuPICOutput):
  """NuPICOutput that renders a live matplotlib chart: actual vs. predicted
  values on top, anomaly score/likelihood below, with weekends and likely
  anomalies shaded."""

  def __init__(self, *args, **kwargs):
    super(NuPICPlotOutput, self).__init__(*args, **kwargs)
    # Turn matplotlib interactive mode on.
    plt.ion()
    self.dates = []
    self.convertedDates = []
    self.value = []
    self.allValues = []
    self.predicted = []
    self.anomalyScore = []
    self.anomalyLikelihood = []
    self.actualLine = None
    self.predictedLine = None
    self.anomalyScoreLine = None
    self.anomalyLikelihoodLine = None
    self.linesInitialized = False
    self._chartHighlights = []
    fig = plt.figure(figsize=(16, 10))
    # Main value chart 3x taller than the anomaly chart below it.
    gs = gridspec.GridSpec(2, 1, height_ratios=[3, 1])
    self._mainGraph = fig.add_subplot(gs[0, 0])
    plt.title(self.name)
    plt.ylabel('KW Energy Consumption')
    plt.xlabel('Date')
    self._anomalyGraph = fig.add_subplot(gs[1])
    plt.ylabel('Percentage')
    plt.xlabel('Date')
    # Maximizes window
    mng = plt.get_current_fig_manager()
    mng.resize(*mng.window.maxsize())
    plt.tight_layout()

  def initializeLines(self, timestamp):
    """Create the plot lines, seeding every rolling buffer with WINDOW
    copies of the first timestamp / zero values."""
    print "initializing %s" % self.name
    anomalyRange = (0.0, 1.0)
    self.dates = deque([timestamp] * WINDOW, maxlen=WINDOW)
    self.convertedDates = deque(
      [date2num(date) for date in self.dates], maxlen=WINDOW
    )
    self.value = deque([0.0] * WINDOW, maxlen=WINDOW)
    self.predicted = deque([0.0] * WINDOW, maxlen=WINDOW)
    self.anomalyScore = deque([0.0] * WINDOW, maxlen=WINDOW)
    self.anomalyLikelihood = deque([0.0] * WINDOW, maxlen=WINDOW)
    actualPlot, = self._mainGraph.plot(self.dates, self.value)
    self.actualLine = actualPlot
    predictedPlot, = self._mainGraph.plot(self.dates, self.predicted)
    self.predictedLine = predictedPlot
    self._mainGraph.legend(tuple(['actual', 'predicted']), loc=3)
    anomalyScorePlot, = self._anomalyGraph.plot(
      self.dates, self.anomalyScore, 'm'
    )
    anomalyScorePlot.axes.set_ylim(anomalyRange)
    self.anomalyScoreLine = anomalyScorePlot
    anomalyLikelihoodPlot, = self._anomalyGraph.plot(
      self.dates, self.anomalyScore, 'r'
    )
    anomalyLikelihoodPlot.axes.set_ylim(anomalyRange)
    self.anomalyLikelihoodLine = anomalyLikelihoodPlot
    self._anomalyGraph.legend(
      tuple(['anomaly score', 'anomaly likelihood']), loc=3
    )
    dateFormatter = DateFormatter('%m/%d %H:%M')
    self._mainGraph.xaxis.set_major_formatter(dateFormatter)
    self._anomalyGraph.xaxis.set_major_formatter(dateFormatter)
    self._mainGraph.relim()
    self._mainGraph.autoscale_view(True, True, True)
    self.linesInitialized = True

  def highlightChart(self, highlights, chart):
    """Shade the given (start, stop, color, alpha) regions on `chart`,
    remembering the patches so they can be removed next frame."""
    for highlight in highlights:
      # Each highlight contains [start-index, stop-index, color, alpha]
      self._chartHighlights.append(chart.axvspan(
        self.convertedDates[highlight[0]], self.convertedDates[highlight[1]],
        color=highlight[2], alpha=highlight[3]
      ))

  def write(self, timestamp, value, predicted, anomalyScore):
    """Append one data point to the rolling buffers and redraw the chart."""
    # We need the first timestamp to initialize the lines at the right X value,
    # so do that check first.
    if not self.linesInitialized:
      self.initializeLines(timestamp)
    anomalyLikelihood = self.anomalyLikelihoodHelper.anomalyProbability(
      value, anomalyScore, timestamp
    )
    self.dates.append(timestamp)
    self.convertedDates.append(date2num(timestamp))
    self.value.append(value)
    self.allValues.append(value)
    self.predicted.append(predicted)
    self.anomalyScore.append(anomalyScore)
    self.anomalyLikelihood.append(anomalyLikelihood)
    # Update main chart data
    self.actualLine.set_xdata(self.convertedDates)
    self.actualLine.set_ydata(self.value)
    self.predictedLine.set_xdata(self.convertedDates)
    self.predictedLine.set_ydata(self.predicted)
    # Update anomaly chart data
    self.anomalyScoreLine.set_xdata(self.convertedDates)
    self.anomalyScoreLine.set_ydata(self.anomalyScore)
    self.anomalyLikelihoodLine.set_xdata(self.convertedDates)
    self.anomalyLikelihoodLine.set_ydata(self.anomalyLikelihood)
    # Remove previous highlighted regions
    for poly in self._chartHighlights:
      poly.remove()
    self._chartHighlights = []
    weekends = extractWeekendHighlights(self.dates)
    anomalies = extractAnomalyIndices(self.anomalyLikelihood)
    # Highlight weekends in main chart
    self.highlightChart(weekends, self._mainGraph)
    # Highlight anomalies in anomaly chart
    self.highlightChart(anomalies, self._anomalyGraph)
    maxValue = max(self.allValues)
    self._mainGraph.relim()
    self._mainGraph.axes.set_ylim(0, maxValue + (maxValue * 0.02))
    self._mainGraph.relim()
    self._mainGraph.autoscale_view(True, scaley=False)
    self._anomalyGraph.relim()
    self._anomalyGraph.autoscale_view(True, True, True)
    plt.draw()

  def close(self):
    """Leave interactive mode and keep the final chart on screen."""
    plt.ioff()
    plt.show()
# Register both concrete writers as virtual subclasses of the ABC.
NuPICOutput.register(NuPICFileOutput)
NuPICOutput.register(NuPICPlotOutput)
| [
"baz@Basels-MacBook-Pro.local"
] | baz@Basels-MacBook-Pro.local |
88ff3ce0379daf9f75d75ac23c9191cfe9db7fb9 | d61c8fed77acfb0b977425a7cb9bbbb6983bfd17 | /blog/forms.py | cc95e11a08a2770858a0e67065adefc525aaea9a | [] | no_license | soukaina-debug/CodeStorm | cc6fcf9bc9e0209a37c2dc87b985a73c65d4dbe4 | b5a02a4520b05e4a0004c4ce62e474fca1821ac6 | refs/heads/master | 2023-08-10T20:31:18.182705 | 2021-09-29T09:42:30 | 2021-09-29T09:42:30 | 411,614,438 | 1 | 0 | null | 2021-09-29T09:41:40 | 2021-09-29T09:41:39 | null | UTF-8 | Python | false | false | 257 | py | from django import forms
from .models import Doubt,Reply
class DoubtForm(forms.ModelForm):
    """Form for posting a question; exposes only the `ask` field."""
    class Meta:
        model = Doubt
        fields = ['ask']
class ReplyForm(forms.ModelForm):
    """Form for answering a question; exposes only the `reply` field."""
    class Meta:
        model = Reply
        fields = ['reply']
| [
"apurvaajmera10@gmail.com"
] | apurvaajmera10@gmail.com |
8bc1f3af1ca811d884a225dbd76851c0ad13c46a | 1da15a0ec8eb771d4584b3997d44d2af23d53484 | /D3/1220.Magnetic.py | 2da7b7c711faaafaf1586b556cbc79aeea42fe62 | [] | no_license | cdh3261/Algorithm_Problems | 1e9ad0310490ffe5396f8cef3205885d62ebefb7 | d9ad791e9a0bcdd1c13b8e18fa993b784a53b064 | refs/heads/master | 2020-08-29T07:27:04.331917 | 2020-03-06T11:33:57 | 2020-03-06T11:33:57 | 217,966,844 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 517 | py | ####### N극 #######
####### S pole = 2 (N pole = 1) #######
# SWEA 1220: per test case, count positions where an S-pole magnet (2)
# has a non-S magnet directly above it in the same column.
for t in range(1, 11):
    n = int(input())
    arr = [list(map(int, input().split())) for _ in range(n)]
    # Collapse every column to just its magnets, top-to-bottom order kept.
    col = [[arr[r][c] for r in range(n) if arr[r][c] != 0] for c in range(n)]
    cnt = 0
    for c in range(n):
        for j in range(1, len(col[c])):
            # A 2 whose nearest magnet above is not a 2 marks a meeting point.
            if col[c][j] == 2 and col[c][j - 1] != 2:
                cnt += 1
print(f'#{t} {cnt}') | [
"cdh3261@naver.com"
] | cdh3261@naver.com |
caf4b456838e4066cfe9191405c63b482a8eda64 | 036f11eaae82a9c7838580d141375ab3c03f739a | /unsupervised-semantic-audio-embeddings/main.py | 53c20702308eebe08c34ffa43db3737d513dda3c | [] | no_license | silvadirceu/experiments | 8b6f1739a51803f73da89c137d07871505ddf712 | 2390392726a43aa5587e02d8ee2a423cf281463c | refs/heads/master | 2022-02-19T11:18:52.485742 | 2019-09-26T14:43:51 | 2019-09-26T14:43:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,422 | py | from __future__ import division, print_function
import zounds
import argparse
from data import dataset
from deformations import make_pitch_shift, make_time_stretch, additive_noise
from training_data import TripletSampler
from train import Trainer
from network import EmbeddingNetwork
import torch
import numpy as np
import cPickle as pickle
from search import TreeSearch
# Resample all audio in the dataset to 11025hz before further processing.
samplerate = zounds.SR11025()
# Base class for the audio processing graph: resamples each audio file and
# stores the transcoded result.
BaseModel = zounds.resampled(resample_to=samplerate, store_resampled=True)
# Length, in samples, of the audio segments we create embeddings for.
window_size_samples = 8192
# The same segment length expressed as a duration at the chosen sample rate.
slice_duration = samplerate.frequency * window_size_samples
# Segments occurring within ten seconds of an anchor are treated as
# semantically similar during triplet sampling.
temporal_proximity = zounds.Seconds(10)
# Audio deformations applied during training to produce "similar" examples;
# temporal proximity is included implicitly by the sampler.
deformations = [
    make_time_stretch(samplerate, window_size_samples),
    make_pitch_shift(samplerate),
    additive_noise
]
@zounds.simple_lmdb_settings(
    '/hdd/sounddb2', map_size=1e11, user_supplied_id=True)
class Sound(BaseModel):
    """
    An audio processing graph, that will resample each audio file to 11025hz
    and store the results in an LMDB database
    """
    # Short, half-lapped windows of the resampled audio, shaped with an
    # Ogg Vorbis window function.
    short_windowed = zounds.ArrayWithUnitsFeature(
        zounds.SlidingWindow,
        wscheme=zounds.HalfLapped(),
        wfunc=zounds.OggVorbisWindowingFunc(),
        needs=BaseModel.resampled)
    # Complex spectrum of each short window.
    stft = zounds.ArrayWithUnitsFeature(
        zounds.FFT,
        needs=short_windowed)
def train(network, batch_size, device, checkpoint, weights_file_path):
    """
    Train the model indefinitely.

    Args:
        network: the embedding network being trained
        batch_size: number of triplets per batch
        device: torch device the trainer runs on
        checkpoint: save weights every `checkpoint` batches
        weights_file_path: where the network's state dict is written
    """
    sampler = TripletSampler(
        Sound, slice_duration, deformations, temporal_proximity)
    trainer = Trainer(
        network=network,
        triplet_sampler=sampler,
        learning_rate=1e-4,
        batch_size=batch_size,
        triplet_loss_margin=0.25).to(device)
    for batch_num, error in enumerate(trainer.train()):
        print('Batch: {batch_num}, Error: {error}'.format(**locals()))
        if batch_num % checkpoint == 0:
            # Periodically persist the current weights.
            torch.save(network.state_dict(), weights_file_path)
def compute_all_embeddings(network):
    """
    A generator that will compute embeddings for every non-overlapping segment
    of duration window_size_samples in the database
    """
    for snd in Sound:
        # chop the resampled audio into non-overlapping, window-sized chunks
        windowed = snd.resampled.sliding_window(
            samplerate * window_size_samples).astype(np.float32)
        # run the embedding network over the windows in small batches
        arr = zounds.learn.apply_network(
            network, windowed, chunksize=64)
        # re-attach the time dimension so each embedding stays aligned with
        # the audio segment it was computed from
        ts = zounds.ArrayWithUnits(
            arr, [windowed.dimensions[0], zounds.IdentityDimension()])
        print(snd._id)
        yield snd._id, ts
def build_search_index(network, search_file_path, n_trees=32):
    """
    Build both a brute force search index, as well as an index that uses a tree
    of random hyperplane splits
    """
    try:
        # reuse a previously pickled brute-force index when one exists
        with open(search_file_path, 'rb') as infile:
            search = pickle.load(infile)
    except IOError:
        # no cached index yet: compute every embedding and pickle the result
        search = zounds.BruteForceSearch(
            compute_all_embeddings(network), distance_metric='cosine')
        with open(search_file_path, 'wb') as outfile:
            pickle.dump(search, outfile, pickle.HIGHEST_PROTOCOL)
    print('building tree...')
    return search, TreeSearch(search, n_trees=n_trees)
def visualize_embeddings(network, search_file_path):
    """Produce a 2d t-SNE scatter plot of embeddings, colored by the
    hand-labelled category of the sound each segment came from, and save it
    to t-SNE.png.

    NOTE: uses dict.iterkeys()/iteritems(), so this function is Python 2 only.
    """
    from matplotlib import cm
    from sklearn.manifold import TSNE
    from matplotlib import pyplot as plt
    # map labels/categories to some known examples of sounds that fall into
    # that category
    class_to_id = {
        'piano': {'AOC11B', 'CHOPINBallades-NEWTRANSFER'},
        'pop': {'02.LostInTheShadowsLouGramm', '08Scandalous'},
        'jazz': {'Free_20s_Jazz_Collection'},
        'hip-hop': {'LucaBrasi2', 'Chance_The_Rapper_-_Coloring_Book'},
        'speech': {
            'Greatest_Speeches_of_the_20th_Century', 'The_Speeches-8291'},
        'nintendo': {
            'CastlevaniaNESMusicStage10WalkingOnTheEdge',
            'SuperMarioBros3NESMusicWorldMap6'}
    }
    # map a color to each category
    color_map = cm.Paired
    color_index = dict(
        (key, color_map(x)) for x, key
        in zip(np.linspace(0, 1, len(class_to_id)), class_to_id.iterkeys()))
    # map sound ids to their labels (substring match against the _id)
    id_index = dict()
    for snd in Sound:
        for label, _ids in class_to_id.iteritems():
            for _id in _ids:
                if _id in snd._id:
                    id_index[snd._id] = label
    # reduce the entire database of computed embeddings to just those with the
    # ids we care about
    search, tree_search = build_search_index(
        network, search_file_path, n_trees=1)
    # build up two sequences, one that contains the indices we're interested in
    # and the other that contains the color that should be assigned to that
    # data point
    indices = []
    labels = []
    for index, pair in enumerate(search._ids):
        _id, _ = pair
        try:
            label = id_index[_id]
            labels.append(label)
            indices.append(index)
        except KeyError:
            continue
    indices = np.array(indices)
    labels = np.array(labels)
    # shuffle indices and take the first N
    new_indices = np.random.permutation(len(indices))[:int(2e4)]
    indices = indices[new_indices]
    labels = labels[new_indices]
    embeddings = search.index[indices]
    print(embeddings.shape)
    # dist = cosine_distances(embeddings, embeddings)
    # print(dist.shape)
    model = TSNE(metric='cosine')
    points = model.fit_transform(embeddings)
    print(points.shape)
    plt.figure(figsize=(15, 15))
    # one scatter call per label so the legend shows one entry per category
    for label in class_to_id.iterkeys():
        label_indices = np.where(labels == label)[0]
        p = points[label_indices]
        color = color_index[label]
        plt.scatter(p[:, 0], p[:, 1], c=[color], label=label, edgecolors='none')
    plt.xticks([])
    plt.yticks([])
    plt.legend()
    plt.savefig('t-SNE.png')
def compare_search_indices(network, search_file_path):
    """Plot how search quality varies with the number of hyperplane trees."""
    brute_force, tree_search = build_search_index(
        network, search_file_path, n_trees=64)
    tree_search.compare_and_plot(
        n_trees=[1, 2, 4, 8, 16, 32, 64],
        n_iterations=50,
        n_results=50)
def visualize_tree(network, search_file_path):
    """Render a picture of a single random-hyperplane search tree."""
    _, tree_search = build_search_index(
        network, search_file_path, n_trees=1)
    tree_search.visualize_tree()
def demo_negative_mining(network, batch_size, device):
    """Plot a 4x4 grid comparing anchor-to-positive, anchor-to-negative and
    mined anchor-to-negative distances for sixteen sampled batches, and save
    the figure to negative_mining.png.

    NOTE: uses xrange, so this function is Python 2 only.
    """
    from matplotlib import pyplot as plt, gridspec
    from itertools import product
    sampler = TripletSampler(
        Sound, slice_duration, deformations, temporal_proximity)
    trainer = Trainer(
        network=network,
        triplet_sampler=sampler,
        learning_rate=1e-4,
        batch_size=batch_size,
        triplet_loss_margin=0.25).to(device)
    spec = gridspec.GridSpec(4, 4, wspace=0.25, hspace=0.25)
    fig = plt.figure(figsize=(15, 15))
    # one subplot per sampled batch
    for x, y in product(xrange(4), xrange(4)):
        anchor_to_positive, anchor_to_negative, mined_anchor_to_negative = \
            trainer.negative_mining_demo()
        ax = plt.subplot(spec[x, y])
        ax.plot(anchor_to_positive, label='anchor-to-positive')
        ax.plot(anchor_to_negative, label='anchor-to-negative')
        ax.plot(mined_anchor_to_negative, label='mined-anchor-to-negative')
        ax.set_xticks([])
        ax.set_ylim(0, 1.0)
    plt.legend(bbox_to_anchor=(1, 0), loc="lower right")
    plt.savefig('negative_mining.png', format='png')
    fig.clf()
if __name__ == '__main__':
    # NOTE(review): `argparse`, `zounds` and `dataset` are not defined in this
    # excerpt -- presumably imported/defined earlier in the file; confirm.
    parser = argparse.ArgumentParser(parents=[
        zounds.ui.AppSettings()
    ])
    parser.add_argument(
        '--ingest',
        help='should data be ingested',
        action='store_true')
    parser.add_argument(
        '--batch-size',
        help='Batch size to use when training',
        type=int)
    parser.add_argument(
        '--checkpoint',
        help='save network weights every N batches',
        type=int)
    parser.add_argument(
        '--weights-file-path',
        help='the name of the file where weights should be saved')
    parser.add_argument(
        '--search',
        help='test the search',
        action='store_true')
    parser.add_argument(
        '--search-file-path',
        help='the path where a pre-built search should be stored',
        required=False)
    parser.add_argument(
        '--demo-negative-mining',
        help='run a demo of within-batch semi-hard negative mining',
        action='store_true')
    parser.add_argument(
        '--compare-search-indices',
        help='run a comparison of search indices',
        action='store_true')
    parser.add_argument(
        '--visualize-tree',
        help='produce a visualization of one hyperplane tree',
        action='store_true')
    parser.add_argument(
        '--visualize-embeddings',
        help='produce a 2d visualiation of the embeddings using t-SNE',
        action='store_true'
    )
    args = parser.parse_args()
    if args.ingest:
        zounds.ingest(dataset, Sound, multi_threaded=True)
    # load (or initialize) the embedding network and pick a compute device
    network, device = EmbeddingNetwork.load_network(args.weights_file_path)
    # dispatch on the mutually-exclusive mode flags; default is training
    if args.search:
        search, tree_search = build_search_index(
            network=network,
            search_file_path=args.search_file_path)
    elif args.demo_negative_mining:
        demo_negative_mining(network, args.batch_size, device)
    elif args.compare_search_indices:
        compare_search_indices(network, args.search_file_path)
    elif args.visualize_tree:
        visualize_tree(network, args.search_file_path)
    elif args.visualize_embeddings:
        visualize_embeddings(network, args.search_file_path)
    else:
        train(
            network=network,
            batch_size=args.batch_size,
            device=device,
            checkpoint=args.checkpoint,
            weights_file_path=args.weights_file_path)
    # start the in-browser zounds exploration app
    # (Sound.ogg is presumably provided by the resampled BaseModel -- confirm)
    app = zounds.ZoundsApp(
        model=Sound,
        visualization_feature=Sound.stft,
        audio_feature=Sound.ogg,
        globals=globals(),
        locals=locals())
    app.start(port=args.port)
| [
"john.vinyard@gmail.com"
] | john.vinyard@gmail.com |
ef1b5e460b554cf63dd75e50f5f716da3096df96 | 9b483cba8b680c280becd611e136329724b4d4fb | /t_rank.py | 8d07e469f4fd931ed079ca1d0dfc34a2178e6d64 | [] | no_license | dmlicht/FMAssembler | eac29ab82ff81dac196e738b0ba4fe53085c4b2e | 3842390ea3be7cb7d9de1356a0f2a116a965023b | refs/heads/master | 2021-01-01T20:16:26.243439 | 2016-12-18T22:55:18 | 2016-12-18T22:55:18 | 8,988,810 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,362 | py | class TRank(object):
"""Maintains checkpoints of t-ranks for last row in BW matrix
values of characters at checkpoint is 0 if character has not been seen at all"""
    def __init__(self, bwt, characters, cp_interval=4):
        # bwt: last column of the Burrows-Wheeler matrix (indexable sequence)
        # characters: alphabet of characters that may appear in bwt
        # cp_interval: number of rows between stored checkpoints
        self.checkpoints = {}
        self.characters = characters
        self.bwt = bwt
        self.cp_interval = cp_interval
        #setting all values to negative 1, because t-rank should be 0 at first occurrence
        self.char_occurences = { c: -1 for c in characters }
        self.create_checkpoints()
    def create_checkpoints(self):
        """iterates through last column of BW matrix recording num occurences of
        each character seen. Stores current counts in checkpoint rows"""
        # row 0 is always a checkpoint (0 % cp_interval == 0)
        for i, c in enumerate(self.bwt):
            self.char_occurences[c] += 1 #track occurence of letter
            if i % self.cp_interval == 0: #at specified interval
                self.add_checkpoint(i) #add checkpoint
def add_checkpoint(self, checkpoint_row):
"""saves number of occurences of each character at checkpoint row"""
self.checkpoints[checkpoint_row] = self.char_occurences.copy()
    def rank_at_row(self, char, row):
        """returns number of character occurences up to given row.
        INCLUDING occurences at given row"""
        #save ourselves trouble if the character does not occur at all
        if char not in self.characters or row < 0:
            return -1
        # the nearest checkpoint at or below `row` always exists, since
        # create_checkpoints stores one every cp_interval rows starting at 0
        distance_from_prev_cp = row % self.cp_interval
        prev_cp_index = row - distance_from_prev_cp
        t_rank_at_prev_index = self.checkpoints[prev_cp_index][char]
        #previous cp_index + 1 because the checkpoints count occurences that happen on their index
        occurences_after_checkpoint = self.count_up(char, prev_cp_index + 1, row + 1)
        # print prev_cp_index, row
        return t_rank_at_prev_index + occurences_after_checkpoint
def count_up(self, char, from_index, to_index):
"""counts occurences of char between from_index and to_index"""
occurences = 0
# print 'from_index:', from_index
# print 'to_index', to_index
# print len(self.bwt)
for i in xrange(from_index, to_index):
# print char
if self.bwt[i] == char:
occurences += 1
return occurences | [
"Dlichte5@jhu.edu"
] | Dlichte5@jhu.edu |
ad673bc733be8c44e976879669998549baebfa69 | 9180b7c014860820ea28af5876d374c935a8c8d3 | /01.flask/05.static.py | 5ec4c9832c93e58ed58152ba44237f616df0cac0 | [] | no_license | leele91/flask-web | 05d2ce3769b1c0a7196c828acfa3b35072fd7c97 | 7c66205d09cbcc1b044ad31e1ea6a7e882727afb | refs/heads/main | 2023-03-10T21:15:04.997561 | 2021-02-26T06:47:15 | 2021-02-26T06:47:15 | 320,127,995 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 420 | py | from flask import Flask, render_template
import os
app = Flask(__name__)
@app.route('/')
def index():
    """Render the index page, passing the cat image's mtime for cache-busting."""
    img_file = os.path.join(app.root_path, 'static/img/cat.jpg')
    # appending the mtime to the image URL makes the browser re-fetch the
    # file whenever it changes (the template applies the same trick)
    mtime = int(os.path.getmtime(img_file))
    return render_template('05.index.htm', mtime=mtime)
if __name__ == '__main__':
    # development server only; disable debug mode before deploying
    app.run(debug=True)
"wlsduddl013@gmail.com"
] | wlsduddl013@gmail.com |
1275d2dfcb26f9a06de524ae94252ec4c0261fcb | b6df9e7962725e6b4dda7873576a1bc23f748d93 | /.history/crawling_20210906175955.py | d7af46376b00aacfd63666971793f5d6ccf0b9dd | [
"MIT"
] | permissive | jk7g14/keywordcount | 8500288c0d248fc7b9227897170b8d50c8eaf0c6 | 89187bd865716a8cb8a1f50bc80265a2edf95cea | refs/heads/main | 2023-07-21T20:22:05.016630 | 2021-09-08T00:34:10 | 2021-09-08T00:34:10 | 403,511,707 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,250 | py | from requests_html import HTML, HTMLSession
from time import sleep
import csv
import datetime
# NOTE(review): this is an editor auto-save snapshot (.history); the actual
# crawling code inside the loop is commented out, so the loop only counts
# pages forever -- `while(True)` has no break, making everything after the
# loop unreachable. Also, `file` shadows the builtin of the same name.
file = open(f'data-{datetime.datetime.now()}', 'w', newline='')
writer = csv.writer(file)
writer.writerow(["name", "date", "content"])
session = HTMLSession()
url = 'https://www.ybtour.co.kr/promotion/incRepList.yb'
data = {'bnrMstNo': 20000016490, 'pageNo': 1}
while(True):
    # r = session.post(url, data=data)
    # items = r.html.find('.box_prom_temp .desc_cmt')
    # if len(items) == 0:
    #     break
    # for item in items:
    #     name = item.find('.list_user .name', first=True).text
    #     date = item.find('.list_user .date', first=True).text
    #     content = item.find('.txt_desc', first=True).text
    #     writer.writerow([name, date, content])
    #     print(f'작성자: {name}, 작성일: {date} \n')
    #     print(f'---------------------------------- \n')
    #     print(f'내용: {content}')
    #     print('------------------------------------------------------')
    data['pageNo'] += 1
    print(data['pageNo'])
    sleep(1)
file.close()
print('---------------------------완료')
print('---------------------------완료')
print('---------------------------완료')
print('---------------------------완료')
| [
"llyppp@revhat.com"
] | llyppp@revhat.com |
3a9ee750bfdb695244f25d1a4fe174c316d25b0b | 0125394835e95f5fb4b47ec2a1bc7d5a555b81c4 | /card_layout test.py | db268c3417a9c214541f02a291029381766e7690 | [] | no_license | jayesh59/05-Blackjack | a7c91214bcba446eb2e48502ba5e55ff90ecd344 | ee75275fcc5fcc04b9bca18e40585106ee824ecb | refs/heads/master | 2023-04-13T22:43:38.336361 | 2021-04-27T03:02:45 | 2021-04-27T03:02:45 | 257,320,193 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,097 | py | import cv2
import numpy as np
import matplotlib.pyplot as plt
#Initiation:
Spade = cv2.imread('Spade.jpg')
Diamond = cv2.imread('Diamond.jpg')
Club = cv2.imread('Club.jpg')
Heart = cv2.imread('Heart.jpg')
shapes = [Spade, Diamond, Club, Heart]
for s in shapes: _, s = cv2.threshold(s, 127, 255, cv2.THRESH_BINARY)
resized_shapes = []
for s in shapes:
s_copy = s.copy()
s_copy = cv2.resize(s_copy, (0,0), s_copy, 0.2, 0.2)
resized_shapes.append(s_copy)
#Base For Cards:
shape = (133, 75, 3)
img = np.ones(shape, dtype = np.uint16) * 255
img_copy = img.copy
c = 0
cards = []
for s in resized_shapes:
img = np.ones(shape, dtype = np.uint16) * 255
img_copy = img.copy()
y = int((img.shape[0] - s.shape[0])/2)
x = int((img.shape[1] - s.shape[1])/2)
w,h = (img.shape[0]-3, img.shape[1]-24)
img_copy = cv2.putText(img_copy, '10', (0, 15), cv2.FONT_HERSHEY_PLAIN, 1, (0,0,0), 2)
img_copy[y:y+s.shape[0], x:x+s.shape[1]] = s
cards.append(img_copy)
for i in cards:
plt.imshow(i)
plt.show()
plt.imshow(cards[0])
plt.show()
| [
"51175318+jayesh59@users.noreply.github.com"
] | 51175318+jayesh59@users.noreply.github.com |
4d0eb742be6248d77f5c482bc1869c2dcbcc2143 | bb1a8acd8b17b687e6ab5e25628ef11c885b9735 | /wc_script/mots_dans_codes_fiscaux.py | c4e49aab1c8ee51a7357b3643dff75b64ad04be5 | [] | no_license | adrienpacifico/french-law-word-cout | d1e4b99fcb568fa14fd29878b0204503c643680d | dfc960058beb67660a1bbef446321e33b332decc | refs/heads/master | 2021-01-10T14:55:44.653122 | 2015-12-10T17:45:54 | 2015-12-10T17:45:54 | 47,777,671 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,306 | py | # -*- coding: utf-8 -*-
import os, sys, logging, shutil, ntpath
# NOTE: this is a Python 2 script (print statement below). It shells out to
# `find | xargs wc -w` to count words in the French tax code's markdown files,
# then subtracts the words counted in README.md files.
execution_directory = os.getcwd()
#os.system("pdflatex " + "-output-directory='/Users/adrienpacifico/.tmp_latex'" + " " + '"' + tex_file + '"' )
#TODO : allows for relatives path.
cgi_path = "/Users/adrienpacifico/Informatique/loi_versionne_sous_git/textes_de_lois/codes-juridiques-francais/codes-en-vigueur/code-general-des-impots-cgi"
#os.chdir("/Users/adrienpacifico/Informatique/loi_versionne_sous_git/textes_de_lois/codes-juridiques-francais/codes-en-vigueur/code-general-des-impots-cgi")
import re
import subprocess
# matches wc's final summary line, e.g. "  12345 total"
last_line_re = re.compile('\s*(?P<n>\d+)\s+total$')
# total word count over every .md file under the CGI tree
output = subprocess.check_output(["find . -type f -name '*.md' | xargs wc -w"],cwd = cgi_path, shell = True)
last_line = output.rstrip().split("\n")[-1]
match = last_line_re.match(last_line)
words_count = int(match.group('n'))
# word count of the README.md files only, to be excluded from the total
output = subprocess.check_output(["find . -type f -name 'README.md' | xargs wc -w"],cwd = cgi_path, shell = True)
last_line = output.rstrip().split("\n")[-1]
match = last_line_re.match(last_line)
readme_words_count = int(match.group('n'))
print "program output:", words_count - readme_words_count
#print 'float_print', float(out[-15:-6])
#import ipdb
#ipdb.set_trace()
#find . -name '*' ! -name 'README.md' |xargs wc -w
| [
"adrienpacifico@gmail.com"
] | adrienpacifico@gmail.com |
9aef497bd51f3ca575e9f0400deb92ea47e6ae5f | df3c449a70bc985246e9d6263cebcd685781d05b | /Blink_Detection/blinky.py | e2c5901a077b74cdde7fb7c99f79cba749c0cf39 | [
"MIT"
] | permissive | vaaiibhav/Blink-Detection-using-Webcam-in-Python--and-OPenCV | d800a16125e94ed8816b07f0b8c034a9f0286e71 | d0ab756f6a5088223f18b96f962b711daa08db65 | refs/heads/master | 2022-04-23T22:49:07.915855 | 2020-04-28T14:48:54 | 2020-04-28T14:48:54 | 259,666,059 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,321 | py | #!/usr/bin/env python
"""blinky.py:
Starting point of blinky
"""
import extract
import webcam
import pylab
import numpy as np
def main(args):
# Extract video first
data = webcam.video2csv(args)
edgyBlinks = extract.find_blinks_using_edge(data)
outfile = "%s_blinks_using_edges.csv" % args['video_file']
print("[INFO] Writing to outfile %s" % outfile)
np.savetxt(outfile, np.array(edgyBlinks).T, delimiter=","
, header = "time,blinks")
pixalBlinks = extract.find_blinks_using_pixals(data)
outfile = "%s_blinks_using_pixals.csv" % args['video_file']
print("[INFO] Writing to outfile %s" % outfile)
np.savetxt(outfile, np.array(pixalBlinks).T, delimiter=","
, header = "time,blinks")
if __name__ == '__main__':
import argparse
# Argument parser.
description = '''description'''
parser = argparse.ArgumentParser(description=description)
class Args: pass
args = Args()
parser.add_argument('--video-file', '-f'
, required = True
, help = 'Path of the video file'
)
parser.add_argument('--bbox', '-b'
, required = False
, nargs = '+'
, type = int
, help = 'Bounding box : topx topy width height'
)
parser.parse_args(namespace=args)
main(vars(args))
| [
"vaaiibhav@live.com"
] | vaaiibhav@live.com |
da03b2d8448602dd00e2ae9394f8c2537ee7e71f | b4be1cf1ac616abdf12297b3a69226d98ae8c70b | /src/coboljsonifier/fields/field_alphanumeric_ebcdic.py | 32d18049debfef65a752fc01c42d9a4f6a62b9bf | [] | no_license | jrperin/cobol-copybook.jsonifier | 08dc7cd6d6cb95bcb7e0b59b9db949e490b179a6 | 7aefd72956e3c26456327dcd222723a04a115bf3 | refs/heads/master | 2023-09-01T00:05:08.776147 | 2023-08-22T01:28:11 | 2023-08-22T01:28:11 | 405,270,494 | 13 | 7 | null | 2023-08-22T01:23:17 | 2021-09-11T03:11:16 | Python | UTF-8 | Python | false | false | 482 | py | from .field import Field
class FieldAlphanumericEbcdic(Field):
    """
    Composite Pattern - Leaf

    Fixed-size alphanumeric COBOL field encoded in EBCDIC (code page 500).
    """

    def __init__(self, type: str, name: str, size: int):
        # alphanumeric fields have no decimal places, hence the trailing 0
        super(FieldAlphanumericEbcdic, self).__init__(type, name, size, 0)

    def parse(self, data_in):
        """Consume this field's bytes from data_in, store the decoded text
        (or None when blank), and return the remaining bytes."""
        if not data_in:
            return None
        head, tail = data_in[:self._size], data_in[self._size:]
        text = head.decode('cp500').strip()
        self._value = text if text else None
        return tail
| [
"jrperin@gmail.com"
] | jrperin@gmail.com |
6debc3f00966a216dd43e092a1d80453f069d876 | cfe737ca092cf949f99af67743e2a935f53baecf | /Natural-Language-Processing_prepprocessing_and_feature_extraction/TFIDF.py | 5ef16440787fbe0ebb5ab28f3b7296ae59f086b4 | [] | no_license | Sakil786/Natural_Language_processing | 409147d9c0fac900265c92724161c53d7a574372 | 0dd11c3c848b92837a73f5bc84e239cf3e36f27f | refs/heads/master | 2022-06-26T22:47:59.125026 | 2020-05-10T14:08:11 | 2020-05-10T14:08:11 | 255,063,391 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,020 | py |
import nltk
paragraph = """I have three visions for India. In 3000 years of our history, people from all over
the world have come and invaded us, captured our lands, conquered our minds.
From Alexander onwards, the Greeks, the Turks, the Moguls, the Portuguese, the British,
the French, the Dutch, all of them came and looted us, took over what was ours.
Yet we have not done this to any other nation. We have not conquered anyone.
We have not grabbed their land, their culture,
their history and tried to enforce our way of life on them.
Why? Because we respect the freedom of others.That is why my
first vision is that of freedom. I believe that India got its first vision of
this in 1857, when we started the War of Independence. It is this freedom that
we must protect and nurture and build on. If we are not free, no one will respect us.
My second vision for India’s development. For fifty years we have been a developing nation.
It is time we see ourselves as a developed nation. We are among the top 5 nations of the world
in terms of GDP. We have a 10 percent growth rate in most areas. Our poverty levels are falling.
Our achievements are being globally recognised today. Yet we lack the self-confidence to
see ourselves as a developed nation, self-reliant and self-assured. Isn’t this incorrect?
I have a third vision. India must stand up to the world. Because I believe that unless India
stands up to the world, no one will respect us. Only strength respects strength. We must be
strong not only as a military power but also as an economic power. Both must go hand-in-hand.
My good fortune was to have worked with three great minds. Dr. Vikram Sarabhai of the Dept. of
space, Professor Satish Dhawan, who succeeded him and Dr. Brahm Prakash, father of nuclear material.
I was lucky to have worked with all three of them closely and consider this the great opportunity of my life.
I see four milestones in my career"""
# Cleaning the texts
import re
from nltk.corpus import stopwords
from nltk.stem.porter import PorterStemmer
from nltk.stem import WordNetLemmatizer
# NOTE(review): `ps` is created but never used; only the lemmatizer is applied.
ps = PorterStemmer()
wordnet=WordNetLemmatizer()
sentences = nltk.sent_tokenize(paragraph)
corpus = []
# per sentence: keep letters only, lowercase, drop stopwords, lemmatize
# (the stopword set is rebuilt for every word -- cheap correctness, slow)
for i in range(len(sentences)):
    review = re.sub('[^a-zA-Z]', ' ', sentences[i])
    review = review.lower()
    review = review.split()
    review = [wordnet.lemmatize(word) for word in review if not word in set(stopwords.words('english'))]
    review = ' '.join(review)
    corpus.append(review)
# Creating the TF-IDF model
from sklearn.feature_extraction.text import TfidfVectorizer
cv = TfidfVectorizer()
X = cv.fit_transform(corpus).toarray()
| [
"sakilansari4@gmail.com"
] | sakilansari4@gmail.com |
d93c54dd443bb190b38b1b80e83a4f17d08b5b2d | e8ce8f41f8875b2c76d62a69e2aadd1e2503e4e0 | /Alpaca_Blog_First/Alpaca_Blog_First/urls.py | 1804a2b39b0c81fcd6e3f3693e95a29fa19da504 | [] | no_license | Alpaca-H/Pyweb | 9bdc41c9f10360b54fc5a973a4bd054211b0ef4a | 7e7c6bdde9bf24acb5f7931638e9f734a310ff3b | refs/heads/master | 2020-03-19T18:01:12.586189 | 2018-08-19T13:50:54 | 2018-08-19T13:50:54 | 136,789,743 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,116 | py | """Alpaca_Blog_First URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path,include
urlpatterns = [
    path('admin/', admin.site.urls),
    # NOTE(review): the same 'load.urls' module is included six times below,
    # all mounted at the empty prefix. URL resolution stops at the first
    # matching include, so the namespaced entries are presumably only useful
    # for reverse()/{% url %} lookups -- confirm against load/urls.py and the
    # templates before removing any of them.
    path('',include('load.urls')),
    path('',include(('load.urls','load'),namespace="index")),
    path('',include(('load.urls','load'),namespace="about")),
    path('',include(('load.urls','load'),namespace="contact")),
    path('',include(('load.urls', 'load'),namespace="full")),
    path('',include(('load.urls', 'load'),namespace="readGo")),
]
| [
"1097690268@qq.com"
] | 1097690268@qq.com |
fe7bb503a0a28eaefde625c1ebbd8c2563154270 | 6d9d83eee49c5729afb3c6b4c3f1013d2e65107b | /05_boarding_pass/solution.py | e5154ae8202d5cae334fa061d10e8b62b695af05 | [] | no_license | jpclark6/advent_of_code_2020 | 7aa706849d159ff9cd9117556588d877a9c98c80 | b7255da50c1d555aa671f87b0d44cdb39777c117 | refs/heads/main | 2023-02-18T14:33:31.408534 | 2021-01-14T22:43:36 | 2021-01-14T22:43:36 | 317,354,072 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,442 | py | """
Advent of code
Day 5
"""
def parse_row(boarding_pass):
    """Decode the first seven characters into a row number 0-127.

    'F' keeps the lower half of the remaining range and anything else keeps
    the upper half -- exactly a binary digit: F -> 0, otherwise -> 1.
    """
    row = 0
    for letter in boarding_pass[:7]:
        row = row * 2 + (0 if letter == 'F' else 1)
    return row
def parse_column(boarding_pass):
    """Decode the last three characters into a column number 0-7.

    'L' keeps the lower half of the remaining range and anything else keeps
    the upper half -- exactly a binary digit: L -> 0, otherwise -> 1.
    """
    column = 0
    for letter in boarding_pass[7:]:
        column = column * 2 + (0 if letter == 'L' else 1)
    return column
def find_seat_id(boarding_pass):
    """Combine row and column into the unique seat id: row * 8 + column."""
    return parse_row(boarding_pass) * 8 + parse_column(boarding_pass)
def part_1(passes):
    """Print the highest seat id among all boarding passes.

    Replaces the manual accumulator loop with max(); default=0 preserves the
    original behaviour of printing 0 for an empty input.
    """
    highest = max(
        (find_seat_id(boarding_pass) for boarding_pass in passes), default=0)
    print('Part 1:', highest)
def part_2(passes):
    """Print the missing seat id -- the single gap in the sorted id list.

    Iterates over adjacent pairs with zip instead of indexing all_ids[i + 1],
    which raised IndexError on the final element whenever no gap was found.
    """
    all_ids = sorted(find_seat_id(boarding_pass) for boarding_pass in passes)
    for current, following in zip(all_ids, all_ids[1:]):
        if current + 1 != following:
            print('part 2:', current + 1)
            return
if __name__ == "__main__":
    filename = './input.txt'
    # NOTE: the handle is never closed -- acceptable for a one-shot script
    text = open(filename)
    lines = text.read().split('\n')
    part_1(lines)
    part_2(lines)
"jpclark6@gmail.com"
] | jpclark6@gmail.com |
488a72882522860ee0646184122fed08d6f759bc | 5491c6305d3ce2000ad5a509b845c4e9846f6140 | /lle_isomap_plot.py | 9a81c278df20a4b52388c22ec32106fa2873743c | [] | no_license | Eason-Sun/Effects-Of-Different-Dimensionality-Reductions-On-Image-Data | 468bcaee38a8fbef53503959ebe28451ccf4a6c3 | 9697462eca9834b7f1ec01d48e30692866305854 | refs/heads/master | 2020-07-09T06:37:10.524241 | 2019-08-23T02:58:34 | 2019-08-23T02:58:34 | 203,907,911 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,986 | py | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import offsetbox
from sklearn import manifold
df = pd.read_csv('DataB.csv', index_col=0)
# class labels for every sample
class_matrix = df['gnd'].values
# Slice the data matrix so that only samples in class '3' are taken.
digit_3_df = df.loc[df['gnd'] == 3].copy()
digit_3_df.drop(columns=['gnd'], inplace=True)
# pixel rows for the digit-3 samples; plot() reshapes each row to 28x28
digit_3_matrix = digit_3_df.values
df.drop(columns=['gnd'], inplace=True)
data_matrix = df.astype(float).values
n_neighbors = 5  # Set the number of nearest neighbour to 5.
# Plot the image based on the first and second components of LLE or ISOMAP.
def plot(X, title=None, min_dist=4e-3):
    """Scatter the digit-3 images at the coordinates given by the first two
    columns of X, skipping images whose position is within min_dist of an
    already-drawn image (reads the module-level digit_3_matrix)."""
    x_min, x_max = np.min(X, 0), np.max(X, 0)
    X = (X - x_min) / (x_max - x_min)  # Min-Max Normalization.
    plt.figure()
    ax = plt.subplot(111)
    shown_images = np.array([[1., 1.]])
    for i in range(X.shape[0]):
        # Discard those samples that appear too near in the figure.
        dist = np.sum((X[i] - shown_images) ** 2, 1)
        if np.min(dist) < min_dist:
            continue
        shown_images = np.r_[shown_images, [X[i]]]
        # Map each image to their corresponding coordinates provided by the first 2 components of the projected matrix.
        imagebox = offsetbox.AnnotationBbox(offsetbox.OffsetImage(digit_3_matrix[i].reshape((28, 28)), cmap=plt.cm.gray_r), X[i],
                                            frameon=False)
        ax.add_artist(imagebox)
    plt.xticks([]), plt.yticks([])
    if title is not None:
        plt.title(title)
# Apply LLE to the dataset in class '3'.
lle = manifold.LocallyLinearEmbedding(n_neighbors, n_components=4, method='standard')
lle.fit(digit_3_matrix)
X_lle = lle.transform(digit_3_matrix)
# visualize only the first two of the four LLE components
plot(X_lle[:, 0:2], "LLE Projection", 3e-3)
# Apply ISOMAP to the dataset in class '3'.
iso = manifold.Isomap(n_neighbors, n_components=4)
iso.fit(digit_3_matrix)
X_iso = iso.transform(digit_3_matrix)
plot(X_iso[:, 0:2], "ISOMAP Projection")
plt.show()
| [
"easonsyx@gmail.com"
] | easonsyx@gmail.com |
ad91cb706816e164109bb744100c0b735da2602a | b8584fd8c5d6f7c44e92aa45c82d63ec79f6ec01 | /main.py | 149d3b22d7efaaf4d4d783f07c8e87b99ea7ff99 | [] | no_license | qcwthu/cnn-relation-extraction-with-ranking-loss | 24b2c37881f5cf6793a2a031df12aa90e98f052a | 3a73c445d3ee9bae6693e60188e7aeb5eb4c7c60 | refs/heads/master | 2022-01-13T22:36:36.918461 | 2019-05-22T08:24:50 | 2019-05-22T08:24:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,253 | py | import torch
import pandas as pd
import numpy as np
import csv
import spacy
import os
import re
from torchtext import data, datasets
import argparse
import train as trains
import model
import datetime
print('parse arguments.')
parser = argparse.ArgumentParser(description='CRCNN text classificer')
# learning
parser.add_argument('-lr', type=float, default=0.025, help='initial learning rate [default: 0.001]')
parser.add_argument('-epochs', type=int, default=300, help='number of epochs for train [default: 16]')
parser.add_argument('-batch-size', type=int, default=100, help='batch size for training [default: 256]')
parser.add_argument('-log-interval', type=int, default=100, help='how many steps to wait before logging training status [default: 500]')
parser.add_argument('-dev-interval', type=int, default=300, help='how many steps to wait before testing [default: 100]')
parser.add_argument('-save-interval', type=int, default=500, help='how many steps to wait before saving [default:500]')
parser.add_argument('-save-dir', type=str, default='snapshot', help='where to save the snapshot')
parser.add_argument('-early-stop', type=int, default=2000, help='iteration numbers to stop without performance increasing')
parser.add_argument('-save-best', type=bool, default=True, help='whether to save when get best performance')
# data
parser.add_argument('-shuffle', action='store_true', default=False, help='shuffle the data every epoch')
# model
parser.add_argument('-dropout', type=float, default=0.75, help='the probability for dropout [default: 0.5]')
parser.add_argument('-max-norm', type=float, default=0, help='l2 constraint of parameters [default: 3.0]')
parser.add_argument('-embed-dim', type=int, default=300, help='number of embedding dimension [default: 128]')
parser.add_argument('-kernel-num', type=int, default=500, help='number of each kind of kernel')
parser.add_argument('-kernel-sizes', type=str, default='2,3,4,5', help='comma-separated kernel size to use for convolution')
parser.add_argument('-static', action='store_true', default=False, help='fix the embedding')
# device
parser.add_argument('-device', type=int, default=2, help='device to use for iterate data, -1 mean cpu [default: -1]')
# option
parser.add_argument('-snapshot', type=str, default=None, help='filename of model snapshot [default: None]')
parser.add_argument('-test', action='store_true', default=False, help='train or test')
args = parser.parse_args()
print("\nParameters:")
for attr, value in sorted(args.__dict__.items()):
    print("\t{}={}".format(attr.upper(),value))
# extra hyper-parameters attached directly to the args namespace:
args.sent_len = 90    # fixed sentence length (tokens)
args.class_num = 19   # SemEval-2010 task 8 relation classes
args.pos_dim = 90     # position-embedding dimension
# mPos/mNeg/gamma are presumably the ranking-loss margins and scaling
# factor of the CRCNN objective -- confirm in model.py
args.mPos = 2.5
args.mNeg = 0.5
args.gamma = 0.05
# args.device = torch.device(args.device)
args.kernel_sizes = [int(k) for k in args.kernel_sizes.split(',')]
nlp = spacy.load('en_core_web_sm')
def tokenizer(text):
    """Tokenize *text* with spaCy and return each token's surface string."""
    return [token.text for token in nlp.tokenizer(text)]
def emb_tokenizer(l):
    """Flatten a string-encoded list-of-lists of position embeddings.

    The CSV column holds a Python literal such as "[[1, 2], [3]]". Parse it
    with ast.literal_eval instead of eval() so that arbitrary code embedded
    in a malicious data file cannot be executed, then flatten one level.
    """
    import ast  # local import: needed only here, top-level imports unchanged
    return [y for x in ast.literal_eval(l) for y in x]
# torchtext fields: padded/truncated sentence text, relation label, and the
# precomputed position-embedding indices (two per token, hence 2*sent_len)
TEXT = data.Field(sequential=True, tokenize=tokenizer,fix_length=args.sent_len)
LABEL = data.Field(sequential=False, unk_token='OTHER')
POS_EMB = data.Field(sequential=True,unk_token=0,tokenize=emb_tokenizer,use_vocab=False,pad_token=0,fix_length=2*args.sent_len)
print('loading data...')
train,valid,test = data.TabularDataset.splits(path='../data/SemEval2010_task8_all_data',
                                train='SemEval2010_task8_training/TRAIN_FILE_SUB.CSV',
                                validation='SemEval2010_task8_training/VALID_FILE.CSV',
                                test='SemEval2010_task8_testing_keys/TEST_FILE_FULL.CSV',
                                format='csv',
                                skip_header=True,csv_reader_params={'delimiter':'\t'},
                                fields=[('relation',LABEL),('sentence',TEXT),('pos_embed',POS_EMB)])
# build vocabularies; word vectors are initialized from GloVe 300d
TEXT.build_vocab(train,vectors='glove.6B.300d')
LABEL.build_vocab(train)
args.vocab = TEXT.vocab
args.cuda = torch.cuda.is_available()
# args.cuda = False
# one time-stamped snapshot directory per run
args.save_dir = os.path.join(args.save_dir,datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S'))
train_iter, val_iter, test_iter = data.Iterator.splits((train,valid,test),
                                batch_sizes=(args.batch_size,len(valid),len(test)),
                                device=args.device,
                                sort_key=lambda x: len(x.sentence),
                                # sort_within_batch=False,
                                repeat=False)
print('build model...')
cnn = model.CRCNN(args)
if args.snapshot is not None:
    print('\nLoding model from {}...'.format(args.snapshot))
    cnn.load_state_dict(torch.load(args.snapshot))
if args.cuda:
    torch.cuda.set_device(args.device)
    cnn = cnn.cuda()
if args.test:
    try:
        trains.eval(test_iter,cnn,args)
    # NOTE(review): this swallows the real exception and prints a generic
    # message -- consider logging `e` (or re-raising) when debugging
    except Exception as e:
        print("\n test wrong.")
else:
    trains.train(train_iter,val_iter,cnn,args)
"xisikongji2354@gmail.com"
] | xisikongji2354@gmail.com |
b012696da3f84d3dfe7b35af80765087353bc29f | a8aa0624c8367d77079366a16671793978f47716 | /traine_genetic.py | 3542b9d4d11f49c39ac4edfad0392a187d79f0af | [] | no_license | Arty-Facts/battleships | 35a976dddf2224999ea9d99663a24479468ccaee | 5c7ede1d5dd49cd5925d4e3715728222411babfa | refs/heads/master | 2022-11-22T06:01:26.600460 | 2020-03-02T07:10:43 | 2020-03-02T07:10:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,987 | py | from ML.neural_tagger_trainer import train_neural
from ML.nn_agent import NN_Agent
from ML.training_agent import Train
from ML.evaluvate import bench
from lib.world import World
from lib.state import State
from lib.ship import Ship
from config import *
from time import time
from random import shuffle
from pathlib import Path
import torch
PATH
def train(model):
start = time()
name_gen = Path(model).name.split(".")
gen = 1
if len(name_gen) == 1:
name = name_gen[0]
else:
name, gen = name_gen
print("Started Taining")
print("Models start at:", name, "Genaration", gen)
network , optimizer = train_neural(Train ,State , World, Ship, n=TRAINING_ROUNDS, model=model)
print("Training Done in {:.2f} s".format((time() - start)))
score = bench(network, BENCHMARK)
print("Resulting score:", score)
torch.save({
'model_state_dict': network.model.state_dict(),
'optimizer_state_dict': optimizer.state_dict(),
}, f"{PATH}/{int(score)}.{int(gen)+1}")
def main():
while True:
path = Path(PATH)
models = [str(x.name).split('.') + [x] for x in path.iterdir() if x.is_file()]
models = sorted(models)
for m in models:
print(m)
if len(models) == 0:
train("")
else:
for s,g, m in models:
train(m)
models = sorted([str(x.name).split('.') + [x] for x in path.iterdir() if x.is_file()])
removed = 0
if len(models) > GENARATIONS*1.5:
for i in range(1, len(models)):
if models[i-1][0] == models[i][0]:
models[i-1][2].unlink()
removed += 1
if not (len(models) - removed > GENARATIONS*1.5):
break
if len(models) > GENARATIONS:
for s, g , m in models[GENARATIONS+1:]:
m.unlink()
if __name__ == "__main__":
import sys
main() | [
"artal938@student.liu.se"
] | artal938@student.liu.se |
9bde88a2b2416002fb786ff4cd29779e70a3c2e9 | b5be3680c2b3404ec9c6156466b7c276bff0cf01 | /salon/admin.py | 7596574aa4e46d3d86c1088f779911155c0efef1 | [] | no_license | aselya0012777/beauty-salon | e2cf861055fd10d6a6f29fcc8306c4be0199a117 | 86abc8c3c90b494bdd16275051cdab8b4b2493fb | refs/heads/master | 2023-07-13T22:09:16.991190 | 2021-09-04T11:37:11 | 2021-09-04T11:37:11 | 370,702,229 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 812 | py | from django.contrib import admin
from .models import EmployeeSchedule, Salon, SalonServices,Employee,EmployeeSchedule
@admin.register(Salon)
class SalonAdmin(admin.ModelAdmin):
list_display = ['name', 'address','number','rating']
list_filter = ('name','address', 'rating')
@admin.register(SalonServices)
class SalonServicesAdmin(admin.ModelAdmin):
list_display = ['salon','name','price','duration']
list_filter = ('salon', 'name','price','duration')
@admin.register(Employee)
class EmployeeAdmin(admin.ModelAdmin):
list_display = ['name']
list_filter = ('name', 'service')
@admin.register(EmployeeSchedule)
class EmployeeScheduleAdmin(admin.ModelAdmin):
list_display = ['employee','date','start_hour','end_hour']
list_filter = ('employee','date','start_hour','end_hour')
| [
"aselya.0012@yahoo.com"
] | aselya.0012@yahoo.com |
68630c5fbfab57458f2f69b296ec886ae0ac252a | 869bbe81819d15a5a1bfdf591e909d15d41eba33 | /src/nlp_utils/simple_tokenizer.py | c357607198bd69cc76fed14c4ef35a3a5f1949c0 | [] | no_license | LindaMoreau08/nlp_utils | c6b5899b46760c1306c8bf01028d46444fb81d0e | b32582343b764a94af6630ecde196e5e27773c2c | refs/heads/main | 2023-07-02T06:08:42.412320 | 2021-08-04T08:09:42 | 2021-08-04T08:09:42 | 392,092,039 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,960 | py | # -*- coding: utf-8 -*-
# TODO: turn this into a class and implement it correctly!
import numpy
import regex
import stopwords
from w3lib.html import replace_entities
lang_stopwords = stopwords.get_stopwords('en')
NORM_prefix = 'norm_'
# TOKEN TYPES (TT_)l
TT_email = 'email'
TT_emoji = 'emoji'
TT_hashtag = 'hashtag'
TT_normtag = 'norm_tag'
TT_number = 'number'
TT_punct = 'punct'
TT_quotation = 'quotation'
TT_space = 'space'
TT_symbol = 'symbol'
TT_url = 'url'
TT_username = 'username'
TT_unk = 'unk'
TT_word = 'word'
# TOKEN SUBTYPES
TS_emoticon = 'emoticon'
TS_emoji = 'emoji'
TS_flag = 'flag'
TS_ordinal = 'ordinal'
TS_cardinal = 'cardinal'
TS_money = 'monetary'
TS_acronym = 'acronym'
TS_abbreviation = 'abbreviation'
# single codepoint flags
Base_flags = ['⚐', '⚑', '⛳', '⛿', '🎌', '🏁', '🏱', '🏳', '🏴', '🚩']
Space_reguex = r'[\p{Z}\p{C}]+'
# TODO: add from unicode chart
Punct_pairs = {")": "(", "]": "[", "}": "{", "»": "«", ">": "<", "´": "`", "/": "\\"}
Char_subs = {"0": 'o', "1": 'i', "3": 'e', "4": 'a', "5": "s", "7": 't', "8": 'a', "9": 'g', "@": 'a', "$": 's', "!": 'i'}
emoji = r'[\p{Emoji=Yes}&&[^\u00A9\u00Ae\u0030-\u0039\u203c\u2049\u2122]]'
emoticon_1 = r'(?:[:;][*\-]?[Ddpb\)\(\}\{o\]])|(?:[Ddpb\)\(\}\{o\]][*\-]?[:;])'
emoticon_2 = r'[03578BbPpOODdXx×VvÞþ][\p{P}\p{M}\p{Sm}\p{Sc}\p{Sk}\p{Lm}\p{InBOX_DRAWING}]+[03578BbPpOODdXx×VvÞþ]?0?3?'
emoticon_3 = r'[\p{P}\p{M}\p{Sm}\p{Sc}\p{Sk}\p{InBOX_DRAWING}]+[\p{L}\p{N}]{1,2}[\p{P}\p{M}\p{Sm}\p{Sc}\p{Sk}\p{Lm}\p{InBOX_DRAWING}]*[oO0.]?3?[\p{P}\p{M}\p{Sm}\p{Sc}\p{Sk}\p{InBOX_DRAWING}]*'
patterns = {
'html_codes': r'((?:&(?:#?[0-9a-f]+|[a-z]+);)',
'twitter_usernames': r'(?:(?<![\p{L}\p{N}])@+[\w_]+)',
'hashtags': r'(?:\#+[\w_]+[\w\'_\-]*[\w_]+)',
'norm_tags': r'(?:norm__?[_A-Z]+)',
'tags': r'(?:<\w+\/>)',
'email': r'(?:\b[\p{L}\p{N}][\p{L}\p{N}._%+\!-]+@(?:[A-Za-z0-9.\-]+\.[A-Za-z]{2,4}|\[?\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}\]?))',
'url': r'(?:(?:http|ftp|file|mailto|data|irc)s?:\/\/\S+)|(?:www2?\.\S+)|(?:\S+\.(?:com|edu|gov|org|info|biz|mil|net|name|museum|[a-z]{2}\b)(?:\/\S+)?)',
'acronyms': r'(?:\b\p{L}{1,3}\.(?:\p{L}{1,3}\.)+(?:\p{L}\b)?)|(?:\b[\p{L}&&[^aeiouAEIOU]]{1,3}\.)|(?:\b[aeiouAEIOU][\p{L}&&[^aeiouAEIOU]]{1,3}\.)|(?:\b\p{L}\.)',
'slash_abbreviation': r'(?:\p{L}\/\p{L}(?:\/\p{L})*)',
'emoticon_1': r'(?:(?<![\p{L}\p{N}])[03578BbPpOODdXx×VvÞþ][\p{P}\p{M}\p{Sm}\p{Sc}\p{Sk}\p{Lm}\p{InBOX_DRAWING}]+[03578BbPpOODdXx×VvÞþ]?0?3?(?![\p{L}\p{N}\p{P}\p{S}]))',
'emoticon_2': r'(?:(?<![\p{L}\p{N}])[\p{P}\p{M}\p{Sm}\p{Sc}\p{Sk}\p{InBOX_DRAWING}]+[\p{L}\p{N}]{1,2}[\p{P}\p{M}\p{Sm}\p{Sc}\p{Sk}\p{Lm}\p{InBOX_DRAWING}]*[oO0.]?3?[\p{P}\p{M}\p{Sm}\p{Sc}\p{Sk}\p{InBOX_DRAWING}]*(?![\p{L}\p{N}\p{P}]))',
'ordinals': r'(?:\b[0-9]*(?:1st|2nd|3rd|11th|12th|13th|[4-9]th)\b)',
'masked_words_numalpha': r'(?:\b\p{N}{1,2}\p{L}[\p{L}\p{N}\p{M}@$#*+✞]*)',
'digits': r'(?:[.+\-]?\p{N}+(?:[.,\-:\/]*\p{N}+)*)',
'contractions': r'(?:(?:n[\'’]t\b)|(?:[\'’](?:[sdm]|(?:ld)|(?:ll)|(?:re)|(?:ve)|(?:nt))\b))',
'taboo_punct': r'(?:[$*@][\p{L}\p{M}\p{N}@$#*+✞]+(?:[\-\'’,.][\p{L}\p{M}\p{N}]+)*)',
'words': r'(?:\p{L}[\p{L}\p{M}\p{N}@$#*+✞]*(?:[\-\'’][\p{L}\p{M}\p{N}@$#*+✞]+)*)',
'punct_repeat': r'(?:(?:\.\.+|--+|__+|~~+|[!?][!?]+|\*\*+|//+|##+)[\p{P}\p{M}\p{Sm}\p{Sc}\p{Sk}°\p{InBOX_DRAWING}\p{InGEOMETRIC_SHAPES}]*)',
'punct': r'(?:[\p{P}\p{M}\p{Sm}\p{Sc}\p{Sk}°\p{InBOX_DRAWING}\p{InGEOMETRIC_SHAPES}]+)',
'key_caps': r'(?:[#\*0-9]\ufe0f?\u20e3)',
'flags': r'(?:[\U0001F1E6-\U0001F1FF]{2})|(?:[⚐⚑⛳⛿🎌🏁🏱🏳🏴🚩🏴](?:[\u200d\uFE0F]{1,2}\p{So}\uFE0F?)?)',
'emojis': r'(?:'+emoji+r'[\U0001f3fb-\U0001f3ff]?\uFE0F?(?:\u200D'+emoji+r'[\U0001f3fb-\U0001f3ff]?\uFE0F?)*)',
'symbols': r'(?:\p{S}+)',
'space': r'(?:[\p{Z}\p{C}]))'
}
def join_patterns(ignore_emoticons=False, ignore_taboo=False, split_punct=True, split_repeated_punct=False):
token_reguex = ''
patterns_added = 0
for pattern_name, pattern in patterns.items():
pattern_name = pattern_name
if split_repeated_punct and pattern_name == 'punct_repeat':
continue
if split_punct and pattern_name == 'punct':
pattern = r'(?:\p{P})'
if ignore_emoticons and pattern_name.find('emoticon') >= 0:
continue
if ignore_taboo and pattern_name.find('taboo') >= 0:
continue
if patterns_added > 0:
token_reguex += '|'
token_reguex += pattern
patterns_added += 1
return regex.compile(token_reguex, regex.V1)
big_regex = join_patterns()
def tokenize(text):
return regex.findall(big_regex, text)
# Assemble token patterns into various regexes for use by the token tests.
NORM_TAG_REGEX = regex.compile(patterns['norm_tags'], regex.V1)
IS_NORM_TAG_REGEX = regex.compile('^norm_(?<norm_opt>[a-zA-Z_]+)$', regex.V1)
TOKEN_REGEX = big_regex
EMAIL_REGEX = regex.compile(patterns['email'], regex.V1) # contains email
IS_EMAIL_REGEX = regex.compile('^' + patterns['email'] + '$', regex.V1)
URL_REGEX = regex.compile(patterns['url'], regex.IGNORECASE|regex.V1) # contains url
IS_URL_REGEX = regex.compile('^' + patterns['url'] + '$', regex.IGNORECASE|regex.V1)
IS_DIGITS_REGEX = regex.compile("^" + patterns['digits'] + "$", regex.V1)
IS_MONEY_REGEX = regex.compile(r'^\p{Sc}' + patterns['digits'] + "$", regex.V1)
DIGITS_REGEX = IS_DIGITS_REGEX # TODO: for Salton, update when Salton changes have been pushed
IS_ORDINAL_REGEX = regex.compile("^" + patterns['ordinals'] + "$", regex.V1)
IS_SPACE_REGEX = regex.compile('^' + Space_reguex + '$', regex.V1)
SPACE_REGEX_SPLIT = regex.compile(Space_reguex, regex.V1)
SPACE_REGEX_KEEP = regex.compile('(' + Space_reguex + ')', regex.V1)
IS_PUNCT_REGEX = regex.compile(r'^\p{P}+$', regex.V1) # sequence of punctuation marks
IS_SYMBOL_REGEX = regex.compile("^" + patterns['symbols'] + "$", regex.V1)
IS_WORD_REGEX = regex.compile("^" + patterns['words'] + "$", regex.V1)
IS_ACRONYM_REGEX = regex.compile("^" + patterns['acronyms'] + "$", regex.V1)
STRIP_REGEX = r'^(?:[\s"\']|\\")+|(?:[\s"\']|\\")+$' # leading or trailing quotes (possibly escaped) and spaces
QUOTATION_REGEX = r'["“”][^"“”]+?["“”]'
IS_QUOTATION_REGEX = regex.compile(r'^["“”«][^"“”«»]+?["“”»]$', regex.V1)
USERNAME_REGEX = regex.compile(patterns['twitter_usernames'], regex.V1)
IS_USERNAME_REGEX = regex.compile('^' + patterns['twitter_usernames'] + '$', regex.V1)
HASHTAG_REGEX = regex.compile(patterns['hashtags'], regex.V1)
IS_HASHTAG_REGEX = regex.compile('^' + patterns['hashtags'] + '$', regex.V1)
EMOTICON_REGEX = regex.compile(patterns['emoticon_1'] + '|' + patterns['emoticon_2'], regex.V1)
IS_EMOTICON_REGEX = regex.compile('^((?:' + emoticon_1 + ')|(?:' + emoticon_2 + ')|(?:' + emoticon_3 + '))$', regex.V1)
EMOJI_REGEX = regex.compile(patterns['key_caps'] + '|' + patterns['flags'] + '|' + patterns['emojis'], regex.V1)
IS_EMOJI_REGEX = regex.compile(
'^' + patterns['key_caps'] + '|' + patterns['flags'] + '|' + patterns['emojis'] + '$', regex.V1)
CONTRACTION_REGEX = regex.compile(r'(?:(?:n[\'’]t\b)|(?:[\'’](?:[sdm]|(?:ld)|(?:ll)|(?:re)|(?:ve)|(?:nt))\b))', regex.V1)
IS_CONTRACTION_REGEX = regex.compile(r'^(?:(?:n[\'’]t\b)|(?:[\'’](?:[sdm]|(?:ld)|(?:ll)|(?:re)|(?:ve)|(?:nt))\b))$', regex.V1)
CONTRACTION_SPLIT_REGEX = regex.compile(
r'(?:^(?<word>\p{L}[\p{L}\p{M}]*?)(?<contraction>(?:n[\'’]t)|(?:[\'’](?:s|d|m|ld|ll|re|ve|nt)))$)', regex.V1)
FLAG_REGEX = regex.compile(patterns['flags'], regex.V1)
IS_FLAG_REGEX = regex.compile('^' + patterns['flags'] + '$', regex.V1)
KEY_CAP_REGEX = regex.compile(patterns['flags'], regex.V1)
IS_KEY_CAP_REGEX = regex.compile('^' + patterns['flags'] + '$', regex.V1)
IS_SLASH_ABBREVIATION_REGEX = regex.compile('^' + patterns['slash_abbreviation'] + '$', regex.V1)
# TODO: this is not terribly efficient to scan with so many regexes
def pre_process_text(text, options):
if options['norm_codes']:
text = replace_entities(text)
if options['norm_quotations']:
text = regex.sub(QUOTATION_REGEX, "#{NORM_QUOTATION}", text)
if options['norm_space'] and options['keep_space']:
text = regex.sub(SPACE_REGEX_SPLIT, ' ', text)
return text
def regex_tokenize(text, keep_space=False, norm_space=False):
rgx_tokenize(text, keep_space=keep_space, norm_space=norm_space)
def rgx_tokenize(text, keep_space=False, norm_space=False, pre_process=True, ignore_emoticons=False, split_punct=False,
split_repeated_punct=False):
if pre_process:
text = pre_process_text(text, {keep_space: keep_space, norm_space: norm_space})
tokenization_reguex = join_patterns(ignore_emoticons=ignore_emoticons, split_punct=split_punct,
split_repeated_punct=split_repeated_punct )
if keep_space:
tokens = text.scan(tokenization_reguex)
else:
tokens = text.split(SPACE_REGEX_SPLIT)
tokens = map(lambda x: regex.findall(tokenization_reguex, x), tokens)
tokens = numpy.array(tokens).flatten()
# tokens = retokenize(tokens, options)
return tokens
def retokenize(tokens, opts):
tokenization_reguex = join_patterns(ignore_emoticons=True, ignore_taboo=True, split_punct=opts['split_punct'],
split_repeated_punct=opts['split_repeated_punct']) #if ignore_emoticons
retokenized = []
for token in tokens:
token_type = get_token_type(token)
if is_money(token):
retokenized.append(token.scan(tokenization_reguex))
elif is_taboo(token) or token_type[0] != TT_unk:
retokenized.append(token)
else:
retokenized.append(token.scan(tokenization_reguex))
new_tokens = numpy.array(retokenized).flatten()
return new_tokens
# TODO: Port taboo list
def is_taboo(text):
if text in ['shit', 'hell']:
return True
return False
def is_stopword(text):
return text.strip().lower() in lang_stopwords
# TODO: implement this loading elsewhere
def is_lang_stopword(text, lang='en'):
if lang != 'en':
the_stopwords = stopwords.get_stopwords(lang)
return is_stopword(text)
# TODO: implement features
#def has_feature(text, feature, language):
# my_featurizer.has_feature?(text, feature, language)
def extract_emails(text):
return regex.findall(EMAIL_REGEX, text)
def contains_email(text):
return regex.search(EMAIL_REGEX, text)
def is_email(text):
return regex.match(IS_EMAIL_REGEX, text.strip()) is not None
def extract_urls(text):
return regex.findall(URL_REGEX, text)
def contains_url(text):
return regex.search(URL_REGEX, text)
def is_url(text):
return regex.match(IS_URL_REGEX, text.strip())
def extract_hashtags(text):
return regex.findall(HASHTAG_REGEX, text)
def contains_hashtag(text):
return regex.search(HASHTAG_REGEX, text)
def is_hashtag(text):
return regex.match(IS_HASHTAG_REGEX, text.strip())
def extract_usernames(text):
return regex.findall(USERNAME_REGEX, text)
def contains_username(text):
return regex.search(USERNAME_REGEX, text)
def is_username(text):
return regex.match(IS_USERNAME_REGEX, text.strip())
def extract_emojis(text):
return regex.findall(EMOJI_REGEX, text)
def contains_emoji(text):
return regex.search(EMOJI_REGEX, text)
def is_emoji(text):
return regex.match(IS_EMOJI_REGEX, text.strip())
def extract_emoticons(text):
return regex.findall(EMOTICON_REGEX, text)
def contains_emoticon(text):
return regex.search(EMOTICON_REGEX, text)
# TODO: reimplement emoticon file reading with detection of unknown emoticons, etc.
def is_emoticon(text):
return regex.match(IS_EMOTICON_REGEX, text.strip())
def is_emoji_or_emoticon(text):
return is_emoji(text) or is_emoticon(text)
def is_digits(text):
return regex.match(IS_DIGITS_REGEX, text.strip())
def is_money(text):
return regex.match(IS_MONEY_REGEX, text.strip())
def is_ordinal(text):
return regex.match(IS_ORDINAL_REGEX, text.strip())
def is_space(text):
return regex.match(IS_SPACE_REGEX, text)
def is_punct(text):
return regex.match(IS_PUNCT_REGEX, text)
def is_symbol(text):
return regex.match(IS_SYMBOL_REGEX, text)
def is_normtag(text):
return regex.match(IS_NORM_TAG_REGEX, text)
def is_word(text):
return regex.match(IS_WORD_REGEX, text)
def is_acronym(text):
return regex.match(IS_ACRONYM_REGEX, text)
def is_slash_abbreviation(text):
return regex.match(IS_SLASH_ABBREVIATION_REGEX, text)
def is_quotation(text):
text = text.strip().lower()
return text == "#{NORM_prefix}#{TT_quotation}".lower() or regex.match(IS_QUOTATION_REGEX, text)
def has_contraction(text):
return regex.search(CONTRACTION_REGEX, text)
def is_contraction(text):
return has_contraction(text)
def is_flag(text):
return text.strip() in Base_flags or regex.match(IS_FLAG_REGEX, text.strip())
def all_symbol_or_punct(text, ignore_space=True):
if ignore_space:
text = regex.sub(r'\p{Z}', '', text)
return regex.match(r'^[\p{P}\p{S}\p{M}\p{Lm}]+$', text)
def all_emoji_chars(text, ignore_space=True):
if ignore_space:
text = regex.sub(r'\p{Z}', '', text)
return regex.match(r'^[へ\p{P}\p{S}\p{M}\p{Lm}\p{In_BOX_DRAWING}\p{In_GEOMETRIC_SHAPES}]+$', text)
# TODO: consider whether \p{Lm} belongs
def symbol_mark_punct_count(text):
match_count = 0
matches = regex.findall(r'[\p{P}\p{M}\p{S}]', text)
if matches:
match_count = len(matches)
return match_count
# get the token type and subtype, if applicable
def get_token_type(token_text):
if is_space(token_text):
return [TT_space, '']
if is_quotation(token_text):
return [TT_quotation, '']
if is_normtag(token_text):
return [TT_normtag, '']
if is_email(token_text):
return [TT_email, '']
if is_url(token_text):
return [TT_url, '']
if is_acronym(token_text):
return [TT_word, TS_acronym]
if is_slash_abbreviation(token_text):
return [TT_word, TS_abbreviation]
if is_word(token_text):
return [TT_word, '']
if is_hashtag(token_text):
return [TT_hashtag, '']
if is_ordinal(token_text):
return [TT_number, TS_ordinal]
if is_digits(token_text):
return [TT_number, TS_cardinal]
if is_money(token_text):
return [TT_number, TS_money]
if is_username(token_text):
return [TT_username, '']
if is_flag(token_text):
return [TT_emoji, TS_flag]
if is_emoji(token_text):
return [TT_emoji, TS_emoji]
if is_emoticon(token_text):
return [TT_emoji, TS_emoticon]
if is_punct(token_text):
return [TT_punct, '']
if is_symbol(token_text):
return [TT_symbol, '']
return [TT_unk, '']
| [
"lindamoreau08@gmail.com"
] | lindamoreau08@gmail.com |
dc2b280772e197fed6525ef2bb9002c0ee5025f7 | 6e7463cfc51a2b190a42ea45b416cdcade6cffe8 | /Examenes/E1.3.Final/main.py | c993af08190cc29a774e5422c23db332c32ca9cc | [] | no_license | JoanAndoni/CyPS_2018 | f8b510c0c53b6426831678869549abaad169c056 | 945fe2f335f05ecaf829a53ea25d670c8e8c0bc6 | refs/heads/master | 2020-03-25T16:02:39.900345 | 2018-11-20T23:41:29 | 2018-11-20T23:41:29 | 143,912,199 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,008 | py | # Joan Andoni Gonzalez Rioz
# A00569929
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from time import sleep
import csv
driver = webdriver.Chrome()
driver.get("http://fiware01.cem.itesm.mx:3000/~certificatec/pronosticos")
driver.find_element_by_id("login_username").send_keys("ariel.garcia@itesm.mx")
driver.find_element_by_id("login_password").send_keys("1234")
driver.find_element_by_id("login_submit").click()
sleep(1)
driver.find_element_by_xpath('//*[@id="nav"]/div/ul/li[7]/a').click()
sleep(1)
driver.find_element_by_xpath('//*[@id="sidebar"]/ul[1]/li[5]/a').click()
sleep(1)
table = len(driver.find_elements_by_tag_name("tr"))+1
alumnos = [['Matricula', 'Nombre', 'Materia', 'Codigo']]
for row in xrange(1,table):
matricula = driver.find_element_by_xpath('//*[@id="candidatos_container"]/tbody/tr['+str(row)+']/td[1]/a').text
nombre = driver.find_element_by_xpath('//*[@id="candidatos_container"]/tbody/tr['+str(row)+']/td[2]/a').text
#print(matricula)
#print(nombre)
driver.find_element_by_xpath('//*[@id="candidatos_container"]/tbody/tr['+str(row)+']/td[1]/a').click()
sleep(1)
driver.find_element_by_xpath('//*[@id="sidebar"]/ul[1]/li[3]/a').click()
sleep(1)
tableMaterias = len(driver.find_elements_by_tag_name("tr"))+1
for row2 in xrange(1,tableMaterias):
codigo = driver.find_element_by_xpath('//*[@id="pronosticos_container"]/tbody/tr['+str(row2)+']/td[1]').text
nombreMateria = driver.find_element_by_xpath('//*[@id="pronosticos_container"]/tbody/tr['+str(row2)+']/td[2]').text
#print(codigo)
#print(nombreMateria)
sleep(1)
alumnos.append([matricula.encode('utf-8'), nombre.encode('utf-8'), nombreMateria.encode('utf-8'), codigo.encode('utf-8')])
driver.back()
driver.back()
sleep(1)
#print(alumnos)
with open('pronosticos.csv', 'a') as csvFile:
writer = csv.writer(csvFile)
for col in alumnos:
writer.writerow(col)
csvFile.close()
| [
"mcr_joan@hotmail.com"
] | mcr_joan@hotmail.com |
52258e928c4847ff0ce541f546f10238923ecc97 | 086c483bec404a0aaafdd81dd2eb20760d8b6d90 | /prueba_hilos.py | 0e94a7bdfda78d5ca92d7386c99887699f9d4b22 | [] | no_license | MariaFernandaG/raspberry | 9faa8eaa23c9df2597f22e09d5cd825e3d2614d5 | 176ce4b26ef01007d92740ecacbf47b02844f808 | refs/heads/main | 2023-07-17T12:29:13.281874 | 2021-09-02T20:49:49 | 2021-09-02T20:49:49 | 367,141,942 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,790 | py | #OXIMETRO, PESA, TEMPERATURA Y ALTURA
#lIBRERIAS
import RPi.GPIO as GPIO
import time
from time import sleep
import serial
import threading
#TEMPERATURA
import board
import adafruit_mlx90614
GPIO.setwarnings(False)
#GPIO Mode (BOARD / BCM)
GPIO.setmode(GPIO.BCM)
#set GPIO Pins
GPIO_TRIGGER = 18
GPIO_ECHO = 24
GPIO_TRIGGER_AL = 16
GPIO_ECHO_AL = 25
#set GPIO direction (IN / OUT)
GPIO.setup(GPIO_TRIGGER, GPIO.OUT)
GPIO.setup(GPIO_ECHO, GPIO.IN)
GPIO.setup(GPIO_TRIGGER_AL, GPIO.OUT)
GPIO.setup(GPIO_ECHO_AL, GPIO.IN)
#VARIABLES ALTURA
global altura
altura = 0.0
#VARIABLES TEMPERATURA
global promedioTemperatura
promedioTemperatura = 0.0
#VARIABLES PESO
global flag_peso
#VARIABLES OXIGENO
flag_ox = 0
flag_peso = 0
#SERIAL
global ser
ser = serial.Serial('/dev/ttyUSB0', 115200, timeout=1)
#EQUIVALENTE A MILLIS() DE ARDUINO
millis = lambda: int(round(time.time() * 1000))
def readTemperatura():
global promedioTemperatura
promedioTemperatura = 0.0
i2c = board.I2C()
mlx = adafruit_mlx90614.MLX90614(i2c)
temp = 0.0
tLow = 0.0
tHigh = 0.0
TA = 0.0
TF = 0.0
TCore = 0.0
varTemp = 0.007358834
varProcess = 1e-9
Pc = 0.0
G = 0.0
P = 1.0
Xp = 0.0
Zp = 0.0
Xe = 0.0
for i in range(10):
temp = mlx.object_temperature
while temp < 0 or temp > 45:
print("Sensor no responde")
#mlx.writeEmissivity(0.98);
time.sleep(100)
temp = mlx.object_temperature
print(temp)
#FILTRO KALMAN
Pc = P + varProcess
G = Pc / (Pc + varTemp)
P = (1 - G) * Pc
Xp = Xe
Zp = Xp
Xe = G * (temp - Zp) + Xp
time.sleep(0.01) #delay de 10ms
TA = mlx.ambient_temperature
if TA <= 25:
tLow = 32.66 + 0.186 * (TA - 25)
tHigh = 34.84 + 0.148 * (TA - 25)
if TA > 25:
tLow = 32.66 + 0.086 * (TA - 25)
tHigh = 34.84 + 0.100 * (TA - 25)
TF = Xe
if TF < tLow:
TCore = 36.3 + (0.551658273 + 0.021525068 * TA) * (TF - tLow)
if tLow < TF and TF < tHigh:
TCore = 36.3 + 0.5 / (tHigh - tLow) * (TF - tLow)
if TF > tHigh:
TCore = 36.8 + (0.829320618 + 0.002364434 * TA) * (TF - tHigh)
promedioTemperatura = TCore
def medicionTemperatura():
distance = 0.0;
distanceValid = 0
t_inicio = millis()
while distanceValid == 0:
t_final = millis()
t_total = t_final - t_inicio
getAltura(1) #utilizar el sensor 1
distance = altura
#print(t_total)
if distance > 4 and distance <= 6:
distanceValid = 1
else:
distanceValid = 0
if distanceValid == 1:
readTemperatura()
if promedioTemperatura > 34 and promedioTemperatura < 42.1:
print("Temperatura: ",promedioTemperatura)
else:
print("Temperatura fuera de los límites: ",promedioTemperatura)
#print(distance)
distanceValid = 2
if t_total > 12000:
print("No se realizó la medición")
distanceValid = 2
def readAltura(n_sensor):
#TEMPERATURA
if n_sensor == 1:
# set Trigger to HIGH
GPIO.output(GPIO_TRIGGER, True)
# set Trigger after 0.01ms to LOW
time.sleep(0.00001)
GPIO.output(GPIO_TRIGGER, False)
StartTime = time.time()
StopTime = time.time()
# save StartTime
while GPIO.input(GPIO_ECHO) == 0:
StartTime = time.time()
# save time of arrival
while GPIO.input(GPIO_ECHO) == 1:
StopTime = time.time()
# time difference between start and arrival
TimeElapsed = StopTime - StartTime
# multiply with the sonic speed (34300 cm/s)
# and divide by 2, because there and back
distance = (TimeElapsed * 34300) / 2
#distance = TimeElapsed * 0.01715
#ALTURA
if n_sensor == 2:
# set Trigger to HIGH
GPIO.output(GPIO_TRIGGER_AL, True)
# set Trigger after 0.01ms to LOW
time.sleep(0.00001)
GPIO.output(GPIO_TRIGGER_AL, False)
StartTime = time.time()
StopTime = time.time()
# save StartTime
while GPIO.input(GPIO_ECHO_AL) == 0:
StartTime = time.time()
# save time of arrival
while GPIO.input(GPIO_ECHO_AL) == 1:
StopTime = time.time()
# time difference between start and arrival
TimeElapsed = StopTime - StartTime
# multiply with the sonic speed (34300 cm/s)
# and divide by 2, because there and back
distance = (TimeElapsed * 34300) / 2
#distance = TimeElapsed * 0.01715
return distance
def getAltura(n_sensor):
global altura
varDistance = 0.0567274773869351 #variance determined using excel and reading samples of raw sensor data
varProcessD = 1e-8
temp = 0.0
prom = 0
PcD = 0.0
GD = 0.0
PD = 1.0
XpD = 0.0
ZpD = 0.0
XeD = 0.0
flag_alt = 0
while flag_alt == 0:
for i in range(25):
temp = round(readAltura(n_sensor))
prom += temp
#Filtro Kalman
PcD = PD + varProcessD
GD = PcD / (PcD + varDistance)
PD = (1 - GD) * PcD
XpD = XeD
ZpD = XpD
XeD = GD * (temp - ZpD) + XpD
time.sleep(0.01) #delay de 10ms
prom /= 25
prom += 1
altura = (XeD) + 1
#print(altura)
if altura > 250 or altura < 0:
flag_alt = 0
#print("fuera límite: ", altura)
else:
flag_alt = 1
def medicionAltura():
global altura
menu = 0
if menu == 0:
getAltura(2) #utilizar el sensor 2
refaltura = altura
print("Referencia: ", refaltura)
menu = 1
while menu == 1:
print("Desea medir altura y/n")
entrada = input()
if entrada == "y":
getAltura(2)
altura = refaltura - altura
print("Altura: ",altura)
#menu = 0
elif entrada == "n":
print("No se midió altura")
menu = 0
else:
print("Instrucción no válida")
def lectura_calibracion():
global flag_peso
global ser
while True:
if flag_peso == 1:
enviar = input()
if enviar == "+":
ser.write(b"+\n")
#print("mas")
elif enviar == "-":
ser.write(b"-\n")
#print("menos")
elif enviar == "salir":
ser.write(b"salir\n")
flag_peso = 0
#print("salir")
else:
print("comando inválido")
if __name__ == '__main__':
try:
ser.flush()
#e = threading.Event()
#CREACIÓN DE HILO
#hilo1 = threading.Thread(target=lectura_calibracion, args=(e,))
hilo1 = threading.Thread(target=lectura_calibracion)
hilo1.start()
while True:
print("Insertar opción: oxigeno, temperatura, altura o peso")
medicion = input()
#print(medicion)
if medicion == "temperatura":
print("Medir temperatura")
medicionTemperatura()
elif medicion == "altura":
print("Medir altura")
medicionAltura()
elif medicion == "oxigeno":
print("Medir oxigeno")
flag_ox = 1
ser.write(b"oxigeno\n")
while flag_ox == 1:
if ser.in_waiting > 0:
line = ser.readline().decode('utf-8').rstrip()
print(line)
#time.sleep(1)
elif medicion == "peso":
ser.write(b"peso\n")
print("Opción: calibrar o medir")
medicion2 = input()
if medicion2 == "calibrar":
ser.write(b"calibrar\n")
print("Coloque una masa conocida sobre la pesa e inserte el valor de dicha masa.")
masa = input()
if masa != 0:
ser.write(masa.encode('utf-8')) #REVISAR ESTO
flag_peso = 1
while flag_peso == 1:
if ser.in_waiting > 0:
line = ser.readline().decode('utf-8').rstrip()
print(line)
#if line == "salir":
#flag_peso = 0
#e.clear()
elif medicion2 == "medir":
ser.write(b"medir\n")
flag_peso = 1
while flag_peso == 1:
if ser.in_waiting > 0:
line = ser.readline().decode('utf-8').rstrip()
print(line)
if line == "finp":
line = ser.readline().decode('utf-8').rstrip()
print(line)
flag_peso = 0
else:
print("Instrucción no válida")
else:
print("Instrucción no válida")
# Reset by pressing CTRL + C
except KeyboardInterrupt:
print("Measurement stopped by User")
GPIO.cleanup()
| [
"noreply@github.com"
] | noreply@github.com |
6e39adc45883cef0bb9f57391ecb69ad5f28f226 | 77ed9133c4c184a93539d0eea303f2da9e4448cf | /models/CNN/LeNet.py | 0350cd1c51aef3a7d7ecd34a5eadd4fd2e175f43 | [] | no_license | AgFeather/StudyNote | 16e1bc34196cd2bce2ef26aed7eb3010e6adba5e | 4c258e73153cf38a6392937a75fa560b2a6bcc97 | refs/heads/master | 2020-06-22T11:31:10.176663 | 2019-10-10T13:08:49 | 2019-10-10T13:08:49 | 197,707,284 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,594 | py | import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
IMAGE_SIZE = 28
NUM_CHANNELS = 1
NUM_LABELS = 10
# 第一层卷积层的尺寸和深度
CONV1_DEEP = 32
CONV1_SIZE = 5
# 第二层卷积层的尺寸和深度
CONV2_DEEP = 64
CONV2_SIZE = 5
# 全连接层的节点个数
FC_SIZE = 512
class LeNetModel():
def __init__(self, flags):
self.flags = flags
self.build_model()
self.build_optimizer()
def build_model(self):
self.input_x = tf.placeholder(tf.float32, [None, IMAGE_SIZE * IMAGE_SIZE], name='input_x')
self.target_y = tf.placeholder(tf.float32, [None, NUM_LABELS], name='target_y')
self.keep_drop = tf.placeholder(tf.float32)
image_x = tf.reshape(self.input_x, [-1, IMAGE_SIZE, IMAGE_SIZE, NUM_CHANNELS])
with tf.variable_scope('layer1-conv1'):
conv1_weights = tf.get_variable('weight',
[CONV1_SIZE, CONV1_SIZE, NUM_CHANNELS, CONV1_DEEP],
initializer=tf.truncated_normal_initializer(stddev=0.1))
conv1_biases = tf.get_variable('bias',
[CONV1_DEEP], initializer=tf.constant_initializer(0.0))
conv1 = tf.nn.conv2d(image_x, conv1_weights, strides=[1, 1, 1, 1], padding='SAME')
relu1 = tf.nn.relu(tf.nn.bias_add(conv1, conv1_biases))
with tf.variable_scope('layer1-pool1'):
pool1 = tf.nn.max_pool(relu1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
with tf.variable_scope('layer2-conv2'):
conv2_weights = tf.get_variable('weight',
[CONV2_SIZE, CONV2_SIZE, CONV1_DEEP, CONV2_DEEP],
initializer=tf.truncated_normal_initializer(stddev=0.1))
conv2_baises = tf.get_variable('bias',
[CONV2_DEEP], initializer=tf.constant_initializer(0.0))
conv2 = tf.nn.conv2d(pool1, conv2_weights, strides=[1, 1, 1, 1], padding='SAME')
relu2 = tf.nn.relu(tf.nn.bias_add(conv2, conv2_baises))
with tf.variable_scope('layer2-pool2'):
pool2 = tf.nn.max_pool(relu2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
pool_shape = pool2.get_shape().as_list()
flatten_size = pool_shape[1] * pool_shape[2] * pool_shape[3]
flatting = tf.reshape(pool2, [-1, flatten_size])
with tf.variable_scope('layer3-fc1'):
fc1_weights = tf.get_variable('weight',
[flatten_size, FC_SIZE],
initializer=tf.truncated_normal_initializer(stddev=0.1))
fc1_biases = tf.get_variable('bias',
[FC_SIZE],
initializer=tf.constant_initializer(0.0))
fc1 = tf.nn.relu(tf.matmul(flatting, fc1_weights) + fc1_biases)
fc1 = tf.nn.dropout(fc1, self.keep_drop)
with tf.variable_scope('layer4-fc2'):
fc2_weights = tf.get_variable("weight", [FC_SIZE, NUM_LABELS],
initializer=tf.truncated_normal_initializer(stddev=0.1))
fc2_biases = tf.get_variable('bias', [NUM_LABELS], initializer=tf.constant_initializer(0.1))
self.logits = tf.matmul(fc1, fc2_weights) + fc2_biases
def build_optimizer(self):
self.loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(
logits=self.logits, labels=self.target_y), name='loss')
self.accuracy = tf.reduce_mean(tf.cast(tf.equal(
tf.argmax(self.logits, axis=1),
tf.argmax(self.target_y, axis=1)), tf.float32), name='accuracy')
self.optimizer = tf.train.AdamOptimizer(learning_rate=0.01).minimize(self.loss)
tf.summary.scalar('loss', self.loss)
tf.summary.scalar('accuracy', self.accuracy)
self.merge_op = tf.summary.merge_all()
def train(self, mnist):
saver = tf.train.Saver()
global_step = 0
each_step = 500
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
writer = tf.summary.FileWriter(self.flags.tensorboard_log_path, sess.graph)
for epoch in range(self.flags.num_epochs):
for _ in range(each_step):
global_step += 1
batch_x, batch_y = mnist.train.next_batch(self.flags.batch_size)
feed_dict = {self.input_x: batch_x, self.target_y: batch_y, self.keep_drop:0.5}
accu, loss, _, summary_str = sess.run([self.accuracy, self.loss, self.optimizer, self.merge_op],
feed_dict)
writer.add_summary(summary_str, global_step)
if global_step % 50 == 0:
print('Epoch: {}; Global Step: {}; accuracy: {:.2f}%; loss: {:.4f}'.
format(epoch + 1, global_step, accu*100, loss))
saver.save(sess, self.flags.model_save_path)
print('trained model has been saved in epoch:{}'.format(epoch+1))
def eval(self, mnist):
new_saver = tf.train.Saver()
with tf.Session() as new_sess:
# 获取参数到new_sess 中
graph = tf.get_default_graph()
new_saver.restore(new_sess, self.flags.model_save_path)
input_x = graph.get_tensor_by_name('input_x:0')
output_y = graph.get_tensor_by_name('target_y:0')
accuracy = graph.get_tensor_by_name('accuracy:0')
feed = {input_x: mnist.test.images, output_y: mnist.test.labels, self.keep_drop:1.0}
accu = new_sess.run(accuracy, feed_dict=feed)
print('test accuarcy:{:.2f}%'.format(accu * 100))
if __name__ == '__main__':
mnist = input_data.read_data_sets('MNIST_data', one_hot=True)
FLAGS = tf.app.flags.FLAGS
lenet_model = LeNetModel(FLAGS)
tf.app.flags.DEFINE_string('model_save_path', 'trained_model/lenet/lenet_model.ckpt', 'model_save_path')
tf.app.flags.DEFINE_float('learning_rate', 0.002, 'learning_rate')
tf.app.flags.DEFINE_integer('batch_size', 64, 'number of batch size')
tf.app.flags.DEFINE_integer('num_epochs', 1, 'number of epoch to train')
tf.app.flags.DEFINE_string('tensorboard_log_path', 'tensorboard_log/lenet/', 'tensorboard_log_path')
lenet_model.train(mnist)
lenet_model.eval(mnist) | [
"18843740508@163.com"
] | 18843740508@163.com |
f150726c58233152b08d58900602edbd74120923 | 8f6aa9ac9c8c2e409875bbf36fbc49b3eb37d88b | /enthought/util/updates/info_file.py | 8b990c36ca53d32b4dff8ed8376bbc10a937fb35 | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | enthought/etsproxy | 5660cf562c810db2ceb6b592b6c12274bce96d73 | 4aafd628611ebf7fe8311c9d1a0abcf7f7bb5347 | refs/heads/master | 2023-03-27T04:51:29.297305 | 2020-12-02T09:05:18 | 2020-12-02T09:05:18 | 1,632,969 | 3 | 1 | NOASSERTION | 2020-12-02T09:05:20 | 2011-04-18T22:29:56 | Python | UTF-8 | Python | false | false | 6,573 | py |
import warnings
warnings.warn("Module is deprecated.", DeprecationWarning)
import md5
import os
import re
import warnings
from xml.etree import ElementTree as ET
#========================================================================
# Regex helpers for parsing file names and validating checksums.
#========================================================================
_version_in_name = re.compile("(\S*)[-](\d+\.\d+\.*\d*)\S*")
def _get_filename(filename):
    """Extract the package name (the part before '-<version>') from *filename*."""
    found = _version_in_name.search(filename)
    if not found:
        raise ValueError("Could not find name in filename: " + filename)
    return found.group(1)
def _get_version(filename):
    """Extract the dotted version string (e.g. '1.2.3') from *filename*."""
    found = _version_in_name.search(filename)
    if not found:
        raise ValueError("Could not find version in filename: " + filename)
    return found.group(2)
def _get_checksum(filename):
    """Return the md5 hex digest of the target file.

    *filename* is the path of the .info file; the checksummed target is the
    same path with its extension stripped.

    Fix: the file handle is now closed deterministically (the original left
    it open until garbage collection).
    """
    base, ext = os.path.splitext(filename)
    f = open(base)
    try:
        data = f.read()
    finally:
        f.close()
    return md5.new(data).hexdigest()
# Regexes for the "key: value" header lines of a .info file; each captures
# the value up to the end of the line.
filename_re = re.compile('filename: \s*(.*)\n')
version_re = re.compile('version: \s*(.*)\n')
checksum_re = re.compile('checksum: \s*(.*)\n')
# Marks the start of the free-form HTML description section.
desc_re = re.compile('\ndescription:\n')
# Maps each .info attribute to the regex that parses it from the file text
# and the fallback function that derives it from the filename / target file
# when the header line is absent (see InfoFile.from_info_file).
codedict = {'filename':{'re':filename_re,
                        'get':_get_filename},
            'version': {'re':version_re,
                        'get':_get_version},
            'checksum': {'re':checksum_re,
                         'get':_get_checksum}
            }
class InfoFile:
    """Representation of an .info file, which provides metadata of another
    file (its "target").

    Important methods:

    @classmethod
    from_info_file(filename)
        construct an InfoFile object from a filename --- simple parser
        name: %filename% (if not present extracted from .info filename)
        version: %filename% (if not present it is extracted from name of file)
        checksum: md5hash (if not present it is computed from the basefile)
        html: (everything else in the file from the next line to the end)

    to_xml()
        return an xml element describing this file
    """

    # The filename of the update_file. This is not the full path -
    # see **location** below.
    filename = ""

    # The version of the target file
    version = None

    # Checksum of the target file
    checksum = None

    # A multi-line HTML document describing the changes between
    # this version and the previous version
    description = ""

    # The reported location of where self.filename can be found. This gets
    # prepended to self.filename to form the full path. Typically this will be
    # an HTTP URL, but this can be a URI for a local or LAN directory.
    # This field usually gets set by an external tool, and is not present
    # in the .info format.
    location = "./"

    # A function that takes a string (self.version) and returns something
    # that can be used to compare against the version-parsed version of
    # another InfoFile object.
    version_parser = None

    #========================================================================
    # Constructors
    #========================================================================

    @classmethod
    def from_info_file(cls, filename):
        """ Construct an InfoFile instance from a .info file on disk.
        """
        # FIX: local renamed from 'str' (which shadowed the builtin) and the
        # file handle is now closed deterministically.
        f = open(filename)
        try:
            text = f.read()
        finally:
            f.close()
        obj = cls()
        for attr in ['filename', 'version', 'checksum']:
            funcdict = codedict[attr]
            match = funcdict['re'].search(text)
            if match is None:
                # Header line missing: derive the value from the filename
                # (or, for the checksum, from the target file itself).
                value = funcdict['get'](filename)
            else:
                value = match.group(1)
            setattr(obj, attr, value)

        match = desc_re.search(text)
        if match is None:
            warnings.warn("Info file " + filename + " lacks a description: field")
        else:
            beg, end = match.span()
            start = text.find('\n', end)
            obj.description = text[start:]
        return obj

    @classmethod
    def from_target_file(cls, filename):
        """ Construct an InfoFile given the filename of the target file.
        """
        obj = cls(filename=filename)
        # Try to glean a version number from the file name
        try:
            version = _get_version(filename)
            obj.version = version
        except ValueError:
            pass
        return obj

    @classmethod
    def from_xml(cls, bytes):
        """ Returns a new InfoFile instance from a multi-line string of
        XML data
        """
        raise NotImplementedError

    def __init__(self, **kwargs):
        # Do a strict Traits-like construction: only the known attribute
        # names are accepted; anything else is silently ignored.
        for attr in ("filename", "version", "checksum", "description",
                     "location", "version_parser"):
            if attr in kwargs:
                setattr(self, attr, kwargs[attr])

    #========================================================================
    # Public methods
    #========================================================================

    def to_xml(self):
        """ Return an ElementTree <file> element describing this object. """
        root = ET.Element("file")
        for attrname in ("version", "filename", "location", "checksum", "description"):
            node = ET.SubElement(root, attrname)
            node.text = getattr(self, attrname)
        return root

    def to_xml_str(self):
        """ Returns a multi-line string of XML representing the information in
        this object.
        """
        return ET.tostring(self.to_xml())

    def to_info_str(self):
        """ Returns a multi-line string in the .info file format
        """
        lines = []
        for attr in ["filename", "version", "checksum"]:
            lines.append(attr + ": " + getattr(self, attr))
        return "\n".join(lines) + "\ndescription:\n" + self.description + "\n"

    def __cmp__(self, other):
        """ Allows for comparing two InfoFile objects so they can
        be presented in version-sorted order.  This is where the
        **version** string attribute gets parsed and interpreted.
        """
        # TODO: Do something more intelligent here, if version parsers are missing
        if self.version_parser is not None:
            self_ver = self.version_parser(self.version)
        else:
            self_ver = self.version
        if other.version_parser is not None:
            other_ver = other.version_parser(other.version)
        else:
            other_ver = other.version
        if self_ver < other_ver:
            return -1
        elif self_ver == other_ver:
            # BUG FIX: was 'self.ver', which raised AttributeError whenever
            # the two versions compared equal.
            return 0
        else:
            return 1
| [
"robert.kern@gmail.com"
] | robert.kern@gmail.com |
a0bcdf633e6f26afa8a550121748d1798216dddb | 7994559f1baad2049751ac3c9d20455115d4a48b | /mySpartaSns/urls.py | c9d7678e8b3cf24e3b52e15eb598cf729615e6b6 | [] | no_license | normaljeon/mySpartaSns | 3f76f95aced89484520a8e6ad626b52ed73bfb6e | f934fac265a413dbcf74b04abde9eacb19ceb276 | refs/heads/main | 2023-06-07T01:40:06.608213 | 2021-06-26T16:13:56 | 2021-06-26T16:13:56 | 380,543,939 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 975 | py | """mySpartaSns URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from . import views
urlpatterns = [
    # Django admin site.
    path('admin/', admin.site.urls),
    # Simple smoke-test views defined in this project package.
    path('test/', views.base_response, name = 'first_test'),
    path('first/', views.first_view, name = 'first_view'),
    # Delegate everything else to the app-level URLconfs; both are mounted
    # at the root, so 'user.urls' patterns are matched before 'tweet.urls'.
    path('', include('user.urls')),
    path('', include('tweet.urls')),
]
| [
"inputjsh@gmail.com"
] | inputjsh@gmail.com |
fd91de5e21ca19d5f64f3219747ad6809612c353 | 10c40d2a919a69f4a680efffd17e326e25e9e73d | /sigopt/magics.py | d65810ab869a51a5098b128e4a519ada018c39f7 | [
"MIT"
] | permissive | XiaozhiShenNovelis/sigopt-python | 34b8930397bd364376a0daef6baaac7f3d34e8fc | 3ba596cfae37a8f987c61f87055b8081a12aeaf0 | refs/heads/master | 2023-08-23T00:59:19.555102 | 2021-10-19T19:25:57 | 2021-10-19T19:25:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,430 | py | import http
import io
import sys
import yaml
import IPython
from IPython.core.magic import (
Magics,
cell_magic,
magics_class,
)
from .config import config
from .interface import get_connection
from .log_capture import NullStreamMonitor, SystemOutputStreamMonitor
from .run_context import global_run_context
from .factory import SigOptFactory
from .defaults import get_default_project
from .validate import validate_experiment_input, ValidationError
from .logging import print_logger
from .exception import ApiException
def get_ns():
  """Return the locals of the interactive IPython frame.

  Walks up the call stack until it finds a frame whose locals bind the
  name ``exit`` to IPython's ExitAutocall object, which identifies the
  interactive shell's user namespace (the one notebook cells execute in).

  NOTE(review): if no such frame exists, sys._getframe eventually raises
  ValueError ("call stack is not deep enough") instead of returning.
  """
  # NOTE(taylor): inspired by https://github.com/ipython/ipython/blob/master/IPython/core/interactiveshell.py
  # Walk up the stack trace until we find the 'exit' command
  stack_depth = 1
  while True:
    frame = sys._getframe(stack_depth)
    f_locals = frame.f_locals
    try:
      if isinstance(f_locals['exit'], IPython.core.autocall.ExitAutocall):
        return f_locals
    except KeyError:
      # This frame has no 'exit' binding; keep climbing.
      pass
    stack_depth += 1
@magics_class
class SigOptMagics(Magics):
  """IPython cell magics (%%experiment, %%run, %%optimize) that execute
  notebook cells inside SigOpt run contexts."""

  def __init__(self, shell):
    super().__init__(shell)
    self._connection = get_connection()
    # Set by the %%experiment magic; required before %%optimize can run.
    self._experiment = None
    self._factory = SigOptFactory(get_default_project())

  def setup(self):
    """Tag outgoing API calls as coming from a notebook environment."""
    config.set_user_agent_info([
      'Notebook',
      '/'.join(['IPython', IPython.__version__]),
    ])

  @cell_magic
  def experiment(self, _, cell):
    """%%experiment: evaluate the cell as a dict (or YAML text), validate
    it, and create the experiment used by later %%optimize calls."""
    ns = get_ns()
    # The cell may be any Python expression (dict literal, variable name,
    # string of YAML, ...) evaluated in the interactive namespace.
    # pylint: disable=eval-used
    cell_value = eval(cell, ns)
    # pylint: enable=eval-used
    if isinstance(cell_value, dict):
      experiment_body = dict(cell_value)
    else:
      # Non-dict results are treated as YAML source text.
      experiment_body = yaml.safe_load(io.StringIO(cell_value))
    self.setup()
    try:
      validated = validate_experiment_input(experiment_body)
    except ValidationError as validation_error:
      print_logger.error("ValidationError: %s", str(validation_error))
      return
    try:
      self._experiment = self._factory.create_prevalidated_experiment(validated)
    except ApiException as api_exception:
      # Only BAD_REQUEST is reported in-notebook; other API errors propagate.
      if api_exception.status_code == http.HTTPStatus.BAD_REQUEST:
        print_logger.error("ApiException: %s", str(api_exception))

  def exec_cell(self, run_context, cell, ns):
    """Execute *cell* in namespace *ns* with *run_context* installed as the
    global run context, optionally capturing stdout/stderr as run logs."""
    global_run_context.set_run_context(run_context)
    try:
      if config.cell_tracking_enabled:
        run_context.log_source_code(content=cell)
      stream_monitor = SystemOutputStreamMonitor() if config.log_collection_enabled else NullStreamMonitor()
      with stream_monitor:
        # pylint: disable=exec-used
        exec(cell, ns)
        # pylint: enable=exec-used
      stream_data = stream_monitor.get_stream_data()
      if stream_data:
        stdout, stderr = stream_data
        run_context.set_logs({'stdout': stdout, 'stderr': stderr})
    finally:
      # Always detach the run context, even if the cell raised.
      global_run_context.clear_run_context()

  @cell_magic
  def run(self, line, cell):
    """%%run [name]: execute the cell inside a single new run."""
    ns = get_ns()
    name = None
    if line:
      name = line
    self.setup()
    run_context = self._factory.create_run(name=name)
    with run_context:
      self.exec_cell(run_context, cell, ns)

  @cell_magic
  def optimize(self, line, cell):
    """%%optimize [name]: execute the cell once per suggested run in the
    experiment's optimization loop."""
    ns = get_ns()
    if self._experiment is None:
      raise Exception('Please create an experiment first with the %%experiment magic command')
    name = None
    if line:
      name = line
    self.setup()
    for run_context in self._experiment.loop(name=name):
      with run_context:
        self.exec_cell(run_context, cell, ns)
| [
"noreply@github.com"
] | noreply@github.com |
0cf7cd7851932bf83ba266a23941fc3516d97cab | 722905ff407bbabcfe9c3a0b1f848ef3bb7a9571 | /randomforest1.py | 8bf8cf1c364c09a6cd292713d3bb6697559f92be | [] | no_license | Anupwilson/datascience-python_code | 469d4178ab3b272e84d67b953b632c36438ab0c2 | 5e067248866c241dc1ece30044540e8a969196e0 | refs/heads/main | 2023-04-01T17:06:21.993684 | 2021-04-23T16:10:07 | 2021-04-23T16:10:07 | 309,037,671 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,231 | py | import pandas as pd
import streamlit as st
from sklearn.ensemble import RandomForestClassifier
st.title('Model Deployment: random forest classifier')
st.sidebar.header('User Input Parameters')
def user_input_features():
    """Collect the four incident attributes from the sidebar and return
    them as a single-row DataFrame."""
    values = {
        'CALLERID': st.sidebar.number_input("Insert the callerid"),
        'OPENBY': st.sidebar.number_input("Insert the open by"),
        'LOC': st.sidebar.number_input("Insert the loc"),
        'CATEGORY': st.sidebar.number_input("Insert the category"),
    }
    return pd.DataFrame(values, index=[0])
df = user_input_features()
st.subheader('User Input parameters')
st.write(df)
# The classifier is retrained from the bundled CSV on every app rerun.
incident = pd.read_csv("final_data.csv")
incident = incident.dropna()
# NOTE(review): training columns are lower-case ('caller_id', ...) while
# user_input_features() produces upper-case columns ('CALLERID', ...).
# Older scikit-learn matches features positionally, but newer versions warn
# or fail on mismatched feature names -- confirm the intended column names.
X = incident.loc[:, ['caller_id', 'open_by', 'loc', 'category']]
Y = incident.loc[:, 'i_impact']
clf = RandomForestClassifier()
clf.fit(X, Y)
prediction = clf.predict(df)
prediction_proba = clf.predict_proba(df)
st.subheader('Predicted Result')
# Threshold the probability of the second class at 0.5.
st.write('high impact' if prediction_proba[0][1] > 0.5 else 'not high impact')
st.subheader('Prediction Probability')
st.write(prediction_proba)
"noreply@github.com"
] | noreply@github.com |
d4b0ecde11724f063f3638035d6adf52935d4f31 | 211f7fab75d54945e98be68974d35ff2ab8ac94a | /client/constants.py | fd9e555539f15c3e61b257342c9a62ddd2a26e7b | [
"MIT"
] | permissive | heni/rem | de0693e7dbdf63539b564ca7923860c3b56d9fff | 7472bc3b106f512355cfc2ca646e8290d19661c6 | refs/heads/master | 2022-02-23T15:06:59.926472 | 2022-01-19T11:52:42 | 2022-01-19T11:52:42 | 9,545,619 | 15 | 6 | null | 2015-11-17T05:33:49 | 2013-04-19T13:05:55 | Python | UTF-8 | Python | false | false | 297 | py | #for Job class
#one week
NOTIFICATION_TIMEOUT = 604800
#two weeks
KILL_JOB_DEFAULT_TIMEOUT = 1209600
#Packet`s names policy
IGNORE_DUPLICATE_NAMES_POLICY = 0b001
WARN_DUPLICATE_NAMES_POLICY = 0b010
DENY_DUPLICATE_NAMES_POLICY = 0b100
DEFAULT_DUPLICATE_NAMES_POLICY = DENY_DUPLICATE_NAMES_POLICY
| [
"lexplua@41d65440-b5be-11dd-afe3-b2e846d9b4f8"
] | lexplua@41d65440-b5be-11dd-afe3-b2e846d9b4f8 |
e1d6a76f3ee7691607e6831868d4cd7850d105a8 | 60cde9d4107b3bb5724ec36618af4b4ecd692165 | /camera.py | 6a2f93f5e942721d3bb04ce5cbb64fdbe2e7022a | [] | no_license | Yemy/security-camera-in-python | 54cc176170c20c9b67dedefe037ced7410beb0d2 | eb9b2a3b549045c5980b12a50c9b43c143659e4a | refs/heads/main | 2023-08-02T16:56:27.780886 | 2021-10-09T10:41:52 | 2021-10-09T10:41:52 | 415,277,859 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 300 | py | import cv2
# Read the video stream served by an IP-webcam app on the local network.
capture = cv2.VideoCapture("http://192.168.1.5:8080/video")
while True:
    _, frame = capture.read()
    # NOTE(review): OpenCV frames are BGR; COLOR_RGB2GRAY still yields a
    # grayscale image but with swapped channel weights -- confirm whether
    # COLOR_BGR2GRAY was intended.
    gray = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
    # Flip code -1 flips around BOTH axes (a 180-degree rotation); a plain
    # left-right mirror would be flip code 1 -- presumably intentional for
    # an upside-down camera mount, verify.
    mirror = cv2.flip(gray, -1)
    cv2.imshow('Live', mirror)
    # Quit when 'q' is pressed while the preview window has focus.
    if cv2.waitKey(1) == ord("q"):
        break
capture.release()
cv2.destroyAllWindows()
| [
"yemybold@gmail.com"
] | yemybold@gmail.com |
fe0fe5ceb581258a2127b48eb65b8201e67adcf1 | 2310bf22a57aae02b09866169ca228a04a160fa3 | /pysfOp/vsGraphOp.py | 686ac1fba2c5b6e905c832cc58f0a307f1e2e4d0 | [] | no_license | lobosKobayashi/PythonSfCp932 | e16f8e94c38cea89879021b3ae4378f37a39e5e1 | 78127ce59cadc54e291d5e0afd4440240be630a1 | refs/heads/master | 2021-01-23T13:42:08.187821 | 2017-01-03T20:21:38 | 2017-01-03T20:21:38 | 11,274,335 | 0 | 0 | null | null | null | null | SHIFT_JIS | Python | false | false | 8,733 | py | # -*- encoding: cp932 -*-
from __future__ import division
"""'
english:
PythonSf pysfOp\vsGraphOp.py
https://github.com/lobosKobayashi
http://lobosKobayashi.github.com/
Copyright 2016, Kenji Kobayashi
All program codes in this file was designed by kVerifierLab Kenji Kobayashi
I release souce codes in this file under the GPLv3
with the exception of my commercial uses.
2016y 12m 28d Kenji Kokbayashi
japanese:
PythonSf pysfOp\vsGraphOp.py
https://github.com/lobosKobayashi
http://lobosKobayashi.github.com/
Copyright 2016, Kenji Kobayashi
このファイルの全てのプログラム・コードは kVerifierLab 小林憲次が作成しました。
作成者の小林本人に限っては商用利用を許すとの例外条件を追加して、
このファイルのソースを GPLv3 で公開します。
2016年 12月 28日 小林憲次
'"""
import sfFnctnsOp as sf
__obj2dDisplayGeneratedStt = None # to enable overlap plot
def plotTrajectory(arg, color = sf.cyan, xyRate=True, radiusRate = 80.0
                                                    , blAxes = True):
    """' plot 2D/3D trajectory. You can hand over list of length 2 element at 2D
    or length 3 element at 3D.
        The line radius is 1/200 for max display size. The line radius can be
    changed by radiusRate.
        If blAxes = False then the RGB axis is not displayed.
        At 2D plot, if xyRate == False then plot in a same hight/width square
    '"""
    # materialize generators so the data can be indexed repeatedly
    if not(hasattr(arg, '__getitem__')) and hasattr(arg, '__iter__'):
        arg = list(arg)
    vs = sf.vs_()
    color = tuple(color) # color argment may be list/vector
    if isinstance(arg,list) or isinstance(arg,tuple) or isinstance(
                                            arg,type(sf.sc.array([0,]))):
        from octnOp import ClOctonion
        # scalar complex data is plotted as (real, imag) pairs; octonion
        # data is plotted by its first three imaginary components
        if not(hasattr(arg[0],'__len__')) and isinstance(arg[0], complex):
            arg = [ (x.real, x.imag) for x in arg]
        elif not(hasattr(arg[0],'__len__')) and isinstance(arg[0], ClOctonion):
            arg = [ x[1:4] for x in arg]
        if len(arg[0])==2:
            # 2-D trajectory, drawn with visual.graph on a cached gdisplay
            import visual.graph as vg
            global __obj2dDisplayGeneratedStt
            maxX = max([abs(elm[0]) for elm in arg])
            maxY = max([abs(elm[1]) for elm in arg])
            print "maxX:",maxX, " maxY:",maxY
            if (__obj2dDisplayGeneratedStt == None):
                if xyRate == True: # 11.01.16 to
                    # window aspect follows the data's x/y extent
                    maxAt = max(maxX, maxY)
                    __obj2dDisplayGeneratedStt = vg.gdisplay(
                            width=600*maxX/maxAt,height=600*maxY/maxAt)
                else:
                    __obj2dDisplayGeneratedStt = vg.gdisplay(
                            width=600,height=600)
            #__bl2dDisplayGeneratedStt = True
            grphAt = vg.gcurve(color = color)
            for i in range(len(arg)):
                assert len(arg[i])==2, "unexpeted length data:"+str(arg[i])
                grphAt.plot(pos = arg[i])
            #return __obj2dDisplayGeneratedStt
            #import pdb; pdb.set_trace()
            #print "debug:",grphAt.gcurve.pos
            # plot start mark: a small square around the first data point
            grphSqAt = vg.gcurve(color = color)
            pos0At = grphAt.gcurve.pos[0,:][:2]
            rateAt = 50
            for x,y in sf.mitr([-maxX/rateAt, maxX/rateAt]
                    , [-maxY/rateAt, maxY/rateAt]):
                grphSqAt.plot(pos = pos0At+[x,y])
            grphSqAt.plot(pos = pos0At+[-maxX/rateAt,-maxY/rateAt])
            return grphAt # 09.02.04 to animate graph
        elif len(arg[0])==3:
            # 3-D trajectory, drawn as a visual curve in the vpython scene
            vs.scene.forward=(-1,+1,-1)
            vs.scene.up=(0,0,1)
            c = vs.curve( color = color )
            maxX, maxY, maxZ = 0,0,0
            for i in range(len(arg)):
                if maxX < abs(arg[i][0]):
                    maxX = abs(arg[i][0])
                if maxY < abs(arg[i][1]):
                    maxY = abs(arg[i][1])
                if maxZ < abs(arg[i][2]):
                    maxZ = abs(arg[i][2])
                c.append( arg[i] )
            #print c.pos
            print "maxX:",maxX, " maxY:",maxY, " maxZ:",maxZ
            maxAt = max(maxX,maxY,maxZ)
            c.radius = maxAt/radiusRate
            # a sphere marks the starting point of the trajectory
            vs.sphere(pos = arg[0], radius = 3*c.radius, color = color)
            if blAxes == True:
                # draw axise
                vs.curve( pos=[(0,0,0), (maxAt,0,0)]
                        , color=(1,0,0)
                        , radius = maxAt/100 )
                vs.curve( pos=[(0,0,0), (0,maxAt,0)]
                        , color=(0,1,0)
                        , radius = maxAt/100 )
                vs.curve( pos=[(0,0,0), (0,0,maxAt)]
                        , color=(0,1,1)
                        , radius = maxAt/100 )
            #return vs.scene
            return c # 09.02.04 to animate graph
        else:
            assert False,"unexpeted data:"+str(arg)
__objGrDisplayGeneratedStt = None # to enable overlap plot

def plotGr(vctAg, start=(), end=None, N=50, color = sf.cyan):
    """' plot graph for a function or vector data
    If you call plotGr(..) a number of times, then the graphs were plotted
    in piles.

    start,end are domain parameters, which are used if vctAg type is
    function

    if you want to vanish the graph then do as below
        objAt=plotGr(..)
            .
            .
        objAt.visible = None

    usage:
        plotGr(sin)         # plot sin graph in a range from 0 to 1
        plotGr(sin,-3,3)    #plot sin in a range from -3 to 3
        plotGr(sin,[-3,-2,0,1])
                            # plot sequential line graph by
                            # [(-3,sin(-3),(-2,sin(-2),(0,sin(0),(1,sin(1)]
        plotGr([sin(x) for x in klsp(-3,3)])    # plot a sequence data
    '"""
    # materialize generators so the data can be traversed and indexed
    if not(hasattr(vctAg, '__getitem__')) and hasattr(vctAg, '__iter__'):
        vctAg = list(vctAg)
    vs = sf.vs_()
    global __objGrDisplayGeneratedStt
    color = tuple(color) # color argment may be list/vector
    import visual.graph as vg
    if __objGrDisplayGeneratedStt == None:
        __objGrDisplayGeneratedStt = vg.gdisplay()
    grphAt = vg.gcurve( color = color)
    if '__call__' in dir(vctAg):
        # vctAg is a function
        if start != () and end == None and hasattr(start, '__iter__'):
            # explicit sample points were handed over in *start*
            for x in start:
                grphAt.plot(pos = [x, float(vctAg(x))] )
        else:
            if start == ():
                start = 0
            if end == None:
                end = 1
            assert start != end
            if start > end:
                start, end = end, start
            for x in sf.klsp(start, end, N):
                # 09.12.03 to display end and avoid 0
                grphAt.plot(pos = [x, float(vctAg(x))] )
            return __objGrDisplayGeneratedStt
    else:
        # vctAg is sequence data
        if (start != ()) or (end != None):
            if start == ():
                start = 0
            if end == None:
                end = 1
            assert start != end
            if start > end:
                start, end = end, start
            N = len(vctAg)
            # BUG FIX: 'enmasq' was referenced without the module prefix,
            # which raised NameError when this branch ran; it is provided
            # by sfFnctnsOp (sf), like sf.klsp/sf.mitr used elsewhere in
            # this module -- verify the helper name in sfFnctnsOp.
            for i, x in sf.enmasq([start, N, (end - start)/N]):
                grphAt.plot(pos = [x, float(vctAg[i])] )
        else:
            # no domain given: plot values against their indices
            for i in range(len(vctAg)):
                grphAt.plot(pos = [i, float(vctAg[i])] )
        return __objGrDisplayGeneratedStt
def plotDbl(sq0,sq1, region=None, N=50):
    """Plot two data sets / functions on one graph: sq0 in the default
    color and sq1 in red.

    sq0, sq1 -- both sequences, or both callables of one variable.
    region   -- two-element [start, end] domain, used only for callables
                (defaults to [-1, 1]).
    N        -- number of sample points, used only for callables.

    NOTE(review): only sq0 is inspected to choose the branch; a sequence
    sq0 paired with a callable sq1 would be mishandled -- confirm both
    arguments are always of the same kind.
    """
    if isinstance(sq0, (tuple, list, sf.np.ndarray)):
        # sequence data: plotted against their indices
        sf.plotGr(sq0)
        return sf.plotGr(sq1,color=sf.red)
    else:
        assert hasattr(sq0,'__call__'), "at plotDbl(..), you set parameter sq0 that is not function"
        assert hasattr(sq1,'__call__'), "at plotDbl(..), you set parameter sq1 that is not function"
        if region==None:
            region = [-1,1]
        assert(isinstance(region,(tuple, list, sf.np.ndarray)) and len(region)==2)
        sf.plotGr(sq0,region[0],region[1],N=N)
        return sf.plotGr(sq1, region[0], region[1], N=N, color=sf.red)
| [
"lobosKobayashi@gmail.com"
] | lobosKobayashi@gmail.com |
fe619f1da9f99173a18dd7df28146699dca5644e | e133ce013b0a3ca56120db66a95884f464308f2f | /learning_logs/forms.py | 102a5d3d7097bca6f7bca5bc17359a91f583d6f9 | [] | no_license | zhouyl02/learninglogs | 27e2ec011cbc3c27db46c66ed42aa849186e1c81 | d23ad9a0bfc72c813140bca07a898ee1e22293a8 | refs/heads/master | 2021-04-01T13:23:45.175845 | 2020-12-04T12:30:54 | 2020-12-04T12:30:54 | 124,403,101 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 333 | py | from django import forms
from .models import Topic, Entry
class TopicForm(forms.ModelForm):
    """ModelForm for creating/editing a Topic; exposes only its text field."""
    class Meta:
        model = Topic
        fields = ['text']
        # Empty label: the field renders without a caption.
        labels = {'text': ''}
class EntryForm(forms.ModelForm):
class Meta:
model = Entry
fields = ['text']
labels = {'text': ''}
widgets = {'text': forms.Textarea(attrs={'cols': 80})} | [
"915186072@qq.com"
] | 915186072@qq.com |
f32dc0bf2193c95d85801ddc0ca99e3a0991e3fe | 133dbe47cf8d64d11dfaf2a1109f472c67f56136 | /tests/objects/test_time.py | 1b32001d7b6594eabc6b56e2ad8db8d78cbdecca | [
"MIT"
] | permissive | My-Novel-Management/storybuilderunite | a2bae6f3d79a8bc22d141663a2b09dde12556299 | c003d3451e237f574c54a87ea7d4fd8da8e833be | refs/heads/master | 2021-07-13T16:35:31.922363 | 2021-01-29T00:38:04 | 2021-01-29T00:38:04 | 230,050,595 | 1 | 0 | MIT | 2020-06-24T01:49:03 | 2019-12-25T06:05:42 | Python | UTF-8 | Python | false | false | 966 | py | # -*- coding: utf-8 -*-
'''
Time class test
===============
'''
import datetime
import unittest
from tests.testutils import print_testtitle, validate_with_fail
from builder.objects import time as tm
class TimeTest(unittest.TestCase):
    """Unit tests for builder.objects.time.Time."""
    @classmethod
    def setUpClass(cls):
        # Print a banner once before any test method in this class runs.
        print_testtitle(tm.__name__, 'Time class')
    def test_instance(self):
        """Construction stores name/hour/minute and derives datetime.time."""
        data = [
                # (name, hour, minute, expect, exp_h, exp_m, exp_time)
                (True, 'test', 5, 20, 'test', 5, 20, datetime.time(5,20)),
                ]
        # NOTE(review): each data row carries a leading bool that checker()
        # does not receive -- presumably validate_with_fail strips it as a
        # pass/fail flag; confirm against tests.testutils.
        def checker(name, hour, minute, expect, exp_h, exp_m, exp_time):
            tmp = tm.Time(name, hour, minute)
            self.assertIsInstance(tmp, tm.Time)
            self.assertEqual(tmp.name, expect)
            self.assertEqual(tmp.hour, exp_h)
            self.assertEqual(tmp.minute, exp_m)
            self.assertEqual(tmp.time, exp_time)
        validate_with_fail(self, 'class instance', checker, data)
| [
"nagisc007@yahoo.co.jp"
] | nagisc007@yahoo.co.jp |
a22ecfc5db8d716f603611bc6d34e5846f57d403 | ddb76ba98767c0e042d1eacfa41591ef2ee8bce8 | /case/exchange/trade/test_navigation.py | 9310eead8212d2ff05c16356b42dfcc3df72e747 | [] | no_license | linzhiyang85/CryptoTasks | 710f9b7cec613056090dc2de6109adcf132f28bc | 7e0bfc8a09e32c9a4388ab9ea9b55f0b8dce757f | refs/heads/main | 2023-09-01T08:14:07.790208 | 2021-09-25T20:46:21 | 2021-09-25T20:46:21 | 410,161,443 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,276 | py | import pytest
import time
from page.exchange import ExchangePage
from page.trade import TradePage
class TestTrade:
    """UI test: navigating from the exchange market list to a trade page."""
    @pytest.mark.parametrize('source, target', [('CRO', 'USDC'), ('ATOM', 'CRO')])
    def test_navigate_to_trade_page(self, source, target, driver, settings):
        """Open the exchange, locate the source/target instrument, click it,
        and verify the trade page URL and displayed instrument name."""
        # initialize page objects for the two screens involved
        exchange_page = ExchangePage(driver)
        trade_page = TradePage(driver)
        # maximize so all menu elements are visible/clickable
        exchange_page.maximize_window()
        # open initial page, accept cookie
        exchange_page.open(settings.get_start_url('exchange'))
        exchange_page.accept_cookie()
        # find target instrument in the market menu for the quote currency
        exchange_page.click_market_menu(target)
        instrument = exchange_page.get_instrument(source, target)
        assert instrument is not None, f'Failed to find instrument for {source}/{target}'
        # click to open trade page and wait for the navigation to complete
        exchange_page.open_instrument(instrument)
        exchange_page.wait_for_url_change(driver.current_url)
        # verification: URL encodes the pair as SOURCE_TARGET under /trade/
        assert '/trade/' in driver.current_url and f'{source}_{target}' in driver.current_url, f"Failed to open trade page for {source}/{target}"
        assert f'{source}/{target}' == trade_page.get_instrument_name(), "Instrument name in trade page is not correct"
| [
"linzhiyang@aliyun.com"
] | linzhiyang@aliyun.com |
91efd913c270d343c4b45b6d1eb44d4aa58f912c | 35a6b6b5cabcf9fb39527bab020ef7c96265a026 | /p3.py | 5911e61bf240cc3e917c3377949ca16c9c46851d | [] | no_license | mepky/data-structure-and-algorithm | 9a1324142276e6966692c51734613f15234f5300 | 96f64e657f97e46fc2d32cca5294fa0f104d5d01 | refs/heads/master | 2020-03-24T08:57:41.692564 | 2020-02-10T12:40:13 | 2020-02-10T12:40:13 | 142,614,071 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 476 | py | from collections import defaultdict
t=int(input())
for _ in range(t):
l=defaultdict(int)
n=int(input())
d=2**20
t=0
s=input()
a=[-1]*27
for i in range(n):
if a[ord(s[i])-97]==-1:
a[ord(s[i])-97]=i
else:
d=min(d,i-a[ord(s[i])-97])
t=1
a[ord(s[i])-97]=i
if t==0:
print(0)
else:
print(n-d)
| [
"noreply@github.com"
] | noreply@github.com |
e03435734d25d798b79681af85b6574d5cbf61f6 | a3ac96f0c7da0b6b03b4b52a35fbae551f488c1c | /Assignment_3_sol.py | 9f978d91192edcbd63e299340d61007e5743209e | [] | no_license | Sohaib-50/OOP-assignment-3 | 4b59c98e74c2ba892d6e746f52f736878c84bee6 | 2974a00927070d5cb328908f6cf01b50d7141a00 | refs/heads/master | 2022-11-08T05:56:38.310520 | 2020-06-30T20:02:01 | 2020-06-30T20:02:01 | 276,196,665 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,173 | py | #### Q1
##class Circle:
## def setRadius(self, r):
## self.radius = r
##
## def getRadius(self):
## return self.radius
##
## def setColor(self, c):
## self.color = c
##
## def getColor(self):
## return self.color
##
## def getCircumference(self):
## return 2 * (22 / 7) * self.radius
##
## def getArea(self):
## return (22 / 7) * (self.radius ** 2)
##
##a = Circle()
##a.setRadius(32)
##print(f"Radius of a: {a.getRadius()}")
##a.setColor("Blue")
##print(f"Area and circumference of a: {a.getArea()}, {a.getCircumference()}")
##print(f"Color of a: {a.getColor()}")
##a.setRadius(2)
##print(f"New radius of a: {a.getRadius()}")
#### Q2
##class BankAccount:
## current_balance = 0
##
## def withdraw(self):
## amount = float(input("Enter amount to be withdrawn: "))
## if amount <= self.current_balance:
## self.current_balance -= amount
## else:
## print("Not enough balance.")
##
## def deposit(self):
## amount = float(input("Enter amount to be deposited: "))
## self.current_balance += amount
##
## def balance(self):
## return self.current_balance
##
##Ahmed_account = BankAccount()
##Ahmed_account.deposit()
##Ahmed_account.withdraw()
##print(Ahmed_account.balance())
##Ahmed_account.current_balance += 800
##print(Ahmed_account.current_balance)
##Ahmed_account.withdraw()
##print(Ahmed_account.balance())
#### Q3
##class BankAccount:
## __current_balance = 0
##
## def withdraw(self):
## amount = float(input("Enter amount to be withdrawn: "))
## if amount <= self.__current_balance:
## self.__current_balance -= amount
## else:
## print("Not enough balance.")
##
## def deposit(self):
## amount = float(input("Enter amount to be deposited: "))
## self.__current_balance += amount
##
## def balance(self):
## return self.__current_balance
##
##Ahmed_account = BankAccount()
##Ahmed_account.deposit()
##Ahmed_account.withdraw()
##print(Ahmed_account.balance())
##Ahmed_account._BankAccount__current_balance += 800
##print(Ahmed_account._BankAccount__current_balance)
##Ahmed_account.withdraw()
##print(Ahmed_account.balance())
#### Q4
##class Worker:
## def setHoursWorked(self, h):
## self.__hoursWorked = h
##
## def changeRate(self, r):
## self.__wageRate = r
##
## def pay(self):
## return self.__hoursWorked * self.__wageRate
##
##Ahmed = Worker()
##Ahmed.setHoursWorked(2)
##Ahmed.changeRate(10)
##print(Ahmed.pay())
##Ahmed.setHoursWorked(4)
##print(Ahmed.pay())
##Ahmed.changeRate(20)
##print(Ahmed.pay())
#### Q5
##class Worker:
## def __init__(self, h=0, r=0):
## self.__hoursWorked = h
## self.__wageRate = r
##
## def setHoursWorked(self, h):
## self.__hoursWorked = h
##
## def changeRate(self, r):
## self.__wageRate = r
##
## def pay(self):
## return self.__hoursWorked * self.__wageRate
##
##Ahmed = Worker(r = 10)
##print(Ahmed.pay())
##Ahmed.setHoursWorked(3)
##print(Ahmed.pay())
##Ali = Worker(3)
##print(Ali.pay())
##Kamran = Worker(3, 5)
##print(Kamran.pay())
#### Q6
##class Vehicle:
## def __init__(self, w = 4, c = "white", m = 0):
## self.__noOfWheels = w
## self.__color = c
## self.__modelNo = m
##
## def getNumWheels(self):
## return self.__noOfWheels
##
## def getColor(self):
## return self.__color
##
## def getModelNum(self):
## return self.__modelNo
##
## def setColor(self, c):
## self.__color = c
##
##a = Vehicle(8, 'black', 2007)
##print(f"a: wheels = {a.getNumWheels()}, color = {a.getColor()}, Model Number = {a.getModelNum()}")
##a.setColor("green")
##print(f"a: wheels = {a.getNumWheels()}, color = {a.getColor()}, Model Number = {a.getModelNum()}")
##b = Vehicle(m = 2019)
##print(f"b: wheels = {b.getNumWheels()}, color = {b.getColor()}, Model Number = {b.getModelNum()}")
#### Q7
##class Engine:
## def __init__(self, e = 0, d = 1900):
## self.__engineNo = e
## self.__dateOfManufacture = d
##
## def getEngineNo(self):
## return self.__engineNo
##
## def getDateOfManufacture(self):
## return self.__dateOfManufacture
##
##a = Engine(d = 2012, e = 492)
##b = Engine(625)
##print(f"a: Engine Number = {a.getEngineNo()}, Date of manufacture = {a.getDateOfManufacture()}")
##print(f"b: Engine Number = {b.getEngineNo()}, Date of manufacture = {b.getDateOfManufacture()}")
#### Q8
##class Int:
## def __init__(self, i = 0):
## try:
## self.__i = int(i)
## except ValueError:
## print("Invalid value for integer.")
## del self
##
## def setValue(self, i=0):
## self.__init__(i)
##
## def getValue(self):
## return self.__i
##
## def displayValue(self):
## print(self.__i)
##
## def add(self, a):
## return Int(self.__i + a.getValue())
##
##x = Int()
##y = Int("32")
##z = Int(-5)
##
##x = y.add(z)
##x.displayValue()
#### Q9
##class TollBooth:
## def __init__(self):
## self.__numCars = 0
## self.__moneyCollected = 0
##
## def payingCar(self):
## self.__numCars += 1
## self.__moneyCollected += 50
##
## def nopaycar(self):
## self.__numCars += 1
##
## def display(self):
## print(f"Cars passed: {self.__numCars}, Cash collected: {self.__moneyCollected}")
##
##TollBooth_x = TollBooth()
##import keyboard
##while 1:
## inp = input("Press 1 to count paying car, 2 to count nonpaying car, 0 to exit: ")
## if inp == "1":
## TollBooth_x.payingCar()
## elif inp == "2":
## TollBooth_x.nopaycar()
## elif inp == "0":
## TollBooth_x.display()
## print()
## break
## else:
## print("Invalid input")
## print()
#### Q10
##class Time:
## def __init__(self, hours=0, minutes=0, seconds=0):
## if any((type(seconds) != int, type(minutes) != int, type(hours) != int)):
## raise ValueError("Time values can only be ints")
## else:
## self.__seconds = int(seconds) % 60
## self.__minutes = ((seconds // 60) + int(minutes)) % 60
## self.__hours = ((minutes // 60) + int(hours)) % 24
##
##
## def displayTime(self):
## print(f"{self.__hours:02}:{self.__minutes:02}:{self.__seconds:02}")
##
## def addTime(self, t):
## if not isinstance(t, Time):
## raise ValueError("Can not add non-Time type object to Time type object")
## self.__seconds += t._Time__seconds
## self.__minutes += (self._Time__seconds // 60) + t._Time__minutes
## self.__hours += (self._Time__minutes // 60) + t._Time__hours
## self.__seconds %= 60
## self.__minutes %= 60
## self.__hours %= 24
##
##t1 = Time(23, 59,25)
##t1.displayTime()
##
##t2 = Time(61,5,45)
##t2.displayTime()
##
##t1.addTime(t2)
##t1.displayTime()
## Q11
##In ocean navigation, locations are measured in degrees and minutes of latitude and longitude.
##Thus if you’re lying off the mouth of Papeete Harbor in Tahiti, your location is 149 degrees 34.8
##minutes west longitude, and 17 degrees 31.5 minutes south latitude. This is written as 149°34.8’ W,
##17°31.5’ S. There are 60 minutes in a degree (an older system also divided a minute into 60 seconds,
## but the modern approach is to use decimal minutes instead).
##Longitude is measured from 0 to 180 degrees, east or west from Greenwich, England, to the international
##dateline in the Pacific. Latitude is measured from 0 to 90 degrees, north or south from the equator to
##the poles. Write code to create a class Angle that includes three member variables: int for degrees, a
##float for minutes, and a char for the direction letter (N, S, E, or W). This class can hold either a latitude
##variable or a longitude variable. Write one method to obtain an angle value (in degrees and minutes) and a direction
##from the user, and a second to display the angle value in 179°59.9’ E format. Also write a three-argument constructor.
##Write a main program that displays an angle initialized with the constructor, and then, within a loop, allows the user
##to input any angle value, and then displays the value. You can use the hex character constant ‘\xF8’, which usually
##prints a degree (°) symbol.
#### Q12
##class Tracker:
## count = 0
## def __init__(self):
## Tracker.count += 1
## self.__serialNo = Tracker.count
##
## def tellSerialNo(self):
## print(f"I am object number {self.__serialNo}")
##
##a = Tracker()
##b = Tracker()
##c = Tracker()
##a.tellSerialNo()
##b.tellSerialNo()
##c.tellSerialNo()
## Q13
| [
"noreply@github.com"
] | noreply@github.com |
4883732ef5574518d7a8eea9d3610c27c1229f76 | 72371fbbca5e1631844de3a3815be721ad37ad4f | /compilador/simbolo.py | 9abf35cbb0cc6a7ea53456333b78c3dfd9813edb | [] | no_license | gustavokida/compiladores_1 | 7c046386e8b92831e5b1b48afa301732cf29373b | cf50f20381c99784da1e76547f00abe0b51396aa | refs/heads/master | 2023-08-02T19:36:53.712753 | 2021-09-25T22:11:24 | 2021-09-25T22:11:24 | 404,526,949 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 309 | py | class Simbolo:
    def __init__(self, nome, tipo):
        """Create a symbol-table entry with a name (nome) and a type (tipo)."""
        self.nome = nome
        self.tipo = tipo
    def getNome(self):
        # Accessor for the symbol's name.
        return self.nome
    def setNome(self, nome):
        # Mutator for the symbol's name.
        self.nome = nome
    def getTipo(self):
        # Accessor for the symbol's type.
        return self.tipo
    def setTipo(self, tipo):
        # Mutator for the symbol's type.
        self.tipo = tipo
"45009122+gustavokida@users.noreply.github.com"
] | 45009122+gustavokida@users.noreply.github.com |
f4cc030b9c8573c816c10160ff087a8c68c9d808 | e00cf0bf72421ec31e4d3608c615aeeba5064731 | /wows/move.py | 3165d0d74b85208a58ea1b2ed7ee70fd489a053c | [] | no_license | lorne-luo/auto-wows | b4a84c7d99585c84a635fb5be11fd0f03a5f37fd | 992ad473f1d5a78686e1c4c939c6c218e72373d7 | refs/heads/master | 2020-12-30T00:52:17.497039 | 2020-02-25T11:10:30 | 2020-02-25T11:10:30 | 238,803,938 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,190 | py | import time
from random import randint
import pyautogui as pag
import settings as settings
from helper import search_template, get_map_image
class WOWS_Move(object):
def move_ship(self):
global MOVE_TO
pag.press('m', presses=1, interval=0.25)
pag.sleep(1.5)
if not MOVE_TO:
map_image = get_map_image()
self_loc = search_template(map_image, 'map_self_icon.bmp')
print('self_loc', self_loc)
if self_loc:
MOVE_TO = (settings.BATTLE_MAP_TOPLEFT[0] + settings.BATTLE_MAP_SIZE[0] - self_loc[1],
settings.BATTLE_MAP_TOPLEFT[1] + settings.BATTLE_MAP_SIZE[1] - self_loc[0])
else:
MOVE_TO = (settings.BATTLE_MAP_TOPLEFT[0] + settings.BATTLE_MAP_SIZE[0] / 2,
settings.BATTLE_MAP_TOPLEFT[1] + settings.BATTLE_MAP_SIZE[1] / 2)
for i in range(4):
loc = (MOVE_TO[0] + randint(-50, 50),
MOVE_TO[1] + randint(-50, 50))
pag.moveTo(loc)
pag.click(clicks=2, interval=0.5, button='left')
time.sleep(1)
pag.press('esc')
time.sleep(2)
| [
"dev@luotao.net"
] | dev@luotao.net |
69f71e98982e26c3a6a8f76756133b13cb028e93 | 2f7c81de79cfd34770051d9bda3ee3fd1a0c9477 | /solucion_usando_verlet.py | 7fccdfab5b9a2c0dd2c002a6bea23273981abe5f | [
"MIT"
] | permissive | TatiFlores/04Tarea | ae38959d6313c16b5ff628912d88be45ff27d54d | 2f219a4b02865ccac716c76dd5545248b8789f44 | refs/heads/master | 2020-05-29T12:15:21.408718 | 2015-10-22T02:14:26 | 2015-10-22T02:14:26 | 44,268,867 | 0 | 0 | null | 2015-10-14T18:45:56 | 2015-10-14T18:45:56 | null | UTF-8 | Python | false | false | 1,486 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Integrate a planetary orbit with the velocity-Verlet method and plot the
# trajectory and the total energy over time (plot labels are in Spanish).
from planeta import Planeta
import numpy as np
import matplotlib.pyplot as plt
# Initial state [x, y, vx, vy]: start on the x-axis with a tangential speed.
condicion_inicial = [10, 0, 0, 0.4]
p = Planeta(condicion_inicial)
dt = 0.1
iteraciones = 60000
# Pre-allocate one sample per step for position, velocity, energy and time.
x = np.zeros(iteraciones)
y = np.zeros(iteraciones)
vx = np.zeros(iteraciones)
vy = np.zeros(iteraciones)
energia = np.zeros(iteraciones)
tiempo = np.zeros(iteraciones)
# Sample 0: the initial condition itself.
x[0] = p.y_actual[0]
y[0] = p.y_actual[1]
energia[0] = p.energia_total()
tiempo[0] = p.t_actual
vx[0] = p.y_actual[2]
vy[0] = p.y_actual[3]
# Verlet needs one extra bootstrap iteration (each step uses the state from
# two steps back), so sample 1 is produced with RK4.
p.avanza_rk4(dt)
x[1] = p.y_actual[0]
y[1] = p.y_actual[1]
energia[1] = p.energia_total()
tiempo[1] = p.t_actual
vx[1] = p.y_actual[2]
vy[1] = p.y_actual[3]
for i in range(2, iteraciones):
    # Verlet advances using the state recorded two steps ago.
    y_anterior = np.array([x[i-2], y[i-2], vx[i-2], vy[i-2]])
    p.avanza_verlet(dt, y_anterior)
    x[i] = p.y_actual[0]
    y[i] = p.y_actual[1]
    vx[i] = p.y_actual[2]
    vy[i] = p.y_actual[3]
    energia[i] =p.energia_total()
    tiempo [i] = p.t_actual
# Figure 1: the orbit in the x-y plane.
plt.figure(1)
plt.clf()
plt.plot(x,y,color='green')
plt.xlabel('x[m]')
plt.ylabel('y[m]')
plt.grid(True)
plt.title(' 'u'Ó''rbita calculada con el m'u'é''todo de Verlet ')
plt.savefig('Orbita_verlet.eps')
# Figure 2: total energy vs. time (diagnoses the integrator's energy drift).
plt.figure(2)
plt.clf()
plt.plot(tiempo,energia,'green')
plt.xlabel('Tiempo [s]')
plt.ylabel('Energ'u'í''a')
plt.grid(True)
plt.title(' Energ'u'í''a en funci'u'ó''n del tiempo, m'u'é''todo de Verlet ')
plt.savefig('Energia_verlet.eps')
plt.show()
| [
"tatiflores.4@gmail.com"
] | tatiflores.4@gmail.com |
672cf8f365696192c08ad050669fd61e20d0a34b | 504398cad76a23e1ad5f4eebf8f499b855d57771 | /manage.py | 2e47f39a342cb53f05ed73b51f03116c9fe19445 | [] | no_license | jvxtaposed/WebFrame | 75cd0c437eb94ed4c78e8e2dd47d0e6d21fa4421 | 4da3d76fb8ed79c8a0f32dccb28ca671688802cf | refs/heads/master | 2020-03-18T16:57:43.814453 | 2018-05-26T21:36:00 | 2018-05-26T21:36:00 | 134,996,205 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 806 | py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
    # Standard Django management entry point (runserver, migrate, ...).
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "WebFrame.settings")
    try:
        from django.core.management import execute_from_command_line
    except ImportError:
        # The above import may fail for some other reason. Ensure that the
        # issue is really that Django is missing to avoid masking other
        # exceptions on Python 2.
        try:
            import django
        except ImportError:
            raise ImportError(
                "Couldn't import Django. Are you sure it's installed and "
                "available on your PYTHONPATH environment variable? Did you "
                "forget to activate a virtual environment?"
            )
        raise
    execute_from_command_line(sys.argv)
| [
"iliketreeslol@gmail.com"
] | iliketreeslol@gmail.com |
265dc95d5597dd6704b8d1810358d09d0ea447cb | ea788d354226634a96fdba62d9f4a7587f6cb09f | /process_dynamixel_data.py | 609ced7ba71a0288cee3a616012e02ea0279529a | [] | no_license | DeinFreund/px4_measurements | 47f7cb2fd00017522df15b4ac6f5f97ee9c98ca6 | a53e5f781634a11c2b24d145304228c7afa66b9e | refs/heads/master | 2020-12-04T06:36:58.166716 | 2020-01-03T20:31:21 | 2020-01-03T20:31:21 | 231,660,538 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,160 | py | #import rosbag
import matplotlib.pyplot as plt
import numpy as np
import sys

# Parse Dynamixel servo log records from stdin and plot, per joint, the
# commanded vs. measured angles and their tracking error.
# Expected record: "LOG <t> m0 c0 m1 c1 ... m5 c5" (14 whitespace fields).
dyn_time = []
measured_rows = []
cmd_rows = []
for line in sys.stdin:
    fields = line.split()  # split once per line instead of once per field
    if len(fields) != 14 or fields[0] != 'LOG':
        continue
    try:
        t = int(fields[1]) / 1e6  # /1e6: presumably us -> s -- confirm units
        measured = [float(fields[2 * i + 2]) for i in range(6)]
        cmd = [float(fields[2 * i + 3]) for i in range(6)]
    except ValueError:
        # Best-effort parsing: skip lines with malformed numeric fields.
        # (Was a bare "except: pass", which also hid real bugs.)
        continue
    dyn_time.append(t)
    measured_rows.append(measured)
    cmd_rows.append(cmd)

if not dyn_time:
    # Fail with a clear message instead of an IndexError below.
    sys.exit("no valid 'LOG' records found on stdin")

# Build the (n_samples, 6) arrays once instead of np.concatenate per line
# (which was O(n^2)); make time relative to the first sample.
dyn_time = np.array(dyn_time) - dyn_time[0]
angle_measured = np.array(measured_rows)
angle_cmd = np.array(cmd_rows)

# One subplot per joint: measured and commanded angle over time.
fig_new, ax_new = plt.subplots(6, 1, sharey=True, sharex=True)
fig_new.suptitle('Position tracking')
for i in range(6):
    ax_new[i].plot(dyn_time, angle_measured[:, i], '.', label='measured_angles')
    ax_new[i].plot(dyn_time, angle_cmd[:, i], '.', label='cmd angles' )
    ax_new[i].legend(loc=0)

# One subplot per joint: tracking error (command minus measurement).
fig, ax = plt.subplots(6, 1, sharey=True, sharex=True)
fig.suptitle('Position tracking')
for i in range(6):
    ax[i].plot(dyn_time, angle_cmd[:, i] - angle_measured[:, i], '.', label='cmd - measured_angles')
    ax[i].legend(loc=0)
plt.show()
| [
"flyck@ethz.ch"
] | flyck@ethz.ch |
7ce4e5c6f137596f23f8186d9b680431e5a52ceb | ad37bbf3b1b0d4d6ad6bf0c127bd92669a8f3029 | /node_modules/@web3-js/websocket/build/config.gypi | 83aac1135a412270945de7c7f9ac49c1d0349e62 | [
"Apache-2.0"
] | permissive | william-rittmeyer/private_ethereum_blockchain | 9b7bef440186430c0fd293220543abc41a63b4e7 | 079a21a3d9f41784934e5abc7d59024e9fcfd706 | refs/heads/master | 2022-05-23T22:25:26.886789 | 2020-05-02T00:33:33 | 2020-05-02T00:33:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,459 | gypi | # Do not edit. File was generated by node-gyp's "configure" step
{
"target_defaults": {
"cflags": [],
"default_configuration": "Release",
"defines": [],
"include_dirs": [],
"libraries": []
},
"variables": {
"asan": 0,
"build_v8_with_gn": "false",
"coverage": "false",
"debug_nghttp2": "false",
"enable_lto": "false",
"enable_pgo_generate": "false",
"enable_pgo_use": "false",
"force_dynamic_crt": 0,
"gas_version": "2.27",
"host_arch": "x64",
"icu_data_in": "../../deps/icu-small/source/data/in/icudt64l.dat",
"icu_endianness": "l",
"icu_gyp_path": "tools/icu/icu-generic.gyp",
"icu_locales": "en,root",
"icu_path": "deps/icu-small",
"icu_small": "true",
"icu_ver_major": "64",
"llvm_version": 0,
"node_byteorder": "little",
"node_debug_lib": "false",
"node_enable_d8": "false",
"node_enable_v8_vtunejit": "false",
"node_install_npm": "true",
"node_module_version": 64,
"node_no_browser_globals": "false",
"node_prefix": "/",
"node_release_urlbase": "https://nodejs.org/download/release/",
"node_shared": "false",
"node_shared_cares": "false",
"node_shared_http_parser": "false",
"node_shared_libuv": "false",
"node_shared_nghttp2": "false",
"node_shared_openssl": "false",
"node_shared_zlib": "false",
"node_tag": "",
"node_target_type": "executable",
"node_use_bundled_v8": "true",
"node_use_dtrace": "false",
"node_use_etw": "false",
"node_use_large_pages": "false",
"node_use_openssl": "true",
"node_use_pch": "false",
"node_use_perfctr": "false",
"node_use_v8_platform": "true",
"node_with_ltcg": "false",
"node_without_node_options": "false",
"openssl_fips": "",
"openssl_no_asm": 0,
"shlib_suffix": "so.64",
"target_arch": "x64",
"v8_enable_gdbjit": 0,
"v8_enable_i18n_support": 1,
"v8_enable_inspector": 1,
"v8_no_strict_aliasing": 1,
"v8_optimized_debug": 0,
"v8_promise_internal_field_count": 1,
"v8_random_seed": 0,
"v8_trace_maps": 0,
"v8_typed_array_max_size_in_heap": 0,
"v8_use_snapshot": "true",
"want_separate_host_toolset": 0,
"nodedir": "/home/sensorweb1/.cache/node-gyp/10.19.0",
"standalone_static_library": 1,
"cache_lock_stale": "60000",
"ham_it_up": "",
"legacy_bundling": "",
"sign_git_tag": "",
"user_agent": "npm/6.13.4 node/v10.19.0 linux x64",
"always_auth": "",
"bin_links": "true",
"key": "",
"allow_same_version": "",
"description": "true",
"fetch_retries": "2",
"heading": "npm",
"if_present": "",
"init_version": "1.0.0",
"user": "",
"prefer_online": "",
"noproxy": "",
"force": "",
"only": "",
"read_only": "",
"cache_min": "10",
"init_license": "ISC",
"editor": "vi",
"rollback": "true",
"tag_version_prefix": "v",
"cache_max": "Infinity",
"timing": "",
"userconfig": "/home/sensorweb1/.npmrc",
"engine_strict": "",
"init_author_name": "",
"init_author_url": "",
"preid": "",
"tmp": "/tmp",
"depth": "Infinity",
"package_lock_only": "",
"save_dev": "",
"usage": "",
"metrics_registry": "https://registry.npmjs.org/",
"otp": "",
"package_lock": "true",
"progress": "true",
"https_proxy": "",
"save_prod": "",
"audit": "true",
"cidr": "",
"onload_script": "",
"sso_type": "oauth",
"rebuild_bundle": "true",
"save_bundle": "",
"shell": "/bin/bash",
"dry_run": "",
"format_package_lock": "true",
"prefix": "/usr",
"scope": "",
"browser": "",
"cache_lock_wait": "10000",
"ignore_prepublish": "",
"registry": "https://registry.npmjs.org/",
"save_optional": "",
"searchopts": "",
"versions": "",
"cache": "/home/sensorweb1/.npm",
"send_metrics": "",
"global_style": "",
"ignore_scripts": "",
"version": "",
"local_address": "",
"viewer": "man",
"node_gyp": "/usr/lib/node_modules/npm/node_modules/node-gyp/bin/node-gyp.js",
"audit_level": "low",
"prefer_offline": "",
"color": "true",
"sign_git_commit": "",
"fetch_retry_mintimeout": "10000",
"maxsockets": "50",
"offline": "",
"sso_poll_frequency": "500",
"umask": "0002",
"fund": "true",
"fetch_retry_maxtimeout": "60000",
"logs_max": "10",
"message": "%s",
"ca": "",
"cert": "",
"global": "",
"link": "",
"access": "",
"also": "",
"save": "true",
"unicode": "true",
"before": "",
"long": "",
"production": "",
"searchlimit": "20",
"unsafe_perm": "true",
"update_notifier": "true",
"auth_type": "legacy",
"node_version": "10.19.0",
"tag": "latest",
"git_tag_version": "true",
"commit_hooks": "true",
"script_shell": "",
"shrinkwrap": "true",
"fetch_retry_factor": "10",
"save_exact": "",
"strict_ssl": "true",
"dev": "",
"globalconfig": "/usr/etc/npmrc",
"init_module": "/home/sensorweb1/.npm-init.js",
"parseable": "",
"globalignorefile": "/usr/etc/npmignore",
"cache_lock_retries": "10",
"searchstaleness": "900",
"node_options": "",
"save_prefix": "^",
"scripts_prepend_node_path": "warn-only",
"group": "1000",
"init_author_email": "",
"searchexclude": "",
"git": "git",
"optional": "true",
"json": ""
}
}
| [
"wittspace77@gmail.com"
] | wittspace77@gmail.com |
cb015eac44936cc54ab38c8fc85c628d7da24a26 | d99a84bfcd2709bb04da73979c3ec5ea7e68cef9 | /data/abilities/move.py | 98965503339949433cd4314a3fdf56969155d212 | [] | no_license | nikolr/PythonIsometricJRPG | 151591cac17682807a202f6d4c47e316451b5078 | d6c631fd06fb2b9a858cb9e36ac8e0ee0cee1334 | refs/heads/master | 2023-08-26T12:37:31.237974 | 2021-10-04T15:38:41 | 2021-10-04T15:38:41 | 408,345,232 | 0 | 0 | null | 2021-09-27T09:42:28 | 2021-09-20T07:16:03 | Python | UTF-8 | Python | false | false | 1,057 | py | from data.abilities.ability import Ability
class Move(Ability):
    """Basic movement ability: advance the user's sprite one square."""

    def __init__(self, name: str, potency: int, ap_cost: int, targeting_type, range: int, user=None):
        super().__init__(name, potency, ap_cost, targeting_type, range, user=user)
        self.description = "Move 1 square forward"

    def activate(self):
        """Attempt the move; on success spend AP and advance the turn flow.

        If the current character's action points drop to zero the turn
        passes to the next character in the queue.
        """
        if self.user.sprite.move_a_square() == True:
            # Successful move: pay the ability's AP cost.
            self.user.scene.current_character.action_points = self.user.scene.current_character.action_points - self.ap_cost
            if self.user.scene.current_character.action_points > 0:
                self.user.scene.state_machine.change_state(self.user.scene.turn_state)
            else:
                print("Next turn")
                # Refill AP for the character whose turn just ended.
                self.user.scene.current_character.action_points = self.user.scene.current_character.base_action_points
                # NOTE(review): 'self.group_manager' and 'self.state_machine'
                # are not assigned anywhere visible in this class; elsewhere
                # this method reaches the same objects via 'self.user.scene.*'.
                # Confirm these attributes exist on Ability, otherwise this
                # branch raises AttributeError.
                self.group_manager.determine_turn_queue()
                self.user.scene.current_character = self.group_manager.get_next_character()
                self.state_machine.change_state(self.user.scene.turn_state)
| [
"nikolai.rantimo@gmail.com"
] | nikolai.rantimo@gmail.com |
ac05dc636b481b86f4960bbd377201c4bffdcfe9 | 569498d8a61dd6cdaa515165415d3a72c5dbf0c5 | /part 2/050.py | a014890e32dbfcda0d6f27e272044b4dc8cee371 | [] | no_license | baikzzi/python | 068e0b05ea13ab30a3798e97a25f9ea48084c36b | 112b4565d8bc09eb08b9c152eaf2dd6dd0ba8b35 | refs/heads/master | 2023-03-04T20:45:03.028018 | 2021-02-15T01:53:04 | 2021-02-15T01:53:04 | 338,942,000 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 319 | py | class MyClass:
var = '안녕하세요'
    def sayHello(self):
        """Demonstrate local-variable vs. instance/class-attribute scope."""
        param1 = '안녕'  # local variable: exists only during this call
        self.param2 = '하이'  # instance attribute: created on first call
        print(param1) # prints '안녕'
        print(self.var) # prints '안녕하세요' (class attribute, reached via self)
obj = MyClass()
print(obj.var) # prints '안녕하세요' (class attribute via the instance)
obj.sayHello()
#obj.param1  # would raise AttributeError before sayHello() creates it
"qorwlghks120@gmail.com"
] | qorwlghks120@gmail.com |
35fd3b5566f1aaafda384b69fbcda78b39a3b231 | 334a0efacc8abaa26d92d4a06ef1ba8bb5b7b213 | /clockString2image.py | ab8a88dd1535e142436f310ecce5b87b73ee8cce | [] | no_license | gunman-vagabond/gunclock-tensorflow-flask | 84c39e2ca387f4b9bdf13b6ebf65c77af8f32563 | d6a0af8672d2c6583b385c54248364fd555119a2 | refs/heads/master | 2022-12-11T06:14:32.194994 | 2020-02-11T03:04:05 | 2020-02-11T03:04:05 | 236,270,374 | 0 | 0 | null | 2022-09-23T22:35:25 | 2020-01-26T05:18:08 | Python | UTF-8 | Python | false | false | 447 | py | import os,csv
import sys
import numpy as np
from PIL import Image,ImageDraw,ImageFont
def clockString2image(clockString):
text = clockString
text_split = text.split("\n")
clocksize = len(text_split) - 1
img = Image.new("RGB", (clocksize*12, clocksize*12), (255,255,255))
draw = ImageDraw.Draw(img)
for i in range(clocksize):
draw.text((0,i*12),text_split[i],(0,0,0))
img = img.resize((80,80))
return img
| [
"noreply@github.com"
] | noreply@github.com |
91e05bb48354f85dc8b7543296aeacca210c3e50 | 2fcef29373541c8707b4047c444cfd567aef03a4 | /src/bttracker/_version.py | 805186d291b945c1733ebe1b3ee68f7d3081fdab | [] | no_license | manjuladangalla/BtTracker | a6c94e752abfca89cad6b47e246847aec7203b3b | 9641d7f6ac62c91986c1fd83879c4852bf208448 | refs/heads/master | 2022-12-03T15:39:31.276673 | 2020-08-07T03:35:02 | 2020-08-07T03:35:02 | 285,728,593 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 18,442 | py |
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by githubs download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.18 (https://github.com/warner/python-versioneer)
"""Git implementation of _version.py."""
import errno
import os
import re
import subprocess
import sys
def get_keywords():
    """Get the keywords needed to look up the version information."""
    # These $Format:...$ placeholders are substituted by 'git archive' (via
    # the export-subst attribute).  setup.py/versioneer.py greps for the
    # variable names, so each assignment must stay on its own line.
    git_refnames = "$Format:%d$"
    git_full = "$Format:%H$"
    git_date = "$Format:%ci$"
    return {"refnames": git_refnames, "full": git_full, "date": git_date}
class VersioneerConfig:
    """Container for Versioneer configuration parameters.

    Attributes (VCS, style, tag_prefix, ...) are assigned dynamically by
    get_config().
    """
def get_config():
    """Create, populate and return the VersioneerConfig() object."""
    # these strings are filled in when 'setup.py versioneer' creates
    # _version.py
    cfg = VersioneerConfig()
    cfg.VCS = "git"
    cfg.style = "pep440"  # render PEP 440-style version strings
    cfg.tag_prefix = ""  # empty: tags are bare versions, e.g. "1.2.3"
    cfg.parentdir_prefix = "bttracker-"  # for the tarball-directory fallback
    cfg.versionfile_source = "src/bttracker/_version.py"
    cfg.verbose = False
    return cfg
class NotThisMethod(Exception):
    """Exception raised if a method is not valid for the current scenario.

    Used as control flow: each version-discovery strategy raises it so
    get_versions() can fall through to the next strategy.
    """
LONG_VERSION_PY = {}
HANDLERS = {}
def register_vcs_handler(vcs, method):  # decorator
    """Decorator to mark a method as the handler for a particular VCS."""
    def decorate(f):
        """Store f in HANDLERS[vcs][method] and return it unchanged."""
        HANDLERS.setdefault(vcs, {})[method] = f
        return f
    return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False,
                env=None):
    """Call the given command(s).

    Tries each candidate executable name in *commands* until one launches,
    then returns (stdout, returncode).  Returns (None, None) when no
    candidate could be started and (None, returncode) when the process
    exited non-zero.
    """
    assert isinstance(commands, list)
    p = None
    for c in commands:
        try:
            dispcmd = str([c] + args)
            # remember shell=False, so use git.cmd on windows, not just git
            p = subprocess.Popen([c] + args, cwd=cwd, env=env,
                                 stdout=subprocess.PIPE,
                                 stderr=(subprocess.PIPE if hide_stderr
                                         else None))
            break
        except EnvironmentError:
            e = sys.exc_info()[1]
            if e.errno == errno.ENOENT:
                # Executable not found: try the next candidate name.
                continue
            if verbose:
                print("unable to run %s" % dispcmd)
                print(e)
            return None, None
    else:
        # Loop exhausted without a 'break': no candidate could be launched.
        if verbose:
            print("unable to find command, tried %s" % (commands,))
        return None, None
    stdout = p.communicate()[0].strip()
    if sys.version_info[0] >= 3:
        stdout = stdout.decode()
    if p.returncode != 0:
        if verbose:
            print("unable to run %s (error)" % dispcmd)
            print("stdout was %s" % stdout)
        return None, p.returncode
    return stdout, p.returncode
def versions_from_parentdir(parentdir_prefix, root, verbose):
    """Try to determine the version from the parent directory name.

    Source tarballs conventionally unpack into a directory named
    ``<project>-<version>``; walk up to two extra directory levels looking
    for a name starting with *parentdir_prefix* and read the version from
    its suffix.
    """
    tried = []
    for _ in range(3):
        candidate = os.path.basename(root)
        if candidate.startswith(parentdir_prefix):
            return {
                "version": candidate[len(parentdir_prefix):],
                "full-revisionid": None,
                "dirty": False,
                "error": None,
                "date": None,
            }
        tried.append(root)
        root = os.path.dirname(root)  # walk up one level
    if verbose:
        print("Tried directories %s but none started with prefix %s" %
              (str(tried), parentdir_prefix))
    raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
"""Extract version information from the given file."""
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = {}
try:
f = open(versionfile_abs, "r")
for line in f.readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
if line.strip().startswith("git_date ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["date"] = mo.group(1)
f.close()
except EnvironmentError:
pass
return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
"""Get version information from git keywords."""
if not keywords:
raise NotThisMethod("no keywords at all, weird")
date = keywords.get("date")
if date is not None:
# git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant
# datestamp. However we prefer "%ci" (which expands to an "ISO-8601
# -like" string, which we must then edit to make compliant), because
# it's been around since git-1.5.3, and it's too difficult to
# discover which version we're using, or to work around using an
# older one.
date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
refs = set([r.strip() for r in refnames.strip("()").split(",")])
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = set([r for r in refs if re.search(r'\d', r)])
if verbose:
print("discarding '%s', no digits" % ",".join(refs - tags))
if verbose:
print("likely tags: %s" % ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix):]
if verbose:
print("picking %s" % r)
return {"version": r,
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": None,
"date": date}
# no suitable tags, so version is "0+unknown", but full hex is still there
if verbose:
print("no suitable tags, using unknown + full revision id")
return {"version": "1.0",
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": "no suitable tags", "date": None}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
"""Get version from 'git describe' in the root of the source tree.
This only gets called if the git-archive 'subst' keywords were *not*
expanded, and _version.py hasn't already been rewritten with a short
version string, meaning we're inside a checked out source tree.
"""
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root,
hide_stderr=True)
if rc != 0:
if verbose:
print("Directory %s not under git control" % root)
raise NotThisMethod("'git rev-parse --git-dir' returned error")
# if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
# if there isn't one, this yields HEX[-dirty] (no NUM)
describe_out, rc = run_command(GITS, ["describe", "--tags", "--dirty",
"--always", "--long",
"--match", "%s*" % tag_prefix],
cwd=root)
# --long was added in git-1.5.5
if describe_out is None:
raise NotThisMethod("'git describe' failed")
describe_out = describe_out.strip()
full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
if full_out is None:
raise NotThisMethod("'git rev-parse' failed")
full_out = full_out.strip()
pieces = {}
pieces["long"] = full_out
pieces["short"] = full_out[:7] # maybe improved later
pieces["error"] = None
# parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
# TAG might have hyphens.
git_describe = describe_out
# look for -dirty suffix
dirty = git_describe.endswith("-dirty")
pieces["dirty"] = dirty
if dirty:
git_describe = git_describe[:git_describe.rindex("-dirty")]
# now we have TAG-NUM-gHEX or HEX
if "-" in git_describe:
# TAG-NUM-gHEX
mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
if not mo:
# unparseable. Maybe git-describe is misbehaving?
pieces["error"] = ("unable to parse git-describe output: '%s'"
% describe_out)
return pieces
# tag
full_tag = mo.group(1)
if not full_tag.startswith(tag_prefix):
if verbose:
fmt = "tag '%s' doesn't start with prefix '%s'"
print(fmt % (full_tag, tag_prefix))
pieces["error"] = ("tag '%s' doesn't start with prefix '%s'"
% (full_tag, tag_prefix))
return pieces
pieces["closest-tag"] = full_tag[len(tag_prefix):]
# distance: number of commits since tag
pieces["distance"] = int(mo.group(2))
# commit: short hex revision ID
pieces["short"] = mo.group(3)
else:
# HEX: no tags
pieces["closest-tag"] = None
count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"],
cwd=root)
pieces["distance"] = int(count_out) # total number of commits
# commit date: see ISO-8601 comment in git_versions_from_keywords()
date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"],
cwd=root)[0].strip()
pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
return pieces
def plus_or_dot(pieces):
    """Return the local-version separator: "." if the closest tag already
    contains a "+", otherwise "+"."""
    return "." if "+" in pieces.get("closest-tag", "") else "+"
def render_pep440(pieces):
    """Build up version string, with post-release "local version identifier".

    Goal: TAG[+DISTANCE.gHEX[.dirty]].  A tagged build that is then dirtied
    renders as TAG+0.gHEX.dirty.

    Exceptions:
    1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
    """
    tag = pieces["closest-tag"]
    if not tag:
        # exception #1: no tag anywhere in history
        out = "0+untagged.%d.g%s" % (pieces["distance"], pieces["short"])
        if pieces["dirty"]:
            out += ".dirty"
        return out
    out = tag
    if pieces["distance"] or pieces["dirty"]:
        sep = "." if "+" in tag else "+"  # inlined plus_or_dot()
        out += "%s%d.g%s" % (sep, pieces["distance"], pieces["short"])
        if pieces["dirty"]:
            out += ".dirty"
    return out
def render_pep440_pre(pieces):
    """TAG[.post.devDISTANCE] -- No -dirty.

    Exceptions:
    1: no tags. 0.post.devDISTANCE
    """
    tag = pieces["closest-tag"]
    if not tag:
        # exception #1
        return "0.post.dev%d" % pieces["distance"]
    if not pieces["distance"]:
        return tag
    return tag + ".post.dev%d" % pieces["distance"]
def render_pep440_post(pieces):
    """TAG[.postDISTANCE[.dev0]+gHEX] .

    ".dev0" marks a dirty tree.  Note that .dev0 sorts backwards (a dirty
    tree appears "older" than the corresponding clean one), but you
    shouldn't be releasing software with -dirty anyways.

    Exceptions:
    1: no tags. 0.postDISTANCE[.dev0]
    """
    tag = pieces["closest-tag"]
    dev = ".dev0" if pieces["dirty"] else ""
    if not tag:
        # exception #1
        return "0.post%d%s+g%s" % (pieces["distance"], dev, pieces["short"])
    if not (pieces["distance"] or pieces["dirty"]):
        return tag
    sep = "." if "+" in tag else "+"  # inlined plus_or_dot()
    return "%s.post%d%s%sg%s" % (tag, pieces["distance"], dev, sep,
                                 pieces["short"])
def render_pep440_old(pieces):
    """TAG[.postDISTANCE[.dev0]] .

    ".dev0" marks a dirty tree.

    Exceptions:
    1: no tags. 0.postDISTANCE[.dev0]
    """
    tag = pieces["closest-tag"]
    if tag and not (pieces["distance"] or pieces["dirty"]):
        return tag
    base = tag if tag else "0"  # exception #1 falls back to "0"
    out = "%s.post%d" % (base, pieces["distance"])
    if pieces["dirty"]:
        out += ".dev0"
    return out
def render_git_describe(pieces):
    """TAG[-DISTANCE-gHEX][-dirty].

    Like 'git describe --tags --dirty --always'.

    Exceptions:
    1: no tags. HEX[-dirty] (note: no 'g' prefix)
    """
    tag = pieces["closest-tag"]
    if tag:
        out = tag
        if pieces["distance"]:
            out += "-%d-g%s" % (pieces["distance"], pieces["short"])
    else:
        out = pieces["short"]  # exception #1
    return out + ("-dirty" if pieces["dirty"] else "")
def render_git_describe_long(pieces):
    """TAG-DISTANCE-gHEX[-dirty].

    Like 'git describe --tags --dirty --always -long'.
    The distance/hash is unconditional.

    Exceptions:
    1: no tags. HEX[-dirty] (note: no 'g' prefix)
    """
    tag = pieces["closest-tag"]
    if tag:
        out = "%s-%d-g%s" % (tag, pieces["distance"], pieces["short"])
    else:
        out = pieces["short"]  # exception #1
    return out + ("-dirty" if pieces["dirty"] else "")
def render(pieces, style):
    """Render the given version pieces into the requested style."""
    if pieces["error"]:
        # Discovery failed: report "unknown" plus whatever revision we have.
        return {"version": "unknown",
                "full-revisionid": pieces.get("long"),
                "dirty": None,
                "error": pieces["error"],
                "date": None}
    if not style or style == "default":
        style = "pep440"  # the default
    renderers = {
        "pep440": render_pep440,
        "pep440-pre": render_pep440_pre,
        "pep440-post": render_pep440_post,
        "pep440-old": render_pep440_old,
        "git-describe": render_git_describe,
        "git-describe-long": render_git_describe_long,
    }
    if style not in renderers:
        raise ValueError("unknown style '%s'" % style)
    rendered = renderers[style](pieces)
    return {"version": rendered, "full-revisionid": pieces["long"],
            "dirty": pieces["dirty"], "error": None,
            "date": pieces.get("date")}
def get_versions():
    """Get version information or return default if unable to do so."""
    # I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
    # __file__, we can work backwards from there to the root. Some
    # py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
    # case we can only use expanded keywords.
    cfg = get_config()
    verbose = cfg.verbose
    try:
        return git_versions_from_keywords(get_keywords(), cfg.tag_prefix,
                                          verbose)
    except NotThisMethod:
        pass
    try:
        root = os.path.realpath(__file__)
        # versionfile_source is the relative path from the top of the source
        # tree (where the .git directory might live) to this file. Invert
        # this to find the root from __file__.
        for i in cfg.versionfile_source.split('/'):
            root = os.path.dirname(root)
    except NameError:
        # NOTE(review): stock versioneer-0.18 reports "0+unknown" in these
        # fallback dicts; this file was modified to report "1.0" -- confirm.
        return {"version": "1.0", "full-revisionid": None,
                "dirty": None,
                "error": "unable to find root of source tree",
                "date": None}
    try:
        pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)
        return render(pieces, cfg.style)
    except NotThisMethod:
        pass
    try:
        if cfg.parentdir_prefix:
            return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
    except NotThisMethod:
        pass
    # Last resort: every strategy failed (same "1.0" local modification).
    return {"version": "1.0", "full-revisionid": None,
            "dirty": None,
            "error": "unable to compute version", "date": None}
| [
"noreply@github.com"
] | noreply@github.com |
8405cbf135eef4a8ea7864cace75bd9e40f91ebf | adbc667076cc5d35f2411239c1d717558df7f9a2 | /ecom/blog/migrations/0001_initial.py | 29cf764f4f6b1bfbc605d0254ade79b4f5b094be | [] | no_license | satyam2912/E-commerce-django | a455f9943c3e04de4b74a4101010fbccf4b10bc4 | 455d28ce6a1ec502bdccfc4c430c5f86a812127b | refs/heads/master | 2023-03-01T17:22:11.923843 | 2021-02-14T11:42:23 | 2021-02-14T11:42:23 | 326,479,986 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,041 | py | # Generated by Django 2.2.12 on 2020-09-29 16:54
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by 'makemigrations'; don't hand-edit applied migrations,
    # create a follow-up migration instead.

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Blogpost',
            fields=[
                ('post_id', models.AutoField(primary_key=True, serialize=False)),
                ('title', models.CharField(max_length=50)),
                # Three heading/content pairs for the post body sections.
                ('head0', models.CharField(default='', max_length=500)),
                ('chead0', models.CharField(default='', max_length=5000)),
                ('head1', models.CharField(default='', max_length=500)),
                ('chead1', models.CharField(default='', max_length=5000)),
                ('head2', models.CharField(default='', max_length=500)),
                ('chead2', models.CharField(default='', max_length=5000)),
                ('publish_date', models.DateField()),
                # NOTE(review): thumbnails upload to 'shop/images' even though
                # this is the blog app -- confirm the path is intentional.
                ('thumbnail', models.ImageField(default='', upload_to='shop/images')),
            ],
        ),
    ]
| [
"pandeysatyam1996@gmail.com"
] | pandeysatyam1996@gmail.com |
36303e57f19d8642e572535bcda07112eb1b0a31 | e1266f257c741395be3f9f3fe02c34a652c3612e | /scrapyP1/scrapyP1/settings.py | 3b8e8c21ffcf3c53286d11e9b94ccd902342b438 | [] | no_license | zhaocc1106/Web-spider | 94ffadcaadeb38fbdc1da8f5841a5a623565b236 | ed0de975c22c2d7235dfc668b99ea946ad99a6c5 | refs/heads/master | 2020-04-09T04:01:49.756553 | 2018-12-02T02:51:51 | 2018-12-02T02:51:51 | 160,007,481 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,091 | py | # -*- coding: utf-8 -*-
# Scrapy settings for scrapyP1 project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# https://doc.scrapy.org/en/latest/topics/settings.html
# https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
# https://doc.scrapy.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'scrapyP1'
SPIDER_MODULES = ['scrapyP1.spiders']
NEWSPIDER_MODULE = 'scrapyP1.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'scrapyP1 (+http://www.yourdomain.com)'
# Obey robots.txt rules
ROBOTSTXT_OBEY = True
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See https://doc.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
#COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
#}
# Enable or disable spider middlewares
# See https://doc.scrapy.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
# 'scrapyP1.middlewares.Scrapyp1SpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
# 'scrapyP1.middlewares.Scrapyp1DownloaderMiddleware': 543,
#}
# Enable or disable extensions
# See https://doc.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See https://doc.scrapy.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
'scrapyP1.pipelines.Scrapyp1Pipeline': 300,
}
# Enable and configure the AutoThrottle extension (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
| [
"450959507@qq.com"
] | 450959507@qq.com |
327de0fb6195fa9d70bb2f59a1b649c60f9ad8da | 31900bdf5648061a3093230711c5394e20b90436 | /usr/lib/enigma2/python/Plugins/Extensions/MediaPortal/additions/porn/cliphunter.py | f0ad44408f3a495045054598cafc29b1ceb97fb7 | [] | no_license | linuxbox10/enigma2-plugin-extensions-mediaportal | aa6f14ecfc42ce91e22c487070541459a1ab820c | e6b388918c186442718e7200e03c83d0db260831 | refs/heads/master | 2021-05-01T18:50:50.332850 | 2018-02-10T11:33:48 | 2018-02-10T11:33:48 | 121,009,954 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,746 | py | # -*- coding: utf-8 -*-
###############################################################################################
#
# MediaPortal for Dreambox OS
#
# Coded by MediaPortal Team (c) 2013-2018
#
# This plugin is open source but it is NOT free software.
#
# This plugin may only be distributed to and executed on hardware which
# is licensed by Dream Property GmbH. This includes commercial distribution.
# In other words:
# It's NOT allowed to distribute any parts of this plugin or its source code in ANY way
# to hardware which is NOT licensed by Dream Property GmbH.
# It's NOT allowed to execute this plugin and its source code or even parts of it in ANY way
# on hardware which is NOT licensed by Dream Property GmbH.
#
# This applies to the source code as a whole as well as to parts of it, unless
# explicitely stated otherwise.
#
# If you want to use or modify the code or parts of it,
# you have to keep OUR license and inform us about the modifications, but it may NOT be
# commercially distributed other than under the conditions noted above.
#
# As an exception regarding execution on hardware, you are permitted to execute this plugin on VU+ hardware
# which is licensed by satco europe GmbH, if the VTi image is used on that hardware.
#
# As an exception regarding modifcations, you are NOT permitted to remove
# any copy protections implemented in this plugin or change them for means of disabling
# or working around the copy protections, unless the change has been explicitly permitted
# by the original authors. Also decompiling and modification of the closed source
# parts is NOT permitted.
#
# Advertising with this plugin is NOT allowed.
# For other uses, permission from the authors is necessary.
#
###############################################################################################
from Plugins.Extensions.MediaPortal.plugin import _
from Plugins.Extensions.MediaPortal.resources.imports import *
agent='Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36'
default_cover = "file://%s/cliphunter.png" % (config.mediaportal.iconcachepath.value + "logos")
class cliphunterGenreScreen(MPScreen):
def __init__(self, session):
MPScreen.__init__(self, session, skin='MP_PluginDescr', default_cover=default_cover)
self["actions"] = ActionMap(["MP_Actions"], {
"ok" : self.keyOK,
"0" : self.closeAll,
"cancel" : self.keyCancel,
"up" : self.keyUp,
"down" : self.keyDown,
"right" : self.keyRight,
"left" : self.keyLeft
}, -1)
self['title'] = Label("cliphunter.com")
self['ContentTitle'] = Label("Genre:")
self.keyLocked = True
self.suchString = ''
self.genreliste = []
self.ml = MenuList([], enableWrapAround=True, content=eListboxPythonMultiContent)
self['liste'] = self.ml
self.onLayoutFinish.append(self.layoutFinished)
def layoutFinished(self):
url = "http://www.cliphunter.com/categories/"
getPage(url, agent=agent).addCallback(self.genreData).addErrback(self.dataError)
def genreData(self, data):
Cats = re.findall(' <a href="(/categories/.*?)" title="(.*?)">.*?<img src="(.*?)"/>', data, re.S)
if Cats:
for (Url, Title, Image) in Cats:
Url = 'http://www.cliphunter.com%s/' % Url.replace(' ','%20')
if not Title == "All":
self.genreliste.append((Title, Url, Image))
self.genreliste.sort()
self.genreliste.insert(0, ("Pornstars", 'http://www.cliphunter.com/pornstars/top/overview/', default_cover))
self.genreliste.insert(0, ("Top Year", 'http://www.cliphunter.com/popular/ratings/year/', default_cover))
self.genreliste.insert(0, ("Top Month", 'http://www.cliphunter.com/popular/ratings/month/', default_cover))
self.genreliste.insert(0, ("Top Week", 'http://www.cliphunter.com/popular/ratings/week/', default_cover))
self.genreliste.insert(0, ("Top Yesterday", 'http://www.cliphunter.com/popular/ratings/yesterday/', default_cover))
self.genreliste.insert(0, ("Top Today", 'http://www.cliphunter.com/popular/ratings/today/', default_cover))
self.genreliste.insert(0, ("Hall of Fame", 'http://www.cliphunter.com/popular/ratings/all/', default_cover))
self.genreliste.insert(0, ("Newest", 'http://www.cliphunter.com/categories/All/', default_cover))
self.genreliste.insert(0, ("--- Search ---", "callSuchen", default_cover))
self.ml.setList(map(self._defaultlistcenter, self.genreliste))
self.ml.moveToIndex(0)
self.keyLocked = False
self.showInfos()
def showInfos(self):
Image = self['liste'].getCurrent()[0][2]
CoverHelper(self['coverArt']).getCover(Image)
def keyOK(self):
if self.keyLocked:
return
Name = self['liste'].getCurrent()[0][0]
Link = self['liste'].getCurrent()[0][1]
if Name == "--- Search ---":
self.suchen()
elif Name == "Pornstars":
self.session.open(cliphunterPornstarScreen, Link, Name)
else:
self.session.open(cliphunterFilmScreen, Link, Name)
def SuchenCallback(self, callback = None, entry = None):
if callback is not None and len(callback):
self.suchString = callback.replace(' ', '%20')
Link = '%s' % (self.suchString)
Name = "--- Search ---"
self.session.open(cliphunterFilmScreen, Link, Name)
class cliphunterPornstarScreen(MPScreen, ThumbsHelper):
def __init__(self, session, Link, Name):
self.Link = Link
self.Name = Name
MPScreen.__init__(self, session, skin='MP_PluginDescr', default_cover=default_cover)
ThumbsHelper.__init__(self)
self["actions"] = ActionMap(["MP_Actions"], {
"ok" : self.keyOK,
"0" : self.closeAll,
"cancel" : self.keyCancel,
"5" : self.keyShowThumb,
"up" : self.keyUp,
"down" : self.keyDown,
"right" : self.keyRight,
"left" : self.keyLeft,
"nextBouquet" : self.keyPageUp,
"prevBouquet" : self.keyPageDown,
"green" : self.keyPageNumber
}, -1)
self['title'] = Label("cliphunter.com")
self['ContentTitle'] = Label("Genre: %s" % self.Name)
self['F2'] = Label(_("Page"))
self['Page'] = Label(_("Page:"))
self.keyLocked = True
self.page = 1
self.lastpage = 1
self.genreliste = []
self.ml = MenuList([], enableWrapAround=True, content=eListboxPythonMultiContent)
self['liste'] = self.ml
self.onLayoutFinish.append(self.loadPage)
def loadPage(self):
self.keyLocked = True
self.genreliste = []
url = "%s%s" % (self.Link, str(self.page))
getPage(url, agent=agent).addCallback(self.genreData).addErrback(self.dataError)
def genreData(self, data):
self.getLastPage(data, '', 'maxPages="(.*?)"')
Parse = re.search('photoGrid">(.*?)class="clearfix">', data, re.S)
Cats = re.findall('href="(.*?)">.*?src=\'(.*?)\'/>.*?<span>(.*?)</span>', Parse.group(1), re.S)
if Cats:
for (Url, Image, Title) in Cats:
Url = "http://www.cliphunter.com" + Url + "/movies/"
self.genreliste.append((Title.title(), Url, Image))
self.ml.setList(map(self._defaultlistleft, self.genreliste))
self.ml.moveToIndex(0)
self.keyLocked = False
self.th_ThumbsQuery(self.genreliste, 0, 1, 2, None, None, self.page, int(self.lastpage), mode=1)
self.showInfos()
def showInfos(self):
Title = self['liste'].getCurrent()[0][0]
Image = self['liste'].getCurrent()[0][2]
self['name'].setText(Title)
CoverHelper(self['coverArt']).getCover(Image)
def keyOK(self):
if self.keyLocked:
return
Name = self['liste'].getCurrent()[0][0]
Link = self['liste'].getCurrent()[0][1]
self.session.open(cliphunterFilmScreen, Link, Name)
class cliphunterFilmScreen(MPScreen, ThumbsHelper):
def __init__(self, session, Link, Name):
self.Link = Link
self.Name = Name
MPScreen.__init__(self, session, skin='MP_PluginDescr', default_cover=default_cover)
ThumbsHelper.__init__(self)
self["actions"] = ActionMap(["MP_Actions"], {
"ok" : self.keyOK,
"0" : self.closeAll,
"cancel" : self.keyCancel,
"5" : self.keyShowThumb,
"up" : self.keyUp,
"down" : self.keyDown,
"right" : self.keyRight,
"left" : self.keyLeft,
"nextBouquet" : self.keyPageUp,
"prevBouquet" : self.keyPageDown,
"green" : self.keyPageNumber
}, -1)
self['title'] = Label("cliphunter.com")
self['ContentTitle'] = Label("Genre: %s" % self.Name)
self['F2'] = Label(_("Page"))
self['Page'] = Label(_("Page:"))
self.keyLocked = True
self.page = 1
self.lastpage = 1
self.filmliste = []
self.ml = MenuList([], enableWrapAround=True, content=eListboxPythonMultiContent)
self['liste'] = self.ml
self.onLayoutFinish.append(self.loadPage)
def loadPage(self):
self.keyLocked = True
self['name'].setText(_('Please wait...'))
self.filmliste = []
if re.match(".*?Search", self.Name):
url = "http://www.cliphunter.com/search/%s/%s" % (self.Link, str(self.page))
else:
url = "%s%s" % (self.Link, str(self.page))
getPage(url, agent=agent).addCallback(self.loadData).addErrback(self.dataError)
def loadData(self, data):
self.getLastPage(data, '', 'maxPages="(.*?)"')
Movies = re.findall('class="t"\shref="(/w/\d+/(.*?))".*?class="i"\ssrc="(.*?)".*?class="tr">(.*?)</div>.*?class="vttl.*?">(.*?)</a>', data, re.S)
if Movies:
for (Url, TitleUrl, Image, Runtime, Title) in Movies:
Url = "http://www.cliphunter.com" + Url
self.filmliste.append((TitleUrl.replace('_',' '), Url, Image, Runtime))
if len(self.filmliste) == 0:
self.filmliste.append((_('No videos found!'), '', None, ''))
self.ml.setList(map(self._defaultlistleft, self.filmliste))
self.ml.moveToIndex(0)
self.keyLocked = False
self.th_ThumbsQuery(self.filmliste, 0, 1, 2, None, None, self.page, int(self.lastpage), mode=1)
self.showInfos()
def showInfos(self):
title = self['liste'].getCurrent()[0][0]
url = self['liste'].getCurrent()[0][1]
pic = self['liste'].getCurrent()[0][2]
runtime = self['liste'].getCurrent()[0][3]
self['handlung'].setText("Runtime: %s" % runtime)
self['name'].setText(title)
CoverHelper(self['coverArt']).getCover(pic)
def keyOK(self):
if self.keyLocked:
return
Link = self['liste'].getCurrent()[0][1]
self.keyLocked = True
getPage(Link, agent=agent).addCallback(self.getVideoPage).addErrback(self.dataError)
def getVideoPage(self, data):
url = re.findall('"url":"(.*?)"}', data, re.S)
if url:
url = url[-1]
url = url.replace('\u0026', '.')
translation_table = {
'a': 'h', 'd': 'e', 'e': 'v', 'f': 'o', 'g': 'f', 'i': 'd', 'l': 'n',
'm': 'a', 'n': 'm', 'p': 'u', 'q': 't', 'r': 's', 'v': 'p', 'x': 'r',
'y': 'l', 'z': 'i',
'$': ':', '&': '.', '(': '=', '^': '&', '=': '/',
}
url = ''.join(translation_table.get(c, c) for c in url)
self.keyLocked = False
Title = self['liste'].getCurrent()[0][0]
self.session.open(SimplePlayer, [(Title, url)], showPlaylist=False, ltype='cliphunter') | [
"jaysmith940@hotmail.co.uk"
] | jaysmith940@hotmail.co.uk |
fe11ab8369162212f8ea26d7d1324131d3d39039 | 7c285bc226eb1424a7b9dae154301e92af08e2ee | /.c9/metadata/environment/products/models.py | 2d3c42af567c10672fc988634cbb7a3ce5766a4a | [] | no_license | JShad30/ecommerce | f0755d06e2790a9456b3b90f6e8cd7bb9e3f5f51 | 1634618b00dee14400948d4d06321d02a999a5c4 | refs/heads/master | 2020-06-20T03:40:41.073735 | 2019-07-15T10:45:30 | 2019-07-15T10:45:30 | 196,979,298 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 460 | py | {"filter":false,"title":"models.py","tooltip":"/products/models.py","ace":{"folds":[],"scrolltop":0,"scrollleft":0,"selection":{"start":{"row":10,"column":24},"end":{"row":10,"column":24},"isBackwards":false},"options":{"guessTabSize":true,"useWrapMode":false,"wrapToView":true},"firstLineState":{"row":45,"mode":"ace/mode/python"}},"hash":"e0b274e4a9a212f98c452e16d3b65848db0d56f5","undoManager":{"mark":-1,"position":-1,"stack":[]},"timestamp":1563055655208} | [
"ubuntu@ip-172-31-80-73.ec2.internal"
] | ubuntu@ip-172-31-80-73.ec2.internal |
00ea6f281f439dcb4972df7de507c087190b305f | fd8bbeed2fe5de26cce3630bab9ba477b371b3aa | /csv_to_android_table_layout.py | 047c6e145a029d6498d844bbfd513cd120d8fff5 | [] | no_license | himanshugarg/scripts | b905820a1a1e6ddeb0870d183b9ceb8b98c666fe | 88140d9784b2fd236556f766fccd266f36b02da2 | refs/heads/master | 2023-09-02T17:20:17.220500 | 2021-11-21T02:33:32 | 2021-11-21T02:33:32 | 121,906,994 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,426 | py | import csv
import sys
import os
import pdb
print """<?xml version="1.0" encoding="utf-8"?>
<TableLayout
xmlns:android="http://schemas.android.com/apk/res/android"
android:layout_width="match_parent"
android:layout_height="match_parent"
android:shrinkColumns="*"
android:stretchColumns="*">"""
#extract table id prefix from csv file name
table_name = os.path.splitext(os.path.basename(sys.argv[1]))[0]
with open(sys.argv[1], 'rb') as csvfile:
csvreader = csv.reader(csvfile, delimiter=',', quotechar='"')
rownum = 0
for row in csvreader:
print """
<TableRow
android:id="@+id/{0}_row_{1}"
android:layout_height="wrap_content"
android:layout_width="match_parent">""".format(table_name, rownum)
colnum = 0
for field in row:
if rownum == 0:
print """
<TextView
android:textStyle="bold"
android:id="@+id/{0}_row_{1}_col_{2}"
android:text="{3}" />""".format(table_name, rownum, colnum, field.strip())
else:
print """
<TextView
android:id="@+id/{0}_row_{1}_col_{2}"
android:text="{3}" />""".format(table_name, rownum, colnum, field.strip())
colnum = colnum+1
print """
</TableRow>"""
rownum = rownum+1
print """
</TableLayout>"""
| [
"noreply@github.com"
] | noreply@github.com |
8b564330a2dd2c16aace1926c913fcca01a19d48 | fd19e9dc496d7af22cbdd8cd8d185d4e99583c18 | /blog/urls.py | 15b95e2762e9645f4f966fa434d380810ee61a13 | [] | no_license | averlor/Blog_django | aa5e652e6970cf5aac6c8b155134685076bc9a25 | 53c324be0e2ca3b4fe5e4cc245fec361375d57de | refs/heads/master | 2020-04-08T07:39:36.313018 | 2018-11-26T15:39:34 | 2018-11-26T15:39:34 | 159,146,773 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 311 | py | from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^$', views.post_list, name='post_list'),
url(r'^post/(?P<pk>\d+)/$', views.post_detail, name='post_detail'),
url(r'^post/new/$', views.post_new, name='post_new'),
url(r'^post/(?P<pk>\d+)/edit/$',views.post_edit,name='post_edit'),
] | [
"vabramanc@gmail.com"
] | vabramanc@gmail.com |
70c2e83deb862bcaf5c7853c8fc59b7e6fda372a | 2c4739888c53871524eb883f884470932c93542a | /PyPoll/Main/main.py | 14dffa7ea8bf1fea7e838735332869024f228e15 | [] | no_license | ncastal/python-challenge | c3171530437cd499f40f27cdb0f66b8a336ec62a | cc4eee24dbbf411c067252e4ad5b04672f9d77bc | refs/heads/master | 2020-09-16T14:27:59.102292 | 2019-12-06T03:14:58 | 2019-12-06T03:14:58 | 223,798,869 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,865 | py | import os #importing libraries
import csv
os.chdir(os.path.dirname(os.path.abspath(__file__))) #change working directory to one where the python file is located
election_csv = os.path.join("..","Resources","election_data.csv") #giving the location for election_data.csv
election_result = os.path.join("..","Output","election_result.txt") #setting location for election result text file
print("Election Results")
print("-------------------------")
with open(election_csv, newline="") as csvfile: #reading election_data.csv
csvreader = csv.reader(csvfile, delimiter=",")
csv_header = next(csvfile) #skipping header
total_votes=0 #variable to count how many total votes
khan_votes=0 #variables to count how many votes for each canidate
correy_votes=0
li_votes=0
otooley_votes=0
for row in csvreader:
total_votes=total_votes+1 #counting total votes
if row[2]=="Khan": #If statements to count votes for each canidate
khan_votes=khan_votes+1
if row[2]=="Correy":
correy_votes=correy_votes+1
if row[2]=="Li":
li_votes=li_votes+1
if row[2]=="O'Tooley":
otooley_votes=otooley_votes+1
canidates={"Khan":khan_votes,"Correy":correy_votes,"Li":li_votes,"O'Tooley":otooley_votes} #creating a dictonary for the canidates and votes cast for them
percent_khan = round((khan_votes/total_votes)*100,2) #calculating the percentage of the votes each candiate recieved
percent_correy = round((correy_votes/total_votes)*100,2)
percent_li = round((li_votes/total_votes)*100,2)
percent_otooley=round((otooley_votes/total_votes)*100,2)
print(f"Total votes: {total_votes}") #printing the results
print("-------------------------")
print(f"Khan: {percent_khan}% ({khan_votes})")
print(f"Correy: {percent_correy}% ({correy_votes})")
print(f"Li: {percent_li}% ({li_votes})")
print(f"O'Tooley: {percent_otooley}% ({otooley_votes})")
print("-------------------------")
winner = max(canidates, key=canidates.get) #determines the winner of election by finding the canidate with the most votes
print(f"Winner: {winner}")
print("-------------------------")
with open(election_result, 'w') as writer: #wtiting the results to a text file
writer.writelines('Election Results\n')
writer.writelines(f"Total votes: {total_votes}\n")
writer.writelines("-------------------------\n")
writer.writelines(f"Khan: {percent_khan}% ({khan_votes})\n")
writer.writelines(f"Correy: {percent_correy}% ({correy_votes})\n")
writer.writelines(f"Li: {percent_li}% ({li_votes})\n")
writer.writelines(f"O'Tooley: {percent_otooley}% ({otooley_votes})\n")
writer.writelines("-------------------------\n")
writer.writelines(f"Winner: {winner}\n")
writer.writelines("-------------------------\n") | [
"nick.cast89@gmail.com"
] | nick.cast89@gmail.com |
5771ab25a8f03b02ce6256a0612f9f2a7e8269a9 | 81a02bce72a7db755eb813cb9619c423bb06684d | /core/migrations/0004_auto_20190314_2308.py | e3e1c39da39e49c4d011f278cb2ddf2d92819f71 | [
"MIT"
] | permissive | tsnaf/semita | 5d6f88ea982b5296649cfe65e4689fad45ac1641 | 23cc7954c9bcdf5607592b8d3bb264f69098d5a8 | refs/heads/master | 2020-08-22T16:59:13.381126 | 2019-09-06T00:12:52 | 2019-09-06T00:12:52 | 216,441,977 | 1 | 0 | MIT | 2019-10-20T23:36:21 | 2019-10-20T23:36:21 | null | UTF-8 | Python | false | false | 1,706 | py | # Generated by Django 2.1.7 on 2019-03-14 23:08
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [("core", "0003_grant_attachment")]
operations = [
migrations.CreateModel(
name="Dashboard",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("dash", models.CharField(max_length=50, null=True)),
],
),
migrations.AlterField(
model_name="contact",
name="organisation",
field=models.ForeignKey(
null=True,
on_delete=django.db.models.deletion.CASCADE,
related_name="contactorgslist",
to="core.Organisation",
),
),
migrations.AlterField(
model_name="grant",
name="fund",
field=models.ForeignKey(
null=True,
on_delete=django.db.models.deletion.CASCADE,
related_name="grantfundslist",
to="core.Fund",
),
),
migrations.AlterField(
model_name="grant",
name="organisation",
field=models.ForeignKey(
null=True,
on_delete=django.db.models.deletion.CASCADE,
related_name="grantorgslist",
to="core.Organisation",
),
),
]
| [
"hello@rae.li"
] | hello@rae.li |
a6ae38ab912151500b6230c13145389b39b83dc1 | 01677f99acef3c457b49804990321a7db10f35f7 | /LaneCV.py | c11a6e30953006fb4eb6b4b0e2e619d2383e0909 | [] | no_license | pranitdemiri/Cap | d56569b75b17bcba23d13e4177d4f457965a82cd | 5eb2111b9e0d17ab3ec85e939f8986309210972e | refs/heads/master | 2023-01-06T17:43:58.960119 | 2020-10-21T21:20:39 | 2020-10-21T21:20:39 | 306,150,318 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,321 | py | import cv2
import numpy as np
import logging
import math
_SHOW_IMAGE = False
class HandCodedLaneFollower(object):
def __init__(self, car=None):
logging.info('Creating a HandCodedLaneFollower...')
self.car = car
self.curr_steering_angle = 90
def follow_lane(self, frame):
# Main entry point of the lane follower
show_image("orig", frame)
lane_lines, frame = detect_lane(frame)
final_frame = self.steer(frame, lane_lines)
return final_frame
def steer(self, frame, lane_lines):
logging.debug('steering...')
if len(lane_lines) == 0:
logging.error('No lane lines detected, nothing to do.')
return frame
new_steering_angle = compute_steering_angle(frame, lane_lines)
self.curr_steering_angle = stabilize_steering_angle(self.curr_steering_angle, new_steering_angle,
len(lane_lines))
if self.car is not None:
self.car.front_wheels.turn(self.curr_steering_angle)
curr_heading_image = display_heading_line(frame, self.curr_steering_angle)
show_image("heading", curr_heading_image)
return curr_heading_image
############################
# Frame processing steps
############################
def detect_lane(frame):
logging.debug('detecting lane lines...')
edges = detect_edges(frame)
show_image('edges', edges)
cropped_edges = region_of_interest(edges)
show_image('edges cropped', cropped_edges)
line_segments = detect_line_segments(cropped_edges)
line_segment_image = display_lines(frame, line_segments)
show_image("line segments", line_segment_image)
lane_lines = average_slope_intercept(frame, line_segments)
lane_lines_image = display_lines(frame, lane_lines)
show_image("lane lines", lane_lines_image)
return lane_lines, lane_lines_image
#OPEN CV STARTS HERE
def detect_edges(frame):
hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
show_image("hsv", hsv)
lower_blue = np.array([30, 40, 0])
upper_blue = np.array([150, 255, 255])
mask = cv2.inRange(hsv, lower_blue, upper_blue)
show_image("blue mask", mask)
# detect edges
edges = cv2.Canny(mask, 200, 400)
return edges
def detect_edges_old(frame):
# filter for blue lane lines
hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
show_image("hsv", hsv)
for i in range(16):
lower_blue = np.array([30, 16 * i, 0])
upper_blue = np.array([150, 255, 255])
mask = cv2.inRange(hsv, lower_blue, upper_blue)
show_image("blue mask Sat=%s" % (16 * i), mask)
edges = cv2.Canny(mask, 200, 400)
return edges
#this is for ROI
def region_of_interest(canny):
height, width = canny.shape
mask = np.zeros_like(canny)
polygon = np.array([[
(0, height * 1 / 2),
(width, height * 1 / 2),
(width, height),
(0, height),
]], np.int32)
cv2.fillPoly(mask, polygon, 255)
show_image("mask", mask)
masked_image = cv2.bitwise_and(canny, mask)
return masked_image
def detect_line_segments(cropped_edges):
rho = 1 # precision in pixel, i.e. 1 pixel
angle = np.pi / 180 # degree in radian, i.e. 1 degree
min_threshold = 10 # minimal of votes
line_segments = cv2.HoughLinesP(cropped_edges, rho, angle, min_threshold, np.array([]), minLineLength=8,
maxLineGap=4)
if line_segments is not None:
for line_segment in line_segments:
logging.debug('detected line_segment:')
logging.debug("%s of length %s" % (line_segment, length_of_line_segment(line_segment[0])))
return line_segments
#This is for the left and right lane
def average_slope_intercept(frame, line_segments):
lane_lines = []
if line_segments is None:
logging.info('No line_segment segments detected')
return lane_lines
height, width, _ = frame.shape
left_fit = []
right_fit = []
boundary = 1 / 3
left_region_boundary = width * (1 - boundary) # left lane line segment should be on left 2/3 of the screen
right_region_boundary = width * boundary # right lane line segment should be on left 2/3 of the screen
for line_segment in line_segments:
for x1, y1, x2, y2 in line_segment:
if x1 == x2:
logging.info('skipping vertical line segment (slope=inf): %s' % line_segment)
continue
fit = np.polyfit((x1, x2), (y1, y2), 1)
slope = fit[0]
intercept = fit[1]
if slope < 0:
if x1 < left_region_boundary and x2 < left_region_boundary:
left_fit.append((slope, intercept))
else:
if x1 > right_region_boundary and x2 > right_region_boundary:
right_fit.append((slope, intercept))
left_fit_average = np.average(left_fit, axis=0)
if len(left_fit) > 0:
lane_lines.append(make_points(frame, left_fit_average))
right_fit_average = np.average(right_fit, axis=0)
if len(right_fit) > 0:
lane_lines.append(make_points(frame, right_fit_average))
logging.debug('lane lines: %s' % lane_lines) # [[[316, 720, 484, 432]], [[1009, 720, 718, 432]]]
return lane_lines
def compute_steering_angle(frame, lane_lines):
if len(lane_lines) == 0:
logging.info('No lane lines detected, do nothing')
return -90
height, width, _ = frame.shape
if len(lane_lines) == 1:
logging.debug('Only detected one lane line, just follow it. %s' % lane_lines[0])
x1, _, x2, _ = lane_lines[0][0]
x_offset = x2 - x1
else:
_, _, left_x2, _ = lane_lines[0][0]
_, _, right_x2, _ = lane_lines[1][0]
camera_mid_offset_percent = 0.02
mid = int(width / 2 * (1 + camera_mid_offset_percent))
x_offset = (left_x2 + right_x2) / 2 - mid
y_offset = int(height / 2)
angle_to_mid_radian = math.atan(x_offset / y_offset) # angle (in radian) to center vertical line
angle_to_mid_deg = int(angle_to_mid_radian * 180.0 / math.pi) # angle (in degrees) to center vertical line
steering_angle = angle_to_mid_deg + 90 # this is the steering angle needed by picar front wheel
logging.debug('new steering angle: %s' % steering_angle)
return steering_angle
def stabilize_steering_angle(curr_steering_angle, new_steering_angle, num_of_lane_lines,
max_angle_deviation_two_lines=5, max_angle_deviation_one_lane=1):
if num_of_lane_lines == 2:
# if both lane lines detected, then we can deviate more
max_angle_deviation = max_angle_deviation_two_lines
else:
# if only one lane detected, don't deviate too much
max_angle_deviation = max_angle_deviation_one_lane
angle_deviation = new_steering_angle - curr_steering_angle
if abs(angle_deviation) > max_angle_deviation:
stabilized_steering_angle = int(curr_steering_angle
+ max_angle_deviation * angle_deviation / abs(angle_deviation))
else:
stabilized_steering_angle = new_steering_angle
logging.info('Proposed angle: %s, stabilized angle: %s' % (new_steering_angle, stabilized_steering_angle))
return stabilized_steering_angle
############################
# Utility Functions
############################
def display_lines(frame, lines, line_color=(0, 255, 0), line_width=10):
line_image = np.zeros_like(frame)
if lines is not None:
for line in lines:
for x1, y1, x2, y2 in line:
cv2.line(line_image, (x1, y1), (x2, y2), line_color, line_width)
line_image = cv2.addWeighted(frame, 0.8, line_image, 1, 1)
return line_image
def display_heading_line(frame, steering_angle, line_color=(0, 0, 255), line_width=5, ):
heading_image = np.zeros_like(frame)
height, width, _ = frame.shape
steering_angle_radian = steering_angle / 180.0 * math.pi
x1 = int(width / 2)
y1 = height
x2 = int(x1 - height / 2 / math.tan(steering_angle_radian))
y2 = int(height / 2)
cv2.line(heading_image, (x1, y1), (x2, y2), line_color, line_width)
heading_image = cv2.addWeighted(frame, 0.8, heading_image, 1, 1)
return heading_image
def length_of_line_segment(line):
x1, y1, x2, y2 = line
return math.sqrt((x2 - x1) ** 2 + (y2 - y1) ** 2)
def show_image(title, frame, show=_SHOW_IMAGE):
if show:
cv2.imshow(title, frame)
def make_points(frame, line):
height, width, _ = frame.shape
slope, intercept = line
y1 = height # bottom of the frame
y2 = int(y1 * 1 / 2) # make points from middle of the frame down
# bound the coordinates within the frame
x1 = max(-width, min(2 * width, int((y1 - intercept) / slope)))
x2 = max(-width, min(2 * width, int((y2 - intercept) / slope)))
return [[x1, y1, x2, y2]]
############################
# Test Functions
############################
def test_photo(file):
land_follower = HandCodedLaneFollower()
frame = cv2.imread(file)
combo_image = land_follower.follow_lane(frame)
show_image('final', combo_image, True)
cv2.waitKey(0)
cv2.destroyAllWindows()
def test_video(video_file):
lane_follower = HandCodedLaneFollower()
cap = cv2.VideoCapture(video_file + '.avi')
# skip first second of video.
for i in range(3):
_, frame = cap.read()
video_type = cv2.VideoWriter_fourcc(*'XVID')
video_overlay = cv2.VideoWriter("%s_overlay.avi" % (video_file), video_type, 20.0, (320, 240))
try:
i = 0
while cap.isOpened():
_, frame = cap.read()
print('frame %s' % i)
combo_image = lane_follower.follow_lane(frame)
cv2.imwrite("%s_%03d_%03d.png" % (video_file, i, lane_follower.curr_steering_angle), frame)
cv2.imwrite("%s_overlay_%03d.png" % (video_file, i), combo_image)
video_overlay.write(combo_image)
cv2.imshow("Road with Lane line", combo_image)
i += 1
if cv2.waitKey(1) & 0xFF == ord('q'):
break
finally:
cap.release()
video_overlay.release()
cv2.destroyAllWindows()
| [
"pranitpokhrel@gmail.com"
] | pranitpokhrel@gmail.com |
28279a0327096fede9a88609bda8fc917788902d | e022862c20d92931f6b119998792377d028d0137 | /multi-agent-irl/irl/mack/ncdail.py | f13bb1d098d528977b874cc1834e4af99ef3efb1 | [
"Apache-2.0"
] | permissive | renos/CoDAIL | 3519dface3720318c79bd9afa3cc0ae889b9ee05 | 92423c90166ac7c5b88637ca1ef0002f126918db | refs/heads/master | 2023-06-26T06:20:09.275801 | 2021-07-30T03:11:33 | 2021-07-30T03:11:33 | 384,311,940 | 0 | 0 | Apache-2.0 | 2021-07-09T03:36:48 | 2021-07-09T03:36:47 | null | UTF-8 | Python | false | false | 27,922 | py | import os.path as osp
import random
import time
import joblib
import numpy as np
import tensorflow as tf
from scipy.stats import pearsonr, spearmanr
from rl.acktr.utils import Scheduler, find_trainable_variables, discount_with_dones
from rl.acktr.utils import cat_entropy, mse, onehot, multionehot
from rl import logger
from rl.acktr import kfac
from rl.common import set_global_seeds, explained_variance
from irl.mack.kfac_discriminator_ncdail import Discriminator
# from irl.mack.kfac_discriminator_wgan import Discriminator
from irl.dataset import Dset
class Model(object):
    """Multi-agent ACKTR-style actor-critic used as the NCDAIL generator.

    For each agent k this builds a sampling policy (``step_model``, batch
    size nenvs) and a training policy (``train_model``, batch size
    nenvs * nsteps * scale[k]), a KFAC optimizer for the RL update and a
    second KFAC optimizer for behavior cloning.  Agents flagged True in
    ``identical`` reuse the previous non-identical agent's network and
    placeholders and are trained jointly with it.
    """
    def __init__(self, policy, ob_space, ac_space, nenvs, total_timesteps, nprocs=2, nsteps=200,
                 nstack=1, ent_coef=0.00, vf_coef=0.5, vf_fisher_coef=1.0, lr=0.25, max_grad_norm=0.5,
                 kfac_clip=0.001, lrschedule='linear', identical=None):
        config = tf.ConfigProto(allow_soft_placement=True,
                                intra_op_parallelism_threads=nprocs,
                                inter_op_parallelism_threads=nprocs)
        config.gpu_options.allow_growth = True
        self.sess = sess = tf.Session(config=config)
        nbatch = nenvs * nsteps
        self.num_agents = num_agents = len(ob_space)
        self.n_actions = [ac_space[k].n for k in range(self.num_agents)]
        if identical is None:
            identical = [False for _ in range(self.num_agents)]

        # scale[h]: number of consecutive agents sharing agent h's network.
        # pointer[h]: one past the index of the last agent in h's shared group,
        # so agents j in range(h, pointer[h]) all use agent h's model.
        scale = [1 for _ in range(num_agents)]
        pointer = [i for i in range(num_agents)]
        h = 0
        for k in range(num_agents):
            if identical[k]:
                scale[h] += 1
            else:
                pointer[h] = k
                h = k
        pointer[h] = num_agents

        # Per-agent placeholders: action, advantage, return, policy LR.
        # Identical agents alias the previous agent's placeholders.
        A, ADV, R, PG_LR = [], [], [], []
        for k in range(num_agents):
            if identical[k]:
                A.append(A[-1])
                ADV.append(ADV[-1])
                R.append(R[-1])
                PG_LR.append(PG_LR[-1])
            else:
                A.append(tf.placeholder(tf.int32, [nbatch * scale[k]]))
                ADV.append(tf.placeholder(tf.float32, [nbatch * scale[k]]))
                R.append(tf.placeholder(tf.float32, [nbatch * scale[k]]))
                PG_LR.append(tf.placeholder(tf.float32, []))
        # A = [tf.placeholder(tf.int32, [nbatch]) for _ in range(num_agents)]
        # ADV = [tf.placeholder(tf.float32, [nbatch]) for _ in range(num_agents)]
        # R = [tf.placeholder(tf.float32, [nbatch]) for _ in range(num_agents)]
        # PG_LR = [tf.placeholder(tf.float32, []) for _ in range(num_agents)]
        # VF_LR = [tf.placeholder(tf.float32, []) for _ in range(num_agents)]
        pg_loss, entropy, vf_loss, train_loss = [], [], [], []
        self.model = step_model = []
        self.model2 = train_model = []
        self.pg_fisher = pg_fisher_loss = []
        self.logits = logits = []
        sample_net = []
        self.vf_fisher = vf_fisher_loss = []
        self.joint_fisher = joint_fisher_loss = []
        self.lld = lld = []
        # Build policy/value networks and the A2C + Fisher losses per agent.
        for k in range(num_agents):
            if identical[k]:
                step_model.append(step_model[-1])
                train_model.append(train_model[-1])
            else:
                step_model.append(policy(sess, ob_space[k], ac_space[k], ob_space, ac_space,
                                         nenvs, 1, nstack, reuse=False, name='%d' % k))
                train_model.append(policy(sess, ob_space[k], ac_space[k], ob_space, ac_space,
                                          nenvs * scale[k], nsteps, nstack, reuse=True, name='%d' % k))
            logpac = tf.nn.sparse_softmax_cross_entropy_with_logits(
                logits=train_model[k].pi, labels=A[k])
            lld.append(tf.reduce_mean(logpac))  # negative log-likelihood, used for BC
            logits.append(train_model[k].pi)

            pg_loss.append(tf.reduce_mean(ADV[k] * logpac))
            entropy.append(tf.reduce_mean(cat_entropy(train_model[k].pi)))
            pg_loss[k] = pg_loss[k] - ent_coef * entropy[k]
            vf_loss.append(tf.reduce_mean(mse(tf.squeeze(train_model[k].vf), R[k])))
            train_loss.append(pg_loss[k] + vf_coef * vf_loss[k])

            # Fisher losses for KFAC (sampled-value trick for the value head).
            pg_fisher_loss.append(-tf.reduce_mean(logpac))
            sample_net.append(train_model[k].vf + tf.random_normal(tf.shape(train_model[k].vf)))
            vf_fisher_loss.append(-vf_fisher_coef * tf.reduce_mean(
                tf.pow(train_model[k].vf - tf.stop_gradient(sample_net[k]), 2)))
            joint_fisher_loss.append(pg_fisher_loss[k] + vf_fisher_loss[k])

        self.policy_params = []
        self.value_params = []
        for k in range(num_agents):
            if identical[k]:
                self.policy_params.append(self.policy_params[-1])
                self.value_params.append(self.value_params[-1])
            else:
                self.policy_params.append(find_trainable_variables("policy_%d" % k))
                self.value_params.append(find_trainable_variables("value_%d" % k))
        self.params = params = [a + b for a, b in zip(self.policy_params, self.value_params)]
        params_flat = []
        for k in range(num_agents):
            params_flat.extend(params[k])

        self.grads_check = grads = [
            tf.gradients(train_loss[k], params[k]) for k in range(num_agents)
        ]
        clone_grads = [
            tf.gradients(lld[k], params[k]) for k in range(num_agents)
        ]

        # One KFAC optimizer per distinct agent for RL ('optim') and one for
        # behavior cloning ('clones'); identical agents alias the previous ops.
        self.optim = optim = []
        self.clones = clones = []
        update_stats_op = []
        train_op, clone_op, q_runner = [], [], []

        for k in range(num_agents):
            if identical[k]:
                optim.append(optim[-1])
                train_op.append(train_op[-1])
                q_runner.append(q_runner[-1])
                clones.append(clones[-1])
                clone_op.append(clone_op[-1])
            else:
                with tf.variable_scope('optim_%d' % k):
                    optim.append(kfac.KfacOptimizer(
                        learning_rate=PG_LR[k], clip_kl=kfac_clip,
                        momentum=0.9, kfac_update=1, epsilon=0.01,
                        stats_decay=0.99, async=0, cold_iter=10,
                        max_grad_norm=max_grad_norm)
                    )
                    # NOTE(review): passes the whole joint_fisher_loss list, not
                    # joint_fisher_loss[k] — presumably relies on var_list to
                    # restrict the stats; confirm against the kfac API.
                    update_stats_op.append(optim[k].compute_and_apply_stats(joint_fisher_loss, var_list=params[k]))
                    train_op_, q_runner_ = optim[k].apply_gradients(list(zip(grads[k], params[k])))
                    train_op.append(train_op_)
                    q_runner.append(q_runner_)

                with tf.variable_scope('clone_%d' % k):
                    clones.append(kfac.KfacOptimizer(
                        learning_rate=PG_LR[k], clip_kl=kfac_clip,
                        momentum=0.9, kfac_update=1, epsilon=0.01,
                        stats_decay=0.99, async=0, cold_iter=10,
                        max_grad_norm=max_grad_norm)
                    )
                    update_stats_op.append(clones[k].compute_and_apply_stats(
                        pg_fisher_loss[k], var_list=self.policy_params[k]))
                    clone_op_, q_runner_ = clones[k].apply_gradients(list(zip(clone_grads[k], self.policy_params[k])))
                    clone_op.append(clone_op_)

        update_stats_op = tf.group(*update_stats_op)
        train_ops = train_op
        clone_ops = clone_op
        train_op = tf.group(*train_op)
        clone_op = tf.group(*clone_op)

        self.q_runner = q_runner
        self.lr = Scheduler(v=lr, nvalues=total_timesteps, schedule=lrschedule)
        self.clone_lr = Scheduler(v=lr, nvalues=total_timesteps, schedule=lrschedule)

        def train(obs, states, rewards, masks, actions, values):
            """Run one ACKTR update per agent; returns (pg_loss, vf_loss, entropy) lists."""
            advs = [rewards[k] - values[k] for k in range(num_agents)]
            # Advance the LR schedule once per collected sample.
            for step in range(len(obs)):
                cur_lr = self.lr.value()

            ob = np.concatenate(obs, axis=1)

            td_map = {}
            for k in range(num_agents):
                if identical[k]:
                    continue
                new_map = {}
                if num_agents > 1:
                    # Other agents' one-hot actions feed the centralized critic.
                    action_v = []
                    for j in range(k, pointer[k]):
                        action_v.append(np.concatenate([multionehot(actions[i], self.n_actions[i])
                                                        for i in range(num_agents) if i != k], axis=1))
                    action_v = np.concatenate(action_v, axis=0)
                    new_map.update({train_model[k].A_v: action_v})
                    td_map.update({train_model[k].A_v: action_v})

                # Stack the batches of every agent in k's shared group.
                new_map.update({
                    train_model[k].X: np.concatenate([obs[j] for j in range(k, pointer[k])], axis=0),
                    train_model[k].X_v: np.concatenate([ob.copy() for j in range(k, pointer[k])], axis=0),
                    A[k]: np.concatenate([actions[j] for j in range(k, pointer[k])], axis=0),
                    ADV[k]: np.concatenate([advs[j] for j in range(k, pointer[k])], axis=0),
                    R[k]: np.concatenate([rewards[j] for j in range(k, pointer[k])], axis=0),
                    PG_LR[k]: cur_lr / float(scale[k])
                })
                sess.run(train_ops[k], feed_dict=new_map)
                td_map.update(new_map)

                if states[k] != []:
                    td_map[train_model[k].S] = states
                    td_map[train_model[k].M] = masks

            policy_loss, value_loss, policy_entropy = sess.run(
                [pg_loss, vf_loss, entropy],
                td_map
            )
            return policy_loss, value_loss, policy_entropy

        def clone(obs, actions):
            """One behavior-cloning step per agent; returns the NLL losses."""
            td_map = {}
            cur_lr = self.clone_lr.value()
            for k in range(num_agents):
                if identical[k]:
                    continue
                new_map = {}
                new_map.update({
                    train_model[k].X: np.concatenate([obs[j] for j in range(k, pointer[k])], axis=0),
                    A[k]: np.concatenate([actions[j] for j in range(k, pointer[k])], axis=0),
                    PG_LR[k]: cur_lr / float(scale[k])
                })
                sess.run(clone_ops[k], feed_dict=new_map)
                td_map.update(new_map)
            lld_loss = sess.run([lld], td_map)
            return lld_loss

        def save(save_path):
            """Dump all trainable parameters to save_path via joblib."""
            ps = sess.run(params_flat)
            joblib.dump(ps, save_path)

        def load(load_path):
            """Restore parameters previously written by save()."""
            loaded_params = joblib.load(load_path)
            restores = []
            for p, loaded_p in zip(params_flat, loaded_params):
                restores.append(p.assign(loaded_p))
            sess.run(restores)

        self.train = train
        self.clone = clone
        self.save = save
        self.load = load
        self.train_model = train_model
        self.step_model = step_model

        def step(ob, av, *_args, **_kwargs):
            """Sample actions/values/states for every agent given obs and previous actions."""
            a, v, s = [], [], []
            obs = np.concatenate(ob, axis=1)
            for k in range(num_agents):
                a_v = np.concatenate([multionehot(av[i], self.n_actions[i])
                                      for i in range(num_agents) if i != k], axis=1)
                a_, v_, s_ = step_model[k].step(ob[k], obs, a_v)
                a.append(a_)
                v.append(v_)
                s.append(s_)
            return a, v, s

        self.step = step

        def value(obs, av):
            """Return each agent's value estimate for the joint observation."""
            v = []
            ob = np.concatenate(obs, axis=1)
            for k in range(num_agents):
                a_v = np.concatenate([multionehot(av[i], self.n_actions[i])
                                      for i in range(num_agents) if i != k], axis=1)
                v_ = step_model[k].value(ob, a_v)
                v.append(v_)
            return v

        self.value = value
        self.initial_state = [step_model[k].initial_state for k in range(num_agents)]
class Runner(object):
    """Collects nsteps-long rollouts from the vectorized env, scores them with
    the learned discriminators (imitation rewards), and returns discounted
    returns alongside the true environment returns for diagnostics."""

    def __init__(self, env, model, discriminator, nsteps, nstack, gamma, lam, disc_type):
        self.env = env
        self.model = model
        self.discriminator = discriminator
        self.disc_type = disc_type
        self.num_agents = len(env.observation_space)
        self.nenv = nenv = env.num_envs
        # Flattened (batch, stacked-obs) shape per agent for training batches.
        self.batch_ob_shape = [
            (nenv * nsteps, nstack * env.observation_space[k].shape[0]) for k in range(self.num_agents)
        ]
        self.obs = [
            np.zeros((nenv, nstack * env.observation_space[k].shape[0])) for k in range(self.num_agents)
        ]
        self.actions = [np.zeros((nenv, )) for _ in range(self.num_agents)]
        obs = env.reset()
        self.update_obs(obs)
        self.gamma = gamma
        self.lam = lam
        self.nsteps = nsteps
        self.states = model.initial_state
        self.n_actions = [env.action_space[k].n for k in range(self.num_agents)]
        self.dones = [np.array([False for _ in range(nenv)]) for k in range(self.num_agents)]

    def update_obs(self, obs):
        """Replace the current observations (frame stacking is disabled)."""
        # TODO: Potentially useful for stacking.
        self.obs = obs
        # for k in range(self.num_agents):
        #     ob = np.roll(self.obs[k], shift=-1, axis=1)
        #     ob[:, -1] = obs[:, 0]
        #     self.obs[k] = ob
        # self.obs = [np.roll(ob, shift=-1, axis=3) for ob in self.obs]
        # self.obs[:, :, :, -1] = obs[:, :, :, 0]

    def run(self):
        """Roll out nsteps, rewarding each transition with the discriminator
        score instead of the environment reward, and return the batched arrays
        the learner consumes."""
        # mb_episode_r = [[] for _ in range(self.num_agents)]
        mb_obs = [[] for _ in range(self.num_agents)]
        mb_true_rewards = [[] for _ in range(self.num_agents)]
        mb_rewards = [[] for _ in range(self.num_agents)]
        mb_actions = [[] for _ in range(self.num_agents)]
        mb_values = [[] for _ in range(self.num_agents)]
        mb_dones = [[] for _ in range(self.num_agents)]
        mb_masks = [[] for _ in range(self.num_agents)]
        mb_states = self.states
        for n in range(self.nsteps):
            actions, values, states = self.model.step(self.obs, self.actions)
            self.actions = actions
            # Imitation reward from the discriminator; 'decentralized' feeds
            # each agent its own obs, 'decentralized-all' the joint obs.
            if self.disc_type == 'decentralized':
                mul = [multionehot(self.actions[k], self.n_actions[k]) for k in range(self.num_agents)]
                rewards = [np.squeeze(self.discriminator[k].get_reward(
                    self.obs[k], np.concatenate(mul, axis=1)))
                    for k in range(self.num_agents)]
            elif self.disc_type == 'decentralized-all':
                mul = [multionehot(self.actions[k], self.n_actions[k]) for k in range(self.num_agents)]
                rewards = [np.squeeze(self.discriminator[k].get_reward(
                    np.concatenate(self.obs, axis=1), np.concatenate(mul, axis=1)))
                    for k in range(self.num_agents)]
            else:
                assert False

            for k in range(self.num_agents):
                mb_obs[k].append(np.copy(self.obs[k]))
                mb_actions[k].append(actions[k])
                mb_values[k].append(values[k])
                mb_dones[k].append(self.dones[k])
                mb_rewards[k].append(rewards[k])
            actions_list = []
            for i in range(self.nenv):
                actions_list.append([onehot(actions[k][i], self.n_actions[k]) for k in range(self.num_agents)])
            obs, true_rewards, dones, _ = self.env.step(actions_list)
            self.states = states
            self.dones = dones
            for k in range(self.num_agents):
                # Zero out observations of finished environments.
                for ni, done in enumerate(dones[k]):
                    if done:
                        self.obs[k][ni] = self.obs[k][ni] * 0.0
            self.update_obs(obs)
            for k in range(self.num_agents):
                mb_true_rewards[k].append(true_rewards[k])
        for k in range(self.num_agents):
            mb_dones[k].append(self.dones[k])

        # batch of steps to batch of rollouts
        # print(mb_rewards[0])
        for k in range(self.num_agents):
            # mb_episode_r[k] = np.sum(mb_rewards[k]) / np.shape(mb_rewards[k])[-1]
            mb_obs[k] = np.asarray(mb_obs[k], dtype=np.float32).swapaxes(1, 0).reshape(self.batch_ob_shape[k])
            mb_true_rewards[k] = np.asarray(mb_true_rewards[k], dtype=np.float32).swapaxes(1, 0)
            mb_rewards[k] = np.asarray(mb_rewards[k], dtype=np.float32).swapaxes(1, 0)
            mb_actions[k] = np.asarray(mb_actions[k], dtype=np.int32).swapaxes(1, 0)
            mb_values[k] = np.asarray(mb_values[k], dtype=np.float32).swapaxes(1, 0)
            mb_dones[k] = np.asarray(mb_dones[k], dtype=np.bool).swapaxes(1, 0)
            mb_masks[k] = mb_dones[k][:, :-1]
            mb_dones[k] = mb_dones[k][:, 1:]

        # last_values = self.model.value(self.obs, self.actions)
        #
        # mb_advs = [np.zeros_like(mb_rewards[k]) for k in range(self.num_agents)]
        # mb_returns = [[] for _ in range(self.num_agents)]
        #
        # lastgaelam = 0.0
        # for k in range(self.num_agents):
        #     for t in reversed(range(self.nsteps)):
        #         if t == self.nsteps - 1:
        #             nextnonterminal = 1.0 - self.dones[k]
        #             nextvalues = last_values[k]
        #         else:
        #             nextnonterminal = 1.0 - mb_dones[k][:, t + 1]
        #             nextvalues = mb_values[k][:, t + 1]
        #         delta = mb_rewards[k][:, t] + self.gamma * nextvalues * nextnonterminal - mb_values[k][:, t]
        #         mb_advs[k][:, t] = lastgaelam = delta + self.gamma * self.lam * nextnonterminal * lastgaelam
        #     mb_returns[k] = mb_advs[k] + mb_values[k]
        #     mb_returns[k] = mb_returns[k].flatten()
        #     mb_masks[k] = mb_masks[k].flatten()
        #     mb_values[k] = mb_values[k].flatten()
        #     mb_actions[k] = mb_actions[k].flatten()
        mb_returns = [np.zeros_like(mb_rewards[k]) for k in range(self.num_agents)]
        mb_true_returns = [np.zeros_like(mb_rewards[k]) for k in range(self.num_agents)]
        last_values = self.model.value(self.obs, self.actions)
        # discount/bootstrap off value fn
        for k in range(self.num_agents):
            for n, (rewards, true_rewards, dones, value) in enumerate(zip(mb_rewards[k], mb_true_rewards[k], mb_dones[k], last_values[k].tolist())):
                rewards = rewards.tolist()
                dones = dones.tolist()
                true_rewards = true_rewards.tolist()
                if dones[-1] == 0:
                    # Rollout did not terminate: bootstrap with the value estimate.
                    rewards = discount_with_dones(rewards + [value], dones + [0], self.gamma)[:-1]
                    true_rewards = discount_with_dones(true_rewards + [value], dones + [0], self.gamma)[:-1]
                else:
                    rewards = discount_with_dones(rewards, dones, self.gamma)
                    true_rewards = discount_with_dones(true_rewards, dones, self.gamma)
                mb_returns[k][n] = rewards
                mb_true_returns[k][n] = true_rewards

        for k in range(self.num_agents):
            mb_returns[k] = mb_returns[k].flatten()
            mb_masks[k] = mb_masks[k].flatten()
            mb_values[k] = mb_values[k].flatten()
            mb_actions[k] = mb_actions[k].flatten()

        mh_actions = [multionehot(mb_actions[k], self.n_actions[k]) for k in range(self.num_agents)]
        mb_all_obs = np.concatenate(mb_obs, axis=1)
        mh_all_actions = np.concatenate(mh_actions, axis=1)
        return mb_obs, mb_states, mb_returns, mb_masks, mb_actions,\
               mb_values, mb_all_obs, mh_actions, mh_all_actions, mb_rewards, mb_true_rewards, mb_true_returns#, mb_episode_r
def learn(policy, expert, env, env_id, seed, total_timesteps=int(40e6), gamma=0.99, lam=0.95, log_interval=1, nprocs=32,
          nsteps=20, nstack=1, ent_coef=0.01, vf_coef=0.5, vf_fisher_coef=1.0, lr=0.25, max_grad_norm=0.5,
          kfac_clip=0.001, save_interval=50, lrschedule='linear', dis_lr=0.001, disc_type='decentralized',
          bc_iters=500, identical=None, d_iters=1):
    """Adversarial imitation learning driver (NCDAIL).

    Pretrains the generator policies with behavior cloning on the expert
    dataset, then alternates discriminator updates on generator/expert
    minibatches with ACKTR policy updates rewarded by the discriminators.
    Periodically logs diagnostics (including Pearson/Spearman correlation of
    the learned reward with the true return) and checkpoints both model and
    discriminators.

    Args:
        policy: policy network constructor passed through to Model.
        expert: expert dataset exposing get_next_batch(batch_size).
        env: vectorized multi-agent environment.
        env_id: environment name (enables extra logging for 'simple_tag').
        seed: global RNG seed.
        disc_type: 'decentralized' (per-agent obs) or 'decentralized-all'
            (joint obs fed to every discriminator).
        bc_iters: number of behavior-cloning pretraining iterations.
        d_iters: discriminator updates per generator update.
    """
    tf.reset_default_graph()
    set_global_seeds(seed)

    buffer = None

    nenvs = env.num_envs
    ob_space = env.observation_space
    ac_space = env.action_space
    num_agents = (len(ob_space))
    make_model = lambda: Model(policy, ob_space, ac_space, nenvs, total_timesteps, nprocs=nprocs, nsteps=nsteps,
                               nstack=nstack, ent_coef=ent_coef, vf_coef=vf_coef, vf_fisher_coef=vf_fisher_coef,
                               lr=lr, max_grad_norm=max_grad_norm, kfac_clip=kfac_clip,
                               lrschedule=lrschedule, identical=identical)
    if save_interval and logger.get_dir():
        import cloudpickle
        with open(osp.join(logger.get_dir(), 'make_model.pkl'), 'wb') as fh:
            fh.write(cloudpickle.dumps(make_model))
    model = make_model()
    if disc_type == 'decentralized':
        discriminator = [
            Discriminator(model.sess, ob_space, ac_space, nstack, k, disc_type=disc_type,
                          scope="Discriminator_%d" % k,  # gp_coef=gp_coef,
                          total_steps=total_timesteps // (nprocs * nsteps),
                          lr_rate=dis_lr) for k in range(num_agents)
        ]
    # BUGFIX: this previously compared against the misspelled 'dentralized-all',
    # so the supported 'decentralized-all' mode fell through to assert False.
    elif disc_type == 'decentralized-all':
        discriminator = [
            Discriminator(model.sess, ob_space, ac_space, nstack, k, disc_type=disc_type,
                          scope="Discriminator_%d" % k,  # gp_coef=gp_coef,
                          total_steps=total_timesteps // (nprocs * nsteps),
                          lr_rate=dis_lr) for k in range(num_agents)
        ]
    else:
        assert False

    tf.global_variables_initializer().run(session=model.sess)
    runner = Runner(env, model, discriminator, nsteps=nsteps, nstack=nstack, gamma=gamma, lam=lam, disc_type=disc_type)
    nbatch = nenvs * nsteps
    tstart = time.time()
    coord = tf.train.Coordinator()
    # enqueue_threads = [q_runner.create_threads(model.sess, coord=coord, start=True) for q_runner in model.q_runner]

    # Behavior-cloning warm start on expert data.
    for _ in range(bc_iters):
        e_obs, e_actions, _, _ = expert.get_next_batch(nenvs * nsteps)
        e_a = [np.argmax(e_actions[k], axis=1) for k in range(len(e_actions))]
        lld_loss = model.clone(e_obs, e_a)
        # print(lld_loss)

    for update in range(1, total_timesteps // nbatch + 1):
        obs, states, rewards, masks, actions, values, all_obs,\
            mh_actions, mh_all_actions, mh_rewards, mh_true_rewards, mh_true_returns = runner.run()#, mh_episode_r = runner.run()

        # d_iters = 1
        g_loss, e_loss = np.zeros((num_agents, d_iters)), np.zeros((num_agents, d_iters))
        idx = 0
        idxs = np.arange(len(all_obs))
        random.shuffle(idxs)
        all_obs = all_obs[idxs]
        mh_actions = [mh_actions[k][idxs] for k in range(num_agents)]
        mh_obs = [obs[k][idxs] for k in range(num_agents)]
        mh_values = [values[k][idxs] for k in range(num_agents)]

        if buffer:
            buffer.update(mh_obs, mh_actions, None, all_obs, mh_values)
        else:
            buffer = Dset(mh_obs, mh_actions, None, all_obs, mh_values, randomize=True, num_agents=num_agents)

        # Discriminator updates: expert vs. generator minibatches.
        d_minibatch = nenvs * nsteps
        for d_iter in range(d_iters):
            e_obs, e_actions, e_all_obs, _ = expert.get_next_batch(d_minibatch)
            g_obs, g_actions, g_all_obs, _ = buffer.get_next_batch(batch_size=d_minibatch)

            if disc_type == 'decentralized':
                for k in range(num_agents):
                    g_loss[k, d_iter], e_loss[k, d_iter], _, _ = discriminator[k].train(
                        g_obs[k],
                        np.concatenate(g_actions, axis=1),
                        e_obs[k],
                        np.concatenate(e_actions, axis=1)
                    )
            elif disc_type == 'decentralized-all':
                for k in range(num_agents):
                    g_loss[k, d_iter], e_loss[k, d_iter], _, _ = discriminator[k].train(
                        g_all_obs,
                        np.concatenate(g_actions, axis=1),
                        e_all_obs,
                        np.concatenate(e_actions, axis=1))
            else:
                assert False

            idx += 1

        # Generator (policy) update; skipped for the first updates so the
        # discriminator reward stabilizes first.
        if update > 10:
            policy_loss, value_loss, policy_entropy = model.train(obs, states, rewards, masks, actions, values)

        model.old_obs = obs
        nseconds = time.time() - tstart
        fps = int((update * nbatch) / nseconds)
        if update % log_interval == 0 or update == 1:
            ev = [explained_variance(values[k], rewards[k]) for k in range(model.num_agents)]
            logger.record_tabular("nupdates", update)
            logger.record_tabular("total_timesteps", update * nbatch)
            logger.record_tabular("fps", fps)
            for k in range(model.num_agents):
                logger.record_tabular("explained_variance %d" % k, float(ev[k]))
                # logger.record_tabular("episode_reward %d" % k, float(mh_episode_r[k]))
                if update > 10:
                    logger.record_tabular("policy_entropy %d" % k, float(policy_entropy[k]))
                    logger.record_tabular("policy_loss %d" % k, float(policy_loss[k]))
                    logger.record_tabular("value_loss %d" % k, float(value_loss[k]))
                    try:
                        logger.record_tabular('pearson %d' % k, float(
                            pearsonr(rewards[k].flatten(), mh_true_returns[k].flatten())[0]))
                        logger.record_tabular('reward %d' % k, float(np.mean(rewards[k])))
                        logger.record_tabular('spearman %d' % k, float(
                            spearmanr(rewards[k].flatten(), mh_true_returns[k].flatten())[0]))
                    except:
                        pass
                # logger.record_tabular("episode_sum_reward", float(np.sum(mh_episode_r[k])))

            if update > 10 and env_id == 'simple_tag':
                try:
                    logger.record_tabular('in_pearson_0_2', float(
                        pearsonr(rewards[0].flatten(), rewards[2].flatten())[0]))
                    logger.record_tabular('in_pearson_1_2', float(
                        pearsonr(rewards[1].flatten(), rewards[2].flatten())[0]))
                    logger.record_tabular('in_spearman_0_2', float(
                        spearmanr(rewards[0].flatten(), rewards[2].flatten())[0]))
                    logger.record_tabular('in_spearman_1_2', float(
                        spearmanr(rewards[1].flatten(), rewards[2].flatten())[0]))
                except:
                    pass

            g_loss_m = np.mean(g_loss, axis=1)
            e_loss_m = np.mean(e_loss, axis=1)
            # g_loss_gp_m = np.mean(g_loss_gp, axis=1)
            # e_loss_gp_m = np.mean(e_loss_gp, axis=1)
            for k in range(num_agents):
                logger.record_tabular("g_loss %d" % k, g_loss_m[k])
                logger.record_tabular("e_loss %d" % k, e_loss_m[k])
                # logger.record_tabular("g_loss_gp %d" % k, g_loss_gp_m[k])
                # logger.record_tabular("e_loss_gp %d" % k, e_loss_gp_m[k])
            logger.dump_tabular()

        if save_interval and (update % save_interval == 0 or update == 1) and logger.get_dir():
            savepath = osp.join(logger.get_dir(), 'm_%.5i' % update)
            print('Saving to', savepath)
            model.save(savepath)
            if disc_type == 'decentralized':
                for k in range(num_agents):
                    savepath = osp.join(logger.get_dir(), 'd_%d_%.5i' % (k, update))
                    discriminator[k].save(savepath)
            elif disc_type == 'decentralized-all':
                for k in range(num_agents):
                    savepath = osp.join(logger.get_dir(), 'd_%d_%.5i' % (k, update))
                    discriminator[k].save(savepath)

    coord.request_stop()
    # coord.join(enqueue_threads)
    env.close()
| [
"ericliuof97@gmail.com"
] | ericliuof97@gmail.com |
8ef65c6fea59304140275f2c14c1aed2bd814595 | be26f8fa36ea4e52ef94555fc69a104e88d3fd16 | /models/Juan's Models/old scripts/gen_logits_and_embeddings.py | 2427ab44a4892dcff7b042188c28007e2df740d6 | [] | no_license | davidday99/twitter-data-collector | 599eeed1bf0fd995da28053d3547f3b95ddb2385 | bf7d203c380dc727d30f1e17b0297f1c011642b0 | refs/heads/master | 2022-06-02T09:41:26.940830 | 2020-05-06T19:20:47 | 2020-05-06T19:20:47 | 254,766,551 | 3 | 3 | null | 2020-04-27T08:35:19 | 2020-04-11T00:59:04 | Python | UTF-8 | Python | false | false | 13,350 | py | import random
import os
import pandas as pd
import numpy as np
from transformers import BertTokenizer, BertForSequenceClassification, BertModel, BertPreTrainedModel, BertConfig
from transformers import AdamW, BertConfig, get_linear_schedule_with_warmup
from keras.preprocessing.sequence import pad_sequences
import torch
from torch import nn, optim
from torch.utils.data import TensorDataset, Subset, DataLoader, RandomSampler, SequentialSampler, TensorDataset
import io
from sklearn.model_selection import train_test_split
# Load the pre-split profile data and drop the stray index column from CSV export.
df_train = pd.read_csv('data/profile_data_train.csv')
df_test = pd.read_csv('data/profile_data_test.csv')
df_train = df_train.drop('Unnamed: 0', axis=1).reset_index(drop=True)
df_test = df_test.drop('Unnamed: 0', axis=1).reset_index(drop=True)
# Split train dataframe into 8 stratified chunks (for 8-fold out-of-fold
# logit generation below).
X_train = df_train.drop('age_group', axis=1)
Y_train = df_train.age_group.values.tolist()
X_test = df_test.drop('age_group', axis=1)
Y_test = df_test.age_group.values.tolist()
# First split: 50-50
X_1, X_2, Y_1, Y_2 = train_test_split(X_train, Y_train, stratify=Y_train, test_size=0.5, random_state=42)
# Second split: 25-25-25-25
X_1, X_3, Y_1, Y_3 = train_test_split(X_1, Y_1, stratify=Y_1, test_size=0.5, random_state=42)
X_2, X_4, Y_2, Y_4 = train_test_split(X_2, Y_2, stratify=Y_2, test_size=0.5, random_state=42)
# Third split: 12.5 x 8
X_1, X_5, Y_1, Y_5 = train_test_split(X_1, Y_1, stratify=Y_1, test_size=0.5, random_state=42)
X_2, X_6, Y_2, Y_6 = train_test_split(X_2, Y_2, stratify=Y_2, test_size=0.5, random_state=42)
X_3, X_7, Y_3, Y_7 = train_test_split(X_3, Y_3, stratify=Y_3, test_size=0.5, random_state=42)
X_4, X_8, Y_4, Y_8 = train_test_split(X_4, Y_4, stratify=Y_4, test_size=0.5, random_state=42)
X_1 = X_1.reset_index(drop=True)
X_2 = X_2.reset_index(drop=True)
X_3 = X_3.reset_index(drop=True)
X_4 = X_4.reset_index(drop=True)
X_5 = X_5.reset_index(drop=True)
X_6 = X_6.reset_index(drop=True)
X_7 = X_7.reset_index(drop=True)
X_8 = X_8.reset_index(drop=True)
X_splits = [X_1, X_2, X_3, X_4, X_5, X_6, X_7, X_8]
Y_splits = [Y_1, Y_2, Y_3, Y_4, Y_5, Y_6, Y_7, Y_8]
# WordPiece tokenizer matching the BERT checkpoint used below.
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
# Classification accuracy from raw logits.
def accuracy(logits, labels):
    """Return the fraction of rows whose argmax over `logits` equals `labels`."""
    predicted = logits.argmax(axis=1).ravel()
    truth = labels.ravel()
    return (predicted == truth).mean()
# Train a model over one epoch
def train_one_epoch(model, train_dataloader, optimizer, scheduler):
    """Run one training epoch over train_dataloader and return the mean batch loss.

    Re-seeds all RNGs at the start of every epoch (so each epoch's dropout/
    shuffling noise is reproducible), moves each batch to the active device,
    backprops the classification loss with gradient clipping at 1.0, and steps
    both the optimizer and the LR scheduler per batch.
    """
    print("*************STARTING EPOCH*************")
    seed_val = 42
    random.seed(seed_val)
    np.random.seed(seed_val)
    torch.manual_seed(seed_val)
    torch.cuda.manual_seed_all(seed_val)
    total_loss = 0
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    # Epoch
    model.train()
    for step, batch in enumerate(train_dataloader):
        if step % 100 == 0:
            print("Batch " + str(step) + ' of ' + str(len(train_dataloader)))
        batch_input_ids = batch[0].to(device)
        batch_input_masks = batch[1].to(device)
        batch_labels = batch[2].to(device)
        model.zero_grad()
        # Passing labels makes the model return the loss as outputs[0].
        outputs = model(batch_input_ids, token_type_ids=None, attention_mask=batch_input_masks, labels=batch_labels)
        loss = outputs[0]
        total_loss += loss.item()
        loss.backward()
        torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)
        optimizer.step()
        scheduler.step()
    avg_train_loss = total_loss / len(train_dataloader)
    print("Done training epoch. Average training loss: " + str(avg_train_loss))
    return avg_train_loss
# Evaluate a model's accuracy and return predictions, true labels, embeddings
def eval_profiles(model, test_dataloader):
    """Evaluate `model` on `test_dataloader` without gradient tracking.

    Returns three per-batch lists: logits arrays, true-label arrays, and the
    model's last hidden-state arrays (requires the model to be constructed
    with output_hidden_states=True).  Also prints the mean batch accuracy.
    """
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    model.eval()
    predictions, true_labels, embedding_layers = [], [], []
    eval_accuracy, eval_steps = 0, 0
    for step, batch in enumerate(test_dataloader):
        batch = tuple(t.to(device) for t in batch)
        if step % 100 == 0:
            print("batch " + str(step) + ' of ' + str(len(test_dataloader)))
        batch_input_ids, batch_input_mask, batch_labels = batch
        with torch.no_grad():
            outputs = model(batch_input_ids, token_type_ids=None, attention_mask=batch_input_mask)
        logits = outputs[0]
        logits = logits.detach().cpu().numpy()
        labels = batch_labels.to('cpu').numpy()
        # outputs[1] holds the hidden states; [-1] is the final layer.
        embed = outputs[1][-1].detach().cpu().numpy()
        predictions.append(logits)
        true_labels.append(labels)
        embedding_layers.append(embed)
        eval_accuracy += accuracy(logits, labels)
        eval_steps += 1
    print("Calculated accuracy on eval set: " + str(eval_accuracy / eval_steps))
    return predictions, true_labels, embedding_layers
# Assign initial training logits (filled in per holdout fold below).
df_train = df_train.assign(Logit0=np.zeros(df_train.shape[0]))
df_train = df_train.assign(Logit1=np.zeros(df_train.shape[0]))
df_train = df_train.assign(Logit2=np.zeros(df_train.shape[0]))
df_train = df_train.assign(Logit3=np.zeros(df_train.shape[0]))
epochs = 7
batch_size = 8
# 8-fold out-of-fold scheme: for each fold, train a fresh BERT classifier on
# the other 7 folds and record logits + [CLS] embeddings for the held-out fold,
# so the training-set features are never produced by a model that saw them.
for holdout_idx in range(8):
    print(
        "************************************************************************************************************")
    print("************************************GENERATING LOGITS FOR HOLDOUT " + str(
        holdout_idx + 1) + " OF 8************************************")
    print(
        "************************************************************************************************************\n")
    X_temp, Y_temp = [], []
    X_holdout, Y_holdout = [], []
    # Generate temporary X and Y for training and holdout
    for idx in range(8):
        if idx != holdout_idx:
            X_temp = X_temp + X_splits[idx].tweets_text.values.tolist()
            Y_temp = Y_temp + Y_splits[idx]
        else:
            X_holdout = X_splits[idx].tweets_text.values.tolist()
            Y_holdout = Y_splits[idx]
    # Tokenize and pad inputs to BERT's 512-token limit.
    input_ids, holdout_input_ids = [], []
    for tweet in X_temp:
        encoded = tokenizer.encode(tweet, add_special_tokens=True, max_length=512)
        input_ids.append(encoded)
    input_ids = pad_sequences(input_ids, maxlen=512, dtype='long', value=0, padding='post', truncating='post')
    for tweet in X_holdout:
        encoded = tokenizer.encode(tweet, add_special_tokens=True, max_length=512)
        holdout_input_ids.append(encoded)
    holdout_input_ids = pad_sequences(holdout_input_ids, maxlen=512, dtype='long', value=0, padding='post',
                                      truncating='post')
    # Attention masks to ignore padded tokens
    attention_masks, holdout_attention_masks = [], []
    for tweet in input_ids:
        mask = [int(token_id > 0) for token_id in tweet]
        attention_masks.append(mask)
    for tweet in holdout_input_ids:
        mask = [int(token_id > 0) for token_id in tweet]
        holdout_attention_masks.append(mask)
    # Prep torch data
    train_inputs = torch.tensor(input_ids)
    train_labels = torch.tensor(Y_temp)
    train_masks = torch.tensor(attention_masks)
    holdout_inputs = torch.tensor(holdout_input_ids)
    holdout_labels = torch.tensor(Y_holdout)
    holdout_masks = torch.tensor(holdout_attention_masks)
    train_data = TensorDataset(train_inputs, train_masks, train_labels)
    train_sampler = RandomSampler(train_data)
    train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=batch_size)
    holdout_data = TensorDataset(holdout_inputs, holdout_masks, holdout_labels)
    holdout_dataloader = DataLoader(holdout_data, sampler=None, batch_size=batch_size)
    # Load empty model (fresh pretrained BERT, 4-way classification head).
    model = BertForSequenceClassification.from_pretrained('bert-base-uncased', num_labels=4, output_attentions=False,
                                                          output_hidden_states=True)
    model.cuda()
    optimizer = AdamW(model.parameters(), lr=2e-5, eps=1e-8)
    total_steps = len(train_dataloader) * epochs
    scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=0, num_training_steps=total_steps)
    # Train over epochs
    losses = []
    for epoch in range(epochs):
        print("Epoch " + str(epoch + 1))
        loss = train_one_epoch(model, train_dataloader, optimizer, scheduler)
        losses.append(loss)
    predictions, true_labels, embedding_layers = eval_profiles(model, holdout_dataloader)
    # Assign logits and embeddings to dataframe
    logits = [item for sublist in predictions for item in sublist]
    logits = np.array(logits)
    embeddings = np.array([item for sublist in embedding_layers for item in sublist])  # N x 512 x 768
    embeddings = embeddings[:, 0, :]  # Embedding of [CLS] token represents sentence, N x 768
    X_splits[holdout_idx] = X_splits[holdout_idx].reset_index(drop=True)
    # Write each holdout profile's logits and 768-dim embedding back by handle.
    for idx, row in X_splits[holdout_idx].iterrows():
        handle = row['handle']
        df_train.loc[df_train['handle'] == handle, 'Logit0'] = logits[idx][0]
        df_train.loc[df_train['handle'] == handle, 'Logit1'] = logits[idx][1]
        df_train.loc[df_train['handle'] == handle, 'Logit2'] = logits[idx][2]
        df_train.loc[df_train['handle'] == handle, 'Logit3'] = logits[idx][3]
        for i in range(768):
            feat_name = 'embed' + str(i)
            df_train.loc[df_train['handle'] == handle, feat_name] = embeddings[idx][i]
    del model  # Free cuda memory, prevent information leakage
# Save generated training features
df_train.to_csv('train.csv')
print("Successfully saved training features:")
print(df_train.head())
# Train the final classifier on the FULL training set; this model produces
# the test-set logits/embeddings and is the one saved to disk below.
input_ids = []
for tweet in X_train.tweets_text.values.tolist():
    encoded = tokenizer.encode(tweet, add_special_tokens=True, max_length=512)
    input_ids.append(encoded)
input_ids = pad_sequences(input_ids, maxlen=512, dtype='long', value=0, padding='post', truncating='post')
attention_masks = []
for tweet in input_ids:
    mask = [int(token_id > 0) for token_id in tweet]
    attention_masks.append(mask)
train_inputs = torch.tensor(input_ids)
train_labels = torch.tensor(Y_train)
train_masks = torch.tensor(attention_masks)
train_data = TensorDataset(train_inputs, train_masks, train_labels)
train_sampler = RandomSampler(train_data)
train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=batch_size)
# Load empty model
model = BertForSequenceClassification.from_pretrained('bert-base-uncased', num_labels=4, output_attentions=False,
                                                      output_hidden_states=True)
model.cuda()
optimizer = AdamW(model.parameters(), lr=2e-5, eps=1e-8)
total_steps = len(train_dataloader) * epochs
scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=0, num_training_steps=total_steps)
# Train over epochs
losses = []
for epoch in range(epochs):
    print("Epoch " + str(epoch + 1))
    loss = train_one_epoch(model, train_dataloader, optimizer, scheduler)
    losses.append(loss)
# Tokenize/pad the test set the same way as training data.
test_input_ids = []
for tweet in X_test.tweets_text.values.tolist():
    encoded = tokenizer.encode(tweet, add_special_tokens=True, max_length=512)
    test_input_ids.append(encoded)
test_input_ids = pad_sequences(test_input_ids, maxlen=512, dtype='long', value=0, padding='post', truncating='post')
test_attention_masks = []
for tweet in test_input_ids:
    mask = [int(token_id > 0) for token_id in tweet]
    test_attention_masks.append(mask)
test_inputs = torch.tensor(test_input_ids)
test_labels = torch.tensor(Y_test)
test_masks = torch.tensor(test_attention_masks)
test_data = TensorDataset(test_inputs, test_masks, test_labels)
test_dataloader = DataLoader(test_data, sampler=None, batch_size=batch_size)
# Make predictions for test set and get logits
predictions, true_labels, embedding_layers = eval_profiles(model, test_dataloader)
logits = [item for sublist in predictions for item in sublist]
logits = np.array(logits)
embeddings = np.array([item for sublist in embedding_layers for item in sublist])  # N x 512 x 768
embeddings = embeddings[:, 0, :]  # Embedding of [CLS] token represents sentence
# Assign test logits
df_test = df_test.assign(Logit0=np.zeros(df_test.shape[0]))
df_test = df_test.assign(Logit1=np.zeros(df_test.shape[0]))
df_test = df_test.assign(Logit2=np.zeros(df_test.shape[0]))
df_test = df_test.assign(Logit3=np.zeros(df_test.shape[0]))
# Save logits to df (matched back to each profile by handle).
X_test = X_test.reset_index(drop=True)
for idx, row in X_test.iterrows():
    handle = row['handle']
    df_test.loc[df_test['handle'] == handle, 'Logit0'] = logits[idx][0]
    df_test.loc[df_test['handle'] == handle, 'Logit1'] = logits[idx][1]
    df_test.loc[df_test['handle'] == handle, 'Logit2'] = logits[idx][2]
    df_test.loc[df_test['handle'] == handle, 'Logit3'] = logits[idx][3]
    for i in range(768):
        feat_name = 'embed' + str(i)
        df_test.loc[df_test['handle'] == handle, feat_name] = embeddings[idx][i]
df_test.to_csv('test.csv')
print("Successfully saved training features:")
print(df_test.head())
# Save fine-tuned model and tokenizer for later reuse.
output_dir = './bert_finetuned/'
if not os.path.exists(output_dir):
    os.makedirs(output_dir)
print("Saving model to %s" % output_dir)
# Unwrap DataParallel if present before saving.
model_to_save = model.module if hasattr(model, 'module') else model
model_to_save.save_pretrained(output_dir)
tokenizer.save_pretrained(output_dir)
| [
"juanpaez@utexas.edu"
] | juanpaez@utexas.edu |
aa51cfc869fb53ffd850f220d3edca3c6f687c19 | bb9f832674635e264e1950d041c03acdec7bd83e | /Day 27/main.py | d2dc8b3910956424adeeeaec0b72a63f4f5c1ce5 | [] | no_license | guilhermeaugusto9/100daysofpython | 4e8030a2ae63b5d210b72844e2dccd5cfcf95ee3 | 8100ac0c80ae6af8fb4af49fe5973b0b3575ee0d | refs/heads/master | 2023-08-11T16:22:22.681346 | 2021-10-11T14:40:34 | 2021-10-11T14:40:34 | 415,956,708 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,284 | py | from tkinter import *
# Creating a new window and configurations
window = Tk()
window.title("Widget Examples")
window.minsize(width=500, height=500)
# Labels
label = Label(text="This is old text")
label.config(text="This is new text")
label.pack()
# Buttons
def action():
print("Do something")
# calls action() when pressed
button = Button(text="Click Me", command=action)
button.pack()
# Entries
entry = Entry(width=30)
# Add some text to begin with
entry.insert(END, string="Some text to begin with.")
# Gets text in entry
print(entry.get())
entry.pack()
# Text
text = Text(height=5, width=30)
# Puts cursor in textbox.
text.focus()
# Adds some text to begin with.
text.insert(END, "Example of multi-line text entry.")
# Get's current value in textbox at line 1, character 0
print(text.get("1.0", END))
text.pack()
# Spinbox
def spinbox_used():
# gets the current value in spinbox.
print(spinbox.get())
spinbox = Spinbox(from_=0, to=10, width=5, command=spinbox_used)
spinbox.pack()
# Scale
# Called with current scale value.
def scale_used(value):
print(value)
scale = Scale(from_=0, to=100, command=scale_used)
scale.pack()
# Checkbutton
def checkbutton_used():
# Prints 1 if On button checked, otherwise 0.
print(checked_state.get())
# variable to hold on to checked state, 0 is off, 1 is on.
checked_state = IntVar()
checkbutton = Checkbutton(
text="Is On?", variable=checked_state, command=checkbutton_used)
checked_state.get()
checkbutton.pack()
# Radiobutton
def radio_used():
print(radio_state.get())
# Variable to hold on to which radio button value is checked.
radio_state = IntVar()
radiobutton1 = Radiobutton(text="Option1", value=1,
variable=radio_state, command=radio_used)
radiobutton2 = Radiobutton(text="Option2", value=2,
variable=radio_state, command=radio_used)
radiobutton1.pack()
radiobutton2.pack()
# Listbox
def listbox_used(event):
# Gets current selection from listbox
print(listbox.get(listbox.curselection()))
listbox = Listbox(height=4)
fruits = ["Apple", "Pear", "Orange", "Banana"]
for item in fruits:
listbox.insert(fruits.index(item), item)
listbox.bind("<<ListboxSelect>>", listbox_used)
listbox.pack()
window.mainloop()
| [
"guilherme.augusto9@outlook.com"
] | guilherme.augusto9@outlook.com |
a5b7936e3d25a8f5c8f9deb635771dc4d7de386f | 898f25bb22cd08c191b461934e27c2c6b5bcff42 | /finalcnn.py | 9e0d18cfce01691895ebb723d8b5e5fe2e8a0c03 | [] | no_license | alkadafare01/multiclasscnnmodel | ca4e042d91d3b59774b472d53c6f2622a32e1159 | 2cf12defd0d3c81dc28ee1c06380bf7d6969bdad | refs/heads/main | 2023-04-19T00:12:26.681393 | 2021-05-07T12:36:49 | 2021-05-07T12:36:49 | 322,548,881 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,957 | py | from keras.callbacks import EarlyStopping, CSVLogger, ModelCheckpoint
from keras.models import Sequential
from keras.layers import Conv2D, Dropout
from keras.layers import MaxPooling2D
from keras.layers import Flatten
from keras.layers import Dense
# Initialising the CNN
#from tensorflow_core.python.client import session
import pathlib
session = 'simpleNASNet'
classifier = Sequential()
# Step 1 - Convolution
classifier.add(Conv2D(32, (3, 3), input_shape = (64, 64, 3), activation = 'relu',kernel_initializer='he_uniform', padding='same'))
# Step 2 - Pooling
classifier.add(MaxPooling2D(pool_size = (2, 2)))
classifier.add(Dropout(0.2))
# Adding a second convolutional layer
classifier.add(Conv2D(64, (3, 3), input_shape = (64, 64, 3), activation = 'relu',kernel_initializer='he_uniform', padding='same'))
classifier.add(MaxPooling2D(pool_size = (2, 2)))
classifier.add(Dropout(0.2))
#Adding a third convolutional layer
classifier.add(Conv2D(128, (3, 3), activation='relu', kernel_initializer='he_uniform', padding='same'))
classifier.add(MaxPooling2D((2, 2)))
classifier.add(Dropout(0.2))
# Step 3 - Flattening
classifier.add(Flatten())
# Step 4 - Full connection
classifier.add(Dense(units = 128, activation = 'relu', kernel_initializer='he_uniform'))
classifier.add(Dropout(0.2))
classifier.add(Dense(units = 3, activation = 'softmax'))
# Compiling the CNN
classifier.compile(loss='categorical_crossentropy',
optimizer='rmsprop',
metrics=['accuracy'])
# Part 2 - Fitting the CNN to the images
from keras.preprocessing.image import ImageDataGenerator
train_datagen = ImageDataGenerator(rescale = 1./255,
shear_range = 0.2,
zoom_range = 0.2,
horizontal_flip = True)
test_datagen = ImageDataGenerator(rescale = 1./255)
training_set = train_datagen.flow_from_directory('data/train',
target_size = (64, 64),
batch_size = 32,
class_mode = 'categorical')
test_set = test_datagen.flow_from_directory('data/test',
target_size = (64, 64),
batch_size = 32,
class_mode = 'categorical')
logfile = session + '-train' + '.log'
csv_logger = CSVLogger(logfile, append=True)
early_stopping = EarlyStopping(monitor='val_loss', min_delta=0, patience=10, verbose=1, mode='auto')
best_model_filename=session+'-weights.{epoch:02d}-{val_loss:.2f}.h5'
best_model = ModelCheckpoint(best_model_filename, monitor='val_acc', verbose=1, save_best_only=True)
# this is the augmentation configuration we will use for training
##classifier.fit_generator(
# generator=training_set,
# epochs=10,
# verbose=1,
#validation_data=test_set,
#callbacks=[best_model, csv_logger, early_stopping])##
model = classifier.fit_generator(training_set,
steps_per_epoch = 1000,
epochs = 3,
validation_data = test_set,
validation_steps = 32)
classifier.save("model.h5")
print("Saved model to disk")
# Part 3 - Making new predictions
import numpy as np
from keras.preprocessing import image
test_image = image.load_img('data/val/person1_bacteria_1.jpeg', target_size = (64, 64))
#test_image = image.load_img('inputImage.jpg', target_size = (64, 64))
test_image = image.img_to_array(test_image)
test_image = np.expand_dims(test_image, axis = 0)
result = classifier.predict(test_image)
print(result)
training_set.class_indices
if result[1][0][0] == 1:
prediction = 'Normal'
print(prediction)
elif result[0][1][0] == 1:
prediction = 'Pneumonia'
print(prediction)
elif result[0][0][1] == 1:
prediction = 'Covid'
print(prediction) | [
"pinki.dafare@gmail.com"
] | pinki.dafare@gmail.com |
afc1a666a4f46b1ed0c0e581248921cb5c0ed709 | 3a9ab7e571e992b7af50c23673cb9b31971a8868 | /python essentials/indexing and slicing/indexing.py | 45ac5d5eedadc0db9495a51a432e1fd02c3abf73 | [] | no_license | chenkeyu1997/python | 6e3f3997aa62d245699e1b9066c201a2b015c434 | 9a638242b06735cfa48ad074909842452b104651 | refs/heads/master | 2022-12-18T11:07:10.126932 | 2020-09-21T06:00:05 | 2020-09-21T06:00:05 | 296,557,357 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 181 | py | s='hello world'
print(s[0])
"""负向索引,负几则表示倒数第几个元素"""
print(s[-2])
try:
print(s[123])
except IndexError:
print("string index out of range")
| [
"cky123"
] | cky123 |
2a1904b74022c083eb628ee0359acd8f7fb3c450 | 4b93aa80436d4683d0254c4bd4b1e95c41e8c6ce | /ZenPacks/community/zenAppProfiler/ProfileSets.py | 5e8074b104b9d14da519239077c4f3a688f2f899 | [] | no_license | j053ph4/ZenPacks.community.zenAppProfiler | 984d5aa04766cc6209ee76a055ceab3373baa0bd | e41d030d67b5df8e41a40f180fa1f57656f20a03 | refs/heads/master | 2021-01-22T22:39:16.448979 | 2011-10-20T20:39:36 | 2011-10-20T20:39:36 | 2,608,336 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,993 | py | import re
import os
import string
import Globals
from Products.ZenModel.ZenPackable import ZenPackable
from ProfileData import ProfileData
class ProfileSets(ZenPackable):
''' Class containing logic for evaluating rule outcomes.
'''
def __init__(self,dmd):
self.dmd = dmd
self.data = ProfileData(self.dmd)
def evalSets(self,sets,matchAll):
""" compute union or intersection of a set of sets
matchAll of True == intersection, union otherwise
"""
resultSet = None
for i in range(len(sets)):
setI = sets[i]
if resultSet == None:
resultSet = setI
for j in range(i):
setJ = sets[j]
if matchAll == True:
matchSet = setI.intersection(setJ)
resultSet = resultSet.intersection(matchSet)
else:
matchSet = setI.union(setJ)
resultSet = resultSet.union(matchSet)
return resultSet
def evalRulesets(self,rulesets,matchAll):
""" evaluate multiple rulesets
"""
sets = []
for ruleset in rulesets:
rset = self.evalRuleset(ruleset)
sets.append(rset)
return self.evalSets(sets,matchAll)
def evalRuleset(self,ruleset,easy=False):
""" evaluate all rules in a ruleset, return set of matching devices
"""
sets = [] # array containing sets of rule-matched devices
if ruleset != None:
rules = ruleset.rules()
for rule in rules:
results = []
if rule.enabled == True:
if easy == True:
results = self.evalRuleSimple(rule)
else:
results = self.evalRule(rule)
#if len(results) > 0:
sets.append(results)
if len(sets) > 0:
return self.evalSets(sets,ruleset.matchAll)
else:
"returning sets"
return sets
def evalRuleSimple(self,rule):
""" faster testing assuming that matches are already built
"""
ruleMatches = []
ruleMatches += rule.getRulePotentialMatches()
ruleMatches += rule.getRuleCurrentMatches()
return set(ruleMatches)
def evalRule(self,rule):
""" evaluate a rule, return set of matching devices
"""
if rule.ruleKey == 'Ruleset':
ruleSet = self.dmd.Profiles.findRuleset(rule.ruleValue)
return self.evalRuleset(ruleSet,easy=True)
ruleMatches = set()
for device in self.dmd.Devices.getSubDevices():
if self.data.evalRuleOnDevice(rule,device) == True:
ruleMatches.add(device)
return ruleMatches
def evalRuleComponents(self,rule,devices,getAll=True):
""" evaluate a rule, return set of matching components
"""
components = []
if rule.ruleKey != 'System' and rule.ruleKey != 'Group' and rule.ruleKey != 'Ruleset' and rule.ruleKey != 'Location' and rule.ruleKey != 'Device':
for device in devices:
components += self.data.evalRuleWithObjects(rule,device)
if rule.ruleKey == 'Ruleset':
rs = self.dmd.Profiles.findRuleset(rule.ruleValue)
if getAll == True:
components += self.getRulesetComponents(rs,devices)
else:
components += self.getRulesetFilteredComponents(rs,devices)
#print "found",len(components),"components on rule",rule.ruleKey,rule.ruleValue,"for",len(devices),"devices"
rule.ruleComponents = components
return components
def getRulesetComponents(self,ruleset,devices):
print "components on ruleset",ruleset.id,"for",len(devices),"devices"
components = []
for rule in ruleset.rules():
if rule.ruleKey != 'System' and rule.ruleKey != 'Group' and rule.ruleKey != 'Location' and rule.ruleKey != 'Device':
comps = self.evalRuleComponents(rule,devices)
components += comps
#print "found",len(components),"components"
return components
def getRulesetFilteredComponents(self,ruleset,devices):
#print "components on ruleset",ruleset.id,"for",len(devices),"devices"
componentsets = []
for rule in ruleset.rules():
if rule.ruleKey != 'System' and rule.ruleKey != 'Group' and rule.ruleKey != 'Location' and rule.ruleKey != 'Device':
comps = self.evalRuleComponents(rule,devices,False)
if len(comps) > 0:
componentsets.append(set(comps))
rulesetcomponents = self.evalSets(componentsets,ruleset.matchAll)
#print "set of rs components",len(rulesetcomponents)
if rulesetcomponents != None:
return rulesetcomponents
else:
return []
| [
"janderson@atxg.com"
] | janderson@atxg.com |
a66199c42e47bad003812f72f14cdc6b3c5af967 | 54b52c70ebf3b3c17c72f7c5cc8219a060c1ffdf | /main/migrations/0001_initial.py | d78f2b40b82a4ca0837be9975207f8df6bde8129 | [] | no_license | arslan77/learnobot | e892d990736bd4a882ba6786cfbe00dc00acd326 | 7646cde351231a138b6e30529690f76b3989837e | refs/heads/master | 2020-03-29T10:40:46.554285 | 2018-09-24T03:53:48 | 2018-09-24T03:53:48 | 149,817,437 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,236 | py | # Generated by Django 2.1.1 on 2018-09-20 23:17
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('Course', '0004_auto_20180921_0417'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='MyCourse',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('course_completed', models.BooleanField(default=False)),
('percentage', models.IntegerField()),
('course', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='Course.Course')),
('current_course_work', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='Course.CourseWork')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='MyQuiz',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('marks', models.IntegerField(blank=True, null=True)),
('course_work', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='Course.CourseWork')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='MyQuizDetail',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('selected_option', models.CharField(blank=True, max_length=5, null=True)),
('right_option', models.CharField(blank=True, max_length=5, null=True)),
('is_right', models.BooleanField(blank=True, default=False, null=True)),
('myQuiz', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='main.MyQuiz')),
],
),
]
| [
"arslanarshad07@gmail.com"
] | arslanarshad07@gmail.com |
f71f2963e3eed53924dacf78596b44a5360a72d6 | ea7555f1a0ae52a3f97268add832ba1e45d23c8e | /plate_tracker.py | e0573c66651ed579341fc45b01fe37f2c9464008 | [] | no_license | alexjfreij/Projects | bae27c580b29e9d40aa2071ac79afbc35d97838f | 71ba51929e62ff2ae0d3de4910bb32ce500b372f | refs/heads/master | 2021-05-25T21:17:02.814498 | 2020-04-07T22:19:10 | 2020-04-07T22:19:10 | 253,923,017 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,545 | py | # Program to read a jpg files and convert it to one file
# with data that has a time stamp from the file and convert
# the content of the file into a character string of license plate
# and date and time to process
#
#
import os, datetime, time
from datetime import datetime
from os.path import getmtime
import argparse, csv, struct, array, sys, operator, binascii, keyword, os, string
import subprocess, re, codecs
from subprocess import Popen, PIPE
from shutil import copyfile
#entries = os.listdir('plates/')
#for entry in entries:
# print(entry)
# detect car coming in park
#
# Take a snapshot
#
file="arabic.jpg"
file="alex.jpg"
# get time and date
#******************
str1 = time.ctime(os.path.getctime(file)) # retrieve Date and Time
datetime_object = datetime.strptime(str1, '%a %b %d %H:%M:%S %Y')
# print (datetime_object)
line = datetime_object.strftime("%m/%d/%Y %H:%M:%S \n \n" ) # Date format change to 06/07/2013
#print (datetime_object.strftime)
#image_lp = open(file, 'rb')
#get license plates
#******************
command ="alpr.exe -c eu -n 1 " + file +" >> text.txt"
subprocess.call(command, shell=True)
#with open('text.txt', 'r') as f: # load file
# lines = f.read().splitlines() # read lines
# lines = lines[10:10]
#print (lines)
f = open('text.txt','r')
lines =f.read()
lines = lines[24:33]
f.close()
command ="del text.txt"
subprocess.call(command, shell=True)
fw = open("tracker.txt", "a")
fw.write(lines+line)
fw.close() | [
"noreply@github.com"
] | noreply@github.com |
4296ad62182dde60033292fbec716ff5b8b146d6 | fe45c03ac8ee55122e89b4ee2e541cd196c43b93 | /Coffee/apps.py | a1e1644ad55807044d97515b49cbaabd4c882566 | [] | no_license | ssemtner/CheckYoSelf | 593a23b25d36ce7e5550822364b713ec3cfa7a51 | 36b497df3cd87af03b77476d393b56d3ba436edc | refs/heads/main | 2023-02-16T09:23:26.179680 | 2021-01-13T18:23:03 | 2021-01-13T18:23:03 | 312,876,268 | 1 | 1 | null | 2020-12-10T02:25:52 | 2020-11-14T18:22:38 | Python | UTF-8 | Python | false | false | 87 | py | from django.apps import AppConfig
class CoffeeConfig(AppConfig):
name = 'Coffee'
| [
"sjsemtner@gmail.com"
] | sjsemtner@gmail.com |
6884574422f998a6b1d4e95b2c63df25a2b77db1 | ddc7c07a4273aab5289a44fde5230856404a771b | /practica_02/p02e04.py | 6a44df4ae2e37f90f2693033f98b3c0245ae6e05 | [] | no_license | juan81mg/python2020 | 58736dd993461ab4b2952ccf4438df6e87523f54 | e9f8d740569727d95c30ec306ada2774570dd04f | refs/heads/master | 2021-03-16T20:19:56.648054 | 2020-06-22T16:49:27 | 2020-06-22T16:49:27 | 246,938,731 | 0 | 0 | null | 2020-05-25T19:55:44 | 2020-03-12T22:04:13 | Python | UTF-8 | Python | false | false | 597 | py | from random import shuffle
preguntas = [['Buenos Aires limita con Santiago del Estero', 'no'], ['Jujuy limita con Bolivia', 'si'], ['San Juan limita con Misiones', 'no']]
puntaje = 0 #inicializo el puntaje
shuffle(preguntas) #desordeno la lista
print('+++++ Juego de Preguntas +++++\n')
for p in preguntas:
print('>>> pregunta <<<\n', p[0])
res = input('respuesta (si/no):')
if (res == p[1]): #evaluo la respuesta
print('--->>> respuesta correcta\n')
puntaje = puntaje + 1
else:
print('--->>> respuesta incorrecta\n')
print('su puntaje es >>>>>', puntaje) | [
"juan81mg@gmail.com"
] | juan81mg@gmail.com |
b1bd3b08a695c7d52e2546ee8ac9436a091b47ac | 22a7e337eca6a244205b48e4d06cca67f2b5033a | /SimpleCode/PY_CookBook/chapter8/ceshi_yield.py | 06b50f2eafeb4ee2b61554155e44228699ac437a | [] | no_license | chen19901225/SimplePyCode | 48f373f66e486276ed6603b7d0d8e388bd236a6c | 8276c660e7663688d3d381391a77a50f90e61afa | refs/heads/master | 2021-01-01T19:34:57.697967 | 2014-11-12T10:05:14 | 2014-11-12T10:05:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 402 | py |
def echo(value=None):
print "Execution starts when 'next()' is called for the first time."
try:
while True:
try:
value=(yield value)
except Exception,e:
value=e
finally:
print "Don't forget to clean up when 'close()' is called."
geneator=echo(1)
print geneator.next()
print geneator.next()
print geneator.send(2)
| [
"guoning.leng@o2omobi.com"
] | guoning.leng@o2omobi.com |
5e7d45027e713ff5923387f455b14bddd9a31ef3 | 2ae53bf6063c0bb5c227e17c049c0c5963861d7f | /setup.py | 73d9782ed28756e09d357999e1812ddbc45eda21 | [] | no_license | PsychoDramatic/openshift_django | 207e5757e3479ce1baaa6196a4c9175afd089e3a | 0d76a07d3b9559429f3cd8031114949e3f141c62 | refs/heads/master | 2021-01-16T23:11:25.913630 | 2014-03-01T15:33:54 | 2014-03-01T15:33:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 313 | py | from setuptools import setup
setup(name='MyDjangoApp',
version='1.0',
description='Measuredly website',
author='Yourname',
author_email='your_email@somewhere.com',
url='http://www.python.org/sigs/distutils-sig/',
install_requires=['django==1.6','django-crispy-forms'],
)
| [
"steve@mocarski.com"
] | steve@mocarski.com |
2dd168e0be879c0702c0c7d22fff6db2edb3e519 | 528add9808b43905fb2f71c94fdf20d374b69878 | /usersApp/models.py | 8445c225db2dc6d46af37df5e1a28df75e4342fc | [] | no_license | paulsmalze/singleModelOrm | 5695804a492e1be06650bc0598779df9288c5dac | 30080a33a2a44ba13d595a7af136f5d9fd8ff12e | refs/heads/main | 2023-04-25T12:20:55.718554 | 2021-05-08T15:43:25 | 2021-05-08T15:43:25 | 365,535,730 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 380 | py | from django.db import models
# Create your models here.
class User(models.Model):
first_name = models.CharField(max_length=255)
last_name = models.CharField(max_length=255)
email_address = models.EmailField(unique=True)
age = models.IntegerField()
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
| [
"waffwegs@yahoo.com"
] | waffwegs@yahoo.com |
1954811c048f96bc8ce065432708a723ac852065 | 24d9ab3789aa0f7c3c6fffdf876315abb8b3f0ee | /app.py | ede3560a2c5a86c85ad5d62f44c4022c88dd421f | [] | no_license | winocas/project3 | cc876362988c4f418a9c4bc98a3527d667be51c6 | 8e55cffc815eaef38c586aebefc6aec17ecbb351 | refs/heads/master | 2023-03-29T05:29:54.804122 | 2021-03-30T12:28:05 | 2021-03-30T12:28:05 | 352,922,013 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,160 | py | # from flask import Flask, url_for, redirect
# app = Flask(__name__)
# @app.route('/')
# def home():
# return redirect(url_for('main'))
# @app.route('/service')
# def main():
# return '서비스'
# if __name__=='__main__':
# app.run(debug=True)
from flask import Flask, render_template, redirect, request, url_for
import corona_data
app = Flask(__name__)
@app.route('/')
def index():
data1 = corona_data.get_tot_coro()
data2 = corona_data.get_total_coro()
return render_template('index.html', data1=data1, data2=data2)
@app.route('/city')
def region():
data = corona_data.get_city_coro()
return render_template('region.html', data=data)
@app.route('/coro/')
@app.route('/coro/<city>')
def inputTest(city=None):
data = corona_data.get_city_coro()
return render_template('main.html', city=city, data=data)
@app.route('/cityinfo',methods=['POST'])
def calculate(city=None):
data = corona_data.get_city_coro()
if request.method == 'POST':
temp = request.form['city']
else:
temp = None
return redirect(url_for('inputTest',city=temp))
if __name__ == '__main__':
app.run() | [
"sec021122@gmail.com"
] | sec021122@gmail.com |
575b513edcb70131289f04f5a2a9725d843d874e | 0875d9c4ed9bec3794aee6435eb49243ab84ac05 | /Machine Learning/IntroduccionML/Algoritmos/IntroVectorSR.py | 9716ba998a6b438171a84f7cd323db2249efb065 | [] | no_license | AngelSosaGonzalez/IntroduccionMachineLearning | b49ba29a717bd2c3bbd6c93615d3ef3bc5e1561d | d9e13efe5703e6a6375a971c87fd000ba98024c7 | refs/heads/main | 2023-03-05T14:08:23.066293 | 2021-02-20T04:31:34 | 2021-02-20T04:31:34 | 327,723,090 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,221 | py | """ Vectore de soporte de regresion: En este proyecto veremos en la marcha el concepto de como se puede aplicar este algoritmo de ML,
en este proyecto aplicaremos los conociemiento basicos aprendidos en ML, de los antiguos proyecto que estan en este recopilatorio,
te recomiendo revisar los proyecto que hablan sobre regrecion (Lineal, Multiple, Polinomial), antes de comenzar quiero aclarar que este proyecto se basa (o copia mas bien) del curso de
Machine Learning del canal de: AprendeIA con Ligdi Gonzalez, fuente: https://www.youtube.com/watch?v=zvB0cshd0TM&list=PLJjOveEiVE4Dk48EI7I-67PEleEC5nxc3&index=24 """
#Importaremos los modulos necesarios para el proyecto
#Importamos Numpy para los arreglos
import numpy
#Importamos el modulo de matplotlib para graficar
import matplotlib.pyplot as plt
#Ya para el DataSet (casas de boston) importaremos el modulo correspondiente
from sklearn import datasets
#Importamos el modulo que nos ayuda a separar los datos de prueba a los de entrenamiento
from sklearn.model_selection import train_test_split
#Importamos el algoritmo a seleccionar (en este caso es VSR o SVR)
from sklearn.svm import SVR
#Importamos nuestro DataSet en una variable (Esto para poder manipular los datos, ya lo hemos hecho en antiguos proyectos)
BostonDatos = datasets.load_boston()
#NOTA: Puede imprimir el DataSet para verificar si los datos son los correctos
#Al igual que el proyecto de "IntroRegresionPoli.py" obtendremos los datos que queremos
#Ahora vamos a seleccionar los datos necesarios para esto usaremos la cantidad de habitaciones
X_VR = BostonDatos.data[:, numpy.newaxis, 5]
#Obtendremos las etiquetas de los datos
Y_VR = BostonDatos.target
#Graficamos los datos que obtuvimos de data y target, para esto usamos matplotlib
plt.scatter(X_VR, Y_VR) #Recuerda que scatter son para graficas de dispercion
plt.show()
#Separamos los datos en entrenamiento y prueba
X_Entrena, X_Prueba, Y_Entrena, Y_Prueba = train_test_split(X_VR, Y_VR, test_size = 0.2) #Recuerda que test_size, es el tamaño de la muestra que obtendremos del DataSet
""" Invocamos a nuestro algoritmo
Atributos de la funcion de nuestro algoritmo:
- Kernel: Especificamos el tipo de datos a utilizar en nuestro algoritmo, como vimos en la grafica de dispercion nuestros datos
son de tipo lineal, por lo que tenemos que especificarle a nuestro algoritmo que tipo de datos usamos
- C: Parámetro de regularización. La fuerza de la regularización es inversamente proporcional a C.
Debe ser estrictamente positiva. La penalización es una penalización l2 al cuadrado.
- epsilon: Epsilon en el modelo epsilon-SVR. Especifica el tubo de épsilon dentro del cual no se
asocia ninguna penalización en la función de pérdida de entrenamiento con puntos predichos dentro de una distancia epsilon desde el valor real.
Todo esto lo puedes leer en la documentacion de la funcion: https://scikit-learn.org/stable/modules/generated/sklearn.svm.SVR.html """
AlgoSVR = SVR(kernel='linear', C=1.0, epsilon=0.2)
#Ya teniendo el algoritmo ya creado con sus atributos vamos a entrenarlo
AlgoSVR.fit(X_Entrena, Y_Entrena)
#Vemos el score que nos arroja nuestro algoritmo
print(AlgoSVR.score(X_Entrena, Y_Entrena)) #Usaremos lo datos de entrenamiento
print(AlgoSVR.score(X_Prueba, Y_Prueba)) #Ahora con los datos de prueba
#Vemos nos dio un resultamo muy bajo vamos a graficar para ver que cantidad de datos toma de muestra nuestro algoritmo
#Primero realizamos una prediccion
Y_Prediccion = AlgoSVR.predict(X_Prueba)
#Vamos a graficar, para este caso graficaremos igual que el algoritmo de regrecion polinomial
plt.scatter(X_Prueba, Y_Prueba)
plt.plot(X_Prueba, Y_Prediccion, color = 'red', linewidth = 2) #Recuerda que plot sirve para la graficas de linea (Para este caso, nos mostrara los datos que recolecta nuestro algoritmo)
plt.show()
""" Ahora gracias a la grafica veremos que solamente se dibuja una linea esto porque en la creacion del algoritmo en el atributo del Kernel
seleccionamos lineal, por lo tanto solo se dibujara una linea y los datos que agarra la linea son lo que usaremos para la prediccion """
#Perooo... te preguntaras como podemos mejorar el algoritmo, cambiando parametros es la mas acertada, pero vamos a experimentar solamente usando el algoritmo sin modificar parametros
#Primero eliminamos la variable de nuestro algortimo (Esto para no cargar mucho el sistema)
del AlgoSVR
#Ahora lo volvemos a crear (Invocamos el algoritmo)
AlgoSVR = SVR()
#Entrenamos nuestro algortimo
AlgoSVR.fit(X_Entrena, Y_Entrena)
#Vemos el score que nos arroja nuestro algoritmo
print(AlgoSVR.score(X_Entrena, Y_Entrena)) #Usaremos lo datos de entrenamiento
print(AlgoSVR.score(X_Prueba, Y_Prueba)) #Ahora con los datos de prueba
#Realzamos una prediccion
Y_Prediccion = AlgoSVR.predict(X_Prueba)
#Graficamos para ver como esta nuestro algoritmo
plt.scatter(X_Prueba, Y_Prueba)
plt.plot(X_Prueba, Y_Prediccion, color = 'red', linewidth = 2)
plt.show()
""" Comparando los resultados veremos que el algorito aumento el score, no mucho, como digo podemos mejorarlo cambiando parametros,
pero como usamos un kernel lineal una linea no agarra todos los datos necesarios para tener una buena prediccion """ | [
"angelsosagonz@gmail.com"
] | angelsosagonz@gmail.com |
eaafa5a0d6f534e474290d094f533956580c7495 | 689fcced10cc920c263e4d85bed5a51f85c76abb | /aragwas_server/aragwas/settings/dev.py | 8c4d30842e367920ea352a2e0638d1fd37694449 | [
"MIT"
] | permissive | 1001genomes/AraGWAS | ddb10ea3e476c8cee31e75f9db6dc2bd79f7f487 | be02c0480bf18228b07853740e63f249fe31d7e5 | refs/heads/master | 2022-12-29T00:12:59.936918 | 2020-08-31T16:32:04 | 2020-08-31T16:32:04 | 82,693,787 | 13 | 9 | MIT | 2022-12-06T20:20:08 | 2017-02-21T15:11:31 | Vue | UTF-8 | Python | false | false | 554 | py | """
Development settings using sqlite3 and DEBUG = TRUE
"""
import os
# Load defaults in order to then add/override with dev-only settings
from .defaults import *
DEBUG = True
TEMPLATES[0]['OPTIONS']['debug'] = DEBUG
DATACITE_REST_URL='https://mds.test.datacite.org/'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
| [
"uemit.seren@gmail.com"
] | uemit.seren@gmail.com |
cbc7ff8ebf0f55d2d54140505ce153702a872ce4 | 9784a90cac667e8e0aaba0ca599b4255b215ec67 | /convert_models.py | d3b743be02b1e3c57c11c482699a1440c12daa44 | [
"MIT"
] | permissive | osmr/imgclsmob | d2f48f01ca541b20119871393eca383001a96019 | f2993d3ce73a2f7ddba05da3891defb08547d504 | refs/heads/master | 2022-07-09T14:24:37.591824 | 2021-12-14T10:15:31 | 2021-12-14T10:15:31 | 140,285,687 | 3,017 | 624 | MIT | 2022-07-04T15:18:37 | 2018-07-09T12:57:46 | Python | UTF-8 | Python | false | false | 87,933 | py | """
Script for converting models between frameworks (MXNet, Gluon, PyTroch, Chainer, Keras, TensorFlow).
"""
import argparse
import logging
import re
import numpy as np
from common.logger_utils import initialize_logging
def parse_args(args=None):
    """
    Parse command-line arguments of the model conversion script.

    Parameters
    ----------
    args : list of str or None, default None
        Argument strings to parse instead of `sys.argv[1:]`. The default
        `None` preserves the original CLI behavior (argparse falls back to
        `sys.argv`); passing an explicit list makes the parser usable and
        testable from code.

    Returns
    -------
    argparse.Namespace
        Parsed arguments (attribute names use underscores, e.g. `src_fwk`).
    """
    parser = argparse.ArgumentParser(description="Convert models (Gluon/PyTorch/Chainer/MXNet/Keras/TF/TF2)",
                                     formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    # Source/destination framework and model names are mandatory.
    parser.add_argument(
        "--src-fwk",
        type=str,
        required=True,
        help="source model framework name")
    parser.add_argument(
        "--dst-fwk",
        type=str,
        required=True,
        help="destination model framework name")
    parser.add_argument(
        "--src-model",
        type=str,
        required=True,
        help="source model name")
    parser.add_argument(
        "--dst-model",
        type=str,
        required=True,
        help="destination model name")
    # Weight-file paths; an empty string means 'not specified'.
    parser.add_argument(
        "--src-params",
        type=str,
        default="",
        help="source model parameter file path")
    parser.add_argument(
        "--dst-params",
        type=str,
        default="",
        help="destination model parameter file path")
    # PyTorch-specific loading switches (see prepare_src_model).
    parser.add_argument(
        "--load-ignore-extra",
        action="store_true",
        help="ignore extra layers in the source PyTorch model")
    parser.add_argument(
        "--remove-module",
        action="store_true",
        help="enable if stored PyTorch model has module")
    # Non-positive class counts fall back to the model's own default
    # (downstream code uses `num_classes if num_classes > 0 else None`).
    parser.add_argument(
        "--src-num-classes",
        type=int,
        default=1000,
        help="number of classes for source model")
    parser.add_argument(
        "--src-in-channels",
        type=int,
        default=3,
        help="number of input channels for source model")
    parser.add_argument(
        "--dst-num-classes",
        type=int,
        default=1000,
        help="number of classes for destination model")
    parser.add_argument(
        "--dst-in-channels",
        type=int,
        default=3,
        help="number of input channels for destination model")
    parser.add_argument(
        "--model-type",
        type=str,
        default="image",
        help="model type (image or audio)")
    parser.add_argument(
        "--save-dir",
        type=str,
        default="",
        help="directory of saved models and log-files")
    parser.add_argument(
        "--logging-file-name",
        type=str,
        default="train.log",
        help="filename of training log")
    return parser.parse_args(args)
def prepare_src_model(src_fwk,
src_model,
src_params_file_path,
dst_fwk,
ctx,
use_cuda,
load_ignore_extra=False,
remove_module=False,
num_classes=None,
in_channels=None):
ext_src_param_keys = None
ext_src_param_keys2 = None
src_net = None
if src_fwk == "gluon":
from gluon.utils import prepare_model as prepare_model_gl
src_net = prepare_model_gl(
model_name=src_model,
use_pretrained=False,
pretrained_model_file_path=src_params_file_path,
dtype=np.float32,
tune_layers="",
classes=(num_classes if num_classes > 0 else None),
in_channels=in_channels,
ctx=ctx)
src_params = src_net._collect_params_with_prefix()
src_param_keys = list(src_params.keys())
if src_model in ["oth_resnet50_v1", "oth_resnet101_v1", "oth_resnet152_v1", "oth_resnet50_v1b",
"oth_resnet101_v1b", "oth_resnet152_v1b"]:
src_param_keys = [key for key in src_param_keys if
not (key.startswith("features.") and key.endswith(".bias"))]
if src_model in ["oth_resnet50_v1", "oth_resnet101_v1", "oth_resnet152_v1", "oth_resnet50_v1b",
"oth_resnet101_v1b", "oth_resnet152_v1b"]:
src_param_keys = [key for key in src_param_keys if
not (key.startswith("features.") and key.endswith(".bias"))]
if src_model.startswith("wrn20_10_1bit") or src_model.startswith("wrn20_10_32bit"):
src_param_keys = [key for key in src_param_keys if
not (key.startswith("features.") and
(key.endswith(".bn.gamma") or key.endswith(".bn.beta")))]
if dst_fwk == "chainer":
src_param_keys_ = src_param_keys.copy()
src_param_keys = [key for key in src_param_keys_ if (not key.endswith(".running_mean")) and
(not key.endswith(".running_var"))]
ext_src_param_keys = [key for key in src_param_keys_ if (key.endswith(".running_mean")) or
(key.endswith(".running_var"))]
if src_model in ["condensenet74_c4_g4", "condensenet74_c8_g8"]:
src_param_keys_ = src_param_keys.copy()
src_param_keys = [key for key in src_param_keys_ if (not key.endswith(".index"))]
ext_src_param_keys2 = [key for key in src_param_keys_ if (key.endswith(".index"))]
elif src_model.startswith("xdensenet"):
src_param_keys_ = src_param_keys.copy()
src_param_keys = [key for key in src_param_keys_ if (not key.endswith(".mask"))]
ext_src_param_keys2 = [key for key in src_param_keys_ if (key.endswith(".mask"))]
elif src_model.startswith("jasper") or src_model.startswith("quartznet"):
src_param_keys_ = src_param_keys.copy()
src_param_keys = [key for key in src_param_keys_ if (not key.endswith(".window")) and
(not key.endswith(".fb"))]
ext_src_param_keys2 = [key for key in src_param_keys_ if (key.endswith(".window")) or
(key.endswith(".fb"))]
elif src_fwk == "pytorch":
from pytorch.utils import prepare_model as prepare_model_pt
src_net = prepare_model_pt(
model_name=src_model,
use_pretrained=False,
pretrained_model_file_path=src_params_file_path,
use_cuda=use_cuda,
use_data_parallel=False,
load_ignore_extra=load_ignore_extra,
num_classes=(num_classes if num_classes > 0 else None),
in_channels=in_channels,
remove_module=remove_module)
src_params = src_net.state_dict()
src_param_keys = list(src_params.keys())
if dst_fwk != "pytorch":
src_param_keys = [key for key in src_param_keys if not key.endswith("num_batches_tracked")]
if src_model in ["oth_shufflenetv2_wd2"]:
src_param_keys = [key for key in src_param_keys if not key.startswith("network.0.")]
if src_model.startswith("oth_dla"):
src1 = list(filter(re.compile("\.project").search, src_param_keys))
src1n = [key for key in src_param_keys if key not in src1]
src2 = []
for i in range(2, 6):
src1_i = list(filter(re.compile("level{}".format(i)).search, src1))
if len(src1_i) == 0:
continue
max_len = max([len(k) for k in src1_i])
pattern_i = [k for k in src1_i if len(k) == max_len][0][:-21]
src2_i = list(filter(re.compile(pattern_i).search, src1))
src2 += src2_i
src_param_keys = src2 + src1n
elif src_fwk == "mxnet":
import mxnet as mx
src_sym, src_arg_params, src_aux_params = mx.model.load_checkpoint(
prefix=src_params_file_path,
epoch=0)
src_params = {}
src_params.update(src_arg_params)
src_params.update(src_aux_params)
src_param_keys = list(src_params.keys())
elif src_fwk == "tensorflow":
# import tensorflow as tf
# from tensorflow_.utils import prepare_model as prepare_model_tf
# src_net = prepare_model_tf(
# model_name=src_model,
# classes=num_classes,
# use_pretrained=False,
# pretrained_model_file_path=src_params_file_path)
# src_param_keys = [v.name for v in tf.global_variables()]
# src_params = {v.name: v for v in tf.global_variables()}
src_net = None
src_params = dict(np.load(src_params_file_path))
src_param_keys = list(src_params.keys())
elif (src_fwk == "tf2") and (dst_fwk == "tfl"):
import tensorflow as tf
from tensorflow2.utils import prepare_model as prepare_model_tf2
gpus = tf.config.experimental.list_physical_devices("GPU")
if gpus:
for gpu in gpus:
tf.config.experimental.set_memory_growth(gpu, True)
src_net = prepare_model_tf2(
model_name=src_model,
use_pretrained=True,
pretrained_model_file_path="")
batch_size = 1
input_shape = ((batch_size, 3, src_net.in_size[0], src_net.in_size[1]) if
src_net.data_format == "channels_first" else
(batch_size, src_net.in_size[0], src_net.in_size[1], 3))
src_net(tf.random.normal(input_shape))
src_params = None
src_param_keys = None
else:
raise ValueError("Unsupported src fwk: {}".format(src_fwk))
return src_params, src_param_keys, ext_src_param_keys, ext_src_param_keys2, src_net
def prepare_dst_model(dst_fwk,
                      dst_model,
                      src_fwk,
                      ctx,
                      use_cuda,
                      num_classes=None,
                      in_channels=None,
                      model_type="image"):
    """
    Instantiate the destination network for weight conversion and collect its parameters.

    Parameters
    ----------
    dst_fwk : str
        Destination framework: 'gluon', 'pytorch', 'chainer', 'keras',
        'tensorflow', 'tf2' or 'tfl'.
    dst_model : str
        Destination model name.
    src_fwk : str
        Source framework name; used to filter PyTorch bookkeeping keys
        (`num_batches_tracked`) when the source is not PyTorch.
    ctx : Context
        MXNet context (used by the Gluon branch only).
    use_cuda : bool
        Whether to use CUDA (used by the PyTorch branch only).
    num_classes : int or None, default None
        Number of classes; None or a non-positive value keeps the model default.
    in_channels : int or None, default None
        Number of input channels (Gluon/PyTorch branches).
    model_type : str, default 'image'
        'image' builds the dummy input from `in_size`; any other value is
        treated as a sequence/audio model (tf2 branch only).

    Returns
    -------
    dst_params : dict or None
        Framework-specific mapping of parameter name to parameter object
        (None for 'tfl').
    dst_param_keys : list of str or None
        Parameter names (None for 'tfl').
    dst_net : object or None
        The instantiated destination network (None for 'tfl').
    """
    if dst_fwk == "gluon":
        from gluon.utils import prepare_model as prepare_model_gl
        dst_net = prepare_model_gl(
            model_name=dst_model,
            use_pretrained=False,
            pretrained_model_file_path="",
            dtype=np.float32,
            tune_layers="",
            # Guard against num_classes=None (the declared default); the bare
            # `num_classes > 0` comparison would raise TypeError.
            classes=(num_classes if num_classes and num_classes > 0 else None),
            in_channels=in_channels,
            ctx=ctx)
        dst_params = dst_net._collect_params_with_prefix()
        dst_param_keys = list(dst_params.keys())
    elif dst_fwk == "pytorch":
        from pytorch.utils import prepare_model as prepare_model_pt
        dst_net = prepare_model_pt(
            model_name=dst_model,
            use_pretrained=False,
            pretrained_model_file_path="",
            use_cuda=use_cuda,
            use_data_parallel=False,
            num_classes=(num_classes if num_classes and num_classes > 0 else None),
            in_channels=in_channels)
        dst_params = dst_net.state_dict()
        dst_param_keys = list(dst_params.keys())
        if src_fwk != "pytorch":
            # BN step counters exist only in PyTorch; other frameworks have
            # no counterpart, so drop them from the matching lists.
            dst_param_keys = [key for key in dst_param_keys if not key.endswith("num_batches_tracked")]
    elif dst_fwk == "chainer":
        from chainer_.utils import prepare_model as prepare_model_ch
        dst_net = prepare_model_ch(
            model_name=dst_model,
            use_pretrained=False,
            pretrained_model_file_path="")
        dst_params = {i[0]: i[1] for i in dst_net.namedparams()}
        dst_param_keys = list(dst_params.keys())
    elif dst_fwk == "keras":
        from keras_.utils import prepare_model as prepare_model_ke
        dst_net = prepare_model_ke(
            model_name=dst_model,
            use_pretrained=False,
            pretrained_model_file_path="")
        dst_param_keys = [v.name for v in dst_net.weights]
        # Map each weight name to its owning (layer, weight) pair; the weight
        # alone is not enough because conversion later needs layer attributes
        # such as `data_format`.
        dst_params = {}
        for layer in dst_net.layers:
            if layer.name:
                for weight in layer.weights:
                    if weight.name:
                        dst_params[weight.name] = (layer, weight)
    elif dst_fwk == "tensorflow":
        import tensorflow as tf
        from tensorflow_.utils import prepare_model as prepare_model_tf
        dst_net = prepare_model_tf(
            model_name=dst_model,
            use_pretrained=False,
            pretrained_model_file_path="")
        dst_param_keys = [v.name for v in tf.global_variables()]
        dst_params = {v.name: v for v in tf.global_variables()}
    elif dst_fwk == "tf2":
        import tensorflow as tf
        from tensorflow2.utils import prepare_model as prepare_model_tf2
        # Enable memory growth so TF does not grab all GPU memory up front.
        gpus = tf.config.experimental.list_physical_devices("GPU")
        if gpus:
            for gpu in gpus:
                tf.config.experimental.set_memory_growth(gpu, True)
        dst_net = prepare_model_tf2(
            model_name=dst_model,
            use_pretrained=False,
            pretrained_model_file_path="")
        # Run one dummy forward pass to build the model so `weights` exists.
        batch_size = 1
        if model_type == "image":
            input_shape = ((batch_size, 3, dst_net.in_size[0], dst_net.in_size[1]) if
                           dst_net.data_format == "channels_first" else
                           (batch_size, dst_net.in_size[0], dst_net.in_size[1], 3))
            dst_net(tf.random.normal(input_shape))
        else:
            seq_len = 100 * 640
            input_shape = (batch_size, seq_len)
            # np.int64 replaces the alias np.long, removed in NumPy 1.24.
            x_len = tf.convert_to_tensor(np.array([seq_len], dtype=np.int64))
            dst_net(tf.random.normal(input_shape), x_len)
        dst_param_keys = [v.name for v in dst_net.weights]
        dst_params = {v.name: v for v in dst_net.weights}
    elif dst_fwk == "tfl":
        # TFLite conversion works straight from the tf2 source net.
        dst_net = None
        dst_params = None
        dst_param_keys = None
    else:
        raise ValueError("Unsupported dst fwk: {}".format(dst_fwk))
    return dst_params, dst_param_keys, dst_net
def convert_mx2gl(dst_net,
                  dst_params_file_path,
                  dst_params,
                  dst_param_keys,
                  src_params,
                  src_param_keys,
                  src_model,
                  ctx):
    """
    Convert a raw MXNet (symbol/module) checkpoint into Gluon parameters and save them.

    Strategy (model specific): source keys are renamed into the destination
    naming scheme, both key lists are sorted with a natural (digit-aware)
    order so they line up positionally, the renames are then undone so the
    keys can index `src_params` again, and parameters are copied pairwise.

    Parameters
    ----------
    dst_net : HybridBlock
        Destination Gluon network.
    dst_params_file_path : str
        Path for the resulting Gluon parameter file.
    dst_params : dict
        Destination parameters (name -> Parameter).
    dst_param_keys : list of str
        Destination parameter names.
    src_params : dict
        Source parameters (merged arg/aux params from the checkpoint).
    src_param_keys : list of str
        Source parameter names.
    src_model : str
        Source model name; selects the renaming scheme.
    ctx : Context
        MXNet context used for initialization/loading.
    """
    if src_model in ["crunet56", "crunet116"]:
        # Natural sort: digit runs are padded so e.g. 'stage10' sorts after 'stage2'.
        src_param_keys.sort()
        src_param_keys.sort(key=lambda var: ["{:10}".format(int(x)) if
                                             x.isdigit() else x for x in re.findall(r"[^0-9]|[0-9]+", var)])
        # Rename source keys into the Gluon naming scheme so both lists
        # acquire the same relative sort order.
        src_param_keys = [re.sub("^conv", "features.", key) for key in src_param_keys]
        src_param_keys = [re.sub("^fc6", "output.1.", key) for key in src_param_keys]
        src_param_keys = [re.sub('_c1x1-a', '.body.conv1.', key) for key in src_param_keys]
        src_param_keys = [re.sub('_c3x3-b', '.body.conv2A.', key) for key in src_param_keys]
        src_param_keys = [re.sub('_c1x1-b', '.body.conv2B.', key) for key in src_param_keys]
        src_param_keys = [re.sub('_c1x1-c', '.body.conv3.', key) for key in src_param_keys]
        # Shared CRU bases: map the checkpoint's '[dimN]' basis tensors onto
        # the conv(T/Q) weights of specific units.
        src_param_keys = [re.sub('_x__x_1x1_bases\[dim3\]_weight$', '_x__1.body.conv1.convT.weight', key)
                          for key in src_param_keys]
        src_param_keys = [re.sub('_x__x_3x3_bases\[dim21\]_weight$', '_x__1.body.conv2.convT.weight', key)
                          for key in src_param_keys]
        src_param_keys = [re.sub('_x__\(1\)_1x1_bases\[dim3\]_weight$', '_x__1.body.conv1.convQ.weight', key)
                          for key in src_param_keys]
        src_param_keys = [re.sub('_x__\(1\)_3x3_bases\[dim21\]_weight$', '_x__1.body.conv2.convQ.weight', key)
                          for key in src_param_keys]
        src_param_keys = [re.sub('_x__\(2\)_1x1_bases\[dim3\]_weight$', '_x__7.body.conv1.convQ.weight', key)
                          for key in src_param_keys]
        src_param_keys = [re.sub('_x__\(2\)_3x3_bases\[dim21\]_weight$', '_x__7.body.conv2.convQ.weight', key)
                          for key in src_param_keys]
        src_param_keys = [re.sub('_x__\(3\)_1x1_bases\[dim3\]_weight$', '_x__14.body.conv1.convQ.weight', key)
                          for key in src_param_keys]
        src_param_keys = [re.sub('_x__\(3\)_3x3_bases\[dim21\]_weight$', '_x__14.body.conv2.convQ.weight', key)
                          for key in src_param_keys]
        src_param_keys = [re.sub('_c1x1-w\(s\/2\)', '.input_convZ.', key) for key in src_param_keys]
        src_param_keys = [re.sub('_c1x1-w_weight$', '.input_convZ.conv.weight', key) for key in src_param_keys]
        src_param_keys = [re.sub('_c1x1-w\(s\/1\)', '.input_conv.', key) for key in src_param_keys]
        src_param_keys = [re.sub('_c1x1-w\(s\/key\)', '.identity_conv.', key) for key in src_param_keys]
        src_param_keys = [re.sub('__conv_weight$', '.conv.weight', key) for key in src_param_keys]
        src_param_keys = [re.sub('__bn__bn_beta$', '.bn.beta', key) for key in src_param_keys]
        src_param_keys = [re.sub('__bn__bn_gamma$', '.bn.gamma', key) for key in src_param_keys]
        src_param_keys = [re.sub('__bn__bn_moving_mean$', '.bn.running_mean', key) for key in src_param_keys]
        src_param_keys = [re.sub('__bn__bn_moving_var$', '.bn.running_var', key) for key in src_param_keys]
        src_param_keys = [re.sub('1_x_1__relu-sp__bn_', '1_x_1.conv.bnA.', key) for key in src_param_keys]
        # Re-sort both key lists with the same natural order so they align.
        src_param_keys.sort()
        src_param_keys.sort(key=lambda var: ["{:10}".format(int(x)) if
                                             x.isdigit() else x for x in re.findall(r"[^0-9]|[0-9]+", var)])
        dst_param_keys.sort()
        dst_param_keys.sort(key=lambda var: ["{:10}".format(int(x)) if
                                             x.isdigit() else x for x in re.findall(r"[^0-9]|[0-9]+", var)])
        # Undo the renaming: map the (now aligned) source keys back to their
        # original checkpoint names so they can index src_params again.
        src_param_keys = [re.sub("^features\.", "conv", key) for key in src_param_keys]
        src_param_keys = [re.sub('^output\.1\.', 'fc6', key) for key in src_param_keys]
        src_param_keys = [re.sub('_x__1\.body\.conv1\.convT\.weight$', '_x__x_1x1_bases[dim3]_weight', key)
                          for key in src_param_keys]
        src_param_keys = [re.sub('_x__1\.body\.conv2\.convT\.weight$', '_x__x_3x3_bases[dim21]_weight', key)
                          for key in src_param_keys]
        src_param_keys = [re.sub('_x__1\.body\.conv1\.convQ\.weight$', '_x__(1)_1x1_bases[dim3]_weight', key)
                          for key in src_param_keys]
        src_param_keys = [re.sub('_x__1\.body\.conv2\.convQ\.weight$', '_x__(1)_3x3_bases[dim21]_weight', key)
                          for key in src_param_keys]
        src_param_keys = [re.sub('_x__7\.body\.conv1\.convQ\.weight$', '_x__(2)_1x1_bases[dim3]_weight', key)
                          for key in src_param_keys]
        src_param_keys = [re.sub('_x__7\.body\.conv2\.convQ\.weight$', '_x__(2)_3x3_bases[dim21]_weight', key)
                          for key in src_param_keys]
        src_param_keys = [re.sub('_x__14\.body\.conv1\.convQ\.weight$', '_x__(3)_1x1_bases[dim3]_weight', key)
                          for key in src_param_keys]
        src_param_keys = [re.sub('_x__14\.body\.conv2\.convQ\.weight$', '_x__(3)_3x3_bases[dim21]_weight', key)
                          for key in src_param_keys]
        src_param_keys = [re.sub('\.body\.conv1\.', '_c1x1-a', key) for key in src_param_keys]
        src_param_keys = [re.sub('\.body\.conv2A\.', '_c3x3-b', key) for key in src_param_keys]
        src_param_keys = [re.sub('\.body\.conv2B\.', '_c1x1-b', key) for key in src_param_keys]
        src_param_keys = [re.sub('\.body\.conv3\.', '_c1x1-c', key) for key in src_param_keys]
        src_param_keys = [re.sub('\.input_convZ\.conv\.weight$', '_c1x1-w_weight', key) for key in src_param_keys]
        src_param_keys = [re.sub('\.input_convZ\.', '_c1x1-w(s/2)', key) for key in src_param_keys]
        src_param_keys = [re.sub('\.input_conv\.', '_c1x1-w(s/1)', key) for key in src_param_keys]
        src_param_keys = [re.sub('\.identity_conv\.', '_c1x1-w(s/key)', key) for key in src_param_keys]
        src_param_keys = [re.sub('\.conv\.weight$', '__conv_weight', key) for key in src_param_keys]
        src_param_keys = [re.sub('\.bn\.beta$', '__bn__bn_beta', key) for key in src_param_keys]
        src_param_keys = [re.sub('\.bn\.gamma$', '__bn__bn_gamma', key) for key in src_param_keys]
        src_param_keys = [re.sub('\.bn\.running_mean$', '__bn__bn_moving_mean', key) for key in src_param_keys]
        src_param_keys = [re.sub('\.bn\.running_var$', '__bn__bn_moving_var', key) for key in src_param_keys]
        src_param_keys = [re.sub('1_x_1\.conv\.bnA\.', '1_x_1__relu-sp__bn_', key) for key in src_param_keys]
        # Positional copy with skipping: the destination may contain extra
        # weights with no source counterpart; advance dst_i (up to 10 tries)
        # until suffix and shape both match.
        dst_i = 0
        for src_i, src_key in enumerate(src_param_keys):
            dst_key = dst_param_keys[dst_i]
            for tt in range(10):
                if (dst_key.split('.')[-1].split('_')[-1] == src_key.split('_')[-1]) and\
                        (dst_params[dst_key].shape == src_params[src_key].shape):
                    break
                # Only extra 'weight' params are expected to be skippable.
                assert (dst_key.split('.')[-1].split('_')[-1] == "weight")
                dst_i += 1
                dst_key = dst_param_keys[dst_i]
            dst_i += 1
            assert (dst_key.split('.')[-1].split('_')[-1] == src_key.split('_')[-1])
            assert (dst_params[dst_key].shape == src_params[src_key].shape), \
                "src_key={}, dst_key={}, src_shape={}, dst_shape={}".format(
                    src_key, dst_key, src_params[src_key].shape, dst_params[dst_key].shape)
            dst_params[dst_key]._load_init(src_params[src_key], ctx)
        # Initialize any destination parameters that were not filled above.
        for param in dst_net.collect_params().values():
            if param._data is not None:
                continue
            print("param={}".format(param))
            param.initialize(ctx=ctx)
        dst_net.save_parameters(dst_params_file_path)
        return
    elif src_model in ["igcv3_w1"]:
        # Rename source keys into the Gluon-like scheme for alignment.
        src_param_keys = [key.replace("seq-", "features.") for key in src_param_keys]
        src_param_keys = [key.replace("fc_", "output.1.") for key in src_param_keys]
        src_param_keys = [key.replace('-batchnorm_beta', '.bn.beta') for key in src_param_keys]
        src_param_keys = [key.replace('-batchnorm_gamma', '.bn.gamma') for key in src_param_keys]
        src_param_keys = [key.replace('-batchnorm_moving_mean', '.bn.running_mean') for key in src_param_keys]
        src_param_keys = [key.replace('-batchnorm_moving_var', '.bn.running_var') for key in src_param_keys]
        src_param_keys = [key.replace('-conv2d_weight', '.conv.weight') for key in src_param_keys]
        # 'A'/'B' keep the init/final blocks at the ends of the sort order.
        src_param_keys = [key.replace('first-3x3-conv', 'features.A') for key in src_param_keys]
        src_param_keys = [key.replace('last-1x1-conv', 'features.B') for key in src_param_keys]
        src_param_keys = [key.replace('-exp', '.conv1') for key in src_param_keys]
        src_param_keys = [key.replace('-depthwise', '.conv2') for key in src_param_keys]
        src_param_keys = [key.replace('-linear', '.conv3') for key in src_param_keys]
        src_param_keys = [key.replace("-block", ".block") for key in src_param_keys]
        dst_param_keys = [key.replace('features.0.', 'features.A.') for key in dst_param_keys]
        dst_param_keys = [key.replace('features.6.', 'features.B.') for key in dst_param_keys]
        # Natural (digit-aware) sort of both lists.
        src_param_keys.sort()
        src_param_keys.sort(key=lambda var: ["{:10}".format(int(x)) if
                                             x.isdigit() else x for x in re.findall(r"[^0-9]|[0-9]+", var)])
        dst_param_keys.sort()
        dst_param_keys.sort(key=lambda var: ["{:10}".format(int(x)) if
                                             x.isdigit() else x for x in re.findall(r"[^0-9]|[0-9]+", var)])
        # Undo the renaming on both lists.
        src_param_keys = [key.replace('.bn.beta', '-batchnorm_beta') for key in src_param_keys]
        src_param_keys = [key.replace('.bn.gamma', '-batchnorm_gamma') for key in src_param_keys]
        src_param_keys = [key.replace('.bn.running_mean', '-batchnorm_moving_mean') for key in src_param_keys]
        src_param_keys = [key.replace('.bn.running_var', '-batchnorm_moving_var') for key in src_param_keys]
        src_param_keys = [key.replace('.conv.weight', '-conv2d_weight') for key in src_param_keys]
        src_param_keys = [key.replace('features.A', 'first-3x3-conv') for key in src_param_keys]
        src_param_keys = [key.replace('features.B', 'last-1x1-conv') for key in src_param_keys]
        src_param_keys = [key.replace('.conv1', '-exp') for key in src_param_keys]
        src_param_keys = [key.replace('.conv2', '-depthwise', ) for key in src_param_keys]
        src_param_keys = [key.replace('.conv3', '-linear') for key in src_param_keys]
        src_param_keys = [key.replace("features.", "seq-") for key in src_param_keys]
        src_param_keys = [key.replace("output.1.", "fc_") for key in src_param_keys]
        src_param_keys = [key.replace(".block", "-block") for key in src_param_keys]
        dst_param_keys = [key.replace('features.A.', 'features.0.') for key in dst_param_keys]
        dst_param_keys = [key.replace('features.B.', 'features.6.') for key in dst_param_keys]
    elif src_model in ["preresnet269b"]:
        # This BN has no source counterpart: re-initialize it and drop its
        # keys from the destination list so positional matching stays aligned.
        dst_net.features[1][0].body.conv1a.bn.initialize(ctx=ctx, verbose=True, force_reinit=True)
        dst1 = list(filter(re.compile("^features.1.0.body.conv1.bn.").search, dst_param_keys))
        dst_param_keys = [key for key in dst_param_keys if key not in dst1]
        src_param_keys.sort()
        src_param_keys.sort(key=lambda var: ["{:10}".format(int(x)) if
                                             x.isdigit() else x for x in re.findall(r"[^0-9]|[0-9]+", var)])
        # Rename for alignment ('_aweight' forces conv weights to sort ahead
        # of the BN params of the same unit).
        src_param_keys = [re.sub('^classifier_', "output.", key) for key in src_param_keys]
        src_param_keys = [re.sub('^res', "features.", key) for key in src_param_keys]
        src_param_keys = [re.sub('_conv1_weight$', '_conv1_aweight', key) for key in src_param_keys]
        src_param_keys = [re.sub('_conv2_weight$', '_conv2_aweight', key) for key in src_param_keys]
        src_param_keys = [re.sub('_conv3_weight$', '_conv3_aweight', key) for key in src_param_keys]
        src_param_keys.sort()
        src_param_keys.sort(key=lambda var: ["{:10}".format(int(x)) if
                                             x.isdigit() else x for x in re.findall(r"[^0-9]|[0-9]+", var)])
        dst_param_keys.sort()
        dst_param_keys.sort(key=lambda var: ["{:10}".format(int(x)) if
                                             x.isdigit() else x for x in re.findall(r"[^0-9]|[0-9]+", var)])
        # Undo the renaming.
        src_param_keys = [re.sub("^output\.", "classifier_", key) for key in src_param_keys]
        src_param_keys = [re.sub("^features\.", "res", key) for key in src_param_keys]
        src_param_keys = [re.sub('_conv1_aweight$', '_conv1_weight', key) for key in src_param_keys]
        src_param_keys = [re.sub('_conv2_aweight$', '_conv2_weight', key) for key in src_param_keys]
        src_param_keys = [re.sub('_conv3_aweight$', '_conv3_weight', key) for key in src_param_keys]
    # Generic pairwise copy (crunet* returned above); both lists must now be
    # positionally aligned, with matching suffixes and shapes.
    for src_i, (src_key, dst_key) in enumerate(zip(src_param_keys, dst_param_keys)):
        assert (dst_key.split('.')[-1].split('_')[-1] == src_key.split('_')[-1]), \
            "src_key={}, dst_key={}, src_shape={}, dst_shape={}".format(
                src_key, dst_key, src_params[src_key].shape, dst_params[dst_key].shape)
        assert (dst_params[dst_key].shape == src_params[src_key].shape), \
            "src_key={}, dst_key={}, src_shape={}, dst_shape={}".format(
                src_key, dst_key, src_params[src_key].shape, dst_params[dst_key].shape)
        dst_params[dst_key]._load_init(src_params[src_key], ctx)
    # Initialize any destination parameters that were not filled above.
    for param in dst_net.collect_params().values():
        if param._data is not None:
            continue
        print("param={}".format(param))
        param.initialize(ctx=ctx)
    dst_net.save_parameters(dst_params_file_path)
def convert_gl2ch(dst_net,
                  dst_params_file_path,
                  dst_params,
                  dst_param_keys,
                  src_params,
                  src_param_keys,
                  ext_src_param_keys,
                  ext_src_param_keys2,
                  src_model):
    """
    Convert Gluon parameters into a Chainer model and save it as NPZ.

    Destination (Chainer) keys are temporarily renamed so that a natural sort
    brings them into the same order as the sorted source (Gluon) keys, then
    renamed back; parameters are copied pairwise by position. BN running
    statistics (`ext_src_param_keys`) and special non-parameter tensors such
    as CondenseNet indices and XDenseNet masks (`ext_src_param_keys2`) are
    copied separately because Chainer stores them as plain link attributes.

    Parameters
    ----------
    dst_net : chainer.Chain
        Destination Chainer network.
    dst_params_file_path : str
        Path for the resulting NPZ file.
    dst_params : dict
        Destination parameters (name -> chainer Parameter).
    dst_param_keys : list of str
        Destination parameter names (slash separated).
    src_params : dict
        Source Gluon parameters (name -> Parameter).
    src_param_keys : list of str
        Source parameter names (dot separated).
    ext_src_param_keys : list of str
        Source BN running_mean/running_var keys.
    ext_src_param_keys2 : list of str
        Extra source tensor keys (index/mask/window/fb), model dependent.
    src_model : str
        Source model name (selects the special cases).
    """
    if src_model.startswith("diares") or src_model.startswith("diapreres"):
        # DIA attention parameters exist only in the source net: drop them.
        src1 = list(filter(re.compile("^features\.[0-9]*\.\d*[1-9]\d*\.attention").search, src_param_keys))
        src1n = [key for key in src_param_keys if key not in src1]
        src_param_keys = src1n

    assert (len(src_param_keys) == len(dst_param_keys))

    # Temporary renames so the natural sort below orders dst keys the same
    # way as the src keys ('z'/'stageN'/'stage0' prefixes force late/early
    # positions); every rename here is undone after sorting.
    if src_model.startswith("quartznet") or src_model.startswith("jasper"):
        dst_param_keys = [key.replace("features/final_block/", "features/zfinal_block/") for key in dst_param_keys]
        dst_param_keys = [key.replace("/W", "/weight") for key in dst_param_keys]
    dst_param_keys = [key.replace("/post_activ/", "/stageN/post_activ/") for key in dst_param_keys]
    dst_param_keys = [key.replace("/features/body/", "/features/zbody/") for key in dst_param_keys]
    dst_param_keys = [key.replace("features/final_postactiv/", "features/stageN/final_postactiv/") for key in dst_param_keys]
    dst_param_keys = [key.replace("features/final_block/", "features/stageN/final_block/") for key in dst_param_keys]
    dst_param_keys = [key.replace("/final_block/", "/zfinal_block/") for key in dst_param_keys]
    dst_param_keys = [key.replace("features/final_conv/", "features/stageN/final_conv/") for key in dst_param_keys]
    dst_param_keys = [key.replace("/stem1_unit/", "/stage0/stem1_unit/") for key in dst_param_keys]
    dst_param_keys = [key.replace("/stem2_unit/", "/stage0/stem2_unit/") for key in dst_param_keys]
    if not src_model.startswith("ibppose_coco"):
        dst_param_keys = [key.replace("/hg/", "/stage1_hg/") for key in dst_param_keys]
    if src_model.startswith("centernet"):
        dst_param_keys = [key.replace("/unit", "/a_unit") for key in dst_param_keys]
        dst_param_keys = [key.replace("/reg_block/", "/z_reg_block/") for key in dst_param_keys]

    # Natural (digit-aware) sort of both key lists.
    src_param_keys.sort()
    src_param_keys.sort(key=lambda var: ["{:10}".format(int(x)) if
                                         x.isdigit() else x for x in re.findall(r"[^0-9]|[0-9]+", var)])
    dst_param_keys.sort()
    dst_param_keys.sort(key=lambda var: ["{:10}".format(int(x)) if
                                         x.isdigit() else x for x in re.findall(r"[^0-9]|[0-9]+", var)])

    # Undo the temporary renames.
    if src_model.startswith("quartznet") or src_model.startswith("jasper"):
        dst_param_keys = [key.replace("features/zfinal_block/", "features/final_block/") for key in dst_param_keys]
        dst_param_keys = [key.replace("/weight", "/W") for key in dst_param_keys]
    dst_param_keys = [key.replace("/zfinal_block/", "/final_block/") for key in dst_param_keys]
    dst_param_keys = [key.replace("/stageN/post_activ/", "/post_activ/") for key in dst_param_keys]
    dst_param_keys = [key.replace("/stageN/final_postactiv/", "/final_postactiv/") for key in dst_param_keys]
    dst_param_keys = [key.replace("/stageN/final_block/", "/final_block/") for key in dst_param_keys]
    dst_param_keys = [key.replace("/features/zbody/", "/features/body/") for key in dst_param_keys]
    dst_param_keys = [key.replace("features/stageN/final_conv/", "features/final_conv/") for key in dst_param_keys]
    dst_param_keys = [key.replace("/stage0/stem1_unit/", "/stem1_unit/") for key in dst_param_keys]
    dst_param_keys = [key.replace("/stage0/stem2_unit/", "/stem2_unit/") for key in dst_param_keys]
    if not src_model.startswith("ibppose_coco"):
        dst_param_keys = [key.replace("/stage1_hg/", "/hg/") for key in dst_param_keys]
    if src_model.startswith("centernet"):
        dst_param_keys = [key.replace("/a_unit", "/unit") for key in dst_param_keys]
        dst_param_keys = [key.replace("/z_reg_block/", "/reg_block/") for key in dst_param_keys]

    # Build a lookup from a BN block's prefix to the attribute path of the
    # corresponding Chainer BN link (used for running stats below).
    if src_model.startswith("wrn20_10_1bit") or src_model.startswith("wrn20_10_32bit"):
        # Binarized WRN: BN betas are derived from the conv weight keys.
        ext2_src_param_keys = [key.replace('.conv.weight', '.bn.beta') for key in src_param_keys if
                               key.endswith(".conv.weight")]
        ext2_src_param_keys.append("features.4.bn.beta")
        ext2_dst_param_keys = [key.replace("/conv/W", "/bn/beta") for key in dst_param_keys if key.endswith("/conv/W")]
        ext2_dst_param_keys.append("/features/post_activ/bn/beta")
        ext3_src_param_keys = {".".join(v.split(".")[:-1]): i for i, v in enumerate(ext2_src_param_keys)}
        ext3_dst_param_keys = list(map(lambda x: x.split("/")[1:-1], ext2_dst_param_keys))
    else:
        ext2_src_param_keys = [key for key in src_param_keys if key.endswith(".beta")]
        ext2_dst_param_keys = [key for key in dst_param_keys if key.endswith("/beta")]
        ext3_src_param_keys = {".".join(v.split(".")[:-1]): i for i, v in enumerate(ext2_src_param_keys)}
        ext3_dst_param_keys = list(map(lambda x: x.split("/")[1:-1], ext2_dst_param_keys))

    # Copy BN running statistics: Chainer stores them as plain attributes
    # (avg_mean / avg_var) on the BN link, not as parameters.
    for i, src_key in enumerate(ext_src_param_keys):
        src_key1 = src_key.split(".")[-1]
        src_key2 = ".".join(src_key.split(".")[:-1])
        dst_ind = ext3_src_param_keys[src_key2]
        dst_path = ext3_dst_param_keys[dst_ind]
        obj = dst_net
        # Walk the attribute path down to the BN link.
        for j, sub_path in enumerate(dst_path):
            obj = getattr(obj, sub_path)
        if src_key1 == 'running_mean':
            assert (obj.avg_mean.shape == src_params[src_key].shape), \
                "src_key={}, dst_path={}, src_shape={}, obj.avg_mean.shape={}".format(
                    src_key, dst_path, src_params[src_key].shape, obj.avg_mean.shape)
            obj.avg_mean = src_params[src_key]._data[0].asnumpy()
        elif src_key1 == 'running_var':
            assert (obj.avg_var.shape == src_params[src_key].shape)
            obj.avg_var = src_params[src_key]._data[0].asnumpy()

    if src_model in ["condensenet74_c4_g4", "condensenet74_c8_g8"]:
        # CondenseNet learned-group-conv 'index' tensors are plain int
        # attributes on the Chainer side; copy them by walking the unit path
        # derived from the matching conv1 weight key.
        assert (dst_net.output.fc.index.shape == src_params["output.1.index"].shape)
        dst_net.output.fc.index = src_params["output.1.index"]._data[0].asnumpy().astype(np.int32)
        ext_src_param_keys2.remove("output.1.index")
        ext2_src_param_keys = [key for key in src_param_keys if key.endswith(".conv1.conv.weight")]
        ext2_dst_param_keys = [key for key in dst_param_keys if key.endswith("/conv1/conv/W")]
        ext3_src_param_keys = {".".join(v.split(".")[:-2]): i for i, v in enumerate(ext2_src_param_keys)}
        ext3_dst_param_keys = list(map(lambda x: x.split("/")[1:-2], ext2_dst_param_keys))
        for i, src_key in enumerate(ext_src_param_keys2):
            src_key2 = ".".join(src_key.split(".")[:-1])
            dst_ind = ext3_src_param_keys[src_key2]
            dst_path = ext3_dst_param_keys[dst_ind]
            obj = dst_net
            for j, sub_path in enumerate(dst_path):
                obj = getattr(obj, sub_path)
            assert (obj.index.shape == src_params[src_key].shape), \
                "src_key={}, dst_path={}, src_shape={}, obj.index.shape={}".format(
                    src_key, dst_path, src_params[src_key].shape, obj.index.shape)
            obj.index = src_params[src_key]._data[0].asnumpy().astype(np.int32)
    elif src_model.startswith("xdensenet"):
        # XDenseNet expander 'mask' tensors, same attribute-walking scheme.
        ext2_src_param_keys = [key for key in src_param_keys if key.endswith(".conv1.conv.weight")] +\
                              [key for key in src_param_keys if key.endswith(".conv2.conv.weight")]
        ext2_dst_param_keys = [key for key in dst_param_keys if key.endswith("/conv1/conv/W")] +\
                              [key for key in dst_param_keys if key.endswith("/conv2/conv/W")]
        ext3_src_param_keys = {".".join(v.split(".")[:-1]): i for i, v in enumerate(ext2_src_param_keys)}
        ext3_dst_param_keys = list(map(lambda x: x.split("/")[1:-1], ext2_dst_param_keys))
        for i, src_key in enumerate(ext_src_param_keys2):
            src_key2 = ".".join(src_key.split(".")[:-1])
            dst_ind = ext3_src_param_keys[src_key2]
            dst_path = ext3_dst_param_keys[dst_ind]
            obj = dst_net
            for j, sub_path in enumerate(dst_path):
                obj = getattr(obj, sub_path)
            assert (obj.mask.shape == src_params[src_key].shape), \
                "src_key={}, dst_path={}, src_shape={}, obj.index.shape={}".format(
                    src_key, dst_path, src_params[src_key].shape, obj.mask.shape)
            obj.mask = src_params[src_key]._data[0].asnumpy()

    # Pairwise copy of the regular parameters (lists are now aligned).
    for i, (src_key, dst_key) in enumerate(zip(src_param_keys, dst_param_keys)):
        assert (dst_params[dst_key].array.shape == src_params[src_key].shape), \
            "src_key={}, dst_key={}, src_shape={}, dst_shape={}".format(
                src_key, dst_key, src_params[src_key].shape, dst_params[dst_key].array.shape)
        dst_params[dst_key].array = src_params[src_key]._data[0].asnumpy()

    from chainer.serializers import save_npz
    save_npz(
        file=dst_params_file_path,
        obj=dst_net)
def convert_gl2gl(dst_net,
                  dst_params_file_path,
                  dst_params,
                  dst_param_keys,
                  src_params,
                  src_param_keys,
                  finetune,
                  src_model,
                  ctx):
    """
    Copy parameters from one Gluon model into another and save the result.

    Keys are matched positionally, so for some third-party ('oth_*') source
    models the key lists are first reordered (blocks moved to the front/end)
    to line them up with the destination naming order.

    Parameters
    ----------
    dst_net : HybridBlock
        Destination Gluon network.
    dst_params_file_path : str
        Path for the resulting Gluon parameter file.
    dst_params : dict
        Destination parameters (name -> Parameter).
    dst_param_keys : list of str
        Destination parameter names.
    src_params : dict
        Source parameters (name -> Parameter).
    src_param_keys : list of str
        Source parameter names.
    finetune : bool
        If True, shape mismatches are skipped instead of raising.
    src_model : str
        Source model name (selects the reordering scheme).
    ctx : Context
        MXNet context used for loading.
    """
    def _to_end(keys, pattern):
        # Move all keys matching `pattern` (regex search) to the end,
        # preserving relative order within both groups.
        matched = list(filter(re.compile(pattern).search, keys))
        rest = [key for key in keys if key not in matched]
        return rest + matched

    def _to_front(keys, pattern):
        # Move all keys matching `pattern` (regex search) to the front,
        # preserving relative order within both groups.
        matched = list(filter(re.compile(pattern).search, keys))
        rest = [key for key in keys if key not in matched]
        return matched + rest

    if src_model.startswith("oth_danet_resnet"):
        # Push the head blocks to the end in destination order.
        for pattern in ("^head.sa.gamma", "^head.conv51", "^head.conv6", "^head.conv5c",
                        "^head.sc", "^head.conv52", "^head.conv7", "^head.conv8"):
            src_param_keys = _to_end(src_param_keys, pattern)
    elif src_model.startswith("oth_icnet_resnet50_citys"):
        src_param_keys = _to_front(src_param_keys, "^conv_sub1")
        src_param_keys = _to_end(src_param_keys, "^head")
    elif src_model.startswith("oth_fastscnn_citys"):
        src_param_keys = _to_end(src_param_keys, "^feature_fusion")
        dst_param_keys = _to_end(dst_param_keys, "^fusion")
        dst_param_keys = _to_end(dst_param_keys, "^fusion.low_pw_conv.bn")
        dst_param_keys = _to_end(dst_param_keys, "^fusion.high_conv.bn")

    # Positional copy; warn on mismatched shapes/suffixes. With finetune,
    # shape mismatches are skipped (the layer keeps its fresh init).
    for i, (src_key, dst_key) in enumerate(zip(src_param_keys, dst_param_keys)):
        if dst_params[dst_key].shape != src_params[src_key].shape:
            logging.warning(
                "dst_param.shape != src_param.shape, src_key={}, dst_key={}, src_shape={}, dst_shape={}".format(
                    src_key, dst_key, src_params[src_key].shape, dst_params[dst_key].shape))
            if finetune:
                continue
            else:
                raise ValueError
        if dst_key.split('.')[-1] != src_key.split('.')[-1]:
            logging.warning(
                "dst_key.suff != src_key.suff, src_key={}, dst_key={}, src_shape={}, dst_shape={}".format(
                    src_key, dst_key, src_params[src_key].shape, dst_params[dst_key].shape))
        dst_params[dst_key]._load_init(src_params[src_key]._data[0], ctx)

    dst_net.save_parameters(dst_params_file_path)
def convert_gl2ke(dst_net,
                  dst_params_file_path,
                  dst_params,
                  dst_param_keys,
                  src_params,
                  src_param_keys):
    """
    Convert Gluon parameters into a Keras model and save its weights.

    Destination keys are temporarily renamed so a natural sort lines them up
    with the sorted source keys, then renamed back. Grouped convolutions
    ('convgroup') are stored as one weight on the Gluon side but as one
    weight per group on the Keras side, so their source tensors are split
    along axis 0 and distributed over the per-group destination keys.

    Parameters
    ----------
    dst_net : keras.Model
        Destination Keras network.
    dst_params_file_path : str
        Path for the resulting weights file.
    dst_params : dict
        Mapping of weight name to (layer, weight) pairs.
    dst_param_keys : list of str
        Destination weight names.
    src_params : dict
        Source Gluon parameters (name -> Parameter).
    src_param_keys : list of str
        Source parameter names.
    """
    import mxnet as mx

    # Temporary renames ('stageN'/'stage0') so the natural sort below orders
    # dst keys the same way as the src keys; undone after sorting.
    dst_param_keys = [key.replace("/post_activ/", "/stageN/post_activ/") for key in dst_param_keys]
    dst_param_keys = [key.replace("/final_block/", "/stageN/final_block/") for key in dst_param_keys]
    dst_param_keys = [key.replace("/stem1_unit/", "/stage0/stem1_unit/") for key in dst_param_keys]
    dst_param_keys = [key.replace("/stem2_unit/", "/stage0/stem2_unit/") for key in dst_param_keys]

    src_param_keys.sort()
    src_param_keys.sort(key=lambda var: ["{:10}".format(int(x)) if
                                         x.isdigit() else x for x in re.findall(r"[^0-9]|[0-9]+", var)])
    dst_param_keys.sort()
    dst_param_keys.sort(key=lambda var: ["{:10}".format(int(x)) if
                                         x.isdigit() else x for x in re.findall(r"[^0-9]|[0-9]+", var)])

    dst_param_keys = [key.replace("/stageN/post_activ/", "/post_activ/") for key in dst_param_keys]
    dst_param_keys = [key.replace("/stageN/final_block/", "/final_block/") for key in dst_param_keys]
    dst_param_keys = [key.replace("/stage0/stem1_unit/", "/stem1_unit/") for key in dst_param_keys]
    dst_param_keys = [key.replace("/stage0/stem2_unit/", "/stem2_unit/") for key in dst_param_keys]

    # Collapse the per-group 'convgroup' keys into one representative key so
    # the positional pairing with the (single) source key works; keep the
    # full list to recover the per-group keys later.
    dst_param_keys_orig = dst_param_keys.copy()
    dst_param_keys = [s[:(s.find("convgroup") + 9)] + "/" + s.split("/")[-1] if s.find("convgroup") >= 0 else s
                      for s in dst_param_keys]
    # np.unique sorts, so reorder by first occurrence to preserve key order.
    dst_param_keys_uniq, dst_param_keys_index = np.unique(dst_param_keys, return_index=True)
    dst_param_keys = list(dst_param_keys_uniq[dst_param_keys_index.argsort()])

    assert (len(src_param_keys) == len(dst_param_keys))

    def process_width(src_key, dst_key, src_weight):
        # Copy one tensor into the Keras weight, transposing from MXNet's
        # OIHW layout into the layout the destination layer expects.
        dst_layer = dst_params[dst_key][0]
        dst_weight = dst_params[dst_key][1]
        if (dst_layer.__class__.__name__ in ["Conv2D"]) and dst_key.endswith("kernel1") and\
                (dst_layer.data_format == "channels_last"):
            src_weight = np.transpose(src_weight, (2, 3, 1, 0))
        if (dst_layer.__class__.__name__ in ["DepthwiseConv2D"]) and dst_key.endswith("kernel1") and\
                (dst_layer.data_format == "channels_last"):
            src_weight = np.transpose(src_weight, (2, 3, 0, 1))
        if (dst_layer.__class__.__name__ in ["Dense"]) and dst_key.endswith("kernel1"):
            src_weight = np.transpose(src_weight, (1, 0))
        assert (dst_weight._keras_shape == src_weight.shape), \
            "src_key={}, dst_key={}, src_shape={}, dst_shape={}".format(
                src_key, dst_key, src_weight.shape, dst_weight._keras_shape)
        dst_weight.bind(mx.nd.array(src_weight))

    for i, (src_key, dst_key) in enumerate(zip(src_param_keys, dst_param_keys)):
        if dst_key.find("convgroup") >= 0:
            # Grouped conv: split the single source tensor along axis 0 and
            # write each slice into the matching per-group destination key.
            dst_key_stem = dst_key[:(dst_key.find("convgroup") + 9)]
            dst_keys = [s for s in dst_param_keys_orig if s.startswith(dst_key_stem)]
            if src_key.endswith("weight"):
                dst_keys = [s for s in dst_keys if s.endswith("kernel1")]
            elif src_key.endswith("bias"):
                dst_keys = [s for s in dst_keys if s.endswith("bias1")]
            groups = len(dst_keys)
            src_weight0 = src_params[src_key]._data[0]
            src_weight0_list = mx.nd.split(src_weight0, axis=0, num_outputs=groups)
            for gi in range(groups):
                src_weight_gi = src_weight0_list[gi].asnumpy()
                dst_key_gi = dst_keys[gi]
                process_width(src_key, dst_key_gi, src_weight_gi)
        else:
            src_weight = src_params[src_key]._data[0].asnumpy()
            process_width(src_key, dst_key, src_weight)

    dst_net.save_weights(dst_params_file_path)
def convert_gl2tf(dst_params_file_path,
                  dst_params,
                  dst_param_keys,
                  src_params,
                  src_param_keys):
    """
    Convert model weights from Gluon into a TensorFlow-1.x parameter file.

    The two key lists are aligned purely by sorting: destination keys are
    temporarily renamed so that a digit-aware sort orders them the same way
    as the sorted Gluon keys, then the renames are undone. Grouped
    convolutions ('convgroup' in the key) map one Gluon parameter onto
    several TF variables and are split along axis 0.

    Parameters:
    ----------
    dst_params_file_path : str
        Path to the output TF parameter file.
    dst_params : dict
        Destination TF variables, indexed by variable name.
    dst_param_keys : list of str
        Names of the destination TF variables.
    src_params : dict
        Source Gluon parameters, indexed by parameter name.
    src_param_keys : list of str
        Names of the source Gluon parameters.
    """
    import mxnet as mx
    # Temporary renames so sorting lines dst keys up with the Gluon key order.
    dst_param_keys = [key.replace("/kernel:", "/weight:") for key in dst_param_keys]
    dst_param_keys = [key.replace("/dw_kernel:", "/weight_dw:") for key in dst_param_keys]
    dst_param_keys = [key.replace("/post_activ/", "/stageN/post_activ/") for key in dst_param_keys]
    dst_param_keys = [key.replace("/final_block/", "/stageN/final_block/") for key in dst_param_keys]
    dst_param_keys = [key.replace("/stem1_unit/", "/stage0/stem1_unit/") for key in dst_param_keys]
    dst_param_keys = [key.replace("/stem2_unit/", "/stage0/stem2_unit/") for key in dst_param_keys]
    # Natural sort: runs of digits compare numerically, not lexically.
    src_param_keys.sort()
    src_param_keys.sort(key=lambda var: ["{:10}".format(int(x)) if
                                         x.isdigit() else x for x in re.findall(r"[^0-9]|[0-9]+", var)])
    dst_param_keys.sort()
    dst_param_keys.sort(key=lambda var: ["{:10}".format(int(x)) if
                                         x.isdigit() else x for x in re.findall(r"[^0-9]|[0-9]+", var)])
    # Undo the temporary renames.
    dst_param_keys = [key.replace("/weight:", "/kernel:") for key in dst_param_keys]
    dst_param_keys = [key.replace("/weight_dw:", "/dw_kernel:") for key in dst_param_keys]
    dst_param_keys = [key.replace("/stageN/post_activ/", "/post_activ/") for key in dst_param_keys]
    dst_param_keys = [key.replace("/stageN/final_block/", "/final_block/") for key in dst_param_keys]
    dst_param_keys = [key.replace("/stage0/stem1_unit/", "/stem1_unit/") for key in dst_param_keys]
    dst_param_keys = [key.replace("/stage0/stem2_unit/", "/stem2_unit/") for key in dst_param_keys]
    dst_param_keys_orig = dst_param_keys.copy()
    # Collapse the per-group TF variables of a grouped conv into one stem key,
    # keeping first-occurrence order, so the lists match the Gluon side 1:1.
    dst_param_keys = [s[:(s.find("convgroup") + 9)] + "/" + s.split("/")[-1] if s.find("convgroup") >= 0 else s
                      for s in dst_param_keys]
    dst_param_keys_uniq, dst_param_keys_index = np.unique(dst_param_keys, return_index=True)
    dst_param_keys = list(dst_param_keys_uniq[dst_param_keys_index.argsort()])
    assert (len(src_param_keys) == len(dst_param_keys))
    import tensorflow as tf
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        def process_width(src_key, dst_key, src_weight):
            # Transpose from Gluon (OIHW) layout to the TF variable layout,
            # then assign into the session.
            if len(src_weight.shape) == 4:
                if dst_key.split("/")[-1][:-2] == "dw_kernel":
                    src_weight = np.transpose(src_weight, axes=(2, 3, 0, 1))
                else:
                    src_weight = np.transpose(src_weight, axes=(2, 3, 1, 0))
            elif len(src_weight.shape) == 2:
                src_weight = np.transpose(src_weight, axes=(1, 0))
            assert (tuple(dst_params[dst_key].get_shape().as_list()) == src_weight.shape)
            sess.run(dst_params[dst_key].assign(src_weight))
            # print(dst_params[dst_key].eval(sess))
        for i, (src_key, dst_key) in enumerate(zip(src_param_keys, dst_param_keys)):
            if dst_key.find("convgroup") >= 0:
                # One Gluon parameter feeds `groups` TF variables: split along
                # the output-channel axis and copy group by group.
                dst_key_stem = dst_key[:(dst_key.find("convgroup") + 9)]
                dst_keys = [s for s in dst_param_keys_orig if s.startswith(dst_key_stem)]
                if src_key.endswith("weight"):
                    dst_keys = [s for s in dst_keys if s.endswith("kernel:0")]
                elif src_key.endswith("bias"):
                    dst_keys = [s for s in dst_keys if s.endswith("bias:0")]
                groups = len(dst_keys)
                src_weight0 = src_params[src_key]._data[0]
                src_weight0_list = mx.nd.split(src_weight0, axis=0, num_outputs=groups)
                for gi in range(groups):
                    src_weight_gi = src_weight0_list[gi].asnumpy()
                    dst_key_gi = dst_keys[gi]
                    process_width(src_key, dst_key_gi, src_weight_gi)
            else:
                src_weight = src_params[src_key]._data[0].asnumpy()
                process_width(src_key, dst_key, src_weight)
        # saver = tf.train.Saver()
        # saver.save(
        #     sess=sess,
        #     save_path=dst_params_file_path)
        from tensorflow_.utils import save_model_params
        save_model_params(
            sess=sess,
            file_path=dst_params_file_path)
def convert_gl2tf2(dst_net,
                   dst_params_file_path,
                   dst_params,
                   dst_param_keys,
                   src_params,
                   src_param_keys,
                   src_model):
    """
    Convert model weights from Gluon into a TF2/Keras weights file.

    Key alignment works by sorting both key lists with a digit-aware sort;
    model-family-specific renames (hrnet, hardnet, centernet, ...) are applied
    temporarily so the sort produces matching orders, then undone. Grouped
    convolutions ('convgroup') split one Gluon parameter across several TF
    variables.

    Parameters:
    ----------
    dst_net : tf.keras.Model
        Destination TF2 network (used only to save weights at the end).
    dst_params_file_path : str
        Path to the output weights file.
    dst_params : dict
        Destination TF2 variables, indexed by variable name.
    dst_param_keys : list of str
        Names of the destination variables.
    src_params : dict
        Source Gluon parameters, indexed by parameter name.
    src_param_keys : list of str
        Names of the source parameters.
    src_model : str
        Source model name; selects the rename scheme.
    """
    if src_model.startswith("hrnet"):
        src_param_keys = [key.replace(".transition.", ".atransition.") for key in src_param_keys]
    src_param_keys.sort()
    src_param_keys.sort(key=lambda var: ["{:10}".format(int(x)) if
                                         x.isdigit() else x for x in re.findall(r"[^0-9]|[0-9]+", var)])
    if src_model.startswith("hrnet"):
        src_param_keys = [key.replace(".atransition.", ".transition.") for key in src_param_keys]
    # Temporary renames of dst keys to make the sort order match the src side.
    dst_param_keys = [key.replace("/kernel:", "/weight:") for key in dst_param_keys]
    dst_param_keys = [key.replace("/depthwise_kernel:", "/weight_depthwise:") for key in dst_param_keys]
    dst_param_keys = [key.replace("/post_activ/", "/stageN/post_activ/") for key in dst_param_keys]
    if (not src_model.startswith("pspnet_")) and (not src_model.startswith("deeplabv3_")) and\
            (not src_model.startswith("simplepose_")) and (not src_model.startswith("alphapose_")) and\
            (not src_model.startswith("lwopenpose")) and (not src_model.startswith("quartznet")) and\
            (not src_model.startswith("jasper")):
        dst_param_keys = [key.replace("/final_block/", "/stageN/final_block/") for key in dst_param_keys]
    dst_param_keys = [key.replace("/final_block/", "/zfinal_block/") for key in dst_param_keys]
    dst_param_keys = [key.replace("/stem1_unit/", "/stage0/stem1_unit/") for key in dst_param_keys]
    dst_param_keys = [key.replace("/stem2_unit/", "/stage0/stem2_unit/") for key in dst_param_keys]
    if src_model.startswith("hrnet"):
        dst_param_keys = [key.replace("/transition/", "/atransition/") for key in dst_param_keys]
    if src_model.startswith("hardnet"):
        # dst_param_keys = [key.replace('/dw_conv/', '/z_dw_conv/') for key in dst_param_keys]
        dst_param_keys = [key.replace("features/down", "features/z_down") for key in dst_param_keys]
    if src_model.startswith("centernet"):
        dst_param_keys = [key.replace("/unit", "/a_unit") for key in dst_param_keys]
        dst_param_keys = [key.replace("/reg_block/", "/z_reg_block/") for key in dst_param_keys]
    # if src_model.startswith("danet"):
    #     dst_param_keys = [key.replace("da_net/head/", "z_da_net/head/") for key in dst_param_keys]
    dst_param_keys.sort()
    dst_param_keys.sort(key=lambda var: ["{:10}".format(int(x)) if
                                         x.isdigit() else x for x in re.findall(r"[^0-9]|[0-9]+", var)])
    # Undo the temporary renames.
    dst_param_keys = [key.replace("/weight:", "/kernel:") for key in dst_param_keys]
    dst_param_keys = [key.replace("/weight_depthwise:", "/depthwise_kernel:") for key in dst_param_keys]
    dst_param_keys = [key.replace("/zfinal_block/", "/final_block/") for key in dst_param_keys]
    dst_param_keys = [key.replace("/stageN/post_activ/", "/post_activ/") for key in dst_param_keys]
    if (not src_model.startswith("pspnet_")) and (not src_model.startswith("deeplabv3_")) and\
            (not src_model.startswith("simplepose_")) and (not src_model.startswith("alphapose_")) and\
            (not src_model.startswith("lwopenpose")) and (not src_model.startswith("quartznet")) and\
            (not src_model.startswith("jasper")):
        dst_param_keys = [key.replace("/stageN/final_block/", "/final_block/") for key in dst_param_keys]
    dst_param_keys = [key.replace("/stage0/stem1_unit/", "/stem1_unit/") for key in dst_param_keys]
    dst_param_keys = [key.replace("/stage0/stem2_unit/", "/stem2_unit/") for key in dst_param_keys]
    if src_model.startswith("hrnet"):
        dst_param_keys = [key.replace("/atransition/", "/transition/") for key in dst_param_keys]
    if src_model.startswith("hardnet"):
        # dst_param_keys = [key.replace('/z_dw_conv/', '/dw_conv/') for key in dst_param_keys]
        dst_param_keys = [key.replace("features/z_down", "features/down") for key in dst_param_keys]
    if src_model.startswith("centernet"):
        dst_param_keys = [key.replace("/a_unit", "/unit") for key in dst_param_keys]
        dst_param_keys = [key.replace("/z_reg_block/", "/reg_block/") for key in dst_param_keys]
    # if src_model.startswith("danet"):
    #     dst_param_keys = [key.replace("z_da_net/head/", "da_net/head/") for key in dst_param_keys]
    dst_param_keys_orig = dst_param_keys.copy()
    # Collapse per-group variables of grouped convs into one stem key
    # (first-occurrence order preserved).
    dst_param_keys = [s[:(s.find("convgroup") + 9)] + "/" + s.split("/")[-1] if s.find("convgroup") >= 0 else s
                      for s in dst_param_keys]
    dst_param_keys_uniq, dst_param_keys_index = np.unique(dst_param_keys, return_index=True)
    dst_param_keys = list(dst_param_keys_uniq[dst_param_keys_index.argsort()])
    assert (len(src_param_keys) == len(dst_param_keys))
    def process_width(src_key, dst_key, src_weight):
        # Transpose from Gluon layout to the TF2 variable layout and assign.
        if len(src_weight.shape) == 4:
            if dst_key.split("/")[-1][:-2] == "depthwise_kernel":
                src_weight = np.transpose(src_weight, axes=(2, 3, 0, 1))
            else:
                src_weight = np.transpose(src_weight, axes=(2, 3, 1, 0))
        elif len(src_weight.shape) == 2:
            src_weight = np.transpose(src_weight, axes=(1, 0))
        elif len(src_weight.shape) == 3:
            # Jasper/QuartzNet filter-bank ("fb") weights keep their layout.
            if not ((src_model.startswith("jasper") or src_model.startswith("quartznet")) and
                    dst_key.split("/")[-1][:-2] == "fb"):
                src_weight = np.transpose(src_weight, axes=(2, 1, 0))
        if dst_key.split("/")[-1][:-2] == "depthwise_kernel":
            assert(len(dst_params[dst_key].shape) == 4)
            src_weight = np.expand_dims(src_weight, -1)
        dst_weight = dst_params[dst_key]
        assert (tuple(dst_weight.shape) == src_weight.shape), \
            "src_key={}, dst_key={}, src_shape={}, dst_shape={}".format(
                src_key, dst_key, src_weight.shape, tuple(dst_weight.shape))
        dst_weight.assign(src_weight)
    for i, (src_key, dst_key) in enumerate(zip(src_param_keys, dst_param_keys)):
        # print("src_key={},\tsrc_key2={},\tdst_key={}".format(src_key, src_params[src_key].name, dst_key))
        if dst_key.find("convgroup") >= 0:
            # One Gluon parameter maps to `groups` variables: split on axis 0.
            import mxnet as mx
            dst_key_stem = dst_key[:(dst_key.find("convgroup") + 9)]
            dst_keys = [s for s in dst_param_keys_orig if s.startswith(dst_key_stem)]
            if src_key.endswith("weight"):
                dst_keys = [s for s in dst_keys if s.endswith("kernel:0")]
            elif src_key.endswith("bias"):
                dst_keys = [s for s in dst_keys if s.endswith("bias:0")]
            groups = len(dst_keys)
            src_weight0 = src_params[src_key]._data[0]
            src_weight0_list = mx.nd.split(src_weight0, axis=0, num_outputs=groups)
            for gi in range(groups):
                src_weight_gi = src_weight0_list[gi].asnumpy()
                dst_key_gi = dst_keys[gi]
                process_width(src_key, dst_key_gi, src_weight_gi)
        else:
            src_weight = src_params[src_key]._data[0].asnumpy()
            process_width(src_key, dst_key, src_weight)
    dst_net.save_weights(dst_params_file_path)
def convert_pt2pt(dst_params_file_path,
                  dst_params,
                  dst_param_keys,
                  src_params,
                  src_param_keys,
                  src_model,
                  dst_model):
    """
    Convert model weights from PyTorch to PyTorch (third-party checkpoint ->
    this project's layout).

    Almost all of the body reorders `src_param_keys` / `dst_param_keys` for
    specific model families so that the two lists correspond element-wise in
    the zipped copy loop at the end. The recurring pattern is: pull out the
    keys matching a regex, then re-append them in the position that matches
    the other side's ordering.

    Parameters:
    ----------
    dst_params_file_path : str
        Path to the output PyTorch parameter file.
    dst_params : dict
        Destination parameters, indexed by name.
    dst_param_keys : list of str
        Names of the destination parameters.
    src_params : dict
        Source parameters, indexed by name.
    src_param_keys : list of str
        Names of the source parameters.
    src_model : str
        Source model name; selects the reordering scheme.
    dst_model : str
        Destination model name; selects the reordering scheme.
    """
    import torch
    if src_model.startswith("oth_quartznet") or src_model.startswith("oth_jasper"):
        # Move residual-branch keys to the end on both sides.
        src1 = list(filter(re.compile("\.res\.").search, src_param_keys))
        src1n = [key for key in src_param_keys if key not in src1]
        src_param_keys = src1n + src1
        dst1 = list(filter(re.compile("\.identity_block\.").search, dst_param_keys))
        dst1n = [key for key in dst_param_keys if key not in dst1]
        dst_param_keys = dst1n + dst1
    elif src_model.startswith("oth_dicenet"):
        # Pair height/width convs, linear-comb and projection layers across
        # the two naming schemes.
        src1 = list(filter(re.compile("\.conv_height\.").search, src_param_keys))
        src1n = [key for key in src_param_keys if key not in src1]
        src2 = list(filter(re.compile("\.conv_width\.").search, src1n))
        src2n = [key for key in src1n if key not in src2]
        src3 = list(filter(re.compile("\.linear_comb_layer\.").search, src2n))
        src3n = [key for key in src2n if key not in src3]
        src4 = list(filter(re.compile("\.proj_layer\.").search, src3n))
        src4n = [key for key in src3n if key not in src4]
        src_param_keys = src4n + src1 + src2 + src3 + src4
        dst1 = list(filter(re.compile("\.h_conv\.").search, dst_param_keys))
        dst1n = [key for key in dst_param_keys if key not in dst1]
        dst2 = list(filter(re.compile("\.w_conv\.").search, dst1n))
        dst2n = [key for key in dst1n if key not in dst2]
        dst3 = list(filter(re.compile("\.att\.").search, dst2n))
        dst3n = [key for key in dst2n if key not in dst3]
        dst4 = list(filter(re.compile("\.proj_conv\.").search, dst3n))
        dst4n = [key for key in dst3n if key not in dst4]
        dst_param_keys = dst4n + dst1 + dst2 + dst3 + dst4
    elif src_model.startswith("oth_proxyless"):
        # Fix up two keys whose position differs between the checkpoints
        # (positional shuffle; indices are checkpoint-specific).
        src1 = src_param_keys[5]
        del src_param_keys[5]
        src_param_keys.insert(0, src1)
        src2 = src_param_keys[-3]
        del src_param_keys[-3]
        src_param_keys.insert(-7, src2)
    elif src_model.startswith("oth_scnet"):
        pass
        src1 = list(filter(re.compile(".k1.").search, src_param_keys))
        src1n = [key for key in src_param_keys if key not in src1]
        src2 = list(filter(re.compile(".scconv.").search, src1n))
        src2n = [key for key in src1n if key not in src2]
        src_param_keys = src2n + src1 + src2
        dst1 = list(filter(re.compile(".conv2a.").search, dst_param_keys))
        dst1n = [key for key in dst_param_keys if key not in dst1]
        dst2 = list(filter(re.compile(".conv2b.").search, dst1n))
        dst2n = [key for key in dst1n if key not in dst2]
        dst_param_keys = dst2n + dst1 + dst2
    elif src_model == "oth_bisenet":
        # Context-path / FFM sub-modules enumerate in a different order.
        src1 = list(filter(re.compile("^cp.conv_avg").search, src_param_keys))
        src1n = [key for key in src_param_keys if key not in src1]
        src2 = list(filter(re.compile("^cp.arm32").search, src1n))
        src2n = [key for key in src1n if key not in src2]
        src3 = list(filter(re.compile("^cp.conv_head32").search, src2n))
        src3n = [key for key in src2n if key not in src3]
        src4 = list(filter(re.compile("^cp.arm16").search, src3n))
        src4n = [key for key in src3n if key not in src4]
        src5 = list(filter(re.compile("^cp.conv_head16").search, src4n))
        src5n = [key for key in src4n if key not in src5]
        src6 = list(filter(re.compile("^ffm").search, src5n))
        src6n = [key for key in src5n if key not in src6]
        src_param_keys = src6n + src1 + src2 + src3 + src4 + src5 + src6
        dst1 = list(filter(re.compile("^pool").search, dst_param_keys))
        dst1n = [key for key in dst_param_keys if key not in dst1]
        dst_param_keys = dst1n + dst1
    elif src_model.startswith("oth_dla"):
        src1 = list(filter(re.compile("\.project").search, src_param_keys))
        src1n = [key for key in src_param_keys if key not in src1]
        src_param_keys = src1 + src1n
        dst1 = list(filter(re.compile("\.project_conv").search, dst_param_keys))
        dst1n = [key for key in dst_param_keys if key not in dst1]
        dst_param_keys = dst1 + dst1n
    elif dst_model == "ntsnet":
        src1 = list(filter(re.compile("^proposal_net").search, src_param_keys))
        src1n = [key for key in src_param_keys if key not in src1]
        src_param_keys = src1 + src1n
        dst1 = list(filter(re.compile("^navigator_unit\.branch\d+\.down").search, dst_param_keys))
        dst1n = [key for key in dst_param_keys if key not in dst1]
        dst2 = list(filter(re.compile("^navigator_unit\.branch\d+\.tidy").search, dst1n))
        dst2n = [key for key in dst1n if key not in dst2]
        dst_param_keys = dst1 + dst2 + dst2n
    elif dst_model == "fishnet150":
        # FishNet's body/tail/head blocks enumerate in a checkpoint-specific
        # order; rebuild the src order stage by stage.
        src1 = list(filter(re.compile("^(conv|fish\.fish\.[0-2])").search, src_param_keys))
        src1n = [key for key in src_param_keys if key not in src1]
        src2 = list(filter(re.compile("^fish\.fish\.6\.1").search, src1n))
        src2n = [key for key in src1n if key not in src2]
        src3 = list(filter(re.compile("^fish\.fish\.5\.1").search, src2n))
        src3n = [key for key in src2n if key not in src3]
        src4 = list(filter(re.compile("^fish\.fish\.4\.1").search, src3n))
        src4n = [key for key in src3n if key not in src4]
        src5 = list(filter(re.compile("^fish\.fish\.3\.[0-1]").search, src4n))
        src5n = [key for key in src4n if key not in src5]
        src6 = list(filter(re.compile("^fish\.fish\.3\.3").search, src5n))
        src6n = [key for key in src5n if key not in src6]
        src7 = list(filter(re.compile("^fish\.fish\.[3-6]").search, src6n))
        src7n = [key for key in src6n if key not in src7]
        src8 = list(filter(re.compile("^fish\.fish\.9\.1").search, src7n))
        src8n = [key for key in src7n if key not in src8]
        src9 = list(filter(re.compile("^fish\.fish\.8\.1").search, src8n))
        src9n = [key for key in src8n if key not in src9]
        src10 = list(filter(re.compile("^fish\.fish\.7\.1").search, src9n))
        src10n = [key for key in src9n if key not in src10]
        src_param_keys = src1 + src2 + src3 + src4 + src5 + src6 + src7 + src8 + src9 + src10 + src10n
    elif dst_model == "bam_resnet50":
        # BAM attention blocks come last on both sides.
        src_bams = list(filter(re.compile("^bam").search, src_param_keys))
        src_param_keys = [key for key in src_param_keys if key not in src_bams]
        src_param_keys = src_param_keys + src_bams
        dst_bams = list(filter(re.compile("^features.stage[0-9].unit1.bam.").search, dst_param_keys))
        dst_param_keys = [key for key in dst_param_keys if key not in dst_bams]
        dst_param_keys = dst_param_keys + dst_bams
    elif dst_model.startswith("sinet"):
        # Successively push matching groups to the end; the cumulative result
        # pairs vertical/horizontal convs and their BNs across namings.
        src1 = list(filter(re.compile("\.vertical.weight").search, src_param_keys))
        src1n = [key for key in src_param_keys if key not in src1]
        src_param_keys = src1n + src1
        src2 = list(filter(re.compile("\.horizontal.weight").search, src_param_keys))
        src2n = [key for key in src_param_keys if key not in src2]
        src_param_keys = src2n + src2
        src3 = list(filter(re.compile("\.B_v\.").search, src_param_keys))
        src3n = [key for key in src_param_keys if key not in src3]
        src_param_keys = src3n + src3
        src4 = list(filter(re.compile("\.B_h\.").search, src_param_keys))
        src4n = [key for key in src_param_keys if key not in src4]
        src_param_keys = src4n + src4
        src5 = list(filter(re.compile("bn_4\.").search, src_param_keys))
        src5n = [key for key in src_param_keys if key not in src5]
        src_param_keys = src5n + src5
        src6 = list(filter(re.compile("bn_3\.").search, src_param_keys))
        src6n = [key for key in src_param_keys if key not in src6]
        src_param_keys = src6n + src6
        dst1 = list(filter(re.compile("\.v_conv.conv\.").search, dst_param_keys))
        dst1n = [key for key in dst_param_keys if key not in dst1]
        dst_param_keys = dst1n + dst1
        dst2 = list(filter(re.compile("\.h_conv.conv\.").search, dst_param_keys))
        dst2n = [key for key in dst_param_keys if key not in dst2]
        dst_param_keys = dst2n + dst2
        dst3 = list(filter(re.compile("\.v_conv.bn\.").search, dst_param_keys))
        dst3n = [key for key in dst_param_keys if key not in dst3]
        dst_param_keys = dst3n + dst3
        dst4 = list(filter(re.compile("\.h_conv.bn\.").search, dst_param_keys))
        dst4n = [key for key in dst_param_keys if key not in dst4]
        dst_param_keys = dst4n + dst4
        dst5 = list(filter(re.compile("decoder.decode1.bn\.").search, dst_param_keys))
        dst5n = [key for key in dst_param_keys if key not in dst5]
        dst_param_keys = dst5n + dst5
        dst6 = list(filter(re.compile("decoder.decode2.bn\.").search, dst_param_keys))
        dst6n = [key for key in dst_param_keys if key not in dst6]
        dst_param_keys = dst6n + dst6
    elif src_model.startswith("oth_ibppose"):
        def sort_hg(src2):
            # Reorder one hourglass's sub-block keys into the traversal order
            # used by the destination net (down path, then up path, then skip).
            src2b1 = list(filter(re.compile("^hourglass.[0-9].hg.0.1.").search, src2))
            src2b2 = list(filter(re.compile("^hourglass.[0-9].hg.1.1.").search, src2))
            src2b3 = list(filter(re.compile("^hourglass.[0-9].hg.2.1.").search, src2))
            src2b4 = list(filter(re.compile("^hourglass.[0-9].hg.3.1.").search, src2))
            src2b5 = list(filter(re.compile("^hourglass.[0-9].hg.3.2.").search, src2))
            src2b6 = list(filter(re.compile("^hourglass.[0-9].hg.3.3.").search, src2))
            src2b7 = list(filter(re.compile("^hourglass.[0-9].hg.2.2.").search, src2))
            src2b8 = list(filter(re.compile("^hourglass.[0-9].hg.2.3.").search, src2))
            src2b9 = list(filter(re.compile("^hourglass.[0-9].hg.1.2.").search, src2))
            src2b10 = list(filter(re.compile("^hourglass.[0-9].hg.1.3.").search, src2))
            src2b11 = list(filter(re.compile("^hourglass.[0-9].hg.0.2.").search, src2))
            src2b12 = list(filter(re.compile("^hourglass.[0-9].hg.0.3.").search, src2))
            src2b13 = list(filter(re.compile("^hourglass.[0-9].hg.0.0.").search, src2))
            src2b14 = list(filter(re.compile("^hourglass.[0-9].hg.1.0.").search, src2))
            src2b15 = list(filter(re.compile("^hourglass.[0-9].hg.2.0.").search, src2))
            src2b16 = list(filter(re.compile("^hourglass.[0-9].hg.3.0.").search, src2))
            src2b17 = list(filter(re.compile("^hourglass.[0-9].hg.3.4.").search, src2))
            return src2b1 + src2b2 + src2b3 + src2b4 +\
                src2b11 + src2b12 + src2b9 + src2b10 + src2b7 + src2b8 + src2b5 + src2b6 +\
                src2b13 + src2b14 + src2b15 + src2b16 + src2b17
        src1 = list(filter(re.compile("^pre.").search, src_param_keys))
        src1n = [key for key in src_param_keys if key not in src1]
        src_param_keys = src1n + src1
        src2 = list(filter(re.compile("^hourglass.").search, src_param_keys))
        src2n = [key for key in src_param_keys if key not in src2]
        src2b1 = sort_hg(list(filter(re.compile("^hourglass.0.hg.").search, src2)))
        src2b2 = sort_hg(list(filter(re.compile("^hourglass.1.hg.").search, src2)))
        src2b3 = sort_hg(list(filter(re.compile("^hourglass.2.hg.").search, src2)))
        src2b4 = sort_hg(list(filter(re.compile("^hourglass.3.hg.").search, src2)))
        src_param_keys = src2n + src2b1 + src2b2 + src2b3 + src2b4
        # NOTE: the following filters also drop keys not matching the ".0."
        # sub-pattern (e.g. src3 vs src3b) — presumably intentional since the
        # assert below still requires equal lengths.
        src3 = list(filter(re.compile("^features.[0-9].before_regress").search, src_param_keys))
        src3n = [key for key in src_param_keys if key not in src3]
        src3b = list(filter(re.compile("^features.[0-9].before_regress.0.").search, src3))
        src_param_keys = src3n + src3b
        src4 = list(filter(re.compile("^outs.[0-9].").search, src_param_keys))
        src4n = [key for key in src_param_keys if key not in src4]
        src4b = list(filter(re.compile("^outs.[0-9].0.").search, src4))
        src_param_keys = src4n + src4b
        src5 = list(filter(re.compile("^merge_features.[0-9].").search, src_param_keys))
        src5n = [key for key in src_param_keys if key not in src5]
        src5b = list(filter(re.compile("^merge_features.[0-9].0.").search, src5))
        src_param_keys = src5n + src5b
        src6 = list(filter(re.compile("^merge_preds.[0-9].").search, src_param_keys))
        src6n = [key for key in src_param_keys if key not in src6]
        src6b = list(filter(re.compile("^merge_preds.[0-9].0.").search, src6))
        src_param_keys = src6n + src6b
        dst1 = list(filter(re.compile("^backbone.").search, dst_param_keys))
        dst1n = [key for key in dst_param_keys if key not in dst1]
        dst_param_keys = dst1n + dst1
        dst2 = list(filter(re.compile("^decoder.pass[1-9].hg.").search, dst_param_keys))
        dst2n = [key for key in dst_param_keys if key not in dst2]
        dst_param_keys = dst2n + dst2
        dst3 = list(filter(re.compile("^decoder.pass[1-9].pre_block.").search, dst_param_keys))
        dst3n = [key for key in dst_param_keys if key not in dst3]
        dst_param_keys = dst3n + dst3
        dst4 = list(filter(re.compile("^decoder.pass[1-9].post_block.").search, dst_param_keys))
        dst4n = [key for key in dst_param_keys if key not in dst4]
        dst_param_keys = dst4n + dst4
        dst5 = list(filter(re.compile("^decoder.pass[1-9].pre_merge_block.").search, dst_param_keys))
        dst5n = [key for key in dst_param_keys if key not in dst5]
        dst_param_keys = dst5n + dst5
        dst6 = list(filter(re.compile("^decoder.pass[1-9].post_merge_block.").search, dst_param_keys))
        dst6n = [key for key in dst_param_keys if key not in dst6]
        dst_param_keys = dst6n + dst6
    assert (len(src_param_keys) == len(dst_param_keys))
    for i, (src_key, dst_key) in enumerate(zip(src_param_keys, dst_param_keys)):
        if (src_model == "oth_shufflenetv2_wd2" and dst_model == "shufflenetv2_wd2") and \
                (src_key == "network.8.weight"):
            # Special case: 1x1-conv classifier weight becomes a 2-D FC weight.
            dst_params[dst_key] = torch.from_numpy(src_params[src_key].numpy()[:, :, 0, 0])
        else:
            # print("src_key={}, dst_key={}, src_shape={}, dst_shape={}".format(
            #     src_key, dst_key, tuple(src_params[src_key].size()), tuple(dst_params[dst_key].size())))
            assert (tuple(dst_params[dst_key].size()) == tuple(src_params[src_key].size())), \
                "src_key={}, dst_key={}, src_shape={}, dst_shape={}".format(
                    src_key, dst_key, tuple(src_params[src_key].size()), tuple(dst_params[dst_key].size()))
            assert (dst_key.split('.')[-1] == src_key.split('.')[-1])
            dst_params[dst_key] = torch.from_numpy(src_params[src_key].numpy())
    torch.save(
        obj=dst_params,
        f=dst_params_file_path)
def convert_gl2pt(dst_params_file_path,
                  dst_params,
                  dst_param_keys,
                  src_params,
                  src_param_keys):
    """
    Convert model weights from Gluon to PyTorch.

    The two key lists must already be aligned element-wise; tensors are copied
    positionally and the result saved with `torch.save`.

    Parameters:
    ----------
    dst_params_file_path : str
        Path to the output PyTorch parameter file.
    dst_params : dict
        Destination PyTorch tensors, indexed by parameter name.
    dst_param_keys : list of str
        Names of the destination parameters, aligned with `src_param_keys`.
    src_params : dict
        Source Gluon parameters, indexed by parameter name.
    src_param_keys : list of str
        Names of the source parameters.
    """
    import torch
    for src_key, dst_key in zip(src_param_keys, dst_param_keys):
        # Fail with the same diagnostic message style as the sibling
        # converters instead of a bare (message-less) assert.
        assert (tuple(dst_params[dst_key].size()) == src_params[src_key].shape), \
            "src_key={}, dst_key={}, src_shape={}, dst_shape={}".format(
                src_key, dst_key, src_params[src_key].shape, tuple(dst_params[dst_key].size()))
        dst_params[dst_key] = torch.from_numpy(src_params[src_key]._data[0].asnumpy())
    torch.save(
        obj=dst_params,
        f=dst_params_file_path)
def convert_pt2gl(dst_net,
                  dst_params_file_path,
                  dst_params,
                  dst_param_keys,
                  src_params,
                  src_param_keys,
                  ctx):
    """
    Convert model weights from PyTorch to Gluon.

    The key lists must already be aligned element-wise; each PyTorch tensor is
    loaded into the matching Gluon parameter, then the network is serialized.

    Parameters:
    ----------
    dst_net : HybridBlock
        Destination Gluon network (used to save the parameters at the end).
    dst_params_file_path : str
        Path to the output Gluon parameter file.
    dst_params : dict
        Destination Gluon parameters, indexed by name.
    dst_param_keys : list of str
        Names of the destination parameters, aligned with `src_param_keys`.
    src_params : dict
        Source PyTorch tensors, indexed by name.
    src_param_keys : list of str
        Names of the source parameters.
    ctx : Context
        MXNet context for the loaded arrays.
    """
    import mxnet as mx
    for src_key, dst_key in zip(src_param_keys, dst_param_keys):
        src_shape = tuple(src_params[src_key].size())
        dst_shape = dst_params[dst_key].shape
        assert (dst_shape == src_shape), \
            "src_key={}, dst_key={}, src_shape={}, dst_shape={}".format(
                src_key, dst_key, src_shape, dst_shape)
        src_array = mx.nd.array(src_params[src_key].numpy(), ctx)
        dst_params[dst_key]._load_init(src_array, ctx)
    dst_net.save_parameters(dst_params_file_path)
def convert_tf2tf(dst_params_file_path,
                  dst_params,
                  dst_param_keys,
                  src_params,
                  src_param_keys):
    """
    Convert model weights between two TensorFlow-1.x variable layouts
    (e.g. a tensorpack-style checkpoint into this project's naming).

    Source keys are temporarily renamed into the destination naming scheme so
    that a digit-aware sort orders both lists identically; the renames are
    undone after sorting and tensors are assigned positionally.

    Parameters:
    ----------
    dst_params_file_path : str
        Path to the output TF parameter file.
    dst_params : dict
        Destination TF variables, indexed by variable name.
    dst_param_keys : list of str
        Names of the destination variables.
    src_params : dict
        Source weights (numpy-compatible), indexed by name.
    src_param_keys : list of str
        Names of the source weights.
    """
    import re
    # Temporary renames so sorting aligns src keys with dst keys.
    src_param_keys = [key.replace("/W:", "/kernel:") for key in src_param_keys]
    src_param_keys = [key.replace("/b:", "/bias:") for key in src_param_keys]
    src_param_keys = [key.replace("linear/", "output/") for key in src_param_keys]
    src_param_keys = [key.replace("stage", "features/stage") for key in src_param_keys]
    src_param_keys = [re.sub("^conv1/", "features/init_block/conv/", key) for key in src_param_keys]
    src_param_keys = [re.sub("^conv5/", "features/final_block/conv/", key) for key in src_param_keys]
    src_param_keys = [key.replace('/dconv_bn/', '/dconv/bn/') for key in src_param_keys]
    src_param_keys = [key.replace('/shortcut_dconv_bn/', '/shortcut_dconv/bn/') for key in src_param_keys]
    # Natural sort: digit runs compare numerically.
    src_param_keys.sort()
    src_param_keys.sort(key=lambda var: ["{:10}".format(int(x)) if
                                         x.isdigit() else x for x in re.findall(r"[^0-9]|[0-9]+", var)])
    dst_param_keys.sort()
    dst_param_keys.sort(key=lambda var: ["{:10}".format(int(x)) if
                                         x.isdigit() else x for x in re.findall(r"[^0-9]|[0-9]+", var)])
    # Undo the temporary renames.
    src_param_keys = [key.replace("/kernel:", "/W:") for key in src_param_keys]
    src_param_keys = [key.replace("/bias:", "/b:") for key in src_param_keys]
    src_param_keys = [key.replace("output/", "linear/") for key in src_param_keys]
    src_param_keys = [key.replace("features/stage", "stage") for key in src_param_keys]
    src_param_keys = [key.replace("features/init_block/conv/", 'conv1/') for key in src_param_keys]
    src_param_keys = [key.replace("features/final_block/conv/", 'conv5/') for key in src_param_keys]
    src_param_keys = [key.replace('/dconv/bn/', '/dconv_bn/') for key in src_param_keys]
    src_param_keys = [key.replace('/shortcut_dconv/bn/', '/shortcut_dconv_bn/') for key in src_param_keys]
    assert (len(src_param_keys) == len(dst_param_keys))
    import tensorflow as tf
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        for i, (src_key, dst_key) in enumerate(zip(src_param_keys, dst_param_keys)):
            assert (src_params[src_key].shape == tuple(dst_params[dst_key].get_shape().as_list()))
            sess.run(dst_params[dst_key].assign(src_params[src_key]))
        from tensorflow_.utils import save_model_params
        save_model_params(
            sess=sess,
            file_path=dst_params_file_path)
def convert_tf2gl(dst_net,
                  dst_params_file_path,
                  dst_params,
                  dst_param_keys,
                  src_params,
                  src_param_keys,
                  ctx):
    """
    Convert model weights from TensorFlow to Gluon.

    Source keys are temporarily renamed so a digit-aware sort aligns them
    with the Gluon key order; weights are then transposed from the TF layout
    (HWIO / HWOI for depthwise) into Gluon's OIHW layout and loaded.

    Parameters:
    ----------
    dst_net : HybridBlock
        Destination Gluon network (used to save parameters at the end).
    dst_params_file_path : str
        Path to the output Gluon parameter file.
    dst_params : dict
        Destination Gluon parameters, indexed by name.
    dst_param_keys : list of str
        Names of the destination parameters.
    src_params : dict
        Source TF weights as numpy arrays, indexed by name.
    src_param_keys : list of str
        Names of the source weights.
    ctx : Context
        MXNet context for the loaded arrays.
    """
    import mxnet as mx
    # Temporary renames so sorting aligns src keys with the Gluon order.
    src_param_keys = [key.replace("/kernel:", "/weight:") for key in src_param_keys]
    src_param_keys = [key.replace("/dw_kernel:", "/weight_dw:") for key in src_param_keys]
    src_param_keys = [key.replace("/post_activ/", "/stageN/post_activ/") for key in src_param_keys]
    src_param_keys = [key.replace("/final_block/", "/stageN/final_block/") for key in src_param_keys]
    src_param_keys = [key.replace("/stem1_unit/", "/stage0/stem1_unit/") for key in src_param_keys]
    src_param_keys = [key.replace("/stem2_unit/", "/stage0/stem2_unit/") for key in src_param_keys]
    # Natural sort: digit runs compare numerically.
    src_param_keys.sort()
    src_param_keys.sort(key=lambda var: ["{:10}".format(int(x)) if
                                         x.isdigit() else x for x in re.findall(r"[^0-9]|[0-9]+", var)])
    dst_param_keys.sort()
    dst_param_keys.sort(key=lambda var: ["{:10}".format(int(x)) if
                                         x.isdigit() else x for x in re.findall(r"[^0-9]|[0-9]+", var)])
    # Undo the temporary renames.
    src_param_keys = [key.replace("/weight:", "/kernel:") for key in src_param_keys]
    src_param_keys = [key.replace("/weight_dw:", "/dw_kernel:") for key in src_param_keys]
    src_param_keys = [key.replace("/stageN/post_activ/", "/post_activ/") for key in src_param_keys]
    src_param_keys = [key.replace("/stageN/final_block/", "/final_block/") for key in src_param_keys]
    src_param_keys = [key.replace("/stage0/stem1_unit/", "/stem1_unit/") for key in src_param_keys]
    src_param_keys = [key.replace("/stage0/stem2_unit/", "/stem2_unit/") for key in src_param_keys]
    assert (len(src_param_keys) == len(dst_param_keys))
    for i, (src_key, dst_key) in enumerate(zip(src_param_keys, dst_param_keys)):
        src_weight = src_params[src_key]
        # Transpose TF layouts into Gluon layouts; other ranks copy as-is.
        if len(src_weight.shape) == 4:
            if src_key.split("/")[-1][:-2] == "dw_kernel":
                dst_weight = np.transpose(src_weight, axes=(2, 3, 0, 1))
            else:
                dst_weight = np.transpose(src_weight, axes=(3, 2, 0, 1))
        elif len(src_weight.shape) == 2:
            dst_weight = np.transpose(src_weight, axes=(1, 0))
        else:
            dst_weight = src_weight
        assert (dst_weight.shape == dst_params[dst_key].shape), \
            "src_key={}, dst_key={}, src_shape={}, dst_shape={}".format(
                src_key, dst_key, dst_weight.shape, dst_params[dst_key].shape)
        dst_params[dst_key]._load_init(mx.nd.array(dst_weight, ctx), ctx)
    dst_net.save_parameters(dst_params_file_path)
def convert_tf22tfl(src_net,
                    dst_params_file_path):
    """
    Convert a TF2 (Keras) model into a TFLite flat-buffer file.

    Parameters:
    ----------
    src_net : tf.keras.Model
        Source TF2 model instance.
    dst_params_file_path : str
        Path to the output TFLite model file.
    """
    import tensorflow as tf
    converter = tf.lite.TFLiteConverter.from_keras_model(src_net)
    tflite_model = converter.convert()
    # Write through a context manager so the handle is closed deterministically
    # (the original `open(...).write(...)` leaked the file object to the GC).
    with open(dst_params_file_path, "wb") as out_file:
        out_file.write(tflite_model)
def _init_ctx(args):
ctx = None
if args.src_fwk in ("gluon", "mxnet", "keras") or args.dst_fwk in ("gluon", "mxnet", "keras"):
import mxnet as mx
ctx = mx.cpu()
return ctx
def _prepare_src_model(args, ctx, use_cuda):
    """
    Instantiate the source model/parameters via `prepare_src_model`,
    forwarding the relevant script arguments.
    """
    src_kwargs = {
        "src_fwk": args.src_fwk,
        "src_model": args.src_model,
        "src_params_file_path": args.src_params,
        "dst_fwk": args.dst_fwk,
        "ctx": ctx,
        "use_cuda": use_cuda,
        "load_ignore_extra": args.load_ignore_extra,
        "remove_module": args.remove_module,
        "num_classes": args.src_num_classes,
        "in_channels": args.src_in_channels,
    }
    return prepare_src_model(**src_kwargs)
def _prepare_dst_model(args, ctx, use_cuda):
    """
    Instantiate the destination model/parameters via `prepare_dst_model`,
    forwarding the relevant script arguments.
    """
    dst_kwargs = {
        "dst_fwk": args.dst_fwk,
        "dst_model": args.dst_model,
        "src_fwk": args.src_fwk,
        "ctx": ctx,
        "use_cuda": use_cuda,
        "num_classes": args.dst_num_classes,
        "in_channels": args.dst_in_channels,
        "model_type": args.model_type,
    }
    return prepare_dst_model(**dst_kwargs)
def update_and_initialize_logging(args):
    """
    Update arguments and initialize logging.

    Collects the conda/pip package names whose versions should be logged for
    the frameworks involved in the conversion, then sets up the log file.

    Parameters:
    ----------
    args : ArgumentParser
        Main script arguments.
    """
    packages = []
    pip_packages = []
    if (args.src_fwk == "gluon") or (args.dst_fwk == "gluon"):
        # Bug fix: was `["mxnet, numpy"]` — a single, invalid package name.
        packages += ["mxnet", "numpy"]
        pip_packages += ["mxnet-cu110", "mxnet-cu112"]
    if (args.src_fwk == "pytorch") or (args.dst_fwk == "pytorch"):
        packages += ["torch", "torchvision"]
    if (args.src_fwk == "chainer") or (args.dst_fwk == "chainer"):
        packages += ["chainer"]
        pip_packages += ["cupy-cuda110", "cupy-cuda112", "chainer"]
    if (args.src_fwk == "keras") or (args.dst_fwk == "keras"):
        packages += ["keras"]
        pip_packages += ["keras", "keras-mxnet", "mxnet-cu110", "mxnet-cu112"]
    if (args.src_fwk == "tensorflow") or (args.dst_fwk == "tensorflow"):
        packages += ["tensorflow-gpu"]
        pip_packages += ["tensorflow", "tensorflow-gpu", "tensorpack"]
    if (args.src_fwk == "tf2") or (args.dst_fwk == "tf2") or (args.dst_fwk == "tfl"):
        packages += ["tensorflow"]
        pip_packages += ["tensorflow", "tensorflow-gpu"]
    _, log_file_exist = initialize_logging(
        logging_dir_path=args.save_dir,
        logging_file_name=args.logging_file_name,
        script_args=args,
        log_packages=packages,
        log_pip_packages=pip_packages)
def main():
    """Entry point: load a source model's weights and convert them to the
    destination framework, dispatching on the (src_fwk, dst_fwk) pair."""
    args = parse_args()
    ctx = None
    use_cuda = False
    # TF2 destination models must be built before logging / context setup.
    if args.dst_fwk == "tf2":
        dst_params, dst_param_keys, dst_net = _prepare_dst_model(args, ctx, use_cuda)
    update_and_initialize_logging(args=args)
    ctx = _init_ctx(args)
    src_params, src_param_keys, ext_src_param_keys, ext_src_param_keys2, src_net =\
        _prepare_src_model(args, ctx, use_cuda)
    if args.dst_fwk != "tf2":
        dst_params, dst_param_keys, dst_net = _prepare_dst_model(args, ctx, use_cuda)
    # Sanity-check parameter counts. Some framework/model combinations
    # legitimately differ in the number of parameter tensors (grouped convs
    # in keras/tf, specific mxnet models, chainer DIA-nets, tflite).
    if ((args.dst_fwk in ["keras", "tensorflow", "tf2"]) and any([s.find("convgroup") >= 0 for s in dst_param_keys]))\
            or ((args.src_fwk == "mxnet") and (args.src_model in ["crunet56", "crunet116", "preresnet269b"])):
        assert (len(src_param_keys) <= len(dst_param_keys))
    elif ((args.dst_fwk == "chainer") and
          (args.src_model.startswith("diaresnet") or args.src_model.startswith("diapreresnet"))) or\
            args.src_model.startswith("oth_ibppose"):
        assert (len(src_param_keys) >= len(dst_param_keys))
    elif args.dst_fwk == "tfl":
        pass
    else:
        assert (len(src_param_keys) == len(dst_param_keys))
    # Dispatch to the framework-pair-specific converter.
    if args.src_fwk == "gluon" and args.dst_fwk == "gluon":
        convert_gl2gl(
            dst_net=dst_net,
            dst_params_file_path=args.dst_params,
            dst_params=dst_params,
            dst_param_keys=dst_param_keys,
            src_params=src_params,
            src_param_keys=src_param_keys,
            finetune=((args.src_num_classes != args.dst_num_classes) or (args.src_in_channels != args.dst_in_channels)),
            src_model=args.src_model,
            ctx=ctx)
    elif args.src_fwk == "pytorch" and args.dst_fwk == "pytorch":
        convert_pt2pt(
            dst_params_file_path=args.dst_params,
            dst_params=dst_params,
            dst_param_keys=dst_param_keys,
            src_params=src_params,
            src_param_keys=src_param_keys,
            src_model=args.src_model,
            dst_model=args.dst_model)
    elif args.src_fwk == "gluon" and args.dst_fwk == "pytorch":
        convert_gl2pt(
            dst_params_file_path=args.dst_params,
            dst_params=dst_params,
            dst_param_keys=dst_param_keys,
            src_params=src_params,
            src_param_keys=src_param_keys)
    elif args.src_fwk == "gluon" and args.dst_fwk == "chainer":
        convert_gl2ch(
            dst_net=dst_net,
            dst_params_file_path=args.dst_params,
            dst_params=dst_params,
            dst_param_keys=dst_param_keys,
            src_params=src_params,
            src_param_keys=src_param_keys,
            ext_src_param_keys=ext_src_param_keys,
            ext_src_param_keys2=ext_src_param_keys2,
            src_model=args.src_model)
    elif args.src_fwk == "gluon" and args.dst_fwk == "keras":
        convert_gl2ke(
            dst_net=dst_net,
            dst_params_file_path=args.dst_params,
            dst_params=dst_params,
            dst_param_keys=dst_param_keys,
            src_params=src_params,
            src_param_keys=src_param_keys)
    elif args.src_fwk == "gluon" and args.dst_fwk == "tensorflow":
        convert_gl2tf(
            dst_params_file_path=args.dst_params,
            dst_params=dst_params,
            dst_param_keys=dst_param_keys,
            src_params=src_params,
            src_param_keys=src_param_keys)
    elif args.src_fwk == "gluon" and args.dst_fwk == "tf2":
        convert_gl2tf2(
            dst_net=dst_net,
            dst_params_file_path=args.dst_params,
            dst_params=dst_params,
            dst_param_keys=dst_param_keys,
            src_params=src_params,
            src_param_keys=src_param_keys,
            src_model=args.src_model)
    elif args.src_fwk == "pytorch" and args.dst_fwk == "gluon":
        convert_pt2gl(
            dst_net=dst_net,
            dst_params_file_path=args.dst_params,
            dst_params=dst_params,
            dst_param_keys=dst_param_keys,
            src_params=src_params,
            src_param_keys=src_param_keys,
            ctx=ctx)
    elif args.src_fwk == "mxnet" and args.dst_fwk == "gluon":
        convert_mx2gl(
            dst_net=dst_net,
            dst_params_file_path=args.dst_params,
            dst_params=dst_params,
            dst_param_keys=dst_param_keys,
            src_params=src_params,
            src_param_keys=src_param_keys,
            src_model=args.src_model,
            ctx=ctx)
    elif args.src_fwk == "tensorflow" and args.dst_fwk == "tensorflow":
        convert_tf2tf(
            dst_params_file_path=args.dst_params,
            dst_params=dst_params,
            dst_param_keys=dst_param_keys,
            src_params=src_params,
            src_param_keys=src_param_keys)
    elif args.src_fwk == "tensorflow" and args.dst_fwk == "gluon":
        convert_tf2gl(
            dst_net=dst_net,
            dst_params_file_path=args.dst_params,
            dst_params=dst_params,
            dst_param_keys=dst_param_keys,
            src_params=src_params,
            src_param_keys=src_param_keys,
            ctx=ctx)
    elif args.src_fwk == "tf2" and args.dst_fwk == "tfl":
        convert_tf22tfl(
            src_net=src_net,
            dst_params_file_path=args.dst_params)
    else:
        raise NotImplementedError
    logging.info("Convert {}-model {} into {}-model {}".format(
        args.src_fwk, args.src_model, args.dst_fwk, args.dst_model))
if __name__ == '__main__':
    main()
| [
"osemery@gmail.com"
] | osemery@gmail.com |
3da3e17495525b485fd627a5d52d55b261e728ec | 8d50cc4f37c153fcb51de4501f3fa50c00394d9b | /test/benchmark/resnet_tl_benchmark.py | 0273e724c53f6d0c0598924637c84431b5b3fe0c | [
"MIT"
] | permissive | liujuanLT/InsightFace_TF | dbd239dfdda1866c348e82211932884f73cb3067 | 257b6e0dcf7e7c3523dc7e1c08ba529fab1bf75b | refs/heads/master | 2022-04-27T21:24:01.458277 | 2022-03-17T12:28:15 | 2022-03-17T12:28:15 | 463,040,192 | 0 | 0 | MIT | 2022-02-24T06:51:16 | 2022-02-24T06:51:15 | null | UTF-8 | Python | false | false | 1,255 | py | import tensorflow as tf
import tensorflow.contrib.slim.nets as nets
import numpy as np
from nets.resnet import get_resnet
slim = tf.contrib.slim
resnet = nets.resnet_v1
# Benchmark script: trains a ResNet-50 classifier on random data forever,
# printing the loss each step. Uses the TF1 graph/session API (tf.contrib).
if __name__ == '__main__':
    output_shape = 85164
    batch_size = 128
    # Graph inputs: NHWC images and one-hot-sized label vectors.
    image = tf.placeholder(name='input_x', shape=[None, 224, 224, 3], dtype=tf.float32)
    labels = tf.placeholder(name='input_label', shape=[None, output_shape], dtype=tf.float32)
    with slim.arg_scope(nets.resnet_utils.resnet_arg_scope()):
        # NOTE(review): rebinds `nets` from the slim module to the network object.
        nets = get_resnet(image, output_shape, 50, type='resnet', sess=None, pretrained=False)
    print(nets.outputs)
    probabilities = tf.reduce_mean(tf.nn.softmax(nets.outputs, dim=-1))
    print(probabilities)
    # L2 distance between mean softmax probability and the label tensor.
    losses = tf.norm(tf.subtract(probabilities, labels))
    train_op = tf.train.AdamOptimizer(learning_rate=0.0001).minimize(losses)
    sess = tf.Session()
    saver = tf.train.Saver()
    sess.run(tf.global_variables_initializer())
    # Endless benchmark loop over freshly generated random batches.
    while True:
        datasets = np.random.randn(batch_size, 224, 224, 3).astype(np.float32)
        datasets_labels = np.random.randn(batch_size, output_shape).astype(np.float32)
        losses_val, _ = sess.run([losses, train_op], feed_dict={image: datasets, labels: datasets_labels})
        print(losses_val) | [
"auroua@yeah.net"
] | auroua@yeah.net |
94f2093636ae67fdc8ec2d5431c2b52cbd51d7c2 | ac5e52a3fc52dde58d208746cddabef2e378119e | /exps-gsn-edf/gsn-edf_ut=3.0_rd=0.5_rw=0.06_rn=4_u=0.075-0.35_p=harmonic-2/sched=RUN_trial=25/params.py | 1e06a0ee411f4cd8e4e96c1df8f010d7336d6730 | [] | no_license | ricardobtxr/experiment-scripts | 1e2abfcd94fb0ef5a56c5d7dffddfe814752eef1 | 7bcebff7ac2f2822423f211f1162cd017a18babb | refs/heads/master | 2023-04-09T02:37:41.466794 | 2021-04-25T03:27:16 | 2021-04-25T03:27:16 | 358,926,457 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 254 | py | {'cpus': 4,
'duration': 30,
'final_util': '3.041500',
'max_util': '3.0',
'periods': 'harmonic-2',
'release_master': False,
'res_distr': '0.5',
'res_nmb': '4',
'res_weight': '0.06',
'scheduler': 'GSN-EDF',
'trial': 25,
'utils': 'uni-medium-3'}
| [
"ricardo.btxr@gmail.com"
] | ricardo.btxr@gmail.com |
452d72297d0a3b0666a56a4a72ddda202448eb36 | c2c0f1f565a285146a30dcab99e08e8353e03e54 | /geocoding_cache/backup.py | a74de831b589d1e9793dc0a64c2e9ade43388c34 | [] | no_license | adamhammes/geocoding-cache | f522f9be6078ae32a62cf9dd427b94489662285e | 110d6607af73d3ed064e06f432e6da33af2afb80 | refs/heads/master | 2023-06-14T04:42:25.109278 | 2021-07-07T17:47:20 | 2021-07-07T17:47:20 | 274,026,001 | 0 | 0 | null | 2021-03-20T04:25:04 | 2020-06-22T02:55:40 | Python | UTF-8 | Python | false | false | 657 | py | import datetime
import sqlite3
import tempfile
import boto3
def backup_db(source: sqlite3.Connection):
    """Snapshot *source* into a temporary SQLite file and upload it to S3.

    The object key rotates on the day of the month, so at most 31 backups
    are kept in the bucket.
    """
    print("Backing up the database")
    client = boto3.client("s3")
    key = f"geocoding_cache/{datetime.datetime.today().day:02}.sqlite3"
    with tempfile.NamedTemporaryFile() as tmp:
        snapshot = sqlite3.connect(tmp.name)
        print("Making a copy of the database...")
        source.backup(snapshot)
        snapshot.close()
        print("Uploading to s3...")
        client.upload_file(tmp.name, "kijiji-apartments", key)
        print("...done")
| [
"ahammes@cortexmedia.ca"
] | ahammes@cortexmedia.ca |
e49952bb3039c47341a3a2001f153c1fcea8521c | 05169e203974411667ab947298a74575b8a179e0 | /packages/jet_bridge_base/jet_bridge_base/serializers/relationship_override.py | c985788ccf2e5eeaf886f08eb8bb093846f356d8 | [
"MIT"
] | permissive | jet-admin/jet-bridge | f6b563e1801985063483ddb02e9e1c3301dc0612 | c53d30fb308eed5822083eaf71f641c4098610cc | refs/heads/master | 2023-09-01T14:31:42.261427 | 2023-08-24T13:54:34 | 2023-08-24T13:54:34 | 163,167,532 | 1,564 | 166 | MIT | 2023-03-18T03:20:04 | 2018-12-26T10:27:33 | Python | UTF-8 | Python | false | false | 4,873 | py | from jet_bridge_base.models.model_relation_override import ModelRelationOverrideModel
from jet_bridge_base.store import store
from sqlalchemy import inspect
from jet_bridge_base import fields
from jet_bridge_base.db import get_mapped_base, reload_request_graphql_schema, get_request_connection
from jet_bridge_base.exceptions.validation_error import ValidationError
from jet_bridge_base.serializers.serializer import Serializer
from jet_bridge_base.logger import logger
class ModelDescriptionRelationOverrideSerializer(Serializer):
    """Serializer for one relation-override entry of a model."""
    # Relation direction; validated elsewhere as 'MANYTOONE' or 'ONETOMANY'.
    direction = fields.CharField()
    # Field on the overridden model that carries the relation key.
    local_field = fields.CharField()
    # Name of the related model and the field on it the relation points to.
    related_model = fields.CharField()
    related_field = fields.CharField()
class ModelDescriptionRelationOverridesSerializer(Serializer):
    """Validates and persists the set of relation overrides for one model."""
    model = fields.CharField()
    relations = ModelDescriptionRelationOverrideSerializer(many=True)

    def get_model(self, request, name):
        """Return the mapped SQLAlchemy class for *name*, or None if unknown."""
        MappedBase = get_mapped_base(request)
        return MappedBase.classes.get(name)

    def generate_many_to_one_name(self, mapper, local_field, related_model, related_field):
        """Build a unique name for a many-to-one relation, avoiding column clashes."""
        name = '__'.join([local_field, 'to', related_model, related_field])
        if name in mapper.columns:
            name = name + '_relation'
            logger.warning('Already detected column name, using {}'.format(name))
        return name

    def generate_one_to_many_name(self, mapper, local_field, related_model, related_field):
        """Build a unique name for a one-to-many relation, avoiding column clashes."""
        name = '__'.join([related_model, related_field, 'to', local_field])
        if name in mapper.columns:
            name = name + '_relation'
            logger.warning('Already detected column name, using {}'.format(name))
        return name

    def validate(self, attrs):
        """Check the model exists and assign a generated name to each relation."""
        request = self.context.get('request')
        Model = self.get_model(request, attrs['model'])
        if Model is None:
            raise ValidationError('Unknown relation override model: {}'.format(attrs['model']))
        mapper = inspect(Model)
        for item in attrs['relations']:
            if item['direction'] == 'MANYTOONE':
                item['name'] = self.generate_many_to_one_name(mapper, item['local_field'], item['related_model'], item['related_field'])
            elif item['direction'] == 'ONETOMANY':
                item['name'] = self.generate_one_to_many_name(mapper, item['local_field'], item['related_model'], item['related_field'])
            else:
                raise ValidationError('Unknown relation direction: {}'.format(item['direction']))
        return attrs

    def save(self):
        """Sync the stored overrides with the validated data.

        Existing rows (matched positionally by sorted name) are updated in
        place, missing rows are created, and surplus rows are deleted; the
        GraphQL schema is reloaded afterwards.
        """
        request = self.context.get('request')
        connection = get_request_connection(request)
        draft = bool(request.get_argument('draft', False))

        with store.session() as session:
            with session.begin():
                for item in self.validated_data:
                    set_overrides = sorted(item['relations'], key=lambda x: x['name'])

                    existing_overrides = session.query(ModelRelationOverrideModel).filter(
                        ModelRelationOverrideModel.connection_id == connection['id'],
                        ModelRelationOverrideModel.model == item['model'],
                        # BUGFIX: was `draft == draft`, a tautology that never
                        # filtered by the draft column.
                        ModelRelationOverrideModel.draft == draft
                    ).order_by(ModelRelationOverrideModel.name).all()
                    existing_overrides = list(existing_overrides)

                    for i, override in enumerate(set_overrides):
                        existing_override = existing_overrides[i] if i < len(existing_overrides) else None

                        if existing_override:
                            existing_override.name = override.get('name')
                            existing_override.direction = override.get('direction')
                            existing_override.local_field = override.get('local_field')
                            existing_override.related_model = override.get('related_model')
                            existing_override.related_field = override.get('related_field')
                        else:
                            session.add(ModelRelationOverrideModel(
                                connection_id=connection['id'],
                                model=item['model'],
                                draft=draft,
                                name=override.get('name'),
                                direction=override.get('direction'),
                                local_field=override.get('local_field'),
                                related_model=override.get('related_model'),
                                related_field=override.get('related_field')
                            ))

                    delete_overrides = existing_overrides[len(item['relations']):]
                    for override in delete_overrides:
                        session.delete(override)

        reload_request_graphql_schema(request, draft)
"f1nal@cgaming.org"
] | f1nal@cgaming.org |
2307b6a1bef1cb4612e3ecc93f7bd664bb61386c | 522c2645e1b14727279b417c4e6de16e0248406b | /Livrables/ExtractJson.py | dea1eccd47b7406340421cbe1d6b0cba442a2ef1 | [] | no_license | jerem33620/P5_Pur_Beurre_OC | 532757abd66484398f0389abcd53e647e2a45327 | decc9a341e8f2659cdc9769a9a16659023e0a3a4 | refs/heads/master | 2020-04-23T02:23:14.508665 | 2019-02-19T14:53:23 | 2019-02-19T14:53:23 | 170,844,908 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,998 | py | #! /usr/bin/env python3
# coding: utf-8
class ExtractFromJson:
    """Extract, from the raw JSON returned by the API, only the fields the
    pur_beurre database needs, and trigger the build of the cleaned payload."""

    def __init__(self, json_data):
        """Store the raw API payload and the list of required keys.

        For each product stored in the DB we need:
        id, product_name, categories, nutrition_grade, stores_tags,
        generic_name, url.
        """
        self.keys = [
            "id",
            "product_name",
            "categories",
            "nutrition_grades",
            "stores_tags",
            "generic_name",
            "url"
        ]
        self.json_data = json_data

    def extract_json(self):
        """Return a list of product dicts containing only the keys in ``self.keys``.

        Products missing any required key (or with an empty value) are dropped,
        and near-duplicates are filtered by the first four letters of their
        name (lower-cased).
        """
        products_list = []
        # Crude dedup on the first 4 letters of the product name.
        # BUGFIX: a set is used instead of a list (O(1) membership), and products
        # without a "product_name" are skipped instead of raising KeyError.
        seen_prefixes = set()
        for data in self.json_data["products"]:
            name = data.get("product_name")
            if not name:
                # Cannot dedup or store a nameless product.
                continue
            prefix = name[:4].lower()
            if prefix in seen_prefixes:
                continue
            seen_prefixes.add(prefix)
            temp_dict = {}
            complete = True
            for key in self.keys:
                # Keep the field only if it is present and non-empty.
                if key in data and data[key] != "" and data[key] != []:
                    temp_dict[key] = data[key]
                else:
                    # A required field is missing: discard this product.
                    complete = False
                    break
            if complete:
                products_list.append(temp_dict)
        return products_list
| [
"jeremyguy@orange.fr"
] | jeremyguy@orange.fr |
042e3b16684fe9c630da6b06a670a60326e24702 | a63e53db61ef8a4596089eb3af99a94b128ce716 | /util/alemutdf.py | 7a5f495486dd37942f5cd7aa88f8e15e65724f49 | [
"MIT"
] | permissive | Aletechdev/ava | 6737214a99846e16b1cf51b4519b3651e000ee6a | fea1f382229c46f83b76ffbce32bf00ff05a37b9 | refs/heads/master | 2022-10-25T07:16:40.338415 | 2021-07-16T02:14:22 | 2021-07-16T02:14:22 | 231,140,046 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,513 | py | import os
import os.path
import pandas as pd
import numpy as np
from tqdm import tqdm
import os, sys
module_path = os.path.abspath(os.path.join('..'))
if module_path not in sys.path:
sys.path.append(module_path)
from util.mut import get_del_size, get_ins_size, get_inv_size, get_con_size, get_sub_size, get_amp_size, is_frameshift, is_coding_mut, is_premature_stop_codon_SNP, is_readthrough_codon_SNP, is_start_codon_removal
# TODO: Integrate functionality to get the series of fixed mutations defined by a final mutation with a frequency larger than a given threshold into this list of scripts. This functionality is implemented in substitution_rate_decline.ipynb.
def remove_html_from_sample_name(raw_sample_name):
    """Strip HTML tags and the 'contaminatedhypermutated' flag from a sample name."""
    import re  # local import: `re` is not among this module's top-level imports
    # BUGFIX: was `re.sub(cleanr, '', raw_html)` -- `raw_html` is undefined here,
    # so every call raised NameError. Use the actual parameter.
    cleantext = re.sub(re.compile('<.*?>'), '', raw_sample_name)
    cleantext = cleantext.replace("contaminatedhypermutated", '')
    return cleantext
def get_coding_muts_df(mut_df):
    """Return a copy of the rows whose Details mark a protein-coding mutation."""
    details = mut_df["Details"]
    noncoding_mask = (details.str.contains("intergenic")
                      | details.str.contains("pseudogene")
                      | details.str.contains("noncoding"))
    return mut_df[~noncoding_mask].copy()
def get_noncoding_muts_df(mut_df):
    """Return a copy of the rows whose Details mark a non-coding mutation."""
    details = mut_df["Details"]
    mask = details.str.contains("intergenic")
    for keyword in ("pseudogene", "noncoding"):
        mask = mask | details.str.contains(keyword)
    return mut_df[mask].copy()
def get_genetic_muts_df(mut_df):
    """Return a copy of the rows that are not intergenic (i.e. fall within genes)."""
    is_intergenic = mut_df["Details"].str.contains("intergenic")
    return mut_df.loc[~is_intergenic].copy()
def get_mut_dataframe(CSV_file_path,
                      include_dups = False,
                      intragenic_muts_only = False):
    """Parse one ALEdb-exported mutation CSV into a tidy long-format DataFrame.

    Each per-sample mutation column ("<exp> A# F# I# R#") becomes rows with
    exp/ale/flask/isolate/tech_rep/presence identifier columns.
    """
    # Step 1: Import database
    raw_db = pd.read_csv(CSV_file_path)
    # Drop annotation columns that are not needed downstream (if present).
    if 'Function' in raw_db.columns:
        raw_db = raw_db.drop('Function', axis=1)
    if 'Product' in raw_db.columns:
        raw_db = raw_db.drop('Product', axis=1)
    if 'GO Process' in raw_db.columns:
        raw_db = raw_db.drop('GO Process', axis=1)
    if 'GO Component' in raw_db.columns:
        raw_db = raw_db.drop('GO Component', axis=1)
    # TODO: The below will crash if most than these columns. Needs to ignore other mutation columns.
    # Step 2: Separate columns based on usage
    keep_cols = ['Position','Mutation Type','Sequence Change','Details','Gene']
    if "Reference Seq" in raw_db.columns: # For backwards compatibility with older exported mutation data
        keep_cols.append("Reference Seq")
    if "Mut ID" in raw_db.columns: # For backwards compatibility with older exported mutation data
        keep_cols.append("Mut ID")
    # All remaining columns are assumed to be per-sample mutation columns.
    mut_cols = sorted(list(set(raw_db.columns) - set(keep_cols)))
    file_name = os.path.basename(CSV_file_path)
    exp_name_from_file = file_name.replace(".csv", '')
    # Step 3: Shift mutation column names into row identifiers
    csv_file_mutat_df = pd.DataFrame()
    for col in tqdm(mut_cols):
        # Keep only the rows where this sample actually carries the mutation.
        df = raw_db[raw_db[col].notnull()][keep_cols]
        exp_name = '_'.join(col.split(' ')[:-4])
        if exp_name == '': # Will happen with newer mutation data exported from ALEdb
            exp_name = exp_name_from_file
        df['exp'] = exp_name
        # Column name layout is "<exp> A<ale> F<flask> I<isolate> R<tech_rep>".
        df['ale'] = int(col.split(' ')[-4][1:])
        df['flask'] = int(col.split(' ')[-3][1:])
        df['isolate'] = int(col.split(' ')[-2][1:])
        df['tech_rep'] = int(col.split(' ')[-1][1:])
        df['presence'] = raw_db[raw_db[col].notnull()][col]
        csv_file_mutat_df = pd.concat([csv_file_mutat_df,df])
    csv_file_mutat_df = csv_file_mutat_df[['exp','ale','flask','isolate','tech_rep','presence'] + keep_cols]
    csv_file_mutat_df = csv_file_mutat_df.fillna('')
    # Remove mutation entries with empty gene since they will screw up mutat_df.groupby(['Gene', ...])
    csv_file_mutat_df = csv_file_mutat_df.loc[csv_file_mutat_df['Gene'] != '']
    # Remove weird characters between gene names in multiple gene annotation.
    # NOTE(review): the first argument looks like a non-breaking space (U+00A0)
    # being replaced with a regular space -- verify the file encoding.
    csv_file_mutat_df['Gene'] = csv_file_mutat_df['Gene'].str.replace(" ", " ")
    if not include_dups:
        csv_file_mutat_df = csv_file_mutat_df.loc[csv_file_mutat_df['Details'] != 'Duplication']
    if intragenic_muts_only:
        # Multi-gene annotations are comma-separated; drop them.
        csv_file_mutat_df = csv_file_mutat_df.loc[csv_file_mutat_df['Gene'].str.contains(',') == False]
    return csv_file_mutat_df
def get_all_sample_mut_df(dir_path,
                          include_dups = False,
                          intragenic_muts_only = False):
    """Parse every exported mutation CSV in *dir_path* and concatenate them."""
    frames = []
    for file_name in os.listdir(dir_path):
        file_path = dir_path + '/' + file_name
        print(file_path)
        frames.append(get_mut_dataframe(file_path, include_dups, intragenic_muts_only))
    return pd.concat(frames)
return mutat_df
def _get_exp_ale_set(mut_df):
exp_ale_df = mut_df.copy()
exp_ale_df["exp ale"] = exp_ale_df["exp"] + ' ' + exp_ale_df["ale"].map(str)
exp_ale_set = set(exp_ale_df['exp ale'].tolist())
return exp_ale_set
def get_gene_mut_mat(exp_ale_mut_gene_df):
    """Build a binary gene-by-ALE matrix (1 = gene mutated in that exp/ALE)."""
    mut_df = exp_ale_mut_gene_df.copy()
    # Drop sample-level columns so duplicate gene/ALE observations collapse.
    for col in ("presence", "tech_rep", "isolate", "flask",
                "Position", "Mutation Type", "Sequence Change", "Details"):
        if col in mut_df.columns:
            del mut_df[col]
    mut_df = mut_df.drop_duplicates()
    mut_df["exp ale"] = mut_df["exp"] + ' ' + mut_df["ale"].map(str)
    matrix_columns = _get_exp_ale_set(mut_df)
    # Unique gene names form the index: a gene can be mutated in several ALEs,
    # so the "Gene" column itself may contain repeats.
    matrix_index = set(mut_df["Gene"].tolist())
    mut_mat_df = pd.DataFrame(columns=matrix_columns, index=matrix_index).fillna(0)
    for gene_name, gene_df in mut_df.groupby("Gene"):
        for _, row in gene_df.iterrows():
            mut_mat_df.loc[gene_name, row["exp ale"]] = 1
    return mut_mat_df
def get_gene_mut_count_mat(exp_ale_mut_gene_df):
    """Build a gene-by-ALE matrix counting mutation observations per gene per ALE."""
    mut_df = exp_ale_mut_gene_df.copy()
    mut_df["exp ale"] = mut_df["exp"] + ' ' + mut_df["ale"].map(str)
    matrix_columns = _get_exp_ale_set(mut_df)
    # Unique gene names form the index (a gene may appear in several ALEs).
    matrix_index = set(mut_df["Gene"].tolist())
    mut_mat_df = pd.DataFrame(columns=matrix_columns, index=matrix_index).fillna(0)
    # Every row is one observation; increment the gene/ALE cell for each.
    for _, row in mut_df.iterrows():
        mut_mat_df.loc[row["Gene"], row["exp ale"]] += 1
    return mut_mat_df
def get_mut_mat(exp_ale_mut_gene_df):
    """Build a binary (gene + sequence change)-by-ALE mutation matrix."""
    mut_df = exp_ale_mut_gene_df.copy()
    # Drop sample-level columns so duplicate observations collapse.
    for col in ("presence", "tech_rep", "isolate", "flask",
                "Position", "Mutation Type", "Details"):
        if col in mut_df.columns:
            del mut_df[col]
    mut_df = mut_df.drop_duplicates()
    mut_df["exp ale"] = mut_df["exp"] + ' ' + mut_df["ale"].map(str)
    mut_df["gene seq change"] = mut_df["Gene"] + ' ' + mut_df["Sequence Change"]
    matrix_columns = _get_exp_ale_set(mut_df)          # unique exp+ALE labels
    matrix_index = set(mut_df["gene seq change"].tolist())  # unique mutation labels
    mut_mat_df = pd.DataFrame(columns=matrix_columns, index=matrix_index).fillna(0)
    for _, row in mut_df.iterrows():
        mut_mat_df.loc[row["gene seq change"], row["exp ale"]] = 1
    return mut_mat_df
def get_enrichment_muts(mut_df):
    """Return mutations in genes hit more than once within the same experiment.

    Duplications are removed and sample-level columns dropped so that the
    same mutation observed in multiple samples collapses to one row before
    counting hits per (exp, Gene).
    """
    trunc_mut_df = mut_df.copy()
    # If we are going to keep Duplications, though we want to remove the '[' and ']' from their gene annotations.
    # trunc_mut_df["Gene"] = trunc_mut_df["Gene"].map(lambda x: x.lstrip('[').rstrip(']'))
    # Removing duplications
    trunc_mut_df = trunc_mut_df[trunc_mut_df["Details"] != "Duplication"]
    # Removing unused columns
    del trunc_mut_df["tech_rep"]
    del trunc_mut_df["isolate"]
    # Could have the same mutation, but with a different presence due to differences between clonal and population reseq'ing
    del trunc_mut_df["presence"]
    trunc_mut_df = trunc_mut_df.drop_duplicates()
    enriched_frames = []
    for _, gene_mut_df in trunc_mut_df.groupby(["exp", "Gene"]):
        if gene_mut_df.shape[0] > 1:
            enriched_frames.append(gene_mut_df)
    # BUGFIX: DataFrame.append was removed in pandas 2.0; collect and concat.
    if enriched_frames:
        return pd.concat(enriched_frames)
    return pd.DataFrame()
def get_ALE_final_flask_df(ALE_df):
    """Return the rows belonging to the ALE's last (maximum-numbered) flask."""
    last_flask = ALE_df["flask"].max()
    return ALE_df[ALE_df["flask"] == last_flask]
def get_exp_final_flask_df(exp_df):
    """Return, for every ALE in the experiment, the rows of its final flask."""
    # BUGFIX: DataFrame.append was removed in pandas 2.0; collect and concat.
    per_ale_frames = [get_ALE_final_flask_df(ale_mut_df)
                      for ale, ale_mut_df in exp_df.groupby("ale")]
    if per_ale_frames:
        return pd.concat(per_ale_frames)
    return pd.DataFrame()
def _get_max_freq_mut_df(mut_df):
max_mut_freq = mut_df["presence"].max()
max_mut_df = mut_df[mut_df["presence"] == max_mut_freq]
max_mut_df = max_mut_df.sort_values("flask")
earliest_flask_max_mut_df = max_mut_df[:1]
return earliest_flask_max_mut_df # only need to return first row of max_mut_df
def get_ALE_max_freq_mut_df(ALE_mut_df, endpoint_flask_only):
    """For each distinct mutation in one ALE, keep its max-frequency observation.

    If *endpoint_flask_only* is True, only the ALE's final flask is considered.
    """
    if endpoint_flask_only:
        ALE_mut_df = get_ALE_final_flask_df(ALE_mut_df)
    # List of everything that defines a mutation besides instance values (freq, isolate#, etc.)
    mutation_descriptors_list = ["Position", "Mutation Type", "Sequence Change", "Details", "Gene"]
    max_freq_frames = []
    for _, mut_df in ALE_mut_df.groupby(mutation_descriptors_list):
        if len(mut_df) > 0:
            max_freq_frames.append(_get_max_freq_mut_df(mut_df))
    # BUGFIX: DataFrame.append was removed in pandas 2.0; collect and concat.
    if max_freq_frames:
        return pd.concat(max_freq_frames)
    return pd.DataFrame()
def get_exp_max_freq_mut_df(exp_mut_df, endpoint_flask_only):
    """Per-ALE max-frequency mutations for one experiment."""
    # BUGFIX: DataFrame.append was removed in pandas 2.0; collect and concat.
    per_ale_frames = [get_ALE_max_freq_mut_df(ALE_mut_df, endpoint_flask_only)
                      for ale_name, ALE_mut_df in exp_mut_df.groupby(["ale"])
                      if len(ALE_mut_df) > 0]
    if per_ale_frames:
        return pd.concat(per_ale_frames)
    return pd.DataFrame()
def get_multi_exp_max_freq_mut_df(mut_df, endpoint_flask_only):
    """Per-(experiment, ALE) max-frequency mutations across many experiments."""
    # BUGFIX: DataFrame.append was removed in pandas 2.0; collect and concat.
    per_group_frames = [get_ALE_max_freq_mut_df(exp_ale_mut_df, endpoint_flask_only)
                        for group_name, exp_ale_mut_df in mut_df.groupby(["exp", "ale"])
                        if len(exp_ale_mut_df) > 0]
    if per_group_frames:
        return pd.concat(per_group_frames)
    return pd.DataFrame()
def get_filtered_mut_df(fixed_mut_df, filter_mut_df):
    """Inner-join *fixed_mut_df* with *filter_mut_df* on mutation identity.

    Sample-level columns are dropped from the filter frame first so the join
    keys are purely mutation identity (exp, ale, position, type, change, ...).
    """
    for sample_col in ("flask", "isolate", "tech_rep", "presence"):
        if sample_col in filter_mut_df.columns:
            filter_mut_df = filter_mut_df.drop(sample_col, axis=1)
    return pd.merge(fixed_mut_df,
                    filter_mut_df,
                    how="inner",
                    on=["exp", "ale", "Position", "Mutation Type", "Sequence Change", "Details", "Gene"])
# Filtering for fixed mutation SERIES that have a max freq larger than mut_freq_floor.
def get_filtered_fixed_mut_series_df(fixed_mut_df, mut_freq_floor):
    """Keep only mutation series whose final-flask max frequency exceeds the floor."""
    # TODO: Seems like building the filter frame could be replaced with
    # _get_max_freq_mut_df(...), though a unit test is needed to clarify.
    per_exp_frames = [get_exp_max_freq_mut_df(exp_mut_df, endpoint_flask_only=True)
                      for exp_name, exp_mut_df in fixed_mut_df.groupby(["exp"])]
    filter_mut_df = pd.concat(per_exp_frames)
    filter_mut_df = filter_mut_df[filter_mut_df["presence"] > mut_freq_floor]
    return get_filtered_mut_df(fixed_mut_df, filter_mut_df)
'''
# !!! get_ALE_mut_type_frac_df AND get_exp_avg_ale_mut_type_frac_df ARE OBSOLETE.
# USE get_mut_type_avg_frac_across_class_df INSTEAD
def get_ALE_mut_type_frac_df(mut_df, df_column_name):
mut_type_set = set(mut_df[df_column_name])
mut_type_frac_df = pd.DataFrame()
for exp_ale_tuple, exp_ale_max_mut_df in mut_df.groupby(["exp", "ale"]):
for mut_type in mut_type_set:
mut_type_count = len(exp_ale_max_mut_df[exp_ale_max_mut_df[df_column_name]==mut_type])
mut_type_fraction = mut_type_count/len(exp_ale_max_mut_df)
mut_type_frac_df = mut_type_frac_df.append(pd.DataFrame([[exp_ale_tuple[0],
exp_ale_tuple[1],
mut_type,
mut_type_fraction]],
columns=["exp",
"ale",
df_column_name,
"fraction"]))
return mut_type_frac_df
def get_exp_avg_ale_mut_type_frac_df(mut_df, column_name):
ale_mut_type_frac_df = get_ALE_mut_type_frac_df(mut_df, column_name)
exp_avg_ale_mut_type_frac_df = pd.DataFrame()
for exp_name_mut_type_tuple, mut_type_df in ale_mut_type_frac_df.groupby(["exp", column_name]):
exp_name = exp_name_mut_type_tuple[0]
mut_type = exp_name_mut_type_tuple[1]
mut_type_ale_mean = np.mean(mut_type_df["fraction"])
df = pd.DataFrame([[exp_name, mut_type, mut_type_ale_mean]],
columns=["experiment", column_name, "fraction"])
exp_avg_ale_mut_type_frac_df = exp_avg_ale_mut_type_frac_df.append(df)
return exp_avg_ale_mut_type_frac_df
'''
def get_mut_type_frac_across_class_df(mut_df, exp_level_l, mut_type_class, all_mut_type_class_set):
    """Per experiment-level group, compute the fraction of each mutation type.

    *exp_level_l* is the list of grouping columns (e.g. ["exp", "ale"]);
    *mut_type_class* is the column holding the type; *all_mut_type_class_set*
    enumerates the types so absent ones get an explicit 0 fraction.
    """
    row_frames = []
    for class_tup, group_df in mut_df.groupby(exp_level_l):
        for mut_type in all_mut_type_class_set:
            mut_type_count = len(group_df[group_df[mut_type_class] == mut_type])
            mut_type_frac = mut_type_count / len(group_df)
            # groupby yields a scalar key for a single grouping column.
            if len(exp_level_l) > 1:
                key_values = list(class_tup)
            else:
                key_values = [class_tup]
            data_l = key_values + [mut_type, mut_type_frac]
            col_name_l = exp_level_l + [mut_type_class, "fraction"]
            row_frames.append(pd.DataFrame([data_l], columns=col_name_l))
    # BUGFIX: DataFrame.append was removed in pandas 2.0; collect and concat.
    if row_frames:
        return pd.concat(row_frames)
    return pd.DataFrame()
def get_mut_type_avg_frac_across_class_df(mut_df, exp_level_l, mut_type_class, all_mut_type_class_set):
    """Average the per-group mutation-type fractions across the top-level class."""
    mut_type_frac_df = get_mut_type_frac_across_class_df(mut_df, exp_level_l, mut_type_class, all_mut_type_class_set)
    # !!! Assuming the first class is the final class to consider penetration across.
    final_penetration_class = exp_level_l[0]
    averaged_frames = []
    for (final_class_type, mut_type), mut_type_df in mut_type_frac_df.groupby([final_penetration_class, mut_type_class]):
        mut_type_class_type_mean = np.mean(mut_type_df["fraction"])
        averaged_frames.append(pd.DataFrame(
            [[final_class_type, mut_type, mut_type_class_type_mean]],
            columns=[final_penetration_class, mut_type_class, "fraction"]))
    # BUGFIX: DataFrame.append was removed in pandas 2.0; collect and concat.
    if averaged_frames:
        return pd.concat(averaged_frames)
    return pd.DataFrame()
| [
"pphaneuf@eng.ucsd.edu"
] | pphaneuf@eng.ucsd.edu |
ebcd6ed252031397cde74ec58dfbe8518d89830a | 835722d5632969c3857506eed7a8a5b678419190 | /wechatHeadImg.py | 81f9d83c78c4493b7d74797d1dba6d330ce7559f | [] | no_license | Derrick-Wang/wechatFriend | 176451cc83a90112fae107ab84170b0059b68dd9 | 9905ed6f3dd337b46e0e0a329b141938e5bd7444 | refs/heads/master | 2020-04-09T02:08:47.515340 | 2018-12-01T15:24:24 | 2018-12-01T15:24:24 | 159,930,099 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,005 | py | # coding:utf-8
import itchat
import os
import re
import math
import random
import PIL.Image as Image
import matplotlib.pyplot as plt
from wordcloud import WordCloud
# Fetch friend avatars
def headImg():
    """Log in to WeChat and save each friend's avatar as img/<RemarkName>.jpg."""
    itchat.login()
    friends = itchat.get_friends(update=True)
    # Iterate over the friend records
    for count, f in enumerate(friends):
        # Fetch the avatar bytes by userName
        img = itchat.get_head_img(userName=f["UserName"])
        # Save the avatar file, named after the friend's remark name
        imgFile = open("img/" + f["RemarkName"] + ".jpg", "wb")
        imgFile.write(img)
        imgFile.close()
# Stitch the avatars into one collage
def createImg():
    """Tile every image in img/ into a 640x640 collage saved as weChatFriend.png."""
    x = 0
    y = 0
    imgs = os.listdir("img")
    # Optionally shuffle the avatars
    # random.shuffle(imgs)
    # Create the canvas the small images are pasted onto
    newImg = Image.new('RGBA', (640, 640))
    # math.sqrt() to compute each tile's width/height so all avatars fit
    width = int(math.sqrt(640 * 640 / len(imgs)))
    # Number of images per row
    numLine = int(640 / width)
    for i in imgs:
        try:
            img = Image.open("img/" + i)
            # Shrink the image to tile size
            img = img.resize((width, width), Image.ANTIALIAS)
            # Paste the tile; fill a row, then wrap to the next row
            newImg.paste(img, (x * width, y * width))
            x += 1
            if x >= numLine:
                x = 0
                y += 1
        except IOError:
            print("img/ %s can not open"%(i))
    newImg.save("weChatFriend.png")
# Collect friends' signatures
def getSignature():
    """Log in to WeChat and append each friend's cleaned signature to sign.txt."""
    itchat.login()
    friends = itchat.get_friends(update=True)
    file = open('sign.txt', 'a', encoding='utf-8')
    for f in friends:
        # Strip emoji/span/class markup tokens from the signature text
        signature = f["Signature"].strip().replace("emoji", "").replace("span", "").replace("class", "")
        # Drop emoji code points (1f....) and leftover markup characters
        rec = re.compile("1f\d+\w*|[<>/=]")
        signature = rec.sub("", signature)
        file.write(signature + "\n")
# Generate a word-cloud image
def create_word_cloud(filename):
    """Build a word cloud from <filename>.txt, display it, and save signature.png."""
    # Read the file contents
    text = open("{}.txt".format(filename), encoding='utf-8').read()
    # Commented-out variant using jieba word segmentation
    # wordlist = jieba.cut(text, cut_all=True)
    # wl = " ".join(wordlist)
    # Configure the word cloud
    wc = WordCloud(
        # Background color
        background_color="white",
        # Maximum number of words shown
        max_words=2000,
        # Fonts live in the OS font dir: C:\Windows\Fonts\ on Windows,
        # /System/Library/Fonts/PingFang.ttc is an option on macOS
        font_path='C:\\Windows\\Fonts\\simfang.ttf',
        height=500,
        width=500,
        # Maximum font size
        max_font_size=60,
        # Number of random states, i.e. number of color schemes
        random_state=30,
    )
    myword = wc.generate(text)  # generate the cloud; with jieba, use wl instead of text
    # Display the word-cloud image
    plt.imshow(myword)
    plt.axis("off")
    plt.show()
    wc.to_file('signature.png')  # save the word cloud
# WeChat friend avatar collage
headImg()
createImg()
# Personalized-signature statistics (disabled)
# getSignature()
# create_word_cloud("sign") | [
"wandereding@gmail.com"
] | wandereding@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.