blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2 values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313 values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107 values | src_encoding stringclasses 20 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.02M | extension stringclasses 78 values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
1571db30fcb1897a7c23bbc6da84249efffefe12 | 13cf11440998376d3b52a49f1e4fb8936c360ac4 | /chainer_chemistry/saliency/visualizer/table_visualizer.py | 4e27c19c5a807d63f9d5844832ecaecdfb772adc | [
"MIT"
] | permissive | k-ishiguro/chainer-chemistry | 87e3db724de0e99042d9585cd4bd5fff38169339 | aec33496def16e76bdfbefa508ba01ab9f79a592 | refs/heads/master | 2021-07-06T22:58:20.127907 | 2019-02-04T02:51:34 | 2019-02-04T02:51:34 | 169,345,375 | 1 | 1 | MIT | 2020-07-30T06:04:13 | 2019-02-06T02:27:39 | Python | UTF-8 | Python | false | false | 3,289 | py | import numpy
import matplotlib.pyplot as plt
from chainer_chemistry.saliency.visualizer.base_visualizer import BaseVisualizer # NOQA
from chainer_chemistry.saliency.visualizer.visualizer_utils import abs_max_scaler # NOQA
class TableVisualizer(BaseVisualizer):
    """Saliency visualizer for table data"""

    def visualize(self, saliency, feature_names=None, save_filepath=None,
                  num_visualize=-1, scaler=abs_max_scaler,
                  sort='descending', title='Feature Importance', color='b',
                  xlabel='Importance', bbox_inches='tight'):
        """Visualize or save `saliency` in bar plot.

        Args:
            saliency (numpy.ndarray): 1-dim saliency array (num_feature,)
            feature_names (list or numpy.ndarray): Feature names of `saliency`
            save_filepath (str or None): If specified, file is saved to path.
            num_visualize (int): If positive value is set, only plot specified
                number of features.
            scaler (callable): function which takes `x` as input and outputs
                scaled `x`, for plotting.
            sort (str): Below sort options are supported.
                none: not sort
                ascending: plot in ascending order
                descending: plot in descending order
            title (str or None): title of plot
            color (str): color of bar in plot
            xlabel (str): x label legend
            bbox_inches (str or Bbox or None): used for `plt.savefig` option.
        """
        # --- type check ---
        if saliency.ndim != 1:
            raise ValueError("[ERROR] Unexpected value saliency.shape={}"
                             .format(saliency.shape))

        num_total_feat = saliency.shape[0]
        if feature_names is not None:
            # type check
            if len(feature_names) != num_total_feat:
                raise ValueError(
                    "feature_names={} must have same length with `saliency`"
                    .format(feature_names))
        else:
            # default feature names are simply the indices 0..num_feature-1
            feature_names = numpy.arange(num_total_feat)

        if sort == 'none':
            indices = numpy.arange(num_total_feat)
        elif sort == 'ascending':
            # NOTE(review): argsort()[::-1] yields indices in *descending*
            # saliency order, and the 'descending' branch below yields
            # ascending order -- the two branches look swapped relative to
            # their names. barh draws index 0 at the bottom, so the on-screen
            # ordering is inverted again; confirm intent before changing.
            indices = numpy.argsort(saliency)[::-1]
        elif sort == 'descending':
            indices = numpy.argsort(saliency)
        else:
            raise ValueError("[ERROR] Unexpected value sort={}".format(sort))
        saliency = saliency[indices]
        feature_names = numpy.asarray(feature_names)[indices]

        if scaler is not None:
            # Normalize to [-1, 1] or [0, 1]
            saliency = scaler(saliency)

        if num_visualize > 0:
            # keep only the first num_visualize entries of the sorted arrays
            saliency = saliency[:num_visualize]
            if feature_names is not None:
                feature_names = feature_names[:num_visualize]
        else:
            # non-positive value means "plot everything"
            num_visualize = num_total_feat

        plt.figure()
        plt.clf()
        if title is not None:
            plt.title(title)
        plt.barh(range(num_visualize), saliency, color=color, align='center')
        plt.yticks(range(num_visualize), feature_names)
        plt.xlabel(xlabel)
        if save_filepath:
            plt.savefig(save_filepath, bbox_inches=bbox_inches)
        else:
            plt.show()
| [
"acc1ssnn9terias@gmail.com"
] | acc1ssnn9terias@gmail.com |
75ca027e72150e7fcc19ed4fba648acdc5b2451e | f74f645082e7211ea2b84cd5f9e33b1f30ec0372 | /Adafruit_I2C.py | c797bbdb693e10284fade282e46aa86e1f4bf272 | [] | no_license | Dbroqua/Adafruit_CharLCDPlate | c859b52ff958cf2fab8d2fcc2744bb76c2953d3f | 69c2c2a568df3dd84a4ebe9cd180ff90785ad127 | refs/heads/master | 2016-09-06T13:49:20.994566 | 2013-09-23T11:20:56 | 2013-09-23T11:20:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,581 | py | #!/usr/bin/python
import smbus
# ===========================================================================
# Adafruit_I2C Class
# ===========================================================================
class Adafruit_I2C :
    """Thin wrapper around smbus.SMBus for register-level I2C access on a
    Raspberry Pi.

    NOTE: Python 2 code throughout (`except IOError, err` and print
    statements); it will not run unmodified on Python 3.
    """

    @staticmethod
    def getPiRevision():
        "Gets the version number of the Raspberry Pi board"
        # Courtesy quick2wire-python-api
        # https://github.com/quick2wire/quick2wire-python-api
        try:
            with open('/proc/cpuinfo','r') as f:
                for line in f:
                    if line.startswith('Revision'):
                        # revision codes ending in '1'/'2' are rev-1 boards
                        return 1 if line.rstrip()[-1] in ['1','2'] else 2
        except:
            # NOTE(review): bare except maps any read failure to revision 0.
            # If /proc/cpuinfo has no 'Revision' line the function falls
            # through and implicitly returns None (not 0) -- confirm callers
            # tolerate that.
            return 0

    @staticmethod
    def getPiI2CBusNumber():
        # Gets the I2C bus number /dev/i2c#
        # Rev-1 boards expose the user bus on i2c-0; later boards on i2c-1.
        return 1 if Adafruit_I2C.getPiRevision() > 1 else 0

    def __init__(self, address, busnum=-1, debug=False):
        # address: 7-bit I2C slave address of the target device
        # busnum:  explicit bus number, or -1 to auto-detect from the board rev
        self.address = address
        # By default, the correct I2C bus is auto-detected using /proc/cpuinfo
        # Alternatively, you can hard-code the bus version below:
        # self.bus = smbus.SMBus(0); # Force I2C0 (early 256MB Pi's)
        # self.bus = smbus.SMBus(1); # Force I2C1 (512MB Pi's)
        self.bus = smbus.SMBus(
            busnum if busnum >= 0 else Adafruit_I2C.getPiI2CBusNumber())
        self.debug = debug

    def reverseByteOrder(self, data):
        "Reverses the byte order of an int (16-bit) or long (32-bit) value"
        # Courtesy Vishal Sapre
        # Number of bytes = half the number of hex digits ('L' suffix stripped
        # for Python 2 longs).
        byteCount = len(hex(data)[2:].replace('L','')[::2])
        val = 0
        for i in range(byteCount):
            val = (val << 8) | (data & 0xff)
            data >>= 8
        return val

    def errMsg(self):
        # Shared IOError handler: report the address and return -1 as a
        # sentinel error value.
        print('Error accessing 0x%02X: Check your I2C address' % self.address)
        return -1

    def write8(self, reg, value):
        "Writes an 8-bit value to the specified register/address"
        try:
            self.bus.write_byte_data(self.address, reg, value)
            if self.debug:
                print("I2C: Wrote 0x%02X to register 0x%02X" % (value, reg))
        except IOError, err:
            return self.errMsg()

    def write16(self, reg, value):
        "Writes a 16-bit value to the specified register/address pair"
        try:
            self.bus.write_word_data(self.address, reg, value)
            if self.debug:
                print ("I2C: Wrote 0x%02X to register pair 0x%02X,0x%02X" % (value, reg, reg+1))
        except IOError, err:
            return self.errMsg()

    def writeList(self, reg, list):
        "Writes an array of bytes using I2C format"
        # NOTE: parameter name `list` shadows the builtin.
        try:
            if self.debug:
                print("I2C: Writing list to register 0x%02X:" % reg)
                print(list)
            self.bus.write_i2c_block_data(self.address, reg, list)
        except IOError, err:
            return self.errMsg()

    def readList(self, reg, length):
        "Read a list of bytes from the I2C device"
        try:
            results = self.bus.read_i2c_block_data(self.address, reg, length)
            if self.debug:
                print ("I2C: Device 0x%02X returned the following from reg 0x%02X" %
                       (self.address, reg))
                print results
            return results
        except IOError, err:
            return self.errMsg()

    def readU8(self, reg):
        "Read an unsigned byte from the I2C device"
        try:
            result = self.bus.read_byte_data(self.address, reg)
            if self.debug:
                print ("I2C: Device 0x%02X returned 0x%02X from reg 0x%02X" %
                       (self.address, result & 0xFF, reg))
            return result
        except IOError, err:
            return self.errMsg()

    def readS8(self, reg):
        "Reads a signed byte from the I2C device"
        try:
            result = self.bus.read_byte_data(self.address, reg)
            # convert 8-bit two's complement to a signed Python int
            if result > 127: result -= 256
            if self.debug:
                print ("I2C: Device 0x%02X returned 0x%02X from reg 0x%02X" %
                       (self.address, result & 0xFF, reg))
            return result
        except IOError, err:
            return self.errMsg()

    def readU16(self, reg):
        "Reads an unsigned 16-bit value from the I2C device"
        try:
            result = self.bus.read_word_data(self.address,reg)
            if (self.debug):
                print "I2C: Device 0x%02X returned 0x%04X from reg 0x%02X" % (self.address, result & 0xFFFF, reg)
            return result
        except IOError, err:
            return self.errMsg()

    def readS16(self, reg):
        "Reads a signed 16-bit value from the I2C device"
        # NOTE(review): unlike readS8, no sign conversion is applied here --
        # smbus.read_word_data returns an unsigned word, so this appears to
        # return the raw unsigned value; confirm against callers.
        try:
            result = self.bus.read_word_data(self.address,reg)
            if (self.debug):
                print "I2C: Device 0x%02X returned 0x%04X from reg 0x%02X" % (self.address, result & 0xFFFF, reg)
            return result
        except IOError, err:
            return self.errMsg()
if __name__ == '__main__':
    # Smoke test: constructing the wrapper exercises I2C bus auto-detection.
    # (Python 2 print statements; address 0 is not a real device.)
    try:
        bus = Adafruit_I2C(address=0)
        print "Default I2C bus is accessible"
    except:
        print "Error accessing default I2C bus"
| [
"damien.broqua@gmail.com"
] | damien.broqua@gmail.com |
b26f46287c34e1c977675f1a1da4680ab338880a | d578dc0955028ee86656f06423ceaa4a50c5ba92 | /Final Project/Centroid.py | 6915378daa05fc205509cf703953b537c1f5ae35 | [] | no_license | ohsuz/CSI4106-Repository | 1b17482d9215c0dcfff60edb90494833d11e069a | d81c5a2600b7c8bf67dd02fbd30138a7f8245e47 | refs/heads/master | 2023-06-11T10:29:33.009843 | 2021-06-26T18:02:50 | 2021-06-26T18:02:50 | 230,781,524 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,509 | py | #!/usr/bin/env python
# coding: utf-8
# In[33]:
#import packages
import glob
import os
import imutils
import cv2
#read the images from the folder
images = [cv2.imread(file) for file in glob.glob("C:/Users/cjtol/CSI4106/Pocket/*.png")]
#covert image to grayscale
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
#blur to reduce high frequency noise
blurred = cv2.GaussianBlur(gray, (5, 5), 0)
#binarize the image with a threshold
thresh = cv2.threshold(blurred, 60, 255, cv2.THRESH_BINARY)[1]
#thresh = cv2.adaptiveThreshold(blurred,255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 7, 2)[1]
#get the rock
cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
cv2.CHAIN_APPROX_SIMPLE)
cnts = imutils.grab_contours(cnts)
# In[34]:
# compute the center of the contour
def get_contour(image):
for c in cnts:
M = cv2.moments(c)
if M["m00"] != 0:
cX = int(M["m10"] / M["m00"])
cY = int(M["m01"] / M["m00"])
else:
cX, cY = 0, 0
#draw contour and center of shape
cv2.drawContours(image, [c], -1, (0, 255, 0), 2)
cv2.circle(image, (cX, cY), 7, (255, 255, 255), -1)
cv2.putText(image, "center", (cX - 20, cY - 20),
cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 2)
cv2.imwrite(os.path.join(path , "output.png"),image)
#display modified image
cv2.imshow("Image", image)
cv2.waitKey(0)
# In[ ]:
| [
"gl_gd@naver.com"
] | gl_gd@naver.com |
7b36da46e9525ecd5334eed1e12a60755f7ecced | 3691f0b571612fd550095af0d7c93f22d5a8061c | /ERP/ERP/urls.py | c6682a389f1e01fb60f097995d0cfef7300d1560 | [] | no_license | sambapython/db16 | 29db8c6be5a3628cd3063cc0d8e092ae8ea69d60 | 98d751ffc7277bb4e28f90b7cb470d667ab47593 | refs/heads/master | 2021-02-11T22:02:34.251113 | 2020-03-03T03:43:29 | 2020-03-03T03:43:29 | 244,532,780 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 931 | py | """ERP URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path,include
urlpatterns = [
    path('admin/', admin.site.urls),                  # Django admin site
    path('sales/', include("sales.urls")),            # sales app routes
    path('pur/', include("pur.urls")),                # purchasing app routes
    path('accounting/', include("accounting.urls")),  # accounting app routes
    path('stock/', include("stock.urls")),            # stock app routes
]
| [
"sambapython@gmail.com"
] | sambapython@gmail.com |
3eb6d193517b5ddaa0e343c16513ad7fff94180c | 216ee8ab7ca468638aa2dc6ccb7f89ea76dd0b35 | /Project/Solutions/b_print_to_csv/scraper.py | 53d02a6e14c5fe2a789323404aefe9f094fd9c4d | [] | no_license | satishkbe/python-level-2 | 7b44d0f676bc830f0a94f823aeb6e0f628215628 | 834411f74d54019b9675a87004fd39072dc5fba0 | refs/heads/master | 2023-03-13T20:32:17.993938 | 2021-03-16T00:19:07 | 2021-03-16T00:19:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,099 | py | import csv
import requests
from bs4 import BeautifulSoup
URL = "https://en.wikipedia.org/wiki/Member_states_of_the_United_Nations"
# Todo: Update with your info
name = None
email = None
assert name and email
headers = {'User-Agent': f'{name} ({email})'}
response = requests.get(URL, headers=headers)
assert response.status_code == 200, f'Response got {response.status_code}'
html_doc = response.text
soup = BeautifulSoup(html_doc, 'html.parser')
table = soup.find('table', class_='wikitable')
countries = []
for row in table.find_all('tr'):
name_column = row.find('td')
if name_column:
country_dict = {}
name_link = name_column.find_all('a')[1]
name = name_link.string
country_dict['Name'] = name
date_column = row.find_all('td')[1]
date_joined = date_column.span.text
country_dict['Date Joined'] = date_joined
countries.append(country_dict)
with open('data/countries.csv', 'w') as file:
writer = csv.DictWriter(file, fieldnames=('Name', 'Date Joined'))
writer.writeheader()
writer.writerows(countries)
| [
"ariannedee@gmail.com"
] | ariannedee@gmail.com |
b8b6bb828f4ec50c41c2757085e46ecb0e773461 | 38a4b0b758125419b21ea417530bd418c5eeb0d1 | /Homework/Barker HW4-1.py | 3d991d85ddc48a0a9878c15ab1eb43b045ae8463 | [] | no_license | acbarker19/PHY299-Class-Assignments | 4c12146bf83ac3737748dc25587a720ac2a61da8 | d17d445e70c8228c247f92c772bb69471e69024f | refs/heads/master | 2021-07-03T04:32:57.555060 | 2021-05-18T01:59:59 | 2021-05-18T01:59:59 | 236,010,951 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 261 | py | # HW 4-1
# Alec Barker
import math

# Q2.5.2: Gauss's constant G = 1 / AGM(1, sqrt(2)), where AGM is the
# arithmetic-geometric mean, computed by the classic iteration
#   a <- (a + b) / 2,  b <- sqrt(a * b)


def agm(x, y):
    """Return the arithmetic-geometric mean of x and y (both > 0).

    The iteration converges quadratically; for IEEE doubles the two
    iterates become exactly equal after a handful of steps (the same
    termination test the original loop used).
    """
    while x != y:
        x, y = 0.5 * (x + y), math.sqrt(x * y)
    return x


a = agm(1, math.sqrt(2))
G = 1 / a
# Bug fix: the original printed `a` (the AGM itself, ~1.19814) while
# labelling it Gauss's constant; G = 1/AGM(1, sqrt(2)) ~ 0.83463.
print("Gauss's Constant: ", G)
"acbarker19@gmail.com"
] | acbarker19@gmail.com |
c9b5f52e264f390c417cae5e4792a29258632755 | 14ecd6759326918a9875372244d3a340a8d7adb5 | /lib/liba/__init__.py | db7e4b0f994814c0aef317c213a4b11cc6b86ea3 | [] | no_license | tkhr-ssk/SamplePythonProject | 65cf264c158d4705e5afc9c6acd45d16f4858334 | a833946ed52a85b20812130351d5076073b6a8f0 | refs/heads/master | 2022-04-22T21:08:40.348668 | 2020-04-15T16:37:13 | 2020-04-15T16:37:13 | 255,956,301 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 20 | py | from .liba import *
| [
"tkhr.ssk@gmail.com"
] | tkhr.ssk@gmail.com |
4d2d3bc59a972fe23893c7a994e048f6ef031328 | e1591e08d14518483aad7058003a8b56f144e4fe | /route.py | 24138f21cd1f31bffcfff06d147324bb4160fd18 | [
"Apache-2.0"
] | permissive | ZhouRR/Baibumblebee | 672db67c705e92c237ff319c2ab3bc6368b7b6ed | 3100b9c2dc8cc4674ed1975bcda096f2f5d99033 | refs/heads/master | 2022-11-14T18:05:14.566928 | 2020-07-08T08:21:03 | 2020-07-08T08:21:03 | 277,741,367 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 979 | py | import os
from fastapi import FastAPI
import urls
from models.models import *
from views import media_view, headline_view, contents_view
app = FastAPI()


# --- REST endpoints: each handler delegates directly to its view module. ---

@app.get(urls.MEDIAS)
async def get_medias():
    return media_view.get_medias()

@app.post(urls.MEDIAS)
async def post_medias(media: Media):
    return media_view.post_medias(media)

@app.get(urls.HEAD_LINES)
async def get_headlines():
    return headline_view.get_headlines()

@app.post(urls.HEAD_LINES)
async def post_headlines(media: Media):
    # NOTE(review): accepts a Media body although it creates a headline --
    # confirm the request model is intentional.
    return headline_view.post_headlines(media)

@app.get(urls.CONTENTS)
async def get_contents():
    return contents_view.get_contents()

@app.post(urls.CONTENTS)
async def post_contents(headline: HeadLine):
    return contents_view.post_contents(headline)


def execute_from_command_line(cmd=None):
    # NOTE(review): os.system runs the string through the shell (injection
    # risk for untrusted input); calling with the default cmd=None raises
    # TypeError.
    os.system(cmd)


def main():
    # Launch the API via uvicorn on all interfaces, port 8000.
    execute_from_command_line('uvicorn route:app --reload --host 0.0.0.0 --port 8000')
    pass


if __name__ == '__main__':
    main()
| [
"zhourr@ohs-sys.com"
] | zhourr@ohs-sys.com |
fd7326df55080e803d9ef5dcf9ef75f5bfd70c6c | 2c872fedcdc12c89742d10c2f1c821eed0470726 | /pbase/day12/code/text2.py | 416a82954f3664da8fa1e1eb23e2cb329b0f8028 | [] | no_license | zuigehulu/AID1811 | 581c3c7a37df9fa928bc632e4891fc9bafe69201 | 10cab0869875290646a9e5d815ff159d0116990e | refs/heads/master | 2020-04-19T16:33:04.174841 | 2019-01-30T07:58:24 | 2019-01-30T07:58:24 | 168,307,918 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 479 | py | # 编写函数fun 基功能是计算下列多项式的和
# Sn = 1 + 1/1! + 1/2! + 1/3! + .... + 1/n!
# (建议用数学模块中的factorial)
# 求当n得20时 Sn的值
# 即:
# print(fun(20)) # 2.718281828...
import math
# def sumfun(n):
# Sn = 1
# for x in range(1,n+1):
# Sn += 1/math.factorial(x)
# return Sn
# print(sumfun(20))
def sumfun(n):
    """Print and return Sn = 1 + 1/1! + 1/2! + ... + 1/n!.

    This is the n-th partial sum of the series for e, so sumfun(20)
    prints ~2.718281828459045.

    Bug fix: the original printed the sum but returned None, although the
    spec comment above expects `print(fun(20))` to work; the sum is now
    returned as well (the printed output is unchanged).
    """
    s = sum(1 / math.factorial(x) for x in range(n + 1))
    print(s)
    return s

sumfun(20)
"442315617@qq.com"
] | 442315617@qq.com |
1fcd20eceee08a20fa3e8e20698adae133f508da | 86cd22354f2431087c9b3ff06188f071afb3eb72 | /71.Simplify Path.py | e9e05e520df721762f6fc00d81f8232f8e74ae22 | [] | no_license | tlxxzj/leetcode | 0c072a74d7e61ef4700388122f2270e46c4ac22e | 06dbf4f5b505a6a41e0d93367eedd231b611a84b | refs/heads/master | 2023-08-31T11:04:34.585532 | 2023-08-31T08:25:51 | 2023-08-31T08:25:51 | 94,386,828 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 380 | py | class Solution(object):
def simplifyPath(self, path):
"""
:type path: str
:rtype: str
"""
ret = []
for i in path.split('/'):
if i == '' or i == '.': continue
if i == '..':
if len(ret)>0: ret.pop()
else:
ret.append(i)
return '/' + '/'.join(ret)
| [
"tlxxzj@163.com"
] | tlxxzj@163.com |
def consume_tag(sequence, pos):
    """Read one tag token starting at `pos` and return (tag, new_pos).

    The tag ends at the first space, at a closing bracket, or at the end
    of the string.

    Bug fix: the original tested `sequence[pos] != ' '` *before* checking
    `pos < len(sequence)`, so a tag running to the end of the string
    raised IndexError instead of terminating.
    """
    tag = ''
    while pos < len(sequence) and sequence[pos] != ' ' and sequence[pos] != ')':
        tag += sequence[pos]
        pos += 1
    return tag, pos
def tokenize(sequence):
    """Tokenize the bracketed notation into '(', ')' and tag tokens."""
    tokens = []
    i, n = 0, len(sequence)
    while i < n:
        ch = sequence[i]
        if ch == '(' or ch == ')':
            # brackets are single-character tokens
            tokens.append(ch)
            i += 1
        elif ch == ' ':
            # whitespace separates tokens but produces none
            i += 1
        else:
            # anything else starts a tag; consume_tag advances the cursor
            tag, i = consume_tag(sequence, i)
            tokens.append(tag)
    return tokens
| [
"danny.kh93@gmail.com"
] | danny.kh93@gmail.com |
04994d6cae6c4f70af3f4f30afa2278ef9092dfa | b38d648c04d766bc87b650aae9ec63b0c55e523b | /chapter2/k_bandit.py | 33ac126a53bda17dd28c6f2bb4c77205d11a06fc | [] | no_license | MLasserre/reinforcement-learning | 6346ccf39fb39d69b295246abcb1f3659ed310b9 | 4817c3c0ee1e0bcc43cc0e66d8e1601b4b5ae963 | refs/heads/master | 2022-12-17T06:04:09.550224 | 2020-09-25T14:09:22 | 2020-09-25T14:09:22 | 291,924,568 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,719 | py | import numpy as np
import matplotlib as mpl
import matplotlib.ticker as mtick
import matplotlib.pyplot as plt
mpl.rc('text', usetex=True)
mpl.rc('font', family='serif')
# Functions initializing the true action values
def normal_initialization(k, mu=0., sigma=1.):
    """Draw the k true action values i.i.d. from N(mu, sigma)."""
    return np.random.normal(loc=mu, scale=sigma, size=k)
def constant_initialization(k, c=0.):
    """Return k identical true action values, each equal to c."""
    return np.repeat(c, k)
# Function controlling the time evolution of true action values
# in a non-stationary k-bandit
def GRW_evolution(q, sigma):
    """One Gaussian-random-walk step of the true action values."""
    noise = np.random.normal(loc=0.0, scale=sigma, size=len(q))
    return q + noise
class bandit:
    """A k-armed bandit with (optionally non-stationary) true action values.

    `q` holds the true values q*(a); `update_function` is applied to q after
    every pull (identity by default, i.e. a stationary bandit).
    """

    def __init__(self, k, q, update_function=lambda x: x):
        self.__k = k
        self.__q = q
        self.__ba = np.argmax(self.__q)  # Best action
        self.__uf = update_function

    def computeReward(self, A):
        """Pull arm A; return a reward ~ N(q[A], 1) and evolve the true values.

        Raises:
            ValueError: if A is outside [0, k).
        """
        # Bug fix: the bounds check compared against a module-level global
        # `k` (not self.__k) and silently returned None; the inline TODO
        # asked for an exception, so raise one.
        if A < 0 or A >= self.__k:
            raise ValueError("action must satisfy 0 <= A < {}".format(self.__k))
        reward = np.random.normal(loc=self.__q[A])
        self.__q = self.__uf(self.__q)
        self.__updateBestAction()
        return reward

    def getBestAction(self):
        return self.__ba

    def getNActions(self):
        return self.__k

    def __updateBestAction(self):
        # Bug fix: the original *returned* the argmax without storing it,
        # so getBestAction() never tracked a non-stationary bandit.
        self.__ba = np.argmax(self.__q)
class epsilon_greedy:
    """Epsilon-greedy action-value learner for a k-armed bandit.

    Keeps incremental action-value estimates Q and pull counts N.
    `update_function` maps the old count to the new one: the default
    N + 1 yields sample averages, while a constant (e.g. lambda N: 10)
    yields a fixed step size suited to non-stationary bandits.
    """

    def __init__(self, bandit, epsilon=0.1, n_step=1000,
                 update_function=lambda N: N+1, debug=False):
        self.__epsilon = epsilon
        self.__k = bandit.getNActions()
        self.__n_step = n_step
        self.__uf = update_function
        # Bug fix: these arrays were sized with the module-level global `k`
        # instead of self.__k, so the learner only worked by accident when
        # a global k happened to match the bandit.
        self.__Q = np.zeros(self.__k)
        self.__N = np.zeros(self.__k)
        self.__bandit = bandit
        self.__debug = debug
        if self.__debug:
            self.__opt_act_taken = []  # per-step: did we pick the true best arm?
            self.__list_rewards = []   # per-step observed rewards

    def __selectAction(self):
        """Exploit with probability 1 - epsilon, explore otherwise."""
        is_greedy = np.random.uniform() > self.__epsilon
        if is_greedy:
            return self.__exploitation()
        return self.__exploration()

    def __exploitation(self):
        # ties are broken by the lowest index (np.argmax behavior)
        return np.argmax(self.__Q)

    def __exploration(self):
        # Bug fix: used the module-level global `k` instead of self.__k.
        return np.random.randint(0, self.__k)

    def __updateValues(self, action, reward):
        # Incremental update: Q += (R - Q) / N, with N evolved by update_function.
        self.__N[action] = self.__uf(self.__N[action])
        self.__Q[action] += (reward - self.__Q[action]) / self.__N[action]

    def getInfo(self):
        """Return (optimal-action flags, rewards); requires debug=True.

        Raises:
            RuntimeError: if the learner was built with debug=False
                (implements the original "# Add exception" TODO).
        """
        if not self.__debug:
            raise RuntimeError("getInfo() requires debug=True")
        return (self.__opt_act_taken, self.__list_rewards)

    def learn(self):
        """Run n_step pulls, updating the value estimates after each."""
        for t in range(self.__n_step):
            action = self.__selectAction()
            reward = self.__bandit.computeReward(action)
            self.__updateValues(action, reward)
            if self.__debug:
                self.__opt_act_taken.append(action == self.__bandit.getBestAction())
                self.__list_rewards.append(reward)
if __name__ == "__main__":
save = True
k = 10
n_run = 2000
n_time = 1000
epsilons = [0., 0.1, 0.01]
results = {}
oa = '\% Optimal action'
ar = 'Average reward'
for epsilon in epsilons:
print("Epsilon: ", epsilon)
runs = {oa:[], ar:[]}
for i in range(n_run):
#q = constant_initialization(k) # True values
#B = bandit(k, q, lambda q: GRW_evolution(q, 0.01))
#learner = epsilon_greedy(B, epsilon, n_time,
# lambda N: 10, debug=True)
q = normal_initialization(k) # True values
B = bandit(k, q)
learner = epsilon_greedy(B, epsilon, n_time, debug=True)
learner.learn()
infos = learner.getInfo()
runs[oa].append(infos[0])
runs[ar].append(infos[1])
runs[oa] = np.array(runs[oa]).mean(axis=0)*100
runs[ar] = np.array(runs[ar]).mean(axis=0)
results[epsilon] = runs
fig, ax = plt.subplots()
for epsilon in results:
ax.plot(list(range(n_time)), results[epsilon][ar],
label='$\epsilon = ' + str(epsilon) + '$', linewidth=1)
ax.set_ylabel(ar)
ax.set_xlim([0,n_time])
ax.set_xlabel('Steps')
ax.legend()
if save:
fig.savefig("average_reward.pdf", transparent=True)
else:
plt.show()
fig, ax= plt.subplots()
for epsilon in results:
ax.plot(list(range(n_time)), results[epsilon][oa],
label='$\epsilon = ' + str(epsilon) + '$', linewidth=1)
ax.set_ylabel(oa)
ax.set_xlabel('Steps')
ax.set_xlim([0,n_time])
ax.set_ylim([0,100])
ax.yaxis.set_major_formatter(mtick.PercentFormatter())
ax.legend()
if save:
fig.savefig("optimal_action.pdf", transparent=True)
else:
plt.show()
| [
"marvin.lasserre@lip6.fr"
] | marvin.lasserre@lip6.fr |
class CreditData():
    """Ranks credit-card records by a weighted score.

    NOTE(review): `datalist` appears to be a pymongo cursor or similar
    (it supports .count() and integer/dict-style access) -- confirm.
    Intended call order: createData() normalizes the working copies,
    recommendation(weight) computes scores, getResultInformation(n)
    sorts and returns the top n original records.
    """
    def __init__(self, datalist):
        self.datalist = datalist
        self.resultlist = []  # per-record working copies (mutated by optimization)
        self.result = []      # accumulated output of getResultInformation()
        # index permutation, kept in sync with the sort so original records
        # can be recovered afterwards
        self.flag = list(range(0,self.datalist.count()))
        for count in range (0,self.datalist.count()):
            self.resultlist.append({})
            # copy using the first record's keys; assumes all records share them
            for key in self.datalist[0]:
                self.resultlist[count][key] = self.datalist[count][key]

    def recommendation(self,weight):
        # Weighted sum over the *raw* datalist fields (not the normalized
        # resultlist copies) -- NOTE(review): confirm that is intended.
        self.card_comperation = []
        for count in range (0,len(self.resultlist)):
            sum = 0  # NOTE: shadows the builtin `sum`
            for key in weight:
                sum += weight[key] * self.datalist[count][key]
            self.card_comperation.append(sum)

    def getResultInformation(self, number):
        # Selection-style sort of card_comperation into descending order,
        # permuting `flag` alongside so original indices are preserved.
        for i in range (0,len(self.resultlist)-1):
            for j in range (i+1, len(self.resultlist)):
                if self.card_comperation[j] > self.card_comperation[i]:
                    self.card_comperation[j], self.card_comperation[i] = self.card_comperation[i], self.card_comperation[j]
                    self.flag[j],self.flag[i] = self.flag[i],self.flag[j]
        # return the top `number` original records
        for i in range(0, number):
            self.result.append(self.datalist[self.flag[i]])
        return self.result

    def createData(self):
        # Normalize every numeric column of resultlist by its maximum.
        for key in self.resultlist[0]:
            try:
                # probe: non-numeric fields raise here and are skipped
                number = self.resultlist[0][key]+10
            except Exception as e:
                continue
            max_number = self.findMax(key)
            self.optimization(max_number,key)

    def optimization(self,divisor,key):
        # Divide one column in place; a zero divisor (all-zero column)
        # raises per element and leaves the values unchanged.
        for count in range (0 ,len(self.resultlist)):
            try:
                self.resultlist[count][key] /= divisor
            except Exception as e:
                print("zero can't be divided")

    def findMax(self, key):
        # NOTE(review): starts at 0, so an all-negative column reports max 0.
        max = 0  # shadows the builtin `max`
        for count in range (0, len (self.resultlist)):
            if self.resultlist[count][key] > max :
                max = self.resultlist[count][key]
        return max
"t106820003@ntut.edu.tw"
] | t106820003@ntut.edu.tw |
dd713f3a180a0c82f82e9b6a9e9358a8c8649ab4 | f4d78406cda8cb7e8232873dfd4d735763a36f07 | /result/migrations/0004_auto_20170331_2017.py | 5ac9fa86e89e90bb810b3663b4339951b7cc5e5d | [] | no_license | pratulyab/ipu-results-bot | 722b646a098f95e21bb12a47bcaff69d7e8a034a | aa000f28cad79ad49d14547203877247fae7327d | refs/heads/master | 2022-07-13T18:03:39.107959 | 2019-06-24T20:45:56 | 2019-06-24T20:45:56 | 193,555,061 | 0 | 0 | null | 2022-06-21T22:12:19 | 2019-06-24T18:05:45 | Python | UTF-8 | Python | false | false | 550 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2017-03-31 14:47
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Replaces the Score.verdict field with a boolean Score.passed flag
    # (existing rows get the default True).

    dependencies = [
        ('result', '0003_auto_20170331_1806'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='score',
            name='verdict',
        ),
        migrations.AddField(
            model_name='score',
            name='passed',
            field=models.BooleanField(default=True),
        ),
    ]
| [
"pratulyabubna@outlook.com"
] | pratulyabubna@outlook.com |
cd6c93b19fecb396cb0458d2561de26a3b8b110a | f40ad51a600e64f12710fc4847c356a35cd0a3d2 | /S08/oop/geometry.py | 69181f8bd1a3113ef4a969527338183d111a722a | [] | no_license | pymft/py-mft1 | 0aa1b854ea80e17e18c0eacc6f4dc7428a71af39 | f4657fe17e56b6f54bdc8b1076edfc388b85cb05 | refs/heads/master | 2020-05-09T09:32:59.020361 | 2019-07-05T13:59:19 | 2019-07-05T13:59:19 | 181,006,072 | 1 | 5 | null | 2019-05-03T20:06:03 | 2019-04-12T12:42:38 | Python | UTF-8 | Python | false | false | 940 | py | import math
class Parallelogram:
    """A parallelogram with sides `a`, `b` and the angle between them
    (in degrees)."""

    def __init__(self, a, b, angle):
        print("paralleogram", self.__class__)
        self.a = a
        self.b = b
        self.angle = angle

    @property
    def area(self):
        """Area = a * b * sin(angle)."""
        theta = math.radians(self.angle)
        return self.a * self.b * math.sin(theta)

    @property
    def perimeter(self):
        """Perimeter = 2 * (a + b)."""
        return 2 * (self.a + self.b)
class Diamond(Parallelogram):
    # A rhombus: a parallelogram whose two sides are equal.
    def __init__(self, a, angle):
        print("diamond")
        # both sides equal for a rhombus
        super().__init__(a, a, angle)
class Rectangle(Parallelogram):
    # A rectangle: a parallelogram with a 90-degree angle.
    def __init__(self, w, h):
        print("rect")
        super().__init__(w, h, 90)
#
# class Square(Rectangle):
# def __init__(self, a):
# super().__init__(a, a)
class Square(Diamond):
    # A square: a rhombus (Diamond) with a 90-degree angle.
    def __init__(self, a):
        print("square")
        super().__init__(a, 90)
#
# r = Rectangle(10, 4)
# print(r.area, r.perimeter)
s = Diamond(7, 45)
print(s.area, s.perimeter)
#
# print(s, hex(id(s)))
| [
"naeini.v@gmail.com"
] | naeini.v@gmail.com |
e1797abbb517a5b0d9e49e93536eb28f286dff74 | a214e706c875e0af7221c0c9ae193d9d93ee20a7 | /merge_pedmap.py | a521b00da550343f20052204c786390bad354afb | [] | no_license | inambioinfo/bioinformatics_scripts | fa2292e91ad4134204a09ace27c8a91ae70fa34c | 3a23611f382b7f3dd60e5e2abe841b84408c0d44 | refs/heads/master | 2020-03-20T21:17:10.163061 | 2017-03-28T23:41:39 | 2017-03-28T23:41:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,809 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from optparse import OptionParser
import os
__author__ = "Raony Guimarães"
__copyright__ = "Copyright 2012, Filter Analysis"
__credits__ = ["Raony Guimarães"]
__license__ = "GPL"
__version__ = "1.0.1"
__maintainer__ = "Raony Guimarães"
__email__ = "raonyguimaraes@gmail.com"
__status__ = "Production"
#run example
#python gatk.py -i alignment/exome.sorted.bam
# (Python 2 script: print statements below.)
parser = OptionParser()  # NOTE: optparse is deprecated; argparse is the modern choice
parser.add_option("-p", dest="p1",
                  help="PED File", metavar="pedfile")
parser.add_option("-q", dest="p2",
                  help="PED File", metavar="pedfile")
parser.add_option("-o", dest="outfile",
                  help="PED File", metavar="pedfile")
(options, args) = parser.parse_args()

# Derive the plink file-set prefixes by stripping the .ped extension.
f1 = ".".join(options.p1.split("/")[-1].split(".")[:-1])
f1 = options.p1.replace('.ped','')  # NOTE(review): overwrites the line above (dead assignment)
f2 = ".".join(options.p2.split("/")[-1].split(".")[:-1])
f2 = options.p2.replace('.ped','')
outfile = options.outfile
plink_dir = '/projects/relatedness/plink-1.07-x86_64'

# NOTE(review): every command below is interpolated into os.system(), so the
# file names go through the shell unquoted (whitespace/injection risk).
# Workflow: (1) trial merge to collect mismatching SNPs (.missnp),
# (2) exclude them from each file set, (3) merge the cleaned sets.

#first identify the ones to remove
command = '%s/plink --file %s --merge %s.ped %s.map --recode --out %s --noweb --geno 0' % (plink_dir, f1, f2, f2, outfile)
os.system(command)

#commando remove snps
command = 'mv %s.missnp removesnps' % (outfile)
os.system(command)

print 'remove snps in file one'
command = '%s/plink --file %s --recode --out %s.snpsless --noweb --exclude removesnps' % (plink_dir, f1, f1)
os.system(command)

print 'remove snps in file two'
command = '%s/plink --file %s --recode --out %s.snpsless --noweb --exclude removesnps' % (plink_dir, f2, f2)
os.system(command)

print 'finally merge'
command = '%s/plink --file %s.snpsless --merge %s.snpsless.ped %s.snpsless.map --recode --out %s --noweb --geno 0' % (plink_dir, f1, f2, f2, options.outfile)
os.system(command)
| [
"raonyguimaraes@gmail.com"
] | raonyguimaraes@gmail.com |
b5d478ae0c2d08d401203a1943d48fbf5d4a2357 | 7e46b27029a2ef5c04dca00d1f631c16f59d03e0 | /class_tree/class_tree/wsgi.py | 3e2d7d6351c027444cb013150d1bb137e7e13dd5 | [] | no_license | MCGallaspy/class_tree_proof | 4141454e0154985548fc63297a11a930953176b8 | eef71e950e2a7663ccc0924acd8f0cf288b90fec | refs/heads/master | 2021-01-10T17:00:53.345047 | 2016-02-03T22:37:25 | 2016-02-03T22:37:25 | 50,004,234 | 0 | 1 | null | 2021-10-11T22:55:40 | 2016-01-20T04:26:08 | Python | UTF-8 | Python | false | false | 397 | py | """
WSGI config for class_tree project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "class_tree.settings")
application = get_wsgi_application()
| [
"gallaspy.michael@gmail.com"
] | gallaspy.michael@gmail.com |
836236628e6676f74198897e9bb09b26ef6e8926 | 37b0de1e37bc313ad5c4735d288f0f2ccc6bca88 | /tests/test_paired.py | 9cf3c361cabf9ee88f9f5d935c87a3044e68aeb9 | [
"MIT"
] | permissive | kingdynasty/cutadapt | a65b46eb192fbff00ab404324d5960f1ab22cb79 | 49aa33ac46c5183a39acddbe85d58103ff7eecb8 | refs/heads/master | 2020-03-28T14:04:13.798989 | 2018-09-10T09:21:48 | 2018-09-10T09:21:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,702 | py | import os.path
import shutil
import tempfile
from itertools import product
import pytest
from cutadapt.__main__ import main
from utils import run, assert_files_equal, datapath, cutpath, redirect_stderr, temporary_path
@pytest.fixture(params=[1, 2])
def cores(request):
    # Parametrized fixture: every test requesting `cores` runs once per value.
    return request.param
def run_paired(params, in1, in2, expected1, expected2, cores):
    """Run cutadapt on a paired-end data set and compare both outputs.

    `params` may be a whitespace-separated option string or a list of
    arguments; `in1`/`in2` are input file names under the data directory,
    `expected1`/`expected2` the expected outputs under the cut directory.
    """
    if isinstance(params, str):
        params = params.split()
    else:
        # Bug fix: `params += ...` extended a caller-supplied list in
        # place, mutating the caller's object; work on a copy instead.
        params = list(params)
    params += ['--cores', str(cores), '--buffer-size=512']
    with temporary_path('tmp1-' + expected1) as p1, \
            temporary_path('tmp2-' + expected2) as p2:
        params += ['-o', p1, '-p', p2]
        params += [datapath(in1), datapath(in2)]
        assert main(params) is None
        assert_files_equal(cutpath(expected1), p1)
        assert_files_equal(cutpath(expected2), p2)
def run_interleaved(params, inpath1, inpath2=None, expected1=None, expected2=None, cores=1):
"""
Interleaved input or output (or both)
"""
assert not (inpath1 and inpath2 and expected1 and expected2)
assert not (expected2 and not expected1)
assert not (inpath2 and not inpath1)
if type(params) is str:
params = params.split()
params += ['--interleaved', '--cores', str(cores), '--buffer-size=512']
with temporary_path('tmp1-' + expected1) as tmp1:
params += ['-o', tmp1]
paths = [datapath(inpath1)]
if inpath2:
paths += [datapath(inpath2)]
if expected2:
with temporary_path('tmp2-' + expected2) as tmp2:
params += ['-p', tmp2]
assert main(params + paths) is None
assert_files_equal(cutpath(expected2), tmp2)
else:
assert main(params + paths) is None
assert_files_equal(cutpath(expected1), tmp1)
def test_paired_separate():
"""test separate trimming of paired-end reads"""
run('-a TTAGACATAT', 'paired-separate.1.fastq', 'paired.1.fastq')
run('-a CAGTGGAGTA', 'paired-separate.2.fastq', 'paired.2.fastq')
def test_paired_end_legacy(cores):
"""--paired-output, not using -A/-B/-G"""
# the -m 14 filters out one read, which should then also be filtered out in the second output file
# -q 10 should not change anything: qualities in file 1 are high enough,
# qualities in file 2 should not be inspected.
run_paired(
'-a TTAGACATAT -m 14 -q 10',
in1='paired.1.fastq', in2='paired.2.fastq',
expected1='paired.m14.1.fastq', expected2='paired.m14.2.fastq',
cores=cores
)
def test_untrimmed_paired_output():
with temporary_path("tmp-untrimmed.1.fastq") as untrimmed1:
with temporary_path("tmp-untrimmed.2.fastq") as untrimmed2:
run_paired(
['-a', 'TTAGACATAT',
'--untrimmed-output', untrimmed1,
'--untrimmed-paired-output', untrimmed2],
in1='paired.1.fastq', in2='paired.2.fastq',
expected1='paired-trimmed.1.fastq', expected2='paired-trimmed.2.fastq',
cores=1
)
assert_files_equal(cutpath('paired-untrimmed.1.fastq'), untrimmed1)
assert_files_equal(cutpath('paired-untrimmed.2.fastq'), untrimmed2)
def test_explicit_format_with_paired():
# Use --format=fastq with input files whose extension is .txt
with temporary_path("paired.1.txt") as txt1:
with temporary_path("paired.2.txt") as txt2:
shutil.copyfile(datapath("paired.1.fastq"), txt1)
shutil.copyfile(datapath("paired.2.fastq"), txt2)
run_paired(
'--format=fastq -a TTAGACATAT -m 14',
in1=txt1, in2=txt2,
expected1='paired.m14.1.fastq',
expected2='paired.m14.2.fastq',
cores=1
)
def test_no_trimming_legacy():
# make sure that this doesn't divide by zero
main([
'-a', 'XXXXX', '-o', '/dev/null', '-p', '/dev/null',
datapath('paired.1.fastq'), datapath('paired.2.fastq')])
def test_no_trimming():
# make sure that this doesn't divide by zero
main([
'-a', 'XXXXX', '-A', 'XXXXX', '-o', '/dev/null', '-p', '/dev/null',
datapath('paired.1.fastq'), datapath('paired.2.fastq')])
def test_missing_file():
with pytest.raises(SystemExit):
with redirect_stderr():
main(['-a', 'XX', '--paired-output', 'out.fastq', datapath('paired.1.fastq')])
def test_first_too_short(cores):
with pytest.raises(SystemExit):
with temporary_path("truncated.1.fastq") as trunc1:
# Create a truncated file in which the last read is missing
with open(datapath('paired.1.fastq')) as f:
lines = f.readlines()
lines = lines[:-4]
with open(trunc1, 'w') as f:
f.writelines(lines)
with redirect_stderr():
main(
'-a XX -o /dev/null --paired-output out.fastq'.split()
+ ['--cores', str(cores)]
+ [trunc1, datapath('paired.2.fastq')]
)
def test_second_too_short(cores):
with pytest.raises(SystemExit):
with temporary_path("truncated.2.fastq") as trunc2:
# Create a truncated file in which the last read is missing
with open(datapath('paired.2.fastq')) as f:
lines = f.readlines()
lines = lines[:-4]
with open(trunc2, 'w') as f:
f.writelines(lines)
with redirect_stderr():
main('-a XX -o /dev/null --paired-output out.fastq'.split()
+ ['--cores', str(cores)]
+ [datapath('paired.1.fastq'), trunc2])
def test_unmatched_read_names(cores):
with pytest.raises(SystemExit):
with temporary_path("swapped.1.fastq") as swapped:
# Create a file in which reads 2 and 1 are swapped
with open(datapath('paired.1.fastq')) as f:
lines = f.readlines()
lines = lines[0:4] + lines[8:12] + lines[4:8] + lines[12:]
with open(swapped, 'w') as f:
f.writelines(lines)
with redirect_stderr():
main('-a XX -o out1.fastq --paired-output out2.fastq'.split()
+ ['--cores', str(cores)]
+ [swapped, datapath('paired.2.fastq')])
def test_p_without_o(cores):
"""Option -p given but -o missing"""
with pytest.raises(SystemExit):
main('-a XX -p /dev/null'.split()
+ ['--cores', str(cores)]
+ [datapath('paired.1.fastq'), datapath('paired.2.fastq')])
def test_paired_but_only_one_input_file(cores):
"""Option -p given but only one input file"""
with pytest.raises(SystemExit):
main('-a XX -o /dev/null -p /dev/null'.split()
+ ['--cores', str(cores)]
+ [datapath('paired.1.fastq')])
def test_legacy_minlength(cores):
"""Ensure -m is not applied to second read in a pair in legacy mode"""
run_paired(
'-a XXX -m 27',
in1='paired.1.fastq', in2='paired.2.fastq',
expected1='paired-m27.1.fastq', expected2='paired-m27.2.fastq',
cores=cores
)
def test_paired_end(cores):
"""single-pass paired-end with -m"""
run_paired(
'-a TTAGACATAT -A CAGTGGAGTA -m 14',
in1='paired.1.fastq', in2='paired.2.fastq',
expected1='paired.1.fastq', expected2='paired.2.fastq',
cores=cores
)
def test_paired_anchored_back_no_indels():
run_paired(
'-a BACKADAPTER$ -A BACKADAPTER$ -N --no-indels',
in1='anchored-back.fasta', in2='anchored-back.fasta',
expected1='anchored-back.fasta', expected2="anchored-back.fasta",
cores=1
)
def test_paired_end_qualtrim(cores):
"""single-pass paired-end with -q and -m"""
run_paired(
'-q 20 -a TTAGACATAT -A CAGTGGAGTA -m 14 -M 90',
in1='paired.1.fastq', in2='paired.2.fastq',
expected1='pairedq.1.fastq', expected2='pairedq.2.fastq',
cores=cores
)
def test_paired_end_qualtrim_swapped(cores):
"""single-pass paired-end with -q and -m, but files swapped"""
run_paired(
'-q 20 -a CAGTGGAGTA -A TTAGACATAT -m 14',
in1='paired.2.fastq', in2='paired.1.fastq',
expected1='pairedq.2.fastq', expected2='pairedq.1.fastq',
cores=cores
)
def test_paired_end_cut(cores):
run_paired(
'-u 3 -u -1 -U 4 -U -2',
in1='paired.1.fastq', in2='paired.2.fastq',
expected1='pairedu.1.fastq', expected2='pairedu.2.fastq',
cores=cores
)
def test_paired_end_upper_a_only(cores):
run_paired(
'-A CAGTGGAGTA',
in1='paired.1.fastq', in2='paired.2.fastq',
expected1='paired-onlyA.1.fastq', expected2='paired-onlyA.2.fastq',
cores=cores
)
def test_discard_untrimmed(cores):
# issue #146
# the first adapter is a sequence cut out from the first read
run_paired(
'-a CTCCAGCTTAGACATATC -A XXXXXXXX --discard-untrimmed',
in1='paired.1.fastq', in2='paired.2.fastq',
expected1='empty.fastq', expected2='empty.fastq',
cores=cores
)
def test_discard_trimmed(cores):
run_paired(
'-A C -O 1 --discard-trimmed', # applies everywhere
in1='paired.1.fastq', in2='paired.2.fastq',
expected1='empty.fastq', expected2='empty.fastq',
cores=cores
)
def test_interleaved_in_and_out(cores):
"""Single-pass interleaved paired-end with -q and -m"""
run_interleaved(
'-q 20 -a TTAGACATAT -A CAGTGGAGTA -m 14 -M 90',
inpath1='interleaved.fastq', expected1='interleaved.fastq',
cores=cores
)
def test_interleaved_in(cores):
"""Interleaved input, two files output"""
run_interleaved(
'-q 20 -a TTAGACATAT -A CAGTGGAGTA -m 14 -M 90',
inpath1='interleaved.fastq',
expected1='pairedq.1.fastq', expected2='pairedq.2.fastq',
cores=cores
)
def test_interleaved_out(cores):
"""Two files input, interleaved output"""
run_interleaved(
'-q 20 -a TTAGACATAT -A CAGTGGAGTA -m 14 -M 90',
inpath1='paired.1.fastq', inpath2='paired.2.fastq',
expected1='interleaved.fastq',
cores=cores
)
def test_interleaved_neither_nor():
"""Option --interleaved used, but pairs of files given for input and output"""
with temporary_path("temp-paired.1.fastq") as p1:
with temporary_path("temp-paired.2.fastq") as p2:
params = '-a XX --interleaved'.split()
with redirect_stderr():
params += ['-o', p1, '-p1', p2, 'paired.1.fastq', 'paired.2.fastq']
with pytest.raises(SystemExit):
main(params)
def test_pair_filter_both(cores):
run_paired(
'--pair-filter=both -a TTAGACATAT -A GGAGTA -m 14',
in1='paired.1.fastq', in2='paired.2.fastq',
expected1='paired-filterboth.1.fastq', expected2='paired-filterboth.2.fastq',
cores=cores
)
def test_pair_filter_first(cores):
run_paired(
'--pair-filter=first -a TTAGACATAT -A GGAGTA -m 14',
in1='paired.1.fastq', in2='paired.2.fastq',
expected1='paired-filterfirst.1.fastq', expected2='paired-filterfirst.2.fastq',
cores=cores
)
def test_too_short_paired_output():
with temporary_path("temp-too-short.1.fastq") as p1:
with temporary_path("temp-too-short.2.fastq") as p2:
run_paired(
'-a TTAGACATAT -A CAGTGGAGTA -m 14 --too-short-output '
'{0} --too-short-paired-output {1}'.format(p1, p2),
in1='paired.1.fastq', in2='paired.2.fastq',
expected1='paired.1.fastq', expected2='paired.2.fastq',
cores=1
)
assert_files_equal(cutpath('paired-too-short.1.fastq'), p1)
assert_files_equal(cutpath('paired-too-short.2.fastq'), p2)
def test_too_long_output():
with temporary_path('temp-too-long.1.fastq') as p1:
with temporary_path('temp-too-long.2.fastq') as p2:
run_paired(
'-a TTAGACATAT -A CAGTGGAGTA -M 14 --too-long-output '
'{0} --too-long-paired-output {1}'.format(p1, p2),
in1='paired.1.fastq', in2='paired.2.fastq',
expected1='paired-too-short.1.fastq', expected2='paired-too-short.2.fastq',
cores=1
)
assert_files_equal(cutpath('paired.1.fastq'), p1)
assert_files_equal(cutpath('paired.2.fastq'), p2)
def test_too_short_output_paired_option_missing():
with temporary_path('temp-too-short.1.fastq') as p1:
with pytest.raises(SystemExit):
run_paired(
'-a TTAGACATAT -A CAGTGGAGTA -m 14 --too-short-output '
'{0}'.format(p1),
in1='paired.1.fastq', in2='paired.2.fastq',
expected1='paired.1.fastq', expected2='paired.2.fastq',
cores=1
)
def test_nextseq_paired(cores):
run_paired('--nextseq-trim 22', in1='nextseq.fastq', in2='nextseq.fastq',
expected1='nextseq.fastq', expected2='nextseq.fastq',
cores=cores)
def test_paired_demultiplex():
tempdir = tempfile.mkdtemp(prefix='cutadapt-tests.')
multiout1 = os.path.join(tempdir, 'demultiplexed.{name}.1.fastq')
multiout2 = os.path.join(tempdir, 'demultiplexed.{name}.2.fastq')
params = [
'-a', 'first=AACATTAGACA', '-a', 'second=CATTAGACATATCGG',
'-A', 'ignored=CAGTGGAGTA', '-A', 'alsoignored=AATAACAGTGGAGTA',
'-o', multiout1, '-p', multiout2,
datapath('paired.1.fastq'), datapath('paired.2.fastq')]
assert main(params) is None
assert_files_equal(cutpath('demultiplexed.first.1.fastq'), multiout1.format(name='first'))
assert_files_equal(cutpath('demultiplexed.second.1.fastq'), multiout1.format(name='second'))
assert_files_equal(cutpath('demultiplexed.unknown.1.fastq'), multiout1.format(name='unknown'))
assert_files_equal(cutpath('demultiplexed.first.2.fastq'), multiout2.format(name='first'))
assert_files_equal(cutpath('demultiplexed.second.2.fastq'), multiout2.format(name='second'))
assert_files_equal(cutpath('demultiplexed.unknown.2.fastq'), multiout2.format(name='unknown'))
shutil.rmtree(tempdir)
@pytest.mark.parametrize('name_op,l1,l2,m', list(product(
(('m', lambda x, y: x >= y), ('M', lambda x, y: x <= y)),
range(1, 5),
range(1, 5),
[(2, 3), (2, None), (None, 3)]
)))
def test_separate_minmaxlength(tmpdir, name_op, l1, l2, m):
"""Separate minimum lengths for R1 and R2"""
m1, m2 = m
name, func = name_op
inpath = str(tmpdir.join('separate_minlength.fasta'))
expected = str(tmpdir.join('separate_minlength_expected.fasta'))
outpath = str(tmpdir.join('out.fasta'))
record = '>r{}:{}\n{}\n'.format(l1, l2, 'A' * l1)
record += '>r{}:{}\n{}'.format(l1, l2, 'A' * l2)
with open(inpath, 'w') as f:
print(record, file=f)
with open(expected, 'w') as f:
if (m1 is None or func(l1, m1)) and (m2 is None or func(l2, m2)):
print(record, file=f)
assert os.path.exists(inpath)
assert os.path.exists(expected)
if m1 is None:
m1 = ''
if m2 is None:
m2 = ''
main(['--interleaved', '-o', outpath, '-' + name, '{}:{}'.format(m1, m2), inpath])
assert_files_equal(expected, outpath)
def test_separate_minlength_single():
"""Using separate minlengths for single-end data"""
with pytest.raises(SystemExit):
main(['-m', '5:7', datapath('small.fastq')])
| [
"marcel.martin@scilifelab.se"
] | marcel.martin@scilifelab.se |
a950a7a778b32ddbcd98fa1c1ccb038943d402eb | 499df219e4698a9f7c1f507fb197bcd9c1d70750 | /wiki_data_extraction.py | 8d919722ae1a62071fffab44ca02ae4054c17f6c | [] | no_license | relwell/nlp-rest-client | 020c2e75f8ca0c475d0edf9611f1a9a77bfa244e | 3f8e8e6dd9db31d8c1f97737fcdffe4cc494e6ec | refs/heads/master | 2020-04-12T21:02:58.160323 | 2013-10-03T18:26:33 | 2013-10-03T18:26:33 | 12,444,248 | 0 | 0 | null | 2013-09-23T16:41:59 | 2013-08-28T20:49:19 | Python | UTF-8 | Python | false | false | 1,170 | py | import requests
import json
import traceback
import sys
from boto import connect_s3
from boto.s3.prefix import Prefix
from multiprocessing import Pool
from nlp_client.services import TopEntitiesService, EntityDocumentCountsService, TopHeadsService
from nlp_client.caching import useCaching
useCaching(writeOnly=True)
def callServices(wid):
print "Working on", wid
try:
print wid, TopEntitiesService().nestedGet(wid)
print wid, EntityDocumentCountsService().nestedGet(wid)
print wid, TopHeadsService().nestedGet(wid)
print "Finished with", wid
return 1
except:
print "Problem with", wid
exc_type, exc_value, exc_traceback = sys.exc_info()
print "".join(traceback.format_exception(exc_type, exc_value, exc_traceback))
"""
bucketList = connect_s3().bucket('nlp-data').list(prefix='xml/', delimiter='/')
pool = Pool(processes=4)
result = pool.map(sendToWiki, bucketList)
"""
while True:
wids = [prefix.name.split('/')[-2] for prefix in connect_s3().get_bucket('nlp-data').list(prefix='xml/', delimiter='/') if isinstance(prefix, Prefix)]
Pool(processes=3).map(callServices, wids)
| [
"robert@wikia-inc.com"
] | robert@wikia-inc.com |
572b84a3f569162ee860e6f7b20ac524c04a19b9 | 6ab31b5f3a5f26d4d534abc4b197fe469a68e8e5 | /katas/kyu_7/linked_lists_get_nth_node.py | 1f2b39f86f418fb40df8cc42b845bc21a735c961 | [
"MIT"
] | permissive | mveselov/CodeWars | e4259194bfa018299906f42cd02b8ef4e5ab6caa | 1eafd1247d60955a5dfb63e4882e8ce86019f43a | refs/heads/master | 2021-06-09T04:17:10.053324 | 2017-01-08T06:36:17 | 2017-01-08T06:36:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 311 | py | class Node(object):
def __init__(self, data):
self.data = data
self.next = None
def get_nth(node, index):
current = node
dex = -1
while current is not None:
dex += 1
if dex == index:
return current
current = current.next
raise Exception
| [
"the-zebulan@users.noreply.github.com"
] | the-zebulan@users.noreply.github.com |
3103c090ae865762051578e4e5a1c6dd75ac7c27 | 4f96a22dc45d85047b36b270142a375c3e485587 | /flossColor.py | 3d2485e8b54d0b3da94f28de002764645b9b91a2 | [] | no_license | rickbanghart/PythonDev | 2e530f8f4ac8950422b103a5daf05079c1f1b936 | a81b6288d41348f7c4c6b0277fe39a48e82a229d | refs/heads/master | 2021-01-11T14:54:51.140472 | 2017-01-30T01:02:24 | 2017-01-30T01:02:24 | 80,249,762 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,212 | py | import mysql.connector
import csv
from Tkinter import Tk, Frame, BOTH
class Example(Frame):
def __init__(self, parent):
Frame.__init__(self, parent, background="white")
self.parent = parent
self.initUI()
def initUI(self):
self.parent.title("Simple")
self.pack(fill=BOTH, expand=1)
def update_color(red, green, blue, dmc):
query = "INSERT INTO colors (red, green, blue, dmc) VALUES (%s,%s,%s,%s)"
data = (red, green, blue,dmc)
cursor.execute(query,data)
cnx.commit()
class MyDialect(csv.Dialect):
strict = True
skipinitialspace = True
quoting = csv.QUOTE_ALL
quotechar = '"'
delimiter = ','
lineterminator = '\n'
cnx = mysql.connector.connect(user='root', password='',
host='127.0.0.1',
database='test')
cursor = cnx.cursor()
fname = 'DMCFlossRGB.csv'
root = Tk()
root.geometry("250x150+300+300")
app = Example(root)
root.mainloop()
#fh = open(fname,'r')
my_list = []
all_text = '';
with open(fname,'r') as fh:
reader = csv.reader(fh)
for row in reader:
print row[0]
cnx.close() | [
"banghart@msu.edu"
] | banghart@msu.edu |
d7f63dcc0bc4be0be92e1b193db9abad6b55f611 | c9500ad778b8521aaa85cb7fe3239989efaa4799 | /plugins/microsoft_intune/icon_microsoft_intune/actions/get_managed_apps/schema.py | 950f7ebf08d7248b32cb5c69cb6007c0c35c5b04 | [
"MIT"
] | permissive | rapid7/insightconnect-plugins | 5a6465e720f114d71b1a82fe14e42e94db104a0b | 718d15ca36c57231bb89df0aebc53d0210db400c | refs/heads/master | 2023-09-01T09:21:27.143980 | 2023-08-31T10:25:36 | 2023-08-31T10:25:36 | 190,435,635 | 61 | 60 | MIT | 2023-09-14T08:47:37 | 2019-06-05T17:05:12 | Python | UTF-8 | Python | false | false | 4,712 | py | # GENERATED BY KOMAND SDK - DO NOT EDIT
import insightconnect_plugin_runtime
import json
class Component:
DESCRIPTION = "Returns InTune manageable apps"
class Input:
APP = "app"
class Output:
MANAGED_APPS = "managed_apps"
class GetManagedAppsInput(insightconnect_plugin_runtime.Input):
schema = json.loads("""
{
"type": "object",
"title": "Variables",
"properties": {
"app": {
"type": "string",
"title": "App",
"description": "Application ID or name, if empty returns all applications",
"order": 1
}
}
}
""")
def __init__(self):
super(self.__class__, self).__init__(self.schema)
class GetManagedAppsOutput(insightconnect_plugin_runtime.Output):
schema = json.loads("""
{
"type": "object",
"title": "Variables",
"properties": {
"managed_apps": {
"type": "array",
"title": "Managed Apps",
"description": "Application details",
"items": {
"$ref": "#/definitions/value"
},
"order": 1
}
},
"definitions": {
"value": {
"type": "object",
"title": "value",
"properties": {
"@odata.context": {
"type": "string",
"title": "Odata Context",
"description": "Odata context",
"order": 2
},
"@odata.type": {
"type": "string",
"title": "Odata Type",
"description": "Odata type",
"order": 1
},
"appAvailability": {
"type": "string",
"title": "App Availability",
"description": "App availability",
"order": 17
},
"appStoreUrl": {
"type": "string",
"title": "App Store URL",
"description": "App store URL",
"order": 20
},
"createdDateTime": {
"type": "string",
"title": "Created Datetime",
"description": "Created datetime",
"order": 8
},
"description": {
"type": "string",
"title": "Description",
"description": "Description",
"order": 5
},
"developer": {
"type": "string",
"title": "Developer",
"description": "Developer",
"order": 14
},
"displayName": {
"type": "string",
"title": "Display Name",
"description": "Display Name",
"order": 4
},
"id": {
"type": "string",
"title": "ID",
"description": "ID",
"order": 3
},
"informationUrl": {
"type": "string",
"title": "Information URL",
"description": "Information URL",
"order": 12
},
"isFeatured": {
"type": "boolean",
"title": "Is Featured",
"description": "Is featured",
"order": 10
},
"largeIcon": {
"type": "object",
"title": "Large Icon",
"description": "Large icon",
"order": 7
},
"lastModifiedDateTime": {
"type": "string",
"title": "Last Modified Datetime",
"description": "Last modified datetime",
"order": 9
},
"minimumSupportedOperatingSystem": {
"type": "object",
"title": "Minimum Supported Operating System",
"description": "Minimum supported operating system",
"order": 21
},
"notes": {
"type": "string",
"title": "Notes",
"description": "Notes",
"order": 15
},
"owner": {
"type": "string",
"title": "Owner",
"description": "Owner",
"order": 13
},
"packageId": {
"type": "string",
"title": "Package ID",
"description": "Package ID",
"order": 19
},
"privacyInformationUrl": {
"type": "string",
"title": "Privacy Information URL",
"description": "Privacy information URL",
"order": 11
},
"publisher": {
"type": "string",
"title": "Publisher",
"description": "Publisher",
"order": 6
},
"publishingState": {
"type": "string",
"title": "Publishing State",
"description": "Publishing state",
"order": 16
},
"version": {
"type": "string",
"title": "Version",
"description": "Version",
"order": 18
}
}
}
}
}
""")
def __init__(self):
super(self.__class__, self).__init__(self.schema)
| [
"noreply@github.com"
] | noreply@github.com |
92ec5116f94e22513a33ddba3fce5e3a11905699 | 52ceddf5260bb96bbf34c1057c7973cfc1a265d9 | /auctions/migrations/0002_auctionlistings.py | 8d5cec639a04af032dccc704592dee2024d34e55 | [] | no_license | soumyadeepm04/Project2 | 607ba4601abd7487c779d4187950ef581c4832f7 | 8ab40befc12555470ba0255f86b75c76672e7ba8 | refs/heads/master | 2023-06-01T05:37:27.855994 | 2021-06-17T20:08:44 | 2021-06-17T20:08:44 | 377,622,452 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 704 | py | # Generated by Django 3.2.3 on 2021-06-12 02:54
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('auctions', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='AuctionListings',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=64)),
('description', models.CharField(max_length=1000)),
('starting_bid', models.IntegerField()),
('urls', models.URLField(blank=True, null=True)),
],
),
]
| [
"bmahapatra@c02xk3umjgh5.myfiosgateway.com"
] | bmahapatra@c02xk3umjgh5.myfiosgateway.com |
56e49ec8b756e2762d4f46ee992731ee54be86f1 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_116/379.py | 45af30e36a16a8b8f0a6a9536d9e5d1ddb753e2b | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,246 | py | def hasLine(squares, symbol):
for i in range(4):
if squares[i][0] == squares[i][1] == squares[i][2] == squares[i][3] == symbol:
return True
for i in range(4):
if squares[0][i] == squares[1][i] == squares[2][i] == squares[3][i] == symbol:
return True
if squares[0][0] == squares[1][1] == squares[2][2] == squares[3][3] == symbol:
return True
if squares[0][3] == squares[1][2] == squares[2][1] == squares[3][0] == symbol:
return True
return False
def hasEmpty(squares):
for i in range(4):
for j in range(4):
if squares[i][j] == '.':
return True
return False
file = open("A-large.in")
n = int(file.readline())
for case in range(n):
squares = [list(file.readline()) for i in range(4)]
file.readline()
print("Case #{:d}:".format(case+1)),
Tpos = None
for i in range(4):
if 'T' in squares[i]:
index = squares[i].index('T')
Tpos = (i, index)
break
if Tpos != None:
squares[Tpos[0]][Tpos[1]] = 'X'
if hasLine(squares, 'X'):
print("X won")
else:
if Tpos != None:
squares[Tpos[0]][Tpos[1]] = 'O'
if hasLine(squares, 'O'):
print("O won")
else:
if hasEmpty(squares):
print("Game has not completed")
else:
print("Draw")
file.close() | [
"miliar1732@gmail.com"
] | miliar1732@gmail.com |
a68b946bee03f149a3cf844c5ea961d9f4502911 | 6c0389947b6a6c69254955f3646bca8c2882ad41 | /app/static/lib/clasificador.py | 7e161b706e11519ca8248acece71cfd74f514cb1 | [] | no_license | Redoxfox/mysite | e9380e23164e170609decf14549f0c8e67e3d36b | 318684e538227286680edcd05935258d7e70de11 | refs/heads/master | 2022-06-28T12:44:41.560625 | 2022-06-06T04:26:44 | 2022-06-06T04:26:44 | 190,919,199 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 17,520 | py | def clasificar(liga, id_eq1, id_eq2):
import decimal
import pymysql.cursors
#from Ligas.lib.Bbinaria import binaria
#from Ligas.lib.Oquicksort import quicksort
from Bbinaria import binaria
from Oquicksort import quicksort
# Connect to the database
connection = pymysql.connect(host='localhost',
user='root',
password='123456',
db='perfume',
charset='utf8mb4',
cursorclass=pymysql.cursors.DictCursor)
resultado = {}
resultado1 = {}
cursor = connection.cursor()
# Read a single record
url = "http://localhost:8000/actualizar/" + str(id) + "/" + str(liga) + "/"
sql1 = "SELECT nombre FROM equipos WHERE id= %s "
cursor.execute(sql1, (id_eq1))
nom_eq1 = cursor.fetchall()
sql2 = "SELECT nombre FROM equipos WHERE id= %s "
cursor.execute(sql2, (id_eq2))
nom_eq2 = cursor.fetchall()
sql3 = "select nro_fecha, equipo_1, equipo_2, gol_eq1, gol_eq2 from calendario where id_liga= %s and estado='JUGADO';"
cursor.execute(sql3, (liga))
goles_liga = cursor.fetchall()
id_e1 = int(id_eq1)
id_e2 = int(id_eq2)
historial = []
cuota_local = []
cuota_visitante = []
p_derrota = []
p_empate = []
p_victoria = []
derrota = []
empate = []
victoria = []
empate_eq1_local = 0
victoria_eq1_local = 0
derrota_eq1_local = 0
empate_eq1_visitante = 0
victoria_eq1_visitante = 0
derrota_eq1_visitante = 0
empate_eq2 = 0
victoria_eq2 = 0
derrota_eq2 = 0
empate_eq2_local = 0
victoria_eq2_local = 0
derrota_eq2_local = 0
empate_eq2_visitante = 0
victoria_eq2_visitante = 0
derrota_eq2_visitante = 0
for rows in goles_liga:
gol_eq1 = int(rows["gol_eq1"])
gol_eq2 = int(rows["gol_eq2"])
equipo_1 = int(rows["equipo_1"])
equipo_2 = int(rows["equipo_2"])
nro_equipos = len(historial)
empate_eq1 = 0
victoria_eq1 = 0
derrota_eq1 = 0
empate_eq2 = 0
victoria_eq2 = 0
derrota_eq2 = 0
Goles_favoreq1 = 0
Goles_favoreq2 = 0
Goles_contraeq1 = 0
Goles_contraeq2 = 0
puntos_eq1 = 0
puntos_eq2 = 0
if gol_eq1 == gol_eq2:
victoria_eq1 += 0
derrota_eq1 += 0
empate_eq1 += 1
victoria_eq2 += 0
derrota_eq2 += 0
empate_eq2 += 1
Goles_favoreq1 += gol_eq1
Goles_favoreq2 += gol_eq2
Goles_contraeq1 += gol_eq2
Goles_contraeq2 += gol_eq1
puntos_eq1 += 1
puntos_eq2 += 1
if id_e1 == equipo_1:
empate_eq1_local += 1
if id_e2 == equipo_2:
empate_eq2_visitante += 1
if id_e2 == equipo_1:
empate_eq2_local += 1
if id_e1 == equipo_2:
empate_eq1_visitante += 1
if gol_eq1 > gol_eq2:
victoria_eq1 += 1
derrota_eq1 += 0
empate_eq1 += 0
victoria_eq2 += 0
derrota_eq2 += 1
empate_eq2 += 0
Goles_favoreq1 += gol_eq1
Goles_favoreq2 += gol_eq2
Goles_contraeq1 += gol_eq2
Goles_contraeq2 += gol_eq1
puntos_eq1 += 3
puntos_eq2 += 0
if id_e1 == equipo_1:
victoria_eq1_local += 1
if id_e2 == equipo_2:
derrota_eq2_visitante += 1
if id_e2 == equipo_1:
derrota_eq2_local += 1
if id_e1 == equipo_2:
victoria_eq1_visitante += 1
if gol_eq1 < gol_eq2:
victoria_eq1 += 0
derrota_eq1 += 1
empate_eq1 += 0
victoria_eq2 += 1
derrota_eq2 += 0
empate_eq2 += 0
Goles_favoreq1 += gol_eq1
Goles_favoreq2 += gol_eq2
Goles_contraeq1 += gol_eq2
Goles_contraeq2 += gol_eq1
puntos_eq1 += 0
puntos_eq2 += 3
if id_e1 == equipo_1:
derrota_eq1_local += 1
if id_e2 == equipo_2:
victoria_eq2_visitante += 1
if id_e2 == equipo_1:
victoria_eq2_local += 1
if id_e1 == equipo_2:
derrota_eq1_visitante += 1
if nro_equipos == 0:
POS = len(historial) + 1
Nre_E = int(equipo_1)
PJ = 1
GF = Goles_favoreq1
GC = Goles_contraeq1
GD = Goles_favoreq1 - Goles_contraeq1
PG = victoria_eq1
PP = derrota_eq1
PE = empate_eq1
PTOS = puntos_eq1
historial.append([POS, Nre_E, PJ, GF, GC, GD, PG, PP, PE, PTOS])
POS = len(historial)
Nre_E = int(equipo_2)
PJ = 1
GF = Goles_favoreq2
GC = Goles_contraeq2
GD = Goles_favoreq2 - Goles_contraeq2
PG = victoria_eq2
PP = derrota_eq2
PE = empate_eq2
PTOS = puntos_eq2
historial.append([POS, Nre_E, PJ, GF, GC, GD, PG, PP, PE, PTOS])
# print(historial)
else:
derecha = len(historial) - 1
ordenadoM = quicksort(historial, 0, derecha, 1)
datos1 = binaria(ordenadoM, int(equipo_1), 1)
datos2 = binaria(ordenadoM, int(equipo_2), 1)
Nre_E = int(equipo_1)
GF = Goles_favoreq1
GC = Goles_contraeq1
GD = Goles_favoreq1 - Goles_contraeq1
PG = victoria_eq1
PP = derrota_eq1
PE = empate_eq1
PTOS = puntos_eq1
Nre_E2 = int(equipo_2)
GF2 = Goles_favoreq2
GC2 = Goles_contraeq2
GD2 = Goles_favoreq2 - Goles_contraeq2
PG2 = victoria_eq2
PP2 = derrota_eq2
PE2 = empate_eq2
PTOS2 = puntos_eq2
if datos1 != None:
ordenadoM[datos1][0] = datos1 + 1
ordenadoM[datos1][1] = Nre_E
ordenadoM[datos1][3] = ordenadoM[datos1][3] + GF
ordenadoM[datos1][4] = ordenadoM[datos1][4] + GC
ordenadoM[datos1][5] = ordenadoM[datos1][5] + GD
ordenadoM[datos1][6] = ordenadoM[datos1][6] + PG
ordenadoM[datos1][7] = ordenadoM[datos1][7] + PP
ordenadoM[datos1][8] = ordenadoM[datos1][8] + PE
ordenadoM[datos1][2] = ordenadoM[datos1][6] + ordenadoM[datos1][7] + ordenadoM[datos1][8]
ordenadoM[datos1][9] = ordenadoM[datos1][9] + PTOS
if datos2 != None:
ordenadoM[datos2][0] = datos2 + 1
ordenadoM[datos2][1] = Nre_E2
ordenadoM[datos2][2] = ordenadoM[datos2][2] + 1
ordenadoM[datos2][3] = ordenadoM[datos2][3] + GF2
ordenadoM[datos2][4] = ordenadoM[datos2][4] + GC2
ordenadoM[datos2][5] = ordenadoM[datos2][5] + GD2
ordenadoM[datos2][6] = ordenadoM[datos2][6] + PG2
ordenadoM[datos2][7] = ordenadoM[datos2][7] + PP2
ordenadoM[datos2][8] = ordenadoM[datos2][8] + PE2
ordenadoM[datos2][9] = ordenadoM[datos2][9] + PTOS2
if datos1 == None:
POS = len(historial) + 1
Nre_E = int(equipo_1)
PJ = 1
GF = Goles_favoreq1
GC = Goles_contraeq1
GD = Goles_favoreq1 - Goles_contraeq1
PG = victoria_eq1
PP = derrota_eq1
PE = empate_eq1
PTOS = puntos_eq1
historial.append([POS, Nre_E, PJ, GF, GC, GD, PG, PP, PE, PTOS])
if datos2 == None:
POS = len(historial)
Nre_E = int(equipo_2)
PJ = 1
GF = Goles_favoreq2
GC = Goles_contraeq2
GD = Goles_favoreq2 - Goles_contraeq2
PG = victoria_eq2
PP = derrota_eq2
PE = empate_eq2
PTOS = puntos_eq2
historial.append([POS, Nre_E, PJ, GF, GC, GD, PG, PP, PE, PTOS])
resultado[0] = (0, 0, id, url, liga)
ordenadoM = quicksort(historial, 0, len(historial) - 1, 9)
pos_eq1 = 0
pos_eq2 = 0
cont = 0
for i in range(len(ordenadoM)):
indice = len(ordenadoM) - 1 - i
idclud = ordenadoM[indice][1]
sql1 = "SELECT nombre FROM equipos WHERE id= %s "
cursor.execute(sql1, (idclud))
nom_clud = cursor.fetchall()
cont = cont + 1
pos = i + 1
clud = nom_clud[0]
# clud = ordenadoM[indice][1]
pj = ordenadoM[indice][2]
gf = ordenadoM[indice][3]
gc = ordenadoM[indice][4]
dg = ordenadoM[indice][5]
pg = ordenadoM[indice][6]
pp = ordenadoM[indice][7]
pe = ordenadoM[indice][8]
ptos = ordenadoM[indice][9]
# datos = binaria(ordenadoM, int(pos2))
resultado1[i] = (pos, clud, pj, gf, gc, dg, pg, pp, pe, ptos)
if idclud == id_e1:
pos_eq1 = indice
if idclud == id_e2:
pos_eq2 = indice
#print(pos_eq1)
#print(pos_eq2)
division = decimal.Decimal(72) / decimal.Decimal(7)
# print(division)
P_GJEQ1 = decimal.Decimal(ordenadoM[pos_eq1][6]) / decimal.Decimal(ordenadoM[pos_eq1][2])
P_GJLEQ1 = decimal.Decimal(victoria_eq1_local) / decimal.Decimal((derrota_eq1_local + empate_eq1_local + victoria_eq1_local))
P_GJVEQ1 = decimal.Decimal(victoria_eq1_visitante) / decimal.Decimal((derrota_eq1_visitante + empate_eq1_visitante + victoria_eq1_visitante))
P_PJEQ1 = decimal.Decimal(ordenadoM[pos_eq1][7]) / decimal.Decimal(ordenadoM[pos_eq1][2])
P_PJLEQ1 = decimal.Decimal(derrota_eq1_local) / decimal.Decimal((derrota_eq1_local + empate_eq1_local + victoria_eq1_local))
P_PJVEQ1 = decimal.Decimal(derrota_eq1_visitante) / decimal.Decimal((derrota_eq1_visitante + empate_eq1_visitante + victoria_eq1_visitante))
P_EJEQ1 = decimal.Decimal(ordenadoM[pos_eq1][8]) / decimal.Decimal(ordenadoM[pos_eq1][2])
P_EJLEQ1 = decimal.Decimal(empate_eq1_local) / decimal.Decimal((derrota_eq1_local + empate_eq1_local + victoria_eq1_local))
P_EJVEQ1 = decimal.Decimal(empate_eq1_visitante )/ decimal.Decimal((derrota_eq1_visitante + empate_eq1_visitante + victoria_eq1_visitante))
P_GJEQ2 = decimal.Decimal(ordenadoM[pos_eq2][6]) / decimal.Decimal(ordenadoM[pos_eq2][2])
P_GJLEQ2 = decimal.Decimal(victoria_eq2_local) / decimal.Decimal((derrota_eq2_local + empate_eq2_local + victoria_eq2_local))
P_GJVEQ2 = decimal.Decimal(victoria_eq2_visitante) / decimal.Decimal((derrota_eq2_visitante + empate_eq2_visitante + victoria_eq2_visitante))
P_PJEQ2 = decimal.Decimal(ordenadoM[pos_eq2][7]) / decimal.Decimal(ordenadoM[pos_eq2][2])
P_PJLEQ2 = decimal.Decimal(derrota_eq2_local) / decimal.Decimal((derrota_eq2_local + empate_eq2_local + victoria_eq2_local))
P_PJVEQ2 = decimal.Decimal(derrota_eq2_visitante) / decimal.Decimal((derrota_eq2_visitante + empate_eq2_visitante + victoria_eq2_visitante))
P_EJEQ2 = decimal.Decimal(ordenadoM[pos_eq2][8]) / decimal.Decimal(ordenadoM[pos_eq2][2])
P_EJLEQ2 = decimal.Decimal(empate_eq2_local) / decimal.Decimal((derrota_eq2_local + empate_eq2_local + victoria_eq2_local))
P_EJVEQ2 = decimal.Decimal(empate_eq2_visitante) / decimal.Decimal((derrota_eq2_visitante + empate_eq2_visitante + victoria_eq2_visitante))
p_derrota.append(P_PJEQ1)
p_derrota.append(P_PJLEQ1)
p_derrota.append(P_PJVEQ1)
p_derrota.append(P_PJEQ2)
p_derrota.append(P_PJLEQ2)
p_derrota.append(P_PJVEQ2)
p_empate.append(P_EJEQ1)
p_empate.append(P_EJLEQ1)
p_empate.append(P_EJVEQ1)
p_empate.append(P_EJEQ2)
p_empate.append(P_EJLEQ2)
p_empate.append(P_EJVEQ2)
p_victoria.append(P_GJEQ1)
p_victoria.append(P_GJLEQ1)
p_victoria.append(P_GJVEQ1)
p_victoria.append(P_GJEQ2)
p_victoria.append(P_GJLEQ2)
p_victoria.append(P_GJVEQ2)
derrota.append(ordenadoM[pos_eq1][7])
derrota.append(ordenadoM[pos_eq2][7])
derrota.append(derrota_eq1_local)
derrota.append(derrota_eq1_visitante)
derrota.append(derrota_eq2_local)
derrota.append(derrota_eq2_visitante)
empate.append(ordenadoM[pos_eq1][8])
empate.append(ordenadoM[pos_eq2][8])
empate.append(empate_eq1_local)
empate.append(empate_eq1_visitante)
empate.append(empate_eq2_local)
empate.append(empate_eq2_visitante)
victoria.append(ordenadoM[pos_eq1][6])
victoria.append(ordenadoM[pos_eq2][6])
victoria.append(victoria_eq1_local)
victoria.append(victoria_eq1_visitante)
victoria.append(victoria_eq2_local)
victoria.append(victoria_eq2_visitante)
p_victoria_local = (P_GJEQ1 * P_GJLEQ1) / (P_GJEQ1 * P_GJLEQ1 + P_PJEQ1 * P_PJLEQ1 + P_EJEQ1 * P_EJLEQ1)
p_derrota_local = (P_PJEQ1 * P_PJLEQ1) / (P_GJEQ1 * P_GJLEQ1 + P_PJEQ1 * P_PJLEQ1 + P_EJEQ1 * P_EJLEQ1)
p_empate_local = (P_EJEQ1 * P_EJLEQ1) / (P_GJEQ1 * P_GJLEQ1 + P_PJEQ1 * P_PJLEQ1 + P_EJEQ1 * P_EJLEQ1)
p_victoria_visitante = (P_GJEQ2 * P_GJVEQ2) / (P_GJEQ2 * P_GJVEQ2 + P_PJEQ2 * P_PJVEQ2 + P_EJEQ2 * P_EJVEQ2)
p_derrota_visitante = (P_PJEQ2 * P_PJVEQ2) / (P_GJEQ2 * P_GJVEQ2 + P_PJEQ2 * P_PJVEQ2 + P_EJEQ2 * P_EJVEQ2)
p_empate_visitante = (P_EJEQ2 * P_EJVEQ2) / (P_GJEQ2 * P_GJVEQ2 + P_PJEQ2 * P_PJVEQ2 + P_EJEQ2 * P_EJVEQ2)
if p_victoria_local > 0:
p_victoria_local = 1 / p_victoria_local
else:
p_victoria_local = 100
if p_derrota_local > 0:
p_derrota_local = 1 / p_derrota_local
else:
p_derrota_local = 100
if p_empate_local > 0:
p_empate_local = 1 / p_empate_local
else:
p_empate_local = 100
if p_victoria_visitante > 0:
p_victoria_visitante = 1 / p_victoria_visitante
else:
p_victoria_visitante = 100
if p_derrota_visitante > 0:
p_derrota_visitante = 1 / p_derrota_visitante
else:
p_derrota_visitante = 100
if p_empate_visitante > 0:
p_empate_visitante = 1 / p_empate_visitante
else:
p_empate_visitante = 100
cuota_local.append(p_victoria_local)
cuota_local.append(p_derrota_local)
cuota_local.append(p_empate_local)
cuota_local.append(p_victoria_visitante)
cuota_local.append(p_derrota_visitante)
cuota_local.append(p_empate_visitante)
Equipo_ganador = 0
if p_victoria_local < p_victoria_visitante:
Equipo_ganador = id_e1
else:
Equipo_ganador = id_e2
print(Equipo_ganador)
return Equipo_ganador
def porcentaje_acierto(liga):
import decimal
import pymysql.cursors
#from Ligas.lib.Bbinaria import binaria
#from Ligas.lib.Oquicksort import quicksort
from Bbinaria import binaria
from Oquicksort import quicksort
# Connect to the database
connection = pymysql.connect(host='localhost',
user='root',
password='123456',
db='perfume',
charset='utf8mb4',
cursorclass=pymysql.cursors.DictCursor)
resultado = {}
resultado1 = {}
cursor = connection.cursor()
# Read a single record
url = "http://localhost:8000/actualizar/" + str(id) + "/" + str(liga) + "/"
sql3 = "select nro_fecha, equipo_1, equipo_2, gol_eq1, gol_eq2 from calendario where id_liga= %s and estado='JUGADO';"
cursor.execute(sql3, (liga))
goles_liga = cursor.fetchall()
historial = []
cuota_local = []
cuota_visitante = []
p_derrota = []
p_empate = []
p_victoria = []
derrota = []
empate = []
victoria = []
empate_eq1_local = 0
victoria_eq1_local = 0
derrota_eq1_local = 0
empate_eq1_visitante = 0
victoria_eq1_visitante = 0
derrota_eq1_visitante = 0
empate_eq2 = 0
victoria_eq2 = 0
derrota_eq2 = 0
empate_eq2_local = 0
victoria_eq2_local = 0
derrota_eq2_local = 0
empate_eq2_visitante = 0
victoria_eq2_visitante = 0
derrota_eq2_visitante = 0
nro_partido = 0
Aciertos = 0
Desaciertos = 0
Porcentaje = 0
for rows in goles_liga:
gol_eq1 = int(rows["gol_eq1"])
gol_eq2 = int(rows["gol_eq2"])
equipo_1 = int(rows["equipo_1"])
equipo_2 = int(rows["equipo_2"])
id_ganador = 0
if gol_eq1 == gol_eq2:
id_ganador = 0
if gol_eq1 > gol_eq2:
id_ganador = equipo_1
if gol_eq1 < gol_eq2:
id_ganador = equipo_2
print(id_ganador)
li = clasificar(liga, equipo_1, equipo_2)
#print(li)
if li == id_ganador:
Aciertos += 1
nro_partido += 1
else:
Desaciertos += 1
nro_partido += 1
Porcentaje = (decimal.Decimal(Aciertos)/decimal.Decimal(nro_partido))*100
cuota_local.append(nro_partido)
cuota_local.append(Aciertos)
cuota_local.append(Desaciertos)
cuota_local.append(Porcentaje)
return cuota_local
#A = leeLista()
#A = [[5, 8, 5], [8, 5, 5], [2, 7, 5],[9, 6, 5], [20, 4, 5]]
#liga= 10
#l=porcentaje_acierto(liga)
print(l)
| [
"cmtrfenix@gmail.com"
] | cmtrfenix@gmail.com |
5a991fed1d4a0e7596274c8eb7335d9e09592e6a | 8f5f0c3ef83fdd482387973149738f6178477a42 | /medium/trees/next_right_pointer.py | e1eb4ce5802ddc9adc9779869feb56faa06352f2 | [] | no_license | nicokuzak/leetcode | 79a5771ad83786cc7dbfd790f8fffcf1ce58794e | 39b0235dc429a97a7cba0689d44641a6af6d7a32 | refs/heads/main | 2023-04-06T21:02:09.553185 | 2021-04-14T22:21:20 | 2021-04-14T22:21:20 | 336,847,511 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,414 | py | """You are given a perfect binary tree where all leaves are on the same level, and every parent has two children. The binary tree has the following definition:
struct Node {
int val;
Node *left;
Node *right;
Node *next;
}
Populate each next pointer to point to its next right node. If there is no next right node, the next pointer should be set to NULL.
Initially, all next pointers are set to NULL.
Follow up:
You may only use constant extra space.
Recursive approach is fine, you may assume implicit stack space does not count as extra space for this problem.
Example 1:
Input: root = [1,2,3,4,5,6,7]
Output: [1,#,2,3,#,4,5,6,7,#]
Explanation: Given the above perfect binary tree (Figure A), your function should populate each next pointer to point to its next right node, just like in Figure B. The serialized output is in level order as connected by the next pointers, with '#' signifying the end of each level.
"""
class Solution:
def connect(self, root: 'Node') -> 'Node':
if root is None or root.left is None:
return root
root.left.next = root.right #Child left -> Child Right
if root.next: #If it is a left node that has something to the right
root.right.next = root.next.left #Child right next is parent right's left
self.connect(root.left)
self.connect(root.right)
return root | [
"nicokuzak95@gmail.com"
] | nicokuzak95@gmail.com |
5858ee9c33f1c567825190fe61a5f7264ab65ba8 | e9853ad41b738c2704287740ade0936c86438366 | /garden.py | 0601a998016e508ab9aef969f09f9b0878e1821c | [] | no_license | pierre-geeraert/Leek-Wars | 37d2ca5d995e4e94d79c32f4f69e81d1716e0c58 | d54fbbd460a658216c7de98033bef80f9d519929 | refs/heads/master | 2021-01-13T01:21:10.893041 | 2019-08-09T08:23:49 | 2019-08-09T08:23:49 | 81,462,434 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,775 | py | from random import randint
from pipenv.utils import requests
import globalVar
from function_essential import request_api
import function_essential
import json
source_api_address=function_essential.source_api_address+"garden/"
def getGardenForLeek(leek_id):
try:
data_out_GetGardenForLeek = request_api(source_api_address + "get-leek-opponents/" + leek_id)
except:
print("request impossible for GetGardenForLeek")
return data_out_GetGardenForLeek
def getSoloChallenge(leek_id):
try:
data_out_getSoloChallenge = request_api(source_api_address + "get-solo-challenge/" + leek_id)
except:
print("request impossible for GetGardenForLeek")
return data_out_getSoloChallenge
def getAI():
try:
data_out_getAI = request_api(function_essential.source_api_address + "ai/get-farmer-ais")
except:
print("request impossible for getAI")
return data_out_getAI
def getLeekOpponents(leek_id):
try:
data_out_getFarmerOpponents = request_api(source_api_address+"get-leek-opponents/"+leek_id)
tab_opponents = data_out_getFarmerOpponents['opponents']
opponents1 = tab_opponents[0]
opponents2 = tab_opponents[1]
opponents3 = tab_opponents[2]
opponents4 = tab_opponents[3]
opponents5 = tab_opponents[4]
except:
print("request impossible for getLeekOpponents")
data_out_getFarmerOpponents,opponents1,opponents2,opponents3,opponents4,opponents5 = "error"
return opponents1,opponents2,opponents3,opponents4,opponents5
def theBaddestOpponents():
opponents1 = ""
opponents2 = ""
opponents3 = ""
opponents4 = ""
opponents5 = ""
opponents1,opponents2,opponents3,opponents4,opponents5 = getLeekOpponents(globalVar.leek_id)
data_out_theBaddestOpponents = baddestOpponents(opponents1,opponents2,opponents3,opponents4,opponents5)
return data_out_theBaddestOpponents
def baddestOpponents(opponents1,opponents2,opponents3,opponents4,opponents5):
global baddest_talent
baddest_talent = 1000000
global baddest_talent_opponents
for opponents in opponents1,opponents2,opponents3,opponents4,opponents5:
if opponents['talent'] < baddest_talent:
baddest_talent = opponents['talent']
baddest_talent_opponents = opponents
return baddest_talent_opponents
def bestOpponents(opponents1,opponents2,opponents3,opponents4,opponents5):
global best_talent
best_talent=0
global best_talent_opponents
for opponents in opponents1,opponents2,opponents3,opponents4,opponents5:
if opponents['talent'] > best_talent:
best_talent = opponents['talent']
best_talent_opponents = opponents
return best_talent_opponents | [
"pi@pi.fr"
] | pi@pi.fr |
d09cbbe00b827f394ca2273cd1219aa9bad0fd43 | 9c7091f82a5108261cbc3e5209f0e6df42f55530 | /node/src/fuzzers/peach_fuzzbang.py | 3a1d9883ba450b54c00eee87fd997b6a106f6edc | [
"MIT"
] | permissive | hatRiot/PeachOrchard | 881b24bdf8ceb5c1e23c989fdb612f8b70dfd192 | cd11ab0ccbcce2349408d5c2e4b651eb99a4e9c1 | refs/heads/master | 2021-06-18T03:27:03.835834 | 2019-09-23T19:24:02 | 2019-09-23T19:24:02 | 23,305,215 | 46 | 26 | MIT | 2021-06-10T19:48:39 | 2014-08-25T07:12:20 | Python | UTF-8 | Python | false | false | 3,101 | py | from src.core.log import *
from src.core import config
from src.core import utility
from src.core.fuzzer import Fuzzer
from re import findall
import os
class peach_fuzzbang(Fuzzer):
""" Class implements the interface for the Peach fuzzer. This has
been tested with FuzzBang as well as regular ol' Peach.
"""
def __init__(self):
self.name = "Peach FuzzBang"
def fetch_crashes(self):
"""
"""
base = config.MONITOR_DIR + '/' + config.SESSION
crashes = {}
# build a list of files from session root
pot_files = []
for (root, subFolders, files) in os.walk(base):
for file in files:
f = os.path.join(root, file)
pot_files.append(f.replace('\\', '/'))
# massage these into our crashes dictionary
for entry in pot_files:
if '_description.txt' in entry:
# found description entry, parse it
e = entry.rsplit('/', 2)
crashes[e[1]] = entry
return crashes
def get_status(self):
""" Parse the status file and pull the latest iteration update
"""
try:
data = None
spath = config.MONITOR_DIR + '/' + config.SESSION + '/' + 'status.txt'
with open(spath) as f:
data = f.read().split('\n')
# chop it up
status = None
data = [x for x in data if len(x) > 0]
if 'Test finished' in data[:-1]:
status = 'Completed'
else:
(cidx, total) = findall("Iteration (.*?) of (.*?) :", data[-1])[0]
status = '%s/%s' % (cidx, total)
except Exception, e:
utility.msg("Failed to parse status update: %s" % e, ERROR)
status = "Error"
return status
def check_session(self):
"""
"""
valid = False
try:
if config.MONITOR_DIR and os.path.isdir(config.MONITOR_DIR):
if config.SESSION:
# validate session
if config.SESSION not in os.listdir(config.MONITOR_DIR):
utility.msg("Session %s not found in %s" % (config.SESSION, config.MONITOR_DIR))
else:
valid = True
else:
# fetch latest version
tmp = os.listdir(config.MONITOR_DIR)
if len(tmp) <= 0:
utility.msg("No running sessions found", ERROR)
valid = False
else:
config.SESSION = tmp[-1]
utility.msg("Setting session to %s" % config.SESSION, LOG)
valid = True
else:
utility.msg("Directory '%s' not found" % config.MONITOR_DIR, ERROR)
valid = False
except Exception, e:
utility.msg("Error checking session: %s" % e, ERROR)
valid = False
return valid
| [
"shodivine@gmail.com"
] | shodivine@gmail.com |
0b3788d4fbdbbf609b1d07cec5135630f51a7c4b | ed7b5c24d9a13d0c717fd6f6293f3464f43d7cbf | /demo/sjh_web/demo55.py | 0dd3351eafd4b929e7b8c9d051f64ed3d14dee2a | [] | no_license | befallenStar/python | ccb93d456dc161a8087a78220a7aaab21320ab8b | e44ce8c11b820f03fe2d60dfa84053d8cc356c80 | refs/heads/master | 2022-12-07T18:34:03.091146 | 2020-08-20T02:33:56 | 2020-08-20T02:33:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,659 | py | # -*- encoding: utf-8 -*-
import urllib3
pcUserAgent = {
'IE-agent': 'Mozilla/5.0 (compatible; MSIE 9.0; Windwos NT 6.1; Trident/5.0;',
'firefox-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:69.0) Gecko/20100101 Firefox/69.0',
'chrome-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.90 Safari/537.36',
}
mobileUserAgent = {
'Touch capable Windows 8 device': 'Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.2; Trident/6.0; Touch)',
'Kindle Fire': 'Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_3; en-us; Silk/1.1.0-80) AppleWebKit/533.16 (KHTML, like Gecko) Version/5.0 Safari/533.16 Silk-Accelerated=true',
'iPad': 'Mozilla/5.0(iPad; U; CPU iPhone OS 3_2 like Mac OS X; en-us) AppleWebKit/531.21.10 (KHTML, like Gecko) Version/4.0.4 Mobile/7B314 Safari/531.21.10',
'Samsung Galaxy S3': 'Mozilla/5.0 (Linux; U; Android 4.0.4; en-gb; GT-I9300 Build/IMM76D) AppleWebKit/534.30 (KHTML, like Gecko) Version/4.0 Mobile Safari/534.30',
'BlackBerry': 'BlackBerry9700/5.0.0.862 Profile/MIDP-2.1 Configuration/CLDC-1.1 VendorID/331 UNTRUSTED/1.0 3gpp-gba',
'iPhone': 'Mozilla/5.0 (iPhone; CPU iPhone OS 5_1 like Mac OS X) AppleWebKit/534.46 (KHTML, like Gecko) Version/5.1 Mobile/9B179 Safari/7534.48.3',
'UC standard': 'NOKIA5700/ UCWEB7.0.2.37/28/999'
}
http = urllib3.PoolManager()
r = http.request('GET', 'http://www.baidu.com/s', {'wd': 'hello'},
pcUserAgent['ff-agent']) # 伪造头部信息欺骗服务器
print(r) # <urllib3.response.HTTPResponse object at 0x000002A0FB49EE88>
print(r.status) # 200
print(r.data.decode('utf-8'))
| [
"sy5622_5@126.com"
] | sy5622_5@126.com |
5b21a4c2067e74e7ff233876453a7bbb84d6ebc6 | 3bc4b502fdb5ffecdbecc9239a0c25746dc31022 | /Ch06/p157.py | 9df9fb2464141935daf597c1cf1f74a731857083 | [] | no_license | pkc-3/python | 68da873bbe7ad9a3e0db4e22ddaa412a9377720f | d8410d897c3784c6017f7edc215ce8763e557518 | refs/heads/master | 2023-05-31T06:40:30.279748 | 2021-06-10T09:00:09 | 2021-06-10T09:00:09 | 361,634,592 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 465 | py | #self 명령어 예
class multiply3:
#멤버 변수 없음
#생성자 없음
#동적 멤버변수 생성/초기화
def data(self,x,y):
self.x = x
self.y = y
#곱셈 연산
def mul(self):
result = self.x * self.y
self.display(result) #메서드 호출
#결과 출력
def display(self, result):
print("곱셈 = %d" % (result))
obj = multiply3() #기본 생성자
obj.data(10, 20)
obj.mul() | [
"pkc_3@naver.com"
] | pkc_3@naver.com |
e5f5323c7212d7287df0abcdd4111a39a6b78ce2 | 65747551b8fab58fce7c10355b1117f992d5ba38 | /project/submissions/avneesh/dwa_notebook.py | 3a1fc90d2c8dcffeeba30e292dc228234c12f2dd | [
"MIT"
] | permissive | naveenmoto/lablet102 | 4f88c95a93de22880afda3381466506a585a7b1a | 24de9daa4ae75cbde93567a3239ede43c735cf03 | refs/heads/main | 2023-06-29T05:24:18.215235 | 2021-08-02T03:33:54 | 2021-08-02T03:33:54 | 380,767,879 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,070 | py | """
Testing DWA
Current Notes:
Forget the map for a while
"""
# %% Import everything
# Libraries
from grid_loader import Grid
from astar_path import AStar
from simulator import Lidar
from dwa import DWA
# Modules
import numpy as np
from matplotlib import pyplot as plt
# %% Global variables
V_MAX = 1.2 # Maximum velocity (m/s)
ACC_MAX = 0.5 # Maximum acceleration (m/s^2)
W_MAX = 1.0 # Maximum angular velocity (rad/sec)
W_DOT_MAX = 3.0 # Maximum angular acceleration (rad/sec^2)
K_MAX = 4 # Curvature = 1/R (m^-1)
pred_horizon = 10 # Predict N steps in DWA paths
goal_threshold = 0.05 # Goal threshold (m)
t_sim_end = 30 # Time for which simulation should run (s)
w_cte = 1.0 # Weight for cross track error in cost calculation
w_speed = 1.0 # Weight for speed change in cost calculation
w_kappa = 1e-4 # Cost of kappa value
w_lidar_cost = 0.0 # Cost of the position on grid
img_path = "./data/circuit.png" # Grid path
grid_res = 0.05 # m / pixel
start = (6, 3)
end = (24, 25)
# %% DWA implementation
dwa_handler = DWA(V_MAX, W_MAX, ACC_MAX, W_DOT_MAX, K_MAX) # Paraemters
dwa_handler.set_path_props(pred_horizon, goal_threshold)
dwa_handler.set_weights(w_cte, w_speed, w_kappa, w_lidar_cost)
# %% Load grid and path
# Load grid and path
grid_obj = Grid()
grid_obj.load_from_image(img_path, grid_res)
# Astar path finding
astar_planner = AStar()
astar_planner.load_grid(grid_obj)
astar_path_m = astar_planner.get_route(start, end)
# Show start to end path
plt.figure()
plt.imshow(grid_obj.grid_data.T, cmap=plt.cm.gray_r, origin='lower',
extent=[0, grid_obj.w_m, 0, grid_obj.h_m])
plt.plot(start[0], start[1], 'g+', markersize=10)
plt.plot(end[0], end[1], 'r+', markersize=10)
plt.plot(astar_path_m[:, 0], astar_path_m[:, 1], 'r.')
plt.show()
# %% Run DWA on the 'astar_path_m' path
start_pose = (start[0], start[1], np.pi/2) # Starting pose
ref_path = astar_path_m # Reference path
lidar_sim = Lidar()
lidar_sim.set_grid(grid_obj)
# Define the cost of being on path
def grid_cost(pose):
"""
Estimate the cost of being at 'pose' on the
grid
Parameters:
- pose: list
Current pose of robot as (x, y, th)
Returns:
- cost: float
The cost of being at pose on the grid
"""
# Get the distances
distances = np.array(lidar_sim.get_beam_data(pose))
min_dist = np.min(distances)
if min_dist < lidar_sim.scan_line_samples[-1]:
cost = 1 / min_dist
else:
cost = 0
return cost
# dwa_handler.pose_cost = grid_cost
logs = dwa_handler.run_dwa(start_pose, ref_path, t_sim_end)
print(f"Simulation ended in {logs[-1, -2]:.3f} seconds")
# %% Plot path and everything
poses = logs[:, :3]
plt.figure()
plt.imshow(grid_obj.grid_data.T, cmap=plt.cm.gray_r, origin='lower',
extent=[0, grid_obj.w_m, 0, grid_obj.h_m])
plt.plot(start[0], start[1], 'g+', markersize=10)
plt.plot(end[0], end[1], 'r+', markersize=10)
plt.plot(ref_path[:, 0], ref_path[:, 1], 'g.')
plt.plot(poses[:, 0], poses[:, 1], 'r')
# %%
| [
"naveen@atimotors.com"
] | naveen@atimotors.com |
daf42c575a75cfd7aa10ea738eb758756f91f28a | 2b9c398f8d39e41da3f8e3356b2ad3d2ea5cb995 | /firestorage.py | eae4c03ba72ea1d06f21504ea49a0ce9943fa71b | [] | no_license | drDrozio/Firebase | 770152ff158535a6641a1065699463f49983092f | 618e98df1ebf5a774a2feccc4cbdd93402d47514 | refs/heads/master | 2022-12-01T10:39:48.558406 | 2020-08-12T15:24:48 | 2020-08-12T15:24:48 | 287,045,205 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 680 | py | import pyrebase
config={
"apiKey": "AIzaSyCJ_p-TCd7a16e5MiCSg0vcDPWGRjTAMbk",
"authDomain": "fire-8a42b.firebaseapp.com",
"databaseURL": "https://fire-8a42b.firebaseio.com",
"projectId": "fire-8a42b",
"storageBucket": "fire-8a42b.appspot.com",
"messagingSenderId": "218150852345",
"appId": "1:218150852345:web:d10d202f7961cc17917428",
"measurementId": "G-QRVRRVYWJM"
}
firebase=pyrebase.initialize_app(config)
storage=firebase.storage()
## Uploading files (Image,Text,Audio)
# storage.child("Images/newimage.jpg").put("messi.jpg")
# print("Image Uploaded")
storage.child("Images/newimage.jpg").download("downloaded.jgp")
print("Image Downloaded") | [
"ishanss2018@gmail.com"
] | ishanss2018@gmail.com |
d47b760098656ec22905595db57af143f04c9a99 | b5cf99c4ed0ff18e351394ae85a91068a74dcc16 | /libdemo/bs_demo.py | 445ff43c30b45cdfd2a3a0e39920958a494e0f96 | [] | no_license | srikanthpragada/DEMO_PYTHON_19_NOV_2019 | 8966d218af8531c8e77accf7e2740094e2c1902f | ac50fdbb7de94d671e0ab5274d6aadd133b70899 | refs/heads/master | 2020-09-14T08:19:55.453868 | 2019-12-23T03:00:07 | 2019-12-23T03:00:07 | 223,076,035 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 204 | py | from bs4 import BeautifulSoup
st = "<html><body><h1>Title1</h1><h2>Title2.1</h2><h2>Title2.2</h2></body></html>"
bs = BeautifulSoup(st, 'html.parser')
for tag in bs.find_all("h2"):
print(tag.text)
| [
"srikanthpragada@gmail.com"
] | srikanthpragada@gmail.com |
bc326f5540687fa91d3de9c892b798f6da959d3e | 2bd804f1e6bf3fe60fc62064a2896fd05b6d2428 | /main.py | cf303f65a9ef98d3c5d8ea496531b0e243d6a356 | [] | no_license | phamtony/rain-alert | 1a891577ef0f57342ad74e338ff84df0d94916e0 | d77bbec6c63b8369028464f5a0bf004110cd1c5f | refs/heads/master | 2023-04-13T22:46:59.857008 | 2021-04-21T01:02:50 | 2021-04-21T01:02:50 | 352,727,538 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,001 | py | import requests
import os
from twilio.rest import Client
url = "https://api.openweathermap.org/data/2.5/onecall"
api = os.environ.get("OWM_API_KEY")
account_sid = os.environ.get("account_sid")
auth_token= os.environ.get("auth_token")
lat = 30.332184
long = -81.655647
parameters = {
"lat": lat,
"lon": long,
"appid": api,
"exclude": "current,minutely,daily"
}
response = requests.get(url, params=parameters)
response.raise_for_status()
weather_data = response.json()
weather_slice = weather_data["hourly"][:12]
rain = False
for weather in weather_slice:
if int(weather["weather"][0]["id"]) < 700:
rain = True
if rain:
client = Client(account_sid, auth_token)
message = client.messages \
.create(
body="It's going to rain today! Remember to bring an umbrella",
from_='+18064524372',
to='+13233329156'
)
print(message.status)
#Use pythonanywhere and load this script, It'll be on the cloud and will run every day at 7AM | [
"tony.p@lashowroom.com"
] | tony.p@lashowroom.com |
24543972bc718a9ecee4d5687413361c96bdff0e | de5d5e533a8340011a93b7aad3a4584b1398d509 | /webmus/cms/helpers.py | 92c8b441a5ce5581185573216971e0d760d71df9 | [] | no_license | jonrscott/webmus | 1fd856eb67e9335116b415237ae32f7e34cbea0c | cdd3850deee7382af396f67bfbaa8e2fe20569cb | refs/heads/master | 2021-06-06T13:58:52.323965 | 2021-04-13T13:25:03 | 2021-04-13T13:25:03 | 11,060,987 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,065 | py | from bs4 import BeautifulSoup
from bs4.element import NavigableString
from django.urls import reverse
def _wrap_things(soup, start, end, number):
assert(start.parent == end.parent)
wrapper = soup.new_tag(
'div', **{'class': ['section'], 'id': 'section%d' % number})
content = soup.new_tag(
'div', **{'class': ['section-content']})
wrapper.append(content)
tag = start
while tag is not None:
next_tag = tag.next_sibling
if tag == start:
tag.replace_with(wrapper)
content.append(tag)
else:
content.append(tag.extract())
if tag == end:
tag = None
else:
tag = next_tag
if tag == end:
content.append(tag.extract())
tag = None
def prev_not_string(node):
result = node.previous_sibling
while isinstance(result, NavigableString):
result = result.previous_sibling
return result
def next_not_string(node):
result = node.next_sibling
while isinstance(result, NavigableString):
result = result.next_sibling
return result
def preprocess_html(fn):
"""
Decorator to remove unwanted bits from html raw string.
"""
def wrapped(html):
return fn(html.replace(' ', ''))
return wrapped
def html_fragment_processor(fn):
"""
Decorator to use BeautifulSoup to process just a bit of html.
"""
def wrapped(html):
soup = BeautifulSoup(html)
body = fn(soup, soup.body)
return "\n".join(body.prettify().split('\n')[1:-1])
return wrapped
@preprocess_html
@html_fragment_processor
def simplify_html(soup=None, body=None):
"""
Cleanup dodgy Summernote HTML.
Some day Summernote will do this already"""
# no weird spans
for span in body.find_all('span'):
span.replace_with_children()
# no <br>s
for br in body.find_all('br'):
br.extract()
# no empty paras
for p in body.find_all('p'):
for thing in p.contents:
if isinstance(thing, NavigableString) and thing.strip() == '':
thing.extract()
if len(p.contents) == 0:
p.extract()
# no inline styles!
for el in body.find_all():
if 'style' in el.attrs:
del el.attrs['style']
return body
@html_fragment_processor
def process_content_for_display(soup, body):
body = create_implied_sections(soup, body)
body = process_local_links(soup, body)
return body
def create_implied_sections(soup, body):
"""
If a paragraph exists on its own containing just '---', then use that
as a section marker, and jemmy the bits around it into
<div class="section"> containers.
So:
<p>Blah</p>
<p>---</p>
<p>MoreBlah</p>
becomes
<div class="section section1">
<p>Blah</p>
</div>
<div class="section section2">
<p>MoreBlah</p>
</div>
"""
wrapped_start, wrapped_end = None, None
next_section_number = 1
wrapped_yet = False
ps_to_extract = []
for p in body.find_all('p', recursive=False):
if (
p.parent == body and
len(p.contents) == 1 and
isinstance(p.contents[0], NavigableString) and
p.contents[0].strip() == '---'):
if ( wrapped_start is None and
not wrapped_yet and
prev_not_string(p) is not None):
wrapped_start = body.find_all()[0]
if wrapped_start is None:
wrapped_start = next_not_string(p)
else:
wrapped_end = prev_not_string(p)
next_sibling = next_not_string(p)
_wrap_things(
soup, wrapped_start, wrapped_end, next_section_number)
next_section_number += 1
wrapped_yet = True
wrapped_start, wrapped_end = next_sibling, None
ps_to_extract.append(p)
for p in ps_to_extract:
p.extract()
if wrapped_start is not None and wrapped_end is None:
wrapped_end = body.find_all(recursive=False)[-1]
_wrap_things(
soup, wrapped_start, wrapped_end, next_section_number)
next_section_number += 1
if next_section_number == 1:
# have at least one section!
all_things = body.find_all(recursive=False)
if len(all_things) > 0:
_wrap_things(soup, all_things[0], all_things[-1], 1)
# else:
# body.append(
# soup.new_tag(
# 'div', **{'class': 'section', 'id': 'section1'}))
return body
def process_local_links(soup, body):
for link in body.find_all('a'):
href = link['href'].strip()
if href.endswith('.local'):
if '://' in href:
href = href.split('://')[1]
page_name = href.rsplit('.', 1)[0]
link['href'] = reverse(
'base_page', kwargs={'page': page_name})
return body
| [
"jon@jonscott.co.uk"
] | jon@jonscott.co.uk |
00b5dccb42c8858ab9d32c59a2d6849cdaa5680c | 7d68be5c777a34d7d0a8e6182f1954629fbbfe88 | /Search_Stock_loop.py | 34a694239c8e000f45047e7e2daf3c78267ad899 | [] | no_license | guifacury/Check_Stock | 979b14e33d627250c3493da0212650c45a573c45 | d2345a4f8d9120f9d0e28418fcf870d1e69a47a7 | refs/heads/master | 2022-12-12T12:37:33.261098 | 2020-09-18T14:39:25 | 2020-09-18T14:39:25 | 296,637,679 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,758 | py | # --------------------------------
# --------------------------------
# MEU DISCORD É [ facury#0262 ]
# --------------------------------
# --------------------------------
import selenium # CASO VOCÊ NUNCA TENHA INSTALADO O SELENIUM, ESCREVA NO CMD DO WINDOWS ---> pip install selenium
from time import sleep
from selenium.webdriver.common.keys import Keys
from selenium.webdriver import ActionChains
url = "https://www.nike.com.br/Snkrs#estoque" # URL DO SITE QUE VOCÊ QUER CONSULTAR
driver = selenium.webdriver.Chrome() # VAI EXECUTAR O WEBDRIVER
driver.get(url) # NÃO MEXER
sleep(7) # O COMANDO SLEEP SERVE PARA 'ESPERAR' DETERMINADO TEMPO, EU RECOMENDO QUE DEIXEI IGUAL AO QUE EU DEIXEI
print('STARTING') # VAI PRINTAR NO CONSOLE DO PYTHON QUE COMEÇOU
while True: # BASICAMENTE UM LOOP PARA FICAR CHECKANDO O STOCK E ENVIANDO NO WHATS APP
driver.get(url)
sleep(7)
driver.execute_script("window.scrollTo(0, document.body.scrollHeight);") # VAI DESCER ATÉ O FINAL DO SITE,PARA CARREGAR MAIS PRODUTOS
sleep(2)
driver.execute_script("window.scrollTo(0, document.body.scrollHeight);") # VAI DESCER ATÉ O FINAL DO SITE,PARA CARREGAR MAIS PRODUTOS
sleep(2)
driver.execute_script("window.scrollTo(0, document.body.scrollHeight);") # VAI DESCER ATÉ O FINAL DO SITE,PARA CARREGAR MAIS PRODUTOS
sleep(2)
lista = [] # AQUI VÃO SER ARMAZENADOS OS LINKS
# IMPORTANTE IMPORTANTE
hrefs = driver.find_elements_by_class_name("aspect-radio-box") # MUDE PARA O ELEMENTO DO PRODUTO DO SITE,QUALQUER DUVIDA MANDE UMA MENSAGEM PARA MIM
# IMPORTANTE IMPORTANTE
for fire in range (1, len(hrefs), 1):
lista.append(hrefs[fire].get_attribute("href")) # VAI ARMAZENAR OS LINKS NA LISTA ,CASO QUEIRA TROCAR, MUDE O ATRIBUTO
fire += 1
# PASSOS IMPORTANTES
# 1 - NA MESMA SEÇÃO DO GOOGLE VOCÊ TEM QUE ABRIR O WHATS APP WEB
# 2 - DEPOIS QUE VOCÊ ESCANEAR O CODIGO E LOGAR, NAO CLIQUE EM NADA
# 3 - VOLTE PARA A ABA DO PRIMEIRO SITE
# 4 - A PARTIR DAQUI O COMPUTADOR VAI FAZER TUDO SOZINHO ATÉ QUE VOCÊ FECHAR O PROGRAMA
print('Faltam 30 Segundos,Lembre de seguir os passos comentados no código')
sleep(30)
driver.switch_to_window(driver.window_handles[1]) # VAI MUDAR A GUIA PARA A DO WHATS APP, POR ISSO DEPOIS DE LOGAAR NO WHATS APPP, VOCÊ TERA QUE VOLTAR PARA ABA ANTERIOR
campo_pesquisa = driver.find_element_by_xpath('//div[contains(@class,"copyable-text selectable-text")]') # ELEMENTO DE PESQUISA WHATS APP
sleep(6)
campo_pesquisa.click()
campo_pesquisa.send_keys('O contato da pessoa aqui') # O NOME DO CONTATO EXATO
campo_pesquisa.send_keys(Keys.ENTER)
sleep(3)
campo_mensagem = driver.find_elements_by_xpath('//div[contains(@class,"copyable-text selectable-text")]') # CAMPO DA MENSAGEM
for start_msg in range(0, len(lista), 1): # AQUI É O LOOP PARA ENVIAR TODOS OS LINKS DA LISTA
campo_mensagem[1].click()
sleep(2)
campo_mensagem[1].send_keys(lista[start_msg])
campo_mensagem[1].send_keys(Keys.ENTER)
start_msg += 1
campo_mensagem[1].send_keys(f'Foram encontrados {len(hrefs)} Produtos') # QUANTIDADE DE PRODUTOS
campo_mensagem[1].send_keys(Keys.ENTER)
campo_mensagem[1].send_keys('PROGRAMA FINALIZADO | EM 30 MINUTOS ESTAREI ENVIANDO NOVAMENTE')
campo_mensagem[1].send_keys(Keys.ENTER)
driver.switch_to_window(driver.window_handles[0]) # VAI VOLTAR PARA A ABA DOS PRODUTOS
sleep(1800) # TEMPO DE ESPERA PARA RECOMEÇAR O PROCESSO EM SEGUNDOS
# CASO VOCÊ NÃO ESTEJA CONSEGUINDO, MANDE UMA MENSAGEM NO MEU DISCORD facury#0262
| [
"noreply@github.com"
] | noreply@github.com |
1658bbbafd6efc967d6e327b77a54890a2404475 | 322ee1bc4b175f1655176785867cd752dc764bdc | /custom_button.py | 70ea35e27e623a4f3d36e700bd1fefd1e9e4685c | [] | no_license | zawhtetlinn/Calculator_test | 2e32bb65be7889615eecca9d462e28b55efe5120 | b92bba5fcadae9f0ca226c8af2c32601ec95a163 | refs/heads/main | 2023-07-11T10:39:54.533208 | 2021-08-14T13:19:48 | 2021-08-14T13:19:48 | 396,013,781 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,754 | py | import tkinter
import sys
class TkinterCustomButton(tkinter.Frame):
    """tkinter custom button with border, rounded corners and hover effect.

    The button is rendered on an internal Canvas: four ovals form the rounded
    corners and two overlapping rectangles fill the straight edges (once for
    the border, once for the inner face).  The caption text or image sits in
    a Label centred on top of the canvas.

    Arguments:  master= where to place button (forwarded to tkinter.Frame),
                bg_color= background color, None copies the master's bg,
                fg_color= foreground (face) color, blue is standard,
                hover_color= face color while hovered, lightblue is standard,
                border_color= border color, None is standard,
                border_width= border thickness, 0 is standard,
                command= callback function, None is standard,
                width= width of button, 120 is standard,
                height= height of button, 40 is standard,
                corner_radius= corner radius, 10 is standard,
                text_font= (<Name>, <Size>),
                text_color= text color, white is standard,
                text= text of button,
                hover= hover effect, True is standard,
                image= PIL.PhotoImage, standard is None"""

    def __init__(self,
                 bg_color=None,
                 fg_color="#2874A6",
                 hover_color="#5499C7",
                 border_color=None,
                 border_width=0,
                 command=None,
                 width=120,
                 height=40,
                 corner_radius=10,
                 text_font=None,
                 text_color="white",
                 text="CustomButton",
                 hover=True,
                 image=None,
                 *args, **kwargs):
        super().__init__(*args, **kwargs)

        # Blend into the parent widget unless an explicit background is given.
        if bg_color is None:
            self.bg_color = self.master.cget("bg")
        else:
            self.bg_color = bg_color

        self.fg_color = fg_color
        self.hover_color = hover_color
        self.border_color = border_color

        self.width = width
        self.height = height

        # Clamp the radius so the corner circles never exceed the button body.
        if corner_radius*2 > self.height:
            self.corner_radius = self.height/2
        elif corner_radius*2 > self.width:
            self.corner_radius = self.width/2
        else:
            self.corner_radius = corner_radius

        self.border_width = border_width

        # Radius of the inner (face) rounding, shrunk by the border thickness.
        if self.corner_radius >= self.border_width:
            self.inner_corner_radius = self.corner_radius - self.border_width
        else:
            self.inner_corner_radius = 0

        self.text = text
        self.text_color = text_color
        if text_font is None:
            # Pick a per-platform default font.
            if sys.platform == "darwin":  # macOS
                self.text_font = ("Avenir", 13)
            elif "win" in sys.platform:  # Windows
                self.text_font = ("Century Gothic", 11)
            else:
                self.text_font = ("TkDefaultFont")
        else:
            self.text_font = text_font

        self.image = image
        self.function = command
        self.hover = hover

        self.configure(width=self.width, height=self.height)

        if sys.platform == "darwin" and self.function is not None:
            self.configure(cursor="pointinghand")  # macOS hand cursor over clickable button

        # Fix: spell out "highlightthickness" (the original relied on Tk's
        # option-abbreviation matching to accept the truncated spelling).
        self.canvas = tkinter.Canvas(master=self,
                                     highlightthickness=0,
                                     background=self.bg_color,
                                     width=self.width,
                                     height=self.height)
        self.canvas.place(x=0, y=0)

        if self.hover is True:
            self.canvas.bind("<Enter>", self.on_enter)
            self.canvas.bind("<Leave>", self.on_leave)

        # Fix: the click handler was bound twice; once is enough.
        self.canvas.bind("<Button-1>", self.clicked)

        self.canvas_fg_parts = []
        self.canvas_border_parts = []
        self.text_part = None
        self.text_label = None
        self.image_label = None

        self.draw()

    def draw(self):
        """(Re)draw border, face and caption on the internal canvas."""
        self.canvas.delete("all")
        self.canvas_fg_parts = []
        self.canvas_border_parts = []
        self.canvas.configure(bg=self.bg_color)

        # Border parts: four corner ovals plus two crossing rectangles.
        if self.border_width > 0:
            if self.corner_radius > 0:
                self.canvas_border_parts.append(self.canvas.create_oval(0,
                                                                        0,
                                                                        self.corner_radius * 2,
                                                                        self.corner_radius * 2))
                self.canvas_border_parts.append(self.canvas.create_oval(self.width - self.corner_radius * 2,
                                                                        0,
                                                                        self.width,
                                                                        self.corner_radius * 2))
                self.canvas_border_parts.append(self.canvas.create_oval(0,
                                                                        self.height - self.corner_radius * 2,
                                                                        self.corner_radius * 2,
                                                                        self.height))
                self.canvas_border_parts.append(self.canvas.create_oval(self.width - self.corner_radius * 2,
                                                                        self.height - self.corner_radius * 2,
                                                                        self.width,
                                                                        self.height))

            self.canvas_border_parts.append(self.canvas.create_rectangle(0,
                                                                         self.corner_radius,
                                                                         self.width,
                                                                         self.height - self.corner_radius))
            self.canvas_border_parts.append(self.canvas.create_rectangle(self.corner_radius,
                                                                         0,
                                                                         self.width - self.corner_radius,
                                                                         self.height))

        # Inner (face) parts, inset by the border width.
        if self.corner_radius > 0:
            self.canvas_fg_parts.append(self.canvas.create_oval(self.border_width,
                                                                self.border_width,
                                                                self.border_width + self.inner_corner_radius * 2,
                                                                self.border_width + self.inner_corner_radius * 2))
            self.canvas_fg_parts.append(self.canvas.create_oval(self.width - self.border_width - self.inner_corner_radius * 2,
                                                                self.border_width,
                                                                self.width - self.border_width,
                                                                self.border_width + self.inner_corner_radius * 2))
            self.canvas_fg_parts.append(self.canvas.create_oval(self.border_width,
                                                                self.height - self.border_width - self.inner_corner_radius * 2,
                                                                self.border_width + self.inner_corner_radius * 2,
                                                                self.height-self.border_width))
            self.canvas_fg_parts.append(self.canvas.create_oval(self.width - self.border_width - self.inner_corner_radius * 2,
                                                                self.height - self.border_width - self.inner_corner_radius * 2,
                                                                self.width - self.border_width,
                                                                self.height - self.border_width))

        self.canvas_fg_parts.append(self.canvas.create_rectangle(self.border_width + self.inner_corner_radius,
                                                                 self.border_width,
                                                                 self.width - self.border_width - self.inner_corner_radius,
                                                                 self.height - self.border_width))
        self.canvas_fg_parts.append(self.canvas.create_rectangle(self.border_width,
                                                                 self.border_width + self.inner_corner_radius,
                                                                 self.width - self.border_width,
                                                                 self.height - self.inner_corner_radius - self.border_width))

        for part in self.canvas_fg_parts:
            self.canvas.itemconfig(part, fill=self.fg_color, width=0)

        for part in self.canvas_border_parts:
            self.canvas.itemconfig(part, fill=self.border_color, width=0)

        # No image given: show the caption text.
        if self.image is None:
            self.text_label = tkinter.Label(master=self,
                                            text=self.text,
                                            font=self.text_font,
                                            bg=self.fg_color,
                                            fg=self.text_color)
            self.text_label.place(relx=0.5, rely=0.5, anchor=tkinter.CENTER)

            # Forward hover and click events from the label to the button
            # (fix: the click handler was previously bound twice).
            if self.hover is True:
                self.text_label.bind("<Enter>", self.on_enter)
                self.text_label.bind("<Leave>", self.on_leave)
            self.text_label.bind("<Button-1>", self.clicked)

            self.set_text(self.text)

        # Otherwise show the given image instead of text.
        else:
            self.image_label = tkinter.Label(master=self,
                                             image=self.image,
                                             bg=self.fg_color)
            self.image_label.place(relx=0.5,
                                   rely=0.5,
                                   anchor=tkinter.CENTER)

            # Forward hover and click events from the label to the button
            # (fix: the click handler was previously bound twice).
            if self.hover is True:
                self.image_label.bind("<Enter>", self.on_enter)
                self.image_label.bind("<Leave>", self.on_leave)
            self.image_label.bind("<Button-1>", self.clicked)

    def configure_color(self, bg_color=None, fg_color=None, hover_color=None, text_color=None):
        """Update any subset of the button colors and redraw.

        Passing bg_color=None (the default) re-syncs the background with the
        master widget's current background.
        """
        if bg_color is not None:
            self.bg_color = bg_color
        else:
            self.bg_color = self.master.cget("bg")

        if fg_color is not None:
            self.fg_color = fg_color

            # change background color of image_label
            if self.image is not None:
                self.image_label.configure(bg=self.fg_color)

        if hover_color is not None:
            self.hover_color = hover_color

        if text_color is not None:
            self.text_color = text_color
            # NOTE(review): text_part is never assigned a canvas item anywhere
            # in this class, so this branch is currently dead.
            if self.text_part is not None:
                self.canvas.itemconfig(self.text_part, fill=self.text_color)

        self.draw()

    def set_text(self, text):
        """Replace the caption text (no-op when the button shows an image)."""
        if self.text_label is not None:
            self.text_label.configure(text=text)

    def on_enter(self, event=0):
        """Hover-in handler: repaint face and caption with hover_color."""
        for part in self.canvas_fg_parts:
            self.canvas.itemconfig(part, fill=self.hover_color, width=0)

        if self.text_label is not None:
            # change background color of text_label
            self.text_label.configure(bg=self.hover_color)

        if self.image_label is not None:
            # change background color of image_label
            self.image_label.configure(bg=self.hover_color)

    def on_leave(self, event=0):
        """Hover-out handler: restore face and caption to fg_color."""
        for part in self.canvas_fg_parts:
            self.canvas.itemconfig(part, fill=self.fg_color, width=0)

        if self.text_label is not None:
            # change background color of text_label
            self.text_label.configure(bg=self.fg_color)

        if self.image_label is not None:
            # change background color of image_label
            self.image_label.configure(bg=self.fg_color)

    def clicked(self, event=0):
        """Left-click handler: invoke the configured command, if any."""
        if self.function is not None:
            self.function()
            self.on_leave()
"zawhtetz691@gmail.com"
] | zawhtetz691@gmail.com |
9dd49b3cf82fa3b52f4bc3b9c1514bcf1c23dca0 | 63ba933a294865f65409635f62e0f1d59f725f37 | /src/trees/isBalanced.py | 7ecb0495d36c1aecf3938a94d2007c4730bf1f19 | [
"CC0-1.0"
] | permissive | way2arun/datastructures_algorithms | fc4302bdbb923ef8912a4acf75a286f2b695de2a | 4ea4c1579c28308455be4dfa02bd45ebd88b2d0a | refs/heads/master | 2021-12-07T04:34:35.732026 | 2021-09-30T12:11:32 | 2021-09-30T12:11:32 | 203,658,808 | 1 | 0 | null | 2020-08-08T15:55:09 | 2019-08-21T20:23:46 | Python | UTF-8 | Python | false | false | 1,995 | py | """
Balanced Binary Tree
Given a binary tree, determine if it is height-balanced.
For this problem, a height-balanced binary tree is defined as:
a binary tree in which the left and right subtrees of every node differ in height by no more than 1.
Example 1:
Input: root = [3,9,20,null,null,15,7]
Output: true
Example 2:
Input: root = [1,2,2,3,3,null,null,4,4]
Output: false
Example 3:
Input: root = []
Output: true
Constraints:
The number of nodes in the tree is in the range [0, 5000].
-104 <= Node.val <= 104
"""
# Definition for a binary tree node.
class TreeNode:
    """A binary-tree node: a value plus optional left/right child links."""

    def __init__(self, val=0, left=None, right=None):
        # Single tuple assignment keeps the three attributes together.
        self.val, self.left, self.right = val, left, right
class Solution:
    """Height-balance check for a binary tree (LeetCode 110)."""

    def isBalanced(self, root: TreeNode) -> bool:
        """Return True iff every node's subtrees differ in height by <= 1."""
        # Solution 1 - 48 ms
        #return self.dfs(root)[1]
        # Solution 2 - 28 ms
        height, balanced = self.helper(root)
        return balanced

    def helper(self, root):
        """Return (height, is_balanced) for the subtree rooted at *root*.

        Height is -1 whenever the subtree is already known unbalanced.
        """
        if root is None:
            return 0, True
        left_height, left_ok = self.helper(root.left)
        right_height, right_ok = self.helper(root.right)
        if left_ok and right_ok and abs(left_height - right_height) <= 1:
            return 1 + max(left_height, right_height), True
        return -1, False

    def dfs(self, root):
        """Alternative bottom-up pass returning (depth, isBalance) directly."""
        if root is None:
            return 0, True
        depth_l, ok_l = self.dfs(root.left)    # left height, left balance
        depth_r, ok_r = self.dfs(root.right)   # right height, right balance
        return max(depth_l, depth_r) + 1, ok_l and ok_r and abs(depth_l - depth_r) <= 1
# Main Call
# Balanced example: 3 with children (9, 20) and 20 with children (15, 7);
# subtree heights differ by at most 1 everywhere.
root = TreeNode(3)
root.left = TreeNode(9)
root.right = TreeNode(20)
root.right.left = TreeNode(15)
root.right.right = TreeNode(7)
solution = Solution()
print(solution.isBalanced(root))  # expected: True
# Left-heavy example: the left subtree reaches depth 3 while the right is
# depth 1, so the root violates the balance condition.
root = TreeNode(1)
root.right = TreeNode(2)
root.left = TreeNode(2)
root.left.right = TreeNode(3)
root.left.left = TreeNode(3)
root.left.left.right = TreeNode(4)
root.left.left.left = TreeNode(4)
print(solution.isBalanced(root))  # expected: False
| [
"way2aru@yahoo.com"
] | way2aru@yahoo.com |
4e8a6e2e759799da23d684e7ed113a3372cb6320 | a41d781cad9af29a07da3cd4713bde2160da8f0f | /imgs2LMDB.py | 494c01826a983c0cf19659b0d9960d71392af20d | [
"MIT"
] | permissive | johnnylili/AdvancedEAST-PyTorch | cee11bea353071806c21d62315747981893b9c35 | a835c8cedce4ada1bc9580754245183d9f4aaa17 | refs/heads/master | 2022-12-04T08:51:57.823992 | 2020-08-22T18:38:29 | 2020-08-22T18:38:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,173 | py | """ a modified version of CRNN torch repository https://github.com/bgshih/crnn/blob/master/tool/create_dataset.py """
# import fire
import os
import lmdb
import cv2
# import imageio
import cfg
from PIL import Image, ImageDraw
from tqdm import tqdm
import numpy as np
from preprocess import preprocess
from label import shrink, point_inside_of_quad, point_inside_of_nth_quad
def checkImageIsValid(imageBin):
    """Return True iff *imageBin* holds decodable image bytes with non-zero area.

    Args:
        imageBin: raw encoded image bytes (e.g. the contents of a JPEG/PNG
            file), or None.

    Returns:
        bool: False for None input, undecodable bytes, or a zero-area image.
    """
    if imageBin is None:
        return False
    imageBuf = np.frombuffer(imageBin, dtype=np.uint8)
    img = cv2.imdecode(imageBuf, cv2.IMREAD_GRAYSCALE)
    if img is None:
        # cv2.imdecode signals failure by returning None; the original fell
        # through and crashed on img.shape with an AttributeError, which the
        # callers then logged as a generic error instead of skipping cleanly.
        return False
    imgH, imgW = img.shape[0], img.shape[1]
    if imgH * imgW == 0:
        return False
    return True
def writeCache(env, cache):
    """Flush every key/value pair in *cache* into *env* within one write txn."""
    with env.begin(write=True) as txn:
        for key in cache:
            txn.put(key, cache[key])
def createDataset(gtFile, outputPath, checkValid=True, map_size=8589934592):
    """
    Create LMDB dataset for training and evaluation from precomputed labels.

    ARGS:
        gtFile     : ground-truth list file (relative to cfg.data_dir), gbk
                     encoded; each line is "<image name>,...,<width-height>"
        outputPath : directory in which the LMDB environment is created
        checkValid : if true, check the validity of every image
        map_size   : maximum size of the LMDB in bytes (default 8 GiB)

    Per sample two keys are stored: image-%09d (raw encoded image bytes) and
    label-%09d (the ground-truth array loaded from *_gt.npy).
    """
    os.makedirs(outputPath, exist_ok=True)
    env = lmdb.open(outputPath, map_size=map_size)  # map_size caps the DB size (8 GiB default)
    cache = {}  # pending key/value pairs, flushed every 1000 samples
    cnt = 1     # 1-based sample counter, also embedded in the LMDB keys
    gtFile = os.path.join(cfg.data_dir, gtFile)
    with open(gtFile, 'r', encoding='gbk') as data:
        datalist = data.readlines()
    nSamples = len(datalist)
    width_height = datalist[0].strip('\n').split(',')[-1]  # image size string, taken from the first line
    for i in range(nSamples):
        print(datalist[i])
        imagePath_name = datalist[i].strip('\n').split(',')[0]
        imagePath = os.path.join(cfg.data_dir, cfg.train_image_dir_name, imagePath_name)
        labelPath = os.path.join(cfg.data_dir, cfg.train_label_dir_name, imagePath_name[:-4]+'_gt.npy')
        # NOTE(review): the label is loaded before the image-existence check,
        # so a missing .npy file raises here instead of being skipped.
        label = np.load(labelPath)
        if not os.path.exists(imagePath):
            print('%s does not exist' % imagePath)
            continue
        with open(imagePath, 'rb') as f:
            imageBin = f.read()
        if checkValid:
            try:
                if not checkImageIsValid(imageBin):
                    print('%s is not a valid image' % imagePath)
                    continue
            except(Exception):
                print('error occured', i)
                with open(outputPath + '/error_image_log.txt', 'a') as log:
                    log.write('%s-th image data occured error\n' % str(i))
                continue
        imageKey = 'image-%09d'.encode() % cnt
        labelKey = 'label-%09d'.encode() % cnt
        cache[imageKey] = imageBin
        # NOTE(review): label is a numpy array, not bytes -- lmdb put()
        # expects a bytes-like object; confirm this serialises as intended.
        cache[labelKey] = label
        if cnt % 1000 == 0:
            writeCache(env, cache)
            cache = {}
            print('Written %d / %d' % (cnt, nSamples))
        cnt += 1
    nSamples = cnt-1
    # Bookkeeping keys consumed by the dataset reader.
    cache['num-samples'.encode()] = str(nSamples).encode()
    cache['width-height'.encode()] = str(width_height).encode()
    writeCache(env, cache)
    print('Created dataset with %d samples' % nSamples)
def directCreateDataset(gtFile, outputPath, checkValid=True, map_size=8589934592, data_dir=cfg.data_dir):
    """
    Build an LMDB dataset directly from the raw ground-truth list, computing
    the 7-channel label array on the fly for every image.

    ARGS:
        gtFile     : list file (relative to *data_dir*); each line starts with
                     "<image name>,<width>,<height>,..."
        outputPath : directory in which the LMDB environment is created
        checkValid : if true, check the validity of every image
        map_size   : maximum size of the LMDB in bytes (default 8 GiB)
        data_dir   : dataset root directory (defaults to cfg.data_dir)

    Per sample three keys are stored: image-%09d (raw encoded bytes),
    label-%09d (the gt array) and gt_xy_list-%09d (the quad coordinates).
    """
    os.makedirs(outputPath, exist_ok=True)
    env = lmdb.open(outputPath, map_size=map_size)  # map_size caps the DB size (8 GiB default)
    cache = {}  # pending key/value pairs, flushed every 1000 samples
    cnt = 1     # 1-based sample counter, also embedded in the LMDB keys
    gtFile = os.path.join(data_dir, gtFile)
    with open(gtFile, 'r') as data:
        f_list = data.readlines()
    nSamples = len(f_list)
    for line, _ in zip(f_list, tqdm(range(nSamples))):
        # Progress print ("image #<cnt>: <line>" in Chinese).
        print('第{}张图片:{}'.format(cnt, f_list[cnt - 1]))
        line_cols = str(line).strip().split(',')
        img_name, width, height = \
            line_cols[0].strip(), int(line_cols[1].strip()), \
            int(line_cols[2].strip())
        # gt holds one pixel-size-strided cell per position with 7 channels
        # (see the assignments below: inside-flag, edge-flag, head/tail id
        # and two vertex offsets).
        gt = np.zeros((height // cfg.pixel_size, width // cfg.pixel_size, 7))
        train_label_dir = os.path.join(data_dir, cfg.train_label_dir_name)  # 'labels_%s/' % train_task_id
        xy_list_array = np.load(os.path.join(train_label_dir, img_name[:-4] + '.npy'))  # (N, 4, 2)
        train_image_dir = os.path.join(data_dir, cfg.train_image_dir_name)
        if not os.path.exists(os.path.join(train_image_dir, img_name)):
            print('%s does not exist' % os.path.join(train_image_dir, img_name))
            continue
        # --------------------------- build the ground-truth label ---------------------------
        with Image.open(os.path.join(train_image_dir, img_name)) as im:
            draw = ImageDraw.Draw(im)
            for xy_list in xy_list_array:
                _, shrink_xy_list, _ = shrink(xy_list, cfg.shrink_ratio)
                shrink_1, _, long_edge = shrink(xy_list, cfg.shrink_side_ratio)
                p_min = np.amin(shrink_xy_list, axis=0)
                p_max = np.amax(shrink_xy_list, axis=0)
                # floor of the float
                ji_min = (p_min / cfg.pixel_size - 0.5).astype(int) - 1
                # +1 for ceil of the float and +1 for include the end
                ji_max = (p_max / cfg.pixel_size - 0.5).astype(int) + 3
                # Clip the candidate cell window to the label grid bounds.
                imin = np.maximum(0, ji_min[1])
                imax = np.minimum(height // cfg.pixel_size, ji_max[1])
                jmin = np.maximum(0, ji_min[0])
                jmax = np.minimum(width // cfg.pixel_size, ji_max[0])
                for i in range(imin, imax):
                    for j in range(jmin, jmax):
                        # (px, py): centre of the current grid cell in pixels.
                        px = (j + 0.5) * cfg.pixel_size
                        py = (i + 0.5) * cfg.pixel_size
                        if point_inside_of_quad(px, py, shrink_xy_list, p_min, p_max):
                            gt[i, j, 0] = 1
                            line_width, line_color = 1, 'red'
                            ith = point_inside_of_nth_quad(px, py,
                                                           xy_list,
                                                           shrink_1,
                                                           long_edge)
                            vs = [[[3, 0], [1, 2]], [[0, 1], [2, 3]]]
                            if ith in range(2):
                                gt[i, j, 1] = 1
                                # Yellow for the head side (ith == 0), green
                                # for the tail side (ith == 1).
                                if ith == 0:
                                    line_width, line_color = 2, 'yellow'
                                else:
                                    line_width, line_color = 2, 'green'
                                gt[i, j, 2:3] = ith
                                gt[i, j, 3:5] = \
                                    xy_list[vs[long_edge][ith][0]] - [px, py]
                                gt[i, j, 5:] = \
                                    xy_list[vs[long_edge][ith][1]] - [px, py]
                            # Outline the cell on the visualisation image.
                            draw.line([(px - 0.5 * cfg.pixel_size,
                                        py - 0.5 * cfg.pixel_size),
                                       (px + 0.5 * cfg.pixel_size,
                                        py - 0.5 * cfg.pixel_size),
                                       (px + 0.5 * cfg.pixel_size,
                                        py + 0.5 * cfg.pixel_size),
                                       (px - 0.5 * cfg.pixel_size,
                                        py + 0.5 * cfg.pixel_size),
                                       (px - 0.5 * cfg.pixel_size,
                                        py - 0.5 * cfg.pixel_size)],
                                      width=line_width, fill=line_color)
            act_image_dir = os.path.join(cfg.data_dir, cfg.show_act_image_dir_name)
            if cfg.draw_act_quad:
                im.save(os.path.join(act_image_dir, img_name))
        # train_label_dir = os.path.join(data_dir, cfg.train_label_dir_name) # 'labels_%s/' % train_task_id
        # np.save(os.path.join(train_label_dir, img_name[:-4] + '_gt.npy'), gt)
        imagePath = os.path.join(cfg.data_dir, cfg.train_image_dir_name, img_name)
        label = gt
        # --------------------------- write into LMDB ---------------------------
        with open(imagePath, 'rb') as f:
            imageBin = f.read()
        if checkValid:
            try:
                if not checkImageIsValid(imageBin):
                    print('%s is not a valid image' % imagePath)
                    continue
            except(Exception):
                # NOTE(review): `i` here is the inner grid index left over
                # from the label loop, not the sample index -- confirm which
                # was intended before relying on the log contents.
                print('error occured', i)
                with open(outputPath + '/error_image_log.txt', 'a') as log:
                    log.write('%s-th image data occured error\n' % str(i))
                continue
        imageKey = 'image-%09d'.encode() % cnt
        labelKey = 'label-%09d'.encode() % cnt
        gt_xy_list_Key = 'gt_xy_list-%09d'.encode() % cnt
        cache[imageKey] = imageBin
        # NOTE(review): label and xy_list_array are numpy arrays, not bytes --
        # lmdb put() expects bytes-like values; confirm this serialises as
        # intended.
        cache[labelKey] = label
        cache[gt_xy_list_Key] = xy_list_array
        if cnt % 1000 == 0:
            writeCache(env, cache)
            cache = {}
            print('Written %d / %d' % (cnt, nSamples))
        cnt += 1
    nSamples = cnt-1
    # Bookkeeping keys consumed by the dataset reader; width holds the value
    # parsed from the last processed line.
    cache['num-samples'.encode()] = str(nSamples).encode()
    cache['width-height'.encode()] = str(width).encode()
    writeCache(env, cache)
    print('Created dataset with %d samples' % nSamples)
def genData():
    """Build the train and validation LMDB datasets, preprocessing first if needed."""
    # Run preprocessing only when the validation list file is missing.
    if not os.path.exists(os.path.join(cfg.data_dir, cfg.val_fname)):
        preprocess()
    # ~260 MB LMDB baseline for 256-pixel images; scale quadratically with the
    # side length encoded in the last 3 chars of train_task_id, with a 1.3x
    # safety margin.  The validation set gets a tenth of the training budget.
    mapsize_256 = 2.6e8
    train_mapsize = (int(cfg.train_task_id[-3:]) / 256)**2 * mapsize_256 * 1.3
    # NOTE(review): train_mapsize is a float; lmdb.open(map_size=...) normally
    # expects an integer -- confirm the binding coerces it.
    val_mapsize = train_mapsize // 10
    directCreateDataset(cfg.train_fname, cfg.lmdb_trainset_dir_name, checkValid=True, map_size=train_mapsize)
    directCreateDataset(cfg.val_fname, cfg.lmdb_valset_dir_name, checkValid=True, map_size=val_mapsize)
if __name__ == "__main__":
    genData()
| [
"noreply@github.com"
] | noreply@github.com |
682ff3d690856f794b02b0de622319a91a49cbda | db373aaec7f164138bafb97eb6798ac23ed6db3a | /src/env/lib/python3.6/sre_constants.py | 3e09693e04ab823b242d8416c00275a4c2123435 | [] | no_license | justinkwan20/fora | a0b2f4d2a383cba1797980cf1e1cf6846bbc895b | 427b24ebef978bb103de6df50c71fe01c8723cc7 | refs/heads/master | 2020-03-13T10:11:35.321752 | 2018-04-22T15:59:35 | 2018-04-22T15:59:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 53 | py | /home/iceman/anaconda3/lib/python3.6/sre_constants.py | [
"wpine215@gmail.com"
] | wpine215@gmail.com |
468476f6db7243c6bbaa9620e0a6d5e4ea3e22ff | 110c5310346e0db4ea399c6a553d75fe3fbf5bcd | /test/sagemaker_tests/tensorflow/tensorflow1_training/resources/gpu_device_placement.py | 11bbcdff72b00abf1b5610d0b709da216dae7a9a | [
"Apache-2.0"
] | permissive | Huahan98/deep-learning-containers | dc5f3391f4099326c8402832f87cc3c4bda86cc8 | 1510b917ebfb24a3bfb744e4590d46ba78657392 | refs/heads/master | 2023-09-03T13:29:32.142167 | 2021-11-10T17:42:43 | 2021-11-10T17:42:43 | 285,139,382 | 1 | 0 | Apache-2.0 | 2020-08-05T00:57:54 | 2020-08-05T00:57:53 | null | UTF-8 | Python | false | false | 1,031 | py | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
# TF1-style smoke test: multiply two constant matrices on GPU 0 and let
# TensorFlow report which device every op actually ran on.
import tensorflow as tf

# https://www.tensorflow.org/programmers_guide/using_gpu
print('-' * 87)
print('Run GPU test.')
# Pin the two constants and the matmul to the first GPU.
with tf.device('/gpu:0'):
    a = tf.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], shape=[2, 3], name='a')
    b = tf.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], shape=[3, 2], name='b')
    c = tf.matmul(a, b)
# log_device_placement=True prints the placement of every op to stderr.
sess = tf.Session(config=tf.ConfigProto(log_device_placement=True))
# Runs the op.
print(sess.run(c))
print('-' * 87)
print('')
| [
"saravsak@amazon.com"
] | saravsak@amazon.com |
1ad1cdf4c211d1ad2cfc0e6db523776b6a91d5d7 | 711756b796d68035dc6a39060515200d1d37a274 | /output_cog_tags/initial_859.py | a64a24fccf2efc8b865aa813310e625203f34f62 | [] | no_license | batxes/exocyst_scripts | 8b109c279c93dd68c1d55ed64ad3cca93e3c95ca | a6c487d5053b9b67db22c59865e4ef2417e53030 | refs/heads/master | 2020-06-16T20:16:24.840725 | 2016-11-30T16:23:16 | 2016-11-30T16:23:16 | 75,075,164 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,330 | py | import _surface
import chimera
try:
import chimera.runCommand
except:
pass
from VolumePath import markerset as ms
try:
from VolumePath import Marker_Set, Link
new_marker_set=Marker_Set
except:
from VolumePath import volume_path_dialog
d= volume_path_dialog(True)
new_marker_set= d.new_marker_set
marker_sets={}
surf_sets={}
# Marker placements for this model, written as (set name, xyz, rgb) rows so
# every tag goes through one shared code path.  The order matches the original
# per-marker statements and the radius (21.9005) is common to all of them.
_MARKER_RADIUS = 21.9005
_MARKER_DATA = [
    ("Cog1_Anch", (740, 588, 378), (0, 0, 1)),
    ("Cog2_GFPN", (934, 253, 192), (1, 0.5, 0)),
    ("Cog2_GFPC", (18, 558, 379), (1, 0.5, 0)),
    ("Cog2_Anch", (553, 818, 131), (1, 0.5, 0)),
    ("Cog3_GFPN", (756, 296, 36), (1, 0.87, 0)),
    ("Cog3_GFPC", (816, 91, 319), (1, 0.87, 0)),
    ("Cog3_Anch", (649, 924, 860), (1, 0.87, 0)),
    ("Cog4_GFPN", (341, 421, 253), (0.97, 0.51, 0.75)),
    ("Cog4_GFPC", (623, 816, 736), (0.97, 0.51, 0.75)),
    ("Cog4_Anch", (88, 643, 970), (0.97, 0.51, 0.75)),
    ("Cog5_GFPN", (302, 317, 967), (0.39, 0.31, 0.14)),
    ("Cog5_GFPC", (635, 925, 161), (0.39, 0.31, 0.14)),
    ("Cog5_Anch", (490, 53, 130), (0.39, 0.31, 0.14)),
    ("Cog6_GFPN", (933, 833, 769), (0.6, 0.31, 0.64)),
    ("Cog6_GFPC", (362, 701, 371), (0.6, 0.31, 0.64)),
    ("Cog6_Anch", (190, 600, 839), (0.6, 0.31, 0.64)),
    ("Cog7_GFPN", (310, 511, 365), (0.89, 0.1, 0.1)),
    ("Cog7_GFPC", (295, 883, 14), (0.89, 0.1, 0.1)),
    ("Cog7_Anch", (639, 840, 123), (0.89, 0.1, 0.1)),
    ("Cog8_GFPC", (761, 18, 329), (0.3, 0.69, 0.29)),
    ("Cog8_Anch", (107, 498, 442), (0.3, 0.69, 0.29)),
]
for name, position, color in _MARKER_DATA:
    # Create each named marker set once, then drop its marker into it.
    if name not in marker_sets:
        s = new_marker_set(name)
        marker_sets[name] = s
    s = marker_sets[name]
    mark = s.place_marker(position, color, _MARKER_RADIUS)
for k in surf_sets.keys():
    chimera.openModels.add([surf_sets[k]])
| [
"batxes@gmail.com"
] | batxes@gmail.com |
64728e5c76187cf4177e6d19c48c73b797430c05 | d94b6845aeeb412aac6850b70e22628bc84d1d6d | /perso_arabic_norm/describe_splits.py | 70788b7f48dbdc399d1fcc680fe3b99a08017009 | [
"CC-BY-4.0",
"Apache-2.0"
] | permissive | ishine/google-research | 541aea114a68ced68736340e037fc0f8257d1ea2 | c1ae273841592fce4c993bf35cdd0a6424e73da4 | refs/heads/master | 2023-06-08T23:02:25.502203 | 2023-05-31T01:00:56 | 2023-05-31T01:06:45 | 242,478,569 | 0 | 0 | Apache-2.0 | 2020-06-23T01:55:11 | 2020-02-23T07:59:42 | Jupyter Notebook | UTF-8 | Python | false | false | 2,067 | py | # coding=utf-8
# Copyright 2023 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Collects basic stats for training and test splits from the results file.
Example:
--------
LANGUAGE=...
cat data/ngrams/results/reading/00/baselines/${LANGUAGE}.*.tsv > /tmp/${LANGUAGE}.tsv
python describe_splits.py \
--results_tsv_file /tmp/${LANGUAGE}.tsv
Dependencies:
-------------
absl
pandas
statsmodels
"""
from typing import Sequence
import logging
from absl import app
from absl import flags
import pandas as pd
import statsmodels.stats.api as sms
flags.DEFINE_string(
"results_tsv_file", "",
"Results text file in tab-separated (tsv) format.")
FLAGS = flags.FLAGS
def _to_str(stats):
"""Retrieves basic stats from the object."""
return f"mean: {stats.mean} var: {stats.var} std: {stats.std}"
def main(argv):
  """Load the results TSV and log mean/var/std for the train and test splits."""
  if len(argv) > 1:
    raise app.UsageError("Too many command-line arguments.")
  if not FLAGS.results_tsv_file:
    raise app.UsageError("Specify --results_tsv_file [FILE]!")
  logging.info(f"Reading metrics from {FLAGS.results_tsv_file} ...")
  # Headerless TSV: column 0 = train token counts, column 1 = test counts.
  df = pd.read_csv(FLAGS.results_tsv_file, sep="\t", header=None)
  logging.info(f"Read {df.shape[0]} samples")
  num_train_toks = list(df[0])  # Token can be char or word.
  train_stats = sms.DescrStatsW(num_train_toks)
  logging.info(f"Train stats: {_to_str(train_stats)}")
  num_test_toks = list(df[1])
  test_stats = sms.DescrStatsW(num_test_toks)
  logging.info(f"Test stats: {_to_str(test_stats)}")
if __name__ == "__main__":
  app.run(main)
| [
"copybara-worker@google.com"
] | copybara-worker@google.com |
bda313940f2dc1b4bee68c1e809d1c719d15b34b | b74cc40a3a3bfa57437e2038db4e35942e0a2b48 | /ProjectEuler/Python/p21.py | c5780d3870f64497a4923886f26515e873016516 | [] | no_license | projectPythonator/portfolio | 4a79492c1e8237b7aa0fc427ac3085941b9115c8 | 5d96c7f7c07e06e1653c25e818e0e299aaf6efb0 | refs/heads/master | 2022-03-07T06:32:15.416337 | 2022-02-22T22:15:36 | 2022-02-22T22:15:36 | 149,538,944 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,306 | py |
#author agis daniels
#Evaluate the sum of all the amicable numbers under 10000.
from math import sqrt
def sum1(n):
    """Return the aliquot sum of *n* (sum of proper divisors) by trial division, O(n).

    Fix: the original returned 1 for n <= 1, but 0 and 1 have no proper
    divisors (aliquot sum 0).  The range upper bound also now includes n-1,
    which only matters for n == 2 where the result is unchanged.
    """
    if n <= 1:
        return 0
    return 1 + sum(i for i in range(2, n) if n % i == 0)
def sum2(n):
    """Return the aliquot sum of *n* in O(sqrt(n)) by pairing divisors i and n//i.

    Odd n is scanned with odd trial divisors only (even numbers cannot divide
    it).  A perfect-square root is counted once up front.

    Fix: the original returned 2 for n == 1 (it counted sqrt(1) on top of the
    initial 1); 0 and 1 have no proper divisors, so both now yield 0.
    """
    if n <= 1:
        return 0
    ans = 1          # 1 divides everything > 1
    step = 1
    f = 2
    lim = int(sqrt(n))
    if lim * lim == n:
        ans += lim   # count the square root exactly once
        lim -= 1
    if n % 2 != 0:
        f = 3        # odd n: start at 3 and skip even candidates
        step = 2
    for i in range(f, lim + 1, step):
        if n % i == 0:
            ans += i + n // i
    return ans
def sum3(n):
    """Return sigma(n), the sum of ALL divisors of n, via prime factorisation.

    For each prime power p^k dividing n the divisor-sum factor is the
    geometric series 1 + p + ... + p^k = (p^(k+1) - 1) / (p - 1).
    Runs in O(sqrt(n)) trial divisions.

    Fix: the original looped forever for n == 0 (0 % 2 == 0 indefinitely);
    non-positive inputs now return 0, for which sigma is undefined anyway.
    """
    if n < 1:
        return 0
    ans = 1
    # Strip the factor 2 separately so the main loop can step over odd
    # candidates only.
    if n % 2 == 0:
        j = 4                      # j tracks p^(k+1) for p == 2
        n //= 2
        while n % 2 == 0:
            j *= 2
            n //= 2
        ans *= j - 1               # (2^(k+1) - 1) / (2 - 1)
    p = 3
    while p * p <= n:
        if n % p == 0:
            j = p * p              # j tracks p^(k+1)
            n //= p
            while n % p == 0:
                j *= p
                n //= p
            ans *= (j - 1) // (p - 1)
        p += 2
    if n > 1:
        # Whatever remains is a single prime factor: series is 1 + n.
        ans *= n + 1
    return ans
def helper3(n):
    """Aliquot sum of n: sigma(n) minus n itself (sum of proper divisors)."""
    return sum3(n) - n
def sol1(n):
    """Brute-force amicable-pair sum over 1..n-1 (reference implementation).

    Re-evaluates helper3 on both ends of every ordered pair a < b, so the
    cost is O(n^2) aliquot-sum calls; sol2 below does the same job in O(n)
    calls and is the one actually used by main().
    """
    tot = 0
    for a in range(1, n):
        print(a)  # progress trace -- this loop is very slow
        for b in range(a+1, n):
            if helper3(b) == a and helper3(a) == b:
                tot += a + b
    return tot
def sol2(n):
    """Sum every amicable pair whose smaller member lies in 2..n-1."""
    total = 0
    for candidate in range(2, n):
        partner = helper3(candidate)
        # Count each pair once, from its smaller member, and require the
        # aliquot sums to point at each other (the amicable definition;
        # partner > candidate also rules out perfect numbers).
        if partner > candidate and helper3(partner) == candidate:
            total += candidate + partner
    return total
def main():
    """Print the Project Euler #21 answer: sum of amicable numbers under 10000."""
    #print("the answer from sol one is {}".format(sol1(9999)))
    print("the answer from sol two is {}".format(sol2(9999)))
main()
| [
"noreply@github.com"
] | noreply@github.com |
9c833ca200046fae0f5390f5be87fd27f4d4812c | 1d22fade6916bdff3a5843a94cfcd0bf988947cd | /String/Longest Common Prefix.py | 5d94158496887607df2d5744afa541c5ad24127a | [] | no_license | Anishukla/InterviewBit | aafc75b1f0d8277b89eeeed21272a298dfdd0898 | 5a8d8a8f7b70ef64032ce237e8ff4db106d9738f | refs/heads/master | 2022-11-12T03:13:15.258421 | 2020-07-02T06:42:30 | 2020-07-02T06:42:30 | 263,426,661 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 619 | py | class Solution:
# @param A : list of strings
# @return a strings
def longestCommonPrefix(self, A):
if len(A) == 0:
return ''
first_letter = A[0]
longest_prefix = ''
for letter in range(1, len(first_letter)+1):
prefix = first_letter[0:letter]
flag = True
for word in A:
if (word.startswith(prefix)):
pass
else:
flag = False
if flag:
longest_prefix = prefix
else:
break
return(longest_prefix)
| [
"shuklaneesh@gmail.com"
] | shuklaneesh@gmail.com |
a0ead35d36d493c9845a43942f9861d6ba38d077 | 5ed2eb094c75da388af64247f77203d1fd825476 | /visualizacionProba.py | 37f847c81b0b7e106c50414096db4a9511447019 | [] | no_license | aitorgolla10/textmining | acee8a4c3359270c40f8354d61f276acf642875b | 9a31a53e8751697864a1ead38218b873e6f4108e | refs/heads/master | 2022-11-05T08:16:09.630957 | 2019-11-17T22:38:14 | 2019-11-17T22:38:14 | 215,301,062 | 1 | 2 | null | 2022-10-10T06:44:55 | 2019-10-15T13:06:06 | Python | UTF-8 | Python | false | false | 96 | py | from visualizacion import Visualizacion as visualizar
visualizar.visualizarClusters(visualizar) | [
"agomez257@ikasle.ehu.eus"
] | agomez257@ikasle.ehu.eus |
3f3687f5bc5b251a3f3dfd1ab222f022f96e1572 | f4f4ed24cda0a6febac02c93e023084056235830 | /week13/onlineshop/onlineshop/settings.py | 7df3ac64996c4d3f5fc64b5e01a0a29743b1c95d | [] | no_license | madinamantay/BFDjango | 36163f2f5bf53e6a2ed7a244402c14d9bc5746eb | a364a5a16ecad65f09de4af20bdee0791d4a212f | refs/heads/master | 2022-06-14T09:30:06.630511 | 2020-04-27T03:26:51 | 2020-04-27T03:26:51 | 237,785,225 | 0 | 0 | null | 2022-05-25T03:19:28 | 2020-02-02T14:41:51 | Python | UTF-8 | Python | false | false | 5,628 | py | """
Django settings for onlineshop project.
Generated by 'django-admin startproject' using Django 2.2.10.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
import datetime
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'lc3@87_dbpy0p!5$0+hl41pf$0w)ef3&v1#eg2yh$py%do)$#q'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'rest_framework_jwt',
'auth_',
'api',
]
AUTH_USER_MODEL = 'auth_.MyUser'
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'onlineshop.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'onlineshop.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
# Django REST Framework defaults: every endpoint requires an authenticated
# user unless a view overrides permission_classes.
REST_FRAMEWORK = {
    'DEFAULT_PERMISSION_CLASSES': (
        'rest_framework.permissions.IsAuthenticated',
    ),
    # JWT is tried first; session and basic auth stay enabled, which keeps
    # the browsable API usable during development.
    'DEFAULT_AUTHENTICATION_CLASSES': (
        'rest_framework_jwt.authentication.JSONWebTokenAuthentication',
        'rest_framework.authentication.SessionAuthentication',
        'rest_framework.authentication.BasicAuthentication',
    ),
}
JWT_AUTH = {
'JWT_ENCODE_HANDLER':
'rest_framework_jwt.utils.jwt_encode_handler',
'JWT_DECODE_HANDLER':
'rest_framework_jwt.utils.jwt_decode_handler',
'JWT_PAYLOAD_HANDLER':
'rest_framework_jwt.utils.jwt_payload_handler',
'JWT_PAYLOAD_GET_USER_ID_HANDLER':
'rest_framework_jwt.utils.jwt_get_user_id_from_payload_handler',
'JWT_RESPONSE_PAYLOAD_HANDLER':
'rest_framework_jwt.utils.jwt_response_payload_handler',
'JWT_SECRET_KEY': SECRET_KEY,
'JWT_GET_USER_SECRET_KEY': None,
'JWT_PUBLIC_KEY': None,
'JWT_PRIVATE_KEY': None,
'JWT_ALGORITHM': 'HS256',
'JWT_VERIFY': True,
'JWT_VERIFY_EXPIRATION': True,
'JWT_LEEWAY': 0,
'JWT_EXPIRATION_DELTA': datetime.timedelta(days=10),
'JWT_AUDIENCE': None,
'JWT_ISSUER': None,
'JWT_ALLOW_REFRESH': False,
'JWT_REFRESH_EXPIRATION_DELTA': datetime.timedelta(days=7),
'JWT_AUTH_HEADER_PREFIX': 'JWT',
'JWT_AUTH_COOKIE': None,
}
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'verbose': {
'format': '%(levelname)s -- %(asctime)s: %(message)s',
},
'main': {
'format': '%(levelname)s -- %(message)s'
}
},
'handlers': {
'file_handler': {
'level': 'INFO',
'class': 'logging.FileHandler',
'filename': 'test.log',
'formatter': 'verbose'
},
'console_handler': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'main'
}
},
'loggers': {
'api': {
'handlers': ['file_handler', 'console_handler'],
'level': 'DEBUG',
},
},
}
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
| [
"madinamantay@gmail.com"
] | madinamantay@gmail.com |
fc3b24f8e844f358d301887f327d6eefee482f83 | 438bd0cbd5a7e2a42c27564f0a45d78471d355ef | /Testing.py | ec1ed9769da0ceb97c0bdb19366b8844c0cd0d24 | [
"MIT"
] | permissive | Choco31415/DFTileConverter | bbfb92bc591ab1d726591c3e57784a0557fa6c8b | a228a558ac3c8dd1b60e48d549ad63fa47e39ac9 | refs/heads/master | 2020-03-25T07:54:47.120068 | 2018-08-06T17:59:53 | 2018-08-06T17:59:53 | 143,587,858 | 4 | 0 | MIT | 2018-08-06T03:54:48 | 2018-08-05T06:05:30 | Python | UTF-8 | Python | false | false | 3,585 | py | """
Need testing.
"""
# Handle imports
import unittest
from LoadTilesets import image_to_array, get_tileset_by_id, get_id_of_tileset, get_tileset
from Convert import detect_tileset, check_tileset_all_offsets, get_tile_ids
import numpy as np
import json
import os
# Define constants
test_screenshots = [os.path.join("resources", "screenshots", "Image_Vidumec15x15b.png"),
os.path.join("resources", "screenshots", "Image_Anikki8x8.png")]
actual_tileset_names = ["Vidumec", "Anikki_square_8x8"]
actual_offsets = [(9, 12), (6, 4)]
actual_tile_id_files = [os.path.join("resources", "test", "Image_Vidumec15x15b_tile_ids"),
os.path.join("resources", "test", "Image_Anikki8x8_tile_ids")]
# Setup
# Define methods
class TestLoadTilesetMethods(unittest.TestCase):
    """Checks that PNG decoding matches a pre-computed reference array."""
    def test_image_to_array(self):
        expected = np.load("resources/test/Curses.npy")
        decoded = image_to_array("resources/test/Curses 640x300diag.png")
        assert (decoded == expected).all()
class TestConvertMethods(unittest.TestCase):
    """End-to-end tests for tileset detection against known screenshots.

    Each index i pairs a screenshot in test_screenshots with the tileset
    name and pixel offset it is known to have been rendered with.
    """
    def test_detect_tileset0(self):
        self.general_test_tileset(0)
    def test_detect_tileset1(self):
        self.general_test_tileset(1)
    def general_test_tileset(self, i):
        # NOTE(review): `global` is redundant here -- module-level names
        # are readable without it; only assignment would need it.
        global test_screenshots, actual_tileset_names, actual_offsets
        test_screenshot = test_screenshots[i]
        actual_tileset_name = actual_tileset_names[i]
        actual_offset = actual_offsets[i]
        # Drop the alpha channel before detection.
        image = image_to_array(test_screenshot)[:,:,:3]
        tileset_id, offset = detect_tileset(image)
        assert (tileset_id == get_id_of_tileset(actual_tileset_name)), "Detected tileset id: {}, name: {}".format(tileset_id, get_tileset_by_id(tileset_id)["local_filename"])
        assert (tuple(offset) == actual_offset), "Detected offset: {}".format(offset)
    def test_check_tileset_all_offsets0(self):
        self.general_test_check_tileset_all_offsets(0)
    def test_check_tileset_all_offsets1(self):
        self.general_test_check_tileset_all_offsets(1)
    def general_test_check_tileset_all_offsets(self, i):
        global test_screenshots, actual_tileset_names, actual_offsets
        actual_tileset_name = actual_tileset_names[i]
        actual_tileset = get_tileset(actual_tileset_name)
        actual_offset = actual_offsets[i]
        test_screenshot = test_screenshots[i]
        image = image_to_array(test_screenshot)[:, :, :3]
        # A 4x4-tile window is enough for offset detection on a 3x3 grid.
        subset = image[
            0: 4 * actual_tileset["shape"][0],
            0: 4 * actual_tileset["shape"][1],
        ]
        _, offset = check_tileset_all_offsets(subset, actual_tileset, [3, 3])
        assert (offset == actual_offset), "Detected offset: {}".format(offset)
    def test_get_tile_ids0(self):
        self.general_test_get_tile_ids(0)
    def test_get_tile_ids1(self):
        self.general_test_get_tile_ids(1)
    def general_test_get_tile_ids(self, i):
        global test_screenshots, actual_tileset_names, actual_offsets, actual_tile_id_files
        actual_tileset_name = actual_tileset_names[i]
        actual_tileset = get_tileset(actual_tileset_name)
        actual_offset = actual_offsets[i]
        # Expected tile ids are stored as a JSON fixture per screenshot.
        with open(actual_tile_id_files[i], "r") as f:
            actual_tile_ids = json.loads(f.read())
        test_screenshot = test_screenshots[i]
        image = image_to_array(test_screenshot)[:, :, :3]
        # Crop away the detected offset so tiles align with the grid.
        cropped_image = image[actual_offset[0]:,actual_offset[1]:]
        tile_ids = get_tile_ids(cropped_image, actual_tileset)
        assert (tile_ids.tolist() == actual_tile_ids), "Found tile ids: {}".format(tile_ids.tolist())
"parke.26@osu.edu"
] | parke.26@osu.edu |
656400a9a3c0238586b3bc67900a8c9c266c3cfb | 5891051796778cfb44a255248ce38789bfef9e70 | /DjangoLearn/bgfaith/urls.py | 2fae220197d9f146c5fbb61d9e5154182b10d282 | [] | no_license | Faithlmy/Python_base | cc546a5d86b123e102a69df1227cde9b6e567493 | 5a43557e6375dc9dbe5f6701d7c10e549873a5ab | refs/heads/master | 2021-01-01T17:07:04.097978 | 2018-03-31T16:44:01 | 2018-03-31T16:44:01 | 98,000,621 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 856 | py | """bgfaith URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.8/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
from django.conf.urls import include, url
from django.contrib import admin
# from bgapp.views import *
# All project routing is delegated to the bgapp application.
urlpatterns = [
    # url(r'^admin/', include(admin.site.urls)),
    # NOTE(review): namespace='' and app_name='' are empty strings --
    # presumably placeholders; confirm whether named URL reversing is
    # actually needed for bgapp.
    url('^bgapp/', include('bgapp.urls', namespace='', app_name=''))
]
| [
"lmengyy@126.com"
] | lmengyy@126.com |
a66628917e9d57bacaf3bbfd12e66540e5012968 | 5eb685241d6ad86848b56fcefb525a24acdd77d6 | /controllers/__init__.py | 71c1f604183bb0ebfc5b05480a246d5baa56714c | [] | no_license | kForth/SubsystemAccelerationSimulator | d95fe96f159eb752751cbd956f905afe154358f9 | fb35b44f9e1262dad412519907204ec1bbfda838 | refs/heads/master | 2021-05-06T19:58:00.584809 | 2018-02-16T00:01:25 | 2018-02-16T00:01:25 | 112,236,409 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 97 | py | from controllers.bang_bang import BangBangController
from controllers.pidf import PidfController
| [
"kgoforth1503@gmail.com"
] | kgoforth1503@gmail.com |
ad55a036719eab54161bb16e9344fa465842a9b0 | 003ffcf8144565404636f3d74590a8d6b10a90a4 | /620-not-boring-movies/620-not-boring-movies.py | 649086294562ebc1cd5148e624db643e5a39e3ab | [] | no_license | congve1/leetcode | fb31edf93049e21210d73f7b3e7b9b82057e1d7a | ce1e802b5052da2cdb919d6d7e39eed860e0b61b | refs/heads/master | 2020-05-13T19:19:58.835432 | 2019-05-06T00:44:07 | 2019-05-06T00:44:07 | 181,652,371 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 154 | py | # Write your MySQL query statement below
select id,movie,description,rating
from cinema
where description != 'boring' and id%2 = 1
order by rating DESC
| [
"congve1@live.com"
] | congve1@live.com |
5a783ed4e3f6c0a37f168ca46e333d7a5b023fac | 180201955dc2a1902f17e4e38715f2e8e78fd703 | /venv/Scripts/pip-script.py | bd6c4e62d876474585c9e4df096fbec258773e9c | [] | no_license | psruti/flask_song_catalogue | 23ccad69059a99251270004469e35ab851d44e7e | dcce6ea28870d4df059717296b2829f2920837cc | refs/heads/master | 2021-06-27T12:40:44.030859 | 2019-10-30T16:27:59 | 2019-10-30T16:27:59 | 218,568,615 | 0 | 0 | null | 2021-03-20T02:05:05 | 2019-10-30T16:09:01 | Python | UTF-8 | Python | false | false | 416 | py | #!C:\Users\sruti\PycharmProjects\book_catalog\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==10.0.1','console_scripts','pip'
__requires__ = 'pip==10.0.1'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==10.0.1', 'console_scripts', 'pip')()
)
| [
"peri.sruti@gmail.com"
] | peri.sruti@gmail.com |
cdb8d762485a925f85def4f00a98dd3df6ff07fb | 92b3b091ee21d7310cf0e6569ac9b1fe057efe22 | /see_tunning.py | 333314be28bf943e882c3fb882d3c482f7d6183b | [] | no_license | chenhao1umbc/WSCDL | 3c33476328f708c16fb3596dc4adda3703cd0121 | 7f3847b2ca932289126bab5af8454eef7239f048 | refs/heads/master | 2021-06-13T13:57:48.912084 | 2021-04-07T21:14:08 | 2021-04-07T21:14:08 | 186,867,233 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,087 | py | """This is the main file to run Weakly supervised supervised dictionary learning
The default data type is torch.tensor with precision float32
"""
#%%
from utils2 import *
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
opts = OPT(C=10, K0=1, K=2)
opts.init, opts.shuffle, opts.show_details = 'rand', False, True
opts.Dh, opts.Dw, opts.batch_size = 100, 29, -1
opts.lamb, opts.lamb0, opts.eta, opts.mu = 0.1, 0.1, 0.01, 0.1 #sparsity, label, low rank
# %% analysis result
record = torch.load('tunning_rough.pt')
n = len(record)
res = torch.rand(n, 3)
param = torch.rand(n,5)
for i,v in enumerate(record):
res[i] = torch.tensor(v[0]) # [n, acc, recall, f1]
param[i] = torch.tensor(v[1]) # [n, Dw, lamb, lamb0, eta, mu]
value, index = res.max(0)
print('max acc, recall, f1, vlaues :', value, '\n')
for i, v in enumerate(index):
print(f"max {['acc', 'recall', 'f1'][i]} index and vlaues :", res[v])
print([v], param[v], '\n')
# a function of given parameters to return the result tensors
def get_result(res, param, Dw=0, lamb=0, lamb0=0, eta=0, mu=0):
    """Select result rows whose hyper-parameters match the given filters.

    A filter value of 0 means "don't filter on this column"; otherwise only
    rows whose corresponding column equals the value are kept, e.g.
    ``get_result(res, param, Dw=7.)`` keeps every record with Dw == 7.

    param has shape [n_record, 5]; its columns are [Dw, lamb, lamb0, eta, mu].
    res has shape [n_record, 3]; its columns are [acc, recall, F1].

    :return: (res[kept], kept) where ``kept`` is an ascending LongTensor of
             row indices.  (The original returned a mix of torch tensors and
             numpy arrays depending on which filters were set; a tensor is
             always returned now -- same values, still valid as an index.)
    """
    n = param.shape[0]
    # Combine all active filters into one boolean mask instead of chaining
    # per-column np.intersect1d calls.
    mask = torch.ones(n, dtype=torch.bool)
    for col, value in enumerate((Dw, lamb, lamb0, eta, mu)):
        if value != 0:
            mask &= (param[:, col] == value)
    res_ind = torch.arange(n)[mask]
    return res[res_ind], res_ind
"Dw=29, lamb=0.1, lamb0=0.1, eta=0.01, mu=0.1"
"Dw=21, lamb=0.1, lamb0=1, eta=0.001, mu=1"
"Dw=21, lamb=0.1, lamb0=0.1, eta=0.001, mu=1"
r, idx = get_result(res, param, Dw=21, lamb=0.1, lamb0=0.1, eta=0.001, mu=1)
print(r)
print(param[idx])
for i in range(5):
if param[idx][:, i].unique().shape[0] >1 :
which_is_0 = i
break
fig = plt.figure()
fig.set_size_inches(w=6, h=4)
index = param[idx][:, which_is_0].sort()[1]
plt.plot(param[idx][:, which_is_0].sort()[0], r[:, -1][index], '-x')
#%%
record = torch.load('tunning.pt')
res = torch.zeros(80, 3)
para = torch.zeros(80, 5)
for i,r in enumerate(record):
res[i] = torch.tensor(r[0])
para[i] = torch.tensor(r[1])
res = res.reshape(5, 16, -1).mean(0)
para = para.reshape(5, 16, -1).mean(0)
v, i = res.sort()
"Dw=29, lamb=0.1, lamb0=0.1, eta=0.001, mu=0.1 is the best"
# %% compare with others' result
route = '/home/chenhao1/Matlab/WSCDL/'
# res = sio.loadmat(route+'res_knn.mat')
# res = res['Pre_Labels']
# res[res==-1]=0
# res = res.T
# metrics.f1_score(Y_test.cpu().flatten(), res.flatten())
with open(route+'you_raich_0.txt') as f:
data =f.readlines()
rec = []
prec = []
count = 0
for i, d in enumerate(data):
if d == 'rec =\n':
rec.append(float(data[i+2][4:10]))
count += 1
if d == 'prec =\n':
prec.append(float(data[i+2][4:10]))
if count == 10:
print(rec[-1])
print(prec[-1])
rec, prec = torch.tensor(rec), torch.tensor(prec)
f1 = 2/(1/rec+1/prec)
v, i = f1.sort()
# best lamb, winzize, N
# 10, 30, 200
# 10, 100, 10
# 10, 50, 50
#%% visualize learned atoms
param = str([opts.K, opts.K0, opts.Dw, opts.lamb, opts.lamb0, opts.eta , opts.mu])
D, D0, S, S0, W, opts, loss = \
torch.load('../saved_dicts/'+param+'DD0SS0Woptsloss.pt', map_location='cpu')
for i in range(10):
fig= plt.figure()
fig.set_size_inches(w=4, h=6)
d = D[i].permute(0,2,1).reshape(opts.Dh, opts.K*opts.Dw).cpu()
plt.imshow(d, aspect='auto', interpolation='None')
plt.title(f'Class {i} atoms')
# %% | [
"chenhao1@umbc.edu"
] | chenhao1@umbc.edu |
844f97b190cde2823b75910dc0064c3c08c8298b | cf6e1c6c27d269793e57119e1f86fec8533042f0 | /Day4/hemant_q1.py | 49895b2672d9529ea7247a990f4b22f0cd29f539 | [] | no_license | rashidkhalid1212/Blend_with_python | 621fea1668ded0d006649d97c5578bdbe902d79c | 0a829dbdf12fc8d3801fc3327bd23a275c9b32b6 | refs/heads/main | 2023-07-02T06:14:01.045457 | 2020-11-04T06:36:34 | 2020-11-04T06:36:34 | 308,340,768 | 0 | 0 | null | 2020-10-29T13:42:13 | 2020-10-29T13:42:12 | null | UTF-8 | Python | false | false | 359 | py | a=float(input("Enter 1st Number: "))
b=float(input("Enter 2nd number: "))
c=input("What type calculations you want to do?\n1. Type 'm' for Multiply\n2. Type 'd' for division\n3. Type 'a' for addition\n4. Type 's' for substraction. ")
if(c=='m'):
print(a*b)
elif(c=='d'):
print(a/b)
elif(c=='a'):
print(a+b)
elif(c=='s'):
print(a-b)
else:
print (-1) | [
"ksingh1617@gmail.com"
] | ksingh1617@gmail.com |
6c4072c302692caf0fc1eefcb3cc828a7315e998 | e56214188faae8ebfb36a463e34fc8324935b3c2 | /intersight/models/hyperflex_server_model_all_of.py | 00b213f2b14b684bad926afd34710dd805f3dc75 | [
"Apache-2.0"
] | permissive | CiscoUcs/intersight-python | 866d6c63e0cb8c33440771efd93541d679bb1ecc | a92fccb1c8df4332ba1f05a0e784efbb4f2efdc4 | refs/heads/master | 2021-11-07T12:54:41.888973 | 2021-10-25T16:15:50 | 2021-10-25T16:15:50 | 115,440,875 | 25 | 18 | Apache-2.0 | 2020-03-02T16:19:49 | 2017-12-26T17:14:03 | Python | UTF-8 | Python | false | false | 5,600 | py | # coding: utf-8
"""
Cisco Intersight
Cisco Intersight is a management platform delivered as a service with embedded analytics for your Cisco and 3rd party IT infrastructure. This platform offers an intelligent level of management that enables IT organizations to analyze, simplify, and automate their environments in more advanced ways than the prior generations of tools. Cisco Intersight provides an integrated and intuitive management experience for resources in the traditional data center as well as at the edge. With flexible deployment options to address complex security needs, getting started with Intersight is quick and easy. Cisco Intersight has deep integration with Cisco UCS and HyperFlex systems allowing for remote deployment, configuration, and ongoing maintenance. The model-based deployment works for a single system in a remote location or hundreds of systems in a data center and enables rapid, standardized configuration and deployment. It also streamlines maintaining those systems whether you are working with small or very large configurations. # noqa: E501
The version of the OpenAPI document: 1.0.9-1295
Contact: intersight@cisco.com
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from intersight.configuration import Configuration
# Auto-generated OpenAPI model: changes here will be lost on regeneration.
class HyperflexServerModelAllOf(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """
    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    openapi_types = {
        'server_model_entries': 'list[HyperflexServerModelEntry]',
        'app_catalog': 'HyperflexAppCatalog'
    }

    attribute_map = {
        'server_model_entries': 'ServerModelEntries',
        'app_catalog': 'AppCatalog'
    }

    def __init__(self,
                 server_model_entries=None,
                 app_catalog=None,
                 local_vars_configuration=None):  # noqa: E501
        """HyperflexServerModelAllOf - a model defined in OpenAPI"""  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        self._server_model_entries = None
        self._app_catalog = None
        # This model declares no oneOf/anyOf discriminator.
        self.discriminator = None

        if server_model_entries is not None:
            self.server_model_entries = server_model_entries
        if app_catalog is not None:
            self.app_catalog = app_catalog

    @property
    def server_model_entries(self):
        """Gets the server_model_entries of this HyperflexServerModelAllOf.  # noqa: E501


        :return: The server_model_entries of this HyperflexServerModelAllOf.  # noqa: E501
        :rtype: list[HyperflexServerModelEntry]
        """
        return self._server_model_entries

    @server_model_entries.setter
    def server_model_entries(self, server_model_entries):
        """Sets the server_model_entries of this HyperflexServerModelAllOf.


        :param server_model_entries: The server_model_entries of this HyperflexServerModelAllOf.  # noqa: E501
        :type: list[HyperflexServerModelEntry]
        """

        self._server_model_entries = server_model_entries

    @property
    def app_catalog(self):
        """Gets the app_catalog of this HyperflexServerModelAllOf.  # noqa: E501


        :return: The app_catalog of this HyperflexServerModelAllOf.  # noqa: E501
        :rtype: HyperflexAppCatalog
        """
        return self._app_catalog

    @app_catalog.setter
    def app_catalog(self, app_catalog):
        """Sets the app_catalog of this HyperflexServerModelAllOf.


        :param app_catalog: The app_catalog of this HyperflexServerModelAllOf.  # noqa: E501
        :type: HyperflexAppCatalog
        """

        self._app_catalog = app_catalog

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        # Recursively serialize nested models, lists and dicts.
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(
                    map(lambda x: x.to_dict()
                        if hasattr(x, "to_dict") else x, value))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(
                    map(
                        lambda item: (item[0], item[1].to_dict())
                        if hasattr(item[1], "to_dict") else item,
                        value.items()))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, HyperflexServerModelAllOf):
            return False

        return self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, HyperflexServerModelAllOf):
            return True

        return self.to_dict() != other.to_dict()
| [
"ucs-build@github.com"
] | ucs-build@github.com |
b4ed9c7379c08e759c855bff60427fc27ad90ca0 | e1ec811a380d624b3c3c95c88f6369248862ca52 | /matplotlib/Zadanie9.py | 2aab46e9021017e92557088607c0a3aaab3c79e8 | [] | no_license | michalj11121/Wd-155280 | ac53e138de089d9a53fc287582052ccd9ed224a2 | 7eee6bf2334c39ddf0eb93a555df40f1c241ea1a | refs/heads/master | 2022-08-21T16:27:27.506633 | 2020-05-31T17:43:01 | 2020-05-31T17:43:01 | 245,125,036 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 859 | py | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# Pie chart of order counts per salesperson, read from a ';'-separated CSV.
df= pd.read_csv("zamowienia.csv",header=0,delimiter=";")
# Count orders (idZamowienia) per salesperson (Sprzedawca).
group=df.groupby(['Sprzedawca']).agg({"idZamowienia":['count']})
sprzedawca=group.index.values
zamowienia=[group.values[y][0] for y in range(len(group.values))]
# Pull one wedge out of the pie for emphasis.
# NOTE(review): index 5 is hard-coded -- assumes the data contains at
# least six distinct salespeople; verify against the CSV.
Explode=[0 for i in range(len(group.index.values))]
Explode[5]=0.1
def prepare_label(pct, br):
    # Convert matplotlib's percentage back into an absolute order count.
    absolute = int(np.ceil(pct / 100. * np.sum(br)))
    return "{:.1f}% \n({}/{})".format(pct, absolute, sum(zamowienia))
wedges, texts, autotexts = plt.pie(zamowienia,explode=Explode, shadow=True,labels=sprzedawca,
                                   autopct=lambda pct: prepare_label(pct, zamowienia), textprops=dict(color="black"))
plt.setp(autotexts, size=8, weight="bold",rotation=-45)
plt.title(":)")
plt.legend(title='Sprzedawcy')
plt.show()
"noreply@github.com"
] | noreply@github.com |
ee7369e682c31cf9d17610f4c1b6a6cddd40b41f | 98ebd5d42c9badf37b377e83740317ab21d1b99a | /jogoForca.py | 94b9832da6c4597f3c1bbe5a2d19a77a61f82452 | [] | no_license | paola-rodrigues/JogoAdivinhacao | d21e4738a40219336c6af4c8461851e6d3291fb8 | b2a83f7d78f974cbe823ac2e5a0e42b64c0c0413 | refs/heads/main | 2023-08-17T04:14:43.556448 | 2021-10-01T12:47:56 | 2021-10-01T12:47:56 | 409,313,190 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,119 | py | import random
def imprime_mensagem_abertura():
    """Print the game's welcome banner."""
    borda = "**********************************"
    print(borda)
    print("****Bem-Vindo no jogo de Forca****!")
    print(borda)
def carrega_palavra_secreta():
    """Load the word list and return one word at random, upper-cased.

    Reads one candidate word per line from ``palavras.txt``.  A context
    manager replaces the original manual open/close pair, so the file is
    closed even if reading fails.
    """
    with open("palavras.txt", "r") as arquivo:
        palavras = [linha.strip() for linha in arquivo]
    # random.choice raises on an empty list just as the original
    # randrange(0, 0) did -- an empty word file is a setup error either way.
    return random.choice(palavras).upper()
def inicializa_letras_acertada(palavra):
    """Build the initial board state: one underscore per letter."""
    return ["_"] * len(palavra)
def jogar():
    """Run one full interactive round of hangman on stdin/stdout."""
    imprime_mensagem_abertura()
    palavra_secreta = carrega_palavra_secreta()
    letras_acertadas = inicializa_letras_acertada(palavra_secreta)
    enforcou = False
    acertou = False
    erros = 0
    # while not hanged and not guessed
    while(not acertou and not enforcou):
        chute = input ("Qual letra? ")
        chute = chute.strip().upper()
        if(chute in palavra_secreta):
            # Reveal every position where the guessed letter occurs.
            index = 0
            for letra in palavra_secreta:
                if(chute.upper() == letra.upper()):
                    letras_acertadas[index] =letra
                index += 1
        else:
            erros += 1
            desenha_forca(erros)
        # Seven wrong guesses completes the gallows drawing and ends the game.
        enforcou = erros == 7
        acertou = "_" not in letras_acertadas
        print(letras_acertadas)
    if(acertou):
        imprime_mensagem_vencedor()
    else:
        imprime_mensagem_perdedor(palavra_secreta)
    print("Fim do jogo")
def imprime_mensagem_vencedor():
    """Print the congratulation message and trophy ASCII art."""
    print("Parabéns, você ganhou!")
    print(" ___________ ")
    print(" '._==_==_=_.' ")
    print(" .-\\: /-. ")
    print(" | (|:. |) | ")
    print(" '-|:. |-' ")
    print(" \\::. / ")
    print(" '::. .' ")
    print(" ) ( ")
    print(" _.' '._ ")
    print(" '-------' ")
def imprime_mensagem_perdedor(palavra_secreta):
    """Print the game-over message, reveal the word and draw the skull art."""
    print("Puxa, você foi enforcado!")
    print("A palavra era {}".format(palavra_secreta))
    print(" _______________ ")
    print(" / \ ")
    print(" / \ ")
    print("// \/\ ")
    print("\| XXXX XXXX | / ")
    print(" | XXXX XXXX |/ ")
    print(" | XXX XXX | ")
    print(" | | ")
    print(" \__ XXX __/ ")
    print(" |\ XXX /| ")
    print(" | | | | ")
    print(" | I I I I I I I | ")
    print(" | I I I I I I | ")
    print(" \_ _/ ")
    print(" \_ _/ ")
    print(" \_______/ ")
def desenha_forca(erros):
    """Draw the gallows for the given error count (expected 1..7).

    Each branch prints the four body lines matching that stage; the top
    beam and the base are printed unconditionally.
    """
    print(" _______ ")
    print(" |/ | ")
    if(erros == 1):
        print(" | (_) ")
        print(" | ")
        print(" | ")
        print(" | ")
    if(erros == 2):
        print(" | (_) ")
        print(" | \ ")
        print(" | ")
        print(" | ")
    if(erros == 3):
        print(" | (_) ")
        print(" | \| ")
        print(" | ")
        print(" | ")
    if(erros == 4):
        print(" | (_) ")
        print(" | \|/ ")
        print(" | ")
        print(" | ")
    if(erros == 5):
        print(" | (_) ")
        print(" | \|/ ")
        print(" | | ")
        print(" | ")
    if(erros == 6):
        print(" | (_) ")
        print(" | \|/ ")
        print(" | | ")
        print(" | / ")
    if (erros == 7):
        print(" | (_) ")
        print(" | \|/ ")
        print(" | | ")
        print(" | / \ ")
    print(" | ")
    print("_|___ ")
    print()
jogar()
| [
"paola.n.rodrigues@gmail.com"
] | paola.n.rodrigues@gmail.com |
e7f98660ed5a6c6d386e1f664e87461a5ba04dc1 | 93e65e85fade65db1705bf0f8958c2d1acc01bb2 | /dataset/lsun/data.py | 3c80c655391d48bce9839bd85d549c4910cc6bb6 | [] | no_license | SunJiamei/GANExercise | 530b9db7175c6bc18367abf63fd0a42c73674883 | cfc3ce66d204a03d7d60b5bd038a0fc3cd8f82b9 | refs/heads/master | 2023-04-09T05:02:52.238289 | 2021-04-21T03:26:58 | 2021-04-21T03:26:58 | 346,563,923 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,610 | py | from __future__ import print_function
import argparse
import cv2
import lmdb
import numpy
import os
from os.path import exists, join
__author__ = 'Fisher Yu'
__email__ = 'fy@cs.princeton.edu'
__license__ = 'MIT'
def view(db_path):
    """Interactively display the images stored in an LSUN LMDB database.

    Opens a single OpenCV window and steps through the database cursor;
    any key advances to the next image, ESC (keycode 27) quits.

    :param db_path: path of the LMDB database directory.
    """
    print('Viewing', db_path)
    # Fixed user-facing typo: "exist" -> "exit".
    print('Press ESC to exit or SPACE to advance.')
    window_name = 'LSUN'
    cv2.namedWindow(window_name)
    # map_size is the maximum database size (1 TiB); opened read-only.
    env = lmdb.open(db_path, map_size=1099511627776,
                    max_readers=100, readonly=True)
    with env.begin(write=False) as txn:
        cursor = txn.cursor()
        for key, val in cursor:
            print('Current key:', key)
            # numpy.frombuffer replaces the long-deprecated
            # numpy.fromstring; both yield a uint8 view of the JPEG bytes.
            img = cv2.imdecode(
                numpy.frombuffer(val, dtype=numpy.uint8), 1)
            cv2.imshow(window_name, img)
            c = cv2.waitKey()
            if c == 27:
                break
def export_images(db_path, out_dir, flat=False, limit=-1, resize=64):
    """Export every image stored in an LSUN LMDB database to *out_dir*.

    :param db_path: path of the LMDB database directory.
    :param out_dir: destination directory; created on demand.
    :param flat: if False, images are sharded into sub-directories named by
        the first six characters of each key; if True they all go directly
        into out_dir.
    :param limit: stop after this many images (-1 means no limit).
    :param resize: if > 0, resize each image to (resize, resize) using
        INTER_AREA; if <= 0, keep the original resolution.
    """
    print('Exporting', db_path, 'to', out_dir)
    env = lmdb.open(db_path, map_size=1099511627776,
                    max_readers=100, readonly=True)
    count = 0
    with env.begin(write=False) as txn:
        cursor = txn.cursor()
        for key, val in cursor:
            key = key.decode('utf-8')
            if not flat:
                image_out_dir = join(out_dir, '/'.join(key[:6]))
            else:
                image_out_dir = out_dir
            if not exists(image_out_dir):
                os.makedirs(image_out_dir)
            image_out_path = join(image_out_dir, key + '.jpg')
            # numpy.frombuffer replaces the long-deprecated
            # numpy.fromstring (removed the dead raw-write code path too).
            img = cv2.imdecode(
                numpy.frombuffer(val, dtype=numpy.uint8), 1)
            if resize > 0:
                img = cv2.resize(img, (resize, resize),
                                 interpolation=cv2.INTER_AREA)
            cv2.imwrite(image_out_path, img)
            count += 1
            if count == limit:
                break
            if count % 1000 == 0:
                print('Finished', count, 'images')
def main():
    """Parse command-line arguments and dispatch to view/export.

    Usage: ``data.py {view,export} DB_PATH [DB_PATH ...] [--out_dir D]
    [--flat] [--resize N]``.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('command', nargs='?', type=str,
                        choices=['view', 'export'],
                        help='view: view the images in the lmdb database '
                             'interactively.\n'
                             'export: Export the images in the lmdb databases '
                             'to a folder. The images are grouped in subfolders'
                             ' determinted by the prefiex of image key.')
    parser.add_argument('lmdb_path', nargs='+', type=str,
                        help='The path to the lmdb database folder. '
                             'Support multiple database paths.')
    parser.add_argument('--out_dir', type=str, default='')
    parser.add_argument('--flat', action='store_true',
                        help='If enabled, the images are imported into output '
                             'directory directly instead of hierarchical '
                             'directories.')
    parser.add_argument('--resize', type=int, default=64, help='if larger than 0, then the images will be resized to '
                                                               'the shape (resize, resize)')
    args = parser.parse_args()
    command = args.command
    lmdb_paths = args.lmdb_path
    # Each positional path is processed independently with the same options.
    for lmdb_path in lmdb_paths:
        if command == 'view':
            view(lmdb_path)
        elif command == 'export':
            # NOTE(review): limit is hard-coded to -1 (no limit); there is
            # no --limit flag exposing export_images' limit parameter.
            export_images(lmdb_path, args.out_dir, args.flat, limit=-1, resize=args.resize)
| [
"sunjiamei.hit@gmail.com"
] | sunjiamei.hit@gmail.com |
28433f824005544abf60673f5477c97075e02b7a | e2e1e2c27f9a9c13e7492fca93080ea093af7ae5 | /COMP309/a4/Submission/part3/LinearRegression/src/simple_linear_regression.py | 1f21dfbd940de5bc03e7fb0645ba5a97bd368ff4 | [] | no_license | David-B-91/uni | 30ebd03ab065ef96b5cc5033bcac1cbb7a46cab9 | 341c1d5b85f30bdfdfd977627f16aa862a48e923 | refs/heads/master | 2022-03-21T17:01:18.540959 | 2019-11-21T06:33:05 | 2019-11-21T06:33:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,706 | py | # -*- coding: utf-8 -*-
"""
This is an example to perform simple linear regression algorithm on the dataset (weight and height),
where x = weight and y = height.
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import datetime
import random
from sklearn.linear_model import LinearRegression
from utilities.losses import compute_loss
from utilities.optimizers import gradient_descent, pso, mini_batch_gradient_descent
from sklearn.model_selection import train_test_split
# General settings
from utilities.visualization import visualize_train, visualize_test
seed = 309
# Freeze the random seed
random.seed(seed)
np.random.seed(seed)
train_test_split_test_size = 0.3
# Training settings
alpha = 0.1 # step size
max_iters = 50 # max iterations
def load_data():
    """Read the two experiment CSV files.

    :return: (clean, with_outliers) tuple of pandas DataFrames
    """
    clean = pd.read_csv("../data/Part2.csv")
    with_outliers = pd.read_csv("../data/Part2Outliers.csv")
    return clean, with_outliers
def data_preprocess(data):
    """
    Data preprocess:
        1. Split the entire dataset into train and test
        2. Split outputs and inputs
        3. Standardize train and test
        4. Add intercept dummy for computation convenience
    :param data:        the given dataset (format: panda DataFrame)
    :return: train_data                       train data contains only inputs
             train_labels                     train data contains only labels
             test_data                        test data contains only inputs
             test_labels                      test data contains only labels
             train_data_full             train data (full) contains both inputs and labels
             test_data_full               test data (full) contains both inputs and labels
    """
    # Split the data into train and test (ratio from module-level
    # train_test_split_test_size; seed frozen at module import time)
    train_data, test_data = train_test_split(data, test_size = train_test_split_test_size)
    # Pre-process data (both train and test): "Height" is the target,
    # everything else is input
    train_data_full = train_data.copy()
    train_data = train_data.drop(["Height"], axis = 1)
    train_labels = train_data_full["Height"]
    test_data_full = test_data.copy()
    test_data = test_data.drop(["Height"], axis = 1)
    test_labels = test_data_full["Height"]
    # Standardize the inputs -- the test set is scaled with the TRAIN
    # mean/std, which avoids leaking test statistics into training
    train_mean = train_data.mean()
    train_std = train_data.std()
    train_data = (train_data - train_mean) / train_std
    test_data = (test_data - train_mean) / train_std
    # Tricks: add dummy intercept to both train and test so the bias term
    # can be learned as just another weight
    train_data['intercept_dummy'] = pd.Series(1.0, index = train_data.index)
    test_data['intercept_dummy'] = pd.Series(1.0, index = test_data.index)
    return train_data, train_labels, test_data, test_labels, train_data_full, test_data_full
def learn(y, x, theta, max_iters, alpha, optimizer_type, metric_type):
    """Fit the regression parameters with the requested optimizer.

    :param y: train labels
    :param x: train data
    :param theta: initial model parameter vector
    :param max_iters: maximum number of training iterations
    :param alpha: step size
    :param optimizer_type: "BGD", "MiniBGD" or "PSO"
    :param metric_type: loss to optimize (MSE, RMSE, R2, MAE; MAE is not
                        differentiable everywhere, so only PSO handles it)
    :return: (thetas, losses) — parameters and losses tracked during training
    :raises ValueError: when optimizer_type is not recognized
    """
    if optimizer_type == "BGD":
        thetas, losses = gradient_descent(y, x, theta, max_iters, alpha, metric_type)
    elif optimizer_type == "MiniBGD":
        thetas, losses = mini_batch_gradient_descent(y, x, theta, max_iters, alpha, metric_type, mini_batch_size = 10)
    elif optimizer_type == "PSO":
        thetas, losses = pso(y, x, theta, max_iters, 100, metric_type)
    else:
        raise ValueError(
            "[ERROR] The optimizer '{ot}' is not defined, please double check and re-run your program.".format(
                ot = optimizer_type))
    print('\n')
    print(losses)
    print('\n')
    print(thetas)
    return thetas, losses
if __name__ == '__main__':
    # Entry point: configure the run, train on the outlier dataset, then
    # report test-set metrics and show the fitted-line plot.
    # Settings
    metric_type = "MAE" # MSE, RMSE, MAE, R2
    optimizer_type = "PSO" # PSO, BGD, MiniBGD
    # Step 1: Load Data
    data, data_outliers = load_data()
    # Step 2: Preprocess the data
    # NOTE(review): training uses the *outlier* dataset, not the clean one —
    # confirm this is intentional.
    train_data, train_labels, test_data, test_labels, train_data_full, test_data_full = data_preprocess(data_outliers)
    # Step 3: Learning Start
    theta = np.array([0.0, 0.0]) # Initialize model parameter
    start_time = datetime.datetime.now() # Track learning starting time
    thetas, losses = learn(train_labels.values, train_data.values, theta, max_iters, alpha, optimizer_type, metric_type)
    end_time = datetime.datetime.now() # Track learning ending time
    exection_time = (end_time - start_time).total_seconds() # Track execution time
    # Step 4: Results presentation
    print("Learn: execution time={t:.3f} seconds".format(t = exection_time))
    # Build baseline model
    # NOTE(review): compute_loss, np, datetime and plt are not imported in
    # the visible header — presumably imported above this chunk; verify.
    print("R2:", -compute_loss(test_labels.values, test_data.values, thetas[-1], "R2")) # R2 should be maximize
    print("MSE:", compute_loss(test_labels.values, test_data.values, thetas[-1], "MSE"))
    print("RMSE:", compute_loss(test_labels.values, test_data.values, thetas[-1], "RMSE"))
    print("MAE:", compute_loss(test_labels.values, test_data.values, thetas[-1], "MAE"))
    niter = max_iters
    # visualize_train(train_data_full, train_labels, train_data, thetas, losses, niter)
    visualize_test(test_data_full, test_data, thetas)
    plt.show()
| [
"43230632+ImaDaveDave@users.noreply.github.com"
] | 43230632+ImaDaveDave@users.noreply.github.com |
5a7260bc899debcc1c45d30948eebd5d78510a4b | da91c0cc53de46f170cc219553cfa785f3bb8868 | /search/management/commands/init_db.py | 9ffe38cc0ac6e0bcb3ef3f45010a27fc401adcfb | [] | no_license | naomie-co/OC_P11 | acded3f746d4db2a8b2d0fa647a55b9c0656cac8 | 0bf2a64a1bfcf9eac19a5a96618124fa897b4ec4 | refs/heads/main | 2022-12-28T21:49:26.879014 | 2020-10-14T15:32:44 | 2020-10-14T15:32:44 | 301,518,171 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,864 | py | """Script to initialize the platform database from the OpenFoodFacts API"""
import requests
from django.core.management.base import BaseCommand
from search.models import categorie, op_food, substitute
from search.const import CATEGORIES
class Command(BaseCommand):
    """Management command that (re)initializes the platform database.

    Deletes the existing ``categorie``, ``op_food`` and ``substitute`` rows,
    then reloads categories from CATEGORIES and products from the
    OpenFoodFacts search API.
    """
    def __init__(self):
        """Prepare the shared OpenFoodFacts request parameters."""
        # BUGFIX: BaseCommand.__init__ was skipped; it wires up the command's
        # stdout/stderr/style plumbing, so it must run.
        super().__init__()
        self.url = 'https://world.openfoodfacts.org/cgi/search.pl'
        self.param = {
            'action':'process',
            'tagtype_0':'categories',
            'tag_contains_0':'contains',
            'tag_0':'',  # filled in per request by request_product()
            'page_size':50,
            'json':1
        }
    def categorie_db(self):
        """Insert every name from CATEGORIES into the categorie table.

        :return: the last categorie instance created (None when the
                 CATEGORIES list is empty)
        """
        cat = None
        for elt in CATEGORIES:
            cat = categorie()
            cat.name = elt
            cat.save()
        return cat
    def request_product(self, tag):
        """Fetch up to 41 products for one category tag from the OFF API.

        API records missing any required field are skipped (and logged).

        :param tag: category name used as the API search tag
        :return: list of [name, nutriscore, ingredients, nutrition image url,
                 product image url, product url] lists
        """
        self.param["tag_0"] = tag
        request = requests.get(self.url, params=self.param)
        result = request.json()
        data = []
        for val in result["products"]:
            try:
                data.append([val["product_name_fr"],\
                val["nutrition_grades"], val["ingredients_text_fr"],\
                val["image_nutrition_url"], val["image_url"], val["url"]])
            except KeyError:
                # Incomplete API record: skip it but keep collecting.
                print("Erreur dans la réception des données : ", val)
                continue
            if len(data) > 40:
                break
        return data
    def search_product(self):
        """Query the API for every stored category and persist the products."""
        for cat in categorie.objects.all():
            for value in self.request_product(cat.name):
                new_values = op_food(categorie=cat, \
                name=value[0], nutriscore=value[1], ingredient=value[2], \
                picture_100g=value[3], picture=value[4], url=value[5])
                new_values.save()
    def delete_data(self):
        """Empty the categorie, op_food and substitute tables."""
        categorie.objects.all().delete()
        op_food.objects.all().delete()
        substitute.objects.all().delete()
    def handle(self, *args, **options):
        """Django entry point: wipe, then rebuild the database."""
        self.delete_data()
        self.categorie_db()
        self.search_product()
| [
"naomie.colombo@gmail.com"
] | naomie.colombo@gmail.com |
1f041b1eff52d1d5f5e2b5a9f2ff0bb67bfcb298 | db58eeac603cf4f5ef5a50b7a07ff466a8627ad5 | /our_story/app_chat/views.py | 4b3fbe7bea7f16305660bd73f2775963a87d9600 | [] | no_license | moning02004/mybook | 4ba0f26870a4e4289452f98bbfef9d85b15610ec | fc8d35862e90af2fe8f34a76d56cda8ad7b2b485 | refs/heads/master | 2020-06-25T18:39:02.856696 | 2020-01-25T23:55:41 | 2020-01-25T23:55:41 | 187,236,130 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,929 | py | from operator import itemgetter
from django.contrib.auth.models import User
from django.db.models import Q, Sum
from django.http import HttpRequest, JsonResponse, Http404
from django.shortcuts import render, redirect
from .models import MessageBox
def index(request):
    """Render the chat overview: one entry per conversation partner,
    newest conversation first. Anonymous users are sent to the main page."""
    assert isinstance(request, HttpRequest)
    if not request.user.is_authenticated:
        return redirect('app_main:index')
    conversations = list()
    for box in request.user.message_box_user.all():
        unread_total = box.message.all().filter(to_user=request.user).aggregate(sum=Sum('unread'))['sum']
        newest = box.message.last()
        entry = {
            'user': box.friend,
            # Sum() is None when no messages match; show 0 instead.
            'unread': unread_total if unread_total is not None else 0,
            'last_message': newest.content,
            'last_time': newest.created,
        }
        conversations.append(entry)
    conversations.sort(key=itemgetter('last_time'))
    return render(request, 'app_chat/index.html', {'message_list': reversed(conversations)})
def message(request, friend, me):
    """Render one conversation thread and mark its unread messages as read.

    :param friend: username of the conversation partner
    :param me: username expected to belong to the requesting user
    :raises Http404: when the logged-in user is neither ``friend`` nor ``me``
    """
    assert isinstance(request, HttpRequest)
    if not request.user.username in [friend, me]: raise Http404
    try:
        friend = User.objects.get(username=friend)
        query = Q(user=request.user) & Q(friend=User.objects.get(username=friend))
        message_box = MessageBox.objects.get(query)
    # NOTE(review): bare except — presumably intended to catch DoesNotExist
    # when the pair has no mailbox yet, but any error takes this path too;
    # consider narrowing.
    except:
        # First contact: create the two symmetric mailboxes.
        message_box = MessageBox.objects.create(user=request.user, friend=friend)
        MessageBox.objects.create(user=friend, friend=request.user)
    # Ordered by `unread`, walked in reverse: once a message with unread == 0
    # is reached, everything that follows is already read, so stop early.
    for message in reversed(message_box.message.all().filter(to_user=request.user).order_by('unread')):
        if message.unread == 0: break
        message.unread = 0
        message.save()
    return render(request, 'app_chat/message.html', {'friend': User.objects.get(username=friend).profile.get_name(), 'message_list': message_box.message.all()})
| [
"moning02004@naver.com"
] | moning02004@naver.com |
e7b44d764adac0f9bb1fcfc066db793aee0cbfb8 | 7fd67691f37471363ab2bc6c08e9d7697af3509e | /reverse_and_reverse.py | 38e90f618d86b13ece86d1a9dd230dd0a1fa93c6 | [] | no_license | dishiao/leetCode | c992a6a220ec73b907a709220a785645b1513407 | fa31c8a5ac9940e9e4e03f63520d8f16dcd5f81f | refs/heads/master | 2020-05-16T08:16:45.793223 | 2019-10-21T02:47:00 | 2019-10-21T02:47:00 | 182,903,641 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 453 | py | class Solution:
def flipAndInvertImage(self, A: List[List[int]]) -> List[List[int]]:
listA = []
for a in A:
#水平翻转
a = a[::-1]
#反转图片
listB = []
for b in a:
if b == 1:
b=0
elif b == 0:
b=1
listB.append(b)
listA.append(listB)
return listA
| [
"729268381@qq.com"
] | 729268381@qq.com |
b4cc44a8dc286587afe58cb6e7afd5a3a49ca17c | ff156385bfe5af19409baede4584035b7ab8e3b2 | /bookshelf/bookshelf_app/models.py | 73149245134db338e66982bef20cbb4ff3e0e50e | [] | no_license | Synnysyn/bookshelf | 0f998e65a7a2faad813a67e44602b26e0b1207fb | dba55a6cd6a4ea7a8f0939f883fdca29f875fba0 | refs/heads/main | 2023-04-17T21:24:25.578910 | 2021-05-10T12:14:41 | 2021-05-10T12:14:41 | 364,503,338 | 0 | 0 | null | 2021-05-10T12:14:42 | 2021-05-05T08:03:49 | null | UTF-8 | Python | false | false | 454 | py | from django.db import models
class Book(models.Model):
    """A single book on the shelf.

    - title - CharField (max 64)
    - author - CharField (max 64)
    - description - TextField (max 1000)
    - added - DateTimeField, set automatically on first save
    """
    title = models.CharField(max_length=64)
    author = models.CharField(max_length=64)
    description = models.TextField(max_length=1000)
    # auto_now_add: stamped once at creation, never updated afterwards.
    added = models.DateTimeField(auto_now_add=True)
    def __str__(self):
        # e.g. '"Dune" by Frank Herbert'
        return f'"{self.title}" by {self.author}'
"69205616+Synnysyn@users.noreply.github.com"
] | 69205616+Synnysyn@users.noreply.github.com |
3c1d722e1c738d4f343a7ee14495e90cd1acee39 | 453507707e8beb5d06a1338ac126ca7ef7f9cf35 | /dns_resolver/my_dns_script.py | 1675e619bfba992f635488142a76d0cadaab92af | [] | no_license | singhshivam55/Projects | 4406a51c0d7d1ce3c8fb84a637d8657460c1eaa1 | 252ff70a8abb58191307e8c2a7e9aa13562309b3 | refs/heads/main | 2023-01-07T22:44:17.297302 | 2020-11-10T09:42:35 | 2020-11-10T09:42:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,403 | py | import json
import dns.resolver
class Dns_resolver:
    """Collects DNS records for a fixed list of domains and dumps them to JSON."""
    def __init__(self):
        # No state is prepared up front; everything lives in the methods.
        pass
    def get_records(self, domain):
        """Query ``domain`` for each record type and collect the answers.

        Record types that fail to resolve are silently skipped ('ALIAS' is
        not a standard DNS rrtype, so it always falls into that path).

        :param domain: domain name to resolve
        :return: list of "TYPE rdata-text" strings
        """
        record_types = [
            'A',
            'AAAA',
            'ALIAS',
            'NS',
            'CNAME',
            'SOA',
            'MX',
            'TXT',
            'SRV',
            'PTR',
        ]
        records = []
        for record_type in record_types:
            try:
                # NOTE(review): dnspython 2.x deprecated resolver.query() in
                # favor of resolver.resolve() — confirm the pinned version.
                data = dns.resolver.query(domain, record_type)
                for rdata in data:
                    records.append(record_type + ' ' + rdata.to_text())
            # Deliberate best-effort: any failure just means no entries
            # for that record type.
            except Exception:
                pass
        return records
    def dns_domain(self):
        """Resolve the hard-coded domain list and write domain.json.

        NOTE(review): ``commands`` is keyed by record type, so when a domain
        has several records of the same type only the last one survives.

        :return: mapping {domain: {record type: [rdata fields...]}}
        """
        domains = ['google.com', 'baidu.com', 'amazon.com'] # Insert domains required
        dict_record = {}
        for domain in domains:
            records = self.get_records(domain)
            commands = {}
            for record in records:
                command, description = record.strip().split(None, 1)
                commands[command] = description.split()
            dict_record[domain] = commands
        with open("domain.json", 'w') as json_file:
            json.dump(dict_record, json_file, indent=2)
        return dict_record
if __name__ == "__main__":
    Dns_resolver().dns_domain()
| [
"tshubham97@users.noreply.github.com"
] | tshubham97@users.noreply.github.com |
4fbda2699b9145b694ef3f7a10590380ae779cad | f3b233e5053e28fa95c549017bd75a30456eb50c | /mcl1_input/L54/54-23_MD_NVT_rerun/set_4.py | 5cd6d8796cee46fdf49e2b9f80b0d39eff8896aa | [] | no_license | AnguseZhang/Input_TI | ddf2ed40ff1c0aa24eea3275b83d4d405b50b820 | 50ada0833890be9e261c967d00948f998313cb60 | refs/heads/master | 2021-05-25T15:02:38.858785 | 2020-02-18T16:57:04 | 2020-02-18T16:57:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 740 | py | import os
# Job-preparation script: for each lambda window, instantiate the production
# input file and PBS script from the templates by substituting the lambda
# value for the XXX placeholder.
# NOTE: `dir` shadows the builtin of the same name — harmless here, but worth
# renaming if this script grows.
dir = '/mnt/scratch/songlin3/run/mcl1/L54/MD_NVT_rerun/ti_one-step/54_23/'
filesdir = dir + 'files/'
temp_prodin = filesdir + 'temp_prod_4.in'
temp_pbs = filesdir + 'temp_4.pbs'
# Lambda schedule for the thermodynamic-integration windows.
lambd = [ 0.00922, 0.04794, 0.11505, 0.20634, 0.31608, 0.43738, 0.56262, 0.68392, 0.79366, 0.88495, 0.95206, 0.99078]
for j in lambd:
    # Each window has its own directory named after the lambda value.
    os.chdir("%6.5f" %(j))
    workdir = dir + "%6.5f" %(j) + '/'
    #prodin: copy the template and substitute the lambda value in place.
    prodin = workdir + "%6.5f_prod_4.in" %(j)
    os.system("cp %s %s" %(temp_prodin, prodin))
    os.system("sed -i 's/XXX/%6.5f/g' %s" %(j, prodin))
    #PBS: same copy-and-substitute for the batch script.
    pbs = workdir + "%6.5f_4.pbs" %(j)
    os.system("cp %s %s" %(temp_pbs, pbs))
    os.system("sed -i 's/XXX/%6.5f/g' %s" %(j, pbs))
    #submit pbs (left disabled so the scripts can be inspected first)
    #os.system("qsub %s" %(pbs))
    os.chdir(dir)
| [
"songlin3@msu.edu"
] | songlin3@msu.edu |
11d67fff98cefb075e7dbd155b00d6bef217ab3f | dd1ee9195c4d7b3a09fe31c7d26a31e26a4e2725 | /laplacianFilter.py | 2e0da5d737c0351dcd2caf9bbdaf2ee0f135995f | [
"MIT"
] | permissive | krishna1401/Digital-Image-Processing | 0245175411c90055c2e5ce1b3cc221227c606cba | 47a4da4bef9d08708ac84174b0fcd0ced6a8b5e2 | refs/heads/master | 2020-07-10T06:20:16.361033 | 2019-11-06T18:32:37 | 2019-11-06T18:32:37 | 204,191,421 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,112 | py | #Perform Laplacian Filtering on a Image
import cv2
def laplacianFilteredImage(image, mask):
    """Sharpen: add the Laplacian mask back onto the grayscale image.

    :param image: BGR input image (converted to grayscale internally)
    :param mask: Laplacian response, same height/width as the image
    :return: sharpened grayscale image (uint8)
    """
    image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) #Converting Image to Gray Scale
    resultant_image = image.copy()
    for i in range(0, image.shape[0]):
        for j in range(0, image.shape[1]):
            # BUGFIX: uint8 addition wraps around (e.g. 200 + 100 -> 44).
            # Promote to Python int, then clamp to the valid pixel range.
            value = int(image[i, j]) + int(mask[i, j])
            resultant_image[i, j] = max(0, min(255, value))
    return resultant_image
def positiveLaplacianFilter(image):
    """Apply the positive Laplacian operator to an image.

    Kernel:
        0  1  0
        1 -4  1
        0  1  0

    :param image: BGR input image (converted to grayscale internally)
    :return: Laplacian response clamped to [0, 255] (uint8, borders untouched)
    """
    image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) #Converting Image to Gray Scale
    mask = image.copy()
    for i in range(1,image.shape[0]-1):
        for j in range(1,image.shape[1]-1):
            # BUGFIX: uint8 sums wrap around and the Laplacian can be
            # negative — compute exactly in Python ints, then clamp.
            center = int(image[i, j])
            response = (int(image[i+1, j]) + int(image[i-1, j])
                        + int(image[i, j+1]) + int(image[i, j-1])
                        - 4 * center)
            mask[i, j] = max(0, min(255, response))
    return mask
def negativeLaplacianFilter(image):
    """Apply the negative Laplacian operator to an image.

    Kernel:
        0 -1  0
       -1  4 -1
        0 -1  0

    :param image: BGR input image (converted to grayscale internally)
    :return: Laplacian response clamped to [0, 255] (uint8, borders untouched)
    """
    image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) #Converting Image to Gray Scale
    mask = image.copy()
    for i in range(1,image.shape[0]-1):
        for j in range(1,image.shape[1]-1):
            # BUGFIX: uint8 subtraction wraps around and the response can be
            # negative — compute exactly in Python ints, then clamp.
            center = int(image[i, j])
            response = (4 * center
                        - int(image[i+1, j]) - int(image[i-1, j])
                        - int(image[i, j+1]) - int(image[i, j-1]))
            mask[i, j] = max(0, min(255, response))
    return mask
# Demo: run the positive Laplacian over image2.jpg, sharpen with it and show
# the result until any key is pressed.
img = cv2.imread('image2.jpg')
mask = positiveLaplacianFilter(img)
output = laplacianFilteredImage(img, mask)
cv2.imshow('image',output)
cv2.waitKey(0)
cv2.destroyAllWindows()
| [
"krishna.dduc@gmail.com"
] | krishna.dduc@gmail.com |
854a9ef00d9b3d18ea058a44ef58c188666c6094 | 9e110e4ab763ad3f93fa531949fc9696885f8f2c | /interface_graphique.py | 4276599d24630505ae7607b9dbc44cdb1a204e94 | [] | no_license | psychoumayma/calcul_mental | 72a714159a96997ba2f492e7b58eaf38a24b6412 | f72b022614282fb8cfc315877e970847ec3d47ff | refs/heads/master | 2021-01-25T04:26:47.877248 | 2017-06-05T21:21:48 | 2017-06-05T21:21:48 | 93,444,302 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,440 | py | from tkinter import*
from random import randint
from random import choice
import jeu
# Main window plus the shared widgets and game counters used by the
# question/answer flow below.
fenetre = Tk()
# NOTE(review): place() returns None, so FRAME is None and the labels below
# are parented to the default root window rather than a Frame — verify.
FRAME=Frame(fenetre, width=100, height =50).place(x=700,y=400)
LABEL=Label(FRAME, text='', font = 'Papyrus 20 bold', fg="blue")
BRAVO=Label(FRAME, text='', font = 'Papyrus 20 bold', fg="green")
DOMMAGE=Label(FRAME, text='', font = 'Papyrus 20 bold', fg="red")
ent = Entry(fenetre)
# Score: correct answers within the current level; Total: cumulative score;
# Niveau: current difficulty level (starts at 1).
Score = 0
Total = 0
Niveau = 1
# Draws the answer box where the user types a response; pressing Return
# submits the answer.
def affiche_zone_reponse():
    """Show the answer Entry widget and bind <Return> to submission."""
    ent.insert(0, '')
    ent.pack(side=LEFT, fill=X)
    ent.bind('<Return>', reponse)
# Reads the typed answer and hands it to the checker.
def reponse(event):
    """<Return> handler: parse the Entry content as an int and grade it.

    NOTE(review): int() raises ValueError on non-numeric input, which
    surfaces as a Tkinter callback traceback — consider validating first.
    """
    x= int(ent.get())
    test_reponse(x)
# Checks the submitted answer, updates the score/level state and shows a
# green "bravo" or red "try again" label accordingly.
def test_reponse(reponse):
    """Grade ``reponse`` against the current question and advance the game.

    Uses the module-level Score/Niveau/Total counters: five correct answers
    advance one level; after level 3 the cumulative total is printed.
    NOTE: the parameter shadows the module-level reponse() handler.

    :param reponse: the integer the player typed
    """
    global Score
    global Niveau
    global Total
    if reponse==jeu.reponse(jeu.Question[0:2], jeu.Question[2]):
        Score = Score+1
        DOMMAGE.place_forget()
        BRAVO.config(text="Bonne Réponse BRAVO!")
        BRAVO.place(x=120, y=320)
        if Score == 5:
            Niveau += 1
            Total += Score
            Score = 0
        if Niveau <4:
            question()
        else:
            # End of the game: only the total is printed, no end screen.
            print(Total)
    else:
        BRAVO.place_forget()
        DOMMAGE.config(text="FAUX! Essayez encore!")
        DOMMAGE.place(x=120, y=320)
# Formats the current question and shows it in the main label.
def affiche_question():
    """Render jeu.Question (operands + operation code) into LABEL."""
    gauche, droite, operation = jeu.Question[0], jeu.Question[1], jeu.Question[2]
    if operation == 0:
        texte = "Calculer la somme " + str(gauche) + '+' + str(droite)
    elif operation == 1:
        texte = "Calculer la différence " + str(gauche) + '-' + str(droite)
    elif operation == 2:
        texte = "Calculer le produit de " + str(gauche) + ' et ' + str(droite)
    elif operation == 3:
        texte = "Calculer le quotient de la division euclidienne " + str(gauche) + '/' + str(droite)
    LABEL.config(text=texte)
    LABEL.pack(side=LEFT)
# Shows both parts of the game screen: the question label and the
# answer entry box.
def affiche_jeu():
    """Render the question and the answer field together."""
    affiche_question()
    affiche_zone_reponse()
# Copies the content of a StringVar into a label.
def zone_de_saisie(label, stringvar):
    """Set ``label``'s text to ``stringvar``'s current value."""
    label.config(text=stringvar.get())
# Picks an operation allowed at the current level and displays a new question.
def question():
    """Generate the next question for the current level and show it.

    Level 1 excludes division (operation code 3).
    jeu.adapt_level presumably scales the operand range with the level —
    verify in jeu.py.
    """
    if Niveau == 1:
        x =[0,1,2]
    else:
        x =[0,1,2,3]
    operation = choice(x)
    n = jeu.adapt_level(Niveau, operation)
    jeu.Question_graphique(n, operation)
    affiche_jeu()
# Decorative image shown at the top of the window.
photo = PhotoImage(file="enfant.gif")
canvas = Canvas(fenetre,width=300, height=203, bd=8)
canvas.create_image(0, 20, anchor=NW, image=photo)
canvas.pack()
# Quit button: leaves the Tk mainloop (main() then destroys the window).
bouton=Button(fenetre, text="Quitter", command=fenetre.quit)
bouton.pack(side=BOTTOM)
# Restart button — not implemented yet.
##def recommencer():
##    main()
## button1 = Tkinter.Button(fenetre, text="RECOMMENCER", command=x)
def main():
    """Size the window, show the first question and run the Tk event loop."""
    fenetre.geometry("700x400")
    question()
    fenetre.mainloop()
    # Reached only after the Quit button calls fenetre.quit().
    fenetre.destroy()
if __name__=="__main__":
    main()
| [
"oumaymadraoui@MacBook-Air-de-Oumayma.local"
] | oumaymadraoui@MacBook-Air-de-Oumayma.local |
0acc93ad642a017f32f09919b908407abe44852a | 6629d7831ced862ca7c513cbc46ca334ce649918 | /Finance/forms.py | 8f707e6cc0883ef580e5070811fda0a97653956c | [] | no_license | HunterProgram22/KudelaCorp | 33db17091698d4d934ce7f58f0dc430bf203699f | ca6481309f5e0a530057065b7c23a960cdb10593 | refs/heads/master | 2022-12-13T19:56:02.533181 | 2019-11-25T19:43:27 | 2019-11-25T19:43:27 | 133,077,035 | 0 | 0 | null | 2022-12-03T05:42:24 | 2018-05-11T18:50:09 | HTML | UTF-8 | Python | false | false | 2,606 | py | from django import forms
from .models import MonthBal, MonthInc, TaxReturn
class MonthBalForm(forms.ModelForm):
    """Monthly balance-sheet entry form.

    Field order groups: date, checking/savings accounts, investment and
    retirement accounts, assets (home, cars), then liabilities (credit
    cards, loans, mortgage). Names mirror the MonthBal model fields.
    """
    class Meta:
        model = MonthBal
        fields = ('date', 'huntington_check', 'fifththird_check', 'huntington_save',
        'fifththird_save', 'capone_save', 'amex_save', 'robinhood_invest', 'deacon_invest',
        'buckeye_invest', 'opers_retire', 'four57_retire', 'four01_retire', 'roth_retire',
        'main_home', 'justin_car', 'kat_car', 'capone_credit', 'amex_credit',
        'discover_credit', 'car_loan', 'pubstudent_loan', 'privstudent_loan',
        'main_mortgage',)
class MonthIncForm(forms.ModelForm):
    """Monthly income/expense entry form.

    Field order groups: date, interest/dividend income, salaries, retirement
    and savings contributions, taxes and withholdings, insurance, housing and
    utilities, loans and credit cards, then miscellaneous spending. Names
    mirror the MonthInc model fields.
    """
    class Meta:
        model = MonthInc
        fields = ('date', 'huntington_interest', 'fifththird_interest',
        'capone_interest', 'amex_interest', 'schwab_interest',
        'schwab_dividends', 'expense_checks', 'miscellaneous_income',
        'refund_rebate_repayment', 'gift_income', 'supremecourt_salary',
        'cdm_salary', 'opers_retirement', 'four57b_retirement',
        'four01k_retirement', 'roth_retirement', 'robinhood_investments', 'schwab_investments',
        'amex_savings', 'fifththird_savings', 'capone_savings', 'five29_college',
        'huntington_savings', 'federal_tax', 'social_security', 'medicare',
        'ohio_tax', 'columbus_tax', 'health_insurance',
        'supplementallife_insurance', 'flex_spending', 'cdm_std',
        'cdmsupplemental_ltd', 'parking', 'parking_admin', 'main_mortgage',
        'hoa_fees', 'auto_insurance', 'aep_electric', 'rumpke_trash',
        'delaware_sewer', 'delco_water', 'suburban_gas', 'verizon_kat',
        'sprint_justin', 'directtv_cable', 'timewarner_internet',
        'caponeauto_loan', 'public_loan', 'private_loan', 'capone_creditcard',
        'amex_creditcard', 'discover_creditcard', 'kohls_vicsec_macy_eddiebauer_creditcards',
        'katwork_creditcard', 'cashorcheck_purchases', 'daycare',
        'taxdeductible_giving',)
class TaxReturnForm(forms.ModelForm):
    """Annual tax-return summary form.

    Covers wages, income totals, deductions and the tax owed/paid figures at
    both the federal and state level. Names mirror the TaxReturn model fields.
    """
    class Meta:
        model = TaxReturn
        fields = ('year', 'total_job_wages', 'total_federal_wages', 'total_income',
        'adjusted_gross_income', 'itemized_deduction_total', 'federal_taxable_income',
        'total_federal_tax_owed', 'total_federal_payments', 'state_taxable_income',
        'total_state_tax_owed', 'total_state_payments',)
| [
"HunterProgram22@github.com"
] | HunterProgram22@github.com |
3b0c1f82cde68be68ba4467960932a855a2a113c | cd4791edf703ee7cec983c2039570b2627529144 | /fall/15_login/app.py | 7c4c9c942ee562f1881cab8116b7f6217b8f8e1d | [] | no_license | bmoses00/SoftDev | 939c92b4ca0b9638e9dce7b7729fe880695e54c2 | b457e7e1a2ca2ff1a6198fad6246ec8f7188abe6 | refs/heads/master | 2020-07-23T12:55:02.251968 | 2020-04-20T22:04:05 | 2020-04-20T22:04:05 | 207,563,231 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,320 | py | # Brian Moses
# SoftDev pd 2
# k#16: Oh yes, perhaps I do
# 10/3/19
from flask import Flask, render_template, request, session, redirect, url_for, flash
import os
app = Flask(__name__)
@app.route('/')
def main():
    """Landing page: welcome page when logged in, otherwise the login form.

    NOTE(review): the username/password pair is hard-coded into the session —
    acceptable for a class exercise, never for production.
    """
    # Hard-coded credential pair the login form is checked against.
    session['username'] = 'chicken'
    session['password'] = 'portenders'
    # session['loggedIn'] is undefined on the very first visit, so probe it
    # EAFP-style and default it to False.
    try:
        if session['loggedIn']:
            return render_template("welcome.html", username = session['userUsername'])
    except KeyError:  # BUGFIX: was a bare except, which hid unrelated errors
        session['loggedIn'] = False
    return render_template("index.html",
            username = request.args.get('Username'),
            password = request.args.get('Password'))
@app.route('/logout')
def help():
    """Log the user out and bounce back to the landing page.

    NOTE(review): the function name shadows the builtin ``help`` and does not
    match the '/logout' route; renaming would change the Flask endpoint name
    (url_for target), so it is only flagged here.
    """
    session['loggedIn'] = False
    return redirect(url_for('main'))
@app.route('/auth')
def authenticate():
    """Check submitted credentials against the pair stored in the session.

    On success, record the login in the session, flash a message and show
    the welcome page; on failure, report which field(s) were wrong.
    """
    # Checks if they entered the right username/pass combo.
    if ((request.args.get('Username')) == session['username']
        and request.args.get('Password') == session['password']):
        session['userUsername'] = request.args.get('Username')
        session['loggedIn'] = True
        flash("You have logged in!")
        return render_template("welcome.html", username = request.args.get('Username'))
    else:
        # Tell the user which of the two fields failed (or both).
        return render_template("error.html"
        , usernameWrong = request.args.get('Username') != session['username']
        , passwordWrong = request.args.get('Password') != session['password'])
    # BUGFIX: removed the unreachable trailing return — both branches above
    # already return.
if __name__ == "__main__":
    # A fresh random secret per process: sessions do not survive restarts.
    app.secret_key = os.urandom(32)
    app.debug = True
    app.run()
| [
"bmoses00@stuy.edu"
] | bmoses00@stuy.edu |
85e231fb8ba4cfd6c1162cb823ec5bb8281d3e38 | 0920b50773cfd231137d2383695a6730d0678628 | /pylib/options.py | aa0c0975c42ce82b8eb23f47fdaaa066b16f3010 | [] | no_license | chyser/bin | 05b67cf299b0e427e253abc42ca015fcdec8e84c | b54f23c6c5f1f19e426ee06c9e9faf9f561ee9a9 | refs/heads/master | 2021-01-19T19:35:05.801722 | 2015-08-19T17:58:29 | 2015-08-19T17:58:29 | 17,319,228 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 23,201 | py | #!/usr/bin/env python
"""
Library:
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
from __future__ import absolute_import
import sys
import glob
class MOptException(Exception):
    """Raised by mopt() for illegal keyword arguments."""
#-------------------------------------------------------------------------------
class OptionClass(object):
#-------------------------------------------------------------------------------
    """Container for command-line options parsed by mopt().

    mopt() attaches one attribute per declared option (None, or [] for
    "multiple" options, when the option was not given on the command line),
    plus the private attributes _usageStr_, _cmdline_ and _args_.

    NOTE(review): this class references the ``unicode`` builtin, which only
    exists on Python 2 — it is not Python 3 clean as written.
    """
    #- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    def __init__(self, usageStr):
    #- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
        """:param usageStr: usage text string, or a callable usage(rc, msg)."""
        object.__init__(self)
        self._usageStr_ = usageStr
    #- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    def __contains__(self, x):
    #- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
        """``'name' in opts`` is True when option 'name' was set (not None)."""
        return self[x] is not None
    #- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    def __getitem__(self, x):
    #- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
        """Dictionary-style access: opts['name'] == opts.name."""
        return getattr(self, x)
    #- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    def get(self, attr, default=None, cls=None):
    #- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
        """Return option ``attr``, or ``default`` when it was not set.

        When ``cls`` is given the value is converted with it (e.g. int);
        a ValueError during conversion calls usage(101, ...), which exits.
        """
        val = getattr(self, attr)
        if val is None:
            return default
        if cls is not None:
            try:
                val = cls(val)
            except ValueError as ex:
                self.usage(101, "option: '%s' has '%s'" % (val, str(ex)))
        return val
    #- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    def usage(self, rc, s=''):
    #- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
        """
        Print the usage text (or call the usage callable) with message ``s``
        and exit with return code ``rc``. A callable _usageStr_ is invoked
        as _usageStr_(rc, s) and is itself responsible for exiting.
        """
        if self._usageStr_ is None:
            print('No help provided\n', file=sys.stderr)
            sys.exit(rc)
        # NOTE(review): ``unicode`` exists only on Python 2; on Python 3 this
        # isinstance test raises NameError.
        if isinstance(self._usageStr_, (unicode, str)):
            print(self._usageStr_ + '\n' + str(s), file=sys.stderr)
            sys.exit(rc)
        else:
            self._usageStr_(rc, s)
    #- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    def logError(self, __d__, d, v=None):
    #- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
        """Dump self.__dict__ (and optionally ``v``) for validate() failures.

        Always falls through and returns None, which validate() relies on as
        a falsy "an error was reported" marker.
        NOTE(review): the ``__d__`` parameter is never used — self.__dict__
        is printed instead; confirm that is intended.
        """
        #import pprint
        print('OptionClass.validate():')
        print('  ', self.__dict__, '\n')
        #pprint.pprint(self.__dict__)
        #pprint.pprint(d, '\n')
        if v is not None:
            print(v, '\n')
    #- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    def validate(self, d):
    #- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
        """Check that dict ``d`` matches this object's attributes exactly.

        Logs any key-set difference or per-key value mismatch. Returns None
        (falsy) on the first key-set difference; otherwise returns the
        marker left by the value-comparison loop (True when everything
        matched, None after logError() reported a mismatch).
        """
        v = set(self.__dict__) - set(d)
        if v:
            return self.logError(self.__dict__, d, v)
        v = set(d) - set(self.__dict__)
        if v:
            return self.logError(self.__dict__, d, v)
        noFirstError = True
        for key, val in self.__dict__.items():
            if d[key] != val:
                if noFirstError:
                    noFirstError = self.logError(self.__dict__, d)
                print('  key:', key, ', d:', d[key], ', __dict__:', val)
        if not noFirstError: print()
        return noFirstError
    #- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    def __str__(self):
    #- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
        """Render all attributes (including private bookkeeping) as a dict."""
        return str(self.__dict__)
#-------------------------------------------------------------------------------
def mopt(cmdLineArgs, oneOnlyFlags, oneOnlyParams, *args, **kwds):
#-------------------------------------------------------------------------------
""" parses cmdLineArgs for options and arguments. options are normally
identified by a leading '-'.
mopt(cmdArgs, oneOnlyFlags, oneOnlyParams, usageStr="", **keyword)
mopt(opt, oneOnlyFlags, oneOnlyParams, usageStr="", **keyword)
mopt(cmdArgs, oneOnlyFlags, oneOnlyParams, multipleFlags, multipleParams, usageStr="", **keywords)
mopt(opt, oneOnlyFlags, oneOnlyParams, multipleFlags, multipleParams, usageStr="", **keywords)
Keyword arguments:
addHelp : automatically call usage for -? or --help', default: True
nonOpStopOp : '-'s are ignored after first non-option, default: True
skipUnknownOps : if True, put unknown options into arg list, else call usage with error. default: False
allowMultiChar : if False, '-abcd' means options a, b, c, and d, else it is option 'abcd'. default: False
shortOpMarker : marker when leading char used to identify short options. default: '-'
longOpMarker : marker when leading char used to identify long options. default: '--'
expandWildCards : expand wildcards in arguments (assume they are files). default: True
oneOnlyFlags, oneOnlyParams, multipleFlags and multipleParams are lists of:
- tuples (<short form>, <long form>)
- string
if single char is short form, else long form
usageStr may be either a string or a function to be used as to
display a usage message to stderr.
The long form value (or short if short form only) becomes an
attribute of the option class and will be set to None or [] if not
explicitely set. If an option is listed both as a flag and as a
param, then it always tries to fill the param with the next command
line arg unless it is last, in which case it does not generate a
error (usage call).
Arguments are checked for wildcards and expanded if expandWildCards
is True. Expansion mimics unix shells {*.py, co??l.py, abc[123].py)
and can be excaped by quotes ['"].
If mopt() is called multiple times with a prior OptionClass 'opt'
instead of a cmd line, further processing can occur on the remaining
command line options. This usually implies the first call had
skipUnknownOps = True.
Returns tuple (list of arguments, OptionClass instance)
"""
#- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def cvt(options, opts, sopt, lopt, val=None):
#- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
shortVals = {}; longVals = set()
for opt in options:
if isinstance(opt, unicode):
shortV, longV = (opt, None) if sopt and len(opt) == 1 else (None, opt)
else:
shortV, longV = opt
if shortV:
name = shortV if longV is None else longV
if not hasattr(opts, name):
name = shortV if longV is None else longV
## note, each one should have a different list of not None
setattr(opts, name, None if val is None else [])
shortVals[sopt + shortV] = name
if longV:
if not hasattr(opts, longV):
## note, each one should have a different list of not None
setattr(opts, longV, None if val is None else [])
longVals.add(lopt + longV)
return shortVals, longVals
#- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def expandWCs(arg):
#- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
""" expand wildcards like "*.py", "c?c?.py", "tmp[1234].py"
won't expand if surrounded ' or "
"""
if arg[0] in set(['"', "'"]) and arg[-1] in set(['"', "'"]):
return [arg[1:-1]]
if '*' in arg or '?' in arg or ('[' in arg and ']' in arg):
return glob.glob(arg)
return [arg]
## allow multiple calls by passing in the prior opt class
if isinstance(cmdLineArgs, OptionClass):
opts = cmdLineArgs
cmdLineArgs = opts._args_
else:
opts = None
## parse out keyword arguments
addHelp = kwds.get('addHelp', True)
nonOpStopOp = kwds.get('nonOpStopOp', True)
skipUnknownOps = kwds.get('skipUnknownOps', False)
allowMultiChar = kwds.get('allowMultiChar', False)
shortOpMarker = kwds.get('shortOpMarker', '-')
longOpMarker = kwds.get('longOpMarker', '--')
expandWildCards = kwds.get('expandWildCards', True)
k = set(['addHelp', 'nonOpStopOp', 'skipUnknownOps', 'allowMultiChar', 'shortOpMarker', 'longOpMarker', 'expandWildCards'])
if set(kwds) - k:
raise MOptException("illegal keyword(s): " + str(set(kwds) - k))
lopt = shortOpMarker if allowMultiChar else longOpMarker
## parse out arguments
la = len(args)
if la == 0 or la == 1:
usageStr = '' if la == 0 else args[0]
assert not isinstance(usageStr, list)
if opts is None:
opts = OptionClass(usageStr)
shortMultipleFlags = longMultipleFlags = shortMultipleParams = longMultipleParams = set()
elif la == 2 or la == 3:
usageStr = '' if la == 2 else args[2]
assert not isinstance(usageStr, list)
if opts is None:
opts = OptionClass(usageStr)
if not (isinstance(args[0], (list, tuple)) and isinstance(args[1], (list, tuple))):
raise TypeError('mopt() takes either 3-4 or 5-6 arguments (not counting keyword only args')
shortMultipleFlags, longMultipleFlags = cvt(args[0], opts, shortOpMarker, lopt, 'list')
shortMultipleParams, longMultipleParams = cvt(args[1], opts, shortOpMarker, lopt, 'list')
else:
raise TypeError('mopt() takes either 3-4 or 5-6 arguments (not counting keyword only args')
shortSingleFlags, longSingleFlags = cvt(oneOnlyFlags, opts, shortOpMarker, lopt)
shortSingleParams, longSingleParams = cvt(oneOnlyParams, opts, shortOpMarker, lopt)
opts._cmdline_ = cmdLineArgs
opts._args_ = oargs = []
if not allowMultiChar:
## convert ['-acbd'] to ['-a', '-c', '-b', '-d']
cargs = []
for arg in cmdLineArgs:
if arg.startswith(lopt):
cargs.append(arg)
elif arg.startswith(shortOpMarker) and len(arg) > 2:
for c in arg[1:]:
cargs.append(shortOpMarker + c)
else:
cargs.append(arg)
else:
cargs = cmdLineArgs
#print('cargs:', cargs)
idx = 0
while idx < len(cargs):
arg = cargs[idx]
if addHelp:
if arg == shortOpMarker + '?' or arg == lopt + 'help':
opts.usage(0)
if arg in shortSingleParams:
idx += 1
try:
val = cargs[idx]
except IndexError:
## allows the last option to also be a flag if no following parameter
if arg not in shortSingleFlags:
opts.usage(10001, 'parameter "%s" has no parameter' % arg)
val = True
setattr(opts, shortSingleParams[arg], val)
elif arg in longSingleParams:
idx += 1
try:
val = cargs[idx]
except IndexError:
## allows the last option to also be a flag if no following parameter
if arg not in longSingleFlags:
opts.usage(10001, 'parameter "%s" has no parameter' % arg)
val = True
setattr(opts, arg[len(lopt):], val)
elif arg in shortMultipleParams:
idx += 1
try:
val = cargs[idx]
except IndexError:
## allows the last option to also be a flag if no following parameter
if arg not in shortMultipleFlags:
opts.usage(10001, 'parameter "%s" has no parameter' % arg)
val = True
getattr(opts, shortMultipleParams[arg]).append(val)
elif arg in longMultipleParams:
idx += 1
try:
val = cargs[idx]
except IndexError:
## allows the last option to also be a flag if no following parameter
if arg not in longMultipleFlags:
opts.usage(10001, 'parameter "%s" has no parameter' % arg)
val = True
getattr(opts, arg[len(lopt):]).append(val)
elif arg in shortSingleFlags:
setattr(opts, shortSingleFlags[arg], True)
elif arg in longSingleFlags:
setattr(opts, arg[len(lopt):], True)
elif arg in shortMultipleFlags:
getattr(opts, shortMultipleFlags[arg]).append(True)
elif arg in longMultipleFlags:
getattr(opts, arg[len(lopt):]).append(True)
## signal to stop option parsing is an 'empty' long option
elif arg == lopt:
if expandWildCards:
for arg in cargs[idx+1:]:
oargs.extend(expandWCs(arg))
else:
oargs.extend(cargs[idx+1:])
break
## must have found a negative number
elif arg[0] == '-' and arg[1] in set('0123456789'):
oargs.append(arg)
## must have found an unknown option
elif arg.startswith(shortOpMarker):
if not skipUnknownOps:
opts.usage(10000, 'Unknown option: "%s"' % arg)
oargs.append(arg)
## must be an argument
else:
if nonOpStopOp:
if expandWildCards:
for arg in cargs[idx:]:
oargs.extend(expandWCs(arg))
else:
oargs.extend(cargs[idx:])
break
if expandWildCards:
oargs.extend(expandWCs(arg))
else:
oargs.append(arg)
idx += 1
return oargs, opts
#-------------------------------------------------------------------------------
def __test__(verbose=False):
#-------------------------------------------------------------------------------
    """
    used for automated module testing. see L{tester}
    """
    import pylib.tester as tester
    # Local exception type lets the tests detect that usage() was invoked
    # without the process actually exiting.
    class TException(Exception): pass
    def usage(rc, s=''):
        raise TException(s)
    # Basic parsing: bundled short flags, long params, repeated flags,
    # negative-number arguments and the nonOpStopOp behavior.
    t = ['-caa', '--sara', 'cool', 'filename', '--cool', '-5', '-a', '-a']
    args, opts = mopt(t, [('c', 'cat')], ['cool', 'sara'], ['a'], [], "cool")
    tester.Assert(opts.get('cool', 0, int) == 0)
    tester.Assert(len(opts.a) == 2)
    args, opts = mopt(t, [('c', 'cat')], ['cool', 'sara'], ['a'], [], 'this is the prgm', nonOpStopOp=False)
    tester.Assert(opts.get('cool', 0, int) == -5)
    tester.Assert(len(opts.a) == 4)
    args, opts = mopt(t, [('c', 'cat'), 'a'], ['cool', 'sara'], 'this is the prgm', nonOpStopOp=False)
    tester.Assert(opts.get('cool', 0, int) == -5)
    tester.AssertRecvException(AttributeError, opts.get, ('b', ))
    tester.AssertRecvException(TException, mopt, (t, [('c', 'cat')], ['cool', 'sara'], usage))
    tester.AssertRecvException(TException, mopt, (['--help'], [('c', 'cat')], ['cool', 'sara'], usage))
    tester.AssertRecvException(TException, mopt, (['-?'], [('c', 'cat')], ['cool', 'sara'], usage))
    args, opts = mopt(t, [('c', 'cat')], ['cool'], 'this is the prgm', nonOpStopOp=False, skipUnknownOps=True)
    tester.Assert(opts.get('cool', 0, int) == -5)
    tester.Assert(args == ['-a', '-a', '--sara', 'cool', 'filename', '-a', '-a'])
    # test opts as first param
    arg, opts = mopt(opts, [], [], ['a'], ['sara'], '', nonOpStopOp=False)
    tester.Assert(opts.validate({'a': [True, True, True, True], 'sara': ['cool'], 'cat': True, '_usageStr_': 'this is the prgm', 'cool': u'-5', '_args_': [u'filename'],
                                 '_cmdline_': ['-a', '-a', '--sara', 'cool', 'filename', '-a', '-a']}))
    arg, opts = mopt(opts, [], [], ['a'], ['sara'], nonOpStopOp=False)
    tester.Assert(opts.validate({'a': [True, True, True, True], 'sara': ['cool'],
                                 'cat': True, '_usageStr_': 'this is the prgm', '_args_': ['filename'], 'cool': '-5', '_cmdline_': ['filename']}))
    arg, opts = mopt(opts, [], [], ['a'], ['sara'], '', nonOpStopOp=False)
    tester.Assert(opts.validate({'a': [True, True, True, True], 'sara': ['cool'],
                                 'cat': True, '_usageStr_': 'this is the prgm', '_args_': ['filename'], 'cool': '-5', '_cmdline_': ['filename']}))
    arg, opts = mopt(opts, [], [], ['a'], ['sara'], nonOpStopOp=False)
    tester.Assert(opts.validate({'a': [True, True, True, True], 'sara': ['cool'],
                                 'cat': True, '_usageStr_': 'this is the prgm', '_args_': ['filename'], 'cool': '-5', '_cmdline_': ['filename']}))
    arg, opts = mopt(opts, ['c', 'cool'], [])
    tester.Assert(opts.validate({'a': [True, True, True, True], 'c': None, 'sara': ['cool'], 'cat': True,
                                 '_usageStr_': u'this is the prgm', '_args_': ['filename'],
                                 'cool': '-5', '_cmdline_': ['filename']}))
    tester.Assert('c' not in opts)
    tester.Assert('cat' in opts)
    tester.Assert(opts['cat'] is True)
    # allowMultiChar: multi-character short options, custom option markers.
    t = ['-cool', '-run', '5', 'stuff']
    args, opts = mopt(t, ['cool'], ['run'], 'this is the prgm', allowMultiChar=True)
    tester.Assert(opts.validate({'_usageStr_': 'this is the prgm', 'run': '5',
                                 '_args_': ['stuff'], 'cool': True, '_cmdline_': ['-cool', '-run', '5', 'stuff']}))
    t = ['/cool', '/run', '5', 'stuff']
    args, opts = mopt(t, ['cool'], ['run'], 'this is the prgm', allowMultiChar=True, shortOpMarker='/')
    tester.Assert(opts.validate({'_usageStr_': 'this is the prgm', 'run': '5',
                                 '_args_': ['stuff'], 'cool': True, '_cmdline_': ['/cool', '/run', '5', 'stuff']}))
    t = ['--sara', 'boo']
    args, opts = mopt(t, ['sara'], ['sara'], 'this is the prgm')
    tester.Assert(opts.validate({'_usageStr_': 'this is the prgm', 'sara': 'boo',
                                 '_args_': [], '_cmdline_': ['--sara', 'boo']}))
    args, opts = mopt(t, ['sara'], ['sara'], usage)
    tester.AssertRecvException(TException, opts.get, ('sara', '', float))
    t = ['--sara']
    args, opts = mopt(t, ['sara'], ['sara'], 'this is the prgm')
    tester.Assert(opts.validate({'_usageStr_': 'this is the prgm', 'sara': True, '_args_': [], '_cmdline_': ['--sara']}))
    # Wildcard expansion of non-option arguments (relies on options.py
    # being present in the working directory).
    args, opts = mopt(['*.py'], ['sara'], ['sara'], 'this is the prgm')
    tester.AssertRecvException(TypeError, mopt, (['--help'], [('c', 'cat')]))
    args, opts = mopt(['*.py'], ['sara'], ['sara'], 'this is the prgm')
    tester.Assert('options.py' in args)
    args, opts = mopt(['"*.py"'], ['sara'], ['sara'], 'this is the prgm')
    tester.Assert('*.py' in args)
    args, opts = mopt(['coo[123].py'], ['sara'], ['sara'], 'this is the prgm')
    args, opts = mopt(['*.py'], ['sara'], ['sara'], 'this is the prgm', expandWildCards=False)
    tester.Assert('*.py' in args)
    args, opts = mopt(['*.py'], ['sara'], ['sara'], 'this is the prgm', expandWildCards=False, nonOpStopOp=False)
    tester.Assert('*.py' in args)
    t = ['-c', '-r', '5', 'stuff']
    args, opts = mopt(t, [('c', 'cool')], [('r','run')], 'this is the prgm')
    tester.Assert(opts.validate({'_usageStr_': 'this is the prgm', 'run': '5',
                                 '_args_': ['stuff'], 'cool': True, '_cmdline_': ['-c', '-r', '5', 'stuff']}))
    # Options that are both flag and parameter: value is optional.
    t = ['-s']
    args, opts = mopt(t, ['s'], ['s'], 'this is the prgm')
    tester.Assert(opts.validate({'_usageStr_': 'this is the prgm', 's': True, '_args_': [], '_cmdline_': ['-s']}))
    t = ['-s', 'boo']
    args, opts = mopt(t, ['s'], ['s'], 'this is the prgm')
    tester.Assert(opts.validate({'_usageStr_': 'this is the prgm', 's': 'boo', '_args_': [], '_cmdline_': ['-s', 'boo']}))
    t = ['-s']
    args, opts = mopt(t, [], [], ['s'], ['s'], 'this is the prgm')
    tester.Assert(opts.validate({'_usageStr_': 'this is the prgm', 's': [True], '_args_': [], '_cmdline_': ['-s']}))
    t = ['-s', 'boo']
    args, opts = mopt(t, [], [], ['s'], ['s'], 'this is the prgm')
    tester.Assert(opts.validate({'_usageStr_': 'this is the prgm', 's': ['boo'], '_args_': [], '_cmdline_': ['-s', 'boo']}))
    t = ['--sara']
    args, opts = mopt(t, [], [], ['sara'], ['sara'], 'this is the prgm')
    tester.Assert(opts.validate({'_usageStr_': 'this is the prgm', 'sara': [True], '_args_': [], '_cmdline_': ['--sara']}))
    t = ['--sara', 'boo']
    args, opts = mopt(t, [], [], ['sara'], ['sara'], 'this is the prgm')
    tester.Assert(opts.validate({'_usageStr_': 'this is the prgm', 'sara': ['boo'], '_args_': [], '_cmdline_': ['--sara', 'boo']}))
    # '--' terminator: everything after it is treated as plain arguments.
    t = ['--sara', 'boo', '--', '--cool']
    args, opts = mopt(t, [], [], ['sara'], ['sara'])
    tester.Assert(opts.validate({'_usageStr_': '', 'sara': ['boo'], '_args_': ['--cool'],
                                 '_cmdline_': ['--sara', 'boo', '--', '--cool']}))
    t = ['--sara', 'boo', '--', '--cool']
    args, opts = mopt(t, [], [], ['sara'], ['sara'], expandWildCards=False)
    tester.Assert(opts.validate({'_usageStr_': '', 'sara': ['boo'], '_args_': ['--cool'],
                                 '_cmdline_': ['--sara', 'boo', '--', '--cool']}))
    t = ['--sara', '--', '--cool']
    args, opts = mopt(t, [], [], ['sara'], [], expandWildCards=False)
    tester.Assert(opts.validate({'_usageStr_': '', 'sara': [True], '_args_': ['--cool'],
                                 '_cmdline_': ['--sara', '--', '--cool']}))
    # Error paths: options declared as params only must reject flag usage,
    # and argument-count validation must raise TypeError.
    t = ['--sara']
    tester.AssertRecvException(TException, mopt, (t, [], [('s', 'sara')], usage))
    t = ['-s']
    tester.AssertRecvException(TException, mopt, (t, [], [('s', 'sara')], usage))
    t = ['--sara']
    tester.AssertRecvException(TException, mopt, (t, [], [], [], [('s', 'sara')], usage))
    t = ['-s']
    tester.AssertRecvException(TException, mopt, (t, [], [], [], [('s', 'sara')], usage))
    tester.AssertRecvException(TypeError, mopt, (t, [], [], [('s', 'sara')], usage))
    # Custom long/short markers ('---'); with identical markers all
    # unrecognized tokens fall through to the argument list.
    t = ['---sara', '---', '---cool']
    args, opts = mopt(t, [], [], ['sara'], [], expandWildCards=False, longOpMarker='---')
    tester.Assert(opts.validate({'_usageStr_': '', 'sara': [True], '_args_': ['---cool'],
                                 '_cmdline_': ['---sara', '---', '---cool']}))
    t = ['---sara', '-s', '-d']
    args, opts = mopt(t, [], [], ['sara'], [], expandWildCards=False, longOpMarker='---', shortOpMarker='---')
    tester.Assert(opts.validate({'_usageStr_': '', 'sara': [True], '_args_': ['-s', '-d'],
                                 '_cmdline_': ['---sara', '-s', '-d']}))
    return 0
#-------------------------------------------------------------------------------
if __name__ == "__main__":
#-------------------------------------------------------------------------------
    # Manual exercise of mopt() followed by the automated test suite.
    # oss.exit() is given the inverted __test__() result so a 0 return code
    # from the tests becomes a successful process exit status.
    import pylib.osscripts as oss
    args, opts = mopt(oss.argv[1:], [], [], __test__.__doc__)
    print(oss.argv[1:])
    print(args)
    print('-'*40)
    #mopt([], [], [], '', cool=5)
    # Sample compiler-style command line using '---' as both option markers.
    t = ['---cc', 'cl9', '---lib', r'C:\libcpp\lib;C:\Program Files\Microsoft Visual Studio\VC98\lib', '-c', 'msh.cpp']
    args, opts = mopt(t, [], [('Z', 'cc'), ('L', 'lib')], [], [], expandWildCards=False, longOpMarker='---', shortOpMarker='---')
    print(args)
    print(opts)
    res = not __test__(verbose=True)
    oss.exit(res)
| [
"chris.hyser@oracle.com"
] | chris.hyser@oracle.com |
a765cbe96955bdac735e102715ca63e35d4ceee6 | 5cc7f0bfadbddf29671419a6a64b6046d055ddee | /database/mysql_connector.py | 3e498755590152fdfc82779586e02e71ab1041b6 | [
"LicenseRef-scancode-warranty-disclaimer",
"CC-BY-NC-4.0",
"BSD-2-Clause",
"MIT"
] | permissive | webbpinner/openrvdas | 270e661b9e4adff76cdb42cc200dfd7e1aa373ae | 10342586e7406d55b72031f9b54ce2feb10f2f1a | refs/heads/master | 2023-08-20T18:24:23.535467 | 2023-05-25T15:05:03 | 2023-05-25T15:05:03 | 137,220,057 | 0 | 0 | MIT | 2018-06-13T13:33:48 | 2018-06-13T13:33:45 | Python | UTF-8 | Python | false | false | 12,452 | py | #!/usr/bin/env python3
"""Tables:
data: pk timestamp field_name field_value source_record
We don't know what type each value will have, so have a column for
int, float, str and bool and leave all but the appropriate value type
NULL. Docs claim that NULL values take no space, so...
Still so many ways we could make this more space efficient, most
obviously by partitioning field_name (and even timestamp?) into
foreign keys.
field_name - could store this in a separate table so that it's only
a foreign key in the data table. Something like:
fields: id field_name field_type
source_record - an id indexing a table where raw source records are
stored, so that we can re-parse and recreate whatever data we want
if needed.
Current implementation is simple and inefficient in both computation
and storage.
TODO: Allow wildcarding field selection, so client can specify 'S330*,Knud*'
"""
import logging
import sys
from os.path import dirname, realpath
sys.path.append(dirname(dirname(realpath(__file__))))
from logger.utils.das_record import DASRecord # noqa: E402
try:
import mysql.connector
MYSQL_ENABLED = True
except ImportError:
MYSQL_ENABLED = False
################################################################################
class MySQLConnector:
    """Simple MySQL-backed store for DASRecords.

    Every field/value pair of a DASRecord is stored as one row of
    DATA_TABLE, with one typed value column per supported Python type;
    the raw source record may optionally be saved in SOURCE_TABLE and
    referenced by foreign key so records can be re-parsed later.
    """
    # Name of table in which we will store mappings from record field
    # names to the names of the tables containing those fields.
    DATA_TABLE = 'data'
    FIELD_TABLE = 'fields'
    SOURCE_TABLE = 'source'

    def __init__(self, database, host, user, password,
                 tail=False, save_source=True):
        """Interface to MySQLConnector, to be imported by, e.g. DatabaseWriter."""
        if not MYSQL_ENABLED:
            logging.warning('MySQL not found, so MySQL functionality not available.')
            return

        self.connection = mysql.connector.connect(database=database, host=host,
                                                  user=user, password=password,
                                                  auth_plugin='mysql_native_password')
        self.save_source = save_source

        # What's the next id we're supposed to read? Or if we've been
        # reading by timestamp, what's the last timestamp we've seen?
        self.next_id = 1
        self.last_timestamp = 0

        self.exec_sql_command('set autocommit = 1')

        # Create tables if they don't exist yet
        if not self.table_exists(self.SOURCE_TABLE):
            table_cmd = 'CREATE TABLE %s (id INT PRIMARY KEY AUTO_INCREMENT, ' \
                        'record TEXT)' % self.SOURCE_TABLE
            logging.info('Creating table with command: %s', table_cmd)
            self.exec_sql_command(table_cmd)

        if not self.table_exists(self.DATA_TABLE):
            table_cmd = ['CREATE TABLE %s ' % self.DATA_TABLE,
                         '(',
                         'id INT PRIMARY KEY AUTO_INCREMENT,',
                         'timestamp DOUBLE,',
                         'field_name VARCHAR(255),',
                         'int_value INT,',
                         'float_value DOUBLE,',
                         'str_value TEXT,',
                         'bool_value INT,',
                         'source INT,',
                         'INDEX (timestamp),',
                         'FOREIGN KEY (source) REFERENCES %s(id)'
                         % self.SOURCE_TABLE,
                         ')'
                         ]
            logging.info('Creating table with command: %s', ' '.join(table_cmd))
            self.exec_sql_command(' '.join(table_cmd))

        # Once tables are initialized, seek to end if tail is True
        if tail:
            self.seek(offset=0, origin='end')

    ############################
    def exec_sql_command(self, command):
        """Execute a single SQL command and commit; errors are logged
        rather than raised."""
        cursor = self.connection.cursor()
        try:
            cursor.execute(command)
            self.connection.commit()
            cursor.close()
        except mysql.connector.errors.Error as e:
            logging.error('Executing command: "%s", encountered error "%s"',
                          command, str(e))

    ############################
    def table_exists(self, table_name):
        """Does the specified table exist in the database?"""
        cursor = self.connection.cursor()
        cursor.execute('SHOW TABLES LIKE "%s"' % table_name)
        if cursor.fetchone():
            exists = True
        else:
            exists = False
        cursor.close()
        return exists

    ############################
    def write_record(self, record):
        """Write one DASRecord's fields (and optionally its raw source
        record) to the database."""
        # First, check that we've got something we can work with
        if not record:
            return
        # Accept DASRecord and its subclasses; the original exact-type
        # comparison (type(record) == DASRecord) rejected subclasses.
        if not isinstance(record, DASRecord):
            logging.error('write_record() received non-DASRecord as input. '
                          'Type: %s', type(record))
            return

        # If we're saving source records, we have to do a little
        # legerdemain: after we've saved the record, we need to retrieve
        # the id of the record we've just saved so that we can attach it
        # to the data values we're about to save.
        # NOTE(review): SQL is built by string interpolation throughout this
        # class; JSON containing quote characters can break the statement.
        # Parameterized queries would be the robust fix.
        if self.save_source:
            write_cmd = 'insert into `%s` (record) values (\'%s\')' % \
                        (self.SOURCE_TABLE, record.as_json())
            logging.debug('Inserting source into table with command: %s', write_cmd)
            self.exec_sql_command(write_cmd)

            # Get the id of the saved source record. Note: documentation
            # *claims* that this is kept on a per-client basis, so it's safe
            # even if another client does an intervening write.
            query = 'select last_insert_id()'
            cursor = self.connection.cursor()
            cursor.execute(query)
            source_id = next(cursor)[0]
            cursor.close()  # was leaked in the original
        else:
            source_id = None

        if not record.fields:
            logging.info('DASRecord has no parsed fields. Skipping record.')
            return

        # Write one row for each field-value pair. Columns are:
        #   timestamp
        #   field_name
        #   int_value   \
        #   float_value  \  Only one of these fields will be non-NULL,
        #   str_value    /  depending on the type of the value.
        #   bool_value  /
        timestamp = record.timestamp
        values = []
        for field_name, value in record.fields.items():
            value_array = ['%f' % timestamp, '"%s"' % field_name,
                           'NULL', 'NULL', 'NULL', 'NULL']
            if type(value) is int:
                value_array[2] = '%d' % value
            elif type(value) is float:
                value_array[3] = '%f' % value
            elif type(value) is str:
                value_array[4] = '"%s"' % value
            elif type(value) is bool:
                # Bug fix: the original built this as
                # '%d' % ('1' if value else '0'), which raises TypeError
                # (%d requires a number) for every boolean field.
                value_array[5] = '1' if value else '0'
            elif value is None:
                value_array[4] = '""'
            else:
                logging.error('Unknown record value type (%s) for %s: %s',
                              type(value), field_name, value)
                continue

            # If we've saved this field's source record, append source's
            # foreign key to row so we can look it up.
            if source_id:
                value_array.append('%d' % source_id)

            # Join entries into a string, append to list of other values
            # we've already saved.
            value_str = '(%s)' % ','.join(value_array)
            values.append(value_str)

        # Build the SQL query
        fields = ['timestamp',
                  'field_name',
                  'int_value',
                  'float_value',
                  'str_value',
                  'bool_value']
        if source_id:
            fields.append('source')

        if not values:
            # Previously a malformed INSERT (with an empty VALUES clause)
            # was still issued here; skip the write instead.
            logging.warning('No values found in record %s', str(record))
            return

        write_cmd = 'insert into `%s` (%s) values %s' % \
                    (self.DATA_TABLE, ','.join(fields), ','.join(values))
        logging.debug('Inserting record into table with command: %s', write_cmd)
        self.exec_sql_command(write_cmd)

    ############################
    def read(self, field_list=None, start=None, num_records=1):
        """Read the next record from table. If start is specified, reset read
        to start at that position."""
        if start is None:
            start = self.next_id
        condition = 'id >= %d' % start

        # If they haven't given us any fields, retrieve everything
        if field_list:
            field_conditions = ['field_name="%s"' % f for f in field_list.split(',')]
            condition += ' and (%s)' % ' or '.join(field_conditions)
        condition += ' order by id'
        if num_records is not None:
            condition += ' limit %d' % num_records

        query = 'select * from `%s` where %s' % (self.DATA_TABLE, condition)
        logging.debug('read query: %s', query)
        return self._process_query(query)

    ############################
    def read_time(self, field_list=None, start_time=None, stop_time=None):
        """Read the next records from table based on timestamps. If start_time
        is None, use the timestamp of the last read record. If stop_time is None,
        read all records since then."""
        if start_time is None:
            condition = 'timestamp > %f' % self.last_timestamp
        else:
            condition = 'timestamp > %f' % start_time
        if stop_time is not None:
            condition = '(%s and timestamp < %f)' % (condition, stop_time)

        # If they haven't given us any fields, retrieve everything
        if field_list:
            field_conditions = ['field_name="%s"' % f for f in field_list]
            condition += ' and (%s)' % ' or '.join(field_conditions)
        condition += ' order by timestamp'

        query = 'select * from `%s` where %s' % (self.DATA_TABLE, condition)
        logging.debug('read query: %s', query)
        return self._process_query(query)

    ############################
    def seek(self, offset=0, origin='current'):
        """Behavior is intended to mimic file seek() behavior but with
        respect to records: 'offset' means number of records, and origin
        is either 'start', 'current' or 'end'."""
        num_rows = self._num_rows(self.DATA_TABLE)
        if origin == 'current':
            self.next_id += offset
        elif origin == 'start':
            self.next_id = offset + 1
        elif origin == 'end':
            self.next_id = num_rows + offset + 1

        # Bug fix (by removal): the original assigned
        # min(num_rows, self.next_id) to a never-read attribute
        # 'self._next_id' (a typo). "Fixing" the typo by clamping next_id
        # to num_rows would break tailing, since origin='end' legitimately
        # points one past the last row, so the dead statement is dropped.
        logging.debug('Seek: next position %d', self.next_id)

    ############################
    def _num_rows(self, table_name):
        """Return the number of rows currently in table_name."""
        query = 'select count(1) from `%s`' % table_name
        cursor = self.connection.cursor()
        cursor.execute(query)
        num_rows = next(cursor)[0]
        cursor.close()  # was leaked in the original
        return num_rows

    ############################
    def _process_query(self, query):
        """Run a SELECT over DATA_TABLE and return a dict mapping
        field_name -> list of (timestamp, value) pairs, advancing
        next_id/last_timestamp past the rows seen."""
        cursor = self.connection.cursor()
        cursor.execute(query)

        results = {}
        for values in cursor:
            (id, timestamp, field_name,
             int_value, float_value, str_value, bool_value,
             source) = values
            if field_name not in results:
                results[field_name] = []
            if int_value is not None:
                val = int_value
            elif float_value is not None:
                val = float_value
            elif str_value is not None:
                val = str_value
            elif bool_value is not None:
                val = bool(bool_value)
            else:
                # All value columns NULL: previously 'val' could leak from
                # the prior row (or raise NameError on the first row). A
                # duplicate, unreachable float_value branch was also removed.
                val = None
            results[field_name].append((timestamp, val))

            self.next_id = id + 1
            self.last_timestamp = timestamp
        cursor.close()
        return results

    ############################
    def delete_table(self, table_name):
        """Delete a table."""
        delete_cmd = 'drop table `%s`' % table_name
        logging.info('Dropping table with command: %s', delete_cmd)
        self.exec_sql_command(delete_cmd)

    ############################
    def close(self):
        """Close connection."""
        self.connection.close()
| [
"david.cohn@gmail.com"
] | david.cohn@gmail.com |
39f447838bd22667f92f23482d571f4de05c5fa3 | e414ec62afaa75d187d68831df6c91919c5bad56 | /build/husky/husky_gazebo/catkin_generated/pkg.installspace.context.pc.py | 832143068bb9ca382fe6603616802306cfed0de1 | [] | no_license | mark2n/tianbao-robtech | 1458a9aca3d20adb0f62e92de271d7aa968be7da | 771cd5ad9194d30fa358e65d3609ede3b643d332 | refs/heads/master | 2021-01-19T22:07:45.969245 | 2017-05-26T09:09:38 | 2017-05-26T09:09:38 | 83,771,271 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 382 | py | # generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "husky_gazebo"
PROJECT_SPACE_DIR = "/home/tudeng/tianbao-robtech/install"
PROJECT_VERSION = "0.2.6"
| [
"tianbao.zhang@yahoo.com"
] | tianbao.zhang@yahoo.com |
f934beae75c687ea2711ab282598059dfd7ac6e2 | 8554c0b9e8355ab9f59d1441a442c7d00a2d75a8 | /bin/pyrouge_write_config_file | 9a48b85a331db407a8716f2be85fffbab49a7dfe | [
"MIT"
] | permissive | j-min/pyrouge | 225d3fc2561f010cd90eb3bec057456f4016494d | 3693dfba1339114d8d579dcca36a06f797fa7697 | refs/heads/master | 2020-04-16T14:16:02.153527 | 2019-01-14T12:52:19 | 2019-01-14T12:52:19 | 165,660,792 | 1 | 0 | MIT | 2019-01-14T12:50:33 | 2019-01-14T12:50:33 | null | UTF-8 | Python | false | false | 570 | #!/usr/bin/env python
from __future__ import print_function, unicode_literals, division
import argparse
from pyrouge import Rouge155
from pyrouge.utils.argparsers import model_sys_parser, config_parser
def get_args():
    """Parse command-line options inherited from pyrouge's shared argparsers."""
    arg_parser = argparse.ArgumentParser(
        parents=[model_sys_parser, config_parser])
    return arg_parser.parse_args()
def main():
    """Write a ROUGE configuration file from the parsed CLI arguments."""
    args = get_args()
    Rouge155.write_config_static(
        args.system_dir, args.system_filename_pattern,
        args.model_dir, args.model_filename_pattern,
        args.config_file_path, args.system_id)
if __name__ == "__main__":
main()
| [
"benjamin.heinzerling@web.de"
] | benjamin.heinzerling@web.de | |
98dfd5030c36ce5c10996ad0e1e826a84a1fc575 | 26bebf63dd015accb86a063da4b5bde074c3e1d1 | /app.py | 2fd86af5b3f9b1d5a22a1fe93490e4c864df967d | [] | no_license | Sagini-Dominic/AT-Workshop | 9ef25a98055f0dafe03ade0e978dd96e9cadd179 | 4220c2418dcc244c48c04879168d11544638dde2 | refs/heads/master | 2020-04-28T06:41:38.330504 | 2019-03-11T19:32:21 | 2019-03-11T19:32:21 | 175,068,742 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 546 | py | from flask import Flask, render_template
app = Flask(__name__)  # WSGI application object; the route decorators below attach to it
@app.route('/base')
def base():
    """Render the shared base layout template."""
    return render_template("base.html")
@app.route('/index')
def index():
    """Render the landing page."""
    return render_template("index.html")
@app.route('/sms')
def sms():
    """Render the SMS demo page."""
    return render_template("sms.html")
@app.route('/voice')
def voice():
    """Render the voice demo page."""
    return render_template("voice.html")
@app.route('/payment')
def payment():
    """Render the payments demo page."""
    return render_template("payment.html")
@app.route('/airtime')
def airtime():
    """Render the airtime demo page."""
    return render_template("airtime.html")
if __name__ == '__main__':
    # Start Flask's built-in development server (not for production use).
    app.run()
"saginidominic@gmail.com"
] | saginidominic@gmail.com |
cd7d5da0b443ac1fb04d89ebc2f8978329d57ef8 | c62a4272f9cba5ae25c6695a07f9a1f9e9e2a0fe | /tracking/options.py | 2b2c43dd698aad2ba0bca8c504442d68bd6f6605 | [] | no_license | Alexadlu/MANet | 43b14b23a92136de8eeda4f43148f964146579c3 | 50dc0c47591974159841407b05ac65542fa834f6 | refs/heads/master | 2021-11-21T16:39:25.473057 | 2021-10-05T05:55:03 | 2021-10-05T05:55:03 | 197,388,454 | 104 | 20 | null | null | null | null | UTF-8 | Python | false | false | 1,213 | py | from collections import OrderedDict
opts = OrderedDict()

# --- runtime / model ---
opts['use_gpu'] = True # set to False to run on CPU (much slower)
opts['model_path1'] = "../models/MANet-2IC.pth"

# --- input patch geometry ---
opts['img_size'] = 107
opts['padding'] = 16

# --- per-iteration sample batch sizes ---
opts['batch_pos'] = 32
opts['batch_neg'] = 96
opts['batch_neg_cand'] = 1024
opts['batch_test'] = 256
opts['n_samples'] = 512

# --- candidate translation/scale search factors ---
opts['trans_f'] = 0.6
opts['scale_f'] = 1.05
opts['trans_f_expand'] = 1.5

# --- bounding-box regression training set ---
opts['n_bbreg'] = 1000
opts['overlap_bbreg'] = [0.6, 1]
opts['scale_bbreg'] = [1,2]

# --- initial (first-frame) training ---
opts['lr_init'] = 0.0001
opts['maxiter_init'] = 30
opts['n_pos_init'] = 500
opts['n_neg_init'] = 5000
opts['overlap_pos_init'] = [0.7, 1]
opts['overlap_neg_init'] = [0, 0.5]

# --- online update training ---
opts['lr_update'] = 0.0002
opts['maxiter_update'] = 15
opts['n_pos_update'] = 50
opts['n_neg_update'] = 200
opts['overlap_pos_update'] = [0.7, 1]
opts['overlap_neg_update'] = [0, 0.3]

# --- update scheduling / history windows ---
opts['success_thr'] = 0
opts['n_frames_short'] = 20
opts['n_frames_long'] = 100 #init 100
opts['long_interval'] = 10

# --- optimizer settings ---
opts['w_decay'] = 0.0005
opts['momentum'] = 0.9
opts['grad_clip'] =10
opts['lr_mult'] = {'fc6':10} #test gtot use {'fc6':10,'fc4':5,'fc5':5} /// #test rgbt234 use {'fc6':10}
opts['ft_layers'] = ['fc']
| [
"noreply@github.com"
] | noreply@github.com |
e5b83f893b2e670a76c3e80afe4f2b7a7c9ecff8 | 3637fe729395dac153f7abc3024dcc69e17f4e81 | /reference/ucmdb/discovery/nnmi_api.py | 5f1ad3f66d69e9c7f50fca6f28d7628e45ce3907 | [] | no_license | madmonkyang/cda-record | daced6846c2456f20dddce7f9720602d1583a02a | c431e809e8d0f82e1bca7e3429dd0245560b5680 | refs/heads/master | 2023-06-15T08:16:46.230569 | 2021-07-15T16:27:36 | 2021-07-15T16:27:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 59,922 | py | #!/usr/bin/env python
# coding: utf8
# Python standard library
import itertools
import os
import re
import sys
import time
import types

# Framework / project modules
import logger
import ip_addr
import nnmi_filters

# Java platform, UCMDB client and NNMi SDK classes (Jython)
import java.net
from java.lang import System, String
from java.io import FileOutputStream, FileInputStream
from javax.xml.ws import BindingProvider
from javax.xml.ws import WebServiceException
import com.hp.ov.nms.sdk
from com.hp.ucmdb.discovery.library.clients.recorder import ExecutionRecorderManager
from com.hp.ucmdb.discovery.clients.nnm import SoapHeadersHandler
from com.hp.ucmdb.discovery.clients.nnm import SoapHandlerResolver
from com.esotericsoftware.kryo import Kryo
from com.esotericsoftware.kryo.io import Output, Input
FF = nnmi_filters.get_jaxws_filter_factory()  # shared JAX-WS filter factory

# Default page sizes. To tune actual page sizes, refer to NmsAPI constructor
DEFAULT_PAGESIZE_NODE = 500
DEFAULT_PAGESIZE_L2CONNECTION = 200
DEFAULT_PAGESIZE_VLAN = 50
# Max number of conditions combined into one filter expression
DEFAULT_CONDITIONS_IN_FILTER = 100

# Topology objects related to nodes share a single default page size
_DEFAULT_RELATED_TOPOLOGY_PAGESIZE = 1000
DEFAULT_PAGESIZE_INTERFACE = _DEFAULT_RELATED_TOPOLOGY_PAGESIZE
DEFAULT_PAGESIZE_IPADDRESS = _DEFAULT_RELATED_TOPOLOGY_PAGESIZE
DEFAULT_PAGESIZE_IPSUBNET = _DEFAULT_RELATED_TOPOLOGY_PAGESIZE
DEFAULT_PAGESIZE_PORT = _DEFAULT_RELATED_TOPOLOGY_PAGESIZE
DEFAULT_PAGESIZE_CARD = _DEFAULT_RELATED_TOPOLOGY_PAGESIZE

NO_PAGE_SIZE = -1  # sentinel: request is not paged

# Throttling/retry defaults used by ensure_delay() and retry_on()
FETCH_DELAY = 0
FETCH_RETRY_COUNT = 3
FETCH_RETRY_DELAY = 20

# Folder used by the execution record/replay (store) feature
RECORD_FOLDER_PATH = ExecutionRecorderManager.RECORD_FOLDER_PATH
class StoreConfig:
    ''' Immutable triple of flags controlling the record/replay store:
    whether to read stored data, whether to write it, and whether to
    fall back to a live NNMi query when stored data is unavailable. '''

    def __init__(self, read, write, fallback_to_live):
        # Flags are kept on private fields and exposed through the
        # accessor methods below.
        self._read = read
        self._write = write
        self._fallback_to_live = fallback_to_live

    def read(self):
        ''' Whether stored data should be read '''
        return self._read

    def write(self):
        ''' Whether fetched data should be written to the store '''
        return self._write

    def fallback_to_live(self):
        ''' Whether to query the live server when the store has no data '''
        return self._fallback_to_live
# Record/replay store configuration; None disables the feature entirely.
# Enable by assigning a StoreConfig instance, e.g. StoreConfig(True, True, True).
_STORE_CONFIG = None#StoreConfig(True, True, True)
_STORE_NAMESPACE = 'default'
def not_empty(x):
    ''' Predicate: True unless x is None or the empty string.
    Other falsy values (0, [], etc.) are considered non-empty. '''
    return x is not None and x != ''
class NmsServices:
    # Symbolic names of the NNMi topology object kinds / web-service
    # endpoints referenced throughout this module.
    Node = 'Node'
    Interface = 'Interface'
    IPAddress = 'IPAddress'
    IPSubnet = 'IPSubnet'
    L2Connection = 'L2Connection'
    L2Node = 'L2Node'
    VLAN = 'VLAN'
    Port = 'Port'
    Card = 'Card'
class RestorableItem:
    ''' Lightweight pair of identifiers used to rebuild an NNMi entity
    from a previously stored UCMDB id mapping (see _restore_items).
    Both the UCMDB id and the NNMi entity id are mandatory. '''

    def __init__(self, cmdbId, id_):
        # Guard clauses: reject missing/empty identifiers up front.
        if not cmdbId:
            raise ValueError('Invalid cmdbId')
        if not id_:
            raise ValueError('Invalid id_')
        self.cmdbId = cmdbId
        self.id = id_
def _restore_items(fetcher, id_map, ids_to_restore):
    r'@types: BaseNmsFetcher, dict[str, str], set[str] -> list[BaseNmsEntity]'
    # Rebuild one entity per requested id, looking its UCMDB id up in id_map.
    item_class = fetcher.collection_class.item_class
    return [item_class(RestorableItem(id_map.get(id_), id_), fetcher)
            for id_ in ids_to_restore]
def is_restorable(entity):
    # Entities carrying a 'cmdbId' attribute were reconstructed from stored
    # UCMDB ids (see RestorableItem) rather than fetched from a live server.
    return hasattr(entity, 'cmdbId')
def is_not_restorable(entity):
    # Complement of is_restorable; convenient as a filter predicate.
    return not is_restorable(entity)
class _HasOsh:
''' Mixin which holds generated OSH object '''
def __init__(self):
self._osh = None
def get_osh(self):
return self._osh
def set_osh(self, osh):
if osh is None:
raise ValueError("osh is None")
self._osh = osh
def has_osh(entity):
    ''' entity with _HasOsh mixin -> boolean '''
    # None entities never carry an OSH; otherwise ask the mixin accessor.
    if entity is None:
        return False
    return entity.get_osh() is not None
def to_osh(entity):
    ''' entity with _HasOsh mixin -> OSH '''
    # Accessor in function form, suitable for map(to_osh, entities).
    return entity.get_osh()
# Epoch millis of the most recent call guarded by ensure_delay(); shared
# module-wide by all wrapped functions.
last_action = System.currentTimeMillis()
def ensure_delay(delay=0):
    # Decorator factory: guarantees at least `delay` seconds between
    # consecutive invocations of any function it wraps, using the shared
    # module-level `last_action` timestamp as the throttle reference.
    def decorator_fn(real_fn):
        def wrapper(*args, **kwargs):
            global last_action
            current_time = System.currentTimeMillis()
            # Whole seconds elapsed since the previous throttled call
            difference = (int)((current_time - last_action) / 1000)
            if difference < delay:
                sleep_time = delay-difference
                logger.debug("Delaying by %s seconds" % sleep_time)
                time.sleep(sleep_time)
            # Record the start of this call before delegating
            last_action = System.currentTimeMillis()
            return real_fn(*args, **kwargs)
        return wrapper
    return decorator_fn
def retry_on(exceptions, times, with_delay=0, rethrow_exception=True, reload=False):
    '''Decorator factory: retry the wrapped call when one of `exceptions` is raised.

    exceptions -- tuple of exception classes that trigger a retry
    times -- number of retries after the first attempt (must be non-zero)
    with_delay -- seconds to sleep between attempts
    rethrow_exception -- when retries are exhausted, re-raise (True) or
                         just log and return None (False)
    reload -- when True, ignore `times`/`with_delay` and use the
              module-level FETCH_RETRY_COUNT / FETCH_RETRY_DELAY settings
    '''
    if not exceptions: raise ValueError("exceptions are not specified")
    if not times: raise ValueError("times is not specified")

    def decorator_fn(real_fn):
        def wrapper(*args, **kwargs):
            # Bug fix: the original assigned to `times`/`with_delay` inside
            # this function, which made them wrapper-locals and raised
            # UnboundLocalError whenever reload was False (the default).
            # Resolve the effective settings into fresh local names instead.
            if reload:
                attempts_left = FETCH_RETRY_COUNT
                delay = FETCH_RETRY_DELAY
            else:
                attempts_left = times
                delay = with_delay
            while attempts_left >= 0:
                try:
                    return real_fn(*args, **kwargs)
                except exceptions:
                    # sys.exc_info() keeps this block valid under both the
                    # legacy 'except E, ex' and modern 'except E as ex'
                    # interpreter dialects.
                    ex = sys.exc_info()[1]
                    attempts_left -= 1
                    if attempts_left >= 0:
                        logger.debug("(%s) Retrying call after exception %r" % (attempts_left, ex))
                        if delay > 0:
                            logger.debug("after delay of %s seconds" % delay)
                            time.sleep(delay)
                    else:
                        if rethrow_exception:
                            raise ex
                        else:
                            logger.debug('Ignore the exception finally:%s'%ex)
        return wrapper
    return decorator_fn
# assumptions\limitation: 1st arg is self, return value is countable
def log_self_calls():
    ''' Decorator factory that traces method entry (class, method, args)
    and exit (number of items returned, or None) at debug level. '''
    def decorator_fn(real_fn):
        def wrapper(*args, **kwargs):
            logger.debug(" ---> %s.%s(%s, %s)" % (args[0].__class__.__name__, real_fn.__name__, args[1:], kwargs))
            result = real_fn(*args, **kwargs)
            if result is None:
                logger.debug(" <--- returning None")
            else:
                logger.debug(" <--- returning %s items" % len(result))
            return result
        return wrapper
    return decorator_fn
class BaseNmsEntity(_HasOsh):
    '''
    Common base for NNMi topology entities.

    includes_custom_attrs: subclasses set this flag to True once they are
    modified to support querying custom attributes; otherwise custom
    attributes are not requested even if enabled globally.
    '''
    includes_custom_attrs = False

    def __init__(self, item, fetcher):
        self.fetcher = fetcher
        self.id = None
        if is_restorable(item):
            # Item was rebuilt from stored ids rather than fetched live
            self.cmdbId = item.cmdbId
            self.id = item.id
        _HasOsh.__init__(self)

    def __repr__(self):
        rendered = ['%s = %s' % (name, repr(getattr(self, name)))
                    for name in self.field_names]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(rendered))

    def __str__(self):
        rendered = ['%s=%s' % (name, repr(getattr(self, name)))
                    for name in self.field_names]
        return '<%s %s at 0x%.8X>' % (self.__class__.__name__,
                                      ' '.join(rendered), id(self))
class BaseManagementNmsEntity(BaseNmsEntity):
    """Base for NNMi entities that expose a management mode."""

    def __init__(self, item, fetcher):
        BaseNmsEntity.__init__(self, item, fetcher)

    def _get_management_mode(self, item):
        """Return the management mode enum value, or None when unset."""
        mode = item.getManagementMode()
        if not mode:
            return None
        return mode.value()
class NmsNodeEntity(BaseManagementNmsEntity):
    """Plain value object wrapping an NNMi Node web-service bean."""
    # Length of the 'com.hp.ov.nms.devices.' prefix that is stripped from
    # device family/vendor/category values.
    DEV_PREFIX_LEN = len('com.hp.ov.nms.devices.')
    # NNMi capability keys used to classify the node.
    LAN_SWITCH_CAPABILITY = 'com.hp.nnm.capability.node.lanswitching'
    IP_FORWARDING_CAPABILITY = 'com.hp.nnm.capability.node.ipforwarding'
    # Attributes rendered by BaseNmsEntity.__repr__/__str__.
    field_names = (
        'id',
        'name',
        'is_lan_switch',
        'is_router',
        'system_name',
        'system_contact',
        'system_description',
        'system_location',
        'system_object_id',
        'long_name',
        'snmp_version',
        'device_model',
        'device_vendor',
        'device_family',
        'device_description',
        'device_category',
        'uuid',
        'management_mode',
        # 'customAttributes',
        )
    def __init__(self, item, fetcher):
        """Copy all node bean attributes; restorable items keep only ids."""
        BaseManagementNmsEntity.__init__(self, item, fetcher)
        self.management_mode = None
        if not is_restorable(item):
            self.id = item.getId()
            self.name = item.getName()
            self.is_lan_switch = self._get_is_lan_switch(item)
            self.is_router = self._get_is_router(item)
            self.system_name = item.getSystemName()
            self.system_contact = item.getSystemContact()
            self.system_description = item.getSystemDescription()
            self.system_location = item.getSystemLocation()
            self.system_object_id = item.getSystemObjectId()
            self.long_name = self._get_long_name(item)
            self.snmp_version = item.getSnmpVersion()
            self.device_model = self._get_device_model(item)
            self.device_vendor = self._get_device_vendor(item)
            self.device_family = self._get_device_family(item)
            self.device_description = item.getDeviceDescription()
            self.device_category = self._get_device_category(item)
            self.uuid = item.getUuid()
            self.management_mode = self._get_management_mode(item)
            # self.customAttributes = item.getCustomAttributes()
        self._report_all = False # indicates whether all related information should be reported, including interfaces, ips etc
    def _get_is_lan_switch(self, item):
        """Return 1 if the node advertises the LAN-switching capability, else 0."""
        caps = item.getCapabilities()
        if caps:
            for cap in caps:
                cap_key = cap.getKey()
                if cap_key:
                    cap_key = cap_key.strip()
                    if cap_key == self.LAN_SWITCH_CAPABILITY:
                        return 1
        return 0
    def _get_is_router(self, item):
        """Return 1 if the node advertises the IP-forwarding capability, else 0."""
        caps = item.getCapabilities()
        if caps:
            for cap in caps:
                cap_key = cap.getKey()
                if cap_key:
                    cap_key = cap_key.strip()
                    if cap_key == self.IP_FORWARDING_CAPABILITY:
                        return 1
        return 0
    def _get_device_family(self, item):
        """Device family with the vendor prefix stripped; '' for no-SNMP nodes."""
        device_family = item.getDeviceFamily()
        if device_family and (device_family != '<No SNMP>'):
            return device_family[self.DEV_PREFIX_LEN:]
        else:
            return ''
    def _get_device_vendor(self, item):
        """Device vendor with the prefix stripped; '' for no-SNMP nodes."""
        device_vendor = item.getDeviceVendor()
        if device_vendor and (device_vendor != 'com.hp.ov.nms.devices.nosnmp'):
            return device_vendor[self.DEV_PREFIX_LEN:]
        else:
            return ''
    def _get_device_model(self, item):
        # NOTE(review): unlike family/vendor/category, the model is returned
        # as-is (no prefix stripping) - presumably models are not prefixed.
        device_model = item.getDeviceModel()
        if device_model and (device_model != 'com.hp.ov.nms.devices.<No SNMP>'):
            return device_model
        else:
            return ''
    def _get_device_category(self, item):
        """Device category with the vendor prefix stripped; '' when absent."""
        device_category = item.getDeviceCategory()
        if device_category:
            return device_category[self.DEV_PREFIX_LEN:]
        else:
            return ''
    def _get_long_name(self, item):
        """Fully-qualified node name, normalized to '' when absent."""
        long_name = item.getLongName()
        return long_name or ''
class NmsInterfaceEntity(BaseManagementNmsEntity):
    """Plain value object wrapping an NNMi Interface web-service bean."""
    # Attributes rendered by BaseNmsEntity.__repr__/__str__.
    field_names = (
        'id',
        'name',
        'hosted_on_id',
        'connection_id',
        'if_index',
        'if_alias',
        'if_descr',
        'if_name',
        'if_speed',
        'physical_address',
        'if_type',
        'uuid',
        'status',
        'admin_status',
        'oper_status',
        'management_mode',
    )
    def __init__(self, item, fetcher):
        r'@types: com.hp.ov.nms.sdk.iface._interface, NmsInterfaceFetcher'
        BaseManagementNmsEntity.__init__(self, item, fetcher)
        self.hosted_on_id = None
        self.management_mode = None
        if not is_restorable(item):
            self.id = item.getId()
            self.name = item.getName()
            self.hosted_on_id = item.getHostedOnId()
            self.connection_id = item.getConnectionId()
            self.if_index = item.getIfIndex()
            self.if_alias = item.getIfAlias()
            self.if_descr = item.getIfDescr()
            self.if_name = item.getIfName()
            self.if_speed = item.getIfSpeed()
            self.admin_status = item.getAdministrativeState()
            self.oper_status = item.getOperationalState()
            self.physical_address = self._get_physical_address(item)
            self.if_type = self._get_interface_type(item)
            self.uuid = item.getUuid()
            self.management_mode = self._get_management_mode(item)
            self.status = self._get_status(item)
    def _get_status(self, item):
        # NOTE(review): assumes getStatus() never returns None here -
        # a None status would raise AttributeError; confirm with the SDK.
        status = item.getStatus()
        return status.value()
    def _get_physical_address(self, item):
        """MAC address, normalized to None when empty/absent."""
        physical_address = item.getPhysicalAddress()
        if physical_address:
            return physical_address
        else:
            return None
    def _get_interface_type(self, item):
        """Parse ifType as int; only values 1..251 are kept, else None.

        The range presumably corresponds to assigned IANA ifType numbers -
        TODO confirm. Non-numeric values are silently dropped.
        """
        typeStr = item.getIfType()
        if typeStr:
            try:
                typeValue = int(typeStr)
                if typeValue > 0 and typeValue < 252:
                    return typeValue
            except:
                pass
        return None
class NmsIPAddressEntity(BaseManagementNmsEntity):
    """Plain value object wrapping an NNMi IPAddress web-service bean."""
    # Attributes rendered by BaseNmsEntity.__repr__/__str__.
    field_names = (
        'id',
        'hosted_on_id',
        'ip_subnet_id',
        'in_interface_id',
        'ip_value',
        'prefix_length',
        'uuid',
        'management_mode',
    )
    def __init__(self, item, fetcher):
        """Copy IP address bean attributes; restorable items keep only ids."""
        BaseManagementNmsEntity.__init__(self, item, fetcher)
        self.hosted_on_id = None
        self.management_mode = None
        if not is_restorable(item):
            self.id = item.getId()
            self.hosted_on_id = item.getHostedOnId()
            self.ip_subnet_id = item.getIpSubnetId()
            self.in_interface_id = item.getInInterfaceId()
            self.ip_value = item.getIpValue()
            self.prefix_length = item.getPrefixLength()
            self.uuid = item.getUuid()
            self.management_mode = self._get_management_mode(item)
class NmsIPSubnetEntity(BaseNmsEntity):
    """Plain value object wrapping an NNMi IPSubnet web-service bean."""
    # Attributes rendered by BaseNmsEntity.__repr__/__str__.
    field_names = (
        'id',
        'name',
        'prefix_length',
        'prefix',
        'uuid',
    )
    def __init__(self, item, fetcher):
        """Copy subnet bean attributes; restorable items keep only ids."""
        BaseNmsEntity.__init__(self, item, fetcher)
        if not is_restorable(item):
            self.id = item.getId()
            self.name = item.getName()
            self.prefix_length = item.getPrefixLength()
            self.prefix = item.getPrefix()
            self.uuid = item.getUuid()
class NmsL2ConnectionEntity(BaseNmsEntity):
    """Plain value object wrapping an NNMi Layer2Connection bean.

    When the bean carries no interface list, member interface ids are
    reconstructed by parsing the connection name, which NNMi formats as
    'Hostname[InterfaceName],Hostname[InterfaceName],...'.
    """
    # Attributes rendered by BaseNmsEntity.__repr__/__str__.
    field_names = (
        'id',
        'name',
        'interfaces',
        'uuid',
    )
    def __init__(self, item, fetcher):
        BaseNmsEntity.__init__(self, item, fetcher)
        if not is_restorable(item):
            self.id = item.getId()
            self.name = item.getName()
            self.uuid = item.getUuid()
            interfaces = item.getInterfaces()
            if interfaces is not None:
                self.interfaces = list(interfaces)
            else:
                # Fallback: derive interface ids from the connection name.
                # This issues extra web-service queries per interface.
                self.interfaces = self._getInterfacesIdsByL2Name(item.name)
    def _getInterfacesIdsByL2Name(self, name):
        """Parse the L2 connection name and resolve each member interface id."""
        interfaceInfoList = name.split(",")
        interfaces = []
        for interfaceInfo in interfaceInfoList:
            interfaceId = self._getInterfaceId(interfaceInfo)
            if interfaceId:
                interfaces.append(interfaceId)
        return interfaces
    def _getInterfaceId(self, interfaceInfo):
        """
        Trying to get interface info from Layer2Connection name.
        In NNMi Layer2Connection name include in such format "Hostname[InterfaceName]"
        Returns the interface id, or None when the token cannot be parsed
        or the host/interface cannot be resolved.
        """
        match = re.match("(.*)\[(.*)\]", interfaceInfo.strip())
        if match:
            hostName = match.group(1)
            interfaceName = match.group(2)
            return self._findInterfaceIdByHostAndName(hostName, interfaceName)
    def _findInterfaceIdByHostAndName(self, hostName, interfaceName):
        """Look up the node by name, then match one of its interfaces by name.

        Returns None when the host is ambiguous (more than one match) or the
        interface is not found.
        """
        hostFetcher = self.fetcher.api.get_fetcher(NmsServices.Node)
        name_filter = FF.CONDITION('name', '==', hostName)
        hosts = hostFetcher.filtered(name_filter).all()
        if hosts:
            hostList = hosts.items()
            if hostList:
                # our api for NNM returns for each host tuple(hostId, hostObject)
                # we need to host object
                host = hostList[0][1]
                if len(hostList) > 1:
                    logger.warn("Non unique host was found. Host name: %s " % host.name)
                else:
                    hostInterfaces = self._getHostInterface(host.id)
                    for interface in hostInterfaces:
                        if interface.name == interfaceName:
                            return interface.id
        return None
    def _getHostInterface(self, id):
        """Fetch all interfaces hosted on the node with the given id."""
        interfaceFetcher = self.fetcher.api.get_fetcher(NmsServices.Interface)
        name_filter = FF.CONDITION('hostedOnId', '==', id)
        return interfaceFetcher.filtered(name_filter).all()
class NmsVLANEntity(BaseNmsEntity):
    """Plain value object wrapping an NNMi VLAN web-service bean."""
    # Attributes rendered by BaseNmsEntity.__repr__/__str__.
    field_names = (
        'id',
        'name',
        'uuid',
        'vlan_id',
    )
    def __init__(self, item, fetcher):
        BaseNmsEntity.__init__(self, item, fetcher)
        if not is_restorable(item):
            self.id = item.getId()
            self.name = item.getName()
            self.uuid = item.getUuid()
            self.vlan_id = item.getVlanId()
            # NOTE: performs a live web-service call during construction.
            self.ports = self._get_ports()
    def _get_ports(self):
        """Query NNMi for the ids of all ports belonging to this VLAN."""
        port_objects = self.fetcher._get_stub().getPortsForVLANbyId(self.id).getItem()
        if port_objects is not None:
            return [port_object.getId() for port_object in port_objects]
        else:
            return []
class NmsPortEntity(BaseNmsEntity):
    """Plain value object wrapping an NNMi Port web-service bean."""

    # Maps NNMi duplex enum names onto reported duplex values.
    PORT_DUPLEX_TYPE = {
        'FULL': 'full',
        'HALF': 'half',
        'AUTO': 'auto-negotiated',
        'UNKNOWN': 'other',
    }

    # Attributes rendered by BaseNmsEntity.__repr__/__str__.
    field_names = (
        'id',
        'name',
        'hosted_on_id',
        'interface',
        'card',
        'speed',
        'type',
        'duplex_setting',
        'index',
        'uuid',
    )

    def __init__(self, item, fetcher):
        BaseNmsEntity.__init__(self, item, fetcher)
        self.hosted_on_id = None
        if not is_restorable(item):
            self.id = item.getId()
            self.name = item.getName()
            self.hosted_on_id = self._get_hosted_on_id(item)
            self.interface = self._get_interface(item)
            self.card = self._get_card(item)
            self.speed = self._get_speed(item)
            self.type = self._get_type(item)
            self.duplex_setting = self._get_duplex_setting(item)
            self.index = item.getIndex()
            self.uuid = item.getUuid()

    # The accessors below normalize a missing/empty bean value to ''.

    def _get_hosted_on_id(self, item):
        return item.getHostedOnId() or ''

    def _get_interface(self, item):
        return item.getIface() or ''

    def _get_card(self, item):
        # Some NNMi versions do not expose getCard() at all - treat as absent.
        try:
            return item.getCard() or ''
        except AttributeError:
            return ''

    def _get_speed(self, item):
        return item.getSpeed() or ''

    def _get_type(self, item):
        return item.getType() or ''

    def _get_duplex_setting(self, item):
        """Mapped duplex value, or '' when the bean reports no setting."""
        setting = item.getDuplexSetting()
        if not setting:
            return ''
        return self.PORT_DUPLEX_TYPE.get(setting.value())
class NmsCardEntity(BaseManagementNmsEntity):
    """Plain value object wrapping an NNMi Card web-service bean."""

    # Attributes rendered by BaseNmsEntity.__repr__/__str__.
    field_names = (
        'id',
        'name',
        'hosted_on_id',
        'card_descr',
        'firmware_version',
        'hardware_version',
        'software_version',
        'hosting_card',
        'serial_number',
        'type',
        'index',
        'uuid',
        'management_mode',
    )

    def __init__(self, item, fetcher):
        BaseManagementNmsEntity.__init__(self, item, fetcher)
        self.hosted_on_id = None
        self.management_mode = None
        if not is_restorable(item):
            self.id = item.getId()
            self.name = item.getName()
            self.hosted_on_id = self._get_hosted_on_id(item)
            self.card_descr = self._get_card_descr(item)
            self.firmware_version = self._get_firmware_version(item)
            self.hardware_version = self._get_hardware_version(item)
            self.software_version = self._get_software_version(item)
            self.hosting_card = self._get_hosting_card(item)
            self.serial_number = self._get_serial_number(item)
            self.type = self._get_type(item)
            self.index = self._get_index(item)
            self.uuid = item.getUuid()
            self.management_mode = self._get_management_mode(item)

    # The accessors below normalize a missing/empty bean value to ''.

    def _get_hosted_on_id(self, item):
        return item.getHostedOnId() or ''

    def _get_card_descr(self, item):
        return item.getCardDescr() or ''

    def _get_firmware_version(self, item):
        return item.getFirmwareVersion() or ''

    def _get_hardware_version(self, item):
        return item.getHardwareVersion() or ''

    def _get_software_version(self, item):
        return item.getSoftwareVersion() or ''

    def _get_hosting_card(self, item):
        return item.getHostingCard() or ''

    def _get_serial_number(self, item):
        return item.getSerialNumber() or ''

    def _get_type(self, item):
        return item.getType() or ''

    def _get_index(self, item):
        return item.getIndex() or ''
class BaseNmsCollection:
    """Id-keyed collection of NNMi entities with dict- and list-like access,
    plus helpers to resolve related topology from other NNMi services."""
    def __init__(self, fetcher, items):
        r'@types: BaseNmsFetcher, list[BaseNmsEntity]'
        self.fetcher = fetcher
        self.api = fetcher.api
        idmap = {}
        for item in items:
            if not isinstance(item, self.item_class):
                raise ValueError('expected instances of %r class, but %r instance occurred' % (self.item_class, item.__class__.__name__))
            # Duplicate ids silently collapse - the last item wins.
            idmap[item.id] = item
        self._items = idmap
    def __len__(self):
        return len(self._items)
    def __getitem__(self, item_id):
        """int/long -> positional access; slice -> sub-collection; else id key."""
        if isinstance(item_id, types.IntType) or isinstance(item_id, types.LongType):
            # NOTE: positional order is the dict's values() order (arbitrary).
            return self.values()[item_id]
        elif isinstance(item_id, types.SliceType):
            return self.__getslice___(item_id.start, item_id.stop)
        else:
            return self._items[item_id]
    # NOTE(review): nonstandard name (three trailing underscores) - this is
    # only invoked explicitly from __getitem__ above, never by the interpreter.
    def __getslice___(self, start, end):
        cls = self.__class__
        return cls(self.fetcher, self.values()[start:end])
    def __contains__(self, item_id):
        return item_id in self._items.keys()
    def filter_restorable_items(self):
        '@types: -> list[BaseNmsEntity]'
        # Despite the name, this RETURNS the non-restorable (newly fetched)
        # items, i.e. it filters restorable ones OUT.
        return filter(is_not_restorable, self.itervalues())
    def items(self):
        return self._items.items()
    def keys(self):
        return self._items.keys()
    def values(self):
        return self._items.values()
    def iteritems(self):
        return self._items.iteritems()
    def itervalues(self):
        return self._items.itervalues()
    def iterkeys(self):
        return self._items.iterkeys()
    def get(self, item_id, default=None):
        return self._items.get(item_id, default)
    def merge(self, collection):
        """Return a new collection combining self and another of the same type."""
        if collection.item_class != self.item_class:
            raise ValueError('cannot merge collections with different item types')
        if collection.fetcher.__class__ != self.fetcher.__class__:
            raise ValueError('cannot merge collections with different fetcher types')
        cls = self.__class__
        return cls(self.fetcher, itertools.chain(self.itervalues(), collection.itervalues()))
    def _get_partitioned_topology_by_field(self, nms_service, field_name, values):
        """Resolve related entities, splitting ids into already-discovered
        (restored locally, no fetch) and undiscovered (fetched from NNMi in
        OR-chunks of DEFAULT_CONDITIONS_IN_FILTER conditions)."""
        fetcher = self.api.get_fetcher(nms_service)
        if fetcher:
            if values:
                idMap, discovered_ids, undiscovered_ids = self.api.ucmdb_api.partitionIds(values)
                restorable_items = _restore_items(fetcher, idMap, discovered_ids)
                restorable_items_collection = fetcher.collection_class(fetcher,
                                                                       restorable_items)
                if undiscovered_ids:
                    undiscovered_ids = sorted(undiscovered_ids)
                    fullCollection = fetcher.collection_class(fetcher, [])
                    for id_chunk_index in xrange(0, len(undiscovered_ids), DEFAULT_CONDITIONS_IN_FILTER):
                        filter_ = FF.EMPTY
                        for undiscovered_id in list(undiscovered_ids)[id_chunk_index:id_chunk_index+DEFAULT_CONDITIONS_IN_FILTER]:
                            filter_ |= FF.CONDITION(field_name, '==', undiscovered_id)
                        fullCollection = fullCollection.merge(fetcher.filtered(filter_).all())
                    return fullCollection.merge(restorable_items_collection)
                return restorable_items_collection
        return fetcher.collection_class(fetcher, [])
    def _get_partitioned_topology_by_id(self, nms_service, ids):
        r'@types: NmsServices, set[str]->BaseNmsCollection'
        return self._get_partitioned_topology_by_field(nms_service, 'id', ids)
    def _get_related_topology(self, nms_service, field_name, values):
        """Fetch all entities whose field matches any of the given values,
        chunking the OR-ed conditions to keep each request bounded."""
        fetcher = self.api.get_fetcher(nms_service)
        if fetcher:
            if values:
                values = sorted(values)
                fullCollection = fetcher.collection_class(fetcher, [])
                for values_index in xrange(0, len(values), DEFAULT_CONDITIONS_IN_FILTER):
                    filter_ = FF.EMPTY
                    for value in values[values_index:values_index+DEFAULT_CONDITIONS_IN_FILTER]:
                        filter_ |= FF.CONDITION(field_name, '==', value)
                    if fetcher:
                        fullCollection = fullCollection.merge(fetcher.filtered(filter_).all())
                return fullCollection
        return fetcher.collection_class(fetcher, [])
class NmsNodeCollection(BaseNmsCollection):
    """Collection of nodes; resolves interfaces/ips/ports/cards hosted on them."""
    item_class = NmsNodeEntity
    def _get_rt_interface(self):
        """Interfaces hosted on these nodes."""
        # Interface.hostedOnId <== Node.id
        return self._get_related_topology(NmsServices.Interface,
                                          'hostedOnId',
                                          self.keys())
    def _get_rt_ip_address(self):
        """IP addresses hosted on these nodes."""
        # IPAddress.hostedOnId <== Node.id
        return self._get_related_topology(NmsServices.IPAddress,
                                          'hostedOnId',
                                          self.keys())
    def _get_rt_port(self):
        """Ports hosted on these nodes."""
        # Port.hostedOnId <== Node.id
        return self._get_related_topology(NmsServices.Port,
                                          'hostedOnId',
                                          self.keys())
    def _get_rt_card(self):
        """Cards hosted on these nodes."""
        # Card.hostedOnId <== Node.id
        return self._get_related_topology(NmsServices.Card,
                                          'hostedOnId',
                                          self.keys())
class NmsInterfaceCollection(BaseNmsCollection):
    """Collection of interfaces; resolves their hosting nodes."""
    item_class = NmsInterfaceEntity

    def _get_rt_node(self):
        # Node.id <== Interface.hostedOnId (restorable entries are skipped).
        host_ids = set()
        for entity in self.filter_restorable_items():
            if entity.hosted_on_id:
                host_ids.add(entity.hosted_on_id)
        return self._get_partitioned_topology_by_id(NmsServices.Node, host_ids)
class NmsIPAddressCollection(BaseNmsCollection):
    """Collection of IP addresses; resolves their subnets."""
    item_class = NmsIPAddressEntity

    def _get_rt_ip_subnet(self):
        # IPSubnet.id ==> IPAddress.ipSubnetId (restorable entries skipped).
        subnet_ids = set()
        for entity in self.filter_restorable_items():
            if entity.ip_subnet_id:
                subnet_ids.add(entity.ip_subnet_id)
        return self._get_partitioned_topology_by_field(NmsServices.IPSubnet,
                                                       'ipSubnetId',
                                                       subnet_ids)
class NmsIPSubnetCollection(BaseNmsCollection):
    """Collection of NmsIPSubnetEntity items."""
    item_class = NmsIPSubnetEntity
class NmsL2NodeCollection(NmsNodeCollection):
    """Collection used by L2-node discovery; items are L2 connection beans."""
    # NOTE(review): inherits NmsNodeCollection's 'hostedOnId' related-topology
    # helpers while holding L2 connection entities - verify those inherited
    # lookups are meaningful for L2 connection ids.
    item_class = NmsL2ConnectionEntity
class NmsL2ConnectionCollection(BaseNmsCollection):
    """Collection of L2 connections; resolves their member interfaces."""
    item_class = NmsL2ConnectionEntity
    def _get_rt_interface(self):
        """Interfaces referenced by these connections' 'interfaces' id lists."""
        # L2Connection.interfaces[] ==> Interface.id
        # Iterate only non-restorable items: restorable entities carry just
        # ids and have no 'interfaces' attribute (would raise AttributeError).
        # Consistent with NmsVLANCollection._get_rt_port.
        interface_ids = []
        for entity in self.filter_restorable_items():
            interface_ids.extend(entity.interfaces)
        return self._get_partitioned_topology_by_id(NmsServices.Interface,
                                                    set(interface_ids))
class NmsVLANCollection(BaseNmsCollection):
    """Collection of VLANs; resolves their member ports."""
    item_class = NmsVLANEntity

    def _get_rt_port(self):
        # VLAN.ports[] ==> Port.id (restorable entries carry no port list).
        all_port_ids = set()
        for entity in self.filter_restorable_items():
            all_port_ids.update(entity.ports)
        return self._get_partitioned_topology_by_id(NmsServices.Port, all_port_ids)
class NmsPortCollection(BaseNmsCollection):
    """Collection of ports; resolves their hosting nodes."""
    item_class = NmsPortEntity

    def _get_rt_node(self):
        # Node.id <== Port.hostedOnId
        host_ids = set(entity.hosted_on_id
                       for entity in self.filter_restorable_items())
        return self._get_partitioned_topology_by_id(NmsServices.Node, host_ids)
class NmsCardCollection(BaseNmsCollection):
    """Collection of NmsCardEntity items."""
    item_class = NmsCardEntity
class StorageFileDoesNotExist(Exception):
    """Raised when no recorded result file exists for a storage key."""
    pass
class StorageOperationException(Exception):
    """Raised when (de)serialization of stored results fails."""
    pass
class ResultStorage:
    """Records/replays raw NNMi result pages as Kryo-serialized files.

    Files live under '<RECORD_FOLDER_PATH>nnm_store/<triggerId>_<namespace>/',
    one '.ser' file per fetcher+filter+page combination.
    """
    def __init__(self, fetcher, namespace=_STORE_NAMESPACE):
        self.fetcher = fetcher
        self.namespace = namespace
    def get_storage_key(self, final_filter, page_index, page_size):
        """Build a file-name key unique per fetcher class, filter and page."""
        filter_hash = nnmi_filters.filter_hash(final_filter)
        key = "%s_%s_i%s_p%s" % (self.fetcher.__class__.__name__, filter_hash, page_index, page_size)
        return key
    def get_trigger_id(self):
        return self.fetcher.api.configuration.triggerId
    def get_store_file_name(self, storage_key):
        """Return (directory path, full file path) for the given key."""
        path = RECORD_FOLDER_PATH
        triggerId = self.get_trigger_id()
        filePath = '%snnm_store/%s_%s' % (path, triggerId, self.namespace)
        fileName = '%s.ser' % storage_key
        fullFileName = "%s/%s" % (filePath, fileName)
        return filePath, fullFileName
    def serialize(self, items, fullFileName):
        """Write items to file with Kryo; raises StorageOperationException."""
        stream = None
        try:
            try:
                kryo = Kryo()
                stream = Output(FileOutputStream(fullFileName))
                kryo.writeObject(stream, items)
            except:
                # Any Java/serialization failure is wrapped uniformly.
                raise StorageOperationException("Serialization failed")
        finally:
            if stream is not None:
                try:
                    stream.close()
                except:
                    pass
    def deserialize(self, fullFileName):
        """Read a java.util.ArrayList back with Kryo; raises StorageOperationException."""
        stream = None
        try:
            try:
                kryo = Kryo()
                stream = Input(FileInputStream(fullFileName))
                return kryo.readObject(stream, java.util.ArrayList)
            except:
                # Any Java/deserialization failure is wrapped uniformly.
                raise StorageOperationException("Deserialization failed")
        finally:
            if stream is not None:
                try:
                    stream.close()
                except:
                    pass
    def store_items(self, items, storage_key):
        """Persist items under the given key, creating directories as needed."""
        filePath, fullFileName = self.get_store_file_name(storage_key)
        if not os.path.exists(filePath):
            os.makedirs(filePath)
        logger.debug(" -- Saving items to file '%s'" % fullFileName)
        self.serialize(items, fullFileName)
    def read_items(self, storage_key):
        """Load items stored under the key; raises StorageFileDoesNotExist."""
        _, fullFileName = self.get_store_file_name(storage_key)
        logger.debug(" -- Reading items from file '%s'" % fullFileName)
        if os.path.isfile(fullFileName):
            return self.deserialize(fullFileName)
        else:
            raise StorageFileDoesNotExist()
class BaseNmsFetcher:
def __init__(self, api, endpoint_proto, endpoint_host, endpoint_port,
auth_username, auth_password, default_filter=None):
self.api = api
self.endpoint_proto = endpoint_proto
self.endpoint_host = endpoint_host
self.endpoint_port = endpoint_port
self.auth_username = auth_username
self.auth_password = auth_password
self.default_filter = default_filter
self._connection_host = endpoint_host
try:
ip_addr.IPv6Address(endpoint_host)
self._connection_host = "[%s]" % endpoint_host
except:
pass
self._storage = ResultStorage(self)
def _create_stub(self):
service = self.stub_class(java.net.URL('%s://%s:%d%s' % (self.endpoint_proto, self._connection_host, int(self.endpoint_port), self.endpoint_path)))
service.setHandlerResolver(SoapHandlerResolver())
port = self._get_port(service)
port.getRequestContext().put(BindingProvider.USERNAME_PROPERTY, self.auth_username);
port.getRequestContext().put(BindingProvider.PASSWORD_PROPERTY, self.auth_password);
return port
def _get_stub(self):
return self._create_stub()
def _get_port(self, service):
raise NotImplemented("_get_port")
def __getitem__(self, index):
if isinstance(index, types.TupleType):
page_index, page_size = index
else:
page_index, page_size = index, self.page_size
result = self.fetch(page_index=page_index, page_size=page_size)
if result is None:
raise IndexError()
return result
def __repr__(self):
return '%s(endpoint_proto = %r, endpoint_host = %r, endpoint_port = %r, auth_username = %r, auth_password = %r, default_filter = %r)' % (self.__class__.__name__, self.endpoint_proto, self.endpoint_host, self.endpoint_port, self.auth_username, self.auth_password, self.default_filter)
def __str__(self):
return '<%s endpoint_proto=%r endpoint_host=%r endpoint_port=%r auth_username=%r auth_password=%r default_filter=%r>' % (self.__class__.__name__, self.endpoint_proto, self.endpoint_host, self.endpoint_port, self.auth_username, self.auth_password, self.default_filter)
@retry_on((java.net.SocketException, WebServiceException), FETCH_RETRY_COUNT, with_delay=FETCH_RETRY_DELAY, rethrow_exception=False, reload=True)
@ensure_delay(FETCH_DELAY)
@log_self_calls()
def fetch(self, page_index, page_size=None, subfilter=None):
item_class = self.collection_class.item_class
includes_custom_attrs = item_class.includes_custom_attrs
configuration = self.api.configuration
if page_size is None:
page_size = self.page_size
final_filter = FF.EMPTY
if self.default_filter is not None:
final_filter &= self.default_filter
if page_size != NO_PAGE_SIZE: #explicitly unlimited
final_filter &= FF.PAGER(page_index, page_size)
if subfilter is not None:
final_filter &= subfilter
if includes_custom_attrs and configuration.requestCustomAttributes:
final_filter &= FF.CUSTOM_ATTRS
result_items = []
storage_key = None
if _STORE_CONFIG is not None:
storage_key = self._storage.get_storage_key(final_filter, page_index, page_size)
items = None
items_updated = False
if _STORE_CONFIG and _STORE_CONFIG.read():
try:
items = self._storage.read_items(storage_key)
except (StorageFileDoesNotExist, StorageOperationException), ex:
logger.debug("Failed to read from storage or no previous results exist")
if _STORE_CONFIG.fallback_to_live():
items = self._get_stub_items(final_filter.nr())
items_updated = True
else:
raise ex
else:
items = self._get_stub_items(final_filter.nr())
items_updated = True
if _STORE_CONFIG and _STORE_CONFIG.write() and items_updated:
self._storage.store_items(items, storage_key)
if not items:
return None
item_class = self.collection_class.item_class
for item in items:
if self._is_valid_item(item):
item_entity = item_class(item, self)
result_items.append(item_entity)
return self.collection_class(self, result_items)
def all(self):
result = []
for page in self:
for item in page:
result.append(item)
if len(result) < self.page_size:
break
return self.collection_class(self, result)
def filtered(self, subfilter):
cls = self.__class__
_filter = self.default_filter
if subfilter is not None:
if _filter is not None:
_filter &= subfilter
else:
_filter = subfilter
return cls(api=self.api, endpoint_proto=self.endpoint_proto,
endpoint_host=self.endpoint_host,
endpoint_port=self.endpoint_port,
auth_username=self.auth_username,
auth_password=self.auth_password,
default_filter=_filter)
class NmsNodeFetcher(BaseNmsFetcher):
    """Fetches Node beans from the NNMi NodeBeanService."""
    stub_class = com.hp.ov.nms.sdk.node.NodeBeanService
    collection_class = NmsNodeCollection
    endpoint_path = '/NodeBeanService/NodeBean'
    page_size = DEFAULT_PAGESIZE_NODE

    def _get_port(self, service):
        return service.getNodeBeanPort()

    def _get_stub_items(self, subfilter):
        return self._get_stub().getNodes(subfilter).getItem()

    def _is_valid_item(self, item):
        # A node is usable only when both id and name are present.
        return not_empty(item.getId()) and not_empty(item.getName())
class NmsInterfaceFetcher(BaseNmsFetcher):
    """Fetches Interface beans from the NNMi InterfaceBeanService."""
    stub_class = com.hp.ov.nms.sdk.iface.InterfaceBeanService
    endpoint_path = '/InterfaceBeanService/InterfaceBean'
    collection_class = NmsInterfaceCollection
    page_size = DEFAULT_PAGESIZE_INTERFACE

    def _get_port(self, service):
        return service.getInterfaceBeanPort()

    def _get_stub_items(self, subfilter):
        return self._get_stub().getInterfaces(subfilter).getItem()

    def _is_valid_item(self, item):
        # An id is the only mandatory attribute for interfaces.
        return not_empty(item.getId())
class NmsIPAddressFetcher(BaseNmsFetcher):
    """Fetches IPAddress beans from the NNMi IPAddressBeanService."""
    stub_class = com.hp.ov.nms.sdk.ipaddress.IPAddressBeanService
    endpoint_path = '/IPAddressBeanService/IPAddressBean'
    collection_class = NmsIPAddressCollection
    page_size = DEFAULT_PAGESIZE_IPADDRESS

    def _get_port(self, service):
        return service.getIPAddressBeanPort()

    def _get_stub_items(self, subfilter):
        return self._get_stub().getIPAddresses(subfilter).getItem()

    def _is_valid_item(self, item):
        # Requires id, hosting node id and the IP value itself.
        return (not_empty(item.getId())
                and not_empty(item.getHostedOnId())
                and not_empty(item.getIpValue()))
class NmsIPSubnetFetcher(BaseNmsFetcher):
    """Fetches IPSubnet beans from the NNMi IPSubnetBeanService."""
    stub_class = com.hp.ov.nms.sdk.ipsubnet.IPSubnetBeanService
    endpoint_path = '/IPSubnetBeanService/IPSubnetBean'
    collection_class = NmsIPSubnetCollection
    page_size = DEFAULT_PAGESIZE_IPSUBNET

    def _get_port(self, service):
        return service.getIPSubnetBeanPort()

    def _get_stub_items(self, subfilter):
        return self._get_stub().getIPSubnets(subfilter).getItem()

    def _is_valid_item(self, item):
        # Accepts only IPv4-sized prefixes (0..32); subnets with longer
        # (IPv6) prefixes are dropped.
        prefix_length = item.getPrefixLength()
        return (not_empty(item.getId())
                and not_empty(item.getPrefix())
                and not_empty(prefix_length)
                and (0 <= prefix_length <= 32))
class NmsL2ConnectionFetcher(BaseNmsFetcher):
    """Fetches Layer2Connection beans from the NNMi L2ConnectionBeanService."""
    stub_class = com.hp.ov.nms.sdk.l2connection.L2ConnectionBeanService
    endpoint_path = '/L2ConnectionBeanService/L2ConnectionBean'
    collection_class = NmsL2ConnectionCollection
    page_size = DEFAULT_PAGESIZE_L2CONNECTION

    def _get_port(self, service):
        return service.getL2ConnectionBeanPort()

    def _get_stub_items(self, subfilter):
        return self._get_stub().getL2Connections(subfilter).getItem()

    def _is_valid_item(self, item):
        # Both id and name are required (the name may be parsed for members).
        return not_empty(item.getId()) and not_empty(item.getName())
class NmsL2NodeFetcher(BaseNmsFetcher):
    """Same service/endpoint as NmsL2ConnectionFetcher but yields an
    NmsL2NodeCollection for L2-node discovery."""
    stub_class = com.hp.ov.nms.sdk.l2connection.L2ConnectionBeanService
    endpoint_path = '/L2ConnectionBeanService/L2ConnectionBean'
    collection_class = NmsL2NodeCollection
    page_size = DEFAULT_PAGESIZE_L2CONNECTION

    def _get_port(self, service):
        return service.getL2ConnectionBeanPort()

    def _get_stub_items(self, subfilter):
        return self._get_stub().getL2Connections(subfilter).getItem()

    def _is_valid_item(self, item):
        # Both id and name are required.
        return not_empty(item.getId()) and not_empty(item.getName())
class NmsVLANFetcher(BaseNmsFetcher):
    """Fetches VLAN beans from the NNMi VLANBeanService."""
    stub_class = com.hp.ov.nms.sdk.vlan.VLANBeanService
    endpoint_path = '/VLANBeanService/VLANBean'
    collection_class = NmsVLANCollection
    page_size = DEFAULT_PAGESIZE_VLAN

    def _get_port(self, service):
        return service.getVLANBeanPort()

    def _get_stub_items(self, subfilter):
        return self._get_stub().getVLANs(subfilter).getItem()

    def _is_valid_item(self, item):
        # Requires both the bean id and the VLAN number.
        return not_empty(item.getId()) and not_empty(item.getVlanId())
class NmsPortFetcher(BaseNmsFetcher):
    """Fetches Port beans from the NNMi PortBeanService."""
    stub_class = com.hp.ov.nms.sdk.phys.PortBeanService
    endpoint_path = '/NmsSdkService/PortBean'
    collection_class = NmsPortCollection
    page_size = DEFAULT_PAGESIZE_PORT

    def _get_port(self, service):
        return service.getPortBeanPort()

    def _get_stub_items(self, subfilter):
        return self._get_stub().getPorts(subfilter).getItem()

    def _is_valid_item(self, item):
        # Requires id, name, port index and the hosting node id.
        return (not_empty(item.getId())
                and not_empty(item.getName())
                and not_empty(item.getIndex())
                and not_empty(item.getHostedOnId()))
class NmsCardFetcher(BaseNmsFetcher):
    """Fetches Card beans from the NNMi CardBeanService."""
    stub_class = com.hp.ov.nms.sdk.phys.CardBeanService
    endpoint_path = '/NmsSdkService/CardBean'
    collection_class = NmsCardCollection
    page_size = DEFAULT_PAGESIZE_CARD

    def _get_port(self, service):
        return service.getCardBeanPort()

    def _get_stub_items(self, subfilter):
        return self._get_stub().getCards(subfilter).getItem()

    def _is_valid_item(self, item):
        # Requires id, a hosting node, and at least one physical identifier
        # (serial number or entity physical index).
        return (not_empty(item.getId())
                and not_empty(item.getHostedOnId())
                and (not_empty(item.getSerialNumber())
                     or not_empty(item.getEntityPhysicalIndex())))
class NmsAPI:
SERVICE_TO_FETCHER = {
NmsServices.Node: NmsNodeFetcher,
NmsServices.Interface: NmsInterfaceFetcher,
NmsServices.IPAddress: NmsIPAddressFetcher,
NmsServices.IPSubnet: NmsIPSubnetFetcher,
NmsServices.L2Connection: NmsL2ConnectionFetcher,
NmsServices.L2Node: NmsL2NodeFetcher,
NmsServices.VLAN: NmsVLANFetcher,
NmsServices.Port: NmsPortFetcher,
NmsServices.Card: NmsCardFetcher,
}
def __init__(self, endpoint_proto, endpoint_host, endpoint_port,
auth_username, auth_password, ucmdb_api, configuration):
self.endpoint_proto = endpoint_proto
self.endpoint_host = endpoint_host
self.endpoint_port = endpoint_port
self.auth_username = auth_username
self.auth_password = auth_password
self.ucmdb_api = ucmdb_api
self.configuration = configuration
def __repr__(self):
return '%s(endpoint_proto = %r, endpoint_host = %r, endpoint_port = %r, auth_username = %r, auth_password = %r)' % (self.__class__.__name__, self.endpoint_proto, self.endpoint_host, self.endpoint_port, self.auth_username, self.auth_password)
def __str__(self):
return '<%s endpoint_proto=%r endpoint_host=%r endpoint_port=%r auth_username=%r auth_password=%r at 0x%.8X>' % (self.__class__.__name__, self.endpoint_proto, self.endpoint_host, self.endpoint_port, self.auth_username, self.auth_password, id(self))
def __getitem__(self, service):
return self.get_fetcher(service)
def get_fetcher(self, service):
return self.SERVICE_TO_FETCHER[service](self, self.endpoint_proto,
self.endpoint_host,
self.endpoint_port,
self.auth_username,
self.auth_password)
def get_related_topology_nodes(self, page_size=None, sub_filter=None):
return NmsNodeRelatedTopologyPager(self, page_size, sub_filter)
def get_related_topology_l2_connections(self, l2_connections=None, page_size=None):
if l2_connections:
return NmsL2OfflineConnectionRelatedTopologyPager(self, l2_connections, page_size)
return NmsL2ConnectionRelatedTopologyPager(self, page_size)
def get_related_topology_l2_node(self, page_size=None):
return NmsL2NodeRelatedTopologyPager(self, page_size=page_size)
def get_related_topology_vlans(self, page_size=None):
return NmsVLANRelatedTopologyPager(self, page_size=page_size)
def get_nodes(self, page_size=None, sub_filter=None):
''' Get nodes topology, split by pages '''
return NmsNodeTopologyPager(self, page_size, sub_filter)
def get_interfaces(self, page_size=None, sub_filter=None):
''' Get interfaces, split by pages '''
return NmsInterfaceTopologyPager(self, page_size, sub_filter)
def get_ip_adresses(self, page_size=None, sub_filter=None):
''' Get ips, split by pages '''
return NmsIpAddressTopologyPager(self, page_size, sub_filter)
def get_ip_subnets(self, page_size=None, sub_filter=None):
''' Get subnets, split by pages '''
return NmsIpSubnetTopologyPager(self, page_size, sub_filter)
def get_l2_connections(self, page_size=None, sub_filter=None):
''' Get l2 connections, split by pages '''
return NmsL2ConnectionTopologyPager(self, page_size, sub_filter)
def get_vlans(self, page_size=None, sub_filter=None):
''' Get vlans, split by pages '''
return NmsVlanTopologyPager(self, page_size, sub_filter)
def get_ports(self, page_size=None, sub_filter=None):
''' Get ports, split by pages '''
return NmsPortTopologyPager(self, page_size, sub_filter)
    def get_cards(self, page_size=None, sub_filter=None):
        ''' Get cards, split by pages '''
        return NmsCardTopologyPager(self, page_size, sub_filter)
def get_empty_collection(self, service):
''' -> NmsBaseCollection '''
fetcher = self.get_fetcher(service)
if fetcher is not None:
return fetcher.collection_class(fetcher, [])
def get_interfaces_non_paged(self, sub_filter=None):
''' -> NmsInterfaceCollection
Get interfaces with no pages '''
fetcher = self.get_fetcher(NmsServices.Interface)
collection = fetcher.fetch(0, page_size=NO_PAGE_SIZE, subfilter=sub_filter)
return collection
def getStringSizeInBytes(str_):
    ''' Byte length of *str_* when ASCII-encoded.
    NOTE: runs under Jython -- java.lang.String is used for the encoding. '''
    ascii_bytes = String(str(str_)).getBytes('ASCII')
    return len(ascii_bytes)
def property_equals_condition(property_name, value):
    ''' Build an equality ('==') filter condition for *property_name*. '''
    return FF.CONDITION(property_name, '==', value)
def name_equals_condition(name_value):
    ''' Condition matching objects whose 'name' property equals *name_value*. '''
    return property_equals_condition('name', name_value)
def hosted_on_id_condition(id_value):
    ''' Condition matching objects whose 'hostedOnId' property equals *id_value*. '''
    return property_equals_condition('hostedOnId', id_value)
FILTER_STR_LENGTH_LIMIT = 4 * 1024

def conditions_filter_generator_by_max_str(values, condition_fn, max_filter_str_length = FILTER_STR_LENGTH_LIMIT):
    '''
    iterable(values), func(value -> condition) -> filter
    Yield OR-concatenated subfilters built from *values*, each chunk kept
    below *max_filter_str_length* bytes when rendered as a string.
    Yields nothing for an empty *values*.
    '''
    if not values:
        return
    accumulated = FF.EMPTY
    accumulated_length = getStringSizeInBytes(str(accumulated))
    for value in values:
        condition = condition_fn(value)
        condition_length = getStringSizeInBytes(str(condition))
        if accumulated_length + condition_length >= max_filter_str_length:
            # chunk full: emit it and start a fresh one with this condition
            yield accumulated
            accumulated = condition
            accumulated_length = condition_length
        else:
            accumulated |= condition
            accumulated_length = getStringSizeInBytes(str(accumulated))
    yield accumulated
FILTER_MAX_COUNT = 25

def conditions_filter_generator_by_count(values, condition_fn, max_count = FILTER_MAX_COUNT):
    '''
    iterable(values), func(value -> condition) -> filter
    Yield OR-concatenated subfilters built from *values*, each chunk holding
    at most *max_count* conditions. Yields nothing for an empty *values*.
    '''
    if not values:
        return
    chunk = FF.EMPTY
    chunk_size = 0
    for value in values:
        condition = condition_fn(value)
        if chunk_size >= max_count:
            # chunk full: emit it and start a fresh one with this condition
            yield chunk
            chunk = condition
            chunk_size = 1
        else:
            chunk |= condition
            chunk_size += 1
    yield chunk
class BaseNmsTopology:
    ''' Minimal topology wrapper: holds one collection and exposes it. '''
    def __init__(self, collection):
        ''' :param collection: the entry collection for this topology page '''
        self.collection = collection

    def get_collection(self):
        ''' Return the wrapped collection. '''
        return self.collection
class NmsNodeRelatedTopology(BaseNmsTopology):
    # Topology page rooted at nodes; eagerly resolves related objects.
    entry_service = NmsServices.Node
    entry_collection_class = NmsNodeCollection
    def __init__(self, nodes):
        # NOTE(review): does not call BaseNmsTopology.__init__, so
        # self.collection / get_collection() are not set on this class.
        self.nodes = nodes
        self.interfaces = self.nodes._get_rt_interface()
        self.ip_addresses = self.nodes._get_rt_ip_address()
        api = nodes.api
        # ports/cards default to empty collections; only discovered when the
        # configuration enables physical-port discovery
        self.ports = api.get_empty_collection(NmsServices.Port)
        self.cards = api.get_empty_collection(NmsServices.Card)
        if api.configuration.discoverPhysicalPorts:
            self.ports = self.nodes._get_rt_port()
            self.cards = self.nodes._get_rt_card()
        self.ip_subnets = self.ip_addresses._get_rt_ip_subnet()
class NmsL2ConnectionRelatedTopology(BaseNmsTopology):
    # Topology page rooted at layer-2 connections; resolves related objects.
    entry_service = NmsServices.L2Connection
    entry_collection_class = NmsL2ConnectionCollection
    def __init__(self, l2_connections):
        # NOTE(review): does not call BaseNmsTopology.__init__ (no self.collection).
        self.l2_connections = l2_connections
        self.interfaces = self.l2_connections._get_rt_interface()
        if self.interfaces:
            self.nodes = self.interfaces._get_rt_node()
            # need to get related interfaces of the nodes to be able to report
            # nodes of layer 2 connection which have equal interface macs
            self.interfaces = self.interfaces.merge(self.nodes._get_rt_interface())
            self.ip_addresses = self.nodes._get_rt_ip_address()
        else:
            # no interfaces -> related nodes/addresses are unknown
            self.nodes = None
            self.ip_addresses = None
class NmsL2NodeRelatedTopology(BaseNmsTopology):
    # Topology page for layer-2 nodes; stores the paged chunk only.
    entry_service = NmsServices.L2Node
    entry_collection_class = NmsL2NodeCollection
    def __init__(self, l2_connections):
        # NOTE(review): parameter is named l2_connections but the pager passes
        # the entry collection (L2 nodes) -- confirm intended naming.
        self.l2_connections = l2_connections
class NmsVLANRelatedTopology(BaseNmsTopology):
    # Topology page rooted at VLANs; related objects are reached via ports.
    entry_service = NmsServices.VLAN
    entry_collection_class = NmsVLANCollection
    def __init__(self, vlans):
        # NOTE(review): does not call BaseNmsTopology.__init__ (no self.collection).
        self.vlans = vlans
        self.ports = self.vlans._get_rt_port()
        if self.ports:
            self.nodes = self.ports._get_rt_node()
            # merge in the nodes' own ports so all ports of VLAN nodes are reported
            self.ports = self.ports.merge(self.nodes._get_rt_port())
            self.interfaces = self.nodes._get_rt_interface()
            self.cards = self.nodes._get_rt_card()
            self.ip_addresses = self.nodes._get_rt_ip_address()
        else:
            # no ports -> nothing else can be resolved
            self.nodes = None
            self.interfaces = None
            self.cards = None
            self.ip_addresses = None
class NmsNodesTopology(BaseNmsTopology):
    ''' Plain node topology page (no related objects). '''
    entry_service = NmsServices.Node
    entry_collection_class = NmsNodeCollection
class NmsInterfacesTopology(BaseNmsTopology):
    ''' Plain interface topology page. '''
    entry_service = NmsServices.Interface
    entry_collection_class = NmsInterfaceCollection
class NmsIpAddressTopology(BaseNmsTopology):
    ''' Plain IP address topology page. '''
    entry_service = NmsServices.IPAddress
    entry_collection_class = NmsIPAddressCollection
class NmsIpSubnetTopology(BaseNmsTopology):
    ''' Plain IP subnet topology page. '''
    entry_service = NmsServices.IPSubnet
    entry_collection_class = NmsIPSubnetCollection
class NmsL2ConnectionTopology(BaseNmsTopology):
    ''' Plain layer-2 connection topology page. '''
    entry_service = NmsServices.L2Connection
    entry_collection_class = NmsL2ConnectionCollection
class NmsVlanTopology(BaseNmsTopology):
    ''' Plain VLAN topology page. '''
    entry_service = NmsServices.VLAN
    entry_collection_class = NmsVLANCollection
class NmsPortTopology(BaseNmsTopology):
    ''' Plain port topology page. '''
    entry_service = NmsServices.Port
    entry_collection_class = NmsPortCollection
class NmsCardTopology(BaseNmsTopology):
    ''' Plain card topology page. '''
    entry_service = NmsServices.Card
    entry_collection_class = NmsCardCollection
class NmsFullTopology(BaseNmsTopology):
    # Aggregate holder for every discovered topology collection.
    def __init__(self, nodes=None, interfaces=None, ip_addresses=None, ip_subnets=None, l2_connections=None,
                 vlans=None, ports=None, cards=None):
        # NOTE(review): does not call BaseNmsTopology.__init__ (no self.collection).
        self.nodes = nodes
        self.interfaces = interfaces
        self.ip_addresses = ip_addresses
        self.ip_subnets = ip_subnets
        self.l2_connections = l2_connections
        self.vlans = vlans
        self.ports = ports
        self.cards = cards
class BaseNmsRelatedTopologyPager:
    ''' Pages through a topology service; subclasses set related_topology_class. '''
    def __init__(self, api, page_size=None, sub_filter=None):
        self.api = api
        self.page_size = page_size
        self.sub_filter = sub_filter
    def __getitem__(self, index):
        # pager[i] or pager[i, page_size]; raises IndexError past the last page
        if isinstance(index, types.TupleType):  # Python 2 / Jython tuple check
            page_index, page_size = index
        else:
            page_index, page_size = index, None
        result = self.fetch(page_index, page_size, self.sub_filter)
        if result is None:
            raise IndexError()
        return result
    def fetch(self, page_index, page_size=None, subfilter=None):
        # page_size resolution: explicit arg > pager default > fetcher default
        fetcher = self.api.get_fetcher(self.related_topology_class.entry_service)
        if page_size is None:
            page_size = self.page_size
        if page_size is None:
            page_size = fetcher.page_size
        if fetcher:
            collection = fetcher.fetch(page_index=page_index,
                                       page_size=page_size,
                                       subfilter=subfilter)
            if collection is None:
                return None  # past the final page
            return self.related_topology_class(collection)
        # falls through to an implicit None when no fetcher is available
class NmsNodeRelatedTopologyPager(BaseNmsRelatedTopologyPager):
    ''' Pager producing NmsNodeRelatedTopology pages. '''
    related_topology_class = NmsNodeRelatedTopology
class NmsL2ConnectionRelatedTopologyPager(BaseNmsRelatedTopologyPager):
    ''' Pager producing NmsL2ConnectionRelatedTopology pages from the service. '''
    related_topology_class = NmsL2ConnectionRelatedTopology
class NmsL2OfflineConnectionRelatedTopologyPager(BaseNmsRelatedTopologyPager):
    # Pages over an already-fetched list of L2 connections instead of the service.
    related_topology_class = NmsL2ConnectionRelatedTopology
    def __init__(self, api, l2_connections, page_size=None, sub_filter=None):
        BaseNmsRelatedTopologyPager.__init__(self, api, page_size, sub_filter)
        self.l2_connections = l2_connections
    def fetch(self, page_index, page_size=None, subfilter=None):
        # slice the in-memory list into pages; *subfilter* is ignored here
        fetcher = self.api.get_fetcher(self.related_topology_class.entry_service)
        if page_size is None:
            page_size = self.page_size
        if page_size is None:
            # NOTE(review): if fetcher.page_size is also None the multiply
            # below raises TypeError -- confirm fetchers always set a default
            page_size = fetcher.page_size
        collection_class = self.related_topology_class.entry_collection_class
        start_index = page_index * page_size
        end_index = start_index + page_size
        l2_connection_chunk = self.l2_connections[start_index:end_index]
        if l2_connection_chunk:
            collection = collection_class(fetcher,
                                          l2_connection_chunk)
            return self.related_topology_class(collection)
        # empty chunk -> implicit None; __getitem__ turns that into IndexError
class NmsVLANRelatedTopologyPager(BaseNmsRelatedTopologyPager):
    ''' Pager producing NmsVLANRelatedTopology pages. '''
    related_topology_class = NmsVLANRelatedTopology
class NmsL2NodeRelatedTopologyPager(BaseNmsRelatedTopologyPager):
    ''' Pager producing NmsL2NodeRelatedTopology pages. '''
    related_topology_class = NmsL2NodeRelatedTopology
class NmsNodeTopologyPager(BaseNmsRelatedTopologyPager):
    ''' Pager producing plain node topology pages. '''
    related_topology_class = NmsNodesTopology
class NmsInterfaceTopologyPager(BaseNmsRelatedTopologyPager):
    ''' Pager producing plain interface topology pages. '''
    related_topology_class = NmsInterfacesTopology
class NmsIpAddressTopologyPager(BaseNmsRelatedTopologyPager):
    ''' Pager producing plain IP address topology pages. '''
    related_topology_class = NmsIpAddressTopology
class NmsIpSubnetTopologyPager(BaseNmsRelatedTopologyPager):
    ''' Pager producing plain IP subnet topology pages. '''
    related_topology_class = NmsIpSubnetTopology
class NmsL2ConnectionTopologyPager(BaseNmsRelatedTopologyPager):
    ''' Pager producing plain layer-2 connection topology pages. '''
    related_topology_class = NmsL2ConnectionTopology
class NmsVlanTopologyPager(BaseNmsRelatedTopologyPager):
    ''' Pager producing plain VLAN topology pages. '''
    related_topology_class = NmsVlanTopology
class NmsPortTopologyPager(BaseNmsRelatedTopologyPager):
    ''' Pager producing plain port topology pages. '''
    related_topology_class = NmsPortTopology
class NmsCardTopologyPager(BaseNmsRelatedTopologyPager):
    ''' Pager producing plain card topology pages. '''
    related_topology_class = NmsCardTopology
| [
"silentbalanceyh@126.com"
] | silentbalanceyh@126.com |
30607bd2e7f1b69e4506595e22f4151a1ae38093 | f67ec5bb21e564da87c3e29a9aff0b3393890fcc | /simobility/core/tools.py | 6e637d08daeac01e7b8f62c9baaa343931c90614 | [
"MIT"
] | permissive | LADDDUU/simobility | efc742cd38586b56c7228096d5bee70de294dd29 | 6411b8ca50523e1a7c436d4b00c5d11d9a45157b | refs/heads/master | 2022-12-16T05:40:05.148567 | 2020-09-12T05:18:32 | 2020-09-12T05:18:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,918 | py | import logging
import pandas as pd
import numpy as np
from datetime import datetime
import uuid
import random
from .itinerary import Itinerary
from .vehicle import Vehicle
from .booking import Booking
from .position import Position
def basic_booking_itinerary(
    current_time: int, vehicle: Vehicle, booking: Booking
) -> Itinerary:
    """
    Build the simplest possible itinerary: one vehicle drives to the
    customer's pickup point, picks them up, drives to the dropoff point
    and drops them off.
    """
    plan = Itinerary(current_time, vehicle)
    plan.move_to(booking.pickup)
    plan.pickup(booking)
    plan.move_to(booking.dropoff)
    plan.dropoff(booking)
    return plan
class ReplayDemand:
    """Replays historical trip demand from a feather file, one clock tick at a time."""

    def __init__(
        self,
        clock,
        file_name: str,
        from_datetime: datetime,
        to_datetime: datetime,
        round_to: str,
        sample_size: int = None,
        map_matcher=None,
        seed=None,
    ):
        """
        Expected columns (as read by this class):
        - pickup_datetime
        - pickup_lon
        - pickup_lat
        - dropoff_lon
        - dropoff_lat

        :param round_to: pandas offset alias used to bucket pickup times
        :param sample_size: if set, randomly sample this many trips
        :param seed: seed for the local sampling RandomState
        """
        self.clock = clock
        self.data = pd.read_feather(file_name)
        logging.debug(f"Total number of trips: {self.data.shape[0]}")
        # # TODO: is it really needed???
        # time_jitter = np.array([
        #     pd.to_timedelta("{} sec".format(np.round(i)))
        #     for i in np.random.normal(0, 120, self.data.datetime.shape[0])
        # ])
        # self.data.datetime = self.data.datetime.dt.to_pydatetime() + time_jitter
        # keep only trips inside the half-open window [from_datetime, to_datetime)
        idx = (self.data.pickup_datetime >= from_datetime) & (
            self.data.pickup_datetime < to_datetime
        )
        self.data = self.data[idx]
        logging.debug(f"Time filtered number of trips: {self.data.shape[0]}")
        # "local" randomizer, independent from the "global", simulation level
        state = np.random.RandomState(seed)
        if sample_size is not None:
            # sample with replacement only when fewer rows than requested
            replace = self.data.index.shape[0] < sample_size
            index = state.choice(self.data.index, sample_size, replace=replace)
            self.data = self.data.loc[index]
            logging.debug(f"Sample size: {self.data.shape[0]}")
        # bucket pickups so next() can look them up by rounded timestamp
        self.data.pickup_datetime = self.data.pickup_datetime.dt.round(round_to)
        self.demand = {
            g: item for g, item in self.data.groupby(self.data.pickup_datetime)
        }
        self.map_matcher = map_matcher

    def next(self, key=None):
        """Return the Bookings whose (rounded) pickup time equals *key*
        (defaults to the current clock time); may be an empty list."""
        if key is None:
            key = pd.to_datetime(self.clock.to_datetime())
        bookings = []
        seats = 1  # every replayed trip requests a single seat
        if key in self.demand:
            for b in self.demand[key].itertuples():
                pu = Position(b.pickup_lon, b.pickup_lat)
                do = Position(b.dropoff_lon, b.dropoff_lat)
                if self.map_matcher:
                    original_pu = pu
                    original_do = do
                    pu = self.map_matcher.map_match(pu)
                    # drop trips whose matched point drifted too far (units per
                    # Position.distance -- presumably km; TODO confirm)
                    if pu.distance(original_pu) > 0.05:
                        logging.warning(
                            f"Map matched pickup is {pu.distance(original_pu)} away for the original"
                        )
                        # skip booking
                        continue
                    do = self.map_matcher.map_match(do)
                    if do.distance(original_do) > 0.05:
                        logging.warning(
                            f"Map matched dropoff is {do.distance(original_do)} away for the original"
                        )
                        # skip booking
                        continue
                # TODO: move distance thresholds to config
                if pu.distance(do) < 0.1:
                    # logging.warning(f"Pickup and dropoff are too close to each other {pu.distance(do)}")
                    continue
                bookings.append(Booking(self.clock, pu, do, seats))
        return bookings
| [
"sashkolysenko@gmail.com"
] | sashkolysenko@gmail.com |
2ea442f87bc48f849933829bc5a2f5a2e0326669 | 9782f131643a9301eec546ad7281592614417b05 | /vfeedbacknet/vfeedbacknet_simple_eccv_model1.py | 711625ecf75b45a5abcf3eab2438ed80f546003f | [] | no_license | StanfordVisionSystems/vfeedbacknet | 9d5bddb74a37c1fe8f01dddf2a48ca66151bbbf9 | 55865f595ac60891c594b225baf39c0ac5609800 | refs/heads/master | 2021-01-20T06:29:35.038083 | 2018-03-12T17:42:13 | 2018-03-12T17:42:13 | 89,887,050 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 40,169 | py | import numpy as np
import tensorflow as tf
import logging
from vfeedbacknet.vfeedbacknet_convLSTM import ConvLSTMCell
from vfeedbacknet.vfeedbacknet_utilities import ModelLogger
from vfeedbacknet.vfeedbacknet_base import VFeedbackNetBase
#from vfeedbacknet.vfeedbacknet_fb_base1 import Model as VFeedbackNetFBBase
class Model:
'''
TODO(jremmons) add description
'''
model_name = 'simple_eccv_model1'
    def __init__(self, sess, num_classes, batch_size,
                 train_featurizer='FINE_TUNE', train_main_model='FINE_TUNE', train_fc='FINE_TUNE',
                 weights_filename=None, is_training=True):
        '''
        :param sess: tf.Session used for variable initialization/assignment
        :param num_classes: number of output classes (width of fc2)
        :param batch_size: stored for later use by the model
        :param train_*: one of 'NO', 'FINE_TUNE', 'FROM_SCRATCH'; all forced
            to 'NO' when is_training is False
        :param weights_filename: numpy archive keyed by variable name, or None
        '''
        self.sess = sess
        if weights_filename is not None:
            print('loading weights from: {}'.format(weights_filename))
        self.weights = np.load(weights_filename) if weights_filename is not None else None
        self.num_classes = num_classes
        self.batch_size = batch_size
        assert train_featurizer in ['NO', 'FINE_TUNE', 'FROM_SCRATCH'], 'train_featurizer must be either: NO, FINE_TUNE, or FROM_SCRATCH'
        self.train_featurizer = train_featurizer if is_training else 'NO'
        assert train_main_model in ['NO', 'FINE_TUNE', 'FROM_SCRATCH'], 'train_main_model must be either: NO, FINE_TUNE, or FROM_SCRATCH'
        self.train_main_model = train_main_model if is_training else 'NO'
        assert train_fc in ['NO', 'FINE_TUNE', 'FROM_SCRATCH'], 'train_fc must be either: NO, FINE_TUNE, or FROM_SCRATCH'
        self.train_fc = train_fc if is_training else 'NO'
        self.is_training = is_training
        # variable registries, populated by the layer builders at graph time
        self.featurizer_variables = []
        self.main_model_variables = []
        self.fc_variables = []
        self.vfeedbacknet_base = VFeedbackNetBase(sess, num_classes, train_vgg16=train_featurizer, is_training=is_training)
        #self.vfeedbacknet_fb_base = VFeedbackNetFBBase(sess, num_classes, batch_size, train_featurizer='NO', train_main_model='NO', is_training=True, trainable=False)
        self._declare_variables()
def _declare_variables(self):
with tf.variable_scope('vfeedbacknet_{}'.format(Model.model_name)):
with tf.variable_scope('convlstm1'):
with tf.variable_scope('rnn'):
with tf.variable_scope('conv_lstm_cell'):
regularizer = None # tf.contrib.layers.l2_regularizer(scale=0.25)
initializer = tf.contrib.layers.xavier_initializer()
n = 128
m = 4*n
input_size = [28, 28, n]
kernel2d_size = [3, 3]
kernel_size = kernel2d_size + [2*n] + [m]
with tf.variable_scope('convlstm'):
kernel = tf.get_variable('kernel', kernel_size, initializer=initializer, regularizer=regularizer)
W_ci = tf.get_variable('W_ci', input_size, initializer=initializer, regularizer=regularizer)
W_cf = tf.get_variable('W_cf', input_size, initializer=initializer, regularizer=regularizer)
W_co = tf.get_variable('W_co', input_size, initializer=initializer, regularizer=regularizer)
bias = tf.get_variable('bias', [m], initializer=tf.zeros_initializer(), regularizer=regularizer)
self.convLSTMCell1 = ConvLSTMCell(input_size[:2], n, [3, 3])
with tf.variable_scope('convlstm2'):
with tf.variable_scope('rnn'):
with tf.variable_scope('conv_lstm_cell'):
regularizer = None # tf.contrib.layers.l2_regularizer(scale=0.25)
initializer = tf.contrib.layers.xavier_initializer()
n = 512
m = 4*n
input_size = [7, 7, n]
kernel2d_size = [3, 3]
kernel_size = kernel2d_size + [2*n] + [m]
with tf.variable_scope('convlstm'):
kernel = tf.get_variable('kernel', kernel_size, initializer=initializer, regularizer=regularizer)
W_ci = tf.get_variable('W_ci', input_size, initializer=initializer, regularizer=regularizer)
W_cf = tf.get_variable('W_cf', input_size, initializer=initializer, regularizer=regularizer)
W_co = tf.get_variable('W_co', input_size, initializer=initializer, regularizer=regularizer)
bias = tf.get_variable('bias', [m], initializer=tf.zeros_initializer(), regularizer=regularizer)
self.convLSTMCell2 = ConvLSTMCell(input_size[:2], n, [3, 3])
# with tf.variable_scope('convlstm3'):
# with tf.variable_scope('rnn'):
# with tf.variable_scope('conv_lstm_cell'):
# regularizer = None # tf.contrib.layers.l2_regularizer(scale=0.25)
# initializer = tf.contrib.layers.xavier_initializer()
# n = 512
# m = 4*n
# input_size = [7, 7, n]
# kernel2d_size = [3, 3]
# kernel_size = kernel2d_size + [2*n] + [m]
# with tf.variable_scope('convlstm'):
# kernel = tf.get_variable('kernel', kernel_size, initializer=initializer, regularizer=regularizer)
# W_ci = tf.get_variable('W_ci', input_size, initializer=initializer, regularizer=regularizer)
# W_cf = tf.get_variable('W_cf', input_size, initializer=initializer, regularizer=regularizer)
# W_co = tf.get_variable('W_co', input_size, initializer=initializer, regularizer=regularizer)
# bias = tf.get_variable('bias', [m], initializer=tf.zeros_initializer(), regularizer=regularizer)
# self.convLSTMCell3 = ConvLSTMCell(input_size[:2], n, [3, 3])
with tf.variable_scope('reshape_convs'):
with tf.variable_scope('conv1'):
regularizer = None # tf.contrib.layers.l2_regularizer(scale=0.25)
initializer = tf.contrib.layers.xavier_initializer()
kernel = tf.get_variable('kernel', shape=[7, 7, 3, 64], dtype=tf.float32, regularizer=regularizer, initializer=initializer)
biases = tf.get_variable('biases', shape=[64], dtype=tf.float32, regularizer=regularizer, initializer=initializer)
with tf.variable_scope('conv2'):
regularizer = None # tf.contrib.layers.l2_regularizer(scale=0.25)
initializer = tf.contrib.layers.xavier_initializer()
kernel = tf.get_variable('kernel', shape=[3, 3, 64, 128], dtype=tf.float32, regularizer=regularizer, initializer=initializer)
biases = tf.get_variable('biases', shape=[128], dtype=tf.float32, regularizer=regularizer, initializer=initializer)
with tf.variable_scope('conv3'):
regularizer = None # tf.contrib.layers.l2_regularizer(scale=0.25)
initializer = tf.contrib.layers.xavier_initializer()
kernel = tf.get_variable('kernel', shape=[3, 3, 128, 256], dtype=tf.float32, regularizer=regularizer, initializer=initializer)
biases = tf.get_variable('biases', shape=[256], dtype=tf.float32, regularizer=regularizer, initializer=initializer)
with tf.variable_scope('conv4'):
regularizer = None # tf.contrib.layers.l2_regularizer(scale=0.25)
initializer = tf.contrib.layers.xavier_initializer()
kernel = tf.get_variable('kernel', shape=[3, 3, 64, 128], dtype=tf.float32, regularizer=regularizer, initializer=initializer)
biases = tf.get_variable('biases', shape=[128], dtype=tf.float32, regularizer=regularizer, initializer=initializer)
with tf.variable_scope('conv5'):
regularizer = None # tf.contrib.layers.l2_regularizer(scale=0.25)
initializer = tf.contrib.layers.xavier_initializer()
kernel = tf.get_variable('kernel', shape=[3, 3, 128, 256], dtype=tf.float32, regularizer=regularizer, initializer=initializer)
biases = tf.get_variable('biases', shape=[256], dtype=tf.float32, regularizer=regularizer, initializer=initializer)
with tf.variable_scope('feedback_block1'):
regularizer = None # tf.contrib.layers.l2_regularizer(scale=0.25)
initializer = tf.contrib.layers.xavier_initializer()
input_size = [56, 56]
kernel_size = [3, 3, 64, 64]
W_xf = tf.get_variable('W_xf', kernel_size, dtype=tf.float32, initializer=initializer, regularizer=regularizer)
W_xi = tf.get_variable('W_xi', kernel_size, dtype=tf.float32, initializer=initializer, regularizer=regularizer)
W_xc = tf.get_variable('W_xc', kernel_size, dtype=tf.float32, initializer=initializer, regularizer=regularizer)
W_xo = tf.get_variable('W_xo', kernel_size, dtype=tf.float32, initializer=initializer, regularizer=regularizer)
W_hf = tf.get_variable('W_hf', kernel_size, dtype=tf.float32, initializer=initializer, regularizer=regularizer)
W_hi = tf.get_variable('W_hi', kernel_size, dtype=tf.float32, initializer=initializer, regularizer=regularizer)
W_hc = tf.get_variable('W_hc', kernel_size, dtype=tf.float32, initializer=initializer, regularizer=regularizer)
W_ho = tf.get_variable('W_ho', kernel_size, dtype=tf.float32, initializer=initializer, regularizer=regularizer)
W_cf = tf.get_variable('W_cf', [input_size[0],input_size[1],kernel_size[-1]], dtype=tf.float32, initializer=initializer, regularizer=regularizer)
W_ci = tf.get_variable('W_ci', [input_size[0],input_size[1],kernel_size[-1]], dtype=tf.float32, initializer=initializer, regularizer=regularizer)
W_co = tf.get_variable('W_co', [input_size[0],input_size[1],kernel_size[-1]], dtype=tf.float32, initializer=initializer, regularizer=regularizer)
b_f = tf.get_variable('b_f', [kernel_size[-1]], dtype=tf.float32, initializer=tf.zeros_initializer(), regularizer=regularizer)
b_i = tf.get_variable('b_i', [kernel_size[-1]], dtype=tf.float32, initializer=tf.zeros_initializer(), regularizer=regularizer)
b_c = tf.get_variable('b_c', [kernel_size[-1]], dtype=tf.float32, initializer=tf.zeros_initializer(), regularizer=regularizer)
b_o = tf.get_variable('b_o', [kernel_size[-1]], dtype=tf.float32, initializer=tf.zeros_initializer(), regularizer=regularizer)
with tf.variable_scope('feedback_block2'):
regularizer = None # tf.contrib.layers.l2_regularizer(scale=0.25)
initializer = tf.contrib.layers.xavier_initializer()
input_size = [112, 112]
kernel_size = [3, 3, 64, 64]
W_xf = tf.get_variable('W_xf', kernel_size, dtype=tf.float32, initializer=initializer, regularizer=regularizer)
W_xi = tf.get_variable('W_xi', kernel_size, dtype=tf.float32, initializer=initializer, regularizer=regularizer)
W_xc = tf.get_variable('W_xc', kernel_size, dtype=tf.float32, initializer=initializer, regularizer=regularizer)
W_xo = tf.get_variable('W_xo', kernel_size, dtype=tf.float32, initializer=initializer, regularizer=regularizer)
W_hf = tf.get_variable('W_hf', kernel_size, dtype=tf.float32, initializer=initializer, regularizer=regularizer)
W_hi = tf.get_variable('W_hi', kernel_size, dtype=tf.float32, initializer=initializer, regularizer=regularizer)
W_hc = tf.get_variable('W_hc', kernel_size, dtype=tf.float32, initializer=initializer, regularizer=regularizer)
W_ho = tf.get_variable('W_ho', kernel_size, dtype=tf.float32, initializer=initializer, regularizer=regularizer)
W_cf = tf.get_variable('W_cf', [input_size[0],input_size[1],kernel_size[-1]], dtype=tf.float32, initializer=initializer, regularizer=regularizer)
W_ci = tf.get_variable('W_ci', [input_size[0],input_size[1],kernel_size[-1]], dtype=tf.float32, initializer=initializer, regularizer=regularizer)
W_co = tf.get_variable('W_co', [input_size[0],input_size[1],kernel_size[-1]], dtype=tf.float32, initializer=initializer, regularizer=regularizer)
b_f = tf.get_variable('b_f', [kernel_size[-1]], dtype=tf.float32, initializer=tf.zeros_initializer(), regularizer=regularizer)
b_i = tf.get_variable('b_i', [kernel_size[-1]], dtype=tf.float32, initializer=tf.zeros_initializer(), regularizer=regularizer)
b_c = tf.get_variable('b_c', [kernel_size[-1]], dtype=tf.float32, initializer=tf.zeros_initializer(), regularizer=regularizer)
b_o = tf.get_variable('b_o', [kernel_size[-1]], dtype=tf.float32, initializer=tf.zeros_initializer(), regularizer=regularizer)
with tf.variable_scope('feedback_block3'):
regularizer = None # tf.contrib.layers.l2_regularizer(scale=0.25)
initializer = tf.contrib.layers.xavier_initializer()
input_size = [56, 56]
kernel_size = [7, 7, 64, 64]
W_xf = tf.get_variable('W_xf', kernel_size, dtype=tf.float32, initializer=initializer, regularizer=regularizer)
W_xi = tf.get_variable('W_xi', kernel_size, dtype=tf.float32, initializer=initializer, regularizer=regularizer)
W_xc = tf.get_variable('W_xc', kernel_size, dtype=tf.float32, initializer=initializer, regularizer=regularizer)
W_xo = tf.get_variable('W_xo', kernel_size, dtype=tf.float32, initializer=initializer, regularizer=regularizer)
W_hf = tf.get_variable('W_hf', kernel_size, dtype=tf.float32, initializer=initializer, regularizer=regularizer)
W_hi = tf.get_variable('W_hi', kernel_size, dtype=tf.float32, initializer=initializer, regularizer=regularizer)
W_hc = tf.get_variable('W_hc', kernel_size, dtype=tf.float32, initializer=initializer, regularizer=regularizer)
W_ho = tf.get_variable('W_ho', kernel_size, dtype=tf.float32, initializer=initializer, regularizer=regularizer)
W_cf = tf.get_variable('W_cf', [input_size[0],input_size[1],kernel_size[-1]], dtype=tf.float32, initializer=initializer, regularizer=regularizer)
W_ci = tf.get_variable('W_ci', [input_size[0],input_size[1],kernel_size[-1]], dtype=tf.float32, initializer=initializer, regularizer=regularizer)
W_co = tf.get_variable('W_co', [input_size[0],input_size[1],kernel_size[-1]], dtype=tf.float32, initializer=initializer, regularizer=regularizer)
b_f = tf.get_variable('b_f', [kernel_size[-1]], dtype=tf.float32, initializer=tf.zeros_initializer(), regularizer=regularizer)
b_i = tf.get_variable('b_i', [kernel_size[-1]], dtype=tf.float32, initializer=tf.zeros_initializer(), regularizer=regularizer)
b_c = tf.get_variable('b_c', [kernel_size[-1]], dtype=tf.float32, initializer=tf.zeros_initializer(), regularizer=regularizer)
b_o = tf.get_variable('b_o', [kernel_size[-1]], dtype=tf.float32, initializer=tf.zeros_initializer(), regularizer=regularizer)
with tf.variable_scope('fc1'):
regularizer = None # tf.contrib.layers.l2_regularizer(scale=0.25)
initializer = tf.contrib.layers.xavier_initializer()
trainable = False if self.train_fc == 'NO' else True
weight = tf.get_variable('weights', shape=[14*14*256, 256], dtype=tf.float32, initializer=initializer, regularizer=regularizer, trainable=trainable)
biases = tf.get_variable('biases', shape=[256], dtype=tf.float32, initializer=initializer, regularizer=regularizer, trainable=trainable)
with tf.variable_scope('fc2'):
regularizer = None # tf.contrib.layers.l2_regularizer(scale=0.25)
initializer = tf.contrib.layers.xavier_initializer()
trainable = False if self.train_fc == 'NO' else True
weight = tf.get_variable('weights', shape=[256, self.num_classes], dtype=tf.float32, initializer=initializer, regularizer=regularizer, trainable=trainable)
biases = tf.get_variable('biases', shape=[self.num_classes], dtype=tf.float32, initializer=initializer, regularizer=regularizer, trainable=trainable)
def get_variables(self):
return self.featurizer_variables + self.main_model_variables + self.fc_variables
def print_variables(self):
var_list = self.get_variables()
var_list_len = len(var_list)
for var,idx in zip(var_list, range(var_list_len)):
print(str(idx).zfill(3), var.name)
def initialize_variables(self):
logging.debug('--- begin variable initialization (vfeedbacknet) ---')
var_list = self.get_variables()
self.print_variables()
print('Number of variables in model: {}'.format( len(var_list) ))
if self.train_featurizer == 'FROM_SCRATCH':
logging.debug('vgg16:FROM_SCRATCH; using random initialization')
for var in self.featurizer_variables:
self.sess.run(var.initializer)
else:
for var in self.featurizer_variables:
logging.debug('LOADING FROM WEIGHTS_FILE {}: {}'.format(var.name, var.shape))
assert self.weights is not None, 'Need to specify weights file to load from for featurizer_variables'
self.sess.run(var.assign(self.weights[var.name]))
if self.train_main_model == 'FROM_SCRATCH':
logging.debug('feedback: FROM_SCRATCH; using random initialization')
for var in self.main_model_variables:
self.sess.run(var.initializer)
else:
for var in self.main_model_variables:
logging.debug(' LOADING FROM WEIGHTS_FILE {}: {}'.format(var.name, var.shape))
assert self.weights is not None, 'Need to specify weights file to load from for main_model_variables'
self.sess.run(var.assign(self.weights[var.name]))
if self.train_fc == 'FROM_SCRATCH':
logging.debug('fc: FROM_SCRATCH; using random initialization')
for var in self.fc_variables:
self.sess.run(var.initializer)
else:
for var in self.fc_variables:
logging.debug('LOADING FROM WEIGHTS_FILE {}: {}'.format(var.name, var.shape))
assert self.weights is not None, 'Need to specify weights file to load from for fc_variables'
self.sess.run(var.assign(self.weights[var.name]))
logging.debug('--- end variable initialization (vfeedbacknet) ---')
    def export_variables(self, export_filename):
        ''' Save all model variables (keyed by name) to export_filename via the base helper. '''
        self.vfeedbacknet_base.export_variables(self.sess, self.get_variables(), export_filename)
def __call__(self, inputs, inputs_sequence_length):
#assert inputs.shape[1:] == (20, 112, 112), 'expected input shape of (20, 112, 112) but got {}'.format(inputs.shape)
ModelLogger.log('raw_input', inputs)
inputs = VFeedbackNetBase.split_video(inputs)
ModelLogger.log('input', inputs)
## featurizer ##
inputs = [ self.vfeedbacknet_base.vgg16_layer1(inp, var_list=self.featurizer_variables) for inp in inputs ]
ModelLogger.log('vgg-layer1', inputs)
# inputs = [ self.vfeedbacknet_fb_base.vfeedbacknet_base.vgg16_layer2(inp, var_list=self.featurizer_variables) for inp in inputs ]
# ModelLogger.log('vgg-layer2', inputs)
# inputs = [ self.vfeedbacknet_fb_base.vfeedbacknet_base.vgg16_layer3(inp, var_list=self.featurizer_variables) for inp in inputs ]
# ModelLogger.log('vgg-layer3', inputs)
## main model ##
sequence = []
featurizer_outputs = inputs
feedback_outputs = None
## feedback 1 ##
# feedback_outputs11 = [ self.feedback_block1(inp, var_list=self.main_model_variables) for inp in featurizer_outputs ]
# inputs = list(map(lambda x : x['hidden_state'], feedback_outputs11))
# ModelLogger.log('feedback_block1', inputs)
# inputs = [ self.reshape_conv_layer(inp, 1, var_list=self.main_model_variables) for inp in featurizer_outputs ]
# ModelLogger.log('reshape_conv_layer1', inputs)
# feedback_outputs21 = [ self.feedback_block2(inp, var_list=self.main_model_variables) for inp in inputs ]
# inputs = list(map(lambda x : x['hidden_state'], feedback_outputs21))
# ModelLogger.log('feedback_block2', inputs)
# inputs = [ self.reshape_conv_layer(inp, 2, var_list=self.main_model_variables) for inp in inputs ]
# ModelLogger.log('reshape_conv_layer2', inputs)
feedback_outputs31 = [ self.feedback_block3(inp, var_list=self.main_model_variables) for inp in inputs ]
inputs = list(map(lambda x : x['hidden_state'], feedback_outputs31))
ModelLogger.log('feedback_block3', inputs)
# inputs = [ self.reshape_conv_layer(inp, 3, var_list=self.main_model_variables) for inp in inputs ]
# ModelLogger.log('reshape_conv_layer3', inputs)
# add outputs to sequence
sequence.append(inputs)
## feedback 2 ##
# feedback_outputs12 = [ self.feedback_block1(inp, state=state, var_list=self.main_model_variables) for inp,state in zip(featurizer_outputs, feedback_outputs11) ]
# inputs = list(map(lambda x : x['hidden_state'], feedback_outputs12))
# ModelLogger.log('feedback_block1', inputs)
# inputs = [ self.reshape_conv_layer(inp, 1, var_list=self.main_model_variables) for inp in featurizer_outputs ]
# ModelLogger.log('reshape_conv_layer1', inputs)
# feedback_outputs22 = [ self.feedback_block2(inp, state=state, var_list=self.main_model_variables) for inp,state in zip(inputs, feedback_outputs21) ]
# inputs = list(map(lambda x : x['hidden_state'], feedback_outputs22))
# ModelLogger.log('feedback_block2', inputs)
# inputs = [ self.reshape_conv_layer(inp, 2, var_list=self.main_model_variables) for inp in inputs ]
# ModelLogger.log('reshape_conv_layer2', inputs)
feedback_outputs32 = [ self.feedback_block3(inp, state=state, var_list=self.main_model_variables) for inp,state in zip(inputs, feedback_outputs31) ]
inputs = list(map(lambda x : x['hidden_state'], feedback_outputs32))
ModelLogger.log('feedback_block3', inputs)
# inputs = [ self.reshape_conv_layer(inp, 3, var_list=self.main_model_variables) for inp in inputs ]
# ModelLogger.log('reshape_conv_layer3', inputs)
# add outputs to sequence
sequence.append(inputs)
## feedback 3 ##
# feedback_outputs13 = [ self.feedback_block1(inp, state=state, var_list=self.main_model_variables) for inp,state in zip(featurizer_outputs, feedback_outputs12) ]
# inputs = list(map(lambda x : x['hidden_state'], feedback_outputs13))
# ModelLogger.log('feedback_block1', inputs)
# inputs = [ self.reshape_conv_layer(inp, 1, var_list=self.main_model_variables) for inp in featurizer_outputs ]
# ModelLogger.log('reshape_conv_layer1', inputs)
# feedback_outputs23 = [ self.feedback_block2(inp, state=state, var_list=self.main_model_variables) for inp,state in zip(inputs, feedback_outputs22) ]
# inputs = list(map(lambda x : x['hidden_state'], feedback_outputs23))
# ModelLogger.log('feedback_block2', inputs)
# inputs = [ self.reshape_conv_layer(inp, 2, var_list=self.main_model_variables) for inp in inputs ]
# ModelLogger.log('reshape_conv_layer2', inputs)
feedback_outputs33 = [ self.feedback_block3(inp, state=state, var_list=self.main_model_variables) for inp,state in zip(inputs, feedback_outputs32) ]
inputs = list(map(lambda x : x['hidden_state'], feedback_outputs33))
ModelLogger.log('feedback_block3', inputs)
# inputs = [ self.reshape_conv_layer(inp, 3, var_list=self.main_model_variables) for inp in inputs ]
# ModelLogger.log('reshape_conv_layer3', inputs)
# add outputs to sequence
sequence.append(inputs)
logits = []
for seq in sequence:
inputs = [ self.reshape_conv_layer(inp, 4, var_list=self.main_model_variables) for inp in seq ]
ModelLogger.log('reshape_conv_layer4', inputs)
inputs = self.convLSTM_layer1(inputs, inputs_sequence_length, var_list=self.main_model_variables)
ModelLogger.log('convLSTM1', inputs)
inputs = [ self.reshape_conv_layer(inp, 5, var_list=self.main_model_variables) for inp in inputs ]
ModelLogger.log('reshape_conv_layer5', inputs)
# inputs = self.convLSTM_layer2(inputs, 60, var_list=self.main_model_variables)
# ModelLogger.log('convLSTM2', inputs)
inputs = [ self.fc_layer(inp, 1, var_list=self.fc_variables) for inp in inputs ]
ModelLogger.log('fc1', inputs)
inputs = [ self.fc_layer(inp, 2, var_list=self.fc_variables) for inp in inputs ]
ModelLogger.log('fc2', inputs)
seq1 = tf.stack(inputs, axis=1)
logits.append(seq1)
logits = tf.stack(logits, axis=1)
ModelLogger.log('combined-feedback-logits', logits)
return logits
def feedback_block1(self, inputs, state=None, var_list=None):
return self.feedback_block(1, inputs, state=state, var_list=var_list)
def feedback_block2(self, inputs, state=None, var_list=None):
return self.feedback_block(2, inputs, state=state, var_list=var_list)
def feedback_block3(self, inputs, state=None, var_list=None):
return self.feedback_block(3, inputs, state=state, var_list=var_list)
def feedback_block(self, block_num, inputs, state=None, var_list=None):
hidden_state = None
cell_state = None
if state is not None:
hidden_state = state['hidden_state']
cell_state = state['cell_state']
assert (cell_state is None) == (hidden_state is None), 'cell_state and hidden_state must BOTH be supplied as arguments.'
with tf.variable_scope('vfeedbacknet_{}'.format(Model.model_name)):
with tf.variable_scope('feedback_block{}'.format(block_num), reuse=True):
W_xf = tf.get_variable('W_xf')
W_xi = tf.get_variable('W_xi')
W_xc = tf.get_variable('W_xc')
W_xo = tf.get_variable('W_xo')
W_hf = tf.get_variable('W_hf')
W_hi = tf.get_variable('W_hi')
W_hc = tf.get_variable('W_hc')
W_ho = tf.get_variable('W_ho')
W_cf = tf.get_variable('W_cf')
W_ci = tf.get_variable('W_ci')
W_co = tf.get_variable('W_co')
b_f = tf.get_variable('b_f')
b_i = tf.get_variable('b_i')
b_c = tf.get_variable('b_c')
b_o = tf.get_variable('b_o')
i_t = tf.sigmoid(
tf.nn.bias_add(
tf.nn.conv2d(inputs, W_xi, [1, 1, 1, 1], padding='SAME') +
(tf.nn.conv2d(hidden_state, W_hi, [1, 1, 1, 1], padding='SAME') if hidden_state is not None else tf.to_float(0)) +
(tf.multiply(cell_state, W_ci, name='element_wise_multipy') if cell_state is not None else tf.to_float(0)),
b_i)
)
#i_t = tf.contrib.layers.layer_norm(i_t)
f_t = tf.sigmoid(
tf.nn.bias_add(
tf.nn.conv2d(inputs, W_xf, [1, 1, 1, 1], padding='SAME') +
(tf.nn.conv2d(hidden_state, W_hf, [1, 1, 1, 1], padding='SAME') if hidden_state is not None else tf.to_float(0)) +
(tf.multiply(cell_state, W_cf, name='element_wise_multipy_ft') if cell_state is not None else tf.to_float(0)),
b_f)
)
#f_t = tf.contrib.layers.layer_norm(f_t)
j = tf.nn.bias_add(tf.nn.conv2d(inputs, W_xc, [1, 1, 1, 1], padding='SAME') +
(tf.nn.conv2d(hidden_state, W_hc, [1, 1, 1, 1], padding='SAME') if hidden_state is not None else tf.to_float(0)),
b_c)
#j = tf.contrib.layers.layer_norm(j)
new_cell_state = (tf.multiply(f_t, cell_state, name='element_wise_multipy_ct1') if cell_state is not None else tf.to_float(0)) + \
tf.multiply(i_t, tf.tanh( j ), name='element_wise_multipy_ct2')
#new_cell_state = tf.contrib.layers.layer_norm(new_cell_state)
o_t = tf.sigmoid(
tf.nn.bias_add(
tf.nn.conv2d(inputs, W_xo, [1, 1, 1, 1], padding='SAME') +
(tf.nn.conv2d(hidden_state, W_ho, [1, 1, 1, 1], padding='SAME') if hidden_state is not None else tf.to_float(0)) +
tf.multiply(new_cell_state, W_co, name='element_wise_multipy_ot'),
b_o)
)
#o_t = tf.contrib.layers.layer_norm(o_t)
new_hidden_state = tf.multiply(o_t, tf.tanh(new_cell_state), name='element_wise_multipy_it')
if var_list is not None:
for var in [W_xf, W_xi, W_xc, W_xo,
W_hf, W_hi, W_hc, W_ho,
W_cf, W_ci, W_co,
b_f, b_i, b_c, b_o]:
if var not in var_list:
var_list.append(var)
return { 'hidden_state' : new_hidden_state, 'cell_state' : new_cell_state }
def convLSTM_layer1(self, inputs, inputs_sequence_length, var_list=None):
with tf.variable_scope('vfeedbacknet_{}'.format(Model.model_name)):
with tf.variable_scope('convlstm1'):
with tf.variable_scope('rnn'):
with tf.variable_scope('conv_lstm_cell'):
with tf.variable_scope('convlstm', reuse=True):
kernel = tf.get_variable('kernel')
W_ci = tf.get_variable('W_ci')
W_cf = tf.get_variable('W_cf')
W_co = tf.get_variable('W_co')
bias = tf.get_variable('bias')
if var_list is not None and kernel not in var_list:
var_list.append(kernel)
if var_list is not None and W_ci not in var_list:
var_list.append(W_ci)
if var_list is not None and W_cf not in var_list:
var_list.append(W_cf)
if var_list is not None and W_co not in var_list:
var_list.append(W_co)
if var_list is not None and bias not in var_list:
var_list.append(bias)
inputs, state = tf.nn.dynamic_rnn(
self.convLSTMCell1,
tf.stack(inputs, axis=1),
dtype=tf.float32,
sequence_length=None,
)
inputs = tf.unstack(inputs, axis=1)
return inputs
def convLSTM_layer2(self, inputs, inputs_sequence_length, var_list=None):
with tf.variable_scope('vfeedbacknet_{}'.format(Model.model_name)):
with tf.variable_scope('convlstm2'):
with tf.variable_scope('rnn'):
with tf.variable_scope('conv_lstm_cell'):
with tf.variable_scope('convlstm', reuse=True):
kernel = tf.get_variable('kernel')
W_ci = tf.get_variable('W_ci')
W_cf = tf.get_variable('W_cf')
W_co = tf.get_variable('W_co')
bias = tf.get_variable('bias')
if var_list is not None and kernel not in var_list:
var_list.append(kernel)
if var_list is not None and W_ci not in var_list:
var_list.append(W_ci)
if var_list is not None and W_cf not in var_list:
var_list.append(W_cf)
if var_list is not None and W_co not in var_list:
var_list.append(W_co)
if var_list is not None and bias not in var_list:
var_list.append(bias)
inputs, state = tf.nn.dynamic_rnn(
self.convLSTMCell2,
tf.stack(inputs, axis=1),
dtype=tf.float32,
sequence_length=None,
)
inputs = tf.unstack(inputs, axis=1)
return inputs
def convLSTM_layer3(self, inputs, inputs_sequence_length, var_list=None):
with tf.variable_scope('vfeedbacknet_{}'.format(Model.model_name)):
with tf.variable_scope('convlstm3'):
with tf.variable_scope('rnn'):
with tf.variable_scope('conv_lstm_cell'):
with tf.variable_scope('convlstm', reuse=True):
kernel = tf.get_variable('kernel')
W_ci = tf.get_variable('W_ci')
W_cf = tf.get_variable('W_cf')
W_co = tf.get_variable('W_co')
bias = tf.get_variable('bias')
if var_list is not None and kernel not in var_list:
var_list.append(kernel)
if var_list is not None and W_ci not in var_list:
var_list.append(W_ci)
if var_list is not None and W_cf not in var_list:
var_list.append(W_cf)
if var_list is not None and W_co not in var_list:
var_list.append(W_co)
if var_list is not None and bias not in var_list:
var_list.append(bias)
inputs, state = tf.nn.dynamic_rnn(
self.convLSTMCell3,
tf.stack(inputs, axis=1),
dtype=tf.float32,
sequence_length=None,
)
inputs = tf.unstack(inputs, axis=1)
return inputs
def reshape_conv_layer(self, inputs, conv_num, var_list=None):
with tf.variable_scope('vfeedbacknet_{}'.format(Model.model_name)):
with tf.variable_scope('reshape_convs'):
with tf.variable_scope('conv{}'.format(conv_num), reuse=True):
kernel = tf.get_variable('kernel')
biases = tf.get_variable('biases')
inputs = tf.nn.conv2d(inputs, kernel, [1, 1, 1, 1], padding='SAME')
inputs = tf.nn.bias_add(inputs, biases)
inputs = tf.nn.relu(inputs)
inputs = tf.nn.max_pool(inputs,
ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1],
padding='VALID')
if var_list is not None and kernel not in var_list:
var_list.append(kernel)
if var_list is not None and biases not in var_list:
var_list.append(biases)
return inputs
def conv_layer(self, inputs, var_list=None):
with tf.variable_scope('vfeedbacknet_{}'.format(Model.model_name)):
with tf.variable_scope('process_featurizer_output'):
with tf.variable_scope('conv1', reuse=True):
kernel = tf.get_variable('kernel')
biases = tf.get_variable('biases')
inputs = tf.nn.conv2d(inputs, kernel, [1, 1, 1, 1], padding='SAME')
inputs = tf.nn.bias_add(inputs, biases)
inputs = tf.nn.relu(inputs)
inputs = tf.nn.max_pool(inputs,
ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1],
padding='VALID')
if var_list is not None and kernel not in var_list:
var_list.append(kernel)
if var_list is not None and biases not in var_list:
var_list.append(biases)
return inputs
def fc_layer(self, inputs, fc_num, var_list=None):
with tf.variable_scope('vfeedbacknet_{}'.format(Model.model_name), reuse=True):
with tf.variable_scope('fc{}'.format(fc_num)):
weights = tf.get_variable('weights')
biases = tf.get_variable('biases')
#h, w, c, = inputs.shape[1:]
#size = int(h) * int(w) * int(c)
size = 1
for d in inputs.shape[1:]:
size *= int(d)
inputs = tf.reshape(inputs, [-1, size])
inputs = tf.matmul(inputs, weights)
inputs = tf.nn.bias_add(inputs, biases)
if var_list is not None and weights not in var_list:
var_list.append(weights)
if var_list is not None and biases not in var_list:
var_list.append(biases)
return inputs
if __name__ == '__main__':
sess = tf.Session()
video_length = 20
x = tf.placeholder(tf.float32, [None, video_length, 112, 112, 3], name='inputs')
x_len = tf.placeholder(tf.float32, [None], name='inputs_len')
zeros = tf.placeholder(tf.float32, [video_length], name='inputs_len')
labels = tf.placeholder(tf.float32, [None], name='inputs_len')
model = Model(sess, 27, 16, train_featurizer='NO', train_main_model='FROM_SCRATCH', train_fc='FROM_SCRATCH', weights_filename='/home/jemmons/data/vfeedbacknet_base_weights.npz')
logits = model(x, x_len)
ModelLogger.log('logits', logits)
model.initialize_variables()
model.export_variables('/tmp/weights.npz')
#model.print_variables()
# print out the model
# graph = tf.get_default_graph()
# for op in graph.get_operations():
# print((op.name))
| [
"emmons.john@gmail.com"
] | emmons.john@gmail.com |
a09c3b7a2ffba2536acef7ea26411b186b8daf9e | 989190bd7ea5160695da7248dec739c4d459c76d | /remember/handle_args.py | 36a4194ccea931d43f7616beb06c93d1891e276d | [
"MIT"
] | permissive | behroozkhorashadi/remember-commands | 0092576d3111259d9033c420b667578ef36d89f5 | 74e226c98cdcd9560f984346f0f18cb9ca4ae156 | refs/heads/master | 2020-04-06T04:00:57.442209 | 2019-11-10T20:13:55 | 2019-11-10T20:13:55 | 83,071,865 | 6 | 2 | MIT | 2019-11-08T19:59:29 | 2017-02-24T18:40:48 | Python | UTF-8 | Python | false | false | 2,581 | py | import argparse
def setup_args_for_update():
parser = argparse.ArgumentParser()
add_search(parser)
add_json(parser)
add_sql(parser)
parser.add_argument(
"-u",
"--updateinfo",
help="Search the commands AND the extra command context info .",
action="store_true")
parser.add_argument(
"-d",
"--delete",
help="Delete mode where you able to delete commands from the store.",
action="store_true")
add_required_terms(parser, False)
return parser.parse_args()
def setup_for_migrate():
parser = argparse.ArgumentParser()
add_json(parser)
add_save_dir(parser)
return parser.parse_args()
def setup_args_for_search():
parser = argparse.ArgumentParser()
add_json(parser)
add_sql(parser)
add_search(parser)
parser.add_argument(
"-e",
"--execute",
help="Execute the searched commands.",
action="store_true")
add_required_terms(parser, True)
return parser.parse_args()
def setup_args_for_generate():
parser = argparse.ArgumentParser()
add_json(parser)
add_sql(parser)
parser.add_argument(
"historyfile",
help="The path to the history file. ex: '~/.bash_history'")
parser.add_argument(
"save_dir", help="The directory path. ex: ~/dir/where/serializedfile/is")
return parser.parse_args()
def add_json(parser):
parser.add_argument(
"-j",
"--json",
help="Use jsonpickle to serialize/deserialize the store.",
action="store_true")
def add_sql(parser):
parser.add_argument(
"-q",
"--sql",
help="Use sql to back the store.",
action="store_true")
def add_search(parser):
parser.add_argument(
"-a",
"--all",
help="Search the commands AND the extra command context info .",
action="store_true")
parser.add_argument(
"-s",
"--startswith",
help="Show only commands that strictly start with input command.",
action="store_true")
def add_save_dir(parser):
parser.add_argument(
"save_dir",
help="The directory path. ex: ~/dir/where/serializedfile/is")
def add_required_terms(parser, add_history_arg=False):
add_save_dir(parser)
if add_history_arg:
parser.add_argument(
"history_file_path",
help="The path to the history file. ex: ~/.bash_history")
parser.add_argument(
"query",
nargs='+',
help="The term to search for. ex: 'git pull' or git")
| [
"beh@khorashadi.com"
] | beh@khorashadi.com |
88eb299875c8809ce432865ddb2e6437ff0b6dde | 1ca2f86fff8c1e99a0ea8bc7750ddb50aeccb74b | /Homework 2/Zylabs_7_25_HW.py | b8d88bf656cd5b73b5aff7dd6f2adf159d42a1ee | [] | no_license | LGonzales930/My-Project | d27b310ef975c200fea2e78a5a3244eb7aa87c2a | dd6b89e84941796c78f09907bb0b05bfa1a1064f | refs/heads/main | 2023-04-24T01:51:29.555504 | 2021-05-13T04:39:11 | 2021-05-13T04:39:11 | 332,325,370 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 763 | py | # Lorenzo Gonzales
# ID: 1934789
# Use Functions
# Exact Change Function program
input_val = int(input())
def exact_change(user_total):
for numdollars in range(100, 200):
for numquarters in range(25, 99):
for numdimes in ranage(10, 24):
for numnickles in range(5, 9):
for numpennies in range(1, 4):
user_total = input_val
return
if user_total == numpennies:
print('pennies')
elif user_total == numnickles:
print('Nickles')
elif user_total == numdimes:
print('dimes')
elif user_total == numquarters:
print('quarters')
elif user_total == numdollars:
print('dollars')
else:
print('no change')
| [
"noreply@github.com"
] | noreply@github.com |
7fda317f3d51b3ef4a2d8943169411f809d09276 | e00d41c9f4045b6c6f36c0494f92cad2bec771e2 | /server/auth/libpwquality/actions.py | 57d01c98bb75acba7733f963e4afcf4c5c26b8b8 | [] | no_license | pisilinux/main | c40093a5ec9275c771eb5fb47a323e308440efef | bfe45a2e84ea43608e77fb9ffad1bf9850048f02 | refs/heads/master | 2023-08-19T00:17:14.685830 | 2023-08-18T20:06:02 | 2023-08-18T20:06:02 | 37,426,721 | 94 | 295 | null | 2023-09-14T08:22:22 | 2015-06-14T19:38:36 | Python | UTF-8 | Python | false | false | 780 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Licensed under the GNU General Public License, version 3.
# See the file http://www.gnu.org/copyleft/gpl.txt
from pisi.actionsapi import autotools
from pisi.actionsapi import get
from pisi.actionsapi import pisitools
def setup():
autotools.configure("--prefix=/usr \
--sysconfdir=/etc \
--with-securedir=/lib/security \
--disable-python-bindings \
--disable-static")
pisitools.dosed("libtool", " -shared ", " -Wl,-O1,--as-needed -shared ")
def build():
autotools.make()
def install():
autotools.rawInstall("DESTDIR=%s" % get.installDIR())
pisitools.dodoc("ABOUT*", "AUTHORS", "ChangeLog", "COPYING", "README")
| [
"ertugrulerata@gmail.com"
] | ertugrulerata@gmail.com |
62be565d1ad0e2bc743c1f5b5682cd2bdeef76c1 | 2e9ffd88923b1eb90047fe5c6a633a6d29c111a8 | /muddery/typeclasses/players.py | 76d46460f1c6c62028e28ae5e66dedef392932d5 | [
"BSD-3-Clause"
] | permissive | externIE/muddery | 4f7424abf2eac4280baef86ba5752e8d8ddee16d | ee4165e97e1510e06fa1e8120a35878a6c2862b7 | refs/heads/master | 2020-04-06T06:48:41.501309 | 2016-08-16T12:58:47 | 2016-08-16T12:58:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,369 | py | """
This is adapt from evennia/evennia/players/players.py.
The licence of Evennia can be found in evennia/LICENSE.txt.
Player
The Player represents the game "account" and each login has only one
Player object. A Player is what chats on default channels but has no
other in-game-world existance. Rather the Player puppets Objects (such
as Characters) in order to actually participate in the game world.
Guest
Guest players are simple low-level accounts that are created/deleted
on the fly and allows users to test the game without the committment
of a full registration. Guest accounts are deactivated by default; to
activate them, add the following line to your settings file:
GUEST_ENABLED = True
You will also need to modify the connection screen to reflect the
possibility to connect with a guest account. The setting file accepts
several more options for customizing the Guest account system.
"""
import json
from evennia.utils.utils import make_iter
from evennia.players.players import DefaultPlayer, DefaultGuest
class MudderyPlayer(DefaultPlayer):
"""
This class describes the actual OOC player (i.e. the user connecting
to the MUD). It does NOT have visual appearance in the game world (that
is handled by the character which is connected to this). Comm channels
are attended/joined using this object.
It can be useful e.g. for storing configuration options for your game, but
should generally not hold any character-related info (that's best handled
on the character level).
Can be set using BASE_PLAYER_TYPECLASS.
"""
def msg(self, text=None, from_obj=None, session=None, **kwargs):
"""
Evennia -> User
This is the main route for sending data back to the user from the
server.
Args:
text (str, optional): text data to send
from_obj (Object or Player, optional): Object sending. If given,
its at_msg_send() hook will be called.
session (Session or list, optional): Session object or a list of
Sessions to receive this send. If given, overrules the
default send behavior for the current
MULTISESSION_MODE.
Notes:
All other keywords are passed on to the protocol.
"""
raw = kwargs.get("raw", False)
if not raw:
try:
text = json.dumps(text)
except Exception, e:
text = json.dumps({"err": "There is an error occurred while outputing messages."})
logger.log_tracemsg("json.dumps failed: %s" % e)
else:
text = to_str(text, force_string=True) if text else ""
# set raw=True
if kwargs:
kwargs["raw"] = True
else:
kwargs = {"raw": True}
if from_obj:
# call hook
try:
from_obj.at_msg_send(text=text, to_obj=self, **kwargs)
except Exception:
pass
# session relay
sessions = make_iter(session) if session else self.sessions.all()
for session in sessions:
session.msg(text=text, **kwargs)
class MudderyGuest(DefaultGuest):
"""
This class is used for guest logins. Unlike Players, Guests and their
characters are deleted after disconnection.
"""
pass
| [
"luyijun999@gmail.com"
] | luyijun999@gmail.com |
5d22c427ce31827178896189f398088506fd1a21 | 601949a8b82ebf88933531fc11b182281c026db6 | /Final Year Project - 031218/bin/Debug/knn_Algo_DataSet_2.py | d04448338d49b5403bb0cbd6446f43c44a1ec2d2 | [] | no_license | ase3002/FYP-Face_Recognition_Based_Attendance_System | 9968efc4d789332c85c3e3300c405b0a0b08af40 | 92b9c3e6a26a3f73845ead79db2f3a2df6203a81 | refs/heads/master | 2020-04-24T08:34:44.507823 | 2018-12-05T14:01:11 | 2018-12-05T14:01:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,644 | py | import importlib
import imp
import math
from sklearn import neighbors
import os
import sys
import pickle
from PIL import Image, ImageDraw
import numpy as np
import face_recognition
from face_recognition.face_recognition_cli import image_files_in_folder
import cv2
import knn_predict as fra
knp = fra.knn_prediction()
# ALLOWED_EXTENSIONS = {'png', 'jpg', 'jpeg'}
# BASE_DIR = os.path.dirname(os.path.abspath(__file__))
# img_dir = os.path.join(BASE_DIR, "../images")
# training_data = os.path.join(img_dir, "Training Image")
# def testAlgo(img):
# #pil_image = Image.open('Image.jpg').convert('RGB')
# #print(img[3])
# open_cv_image = np.array(img[5])
# imageRGB = cv2.cvtColor(open_cv_image, cv2.COLOR_BGR2RGB)
# open_cv_image.shape[:2]
# print(len(img))
# cv2.imshow('image', open_cv_image)
# cv2.waitKey(0)
# cv2.destroyAllWindows()
def testAlgo(image , DIR_NAME):
if knp.get_model_path() is None:
knp.set_model_path(DIR_NAME)
return knp.predict(image)
def trainAlgo(imageArr,labelArr, DIR_NAME):
X_train = []
y_labels = []
model_save_path = str(DIR_NAME) + "_knn.clf"
n_neighbors = 3
#model_save_path = None
#n_neighbors = None
knn_algo = 'ball_tree'
verbose = False
proto = "ML/deploy.prototxt.txt"
caffmodel = "ML/res10_300x300_ssd_iter_140000.caffemodel"
confid = 0.7
net = cv2.dnn.readNetFromCaffe(proto, caffmodel)
for x in range(len(imageArr)):
#print("Training Identity " + labelArr[x] + " " + str(x))
sys.stdout.write("\r" + str(x + 1) + " of " + str(len(imageArr)) + " has been processed");
sys.stdout.flush()
try:
count = 0
imageA = np.array(imageArr[x])
#imageRGB = cv2.cvtColor(imageA, cv2.COLOR_BGR2RGB)
(h, w) = imageA.shape[:2]
blob = cv2.dnn.blobFromImage(cv2.resize(imageA, (300, 300)), 1.0, (300, 300), (104.0, 177.0, 123.0))
net.setInput(blob)
detections = net.forward()
for i in range(0, detections.shape[2]):
# print(detections.shape)
count += 1
confidence = detections[0, 0, i, 2]
if confidence > confid and count == 1:
box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
(startX, startY, endX, endY) = box.astype("int")
#face_bounding_boxes = "("+startX+","+endX+","+startY+","+endY+")"
roi = imageA[startY:endY, startX: endX]
#print(face_recognition.face_encodings(roi))
X_train.append(face_recognition.face_encodings(roi)[0])
y_labels.append(labelArr[x])
except Exception as e:
print ("")
print (e)
if n_neighbors is None:
n_neighbors = int(round(math.sqrt(len(X))))
if verbose:
print("Chose n_neighbors automatically:", n_neighbors)
# Create and train the KNN classifier
knn_clf = neighbors.KNeighborsClassifier(n_neighbors=n_neighbors, algorithm=knn_algo, weights='distance')
knn_clf.fit(X_train, y_labels)
# Save the trained KNN classifier
if model_save_path is not None:
with open(model_save_path, 'wb') as f:
pickle.dump(knn_clf, f)
print("**Training Completed**")
return knn_clf
def main():
pythonFile = "benchmarker"
nameList = []
nameList.clear()
try:
bm = importlib.import_module(pythonFile, ".")
menu = True
spam_info = imp.find_module(pythonFile)
print("Import Benchmark successful")
######## Dataset 1-4###################
DS_DIR = "Dataset 2"
imageArr, labelArr, DS_DIR = bm.fetchTrainingData(DS_DIR)
print("Run Successful")
trainAlgo(imageArr, labelArr, DS_DIR)
imgArr = bm.fetchTestQuestion()
# #print(len(imgArr))
for i in range(len(imgArr)):
name = testAlgo(imgArr[i], DS_DIR)
nameList.append(name)
bm.submitAnswer(nameList)
########## Dataset 5 ######################
#DS_DIR = "Dataset 5"
#imageArr, labelArr, DS = bm.fetchTrainingData(DS_DIR)
#print("Run Successful")
#trainAlgo(imageArr, labelArr, DS)
#imgArr = bm.fetchTestQuestion()
#print(len(imgArr))
#for i in range(len(imgArr)):
# name = testAlgo(imgArr[i], DS)
# nameList.append(name)
#correctAns1, wrongAns1, acc1 = bm.submitAnswer(nameList)
#nameList.clear()
### Training for Mixed Training
#imageArr2, labelArr2, DS_DIR2 = bm.fetchTrainingData(DS_DIR)
#print("Run Successful 2")
#trainAlgo(imageArr2, labelArr2, DS_DIR2+"2")# +"2" is to save the file under different name. If u want to replace the previous file, you need not +"2"
### Fetching 2nd test - Angle
#imgArr2 = bm.fetchTestQuestion()
#for i in range(len(imgArr)):
# name = testAlgo(imgArr2[i], DS_DIR2+"2")
# nameList.append(name)
#correctAns2, wrongAns2, acc2 = bm.submitAnswer(nameList)
#nameList.clear()
#### Fetching 3rd Test - Lighting
#imgArr3 = bm.fetchTestQuestion()
# print(len(imgArr))
#for i in range(len(imgArr3)):
# name = testAlgo(imgArr3[i], DS_DIR2+"2")
# nameList.append(name)
#correctAns3, wrongAns3, acc3 = bm.submitAnswer(nameList)
#nameList.clear()
#print("================= Test 1 (Pure Faces) Results ========================")
#print ("No of correct answer: " + str(correctAns1))
#print ("No of wrong answer: " + str(wrongAns1))
#print ("Accuracy: " + str(acc1))
#print("")
#print("============= Test 2 (Faces of different angle) Results ==============")
#print ("No of correct answer: " + str(correctAns2))
#print ("No of wrong answer: " + str(wrongAns2))
#print ("Accuracy: " + str(acc2))
#print("")
#print("============= Test 3 (Faces of different lighting) Results ==============")
#print ("No of correct answer: " + str(correctAns3))
#print ("No of wrong answer: " + str(wrongAns3))
#print ("Accuracy: " + str(acc3))
except Exception as e:
print (e)
if __name__ == "__main__":
main()
| [
"MOKW0007@e.ntu.edu.sg"
] | MOKW0007@e.ntu.edu.sg |
7f2c50d07ca30681b73fb9d909e7f0aa975630a4 | 96646f705ccda76fed95005fbd507953bf803de6 | /code/python/age_groupnum_scenario_I/obfuscations.py | a555539748865d9d6aedbffdb6ea6f53d79c301b | [] | no_license | scottshufe/HyObscure | dc6868a760826575bf6097dcc1cfeaac037117c3 | 14e3c480b00cd7384bfbb499a260e5180fa29570 | refs/heads/master | 2022-07-13T02:56:01.891252 | 2021-02-07T02:45:31 | 2021-02-07T02:45:31 | 238,379,797 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 22,492 | py | import os
import copy
import pandas as pd
import numpy as np
import funcs
import matlab.engine
import scipy.io
import scipy.spatial.distance as dist
from sklearn.metrics.pairwise import cosine_similarity
def HyObscure(df_train, df_test, df_test_rec_items, df_item_age_uid,
age_group_dict, group_age_dict, cluster_num, age_group_number,
age_list, deltaX, k_threshold, l_threshold, pp):
df_test_copy = copy.deepcopy(df_test)
df_test_copy['age_group'] = pd.Series(np.zeros(df_test_copy.shape[0]), index=df_test_copy.index,
dtype='int32')
xpgg = np.ones((cluster_num * age_group_number, cluster_num * age_group_number)) * 0.00000001
JSD_Mat = np.ones((cluster_num * age_group_number, cluster_num * age_group_number))
pgy = np.ones((len(age_list), cluster_num * age_group_number)) * 0.00000001
group_min_age_dict = {}
group_usersize_dict = {}
for op in range(0, 5):
age_xpgg_dict = {}
###### Compute JSD, pgy, xpgg
JSD_Mat_dict = {}
pgy_dict = {}
for ag in range(age_group_number):
group_min_age_dict[ag] = group_age_dict[ag][0]
print(group_min_age_dict[ag])
df_test_ag = df_test.loc[df_test['age_group'] == ag]
age_list_ag = group_age_dict[ag]
group_usersize_dict[ag] = df_test_ag.shape[0]
JSD_Mat_dict[ag] = funcs.cal_JSD_Matrix_withoutAgeGroup(df_test_ag, cluster_num, 4)
print(ag, cluster_num, age_list_ag)
pgy_dict[ag] = funcs.cal_pgy_withoutAgeGroup(df_test_ag, cluster_num, age_list_ag)
pd.DataFrame(JSD_Mat_dict[ag]).to_csv('tmp/JSDM_ageGroup_hyobscure.csv', index=False, header=None)
pd.DataFrame(pgy_dict[ag]).to_csv('tmp/pgy_ageGroup_hyobscure.csv', index=False, header=None)
eng = matlab.engine.start_matlab()
eng.edit('../../matlab/age_groupnum_scenario_I/HyObscure', nargout=0)
eng.cd('../../matlab/age_groupnum_scenario_I', nargout=0)
age_xpgg_dict[ag], distortion_budget = np.array(eng.HyObscure(deltaX, nargout=2))
age_xpgg_dict[ag] = np.array(age_xpgg_dict[ag])
for ag in range(age_group_number):
for age in group_age_dict[ag]:
for col in range(cluster_num):
pgy[age - group_min_age_dict[0], ag + col * age_group_number] = pgy_dict[ag][age -
group_min_age_dict[
ag], col] * \
group_usersize_dict[
ag] / \
df_test.shape[0]
for ag in range(age_group_number):
for row in range(cluster_num):
for col in range(cluster_num):
xpgg[ag + row * age_group_number, ag + col * age_group_number] = age_xpgg_dict[ag][
row, col]
JSD_Mat[ag + row * age_group_number, ag + col * age_group_number] = JSD_Mat_dict[ag][
row, col]
# pd.DataFrame(xpgg).to_csv('xpgg.csv', index=False, header=None)
# pd.DataFrame(pgy).to_csv('pgy_full.csv', index=False, header=None)
# pd.DataFrame(JSD_Mat).to_csv('JSD_full.csv', index=False, header=None)
min_JSD_Mat = JSD_Mat
min_pgy = pgy
### change age group by greedy approach
mean_Utility = funcs.Mean_JSD(JSD_Mat, xpgg)
mean_Privacy = funcs.Mean_KL_div(pgy, xpgg)
min_mean_Utility = mean_Utility
min_mean_Privacy = mean_Privacy
adjustable_groups, reducible_groups = funcs.age_group_adjust_greedy(df_item_age_uid, group_age_dict,
k_threshold, np.log(l_threshold))
min_group = 0
for i in adjustable_groups:
age_group_dict_cur = {}
for group, group_age_list in adjustable_groups[i].items():
for age in group_age_list:
age_group_dict_cur[age] = group
df_test_new = funcs.update_age_group(df_test, age_group_dict_cur)
new_JSD_Mat = funcs.cal_JSD_Matrix_withAgeGroup(df_test_new, cluster_num, age_group_number, 4)
new_pgy = funcs.cal_pgy_withAgeGroup(df_test_new, cluster_num, age_group_number, age_list)
new_mean_Utility = funcs.Mean_JSD(new_JSD_Mat, xpgg)
new_mean_Privacy = funcs.Mean_KL_div(new_pgy, xpgg)
if new_mean_Utility < min_mean_Utility and new_mean_Privacy < min_mean_Privacy:
min_mean_Utility = new_mean_Utility
min_mean_Privacy = new_mean_Privacy
min_group_age_dict = copy.deepcopy(adjustable_groups[i])
min_age_group_dict = copy.deepcopy(age_group_dict_cur)
min_JSD_Mat = new_JSD_Mat
min_pgy = new_pgy
min_group = i
print(op, i, min_group, mean_Privacy, mean_Utility, min_mean_Privacy, min_mean_Utility,
new_mean_Privacy, new_mean_Utility)
if min_mean_Privacy < mean_Privacy and min_mean_Utility < mean_Utility:
print("find a better age group:", group_age_dict)
age_group_dict = min_age_group_dict
group_age_dict = min_group_age_dict
df_test = funcs.update_age_group(df_test, age_group_dict)
else:
break
user_num = df_test_copy.shape[0]
X_ori = {}
for k in range(user_num):
user_id = df_test_copy['uid'][k]
X_ori[user_id] = df_test_copy[df_test_copy['uid'] == user_id].values[0, :-1]
for k in X_ori.keys():
user_age = X_ori[k][-2]
X_ori[k][-3] = age_group_dict[user_age]
df_test = funcs.update_age_group(df_test, age_group_dict)
df_train = funcs.update_age_group(df_train, age_group_dict)
df_test_rec_items = funcs.update_age_group(df_test_rec_items, age_group_dict)
model_rf = funcs.train_rf_model(df_train)
model_xgb = funcs.train_xgb_model(df_train)
print("model train over, start obfuscating...")
X_obf_dict = {}
for i in range(100):
X_obf_dict[i], _ = funcs.get_obf_X(df_test, xpgg, pp)
return X_obf_dict, X_ori, model_rf, model_xgb
def YGen(df_train, df_test, df_test_rec_items, df_item_age_uid, age_group_number, cluster_num,
deltaX, age_list, age_group_dict, group_age_dict, k_threshold, l_threshold, pp):
df_test_copy = copy.deepcopy(df_test)
df_test_copy['age_group'] = pd.Series(np.zeros(df_test_copy.shape[0]), index=df_test_copy.index,
dtype='int32')
xpgg = np.ones((cluster_num * age_group_number, cluster_num * age_group_number)) * 0.00000001
JSD_Mat = np.ones((cluster_num * age_group_number, cluster_num * age_group_number))
pgy = np.ones((len(age_list), cluster_num * age_group_number)) * 0.00000001
group_min_age_dict = {}
group_usersize_dict = {}
age_xpgg_dict = {}
JSD_Mat_dict = {}
pgy_dict = {}
distortion_budget_dict = {}
for ag in range(age_group_number):
group_min_age_dict[ag] = group_age_dict[ag][0]
print(group_min_age_dict[ag])
df_test_ag = df_test.loc[df_test['age_group'] == ag]
age_list_ag = group_age_dict[ag]
group_usersize_dict[ag] = df_test_ag.shape[0]
JSD_Mat_dict[ag] = funcs.cal_JSD_Matrix_withoutAgeGroup(df_test_ag, cluster_num, 4)
pgy_dict[ag] = funcs.cal_pgy_withoutAgeGroup(df_test_ag, cluster_num, age_list_ag)
pd.DataFrame(JSD_Mat_dict[ag]).to_csv('tmp/JSDM_ageGroup_ygen.csv', index=False, header=None)
pd.DataFrame(pgy_dict[ag]).to_csv('tmp/pgy_ageGroup_ygen.csv', index=False, header=None)
eng = matlab.engine.start_matlab()
eng.edit('../../matlab/age_groupnum_scenario_I/YGen', nargout=0)
eng.cd('../../matlab/age_groupnum_scenario_I', nargout=0)
age_xpgg_dict[ag], distortion_budget = np.array(eng.YGen(deltaX, nargout=2))
age_xpgg_dict[ag] = np.array(age_xpgg_dict[ag])
for ag in range(age_group_number):
for age in group_age_dict[ag]:
for col in range(cluster_num):
pgy[age - group_min_age_dict[0], ag + col * age_group_number] = pgy_dict[ag][
age - group_min_age_dict[
ag], col] * \
group_usersize_dict[ag] / \
df_test.shape[0]
for ag in range(age_group_number):
for row in range(cluster_num):
for col in range(cluster_num):
xpgg[ag + row * age_group_number, ag + col * age_group_number] = age_xpgg_dict[ag][row, col]
JSD_Mat[ag + row * age_group_number, ag + col * age_group_number] = JSD_Mat_dict[ag][row, col]
JSD_Mat = np.ones((cluster_num * age_group_number, cluster_num * age_group_number))
pgy = np.ones((len(age_list), cluster_num * age_group_number)) * 0.00000001
group_min_age_dict = {}
group_usersize_dict = {}
JSD_Mat_dict = {}
pgy_dict = {}
for ag in range(age_group_number):
group_min_age_dict[ag] = group_age_dict[ag][0]
print(group_min_age_dict[ag])
df_test_ag = df_test.loc[df_test['age_group'] == ag]
age_list_ag = group_age_dict[ag]
group_usersize_dict[ag] = df_test_ag.shape[0]
JSD_Mat_dict[ag] = funcs.cal_JSD_Matrix_withoutAgeGroup(df_test_ag, cluster_num, 4)
print(ag, cluster_num, age_list_ag)
pgy_dict[ag] = funcs.cal_pgy_withoutAgeGroup(df_test_ag, cluster_num, age_list_ag)
for ag in range(age_group_number):
for age in group_age_dict[ag]:
for col in range(cluster_num):
pgy[age - group_min_age_dict[0], ag + col * age_group_number] = pgy_dict[ag][age -
group_min_age_dict[
ag], col] * \
group_usersize_dict[
ag] / \
df_test.shape[0]
for ag in range(age_group_number):
for row in range(cluster_num):
for col in range(cluster_num):
JSD_Mat[ag + row * age_group_number, ag + col * age_group_number] = JSD_Mat_dict[ag][
row, col]
min_JSD_Mat = JSD_Mat
min_pgy = pgy
### change age group by greedy approach
mean_Utility = funcs.Mean_JSD(JSD_Mat, xpgg)
mean_Privacy = funcs.Mean_KL_div(pgy, xpgg)
min_mean_Utility = mean_Utility
min_mean_Privacy = mean_Privacy
adjustable_groups, reducible_groups = funcs.age_group_adjust_greedy(df_item_age_uid, group_age_dict,
k_threshold, np.log(l_threshold))
min_group = 0
print('start adjusting...')
better_group_flag = 0
for i in adjustable_groups:
age_group_dict_cur = {}
for group, group_age_list in adjustable_groups[i].items():
for age in group_age_list:
age_group_dict_cur[age] = group
df_test_new = funcs.update_age_group(df_test, age_group_dict_cur)
new_JSD_Mat = funcs.cal_JSD_Matrix_withAgeGroup(df_test_new, cluster_num, age_group_number, 4)
new_pgy = funcs.cal_pgy_withAgeGroup(df_test_new, cluster_num, age_group_number, age_list)
new_mean_Utility = funcs.Mean_JSD(new_JSD_Mat, xpgg)
new_mean_Privacy = funcs.Mean_KL_div(new_pgy, xpgg)
print(new_mean_Utility)
print(new_mean_Privacy)
if new_mean_Utility < min_mean_Utility and new_mean_Privacy < min_mean_Privacy:
min_mean_Utility = new_mean_Utility
min_mean_Privacy = new_mean_Privacy
min_group_age_dict = copy.deepcopy(adjustable_groups[i])
min_age_group_dict = copy.deepcopy(age_group_dict_cur)
min_JSD_Mat = new_JSD_Mat
min_pgy = new_pgy
min_group = i
print('Find better group!')
better_group_flag = 1
print(i, min_group, mean_Privacy, mean_Utility, min_mean_Privacy, min_mean_Utility,
new_mean_Privacy, new_mean_Utility)
if better_group_flag == 1:
age_group_dict = min_age_group_dict
group_age_dict = min_group_age_dict
else:
print("find better group failed.")
user_num = df_test_copy.shape[0]
X_ori = {}
for k in range(user_num):
user_id = df_test_copy['uid'][k]
X_ori[user_id] = df_test_copy[df_test_copy['uid'] == user_id].values[0, :-1]
for k in X_ori.keys():
user_age = X_ori[k][-2]
X_ori[k][-3] = age_group_dict[user_age]
df_test = funcs.update_age_group(df_test, age_group_dict)
df_train = funcs.update_age_group(df_train, age_group_dict)
df_test_rec_items = funcs.update_age_group(df_test_rec_items, age_group_dict)
# random forest
model_rf = funcs.train_rf_model(df_train)
# XGBoost
model_xgb = funcs.train_xgb_model(df_train)
print("model train over, start obfuscating...")
X_obf_dict = {}
for i in range(100):
X_obf_dict[i], _ = funcs.get_obf_X(df_test, xpgg, pp)
return X_obf_dict, X_ori, model_rf, model_xgb
def XObf(df_train, df_test, age_group_number, cluster_num, deltaX, age_list, group_age_dict, pp):
# random forest
model_rf = funcs.train_rf_model(df_train)
# xgboost
model_xgb = funcs.train_xgb_model(df_train)
xpgg = np.ones((cluster_num * age_group_number, cluster_num * age_group_number)) * 0.00000001
JSD_Mat = np.ones((cluster_num * age_group_number, cluster_num * age_group_number))
pgy = np.ones((len(age_list), cluster_num * age_group_number)) * 0.00000001
group_min_age_dict = {}
group_usersize_dict = {}
age_xpgg_dict = {}
JSD_Mat_dict = {}
pgy_dict = {}
for ag in range(age_group_number):
group_min_age_dict[ag] = group_age_dict[ag][0]
print(group_min_age_dict[ag])
df_test_ag = df_test.loc[df_test['age_group'] == ag]
age_list_ag = group_age_dict[ag]
group_usersize_dict[ag] = df_test_ag.shape[0]
JSD_Mat_dict[ag] = funcs.cal_JSD_Matrix_withoutAgeGroup(df_test_ag, cluster_num, 4)
pgy_dict[ag] = funcs.cal_pgy_withoutAgeGroup(df_test_ag, cluster_num, age_list_ag)
pd.DataFrame(JSD_Mat_dict[ag]).to_csv('tmp/JSDM_ageGroup_XObf.csv', index=False, header=None)
pd.DataFrame(pgy_dict[ag]).to_csv('tmp/pgy_ageGroup_XObf.csv', index=False, header=None)
eng = matlab.engine.start_matlab()
eng.edit('../../matlab/age_groupnum_scenario_I/XObf', nargout=0)
eng.cd('../../matlab/age_groupnum_scenario_I', nargout=0)
age_xpgg_dict[ag], distortion_budget = np.array(eng.XObf(deltaX, nargout=2))
age_xpgg_dict[ag] = np.array(age_xpgg_dict[ag])
for ag in range(age_group_number):
for age in group_age_dict[ag]:
for col in range(cluster_num):
pgy[age - group_min_age_dict[0], ag + col * age_group_number] = pgy_dict[ag][
age - group_min_age_dict[
ag], col] * \
group_usersize_dict[ag] / \
df_test.shape[0]
for ag in range(age_group_number):
for row in range(cluster_num):
for col in range(cluster_num):
xpgg[ag + row * age_group_number, ag + col * age_group_number] = age_xpgg_dict[ag][row, col]
JSD_Mat[ag + row * age_group_number, ag + col * age_group_number] = JSD_Mat_dict[ag][row, col]
X_obf_dict = {}
for i in range(100):
X_obf_dict[i], _ = funcs.get_obf_X(df_test, xpgg, pp)
_, X_ori = funcs.get_obf_X(df_test, xpgg, pp)
return X_obf_dict, X_ori, model_rf, model_xgb
def PrivCheck(df_train, df_test, df_test_rec_items, age_group_number, cluster_num,
deltaX, age_list, age_group_dict, group_age_dict, pp):
funcs.update_age_group(df_train, age_group_dict)
# random forest
model_rf = funcs.train_rf_model(df_train)
# xgboost
model_xgb = funcs.train_xgb_model(df_train)
pd.DataFrame(funcs.cal_pgy_withAgeGroup(df_test, cluster_num, 1, age_list)).to_csv(
'tmp/pgy_ageGroup_privcheck.csv',
index=False, header=None)
funcs.update_age_group(df_test, age_group_dict)
JSD_Mat_dict = np.zeros((cluster_num, cluster_num, age_group_number))
group_min_age_dict = {}
group_usersize_dict = {}
for ag in range(age_group_number):
group_min_age_dict[ag] = group_age_dict[ag][0]
df_test_ag = df_test.loc[df_test['age_group'] == ag]
age_list_ag = group_age_dict[ag]
group_usersize_dict[ag] = df_test_ag.shape[0]
JSD_Mat_dict[:, :, ag] = funcs.cal_JSD_Matrix_withoutAgeGroup(df_test_ag, cluster_num, 4)
scipy.io.savemat('tmp/JSDM_ageGroup_privcheck.mat', {"JSD_Mat_input": JSD_Mat_dict})
pd.DataFrame(JSD_Mat_dict[ag]).to_csv('tmp/JSDM_ageGroup_yang.csv', index=False, header=None)
eng = matlab.engine.start_matlab()
eng.edit('../../matlab/age_groupnum_scenario_I/PrivCheck', nargout=0)
eng.cd('../../matlab/age_groupnum_scenario_I', nargout=0)
xpgg, distortion_budget = np.array(eng.PrivCheck(deltaX, nargout=2))
xpgg = np.array(xpgg)
df_test['age_group'] = pd.Series(np.zeros(df_test.shape[0]), index=df_test.index, dtype='int32')
funcs.update_age_group(df_test_rec_items, age_group_dict)
X_obf_dict = {}
for i in range(100):
X_obf_dict[i], _ = funcs.get_obf_X(df_test, xpgg, pp)
_, X_ori = funcs.get_obf_X(df_test, xpgg, pp)
for i in X_ori.keys():
user_age = X_ori[i][-2]
X_ori[i][-3] = age_group_dict[user_age]
for j in range(1):
X_obf_dict[j][i][-1] = age_group_dict[user_age]
return X_obf_dict, X_ori, model_rf, model_xgb
def differential_privacy(df_train, df_test, df_test_rec_items, age_group_dict, beta):
funcs.update_age_group(df_train, age_group_dict)
model_rf = funcs.train_rf_model(df_train)
model_xgb = funcs.train_xgb_model(df_train)
print("model training over...")
funcs.update_age_group(df_test_rec_items, age_group_dict)
print("generate distance matrix...")
dist_mat = dist.squareform(dist.pdist(df_test, 'jaccard'))
print("start obfuscating...")
X_obf_dict = {}
for i in range(100):
X_obf_dict[i], _ = funcs.get_DP_obf_X(df_test, dist_mat, beta)
_, X_ori = funcs.get_DP_obf_X(df_test, dist_mat, beta)
print("obfuscating done.")
for i in X_ori.keys():
user_age = X_ori[i][-2]
X_ori[i][-3] = age_group_dict[user_age]
for j in range(100):
X_obf_dict[j][i][-1] = age_group_dict[user_age]
return X_obf_dict, X_ori, model_rf, model_xgb
def Frapp(df_train, df_test, df_test_rec_items, age_group_dict, gamma, pp):
funcs.update_age_group(df_train, age_group_dict)
model_rf = funcs.train_rf_model(df_train)
model_xgb = funcs.train_xgb_model(df_train)
print("model training over...")
funcs.update_age_group(df_test_rec_items, age_group_dict)
print("start obfuscating...")
X_obf_dict = {}
for i in range(100):
X_obf_dict[i], _ = funcs.get_frapp_obf_X(df_test, gamma, pp)
_, X_ori = funcs.get_frapp_obf_X(df_test, gamma, pp)
print("obfuscating done.")
for i in X_ori.keys():
user_age = X_ori[i][-2]
X_ori[i][-3] = age_group_dict[user_age]
for j in range(100):
X_obf_dict[j][i][-1] = age_group_dict[user_age]
return X_obf_dict, X_ori, model_rf, model_xgb
def Random(df_train, df_test, df_test_rec_items, age_group_dict, p_rand, pp):
funcs.update_age_group(df_train, age_group_dict)
model_rf = funcs.train_rf_model(df_train)
model_xgb = funcs.train_xgb_model(df_train)
print("model training over...")
funcs.update_age_group(df_test_rec_items, age_group_dict)
print("start obfuscating...")
X_obf_dict = {}
for i in range(100):
X_obf_dict[i], _ = funcs.get_random_obf_X(df_test, p_rand, pp)
_, X_ori = funcs.get_random_obf_X(df_test, p_rand, pp)
print("obfuscating done.")
for i in X_ori.keys():
user_age = X_ori[i][-2]
X_ori[i][-3] = age_group_dict[user_age]
for j in range(100):
X_obf_dict[j][i][-1] = age_group_dict[user_age]
return X_obf_dict, X_ori, model_rf, model_xgb
def Similarity(df_train, df_test, df_test_rec_items, age_group_dict, pp):
funcs.update_age_group(df_train, age_group_dict)
# random forest
model_rf = funcs.train_rf_model(df_train)
# xgboost
model_xgb = funcs.train_xgb_model(df_train)
print("model training over...")
funcs.update_age_group(df_test_rec_items, age_group_dict)
print("start obfuscating...")
X_obf_dict = {}
# get similarity matrix
itemCols = df_test.columns[:-4]
df_items = df_test[itemCols]
sim_mat = cosine_similarity(df_items.values)
for i in range(100):
X_obf_dict[i], _ = funcs.get_similarity_obf_X(sim_mat, df_test, pp)
_, X_ori = funcs.get_similarity_obf_X(sim_mat, df_test, pp)
print("obfuscating done.")
for i in X_ori.keys():
user_age = X_ori[i][-2]
X_ori[i][-3] = age_group_dict[user_age]
for j in range(100):
X_obf_dict[j][i][-1] = age_group_dict[user_age]
return X_obf_dict, X_ori, model_rf, model_xgb
| [
"scottshufe@gmail.com"
] | scottshufe@gmail.com |
00e2fbc37e5d8aa5a588fc4185c7bc8bab4c4f22 | a39ed5db6c75c9ae1f5e05118794c64102dc5f7a | /2020/01_1/solution.py | 091874824ee82bf49cb18909afad5b2272562b7c | [
"MIT"
] | permissive | budavariam/advent_of_code | b656d5caf5d05113b82357754eb225e61e89ac0d | 635be485ec691f9c0cdeb83f944de190f51c1ba3 | refs/heads/master | 2022-12-25T18:12:00.981365 | 2022-12-20T08:20:51 | 2022-12-20T08:20:51 | 114,570,426 | 1 | 1 | MIT | 2022-12-09T09:29:06 | 2017-12-17T21:36:00 | Python | UTF-8 | Python | false | false | 656 | py | """ Advent of code 2020 day 1/1 """
import math
from os import path
def solution(data):
""" Solution to the problem """
lines = data.split("\n")
precalculate = dict()
for line_value_str in lines:
precalculate[2020 - int(line_value_str)] = True
for line_value_str in lines:
current_value = int(line_value_str)
inverse = 2020 - current_value
if (precalculate.get(current_value) == True):
return current_value * inverse
return None
if __name__ == "__main__":
with(open(path.join(path.dirname(__file__), 'input.txt'), 'r')) as input_file:
print(solution(input_file.read()))
| [
"budavariam@gmail.com"
] | budavariam@gmail.com |
cf9920ffe8f52a7cee208a43eb994c063c4385c1 | c31762e13ac3a5c5974b700c001b1fd789f47be8 | /dbtools.py | dcbfbdb24c0ec4b08bf89c3ab0dca55b8f3a22f0 | [] | no_license | harkdi/dbtools | 7b811e6d2357458b58201f59768428cbee15b1a9 | e5c86545f31f31d52f8333f50fa8f297b28e3a0d | refs/heads/master | 2020-12-02T16:16:56.216002 | 2017-07-10T08:57:22 | 2017-07-10T08:57:22 | 96,528,668 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,244 | py | #!/usr/bin/env python
# coding: utf-8
import os
import json
import string
import subprocess
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
#--------------------- Load the configuration file ---------------------------------
sys.path.append('/oma/deploy/scripts/')
with open('/oma/deploy/scripts/robotConf.json') as f:
robotConf = json.load(f)
#---Import and export mysql table --------------------------------------------------
def export_mysql(ENV, db_name, tab_name):
"First lead to the proxy host, and then copy the sql file to the console host"
proxyIP = robotConf[ENV]["proxyIP"]
mysql_ip = robotConf[ENV]["mysql"][0]
username=robotConf[ENV]["mysql_account"][0]
passwd = robotConf[ENV]["mysql_account"][1]
dir1="/tmp/"
files="%s%s.sql" % (dir1, tab_name)
export_cmd="/usr/bin/mysqldump -u%s -p%s -h%s --single-transaction \
--set-gtid-purged=OFF \
%s %s > %s 2> /dev/null" \
% (username, passwd, mysql_ip, db_name, tab_name, files )
subprocess.call('%s%s "%s"' % ('ssh ', proxyIP, export_cmd ), shell=True)
## If the host is not controlled,
## copy the exported file from the proxy host to the control host
if ENV != 'pre':
subprocess.call('%s%s:%s %s' % ('scp ', proxyIP, files, dir1), shell=True)
print "\nexport %s ENV mysql tab %s to %s dir" % (ENV, tab_name, dir1)
def import_mysql(ENV, db_name, tab_name, rename=0):
"First copy the sql file to the proxy host, and then import to mysql"
proxyIP = robotConf[ENV]["proxyIP"]
mysql_ip = robotConf[ENV]["mysql"][0]
username=robotConf[ENV]["mysql_account"][0]
passwd = robotConf[ENV]["mysql_account"][1]
dir1="/tmp/"
files="%s%s.sql" % (dir1, tab_name)
import_cmd="/usr/bin/mysql -u%s -p%s -h%s %s < %s 2> /dev/null" % \
(username, passwd, mysql_ip, db_name, files)
if not os.path.exists(files):
print "file %s is no exis ,exit!!!" % files
sys.exit()
#If you need to rename, modify the export SQL file,
#just modify the table name in the contents of the SQL file
if rename != 0:
subprocess.call("sed -i 's/`%s`/`%s`/g' %s" % (tab_name, rename, files), shell=True)
###If it is not a control host, copy files from the control host to the agent
if ENV != 'pre':
subprocess.call('scp %s %s:%s ' % (files, proxyIP, dir1), shell=True)
subprocess.call('%s%s "%s"' % ('ssh ', proxyIP, import_cmd ), shell=True)
print "import %s dir mysql table %s to %s ENV" % (dir1, tab_name, ENV)
def mysql_env_to_env(source_env, target_env, db_name, tab_name, rename=0):
export_mysql(source_env, db_name, tab_name)
if rename == 0:
import_mysql(target_env, db_name, tab_name)
else:
import_mysql(target_env, db_name, tab_name, rename)
#---Import and export MongoDB coll----------------------------------------------------------
def export_mongodb(ENV, db_name, tab_name):
proxyIP = robotConf[ENV]["proxyIP"]
mongo_ip=robotConf[ENV]["mongodb"][0]
dir1="/tmp/"
files="%s%s.json" % (dir1, tab_name)
export_cmd="/usr/bin/mongoexport -h %s -d %s -c %s -o %s" % \
(mongo_ip, db_name, tab_name, files)
subprocess.call('%s%s "%s"' % ('ssh ', proxyIP, export_cmd ), shell=True)
if ENV != 'pre':
subprocess.call('%s%s:%s %s' % ('scp ', proxyIP, files, dir1), shell=True)
print "\nexport %s ENV MongoDB collection %s to %s dir" % (ENV, tab_name, dir1)
def import_mongodb(ENV, db_name, tab_name, rename=0):
proxyIP = robotConf[ENV]["proxyIP"]
mongo_ip=robotConf[ENV]["mongodb"][0]
dir1="/tmp/"
files="%s%s.json" % (dir1, tab_name)
if rename == 0:
import_cmd="/usr/bin/mongoimport -h %s -d %s -c %s %s" % \
(mongo_ip, db_name, tab_name, files)
else:
import_cmd="/usr/bin/mongoimport -h %s -d %s -c %s %s" % \
(mongo_ip, db_name, rename, files)
if not os.path.exists(files):
print "file %s is no exis ,exit!!!" % files
sys.exit()
if ENV != 'pre':
subprocess.call('scp %s %s:%s ' % (files, proxyIP, dir1), shell=True)
subprocess.call('%s%s "%s"' % ('ssh ', proxyIP, import_cmd ), shell=True)
print "import %s dir mongoDB coll %s to %s ENV" % (dir1, tab_name, ENV)
def mongodb_env_to_env(source_env, target_env, db_name, tab_name, rename=0):
export_mongodb(source_env, db_name, tab_name)
if rename == 0:
import_mongodb(target_env, db_name, tab_name)
else:
import_mongodb(target_env, db_name, tab_name, rename)
_env={'1': 'test', '2': 'pre', '3': 'pro'}
menu_list={
"db_type":
"""
----------------------------------------------------
DBtools
----------------------------------------------------
1 MySQL
2 MongoDB
q Exit
Please select the number: """,
"mysql_db_menu":
"""
----------------------------------------------------
1 export mysql table
2 import mysql table
3 Cross-environment migration mysql table
q exit
Please select the number: """,
"mongodb_menu":
"""
----------------------------------------------------
1 export mongodb collection
2 import mongodb collection
3 Cross-environment migration mongodb collection
q exit
Please select the number: """,
"env_menu":
"""
----------------------------------------------------
1 test 2 pre 3 pro
Please select the number: """,
"source_env_menu":
"""
----------------------------------------------------
1 test 2 pre 3 pro
Please select the "source" environment : """,
"target_env_menu":
'Please select the "Target" environment: '
}
#id_dist = {"drj":"direnjie",
# "wp":"wandapeng",
# "lcj":"liangchangliang"
# }
def quit_page():
print '\n%s' % ('-' * 50)
print string.center('Exit!', 50)
print '%s\n' % ('-' * 50)
sys.exit(0)
def home_page():
try:
choose = raw_input(menu_list["db_type"]).strip()
if choose == '1':
mysql_page()
elif choose == '2':
mongodb_page()
else:
quit_page()
except (KeyboardInterrupt, EOFError):
quit_page()
def mysql_page():
try:
choose=raw_input(menu_list["mysql_db_menu"]).strip()
if choose == '1':
env = _env[raw_input(menu_list["env_menu"]).strip()]
db_name = raw_input("Please enter the MySQL database name: ").strip()
tab_name = raw_input("Please enter a table name: ").strip()
export_mysql(env, db_name, tab_name)
elif choose == "2":
env = _env[raw_input(menu_list["env_menu"]).strip()]
db_name = raw_input("Please enter MySQL DBname: ").strip()
tab_name = raw_input("Please enter a table name: ").strip()
rename_status=raw_input("Whether you need to rename y/n: ").strip()
if rename_status == 'y':
rename = raw_input("Please enter a new name: ").strip()
import_mysql(env, db_name, tab_name, rename)
else:
import_mysql(env, db_name, tab_name)
elif choose == "3":
#ID=getpass.getpass("Please enter certification ID:").strip()
#if ID in id_dist:
# print "\nHello %s\n" % id_dist[ID]
#else:
# sys.exit()
while True:
source_env = _env[raw_input(menu_list["source_env_menu"]).strip()]
target_env = _env[raw_input(menu_list["target_env_menu"]).strip()]
db_name = raw_input("Please enter MySQL DBname: ").strip()
tab_name = raw_input("Please enter a table name: ").strip()
rename_status=raw_input("Whether you need to rename y/n: ").strip()
if rename_status == 'y':
rename = raw_input("Please enter a new name: ").strip()
else:
rename = 0
print "\nfrom \033[0;31m%s\033[0m ENV ====> \033[0;31m%s\033[0m ENV" % (source_env, target_env)
print "DB name ======= \033[0;31m%s\033[0m" % (db_name)
print "table name ======= \033[0;31m%s\033[0m" % (tab_name)
if rename != 0:print "RENAME ======= \033[0;31m%s\033[0m" % (rename)
_choose=raw_input("Enter y to continue, n re-select\nPlease select(y/n): ")
if _choose != 'y':continue
mysql_env_to_env(source_env, target_env, db_name, tab_name, rename)
break
elif choose == 'r':
home_page()
except (KeyboardInterrupt, EOFError):
quit_page()
def mongodb_page():
try:
choose=raw_input(menu_list["mongodb_menu"]).strip()
if choose == '1':
env = _env[raw_input(menu_list["env_menu"]).strip()]
db_name = raw_input("Please enter database name: ").strip()
tab_name = raw_input("Please enter collection name: ").strip()
export_mongodb(env, db_name, tab_name)
elif choose == "2":
env = _env[raw_input(menu_list["env_menu"]).strip()]
db_name = raw_input("Please enter database name: ").strip()
tab_name = raw_input("Please enter collection name: ").strip()
rename_status=raw_input("Whether you need to rename y/n: ").strip()
if rename_status == 'y':
rename = raw_input("Please enter new name: ").strip()
import_mongodb(env, db_name, tab_name, rename)
else:
import_mongodb(env, db_name, tab_name)
elif choose == "3":
while True:
source_env = _env[raw_input(menu_list["source_env_menu"]).strip()]
target_env = _env[raw_input(menu_list["target_env_menu"]).strip()]
db_name = raw_input("Please enter database name: ").strip()
tab_name = raw_input("Please enter collection name: ").strip()
rename_status=raw_input("Whether you need to rename y/n: ").strip()
if rename_status == 'y':
rename = raw_input("Please enter new name: ").strip()
else:
rename = 0
print "\nfrom \033[0;31m%s\033[0m ENV ====> \033[0;31m%s\033[0m ENV" % (source_env, target_env)
print "DB name ======= \033[0;31m%s\033[0m" % (db_name)
print "coll name ======= \033[0;31m%s\033[0m" % (tab_name)
if rename != 0:print "RENAME ======= \033[0;31m%s\033[0m" % (rename)
_choose=raw_input("Enter y to continue, n re-select\nPlease select(y/n): ")
if _choose != 'y':continue
mongodb_env_to_env(source_env, target_env, db_name, tab_name, rename)
break
elif choose == 'r':
home_page()
except (KeyboardInterrupt, EOFError):
quit_page()
#-----------------------------------------------------------------------------------------------
if __name__ == '__main__':
home_page()
| [
"harkdi@126.com"
] | harkdi@126.com |
ca18e55478a75ece6dbb496ea73cb385ee095b34 | d2503df4d2baab1b7d13dd624ad0a0e32e6198f2 | /blob_detect.py | 218c80c3b1a33fe290b5ac2c0a452afa97388a59 | [] | no_license | tpkelliher/drone_project | b02adb284d6aeb0c00e33f51f458a6b5389b5c96 | 8638b4e298b884e31b57d2e1d555a996e561cc92 | refs/heads/master | 2020-12-30T16:40:16.388671 | 2017-05-05T17:06:16 | 2017-05-05T17:06:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 859 | py | from skimage import io, color
from skimage.filters.rank import median
from skimage.feature import blob_dog, blob_log, blob_doh
from skimage.morphology import disk
from matplotlib import pyplot as plt
#Reads in image and runs it through a gray layer to help filter
img = io.imread('test.jpg')
img_gray = color.rgb2gray(img)
img_filtered = median(img_gray)
coords = []
#intializing and creating the plot that will print at the end to show blob detection results
plt.figure()
plt.imshow(img_filtered, cmap='gray')
plt.title('Filtered Image')
blobs = blob_doh(img_filtered)
#creates circles around all the blobs found
for blob in blobs:
y, x, r = blob
c = plt.Circle((x, y), r, color='red', linewidth=2, fill=False)
plt.gca().add_patch(c)
coords += [[x,y]]
#return coordinates and shows the final blob detection run
print coords
plt.show()
| [
"angelesadilene@gmail.com"
] | angelesadilene@gmail.com |
e93a54321ac2e1dd04cf8be3bd8441186da7436b | a1e488fbe7fa0a8b149900b113d21ed3819b20a4 | /emotion_svmloss.py | c3dd5fe480fd61f91185479539d29b9847dff436 | [] | no_license | henniekim/emotion_recognition | 8c59d8ffe7c3d0203c7f336c480b04f31f14b242 | 19666ac1fc8f8d78d693e02fb51475f0b5bd73d3 | refs/heads/master | 2020-03-08T07:27:53.756586 | 2018-05-15T04:57:34 | 2018-05-15T04:57:34 | 127,994,765 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,056 | py | import numpy as np
def L_i(X, Y, W): # W : weight vector [7 x 2304] // x : [2304 x n] // y : [7 x n]
delta = 1.0
scores = W.dot(X)
scores2 = scores[Y, np.arange(scores.shape[1])]
margins = np.maximum(0, scores - scores[Y, np.arange(scores.shape[1])] + delta )
margins[Y, np.arange(Y.shape[0])] = 0
loss = np.sum(margins)
return loss
Xtr = np.load('./data/data_set_fer2013.npy')
Ytr = np.load('./data/data_labels_fer2013.npy')
Xte = np.load('./data/test_set_fer2013.npy')
Yte = np.load('./data/test_labels_fer2013.npy')
x = Xtr.shape[0]
Xtr_cols = Xtr.reshape(48*48, Xtr.shape[0])
Xte_cols = Xte.reshape(48*48, Xte.shape[0])
bestloss = float("inf")
for num in range(1000):
W = np.random.randn(7, 2304) * 0.0001
loss = L_i(Xtr_cols, Ytr, W)
if loss < bestloss:
bestloss = loss
bestW = W
print (' in attempt %d the loss was %f, best %f' % (num, loss, bestloss))
scores = bestW.dot(Xte_cols)
Yte_predict = np.argmax(scores, axis = 0)
print (' accuracy : %f ' % np.mean(Yte_predict == Yte))
| [
"seru_s@me.com"
] | seru_s@me.com |
83095e99b9defb7aa6b20ed455273c71322c712c | 9249fd887b943353cf59f94c115e508e67e83918 | /lib/util.py | d0d52e695bf1b05e846ec31fb3f326fe6467a5b5 | [] | no_license | fmidev/sasse-polygon-process | f4af2e442e26af61640fa2333a60f352e78b8853 | 8ade2764a4bcd3bb01a34c316120dcd065c59ea2 | refs/heads/master | 2023-03-04T19:56:43.949324 | 2020-12-16T09:44:44 | 2020-12-16T09:44:44 | 215,499,326 | 0 | 0 | null | 2020-06-22T12:57:48 | 2019-10-16T08:41:10 | Jupyter Notebook | UTF-8 | Python | false | false | 27,393 | py | import numpy as np
import os, math, pyproj, yaml, logging, joblib, dask
from shapely.ops import transform
from functools import partial
import pandas as pd
from sklearn.model_selection import RandomizedSearchCV, TimeSeriesSplit
from sklearn.metrics import accuracy_score, f1_score, precision_score, recall_score, mean_squared_error, mean_absolute_error, r2_score, classification_report, make_scorer
from sklearn.preprocessing import label_binarize
from scipy.stats import expon
from scipy import interp
import matplotlib.pyplot as plt
import matplotlib.ticker as mticker
import matplotlib.patches as mpatches
from mpl_toolkits.axes_grid1 import make_axes_locatable
import seaborn as sns
from dask.distributed import Client
#from dask import delayed
#import dask.dataframe as dd
import dask, ast, itertools
import dask_ml.model_selection as dcv
from sklearn.metrics import roc_curve, auc, precision_score, f1_score, recall_score, average_precision_score, precision_recall_curve, confusion_matrix, classification_report
def evaluate(model, options, data=None, dataset_file=None, fh=None, viz=None):
    """
    Evaluate a fitted classifier and optionally report metrics and plots.

    model : obj
        fitted scikit-learn style classifier (predict / predict_proba)
    options : obj
        options with at least label, feature_params, model, output_path
        and dataset_file attributes
    data : tuple (X, y), optional
        evaluation features and labels; if None the set is read from CSV
    dataset_file : str, optional
        CSV file to read the evaluation set from; defaults to
        options.dataset_file (previously this argument was ignored)
    fh : FileHandler, optional
        when given, validation errors are written to CSV
    viz : obj, optional
        when given, confusion matrices, PR/ROC curves and feature
        importance plots are produced (previously a None viz crashed)
    """
    if data is not None:
        X, y = data
    else:
        # Fall back to the configured dataset when no explicit file given
        if dataset_file is None:
            dataset_file = options.dataset_file
        data = pd.read_csv(dataset_file)
        data = data.loc[data['weather_parameter'] == 'WindGust']
        X = data.loc[:, options.feature_params]
        y = data.loc[:, options.label].values.ravel()

    y_pred = model.predict(X)
    y_pred_proba = model.predict_proba(X)

    # Macro averaging weights all classes equally despite class imbalance
    acc = accuracy_score(y, y_pred)
    precision = precision_score(y, y_pred, average='macro')
    recall = recall_score(y, y_pred, average='macro')
    f1 = f1_score(y, y_pred, average='macro')

    logging.info('Accuracy: {}'.format(acc))
    logging.info('Precision: {}'.format(precision))
    logging.info('Recall: {}'.format(recall))
    logging.info('F1 score: {}'.format(f1))

    if fh is not None:
        error_data = {'acc': [acc],
                      'precision': [precision],
                      'recall': [recall],
                      'f1': [f1]}
        fname = '{}/test_validation_errors.csv'.format(options.output_path)
        fh.write_csv(error_data, filename=fname)

    if viz is not None:
        # Confusion matrices
        fname = '{}/confusion_matrix_testset.png'.format(options.output_path)
        viz.plot_confusion_matrix(y, y_pred, np.arange(4), filename=fname)

        fname = '{}/confusion_matrix_testset_normalised.png'.format(options.output_path)
        viz.plot_confusion_matrix(y, y_pred, np.arange(4), True, filename=fname)

        # Precision-recall curve
        fname = '{}/precision-recall-curve_testset.png'.format(options.output_path)
        viz.prec_rec_curve(y, y_pred_proba, n_classes=4, filename=fname)

        # ROC
        fname = '{}/roc_testset.png'.format(options.output_path)
        viz.plot_roc(y, y_pred_proba, n_classes=4, filename=fname)

        # Feature importance (only the random forest exposes it)
        if options.model == 'rfc':
            fname = '{}/feature_importance.png'.format(options.output_path)
            viz.rfc_feature_importance(model.feature_importances_, fname, feature_names=options.feature_params)

    logging.info('Validation report:\n{}'.format(classification_report(y_pred, y)))
def param_grid(model):
    """
    Return the hyper-parameter search space for a model type.

    model : str
        'rfc' (random forest classifier) or 'svc' (support vector classifier)

    return : dict
        parameter name -> list or distribution of candidate values

    Raises
    ------
    NotImplementedError
        if no search space is defined for the given model type
    """
    if model == 'rfc':
        param_grid = {"n_estimators": [10, 100, 200, 800],
                      "max_depth": [3, 20, None],
                      "max_features": ["auto", "sqrt", "log2", None],
                      "min_samples_split": [2,5,10],
                      "min_samples_leaf": [1, 2, 4, 10],
                      "bootstrap": [True, False]}
    elif model == 'svc':
        param_grid = {"C": expon(scale=100),
                      "kernel": ['rbf', 'linear', 'sigmoid', 'poly'],
                      'degree': range(1,5)}
    else:
        # raising a plain string is a TypeError in Python 3; raise a
        # proper exception type with a helpful message instead
        raise NotImplementedError('No param grid defined for model {}'.format(model))

    return param_grid
def cv(model, param_grid, X, y, n_iter=20):
    """
    Randomized-search cross validation (dask backend) and refit.

    model : obj
        scikit style estimator
    param_grid : dict
        parameter distributions for the random search
    X : DataFrame | Array
        features
    y : list
        labels
    n_iter : int
        number of random search iterations

    return : (model, cv_results)
        model refit with the best parameter combination and the full
        search result DataFrame sorted best f1 first
    """
    print('..performing cv search...')

    # Run the whole search as one dask job
    # (removed unused 'searches' accumulator and dead 'cv_results = None')
    random_search = dcv.RandomizedSearchCV(model,
                                           param_grid,
                                           n_iter=n_iter,
                                           cv=5,
                                           scoring=['f1_macro'], #, 'accuracy'],
                                           return_train_score=True,
                                           refit=False).fit(X, y)

    # Gather results, best mean f1 first so row 0 is the winner
    cv_results = pd.DataFrame(random_search.cv_results_)
    cv_results.sort_values(by=['mean_test_f1_macro'], inplace=True, ascending=False, ignore_index=True)
    print(cv_results.head())

    best_params = cv_results.loc[0,'params']
    model = model.set_params(**best_params)
    print('Using configuration: {}'.format(best_params))

    # Final fit distributed over the dask cluster
    with joblib.parallel_backend('dask'):
        model.fit(X, y)

    return model, cv_results
def cv_(X, y, model, options, fh):
    """
    Cross-validate

    X : DataFrame | Array
        Features
    y : list
        labels
    model : obj
        scikit model
    options : obj
        options with at leas model, n_iter_search and output_path attributes
    fh : FileHandler
        file handler instance to report and store results

    return : model
    """
    # Macro-averaged scorers so minority classes are weighted fairly
    scoring = {'accuracy': make_scorer(accuracy_score),
               'precision': make_scorer(precision_score, average='macro'),
               'recall': make_scorer(recall_score, average='macro'),
               'f1_macro': make_scorer(f1_score, average='macro'),
               'f1_weighted': make_scorer(f1_score, average='weighted')}

    # refit is left False; retraining separately afterwards avoids keeping
    # probability=True during the search
    searcher = RandomizedSearchCV(model,
                                  param_distributions=param_grid(options.model),
                                  n_iter=int(options.n_iter_search),
                                  scoring=scoring,
                                  cv=TimeSeriesSplit(),
                                  return_train_score=True,
                                  refit=False,
                                  n_jobs=-1)

    logging.info('Starting 5-fold random search cross validation with {} iterations... X size is {}.'.format(options.n_iter_search, len(X)))
    searcher.fit(X, y)
    logging.info("RandomizedSearchCV done.")

    # Column names of the aggregated test scores
    mean_test_cols = ['mean_test_{}'.format(name) for name in scoring.keys()]

    results = pd.DataFrame(searcher.cv_results_)
    results.sort_values(by=['mean_test_f1_macro'], inplace=True, ascending=False, ignore_index=True)

    fname = options.output_path+'/random_search_cv_results.txt'
    fh.df_to_csv(results, fname)
    logging.info("\n{}".format(results.loc[:, mean_test_cols]))

    # Refit with the best parameter combination; SVC additionally needs
    # probability estimates enabled for downstream predict_proba use
    best_params = results.loc[0, 'params']
    if options.model in ['svc']:
        best_params['probability'] = True
    model.set_params(**best_params)
    model.fit(X, y)

    return model
def feature_selection(X, y, model, options, fh):
    """
    Run feature selection process following:

    1. find feature importance by fitting RFC
    2. drop least important features one-by-one and run CV for new,
       suppressed, dataset
    3. store CV score of each step

    X : DataFrame
        features (columns named as in options.feature_params)
    y : list
        labels
    model : obj
        scikit model exposing feature_importances_ after fit
    options : obj
        options with model, n_iter_search, feature_params and output_path
    fh : FileHandler
        file handler instance to store results

    return : (model, params)
        model refit with the best parameter set and the list of kept
        feature names
    """
    logging.info('Starting feature selection process...')

    logging.info('..traingin {} with {} samples'.format(options.model, len(X)))
    model.fit(X, y)

    # Feature names ordered from least to most important
    indices = np.argsort(model.feature_importances_)[::1]
    names = [options.feature_params[i] for i in indices]

    cv_results = None
    logging.info('..performing cv search...')
    #for i in range(0,len(names)-4):
    for i in range(0,5):
        logging.info('...with {} parameters'.format(len(names)-i))
        # Drop the i least important features
        data = X.loc[:,names[i:]]
        random_search = RandomizedSearchCV(model,
                                           param_distributions=param_grid(options.model),
                                           n_iter=int(options.n_iter_search),
                                           scoring=['f1_macro', 'f1_micro', 'accuracy'],
                                           return_train_score=True,
                                           refit=False,
                                           n_jobs=-1)
        random_search.fit(data, y)

        res_df = pd.DataFrame(random_search.cv_results_)
        res_df['Number of parameters'] = len(names)-i
        if cv_results is None:
            cv_results = res_df
        else:
            cv_results = pd.concat([cv_results, res_df], ignore_index=True)
    logging.info('..cv search done')

    # BUG FIX: sort best score first and reset the index; the previous
    # ascending in-place sort kept the original index labels, so .loc[0]
    # picked an arbitrary (first-inserted) row instead of the best one
    cv_results.sort_values(by=['mean_test_f1_macro'], inplace=True, ascending=False, ignore_index=True)

    # Save results
    fname = '{}/feature_selection_results.csv'.format(options.output_path)
    fh.df_to_csv(cv_results, fname, fname)

    logging.info('..refitting with best model params')
    model.set_params(**cv_results.loc[0,'params'])

    # BUG FIX: the run that kept k parameters used names[len(names)-k:];
    # the old slice names[k:] kept the wrong (and far too small) tail
    num_params = cv_results.loc[0, 'Number of parameters']
    params = names[len(names) - num_params:]
    data = X.loc[:, params]
    model.fit(data, y)

    return model, params
def get_param_names(config_filename, shortnames=True):
    """ Get param names, partly from config and partly as hard coded """
    # Meteorological parameters come from the YAML config
    with open(config_filename) as conf_file:
        config_dict = yaml.load(conf_file.read(), Loader=yaml.FullLoader)

    met_params = set()
    for param, info in config_dict['params'].items():
        for agg in info['aggregation']:
            if shortnames:
                met_params.add('{} {}'.format(agg[1:], info['name']))
            else:
                met_params.add('{}{{{}}}'.format(agg, param))
    met_params = list(met_params)

    # Polygon geometry parameters are hard coded
    polygon_params = ['speed_self', 'angle_self', 'area_m2', 'area_diff', 'low_limit']
    features = polygon_params + met_params

    meta_params = ['id', 'storm_id', 'point_in_time', 'weather_parameter', 'high_limit', 'transformers', 'all_customers', 'outages', 'customers']
    labels = ['class', 'class_customers']

    all_params = features + meta_params + labels
    return features, meta_params, labels, all_params
def surrounding_indexes(pixel_coord, window, boundary):
    """ Returns the indexes of the pixels surrounding the given
    pixel coordinate.

    NOTE(review): this is an unimplemented stub -- the body contains only
    this docstring, so the function always returns None regardless of its
    arguments. Confirm whether it is still needed or should be removed.
    """
def circular_kernel(radius, fill_value):
    """Returns a rectangular numpy array of shape (2*radius+1, 2*radius+1)
    where all values within radius from the center are set to fill_value
    """
    size = 2 * radius + 1
    # Open grids of row/column offsets measured from the kernel center
    rows, cols = np.ogrid[-radius:radius + 1, -radius:radius + 1]
    inside_circle = rows ** 2 + cols ** 2 <= radius ** 2
    result = np.zeros((size, size))
    result[inside_circle] = fill_value
    return result
def minimum_nonzero_bbox(base_array):
    """Returns a minimum bounding box for non zero values
    as a tuple (ystart, ystop, xstart, xstop).
    """
    nonzero_coords = np.argwhere(base_array)
    ystart, xstart = nonzero_coords.min(0)
    ystop, xstop = nonzero_coords.max(0)
    return (ystart, ystop, xstart, xstop)
def rectangle_intersection(r1, r2):
    """Returns the intersection of two rectangular areas
    in form (ul_y, ul_x, w, h). Returns None if rectangles
    do not intersect.
    """
    # Upper-left corner of the overlap is the max of the two corners
    top = max(r1[0], r2[0])
    left = max(r1[1], r2[1])
    # Extent is bounded by the nearer of the two far edges
    width = min(r1[1] + r1[2], r2[1] + r2[2]) - left
    height = min(r1[0] + r1[3], r2[0] + r2[3]) - top
    if width <= 0 or height <= 0:
        return None
    return (top, left, width, height)
def insert_points(base_array, points, accumulate=False):
    """Mark the given (y, x) points on base_array in place.

    With accumulate=True each hit increments the cell by one, otherwise
    the cell is set to 1. Points outside the array are silently skipped
    (note: negative indices wrap around, as with plain numpy indexing).
    """
    for row, col in iter(points):
        try:
            if accumulate:
                base_array[row, col] += 1
            else:
                base_array[row, col] = 1
        except IndexError:
            # point lies outside the array -> ignore it
            continue
def insert_array(base_array, window, y, x, accumulate=False, insertmax=False):
    """
    function inserts the values of window array to base_array so that
    the upper left corner of the window is at point y, x. If window
    positioned at y, x doesn't intersect base_array, base_array stays
    unchanged.

    Parameters
    ----------
    base_array : np.ndarray 2d
        Array where new values area inserted
    window : np.nd_array
        Array that is inserted to base_array
    y : int
        insertion row coordinate
    x : int
        insertion column coordinate
    accumulate : bool
        If accumulate is set to True window values are accumulated on base_array values
        otherwise widow values overwrite the base_array values.
    insertmax : bool
        If base array contains values where window should be inserted, choose the max values
        at each position to be inserted on base_array
    """
    h1, w1 = base_array.shape
    h2, w2 = window.shape

    # No overlap at all -> nothing to do. (Previously this case fell
    # through with undefined slice bounds and the resulting NameError was
    # silently swallowed by a bare except.)
    if not (-h2 < y < h1) or not (-w2 < x < w1):
        return

    # Row bounds: clip the window to the vertical extent of base_array
    if 0 <= y < h1:
        y_min1 = y
        y_min2 = 0
        if y + h2 > h1:
            # window overflows the bottom edge
            y_max1 = h1
            y_max2 = h1 - y
        else:
            y_max1 = y + h2
            y_max2 = h2
    else:  # -h2 < y < 0, window sticks out above the top edge
        y_min1 = 0
        y_max1 = y + h2
        y_min2 = -y
        y_max2 = h2

    # Column bounds: clip the window to the horizontal extent
    if 0 <= x < w1:
        x_min1 = x
        x_min2 = 0
        if x + w2 > w1:
            # window overflows the right edge
            x_max1 = w1
            # BUG FIX: was 'h1 - x', which produced mismatching slice
            # widths for non-square base arrays; the assignment then
            # raised and the bare except silently skipped the insert
            x_max2 = w1 - x
        else:
            x_max1 = x + w2
            x_max2 = w2
    else:  # -w2 < x < 0, window sticks out left of the left edge
        x_min1 = 0
        x_max1 = x + w2
        x_min2 = -x
        x_max2 = w2

    # Slice widths now always match, so no exception guard is needed
    if accumulate:
        base_array[y_min1:y_max1, x_min1:x_max1] += window[y_min2:y_max2, x_min2:x_max2]
    elif insertmax:
        # keep the element-wise maximum of existing values and the window
        max_window = np.amax([base_array[y_min1:y_max1, x_min1:x_max1],
                              window[y_min2:y_max2, x_min2:x_max2]], axis=0)
        base_array[y_min1:y_max1, x_min1:x_max1] = max_window
    else:
        base_array[y_min1:y_max1, x_min1:x_max1] = window[y_min2:y_max2, x_min2:x_max2]
def insert_array2(base_array, window, samples, accumulate=False):
    """
    Insert the values of ``window`` into ``base_array`` once for every
    (y, x) pair in ``samples``, so that the upper-left corner of the
    window is at point (y, x).  Placements that do not intersect
    ``base_array`` leave it unchanged.

    Parameters
    ----------
    base_array : np.ndarray 2d
        Array where new values are inserted (modified in place).
    window : np.ndarray 2d
        Array that is inserted into base_array.
    samples : iterable of (int, int)
        Insertion (row, column) coordinates; clipped to the overlap.
    accumulate : bool
        If True, window values are accumulated onto base_array values;
        otherwise window values overwrite the base_array values.
    """
    # Shapes are loop-invariant: read them once.
    h1, w1 = base_array.shape
    h2, w2 = window.shape
    for y, x in samples:
        # Fix: the previous version clipped the x direction with
        # ``h1 - x`` (the base *height*) instead of ``w1 - x`` and
        # masked the fallout with a bare ``except``.
        y_lo, y_hi = max(y, 0), min(y + h2, h1)
        x_lo, x_hi = max(x, 0), min(x + w2, w1)
        if y_lo >= y_hi or x_lo >= x_hi:
            # This sample does not intersect the base array: skip it.
            continue
        piece = window[y_lo - y:y_hi - y, x_lo - x:x_hi - x]
        if accumulate:
            base_array[y_lo:y_hi, x_lo:x_hi] += piece
        else:
            base_array[y_lo:y_hi, x_lo:x_hi] = piece
def bearing(pt1, pt2):
    """Angle in degrees, ``0 <= angle < 360``, of the vector from
    ``pt1`` to ``pt2`` (objects exposing ``.x`` and ``.y``), measured
    counter-clockwise from the positive x axis.
    """
    dx = pt2.x - pt1.x
    dy = pt2.y - pt1.y
    degs = math.degrees(math.atan2(dy, dx))
    # atan2 yields (-180, 180]; shift negatives into [0, 360).
    return degs + 360 if degs < 0 else degs
def speed(row1, row2, threshold = None, missing = None):
    """Distance in kilometres between the centroids of ``row1.geom``
    and ``row2.geom``.

    Each geometry is first reprojected from WGS84 to an Albers
    equal-area CRS anchored at its own latitude bounds, so the centroid
    distance is computed in metres.  When ``threshold`` is given and
    the distance exceeds it, ``missing`` is returned instead.
    """
    def _aea_centroid(row):
        # Build the WGS84 -> equal-area projector for this geometry and
        # return the centroid of the reprojected shape.
        projector = partial(
            pyproj.transform,
            pyproj.Proj(init='EPSG:4326'),
            pyproj.Proj(
                proj='aea',
                lat_1=row.geom.bounds[1],
                lat_2=row.geom.bounds[3]))
        return transform(projector, row.geom).centroid

    dist = _aea_centroid(row1).distance(_aea_centroid(row2)) / 1000
    if threshold is not None and dist > threshold:
        return missing
    return dist
def gridcv(model, param_grid, X, y):
    """Grid-search ``model`` over ``param_grid`` with dask-ml and refit
    the best configuration on the full data.

    The search scores macro/micro F1 and accuracy; the winning
    hyper-parameters are the row with the highest mean macro-F1 test
    score.  Returns ``(fitted_model, cv_results)`` where ``cv_results``
    is the full cross-validation table as a DataFrame.

    (Fix: dropped the dead locals ``cv_results = None`` and
    ``searches = []`` that were never used.)
    """
    print('..performing cv search...')
    # Run the cross-validated grid search; dask parallelises the jobs.
    grid_search = dcv.GridSearchCV(model,
                                   param_grid,
                                   scoring=['f1_macro', 'f1_micro', 'accuracy'],
                                   return_train_score=True,
                                   refit=False,
                                   n_jobs=-1).fit(X, y)
    # Gather results, best macro-F1 first.
    cv_results = pd.DataFrame(grid_search.cv_results_)
    cv_results.sort_values(by=['mean_test_f1_macro'], inplace=True, ascending=False, ignore_index=True)
    print(cv_results.head())
    best_params = cv_results.loc[0,'params']
    model = model.set_params(**best_params)
    print('Using configuration: {}'.format(best_params))
    # Refit on the full data with the winning hyper-parameters.
    with joblib.parallel_backend('dask'):
        model.fit(X, y)
    return model, cv_results
##############################################
# Visualisation functions
##############################################
def plot_confusion_matrix(y_true, y_pred, classes,
                          normalize=False,
                          cmap=plt.cm.YlOrBr,
                          filename=None,
                          fontsize=20):
    """Plot the confusion matrix of ``y_pred`` against ``y_true`` and
    print a classification report.

    Normalization can be applied by setting `normalize=True`.

    Parameters
    ----------
    y_true, y_pred : array-like
        True and predicted class labels.
    classes : sequence
        Tick labels for the matrix axes (one per class).
    normalize : bool
        If True, each row is scaled to proportions instead of counts.
    cmap : matplotlib colormap
        Colormap for the matrix cells.
    filename : str or None
        When given, the figure is also saved to this path.
        (Fix: this argument used to be accepted but silently ignored.)
    fontsize : int
        Base font size for all text.
    """
    plt.clf()
    plt.rc('font', size=fontsize)
    fig, ax = plt.subplots(figsize=(6,6))
    np.set_printoptions(precision=2)
    cm = confusion_matrix(y_true, y_pred)
    if normalize:
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
    plt.grid(False, which='major')
    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    tick_marks = np.arange(len(classes))
    ax.xaxis.tick_top()
    plt.xticks(tick_marks, classes)
    plt.yticks(tick_marks, classes)
    fmt = '.2f' if normalize else 'd'
    # White text on dark cells, black on light ones.
    thresh = cm.max() / 2.
    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        plt.text(j, i, format(cm[i, j], fmt),
                 horizontalalignment="center",
                 color="white" if cm[i, j] > thresh else "black")
    plt.tight_layout()
    fig.subplots_adjust(bottom=0.12)
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
    if filename is not None:
        plt.savefig(filename, bbox_inches='tight')
    print(classification_report(y_true, y_pred))
def prec_rec_curve(y, y_pred, n_classes, fontsize=20):
    """
    Precision - Recall Curve

    Plots one precision/recall curve per class (one-vs-rest), the
    micro-averaged curve, and gray iso-F1 reference curves.  ``y``
    holds the true labels, ``y_pred`` the per-class scores with shape
    (n_samples, n_classes).
    """
    plt.rc('font', size=fontsize)
    colors=['xkcd:sky blue', 'xkcd:forest green', 'xkcd:dark red', 'xkcd:dark yellow']
    # One-hot encode the labels so each class can be scored one-vs-rest.
    y = label_binarize(y, classes=np.arange(n_classes))
    # For each class
    precision = dict()
    recall = dict()
    average_precision = dict()
    for i in range(n_classes):
        precision[i], recall[i], _ = precision_recall_curve(y[:, i], y_pred[:, i])
        average_precision[i] = average_precision_score(y[:, i], y_pred[:, i])
    # A "micro-average": quantifying score on all classes jointly
    precision["micro"], recall["micro"], _ = precision_recall_curve(y.ravel(), y_pred.ravel())
    average_precision["micro"] = average_precision_score(y, y_pred, average="micro")
    print('Average precision score, micro-averaged over all classes: {0:0.2f}'.format(average_precision["micro"]))
    plt.figure(figsize=(12, 12))
    # Draw the gray iso-F1 curves as a background reference grid.
    f_scores = np.linspace(0.2, 0.8, num=4)
    lines = []
    labels = []
    for f_score in f_scores:
        x = np.linspace(0.01, 1)
        # Precision as a function of recall along a constant-F1 curve.
        y_ = f_score * x / (2 * x - f_score)
        l, = plt.plot(x[y_ >= 0], y_[y_ >= 0], color='gray', alpha=0.5)
        plt.annotate('F1={0:0.1f}'.format(f_score), xy=(0.9, y_[45] + 0.02))
        lines.append(l)
        labels.append('F1 curves')
    l, = plt.plot(recall["micro"], precision["micro"], color='gold', lw=2)
    lines.append(l)
    labels.append('Micro-average (area = {0:0.2f})'
                  ''.format(average_precision["micro"]))
    for i in range(n_classes):
        l, = plt.plot(recall[i], precision[i], lw=2, color=colors[i])
        lines.append(l)
        labels.append('Class {0} (area = {1:0.2f})'.format(i, average_precision[i]))
    fig = plt.gcf()
    # Leave room under the axes for the two-column legend placed below.
    fig.subplots_adjust(bottom=0.25)
    plt.xlim([0.0, 1.0])
    plt.ylim([0.0, 1.05])
    plt.xticks(np.arange(.2, 1., .2))
    plt.xlabel('Recall', labelpad=20)
    plt.ylabel('Precision', labelpad=20)
    plt.title('Precision-Recall Curve', pad=20)
    plt.legend(lines, labels, loc=(0, -.2), ncol=2)
def feature_importance(data, feature_names = None, fontsize=20):
    """ Plot feature importance

    Draws a bar chart of ``data`` (one importance value per feature).
    When ``feature_names`` is omitted, features are labelled by index;
    otherwise the names are drawn rotated with extra bottom margin.
    """
    # NOTE(review): plt.clf() right after plt.subplots() clears the
    # figure that was just created, so plt.bar ends up drawing on a
    # fresh implicit axes -- confirm whether the explicit fig/ax pair
    # is still needed at all.
    fig, ax = plt.subplots(figsize=(24,18))
    plt.clf()
    plt.rc('font', size=fontsize)
    if feature_names is None:
        feature_names = range(0,len(data))
    else:
        # Named features: rotate the labels and make room for them.
        plt.xticks(rotation=90, fontsize=fontsize)
        fig.subplots_adjust(bottom=0.5)
    plt.yticks(fontsize=fontsize*2/3)
    plt.bar(feature_names, data, align='center')
    plt.xlabel('Components', fontsize=fontsize, labelpad=20)
    plt.ylabel('Importance', fontsize=fontsize, labelpad=20)
    #ax.tick_params(axis='both', which='major', labelsize=fontsize)
    #ax.tick_params(axis='both', which='minor', labelsize=fontsize)
    # plt.tight_layout()
    #fig.subplots_adjust(bottom=0.5)
    #self._save(plt, filename)
def read_data(fname_train, fname_test, options):
    """Load train (and optionally test) feature/label data from CSV.

    ``options`` must expose ``feature_params`` (the list of feature
    column names) and ``label`` (the label column name).  When
    ``fname_test`` is None the test slots of the result are None too.

    Returns (X_train, y_train, X_test, y_test).
    """
    train_df = pd.read_csv(fname_train)
    X_train = train_df.loc[:, options.feature_params]
    y_train = train_df.loc[:, options.label].values.ravel()
    print('Train data shape: {}'.format(X_train.shape))
    if fname_test is None:
        # No test set requested.
        return X_train, y_train, None, None
    test_df = pd.read_csv(fname_test)
    X_test = test_df.loc[:, options.feature_params]
    y_test = test_df.loc[:, options.label].values.ravel()
    print('Test data shape: {}'.format(X_test.shape))
    return X_train, y_train, X_test, y_test
def plot_class_hist(data_train, data_test,title='', fontsize=10):
    """Side-by-side histograms of the ``class`` column for the train
    and test DataFrames, with each non-empty bar annotated with its
    record count.
    """
    fig, ax = plt.subplots(figsize=(15,4))
    plt.rc('font', size=fontsize)
    tickfontsize=0.8*fontsize
    ##### Plot 1: train set class distribution
    ax = plt.subplot(1,2,1)
    data_train.loc[:, 'class'].hist(ax=ax, color='xkcd:tea')
    # Add 10% headroom so the count annotations fit above the bars.
    ymin, ymax = ax.get_ylim()
    ax.set_ylim((ymin, ymax*1.1))
    plt.title('Train set', fontsize=fontsize)
    plt.ylabel('Record count', fontsize=fontsize)
    plt.xlabel('Class', fontsize=fontsize)
    plt.yticks(fontsize=tickfontsize)
    plt.xticks(fontsize=tickfontsize)
    i=0
    # Annotate every non-empty bar with its count.
    for rect in ax.patches:
        if rect.get_height() > 0:
            height = rect.get_height()
            ax.annotate(f'{int(height)}', xy=(rect.get_x()+rect.get_width()/2, height),
                        xytext=(0, 5), textcoords='offset points', ha='center', va='bottom')
        i+=1
    plt.grid(False)
    plt.gca().spines['top'].set_visible(False)
    plt.gca().spines['right'].set_visible(False)
    ##### Plot 2: test set class distribution
    ax = plt.subplot(1,2,2)
    data_test.loc[:, 'class'].hist(ax=ax, color='xkcd:dust')
    ymin, ymax = ax.get_ylim()
    ax.set_ylim((ymin, ymax*1.1))
    plt.grid(False)
    plt.gca().spines['top'].set_visible(False)
    plt.gca().spines['right'].set_visible(False)
    plt.title('Test set', fontsize=fontsize)
    plt.ylabel('Record count', fontsize=fontsize)
    plt.xlabel('Class', fontsize=fontsize)
    plt.yticks(fontsize=tickfontsize)
    plt.xticks(fontsize=tickfontsize)
    i=0
    for rect in ax.patches:
        if rect.get_height() > 0:
            height = rect.get_height()
            ax.annotate(f'{int(height)}', xy=(rect.get_x()+rect.get_width()/2, height),
                        xytext=(0, 5), textcoords='offset points', ha='center', va='bottom')
        i+=1
    plt.suptitle(title, x=.22, y=1.03)
def plot_roc(y, y_pred, n_classes=4, fontsize=20):
    """
    Plot multiclass ROC

    One ROC curve per class (one-vs-rest) plus a macro-averaged curve;
    per-class AUC values are also printed.  ``y`` holds the true
    labels, ``y_pred`` the per-class scores (n_samples, n_classes).
    """
    colors=['xkcd:sky blue', 'xkcd:forest green', 'xkcd:dark red', 'xkcd:dark yellow']
    fig, ax1 = plt.subplots(figsize=(12,12))
    plt.clf()
    plt.rc('font', size=fontsize)
    # One-hot encode the labels for one-vs-rest scoring.
    y = label_binarize(y, classes=np.arange(n_classes))
    # Compute ROC curve and ROC area for each class
    fpr = dict()
    tpr = dict()
    roc_auc = dict()
    for i in range(n_classes):
        fpr[i], tpr[i], threshhold = roc_curve(y[:, i], y_pred[:,i])
        roc_auc[i] = auc(fpr[i], tpr[i])
        print('AUC for class {} is {}'.format(i, roc_auc[i]))
    # Compute average ROC curve and ROC area
    # First aggregate all false positive rates
    all_fpr = np.unique(np.concatenate([fpr[i] for i in range(n_classes)]))
    # Then interpolate all ROC curves at this points
    # NOTE(review): ``interp`` presumably comes from a
    # ``from scipy import interp`` (or numpy.interp) at the top of the
    # file; scipy.interp was removed in modern SciPy -- confirm the
    # import still resolves.
    mean_tpr = np.zeros_like(all_fpr)
    for i in range(n_classes):
        mean_tpr += interp(all_fpr, fpr[i], tpr[i])
    # Finally average it and compute AUC
    mean_tpr /= n_classes
    fpr["macro"] = all_fpr
    tpr["macro"] = mean_tpr
    roc_auc["macro"] = auc(fpr["macro"], tpr["macro"])
    # Diagonal chance line for reference.
    plt.plot([0, 1], [0, 1], 'k--')
    for i in range(n_classes):
        plt.plot(fpr[i], tpr[i], color=colors[i], label="Class {0} (AUC: {1:0.2f})".format(i, roc_auc[i]))
    plt.plot(fpr["macro"], tpr["macro"],
             label='Average (AUC: {0:0.2f})'
             ''.format(roc_auc["macro"]),
             color='navy', linestyle=':', linewidth=4)
    plt.xlabel('False positive rate', fontsize=fontsize)
    plt.ylabel('True positive rate', fontsize=fontsize)
    plt.title('ROC curve')
    plt.legend(loc="lower right")
    plt.yticks(fontsize=fontsize*2/3)
plt.xticks(fontsize=fontsize*2/3) | [
"roope.tervo@fmi.fi"
] | roope.tervo@fmi.fi |
e00b001540311fda894268668d373470d1a5a152 | 327e1a902370289e04be2e6f05c8e3b6fa96f960 | /leopulence_landingpage/about/views.py | b32d70ffc0efbc957bf0450dc87a7b8b747807ed | [] | no_license | leopulence/web-leopulence | eab30ac9a66a6b404f2c0f3adf6f4d6ecf9b541d | 4bf631c2ba7956aebe75de7241839d2e172b896a | refs/heads/master | 2020-04-10T18:41:09.083364 | 2019-03-28T22:06:37 | 2019-03-28T22:06:37 | 161,209,570 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 561 | py | from django.shortcuts import render
from django.views.generic.detail import DetailView
from django.views.generic.list import ListView
from .models import Pages, About
from django.utils import timezone
# Create your views here.
def about(request):
    """Render the static about page."""
    return render(request, "about/about.html")
class PagesDetailView(DetailView):
    """Detail view for a single ``Pages`` object; adds the current time
    to the template context under the ``now`` key."""
    model = Pages
    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        # Expose the request time so templates can render it.
        context['now'] = timezone.now()
        return context
class AboutDetailView(ListView):
model = About | [
"pugliesesm@gmail.com"
] | pugliesesm@gmail.com |
ebee29d527fc121a6fd629371d6c54f8e2bf964b | 3d85f716aaeb42592adcdeabd7cb76152b8104ed | /pugh_torch/tests/modules/test_meta.py | f0d9b2b90df93e06909ff520d3ab4bd9e412b746 | [
"MIT"
] | permissive | BrianPugh/pugh_torch | a78836b9c286814b8df5df57d67b8dbfc8d6412d | d620a518d78ec03556c5089bfc76e4cf7bd0cd70 | refs/heads/master | 2023-02-14T16:53:07.625650 | 2020-11-03T16:23:22 | 2020-11-03T16:23:22 | 294,986,957 | 4 | 1 | MIT | 2020-11-03T16:23:23 | 2020-09-12T16:54:00 | Python | UTF-8 | Python | false | false | 664 | py | import pytest
import torch
from torch import nn
import torch.nn.functional as F
import pugh_torch as pt
def test_batch_linear():
    """BatchLinear fed its own parameters as an explicit batched
    weight/bias must reproduce the un-batched forward pass (modulo the
    leading batch dimension of size 1)."""
    in_features, out_features = 2, 4
    inputs = torch.rand(10, in_features)
    layer = pt.modules.meta.BatchLinear(in_features, out_features)
    # Re-use the layer's own parameters, adding a batch dimension of 1.
    batched_weight = layer.weight.clone().unsqueeze(0)
    batched_bias = layer.bias.clone().unsqueeze(0)
    plain = layer(inputs)
    batched = layer(inputs, weight=batched_weight, bias=batched_bias)
    assert batched.shape[0] == 1
    assert plain.shape == batched.shape[1:]
    assert torch.isclose(plain, batched[0]).all()
| [
"noreply@github.com"
] | noreply@github.com |
9a328d6d6b1a72fcd0b7857e9fa683554aff05df | 8a171056b5d8d2ac184d66afa2c4573bf54ec8a0 | /Examples/Python/PrivateDict.py | f0be4c7e9a780bf748503242cb905588592de166 | [
"BSD-3-Clause",
"BSD-2-Clause"
] | permissive | radpointhq/GDCM | 7b03d5a27c0f1f733d336fa24317d58329a3cf03 | 63bd2fc37b6ed3630183a3c2d8ee561027a9e574 | refs/heads/master | 2022-05-21T19:05:01.961938 | 2022-03-15T17:36:51 | 2022-03-15T17:36:51 | 189,388,523 | 1 | 0 | NOASSERTION | 2022-03-15T17:36:54 | 2019-05-30T09:50:16 | C++ | UTF-8 | Python | false | false | 1,170 | py | ############################################################################
#
# Program: GDCM (Grassroots DICOM). A DICOM library
#
# Copyright (c) 2006-2011 Mathieu Malaterre
# All rights reserved.
# See Copyright.txt or http://gdcm.sourceforge.net/Copyright.html for details.
#
# This software is distributed WITHOUT ANY WARRANTY; without even
# the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
# PURPOSE. See the above copyright notice for more information.
#
############################################################################
"""
"""
import gdcm
import sys,os
if __name__ == "__main__":
  #gdcm.Trace.DebugOn()
  globInst = gdcm.Global.GetInstance()
  # Try to load Part3.xml file
  # This file is too big for being accessible directly at runtime.
  globInst.LoadResourcesFiles()
  # Get a private tag from the runtime dicts. LoadResourcesFiles could
  # have failed but this has no impact on the private dict
  d = globInst.GetDicts()
  # Python 2 print statements: look up the same private tag through the
  # combined dicts and then through the dedicated private dict.
  print d.GetDictEntry( gdcm.Tag(0x0029,0x0010) ,"SIEMENS CSA HEADER" )
  pd = d.GetPrivateDict()
  print pd.GetDictEntry( gdcm.PrivateTag(0x0029,0x0010,"SIEMENS CSA HEADER") )
| [
"mathieu.malaterre@gmail.com"
] | mathieu.malaterre@gmail.com |
92c59a1156df87073eec8744b9a4011e1e6fd657 | f07e66293cc41a9fe71fc44f765b432fd7a0997c | /selfdrive/controls/lib/cluster/SConscript | 97eb4300d4da6618962e0430ca534fc43fb0640f | [
"MIT",
"LicenseRef-scancode-warranty-disclaimer",
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference",
"BSD-2-Clause"
] | permissive | kegman/openpilot | c9ba96a72d905956f02c684e065091e023942883 | b35291c91783657a5fc83abfff012d3bb49dd89f | refs/heads/kegman-ultimate | 2022-05-22T17:07:16.656336 | 2021-10-25T13:35:28 | 2021-10-25T13:35:28 | 229,979,925 | 105 | 212 | MIT | 2022-03-13T05:47:51 | 2019-12-24T17:27:11 | C | UTF-8 | Python | false | false | 185 | Import('env')
# Build the fastcluster shared library from its C++ source.
fc = env.SharedLibrary("fastcluster", "fastcluster.cpp")
# TODO: how do I gate on test
#env.Program("test", ["test.cpp"], LIBS=[fc])
#valgrind --leak-check=full ./test
| [
"user@comma.ai"
] | user@comma.ai | |
6db8257a238dd3a6e98bbf555a2b2824bd7ba4f8 | d8f79f29454c406b9bfaa40a6a8a6f3786ed8f9a | /project/leads/migrations/0001_initial.py | 1044ead7c13c25581bf6044c8b19b43700a869c6 | [] | no_license | deniztetik/marshmallows | b89164d32ffacf1711713c5167fddb8821bd85d2 | 9b33912df141c0f221dd92635961435470d10b8f | refs/heads/master | 2020-03-27T20:16:16.948638 | 2018-10-09T19:43:39 | 2018-10-09T19:43:39 | 147,053,855 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 680 | py | # Generated by Django 2.1 on 2018-08-31 01:29
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated initial migration: creates the ``Lead`` table.

    NOTE: migration files are generated artifacts -- do not hand-edit
    the schema here; create a follow-up migration instead.
    """
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Lead',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=100)),
                ('email', models.EmailField(max_length=254)),
                ('message', models.CharField(max_length=300)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
            ],
        ),
    ]
| [
"deniz.tetik@oracle.com"
] | deniz.tetik@oracle.com |
f68c67977383e7d333b30f0ea34c322410459cb5 | 4fee75068edcf2fb64074e84b150ad7a744e55df | /stock_market.py | 68549a9b79b0e8db27072edb80be07758f86993f | [] | no_license | satee143/zebuapi | 5311e1b7011dc86e311fddc0355f02cc89474205 | 1fa57ffc1802fac2bfa6bee06125a2ea68c0756a | refs/heads/master | 2022-12-07T00:20:42.126203 | 2020-08-26T18:15:28 | 2020-08-26T18:15:28 | 280,687,836 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 91 | py | from nsetools import Nse
# Query the NSE quote for the 'infy' (Infosys) symbol and show the
# type of the returned object.
nse = Nse()
q = nse.get_quote('infy')
print(type(q))
# pprint(q)
| [
"sdoosa@insulet.com"
] | sdoosa@insulet.com |
f3ad98569986ba7cd60f02f1aa968eddfee2e72a | e742a41f3a8f4454fd02153e71e4e0a455547249 | /selenium/implicitWait.py | e9a31496ed1775ad6bb0951f998c1874ad18fcd7 | [] | no_license | Javierggonzalvez/cursos | 25b3709351234a5734dbb92ff5b5456c65b94125 | e6fada12b3b543c70e7dabe0c3c1a2a73de52a8e | refs/heads/master | 2022-12-08T11:38:34.372208 | 2019-11-21T20:50:24 | 2019-11-21T20:50:24 | 223,262,972 | 0 | 0 | null | 2022-11-22T02:24:33 | 2019-11-21T20:48:56 | Python | UTF-8 | Python | false | false | 685 | py | import unittest
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
class usando_unittest(unittest.TestCase):
    """Demonstrates Selenium's implicit wait against Google search."""
    def setUp(self):
        # Chromedriver path is hard-coded for a local Linux install.
        self.driver = webdriver.Chrome(
            executable_path="/opt/google/chrome/chromedriver")
    def test_implicitWait(self):
        driver = self.driver
        # Poll up to 5 seconds for elements; returns as soon as found.
        driver.implicitly_wait(5)
        # NOTE(review): the URL is missing a slash ("http:/"), and
        # find_element_by_name was removed in Selenium 4 -- confirm the
        # pinned selenium version before reusing this example.
        driver.get("http:/www.google.com")
        myDynamicElement = driver.find_element_by_name('q')
if __name__ == '__main__':
    # Run the test case when the file is executed directly.
    unittest.main()
| [
"javierggonzalvez@gmail.com"
] | javierggonzalvez@gmail.com |
582ffbfd29829a394d74241d4460e2a4b0b883f8 | 1a788746fc76424947a0c6d56fcea9e4ddedd259 | /Question3.py | 6e4bb66a6feff67bfffe87631565b488771cac90 | [] | no_license | Akhilgupta17/Acadview_Assignment5 | dfad17209e6cdc07630b923267506dcd2e4ce16e | 42600beb03bd7608b494bfd0799866c3bf220130 | refs/heads/master | 2020-03-19T23:54:08.227451 | 2018-06-12T07:23:48 | 2018-06-12T07:23:48 | 137,024,307 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 432 | py | a=int(input("First:"))
b=int(input("Second:"))
c=int(input("Third:"))
# Report which of the three ages is the oldest.
if a>b and a>c:
    print("first Is Older")
elif b>a and b>c:
    print("Second Is Older")
elif a==b==c:
    print("All Are Equal")
else:
    print("Third Is Older")
# NOTE(review): partial ties (e.g. a == c > b) fall through to the
# "Third Is Older" branch -- confirm whether that is intended.
# Report which of the three ages is the youngest (mirror of the block
# above with the comparisons inverted).
if a<b and a<c:
    print("first Is Younger")
elif b<a and b<c:
    # Fix: this branch previously re-tested ``b>a and b>c`` (the
    # *oldest* condition), so "Second Is Younger" was printed exactly
    # when the second age was the oldest, never when it was youngest.
    print("Second Is Younger")
elif a==b==c:
    print("All Are Equal")
else:
    print("Third Is Younger")
| [
"Akhilgupta1711@gmail.com"
] | Akhilgupta1711@gmail.com |
55080cdf9d5d59207c01870952c49ae87b944c6b | eaf1eaa2756ffdb4405b7d5048212f10747c8d5a | /karatsuba_multiplication.py | 6c8f851380fe250d7fff02d68be02552d4974674 | [] | no_license | ingridcrant/algorithms-specialization-course | e87ebfd21e64150ebacf4c316553c20bb1d69422 | d87c075e365a2e70a6c87db276f2adf4f52626d8 | refs/heads/master | 2023-06-19T23:53:48.023782 | 2021-07-20T23:55:55 | 2021-07-20T23:55:55 | 381,830,401 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 667 | py | from math import log10
# karatsuba multiplication
# python implementation from Tim Roughgarden's Algorithms course
def karatsuba(x, y):
    """Multiply two non-negative integers with Karatsuba's
    divide-and-conquer scheme: three recursive products instead of the
    four of the naive split."""
    # Base case: a single-digit operand, multiply directly.
    if x < 10 or y < 10:
        return x * y
    else:
        n = max(int(log10(x)), int(log10(y))) + 1 # max number of digits
        n_over_2 = n // 2
        # split x in two halves
        a = x // 10**n_over_2
        b = x % 10**n_over_2
        # split y in two halves
        c = y // 10**n_over_2
        d = y % 10**n_over_2
        ac = karatsuba(a, c)
        bd = karatsuba(b, d)
        # Gauss trick: one extra product recovers ad + bc.
        ad_plus_bc = karatsuba(a+b, c+d) - ac - bd
return (ac * 10**(2*n_over_2)) + (ad_plus_bc * 10**n_over_2) + bd | [
"ingridcrant@gmail.com"
] | ingridcrant@gmail.com |
618eea552428cab909fb49cb511903632933d2f5 | 9497e845b38c2f1db1951d4d0d6ec9bbd1521d37 | /user/views.py | eaf7aeebe654bf99b2540d0a3f25241d3588f999 | [] | no_license | li2356927598/fresh | d2e5477253946ea4a290d467e18204babe5fc04e | 849cb449a9cc3259d28f8ab8dce4c02d07c7bfdb | refs/heads/master | 2020-04-01T23:44:36.867111 | 2018-10-18T01:02:49 | 2018-10-18T01:02:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,082 | py | from django.shortcuts import render, redirect
from django.http import JsonResponse, HttpResponseRedirect
from django.contrib.auth.hashers import make_password, check_password
from django.core.paginator import Paginator
from user.models import UserModel
from user.forms import UserRegisterForm, UserLoginForm
from user.utils import login_required
from goods.models import GoodsModel
from order.models import OrderModel
# Create your views here.
# Registration view whose form validation only runs on POST requests.
def register_post(request):
    """Registration view backed by ``UserLoginForm``.

    On POST, validate the submitted form and return the field errors as
    JSON when validation fails.  On GET (and after a *valid* POST,
    which falls through) render the registration page with a fresh
    form.

    NOTE(review): a valid POST does not create a user here -- account
    creation appears to live in ``register`` below; confirm this view
    is only meant for form validation.
    """
    if request.method == "POST":
        user = UserLoginForm(request.POST)
        # print(user)
        # Validate the form data.
        if not user.is_valid():
            # print(user.errors.get_json_data)
            # # print(user.)
            # # print(vaild)
            return JsonResponse(user.errors.get_json_data(), safe=False)
    user = UserLoginForm()
    return render(request, "user/register_post.html", {"user": user})
def register(request):
    """Registration endpoint.

    GET renders the registration page; POST creates a ``UserModel``
    from the posted fields (password stored hashed) and returns a JSON
    success payload.
    """
    cookie = request.COOKIES
    print(cookie)
    if request.method == "POST":
        # Ignore the case where parameters are empty (only the username
        # is actually required).
        username = request.POST.get("username", "")
        if not username:
            return JsonResponse({"error": "请输入用户名"})
        password = request.POST.get("password", "")
        phone = request.POST.get("phone", "")
        address = request.POST.get("address", "")
        email = request.POST.get("email", "")
        # Create the new user.
        user = UserModel()
        user.username = username
        # Store the password hashed, never in plain text.
        user.password = make_password(password)
        user.phone = phone
        user.address = address
        user.email = email
        user.save()
        return JsonResponse({"user": "success"})
    return render(request, "user/register.html")
    # return render(request, "test.html")
def login(request):
    """Login endpoint.

    POST authenticates the user, optionally remembers the username in a
    cookie, stores the user id/name in the session and redirects to the
    URL saved in the ``next_url`` cookie.  GET renders the login page.
    """
    if request.method == "POST":
        username = request.POST.get("username", "")
        password = request.POST.get("password", "")
        # ``jizhu`` ("remember me") carries a value of 1 when the
        # checkbox was ticked; otherwise it is absent and defaults to 0.
        jizhu = request.POST.get("jizhu", 0)
        # Look up the user object by username.
        user = UserModel.objects.filter(username=username)
        # If nothing matched, the username is wrong; otherwise verify
        # the password.
        # Wrong password: re-render the login page with an error flag.
        if user:
            user = user[0]
            # Check whether the password is correct.
            is_password = check_password(password, user.password)
            if not is_password:
                # Wrong password.
                return render(request, "user/login.html", {"username": username, "is_password": 1, "is_user": 0})
            else:
                # Correct password.
                # Build the redirect response object first.
                next_url = request.COOKIES.get("next_url", "/account/login/")
                response = HttpResponseRedirect(next_url)
                # Remember the username.
                # Set (or expire) the cookie.
                if jizhu != 0:
                    response.set_cookie("username", username)
                else:
                    response.set_cookie("username", "", max_age=-1) # max_age is the lifetime; -1 expires it immediately
                # Put the user id and username into the session.
                request.session["user_id"] = user.id
                request.session["username"] = username
                return response
            # return render(request, "user/index.html", {"username": user.username})
        else:
            return render(request, "user/login.html", {"username": username, "is_user": 1, "is_password": 0})
    # cookie = request.COOKIES
    # Inspect which cookies are present.
    # print(cookie)
    return render(request, "user/login.html")
# Log out.
def logout(request):
    """Drop the login keys from the session and return to the login
    page."""
    del request.session["user_id"]
    del request.session["username"]
    return redirect("/account/login")
@login_required
def info(request):
    """User profile page: basic account info plus the goods the user
    browsed most recently."""
    user_id = request.session["user_id"]
    user = UserModel.objects.get(id=user_id)
    user_info = {
        "username": user.username,
        "phone": user.phone,
        "address": user.address
    }
    # Take the list of goods ids from the session (it is written by the
    # goods detail view).
    goods_id_list = request.session.get(str(user_id), [])
    # The user's recently browsed goods.
    goods_list = []
    # Walk the id list and look up each goods object, preserving the
    # browsing order.
    for goods_id in goods_id_list:
        goods_list.append(GoodsModel.objects.get(id=goods_id))
    context = {"user_info": user_info,
               "goods_list": goods_list,
               "title": "用户中心",
               "active": "info"}
    return render(request, "user/user_center_info.html", context)
@login_required
def all_order(request, page_num):
    """All orders of the logged-in user, paginated."""
    # Fetch every order belonging to the currently logged-in user.
    user_id = request.session.get("user_id")
    all_order = OrderModel.objects.filter(user_id=user_id)
    # Show 2 orders per page.
    paginator = Paginator(all_order, 2)
    page = paginator.page(page_num)
    context = {
        "page": page,
        "page_num": page_num,
        "title": "全部订单",
        "active": "all_order"
    }
    return render(request, "user/user_center_order.html", context)
def upload(request):
    """File-upload endpoint.

    GET renders the upload form; POST streams the uploaded file to disk
    in chunks and returns a JSON success payload.

    NOTE(review): the file is always saved as ``test.<ext>`` in the
    working directory, so concurrent uploads overwrite each other, and
    the extension comes straight from the client -- confirm this is
    demo-only code before exposing it.
    """
    if request.method == "GET":
        return render(request, "upload.html")
    if request.method == "POST":
        myfile = request.FILES.get("myfile")
        ext = myfile.name.split(".")[-1]
        filename = "test." + ext
        with open(filename, "wb") as fp:
            # Write in chunks to avoid holding the whole file in memory.
            for chunk in myfile.chunks():
                fp.write(chunk)
        return JsonResponse({"result": "success"})
| [
"yangguanyu_2006@126.com"
] | yangguanyu_2006@126.com |
854841144a32380166855fef783a5a114d60a6a8 | c3fc7fef9b80121fdc9171f243afc18f7270c7ad | /testrun.py | f450f0eecd98619d82f4649aa8eba141c26022c6 | [] | no_license | Olaitan74/Wave2-solutions | 4b5682028063746a2814dcc9e9e31345c4481e21 | ea4f73872d1948365ee2c0730daf5b28849b5c3c | refs/heads/master | 2022-11-29T13:18:41.115945 | 2020-08-03T19:15:15 | 2020-08-03T19:15:15 | 284,054,980 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 72 | py | pac = {'A' : 1, 'B' : 2, 'C' : 3}
print(pac.keys(), sorted(pac.keys())) | [
"noreply@github.com"
] | noreply@github.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.